author    Kenneth Graunke <kenneth@whitecape.org>    2013-10-15 16:00:16 -0700
committer Kenneth Graunke <kenneth@whitecape.org>    2013-10-17 14:27:03 -0700
commit    80a9c42e9e9012bf0b4c143f1b8dd325c8c88120 (patch)
tree      a34861fd23922621c6a6b0b6e8706e9d86a317fa /src/mesa/drivers/dri/i965/intel_batchbuffer.c
parent    6613f346acc54a161046ee77e4a017c3e3d1a99f (diff)
i965: Un-virtualize brw_new_batch().
Since the i915/i965 split, there's only one implementation of this virtual function. We may as well just call it directly.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
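To illustrate the pattern being removed, here is a minimal, self-contained C sketch of the same de-virtualization step. The names (struct device, device_vtbl, device_new_batch) are hypothetical stand-ins, not Mesa's actual brw_context/brw_vtbl definitions:

/* Hypothetical sketch: collapsing a vtable hook that has only one
 * remaining implementation into a direct static call. */
#include <stdio.h>

struct device;

struct device_vtbl {
   /* Before: separate drivers could each install their own hook here. */
   void (*new_batch)(struct device *dev);
};

struct device {
   struct device_vtbl vtbl;
   int batch_count;
};

/* After: with a single backend left, the hook becomes an ordinary
 * static function and callers invoke it directly. */
static void
device_new_batch(struct device *dev)
{
   dev->batch_count++;
   printf("starting batch %d\n", dev->batch_count);
}

int
main(void)
{
   struct device dev = { .vtbl = { .new_batch = device_new_batch },
                         .batch_count = 0 };

   dev.vtbl.new_batch(&dev);  /* old: indirect call through the vtable */
   device_new_batch(&dev);    /* new: direct call, trivially inlinable */
   return 0;
}

Besides dropping a pointer indirection, the direct call lets brw_new_batch() become static, which is exactly what the first hunk below does.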
Diffstat (limited to 'src/mesa/drivers/dri/i965/intel_batchbuffer.c')
-rw-r--r--   src/mesa/drivers/dri/i965/intel_batchbuffer.c   43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 20a6d83..6d1ae79 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -173,6 +173,47 @@ do_batch_dump(struct brw_context *brw)
 }
 
 /**
+ * Called when starting a new batch buffer.
+ */
+static void
+brw_new_batch(struct brw_context *brw)
+{
+   /* If the kernel supports hardware contexts, then most hardware state is
+    * preserved between batches; we only need to re-emit state that is required
+    * to be in every batch.  Otherwise we need to re-emit all the state that
+    * would otherwise be stored in the context (which for all intents and
+    * purposes means everything).
+    */
+   if (brw->hw_ctx == NULL)
+      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
+
+   brw->state.dirty.brw |= BRW_NEW_BATCH;
+
+   /* Assume that the last command before the start of our batch was a
+    * primitive, for safety.
+    */
+   brw->batch.need_workaround_flush = true;
+
+   brw->state_batch_count = 0;
+
+   brw->ib.type = -1;
+
+   /* Mark that the current program cache BO has been used by the GPU.
+    * It will be reallocated if we need to put new programs in for the
+    * next batch.
+    */
+   brw->cache.bo_used_by_gpu = true;
+
+   /* We need to periodically reap the shader time results, because rollover
+    * happens every few seconds.  We also want to see results every once in a
+    * while, because many programs won't cleanly destroy our context, so the
+    * end-of-run printout may not happen.
+    */
+   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+      brw_collect_and_report_shader_time(brw);
+}
+
+/**
  * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
  * sending it off.
  *
@@ -245,7 +286,7 @@ do_flush_locked(struct brw_context *brw)
       fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
       exit(1);
    }
-   brw->vtbl.new_batch(brw);
+   brw_new_batch(brw);
 
    return ret;
 }
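The BRW_NEW_CONTEXT and BRW_NEW_BATCH bits set in brw_new_batch() feed i965's dirty-flag driven state upload, where each tracked piece of state is re-emitted only when a bit it watches is set. The following is a rough, hypothetical sketch of that mechanism; the names (ctx, atom, upload_state, BIT_NEW_*) are illustrative, not the real brw_state_upload.c machinery:

/* Rough sketch of dirty-bit driven state re-emission, in the spirit of
 * i965's tracked-state atoms.  All names here are illustrative. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_NEW_BATCH   (1u << 0)
#define BIT_NEW_CONTEXT (1u << 1)

struct ctx {
   uint32_t dirty;
};

struct atom {
   const char *name;
   uint32_t    watch;              /* bits that invalidate this atom */
   void      (*emit)(struct ctx *c);
};

static void emit_blend(struct ctx *c) { (void)c; printf("re-emit blend state\n"); }
static void emit_urb(struct ctx *c)   { (void)c; printf("re-emit URB config\n"); }

static const struct atom atoms[] = {
   { "blend", BIT_NEW_CONTEXT,                 emit_blend },
   { "urb",   BIT_NEW_BATCH | BIT_NEW_CONTEXT, emit_urb   },
};

/* Run before each draw: re-emit only the atoms whose watched bits are set. */
static void
upload_state(struct ctx *c)
{
   for (size_t i = 0; i < sizeof(atoms) / sizeof(atoms[0]); i++)
      if (atoms[i].watch & c->dirty)
         atoms[i].emit(c);
   c->dirty = 0;
}

int
main(void)
{
   struct ctx c = { .dirty = BIT_NEW_BATCH };  /* as a new batch would set */
   upload_state(&c);                           /* only "urb" re-emits */
   return 0;
}

This is why the hunk above only sets dirty bits rather than emitting state itself: without a hardware context, BRW_NEW_CONTEXT forces everything watching it to be re-emitted, while BRW_NEW_BATCH covers the per-batch minimum.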