author     Matt Turner <mattst88@gmail.com>   2015-11-24 17:17:29 -0800
committer  Matt Turner <mattst88@gmail.com>   2016-03-30 13:12:34 -0700
commit     a74fc3fe8ada87e1fedeea86f2d93f736a1217bc (patch)
tree       85f6845d7d204e081053c91a0ddfc4878db0663e /src/mesa/drivers/dri/i965/intel_batchbuffer.h
parent     1faca438bdbf11d85a6158d41ea91cab40fc2033 (diff)
i965: Don't inline intel_batchbuffer_require_space().
It's called by the inline intel_batchbuffer_begin() function, which is itself used in BEGIN_BATCH. So in a sequence of code emitting multiple packets, we have inlined this ~200-byte function multiple times. Making it an out-of-line function presumably improves icache usage.

Improves performance of Gl32Batch7 by 3.39898% +/- 0.358674% (n=155) on Ivybridge.

Reviewed-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
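For context, a minimal sketch (not part of this commit) of the emission pattern the message describes: each BEGIN_BATCH() expands to intel_batchbuffer_begin(), which calls intel_batchbuffer_require_space(), so while the latter was inline every packet emitted in a function like the one below carried its own ~200-byte copy of its body. The helper name is hypothetical; BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH and MI_NOOP are the driver's existing macros, and the includes are the driver's own headers.

#include "brw_context.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

/* Hypothetical emit helper, for illustration only.  Each BEGIN_BATCH()
 * below is one intel_batchbuffer_require_space() call site: before this
 * change the compiler inlined the ~200-byte body at both sites; after it,
 * each site is a single out-of-line call.
 */
static void
emit_two_noop_packets(struct brw_context *brw)
{
   BEGIN_BATCH(1);               /* 1st require_space() call site */
   OUT_BATCH(MI_NOOP);           /* MI_NOOP: 1-dword no-op command */
   ADVANCE_BATCH();

   BEGIN_BATCH(1);               /* 2nd call site */
   OUT_BATCH(MI_NOOP);
   ADVANCE_BATCH();
}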
Diffstat (limited to 'src/mesa/drivers/dri/i965/intel_batchbuffer.h')
-rw-r--r--   src/mesa/drivers/dri/i965/intel_batchbuffer.h   28
1 file changed, 2 insertions(+), 26 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index f473690..aa1dc38 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -44,6 +44,8 @@ void intel_batchbuffer_init(struct brw_context *brw);
void intel_batchbuffer_free(struct brw_context *brw);
void intel_batchbuffer_save_state(struct brw_context *brw);
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
+void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+                                     enum brw_gpu_ring ring);

int _intel_batchbuffer_flush(struct brw_context *brw,
                             const char *file, int line);
@@ -117,32 +119,6 @@ intel_batchbuffer_emit_float(struct brw_context *brw, float f)
}

static inline void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
-                                enum brw_gpu_ring ring)
-{
-   /* If we're switching rings, implicitly flush the batch. */
-   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
-       brw->gen >= 6) {
-      intel_batchbuffer_flush(brw);
-   }
-
-#ifdef DEBUG
-   assert(sz < BATCH_SZ - BATCH_RESERVED);
-#endif
-   if (intel_batchbuffer_space(brw) < sz)
-      intel_batchbuffer_flush(brw);
-
-   enum brw_gpu_ring prev_ring = brw->batch.ring;
-   /* The intel_batchbuffer_flush() calls above might have changed
-    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
-    */
-   brw->batch.ring = ring;
-
-   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
-      intel_batchbuffer_emit_render_ring_prelude(brw);
-}
-
-static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
   intel_batchbuffer_require_space(brw, n * 4, ring);
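Since this diff is limited to the header, the .c side of the commit is not shown; presumably it adds a non-inline definition of intel_batchbuffer_require_space() to intel_batchbuffer.c. A sketch of what that definition would look like, reconstructed from the inline body removed above rather than taken from the actual .c hunk:

/* Sketch of the out-of-line definition this commit presumably adds to
 * intel_batchbuffer.c; the body mirrors the inline version removed above.
 */
void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       brw->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(brw) < sz)
      intel_batchbuffer_flush(brw);

   enum brw_gpu_ring prev_ring = brw->batch.ring;
   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;

   if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
      intel_batchbuffer_emit_render_ring_prelude(brw);
}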