path: root/src/mesa/drivers/dri/i965/gen6_queryobj.c
author    Kenneth Graunke <kenneth@whitecape.org>    2013-07-02 23:17:14 -0700
committer Kenneth Graunke <kenneth@whitecape.org>    2013-07-09 14:08:53 -0700
commit    ca437579b3974b91a5298707c459908a628c1098 (patch)
tree      294ac2ad5aa080554c23ebb79552e50b23fbcf31 /src/mesa/drivers/dri/i965/gen6_queryobj.c
parent    86f2711722dc10c25c2fabc09d8bd020a1ba6029 (diff)
i965: Pass brw_context to functions rather than intel_context.
This makes brw_context available in every function that used intel_context.
This makes it possible to start migrating fields from intel_context to
brw_context.

Surprisingly, this actually removes some code, as functions that use
OUT_BATCH don't need to declare "intel"; they just use "brw."

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
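The conversion pattern is the same in every hunk below: each helper now takes
a struct brw_context * and, where it still needs intel_context fields, derives
a local pointer via &brw->intel. The following is a minimal stand-alone sketch
of that pattern; the stub struct definitions and helper names are illustrative
only, not the real Mesa declarations (those live in intel_context.h and
brw_context.h).

    #include <stdio.h>

    /* Illustrative stubs only -- not the real Mesa types. */
    struct intel_context {
       int gen;                       /* hardware generation, as tested in the hunks */
    };

    struct brw_context {
       struct intel_context intel;    /* brw_context embeds intel_context */
    };

    /* Old convention: helper takes intel_context directly. */
    static void
    helper_old(struct intel_context *intel)
    {
       if (intel->gen == 6)
          puts("Sandybridge workaround path");
    }

    /* New convention: helper takes brw_context and derives intel locally
     * until the remaining fields are migrated. */
    static void
    helper_new(struct brw_context *brw)
    {
       struct intel_context *intel = &brw->intel;

       if (intel->gen == 6)
          puts("Sandybridge workaround path");
    }

    int
    main(void)
    {
       struct brw_context brw = { .intel = { .gen = 6 } };

       helper_old(&brw.intel);
       helper_new(&brw);
       return 0;
    }

Once all callers and helpers are routed through brw, the temporary intel local
(and eventually the fields of the embedded struct itself) can be dropped,
which is the migration the commit message refers to.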
Diffstat (limited to 'src/mesa/drivers/dri/i965/gen6_queryobj.c')
-rw-r--r--  src/mesa/drivers/dri/i965/gen6_queryobj.c | 49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c
index 8c38bd5..918e50c 100644
--- a/src/mesa/drivers/dri/i965/gen6_queryobj.c
+++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c
@@ -43,8 +43,9 @@
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
*/
static void
-write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
/* Emit workaround flushes: */
if (intel->gen == 6) {
/* The timestamp write below is a non-zero post-sync op, which on
@@ -75,11 +76,12 @@ write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
*/
static void
-write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
+write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
/* Emit Sandybridge workaround flush: */
if (intel->gen == 6)
- intel_emit_post_sync_nonzero_flush(intel);
+ intel_emit_post_sync_nonzero_flush(brw);
BEGIN_BATCH(5);
OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
@@ -102,12 +104,13 @@ write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
* function also performs a pipeline flush for proper synchronization.
*/
static void
-write_reg(struct intel_context *intel,
+write_reg(struct brw_context *brw,
drm_intel_bo *query_bo, uint32_t reg, int idx)
{
+ struct intel_context *intel = &brw->intel;
assert(intel->gen >= 6);
- intel_batchbuffer_emit_mi_flush(intel);
+ intel_batchbuffer_emit_mi_flush(brw);
/* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
* read a full 64-bit register, we need to do two of them.
@@ -128,20 +131,21 @@ write_reg(struct intel_context *intel,
}
static void
-write_primitives_generated(struct intel_context *intel,
+write_primitives_generated(struct brw_context *brw,
drm_intel_bo *query_bo, int idx)
{
- write_reg(intel, query_bo, CL_INVOCATION_COUNT, idx);
+ write_reg(brw, query_bo, CL_INVOCATION_COUNT, idx);
}
static void
-write_xfb_primitives_written(struct intel_context *intel,
+write_xfb_primitives_written(struct brw_context *brw,
drm_intel_bo *query_bo, int idx)
{
+ struct intel_context *intel = &brw->intel;
if (intel->gen >= 7) {
- write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
+ write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
} else {
- write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
+ write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
}
}
@@ -152,6 +156,7 @@ static void
gen6_queryobj_get_results(struct gl_context *ctx,
struct brw_query_object *query)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
if (query->bo == NULL)
@@ -162,7 +167,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
* when mapped.
*/
if (drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (unlikely(intel->perf_debug)) {
if (drm_intel_bo_busy(query->bo)) {
@@ -243,6 +248,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
static void
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
@@ -271,21 +277,21 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
* obtain the time elapsed. Notably, this includes time elapsed while
* the system was doing other work, such as running other applications.
*/
- write_timestamp(intel, query->bo, 0);
+ write_timestamp(brw, query->bo, 0);
break;
case GL_ANY_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
case GL_SAMPLES_PASSED_ARB:
- write_depth_count(intel, query->bo, 0);
+ write_depth_count(brw, query->bo, 0);
break;
case GL_PRIMITIVES_GENERATED:
- write_primitives_generated(intel, query->bo, 0);
+ write_primitives_generated(brw, query->bo, 0);
break;
case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
- write_xfb_primitives_written(intel, query->bo, 0);
+ write_xfb_primitives_written(brw, query->bo, 0);
break;
default:
@@ -305,26 +311,26 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
static void
gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
- write_timestamp(intel, query->bo, 1);
+ write_timestamp(brw, query->bo, 1);
break;
case GL_ANY_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
case GL_SAMPLES_PASSED_ARB:
- write_depth_count(intel, query->bo, 1);
+ write_depth_count(brw, query->bo, 1);
break;
case GL_PRIMITIVES_GENERATED:
- write_primitives_generated(intel, query->bo, 1);
+ write_primitives_generated(brw, query->bo, 1);
break;
case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
- write_xfb_primitives_written(intel, query->bo, 1);
+ write_xfb_primitives_written(brw, query->bo, 1);
break;
default:
@@ -355,6 +361,7 @@ static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
*/
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
+ struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct brw_query_object *query = (struct brw_query_object *)q;
@@ -366,7 +373,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
* the async query will return true in finite time.
*/
if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_flush(brw);
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
gen6_queryobj_get_results(ctx, query);