author     Jason Ekstrand <jason.ekstrand@intel.com>    2014-12-08 17:34:52 -0800
committer  Jason Ekstrand <jason.ekstrand@intel.com>    2015-01-15 07:20:21 -0800
commit     534d145e5ea039d57833395a36eed90721f6b272
tree       0e08819ec9aa87417e8a5fe499e75828cbfa1406 /src/glsl
parent     6a5604ca6a7346278188cb05996444a5091070b5
nir: Use a source for uniform buffer indices instead of an index
In GLSL-to-NIR we were just setting the base index to 0 whenever there was an indirect, so having it expressed as a sum makes no sense. Also, while a base offset may make sense for the memory location (first element in the array, etc.), it makes less sense for the actual uniform buffer index. This may change later, but it seems to make more sense for now.

Reviewed-by: Connor Abbott <cwabbott0@gmail.com>
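In practice this means a UBO load is now constructed roughly as follows (a minimal sketch assuming the nir_intrinsic_instr fields visible in the hunks below; the variable names are illustrative, not from the patch):

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(shader, nir_intrinsic_load_ubo);
   load->num_components = num_components;
   load->const_index[0] = base_offset;   /* base offset */
   load->const_index[1] = 1;             /* number of vec4's */
   load->src[0] = buffer_index;          /* UBO buffer index; no longer
                                          * required to be constant */
   /* for load_ubo_indirect, the indirect offset becomes src[1] */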
Diffstat (limited to 'src/glsl')
 src/glsl/nir/glsl_to_nir.cpp  |  8 ++++----
 src/glsl/nir/nir_intrinsics.h | 27 +++++++++++++--------------
 2 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/src/glsl/nir/glsl_to_nir.cpp b/src/glsl/nir/glsl_to_nir.cpp
index 70f0c85..78070af 100644
--- a/src/glsl/nir/glsl_to_nir.cpp
+++ b/src/glsl/nir/glsl_to_nir.cpp
@@ -897,11 +897,11 @@ nir_visitor::visit(ir_expression *ir)
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(this->shader, op);
load->num_components = ir->type->vector_elements;
- load->const_index[0] = ir->operands[0]->as_constant()->value.u[0];
- load->const_index[1] = const_index ? const_index->value.u[0] : 0; /* base offset */
- load->const_index[2] = 1; /* number of vec4's */
+ load->const_index[0] = const_index ? const_index->value.u[0] : 0; /* base offset */
+ load->const_index[1] = 1; /* number of vec4's */
+ load->src[0] = evaluate_rvalue(ir->operands[0]);
if (!const_index)
- load->src[0] = evaluate_rvalue(ir->operands[1]);
+ load->src[1] = evaluate_rvalue(ir->operands[1]);
add_instr(&load->instr, ir->type->vector_elements);
/*
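On the consuming side, a backend now takes the buffer index from the source array instead of const_index[0]. A hedged sketch of such a reader (the function and its control flow are hypothetical; the field layout comes from the hunk above):

   static void
   visit_load_ubo(nir_intrinsic_instr *instr)
   {
      nir_src buffer_index = instr->src[0];          /* possibly non-constant */
      unsigned base_offset = instr->const_index[0];  /* base offset */
      unsigned num_vec4s = instr->const_index[1];    /* number of vec4's */

      if (instr->intrinsic == nir_intrinsic_load_ubo_indirect) {
         /* the extra source is added to the constant address */
         nir_src indirect = instr->src[1];
         /* ... emit an indirectly addressed load ... */
      } else {
         /* ... emit a load at the constant address ... */
      }
   }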
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index e66273d..d94866c 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -101,11 +101,11 @@ SYSTEM_VALUE(invocation_id, 1)
/*
* The first index is the address to load from, and the second index is the
- * number of array elements to load. For UBO's (and SSBO's), the first index
- * is the UBO buffer index (TODO nonconstant UBO buffer index) and the second
- * and third indices play the role of the first and second indices in the other
- * loads. Indirect loads have an additional register input, which is added
- * to the constant address to compute the final address to load from.
+ * number of array elements to load. Indirect loads have an additional
+ * register input, which is added to the constant address to compute the
+ * final address to load from. For UBO's (and SSBO's), the first source is
+ * the (possibly constant) UBO buffer index and the indirect (if it exists)
+ * is the second source.
*
* For vector backends, the address is in terms of one vec4, and so each array
* element is +4 scalar components from the previous array element. For scalar
@@ -113,16 +113,15 @@ SYSTEM_VALUE(invocation_id, 1)
* elements begin immediately after the previous array element.
*/
-#define LOAD(name, num_indices, flags) \
- INTRINSIC(load_##name, 0, ARR(), true, 0, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) \
- INTRINSIC(load_##name##_indirect, 1, ARR(1), true, 0, 0, num_indices, \
- NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER) \
+#define LOAD(name, extra_srcs, flags) \
+ INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, 2, flags) \
+ INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
+ true, 0, 0, 2, flags)
-LOAD(uniform, 2, NIR_INTRINSIC_CAN_REORDER)
-LOAD(ubo, 3, NIR_INTRINSIC_CAN_REORDER)
-LOAD(input, 2, NIR_INTRINSIC_CAN_REORDER)
-/* LOAD(ssbo, 2, 0) */
+LOAD(uniform, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+LOAD(input, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* LOAD(ssbo, 1, 0) */
/*
* Stores work the same way as loads, except now the first register input is
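For reference, expanding the reworked macro by hand for the UBO case gives the following (an illustrative expansion derived from the LOAD definition above, not text from the patch):

   /* LOAD(ubo, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
    * now expands to:
    */
   INTRINSIC(load_ubo, 1, ARR(1), true, 0, 0, 2,
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
   INTRINSIC(load_ubo_indirect, 2, ARR(1, 1), true, 0, 0, 2,
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

That is, load_ubo carries one source (the buffer index) and two constant indices (base offset and vec4 count), and the _indirect variant adds a second source for the indirect offset. Note also that the flags argument is now honored per intrinsic rather than hardcoded inside the macro.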