Diffstat (limited to 'src/compiler/nir/nir_opt_constant_folding.c')
-rw-r--r--  src/compiler/nir/nir_opt_constant_folding.c | 31
1 file changed, 28 insertions, 3 deletions
diff --git a/src/compiler/nir/nir_opt_constant_folding.c b/src/compiler/nir/nir_opt_constant_folding.c
index 04876a4..e64ca36 100644
--- a/src/compiler/nir/nir_opt_constant_folding.c
+++ b/src/compiler/nir/nir_opt_constant_folding.c
@@ -46,10 +46,28 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
    if (!instr->dest.dest.is_ssa)
       return false;
+   /* If any outputs/inputs have unsized types, we need to guess the
+    * bit-size. In that case the validator ensures that all bit-sizes
+    * match, so we can just take the bit-size from the first output/input
+    * with an unsized type. If all the outputs/inputs are sized, we don't
+    * need to guess the bit-size at all, because the code generated for
+    * constant opcodes already knows the sizes of the types involved and
+    * does not use the provided bit-size for anything (although it must
+    * still be passed a valid bit-size).
+    */
+   unsigned bit_size = 0;
+   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
+      bit_size = instr->dest.dest.ssa.bit_size;
+
    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
       if (!instr->src[i].src.is_ssa)
          return false;
+      if (bit_size == 0 &&
+          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i])) {
+         bit_size = instr->src[i].src.ssa->bit_size;
+      }
+
       nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

       if (src_instr->type != nir_instr_type_load_const)
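The bit-size guess in the hunk above is easier to follow outside the pass. Below is a minimal standalone sketch of the same rule, under simplified assumptions: op_info, operand, and SIZE_UNSIZED are hypothetical stand-ins for nir_op_info, SSA defs, and a zero return from nir_alu_type_get_type_size(); this is an illustration, not the committed code.

/* Sketch: pick the bit size from the first unsized output/input. */
#include <assert.h>
#include <stdio.h>

#define SIZE_UNSIZED 0           /* plays the role of an unsized nir_alu_type */

struct op_info {                 /* hypothetical, models nir_op_info */
   unsigned output_size;         /* 0 = unsized, else 32/64 */
   unsigned input_size[2];       /* same encoding, per input */
};

struct operand {                 /* hypothetical, models an SSA def */
   unsigned bit_size;            /* actual width of the value: 32 or 64 */
};

static unsigned
guess_bit_size(const struct op_info *info, const struct operand *dest,
               const struct operand src[], unsigned num_inputs)
{
   unsigned bit_size = 0;

   /* First unsized output/input wins; the real validator guarantees
    * that all unsized widths agree, so "first" is safe.
    */
   if (info->output_size == SIZE_UNSIZED)
      bit_size = dest->bit_size;

   for (unsigned i = 0; i < num_inputs; i++) {
      if (bit_size == 0 && info->input_size[i] == SIZE_UNSIZED)
         bit_size = src[i].bit_size;
   }

   /* Everything was sized: any valid width will do, so pick 32. */
   return bit_size != 0 ? bit_size : 32;
}

int main(void)
{
   /* An iadd-like op: output and inputs all unsized, 64-bit operands. */
   struct op_info add = { SIZE_UNSIZED, { SIZE_UNSIZED, SIZE_UNSIZED } };
   struct operand dest = { 64 }, srcs[2] = { { 64 }, { 64 } };
   assert(guess_bit_size(&add, &dest, srcs, 2) == 64);

   /* A fully sized op: the guess falls back to 32. */
   struct op_info cmp = { 32, { 32, 32 } };
   assert(guess_bit_size(&cmp, &dest, srcs, 2) == 32);

   printf("ok\n");
   return 0;
}

The 32-bit fallback (the "if (bit_size == 0) bit_size = 32;" in the next hunk) only triggers when every type is sized, which per the comment is exactly when the evaluator does not use the value; it merely has to be valid.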
@@ -58,24 +76,31 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
       for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
            j++) {
-         src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
+         if (load_const->def.bit_size == 64)
+            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
+         else
+            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
       }

       /* We shouldn't have any source modifiers in the optimization loop. */
       assert(!instr->src[i].abs && !instr->src[i].negate);
    }
+   if (bit_size == 0)
+      bit_size = 32;
+
    /* We shouldn't have any saturate modifiers in the optimization loop. */
    assert(!instr->dest.saturate);

    nir_const_value dest =
       nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
-                            src);
+                            bit_size, src);

    nir_load_const_instr *new_instr =
       nir_load_const_instr_create(mem_ctx,
                                   instr->dest.dest.ssa.num_components);

+   new_instr->def.bit_size = instr->dest.dest.ssa.bit_size;
    new_instr->value = dest;

    nir_instr_insert_before(&instr->instr, &new_instr->instr);
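The per-width copy in the loop above is needed because the constant payload is a union: the 32-bit and 64-bit lanes alias the same storage, so indexing the 32-bit view of 64-bit data reads half-values. A minimal standalone sketch of the idea, with const_value as a hypothetical stand-in for nir_const_value:

#include <assert.h>
#include <stdint.h>

#define MAX_COMPONENTS 4

typedef union {                       /* hypothetical nir_const_value-alike */
   uint32_t u32[MAX_COMPONENTS];
   uint64_t u64[MAX_COMPONENTS];
} const_value;

/* Copy num_components lanes through a swizzle, reading and writing the
 * union member that matches the source bit size, as the hunk above does.
 */
static void
copy_swizzled(const_value *dst, const const_value *src,
              const uint8_t swizzle[], unsigned num_components,
              unsigned bit_size)
{
   for (unsigned j = 0; j < num_components; j++) {
      if (bit_size == 64)
         dst->u64[j] = src->u64[swizzle[j]];
      else
         dst->u32[j] = src->u32[swizzle[j]];
   }
}

int main(void)
{
   const_value src = { .u64 = { 1, 2, 3, 4 } };
   const_value dst = { .u64 = { 0 } };
   const uint8_t swz[4] = { 3, 2, 1, 0 };   /* a .wzyx swizzle */

   copy_swizzled(&dst, &src, swz, 4, 64);
   assert(dst.u64[0] == 4 && dst.u64[3] == 1);
   return 0;
}

The same aliasing argument explains the new_instr->def.bit_size assignment: the evaluated result is only meaningful when read back at the width it was produced at.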
@@ -106,7 +131,7 @@ constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
       nir_load_const_instr *indirect =
          nir_instr_as_load_const(arr->indirect.ssa->parent_instr);

-      arr->base_offset += indirect->value.u[0];
+      arr->base_offset += indirect->value.u32[0];

       /* Clear out the source */
       nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));
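This last hunk only renames .u[0] to .u32[0]: indirect array indices are still read as 32-bit values even now that 64-bit constants exist. As a standalone sketch of the fold itself, with deref_array as a hypothetical stand-in for nir_deref_array and the SSA plumbing reduced to a flag:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct deref_array {            /* hypothetical, models nir_deref_array */
   unsigned base_offset;        /* constant part of the index */
   bool has_indirect;           /* is there a runtime index source? */
   uint32_t indirect_value;     /* its value, once known to be constant */
};

static void
fold_constant_indirect(struct deref_array *arr)
{
   if (!arr->has_indirect)
      return;

   /* arr[base + indirect] with a constant indirect is just arr[base']. */
   arr->base_offset += arr->indirect_value;
   arr->has_indirect = false;   /* clear out the source */
}

int main(void)
{
   struct deref_array arr = { .base_offset = 2, .has_indirect = true,
                              .indirect_value = 5 };
   fold_constant_indirect(&arr);
   assert(arr.base_offset == 7 && !arr.has_indirect);
   return 0;
}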