Diffstat (limited to 'src/glsl/nir/nir_intrinsics.h')
-rw-r--r-- | src/glsl/nir/nir_intrinsics.h | 55
1 file changed, 53 insertions, 2 deletions
diff --git a/src/glsl/nir/nir_intrinsics.h b/src/glsl/nir/nir_intrinsics.h
index 62eead4..3e7cf73 100644
--- a/src/glsl/nir/nir_intrinsics.h
+++ b/src/glsl/nir/nir_intrinsics.h
@@ -176,6 +176,52 @@ INTRINSIC(image_samples, 0, ARR(), true, 1, 1, 0,
           NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 
 /*
+ * Vulkan descriptor set intrinsic
+ *
+ * The Vulkan API uses a different binding model from GL. In the Vulkan
+ * API, all external resources are represented by a triple:
+ *
+ * (descriptor set, binding, array index)
+ *
+ * where the array index is the only thing allowed to be indirect. The
+ * vulkan_resource_index intrinsic takes the descriptor set and binding as
+ * its first two indices and the array index as its source. The third
+ * index is a nir_variable_mode in case that's useful to the backend.
+ *
+ * The intended usage is that the shader will call vulkan_resource_index to
+ * get an index and then pass that as the buffer index to ubo/ssbo calls.
+ */
+INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 3,
+          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+
+/*
+ * variable atomic intrinsics
+ *
+ * All of these variable atomic memory operations read a value from memory,
+ * compute a new value using one of the operations below, write the new value
+ * to memory, and return the original value read.
+ *
+ * All operations take 1 source except CompSwap, which takes 2. These sources
+ * represent:
+ *
+ * 0: The data parameter to the atomic function (i.e. the value to add
+ *    in var_atomic_add, etc.).
+ * 1: For CompSwap only: the second data parameter.
+ *
+ * All operations take 1 variable deref.
+ */
+INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, 0)
+INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, 0)
+
+/*
  * SSBO atomic intrinsics
  *
  * All of the SSBO atomic memory operations read a value from memory,
@@ -265,6 +311,9 @@ SYSTEM_VALUE(helper_invocation, 1, 0)
  * of the start of the variable being loaded and the offset source is an
  * offset into that variable.
  *
+ * Uniform load operations have a second index that specifies the size of the
+ * variable being loaded. If const_index[1] == 0, then the size is unknown.
+ *
  * Some load operations such as UBO/SSBO load and per_vertex loads take an
  * additional source to specify which UBO/SSBO/vertex to load from.
  *
@@ -277,8 +326,8 @@ SYSTEM_VALUE(helper_invocation, 1, 0)
 #define LOAD(name, srcs, indices, flags) \
    INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, indices, flags)
 
-/* src[] = { offset }. const_index[] = { base } */
-LOAD(uniform, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+/* src[] = { offset }. const_index[] = { base, size } */
+LOAD(uniform, 1, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 /* src[] = { buffer_index, offset }. No const_index */
 LOAD(ubo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 /* src[] = { offset }. const_index[] = { base } */
@@ -293,6 +342,8 @@ LOAD(output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
 LOAD(per_vertex_output, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE)
 /* src[] = { offset }. const_index[] = { base } */
 LOAD(shared, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+/* src[] = { offset }. const_index[] = { base, size } */
+LOAD(push_constant, 1, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
 
 /*
  * Stores work the same way as loads, except now the first source is the value
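As a reading aid for the new comment, here is a minimal sketch of how a lowering pass might emit vulkan_resource_index and feed its result to a UBO load. It is illustrative only and not part of this commit: build_resource_index is a hypothetical helper, and it assumes the nir_builder API as it exists in this era of the tree (nir_intrinsic_instr_create, nir_src_for_ssa, nir_ssa_dest_init, nir_builder_instr_insert).

#include "nir_builder.h"

/* Hypothetical sketch: build a vulkan_resource_index for the triple
 * (set, binding, array_index) and return its SSA result.  Only the
 * array index may be indirect, so it is the sole source; the set,
 * binding, and nir_variable_mode travel as constant indices.
 */
static nir_ssa_def *
build_resource_index(nir_builder *b, unsigned set, unsigned binding,
                     nir_ssa_def *array_index)
{
   nir_intrinsic_instr *idx =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_vulkan_resource_index);
   idx->src[0] = nir_src_for_ssa(array_index); /* the only indirect part */
   idx->const_index[0] = set;                  /* descriptor set */
   idx->const_index[1] = binding;              /* binding within the set */
   idx->const_index[2] = nir_var_uniform;      /* nir_variable_mode hint */
   nir_ssa_dest_init(&idx->instr, &idx->dest, 1, NULL);
   nir_builder_instr_insert(b, &idx->instr);
   return &idx->dest.ssa;
}

The returned SSA value is then used as the buffer_index source (src[0]) of the two-source load_ubo/load_ssbo intrinsics defined above.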
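The read-modify-write contract of the var_atomic_* intrinsics can likewise be restated as a small C model. This is purely illustrative: the model_* functions are hypothetical, ignore the variable-deref plumbing, and use plain non-atomic C for clarity; only the return-the-original-value behavior comes from the comment above.

/* Every var_atomic_* reads a value from memory, writes a new one
 * computed from its data source, and returns the original value.
 */
static unsigned
model_var_atomic_add(unsigned *var, unsigned data)
{
   unsigned old = *var;  /* value read from memory */
   *var = old + data;    /* new value written back */
   return old;           /* the intrinsic's result */
}

/* CompSwap is the only operation with a second data source; assuming
 * the usual atomicCompSwap() convention, the first is the comparison
 * value and the second is the value to swap in.
 */
static unsigned
model_var_atomic_comp_swap(unsigned *var, unsigned cmp, unsigned data)
{
   unsigned old = *var;
   if (old == cmp)
      *var = data;
   return old;
}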