author     Kristian Høgsberg Kristensen <kristian.h.kristensen@intel.com>   2016-03-04 12:21:43 -0800
committer  Kristian Høgsberg Kristensen <kristian.h.kristensen@intel.com>   2016-03-05 13:50:07 -0800
commit     07441c344c845bd663398529dbf484759d09cd54 (patch)
tree       08b88f79614ecf00cb54cbecf6509e63815a036f /src/intel/vulkan/anv_pipeline_cache.c
parent     87967a2c854c200ba8a7cabe1fe3f7e19291f187 (diff)
anv: Rename anv_pipeline_cache_add_entry() to 'set'
This function is a helper that unconditionally sets a hash table entry and expects the cache to already have enough room; it never grows the table. Calling it 'add_entry' suggests it will grow the cache as needed, so rename it to 'set_entry' to match what it actually does.
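To make the set-vs-add distinction concrete, here is a minimal, hypothetical sketch of the 'set' behaviour the message describes. It is not the Mesa implementation: the struct fields, the toy hash, and the linear-probe loop are assumptions loosely modelled on the 'mask = cache->table_size - 1' line visible in the diff below; only the contrast it illustrates comes from the commit.

#include <stdint.h>

/* Hypothetical stand-ins for the anv_pipeline_cache fields touched by the
 * diff below; names are illustrative only. */
struct sketch_cache {
   uint32_t  table_size;    /* power of two, so (size - 1) is a valid mask */
   uint32_t  kernel_count;
   uint32_t *table;         /* slot holds an offset; UINT32_MAX means empty */
};

/* Toy hash for illustration only; the real code derives the hash from the
 * entry's shader key rather than from the offset. */
static uint32_t
sketch_hash(uint32_t offset)
{
   return offset * 2654435761u;
}

/* 'set' semantics: claim a free slot unconditionally.  The caller must
 * guarantee the table has room; nothing here grows it. */
static void
sketch_cache_set_entry(struct sketch_cache *cache, uint32_t entry_offset)
{
   const uint32_t mask = cache->table_size - 1;

   for (uint32_t i = sketch_hash(entry_offset) & mask; ; i = (i + 1) & mask) {
      if (cache->table[i] == UINT32_MAX) {
         cache->table[i] = entry_offset;
         cache->kernel_count++;
         return;
      }
   }
}

An 'add'-style helper, by contrast, would be expected to grow and rehash the table itself before inserting, which is exactly what the renamed function does not do.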
Diffstat (limited to 'src/intel/vulkan/anv_pipeline_cache.c')
-rw-r--r--   src/intel/vulkan/anv_pipeline_cache.c   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/intel/vulkan/anv_pipeline_cache.c b/src/intel/vulkan/anv_pipeline_cache.c
index 024fdf7..0b26052 100644
--- a/src/intel/vulkan/anv_pipeline_cache.c
+++ b/src/intel/vulkan/anv_pipeline_cache.c
@@ -140,7 +140,7 @@ anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
}
static void
-anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
+anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
struct cache_entry *entry, uint32_t entry_offset)
{
const uint32_t mask = cache->table_size - 1;
@@ -187,7 +187,7 @@ anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
struct cache_entry *entry =
cache->program_stream.block_pool->map + offset;
- anv_pipeline_cache_add_entry(cache, entry, offset);
+ anv_pipeline_cache_set_entry(cache, entry, offset);
}
free(old_table);
@@ -231,7 +231,7 @@ anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
* have enough space to add this new kernel. Only add it if there's room.
*/
if (cache->kernel_count < cache->table_size / 2)
- anv_pipeline_cache_add_entry(cache, entry, state.offset);
+ anv_pipeline_cache_set_entry(cache, entry, state.offset);
}
pthread_mutex_unlock(&cache->mutex);
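For contrast, a true 'add'-style helper, the behaviour the old name wrongly promised, would grow and rehash before inserting. The sketch below continues the hypothetical one after the commit message; sketch_cache_grow() is a simplified stand-in for what anv_pipeline_cache_grow() does in the real file (the middle hunk shows the real grow path re-inserting entries through the renamed set_entry helper), and error handling is elided.

#include <stdlib.h>
#include <string.h>

/* Hypothetical grow/rehash: double the table and re-insert every live slot
 * through the unconditional 'set' helper, mirroring the loop the middle
 * hunk touches in anv_pipeline_cache_grow(). */
static void
sketch_cache_grow(struct sketch_cache *cache)
{
   const uint32_t old_size = cache->table_size;
   uint32_t *old_table = cache->table;

   cache->table_size *= 2;
   cache->kernel_count = 0;
   cache->table = malloc(cache->table_size * sizeof(*cache->table));
   memset(cache->table, 0xff, cache->table_size * sizeof(*cache->table));

   for (uint32_t i = 0; i < old_size; i++) {
      if (old_table[i] != UINT32_MAX)
         sketch_cache_set_entry(cache, old_table[i]);
   }

   free(old_table);
}

/* 'add' semantics: make room first, then set.  The real upload path instead
 * grows separately and, as the last hunk shows, only calls set_entry when
 * the table is under half full. */
static void
sketch_cache_add_entry(struct sketch_cache *cache, uint32_t entry_offset)
{
   if (cache->kernel_count >= cache->table_size / 2)
      sketch_cache_grow(cache);

   sketch_cache_set_entry(cache, entry_offset);
}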