author    Eric Anholt <eric@anholt.net>  2015-05-29 18:06:32 -0700
committer Eric Anholt <eric@anholt.net>  2015-05-29 18:15:00 -0700
commit    21a22a61c02a1d1807ff03df8eb8fa16ebdd1b74
tree      861543ba1ed72db41976e513ab8cf353ea1d61d9 /src/gallium/drivers/vc4/vc4_bufmgr.c
parent    c821ccf0e3a051e5e867792898ae9b8f08e4601a
vc4: Make sure we allocate idle BOs from the cache.
We were returning the most recently freed BO, without checking if it was idle yet. This meant that we generally stalled immediately on the previous frame when generating a new one. Instead, allocate new BOs when the *oldest* BO is still busy, so that the cache scales with how much is needed to keep some frames outstanding, as originally intended.

Note that if you don't have some throttling happening, this means that you can accidentally run the system out of memory. The kernel is now applying some throttling on all execs, to hopefully avoid this.
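To make the reuse policy concrete, here is a minimal C sketch of the idea, assuming hypothetical names (cache_entry, bucket, bucket_put, bucket_get_idle) that stand in for the driver's actual simple_node/vc4_bo machinery: frees append at the tail, reuse pops the head (the oldest entry), and a busy head implies everything newer is busy too, so the caller falls back to a fresh allocation instead of stalling.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the driver's simple_node/vc4_bo types. */
struct cache_entry {
        struct cache_entry *prev, *next;
        bool busy;                     /* set while the GPU still owns it */
};

struct bucket {
        struct cache_entry head;       /* sentinel: head.next is the oldest */
};

static void
bucket_init(struct bucket *b)
{
        b->head.prev = b->head.next = &b->head;
}

/* Freed entries go on the tail, so the list stays ordered oldest-first. */
static void
bucket_put(struct bucket *b, struct cache_entry *e)
{
        e->prev = b->head.prev;
        e->next = &b->head;
        e->prev->next = e;
        b->head.prev = e;
}

/* Reuse the oldest entry, but only once it has gone idle.  If even the
 * oldest is still busy, every newer entry is busier still, so return
 * NULL and let the caller allocate a fresh buffer. */
static struct cache_entry *
bucket_get_idle(struct bucket *b)
{
        struct cache_entry *oldest = b->head.next;

        if (oldest == &b->head || oldest->busy)
                return NULL;

        oldest->prev->next = oldest->next;
        oldest->next->prev = oldest->prev;
        return oldest;
}

Taking from the head rather than the tail is what the one-line last_elem to first_elem change in the patch below accomplishes.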
Diffstat (limited to 'src/gallium/drivers/vc4/vc4_bufmgr.c')
-rw-r--r--  src/gallium/drivers/vc4/vc4_bufmgr.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.c b/src/gallium/drivers/vc4/vc4_bufmgr.c
index 8f9d9c3..8d97670 100644
--- a/src/gallium/drivers/vc4/vc4_bufmgr.c
+++ b/src/gallium/drivers/vc4/vc4_bufmgr.c
@@ -49,8 +49,18 @@ vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
         struct vc4_bo *bo = NULL;
         pipe_mutex_lock(cache->lock);
         if (!is_empty_list(&cache->size_list[page_index])) {
-                struct simple_node *node = last_elem(&cache->size_list[page_index]);
+                struct simple_node *node = first_elem(&cache->size_list[page_index]);
                 bo = container_of(node, struct vc4_bo, size_list);
+
+                /* Check that the BO has gone idle. If not, then we want to
+                 * allocate something new instead, since we assume that the
+                 * user will proceed to CPU map it and fill it with stuff.
+                 */
+                if (!vc4_bo_wait(bo, 0)) {
+                        pipe_mutex_unlock(cache->lock);
+                        return NULL;
+                }
+
                 pipe_reference_init(&bo->reference, 1);
                 remove_from_list(&bo->time_list);
                 remove_from_list(&bo->size_list);
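The vc4_bo_wait(bo, 0) call above is a zero-timeout probe: it asks whether the BO is already idle and returns immediately rather than blocking. A hedged sketch of how such a probe can be built on the vc4 kernel interface, assuming the DRM_IOCTL_VC4_WAIT_BO ioctl and struct drm_vc4_wait_bo from the vc4 UAPI header; the include paths and error convention here are assumptions, not taken from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vc4_drm.h>

/* Zero-timeout wait: ask the kernel whether the BO's last rendering
 * has completed, without sleeping.  A busy BO is reported as an
 * error (conventionally -ETIME), which we treat as "not idle". */
static bool
bo_is_idle(int fd, uint32_t handle)
{
        struct drm_vc4_wait_bo wait = {
                .handle = handle,
                .timeout_ns = 0,       /* poll, don't block */
        };

        return drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait) == 0;
}

Because the probe never blocks, the cache lookup stays cheap on the hot path: either the oldest BO is idle and is reused immediately, or the allocator pays for a fresh buffer instead of a GPU stall.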