author		Daniel Vetter <daniel.vetter@ffwll.ch>	2012-12-02 14:49:44 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-01-20 15:57:55 +0100
commit		59ad1465423d968f06f243bc52cc3b1a320a27cf (patch)
tree		e565409bc6d08385bda2e6208f9781396bdf509b
parent		7147573a5ce499dec3979e6b524691d47e1288d5 (diff)
drm/nouveau: protect evo_wait/evo_kick sections with a channel mutex
With per-crtc locks modeset operations can run in parallel, and the cursor code uses the device-global evo master channel for hw frobbing. But the pageflip code can also sync with the master under some circumstances. Hence just wrap things up in a mutex to ensure that pushbuf access doesn't intermingle.

The approach here is a bit overkill since the per-crtc channels used to schedule the pageflips could probably be used without this pushbuf locking, but I'm not familiar enough with the nouveau codebase to be sure of that.

v2: Add missing mutex_init to avoid angering lockdep.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
 drivers/gpu/drm/nouveau/nv50_display.c | 10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3587408..d4cbea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -128,6 +128,11 @@ struct nv50_dmac {
 	struct nv50_chan base;
 	dma_addr_t handle;
 	u32 *ptr;
+
+	/* Protects against concurrent pushbuf access to this channel, lock is
+	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
+	 * dropped again by evo_kick. */
+	struct mutex lock;
 };
 
 static void
@@ -271,6 +276,8 @@ nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
 	u32 pushbuf = *(u32 *)data;
 	int ret;
 
+	mutex_init(&dmac->lock);
+
 	dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
 					 &dmac->handle);
 	if (!dmac->ptr)
@@ -395,11 +402,13 @@ evo_wait(void *evoc, int nr)
 	struct nv50_dmac *dmac = evoc;
 	u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
 
+	mutex_lock(&dmac->lock);
 	if (put + nr >= (PAGE_SIZE / 4) - 8) {
 		dmac->ptr[put] = 0x20000000;
 
 		nv_wo32(dmac->base.user, 0x0000, 0x00000000);
 		if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+			mutex_unlock(&dmac->lock);
 			NV_ERROR(dmac->base.user, "channel stalled\n");
 			return NULL;
 		}
@@ -415,6 +424,7 @@ evo_kick(u32 *push, void *evoc)
 {
 	struct nv50_dmac *dmac = evoc;
 	nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+	mutex_unlock(&dmac->lock);
 }
 
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
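
For reference, a minimal sketch of the usage pattern this patch serializes. The caller, the channel pointer "mast", and the method/data values below are hypothetical and only illustrate the evo_wait/evo_kick pairing; evo_data is assumed to be the companion macro defined next to evo_mthd in the same file.

	/* Hypothetical caller (e.g. cursor or pageflip code). With this patch,
	 * evo_wait() takes dmac->lock when it successfully reserves pushbuf
	 * space and evo_kick() drops it again, so everything emitted in
	 * between cannot intermingle with another thread's pushbuf writes
	 * to the same channel. */
	u32 *push = evo_wait(mast, 8);		/* grabs dmac->lock on success */
	if (push) {
		evo_mthd(push, 0x0080, 1);	/* method offset/count made up */
		evo_data(push, 0x00000000);
		evo_kick(push, mast);		/* submits and drops dmac->lock */
	}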