author | Ben Skeggs <bskeggs@redhat.com> | 2011-04-01 11:33:21 +1000
committer | Ben Skeggs <bskeggs@redhat.com> | 2011-05-16 10:48:14 +1000
commit | 39c8d368273bca9b5f309f9feadfc8575c9fd993
tree | 104e1d8be0189d3c3f21206d7c97ab3d9ca93f54 /drivers/gpu/drm/nouveau
parent | 7a45cd19c95a383d81a7b2f5297958c0c16b5a08
drm/nv40/gr: move to exec engine interfaces
Like nv50, this needs a good cleanup.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
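
For readers coming to this patch cold: the change drops the per-generation `nouveau_pgraph_engine` function-pointer table for NV40 and instead registers a graphics exec engine, the same scheme nv50 and nvc0 already use. Below is a minimal sketch of the hook table that nv40_graph_create() fills in; the real `struct nouveau_exec_engine` lives in nouveau_drv.h and is not shown in this diff, so the field names and signatures here are inferred from the assignments in the patch and may not match the actual definition exactly.

```c
/*
 * Illustrative sketch only: inferred from the hooks nv40_graph_create()
 * assigns in this patch.  The real struct nouveau_exec_engine in
 * nouveau_drv.h may carry additional members.
 */
struct nouveau_exec_engine {
        void (*destroy)(struct drm_device *, int engine);
        int  (*init)(struct drm_device *, int engine);
        int  (*fini)(struct drm_device *, int engine);
        int  (*context_new)(struct nouveau_channel *, int engine);
        void (*context_del)(struct nouveau_channel *, int engine);
        int  (*object_new)(struct nouveau_channel *, int engine,
                           u32 handle, u16 class);
};
```

Each generation embeds this base in its own wrapper (here `struct nv40_graph_engine`, which adds `grctx_size`) and registers it with `NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base)`.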
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_drv.h | 9
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_state.c | 25
-rw-r--r-- | drivers/gpu/drm/nouveau/nv40_graph.c | 295
3 files changed, 158 insertions, 171 deletions
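
A second pattern worth noting before reading the diff: per-channel graphics context state moves out of the dedicated `chan->ramin_grctx` pointer and into the channel's generic per-engine array, indexed by `NVOBJ_ENGINE_GR`. The sketch below shows the lookup shape; the helper name is hypothetical (the patch itself just indexes `chan->engctx[]` directly in nv40_graph_channel() and nv40_graph_isr_chid()).

```c
#include "nouveau_drv.h" /* struct nouveau_channel, nouveau_gpuobj, NVOBJ_ENGINE_GR */

/* Hypothetical helper, for illustration only: fetch the NV40 PGRAPH
 * context object that nv40_graph_context_new() stored for this channel. */
static struct nouveau_gpuobj *
nv40_grctx_of(struct nouveau_channel *chan)
{
        return chan->engctx[NVOBJ_ENGINE_GR];
}
```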
```diff
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 139864f..a20e49d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1177,14 +1177,7 @@ extern int nv30_graph_init(struct drm_device *);
 extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
-extern int nv40_graph_init(struct drm_device *);
-extern void nv40_graph_takedown(struct drm_device *);
-extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
-extern int nv40_graph_create_context(struct nouveau_channel *);
-extern void nv40_graph_destroy_context(struct nouveau_channel *);
-extern int nv40_graph_load_context(struct nouveau_channel *);
-extern int nv40_graph_unload_context(struct drm_device *);
-extern int nv40_graph_object_new(struct nouveau_channel *, u32, u16);
+extern int nv40_graph_create(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
 extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 958f312..d8852ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -299,15 +299,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                 engine->fb.init_tile_region = nv30_fb_init_tile_region;
                 engine->fb.set_tile_region = nv40_fb_set_tile_region;
                 engine->fb.free_tile_region = nv30_fb_free_tile_region;
-                engine->graph.init = nv40_graph_init;
-                engine->graph.takedown = nv40_graph_takedown;
-                engine->graph.fifo_access = nv04_graph_fifo_access;
-                engine->graph.channel = nv40_graph_channel;
-                engine->graph.create_context = nv40_graph_create_context;
-                engine->graph.destroy_context = nv40_graph_destroy_context;
-                engine->graph.load_context = nv40_graph_load_context;
-                engine->graph.unload_context = nv40_graph_unload_context;
-                engine->graph.object_new = nv40_graph_object_new;
+                engine->graph.init = nouveau_stub_init;
+                engine->graph.takedown = nouveau_stub_takedown;
+                engine->graph.fifo_access = nvc0_graph_fifo_access;
+                engine->graph.channel = nvc0_graph_channel;
                 engine->graph.set_tile_region = nv40_graph_set_tile_region;
                 engine->fifo.channels = 32;
                 engine->fifo.init = nv40_fifo_init;
@@ -618,11 +613,17 @@ nouveau_card_init(struct drm_device *dev)
         if (ret)
                 goto out_timer;
 
-        if (dev_priv->card_type == NV_50)
+        switch (dev_priv->card_type) {
+        case NV_40:
+                nv40_graph_create(dev);
+                break;
+        case NV_50:
                 nv50_graph_create(dev);
-        else
-        if (dev_priv->card_type == NV_C0)
+                break;
+        case NV_C0:
                 nvc0_graph_create(dev);
+                break;
+        }
 
         switch (dev_priv->chipset) {
         case 0x84:
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2952daf..f0c6a64 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -30,13 +30,16 @@
 #include "nouveau_grctx.h"
 #include "nouveau_ramht.h"
 
-static int nv40_graph_register(struct drm_device *);
-static void nv40_graph_isr(struct drm_device *);
+struct nv40_graph_engine {
+        struct nouveau_exec_engine base;
+        u32 grctx_size;
+};
 
-struct nouveau_channel *
+static struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_gpuobj *grctx;
         uint32_t inst;
         int i;
 
@@ -46,74 +49,17 @@ nv40_graph_channel(struct drm_device *dev)
         inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
         for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+                if (!dev_priv->channels.ptr[i])
+                        continue;
 
-                if (chan && chan->ramin_grctx &&
-                    chan->ramin_grctx->pinst == inst)
-                        return chan;
+                grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+                if (grctx && grctx->pinst == inst)
+                        return dev_priv->channels.ptr[i];
         }
 
         return NULL;
 }
 
-int
-nv40_graph_create_context(struct nouveau_channel *chan)
-{
-        struct drm_device *dev = chan->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-        struct nouveau_grctx ctx = {};
-        unsigned long flags;
-        int ret;
-
-        ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-                                 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
-        if (ret)
-                return ret;
-
-        /* Initialise default context values */
-        ctx.dev = chan->dev;
-        ctx.mode = NOUVEAU_GRCTX_VALS;
-        ctx.data = chan->ramin_grctx;
-        nv40_grctx_init(&ctx);
-
-        nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
-
-        /* init grctx pointer in ramfc, and on PFIFO if channel is
-         * already active there
-         */
-        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-        nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
-        nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-        if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-                nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
-        nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-        return 0;
-}
-
-void
-nv40_graph_destroy_context(struct nouveau_channel *chan)
-{
-        struct drm_device *dev = chan->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-        unsigned long flags;
-
-        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-        pgraph->fifo_access(dev, false);
-
-        /* Unload the context if it's the currently active one */
-        if (pgraph->channel(dev) == chan)
-                pgraph->unload_context(dev);
-
-        pgraph->fifo_access(dev, true);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-        /* Free the context resources */
-        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
 static int
 nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 {
@@ -155,58 +101,88 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
         return 0;
 }
 
-/* Restore the context for a specific channel into PGRAPH */
-int
-nv40_graph_load_context(struct nouveau_channel *chan)
+static int
+nv40_graph_unload_context(struct drm_device *dev)
 {
-        struct drm_device *dev = chan->dev;
         uint32_t inst;
         int ret;
 
-        if (!chan->ramin_grctx)
-                return -EINVAL;
-        inst = chan->ramin_grctx->pinst >> 4;
+        inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+        if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+                return 0;
+        inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+        ret = nv40_graph_transfer_context(dev, inst, 1);
+
+        nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+        return ret;
+}
 
-        ret = nv40_graph_transfer_context(dev, inst, 0);
+static int
+nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+        struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
+        struct drm_device *dev = chan->dev;
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_gpuobj *grctx = NULL;
+        struct nouveau_grctx ctx = {};
+        unsigned long flags;
+        int ret;
+
+        ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+                                 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
         if (ret)
                 return ret;
 
-        /* 0x40032C, no idea of it's exact function.  Could simply be a
-         * record of the currently active PGRAPH context.  It's currently
-         * unknown as to what bit 24 does.  The nv ddx has it set, so we will
-         * set it here too.
-         */
-        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-        nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
-                (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
-                NV40_PGRAPH_CTXCTL_CUR_LOADED);
-        /* 0x32E0 records the instance address of the active FIFO's PGRAPH
-         * context.  If at any time this doesn't match 0x40032C, you will
-         * receive PGRAPH_INTR_CONTEXT_SWITCH
+        /* Initialise default context values */
+        ctx.dev = chan->dev;
+        ctx.mode = NOUVEAU_GRCTX_VALS;
+        ctx.data = grctx;
+        nv40_grctx_init(&ctx);
+
+        nv_wo32(grctx, 0, grctx->vinst);
+
+        /* init grctx pointer in ramfc, and on PFIFO if channel is
+         * already active there
          */
-        nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+        nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+        nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+        if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+                nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+        nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+        chan->engctx[engine] = grctx;
         return 0;
 }
 
-int
-nv40_graph_unload_context(struct drm_device *dev)
+static void
+nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 {
-        uint32_t inst;
-        int ret;
+        struct nouveau_gpuobj *grctx = chan->engctx[engine];
+        struct drm_device *dev = chan->dev;
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        unsigned long flags;
 
-        inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-        if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-                return 0;
-        inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+        nv04_graph_fifo_access(dev, false);
 
-        ret = nv40_graph_transfer_context(dev, inst, 1);
+        /* Unload the context if it's the currently active one */
+        if (nv40_graph_channel(dev) == chan)
+                nv40_graph_unload_context(dev);
 
-        nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
-        return ret;
+        nv04_graph_fifo_access(dev, true);
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+        /* Free the context resources */
+        nouveau_gpuobj_ref(NULL, &grctx);
+        chan->engctx[engine] = NULL;
 }
 
 int
-nv40_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+nv40_graph_object_new(struct nouveau_channel *chan, int engine,
+                      u32 handle, u16 class)
 {
         struct drm_device *dev = chan->dev;
         struct nouveau_gpuobj *obj = NULL;
@@ -284,14 +260,14 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
  * C51 0x4e
  */
 int
-nv40_graph_init(struct drm_device *dev)
+nv40_graph_init(struct drm_device *dev, int engine)
 {
-        struct drm_nouveau_private *dev_priv =
-                (struct drm_nouveau_private *)dev->dev_private;
+        struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
         struct nouveau_grctx ctx = {};
         uint32_t vramsz, *cp;
-        int ret, i, j;
+        int i, j;
 
         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
                         ~NV_PMC_ENABLE_PGRAPH);
@@ -307,7 +283,7 @@ nv40_graph_init(struct drm_device *dev)
         ctx.data = cp;
         ctx.ctxprog_max = 256;
         nv40_grctx_init(&ctx);
-        dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+        pgraph->grctx_size = ctx.ctxvals_pos * 4;
 
         nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
         for (i = 0; i < ctx.ctxprog_len; i++)
@@ -315,14 +291,9 @@ nv40_graph_init(struct drm_device *dev)
         kfree(cp);
 
-        ret = nv40_graph_register(dev);
-        if (ret)
-                return ret;
-
         /* No context present currently */
         nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-        nouveau_irq_register(dev, 12, nv40_graph_isr);
         nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
         nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -455,47 +426,10 @@ nv40_graph_init(struct drm_device *dev)
         return 0;
 }
 
-void nv40_graph_takedown(struct drm_device *dev)
-{
-        nouveau_irq_unregister(dev, 12);
-}
-
 static int
-nv40_graph_register(struct drm_device *dev)
+nv40_graph_fini(struct drm_device *dev, int engine)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        if (dev_priv->engine.graph.registered)
-                return 0;
-
-        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-        NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
-        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-        NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
-        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-        NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
-        /* curie */
-        if (nv44_graph_class(dev))
-                NVOBJ_CLASS(dev, 0x4497, GR);
-        else
-                NVOBJ_CLASS(dev, 0x4097, GR);
-
-        /* nvsw */
-        NVOBJ_CLASS(dev, 0x506e, SW);
-        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-        dev_priv->engine.graph.registered = true;
+        nv40_graph_unload_context(dev);
         return 0;
 }
 
@@ -503,17 +437,17 @@ static int
 nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_channel *chan;
+        struct nouveau_gpuobj *grctx;
         unsigned long flags;
         int i;
 
         spin_lock_irqsave(&dev_priv->channels.lock, flags);
         for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-                chan = dev_priv->channels.ptr[i];
-                if (!chan || !chan->ramin_grctx)
+                if (!dev_priv->channels.ptr[i])
                         continue;
+                grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
 
-                if (inst == chan->ramin_grctx->pinst)
+                if (grctx && grctx->pinst == inst)
                         break;
         }
         spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -564,3 +498,62 @@ nv40_graph_isr(struct drm_device *dev)
                 }
         }
 }
+
+static void
+nv40_graph_destroy(struct drm_device *dev, int engine)
+{
+        struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+
+        nouveau_irq_unregister(dev, 12);
+
+        NVOBJ_ENGINE_DEL(dev, GR);
+        kfree(pgraph);
+}
+
+int
+nv40_graph_create(struct drm_device *dev)
+{
+        struct nv40_graph_engine *pgraph;
+
+        pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+        if (!pgraph)
+                return -ENOMEM;
+
+        pgraph->base.destroy = nv40_graph_destroy;
+        pgraph->base.init = nv40_graph_init;
+        pgraph->base.fini = nv40_graph_fini;
+        pgraph->base.context_new = nv40_graph_context_new;
+        pgraph->base.context_del = nv40_graph_context_del;
+        pgraph->base.object_new = nv40_graph_object_new;
+
+        NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+        nouveau_irq_register(dev, 12, nv40_graph_isr);
+
+        NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+        NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+        NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+        NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+        NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+        NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+        NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+        NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+        NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+        NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+        NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+        NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+        NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+        NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+        NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+        NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+        /* curie */
+        if (nv44_graph_class(dev))
+                NVOBJ_CLASS(dev, 0x4497, GR);
+        else
+                NVOBJ_CLASS(dev, 0x4097, GR);
+
+        /* nvsw */
+        NVOBJ_CLASS(dev, 0x506e, SW);
+        NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+        return 0;
+}
```