author     Ben Skeggs <bskeggs@redhat.com>  2012-05-01 02:46:28 -0400
committer  Ben Skeggs <bskeggs@redhat.com>  2012-05-24 02:56:08 -0400
commit     5511d490da02b73c4c2144c7a2d368634caca67f (patch)
tree       2883fdaf570e8741510a8d4214cbe0e5f2d264ef
parent     7f2062e9de357e4158645b72b472ccba229cb3aa (diff)
drm/nv50: remove manual context unload on context destruction
PFIFO context destruction triggers this automagically now.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c  | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c  | 24
3 files changed, 7 insertions, 94 deletions
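
With PFIFO tearing down the engine context on channel destruction, each engine's context_del() reduces to clearing the context pointers in the channel's instance memory and dropping the context object. As orientation for the hunks below, here is a rough sketch of the resulting nv50_graph_context_del(); the grctx declaration sits above the visible hunk context and is assumed here, so read this as an illustration of the post-patch shape rather than verbatim source:

static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];	/* assumed declaration */
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;

	/* No manual PGRAPH unload and no FIFO fencing here any more:
	 * PFIFO context destruction already evicts the channel, so only
	 * the context pointers in the channel's RAMIN header are cleared.
	 */
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	atomic_dec(&chan->vm->engref[engine]);
	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}

The nv50_mpeg and nva3_copy teardown paths in the hunks below follow the same pattern.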
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6fe5962c4393..69be4d190778 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -39,45 +39,6 @@ struct nv50_graph_engine {
 	u32 grctx_size;
 };
 
-static void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
-	const uint32_t mask = 0x00010001;
-
-	if (enabled)
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
-	else
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-static struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	/* Be sure we're not in the middle of a context switch or bad things
-	 * will happen, such as unloading the wrong pgraph context.
-	 */
-	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "Ctxprog is still running\n");
-
-	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-		return NULL;
-	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
-		if (chan && chan->ramin && chan->ramin->vinst == inst)
-			return chan;
-	}
-
-	return NULL;
-}
-
 static int
 nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
 {
@@ -257,31 +218,13 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-	unsigned long flags;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	if (!chan->ramin)
-		return;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-	nv50_graph_fifo_access(dev, false);
-
-	if (nv50_graph_channel(dev) == chan)
-		nv50_graph_unload_context(dev);
 
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
-	nv50_graph_fifo_access(dev, true);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	nouveau_gpuobj_ref(NULL, &grctx);
-
 	atomic_dec(&chan->vm->engref[engine]);
+	nouveau_gpuobj_ref(NULL, &grctx);
 	chan->engctx[engine] = NULL;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index 95c3305b54c6..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
 static void
 nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *ctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
-	unsigned long flags;
-	u32 inst, i;
-
-	if (!chan->ramin)
-		return;
-
-	inst = chan->ramin->vinst >> 12;
-	inst |= 0x80000000;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(dev, 0x00b318) == inst)
-		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	int i;
 
 	for (i = 0x00; i <= 0x14; i += 4)
 		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
 	nouveau_gpuobj_ref(NULL, &ctx);
 	chan->engctx[engine] = NULL;
 }
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index b63ef87173a6..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
 nva3_copy_context_del(struct nouveau_channel *chan, int engine)
 {
 	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	u32 inst;
-
-	inst = (chan->ramin->vinst >> 12);
-	inst |= 0x40000000;
-
-	/* disable fifo access */
-	nv_wr32(dev, 0x104048, 0x00000000);
-	/* mark channel as unloaded if it's currently active */
-	if (nv_rd32(dev, 0x104050) == inst)
-		nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
-	/* mark next channel as invalid if it's about to be loaded */
-	if (nv_rd32(dev, 0x104054) == inst)
-		nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
-	/* restore fifo access */
-	nv_wr32(dev, 0x104048, 0x00000003);
-
-	for (inst = 0xc0; inst <= 0xd4; inst += 4)
-		nv_wo32(chan->ramin, inst, 0x00000000);
+	int i;
 
-	nouveau_gpuobj_ref(NULL, &ctx);
+	for (i = 0xc0; i <= 0xd4; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
 
 	atomic_dec(&chan->vm->engref[engine]);
+	nouveau_gpuobj_ref(NULL, &ctx);
 	chan->engctx[engine] = ctx;
 }
 