path: root/drivers/gpu/drm/nouveau/nvc0_fifo.c
author	Ben Skeggs <bskeggs@redhat.com>	2012-05-01 06:48:08 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2012-05-24 02:56:11 -0400
commit	c420b2dc8dc3cdd507214f4df5c5f96f08812cbe (patch)
tree	6dca9f0aba3de22a2bda5fe647d6945d4f4e986e	/drivers/gpu/drm/nouveau/nvc0_fifo.c
parent	a226c32a386bca0426e500954b79e3fd46afc0d9 (diff)
drm/nouveau/fifo: turn all fifo modules into engine modules
Been tested on each major revision that's relevant here, but I'm sure there are still bugs waiting to be ironed out.

This is a *very* invasive change. There are a couple of pieces left that I don't like much (eg. other engines using fifo_priv for the channel count), but that's an artefact of there still being a master channel list. This is changing, slowly.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
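For readers unfamiliar with the engine-module interface this patch moves PFIFO onto: every engine publishes the same small set of hooks (destroy/init/fini/context_new/context_del) through the shared dev_priv->eng[] table, and per-channel engine state lives in chan->engctx[engine] instead of fifo-specific fields. The fragment below is a minimal, self-contained C sketch of that registration-and-dispatch pattern, under the assumption that illustrative stand-in names (exec_engine, eng[], fifo_priv, fifo_create, ...) are acceptable; they are not the real declarations from nouveau_drv.h.

/* Minimal sketch of the engine-module pattern (illustrative names only). */
#include <stdio.h>
#include <stdlib.h>

struct drm_device { void *dev_private; };
struct channel    { void *engctx[16]; };   /* per-channel engine contexts */

/* Common hook table every engine module fills in. */
struct exec_engine {
	void (*destroy)(struct drm_device *, int engine);
	int  (*init)(struct drm_device *, int engine);
	int  (*fini)(struct drm_device *, int engine, int suspend);
	int  (*context_new)(struct channel *, int engine);
	void (*context_del)(struct channel *, int engine);
};

enum { ENGINE_FIFO = 0, ENGINE_NR = 16 };

/* Stand-in for dev_priv->eng[]: the core only ever dispatches through this. */
static struct exec_engine *eng[ENGINE_NR];

/* FIFO "subclass": the generic engine object comes first, so the pointer
 * stored in eng[] can be converted back to the containing struct. */
struct fifo_priv {
	struct exec_engine base;
	int channels;
};

static int fifo_init(struct drm_device *dev, int e) { return 0; }
static int fifo_fini(struct drm_device *dev, int e, int suspend) { return 0; }

static int fifo_context_new(struct channel *chan, int e)
{
	chan->engctx[e] = calloc(1, 32);      /* per-channel FIFO context */
	return chan->engctx[e] ? 0 : -1;
}

static void fifo_context_del(struct channel *chan, int e)
{
	free(chan->engctx[e]);
	chan->engctx[e] = NULL;
}

static void fifo_destroy(struct drm_device *dev, int e)
{
	free(eng[e]);                          /* eng[e] points at priv->base */
	eng[e] = NULL;
}

/* Mirrors what nvc0_fifo_create() now does: allocate priv, fill in the
 * hooks, publish the engine in the shared table. */
static int fifo_create(struct drm_device *dev)
{
	struct fifo_priv *priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -1;
	priv->base.destroy     = fifo_destroy;
	priv->base.init        = fifo_init;
	priv->base.fini        = fifo_fini;
	priv->base.context_new = fifo_context_new;
	priv->base.context_del = fifo_context_del;
	priv->channels         = 128;
	eng[ENGINE_FIFO] = &priv->base;
	return 0;
}

int main(void)
{
	struct drm_device dev = { NULL };
	struct channel chan = { { NULL } };

	if (fifo_create(&dev))
		return 1;
	eng[ENGINE_FIFO]->init(&dev, ENGINE_FIFO);
	eng[ENGINE_FIFO]->context_new(&chan, ENGINE_FIFO);
	eng[ENGINE_FIFO]->context_del(&chan, ENGINE_FIFO);
	eng[ENGINE_FIFO]->fini(&dev, ENGINE_FIFO, 0);
	eng[ENGINE_FIFO]->destroy(&dev, ENGINE_FIFO);
	printf("FIFO registered and torn down as engine %d\n", ENGINE_FIFO);
	return 0;
}

Once PFIFO goes through this table, the core can tear channels down uniformly by walking eng[] and calling context_del()/fini() without knowing which engine it is talking to, which is what lets the fifo-specific destroy_context/load_context/unload_context/takedown entry points disappear in the diff below.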
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvc0_fifo.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_fifo.c	281
1 file changed, 125 insertions(+), 156 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 471723eaf8ad..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_mm.h"
+#include "nouveau_fifo.h"
 
 static void nvc0_fifo_isr(struct drm_device *);
 
 struct nvc0_fifo_priv {
+	struct nouveau_fifo_priv base;
 	struct nouveau_gpuobj *playlist[2];
 	int cur_playlist;
 	struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
37}; 39};
38 40
39struct nvc0_fifo_chan { 41struct nvc0_fifo_chan {
42 struct nouveau_fifo_chan base;
40 struct nouveau_gpuobj *user; 43 struct nouveau_gpuobj *user;
41 struct nouveau_gpuobj *ramfc;
42}; 44};
43 45
44static void 46static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv = pfifo->priv;
+	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct nouveau_gpuobj *cur;
 	int i, p;
 
@@ -69,31 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
 		NV_ERROR(dev, "PFIFO - playlist update failed\n");
 }
 
-int
-nvc0_fifo_create_context(struct nouveau_channel *chan)
+static int
+nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv = pfifo->priv;
-	struct nvc0_fifo_chan *fifoch;
+	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+	struct nvc0_fifo_chan *fctx;
 	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
-	int ret;
+	int ret, i;
 
-	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
-	if (!chan->fifo_priv)
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
 		return -ENOMEM;
-	fifoch = chan->fifo_priv;
-
-	/* allocate vram for control regs, map into polling area */
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
-	if (ret)
-		goto error;
-
-	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
-			  *(struct nouveau_mem **)fifoch->user->node);
 
 	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
 				priv->user_vma.offset + (chan->id * 0x1000),
@@ -103,175 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
 		goto error;
 	}
 
-	/* ramfc */
-	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
-				      chan->ramin->vinst, 0x100,
-				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+	/* allocate vram for control regs, map into polling area */
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
 	if (ret)
 		goto error;
 
-	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
-	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
-	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
-	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
-	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
-	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+			  *(struct nouveau_mem **)fctx->user->node);
+
+	for (i = 0; i < 0x100; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
+	nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+	nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+	nv_wo32(chan->ramin, 0x10, 0x0000face);
+	nv_wo32(chan->ramin, 0x30, 0xfffff902);
+	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
 		    upper_32_bits(ib_virt));
-	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
-	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
-	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
-	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
-	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
-	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
-	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
-	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
-	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+	nv_wo32(chan->ramin, 0x54, 0x00000002);
+	nv_wo32(chan->ramin, 0x84, 0x20400000);
+	nv_wo32(chan->ramin, 0x94, 0x30000001);
+	nv_wo32(chan->ramin, 0x9c, 0x00000100);
+	nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+	nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+	nv_wo32(chan->ramin, 0xac, 0x0000001f);
+	nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
 	pinstmem->flush(dev);
 
 	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
 			(chan->ramin->vinst >> 12));
 	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
 	nvc0_fifo_playlist_update(dev);
-	return 0;
 
 error:
-	pfifo->destroy_context(chan);
+	if (ret)
+		priv->base.base.context_del(chan, engine);
 	return ret;
 }
 
-void
-nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nvc0_fifo_chan *fctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
-	struct nvc0_fifo_chan *fifoch;
 
 	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
 	nv_wr32(dev, 0x002634, chan->id);
 	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
 		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-
 	nvc0_fifo_playlist_update(dev);
-
 	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
 
+	nouveau_gpuobj_ref(NULL, &fctx->user);
 	if (chan->user) {
 		iounmap(chan->user);
 		chan->user = NULL;
 	}
 
-	fifoch = chan->fifo_priv;
-	chan->fifo_priv = NULL;
-	if (!fifoch)
-		return;
-
-	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
-	nouveau_gpuobj_ref(NULL, &fifoch->user);
-	kfree(fifoch);
-}
-
-int
-nvc0_fifo_load_context(struct nouveau_channel *chan)
-{
-	return 0;
-}
-
-int
-nvc0_fifo_unload_context(struct drm_device *dev)
-{
-	int i;
-
-	for (i = 0; i < 128; i++) {
-		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
-			continue;
-
-		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
-		nv_wr32(dev, 0x002634, i);
-		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-				i, nv_rd32(dev, 0x002634));
-			return -EBUSY;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv;
-
-	priv = pfifo->priv;
-	if (!priv)
-		return;
-
-	nouveau_vm_put(&priv->user_vma);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
-	kfree(priv);
-}
-
-void
-nvc0_fifo_takedown(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x002140, 0x00000000);
-	nvc0_fifo_destroy(dev);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
 }
 
 static int
-nvc0_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pfifo->priv = priv;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
-				 &priv->playlist[0]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
-				 &priv->playlist[1]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
-			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
-	if (ret)
-		goto error;
-
-	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-	return 0;
-
-error:
-	nvc0_fifo_destroy(dev);
-	return ret;
-}
-
-int
-nvc0_fifo_init(struct drm_device *dev)
+nvc0_fifo_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
 	struct nouveau_channel *chan;
-	struct nvc0_fifo_priv *priv;
-	int ret, i;
-
-	if (!pfifo->priv) {
-		ret = nvc0_fifo_create(dev);
-		if (ret)
-			return ret;
-	}
-	priv = pfifo->priv;
+	int i;
 
 	/* reset PFIFO, enable all available PSUBFIFO areas */
 	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -309,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
 	/* restore PFIFO context table */
 	for (i = 0; i < 128; i++) {
 		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->fifo_priv)
+		if (!chan || !chan->engctx[engine])
 			continue;
 
 		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -321,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
 	return 0;
 }
 
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	int i;
+
+	for (i = 0; i < 128; i++) {
+		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+			continue;
+
+		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+		nv_wr32(dev, 0x002634, i);
+		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+				i, nv_rd32(dev, 0x002634));
+			return -EBUSY;
+		}
+	}
+
+	nv_wr32(dev, 0x002140, 0x00000000);
+	return 0;
+}
+
+
 struct nouveau_enum nvc0_fifo_fault_unit[] = {
 	{ 0x00, "PGRAPH" },
 	{ 0x03, "PEEPHOLE" },
@@ -410,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
410static int 325static int
411nvc0_fifo_page_flip(struct drm_device *dev, u32 chid) 326nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
412{ 327{
328 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
413 struct drm_nouveau_private *dev_priv = dev->dev_private; 329 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 struct nouveau_channel *chan = NULL; 330 struct nouveau_channel *chan = NULL;
415 unsigned long flags; 331 unsigned long flags;
416 int ret = -EINVAL; 332 int ret = -EINVAL;
417 333
418 spin_lock_irqsave(&dev_priv->channels.lock, flags); 334 spin_lock_irqsave(&dev_priv->channels.lock, flags);
419 if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) { 335 if (likely(chid >= 0 && chid < priv->base.channels)) {
420 chan = dev_priv->channels.ptr[chid]; 336 chan = dev_priv->channels.ptr[chid];
421 if (likely(chan)) 337 if (likely(chan))
422 ret = nouveau_finish_page_flip(chan, NULL); 338 ret = nouveau_finish_page_flip(chan, NULL);
@@ -505,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
 		nv_wr32(dev, 0x002140, 0);
 	}
 }
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nouveau_vm_put(&priv->user_vma);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.base.destroy = nvc0_fifo_destroy;
+	priv->base.base.init = nvc0_fifo_init;
+	priv->base.base.fini = nvc0_fifo_fini;
+	priv->base.base.context_new = nvc0_fifo_context_new;
+	priv->base.base.context_del = nvc0_fifo_context_del;
+	priv->base.channels = 128;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
+	if (ret)
+		goto error;
+
+	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+	if (ret)
+		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+	return ret;
+}