Diffstat (limited to 'drivers/gpu')
30 files changed, 1374 insertions, 1386 deletions
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ce222eb0a318..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,8 +16,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
16 | nv04_mc.o nv40_mc.o nv50_mc.o \ | 16 | nv04_mc.o nv40_mc.o nv50_mc.o \ |
17 | nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ | 17 | nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ |
18 | nv50_fb.o nvc0_fb.o \ | 18 | nv50_fb.o nvc0_fb.o \ |
19 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ | 19 | nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \ |
20 | nve0_fifo.o \ | 20 | nv84_fifo.o nvc0_fifo.o nve0_fifo.o \ |
21 | nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \ | 21 | nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \ |
22 | nv04_software.o nv50_software.o nvc0_software.o \ | 22 | nv04_software.o nv50_software.o nvc0_software.o \ |
23 | nv04_graph.o nv10_graph.o nv20_graph.o \ | 23 | nv04_graph.o nv10_graph.o nv20_graph.o \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a1f566758e7b..9420538d2374 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,6 +27,7 @@ | |||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_drm.h" | 28 | #include "nouveau_drm.h" |
29 | #include "nouveau_dma.h" | 29 | #include "nouveau_dma.h" |
30 | #include "nouveau_fifo.h" | ||
30 | #include "nouveau_ramht.h" | 31 | #include "nouveau_ramht.h" |
31 | #include "nouveau_fence.h" | 32 | #include "nouveau_fence.h" |
32 | #include "nouveau_software.h" | 33 | #include "nouveau_software.h" |
@@ -120,8 +121,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
120 | uint32_t vram_handle, uint32_t gart_handle) | 121 | uint32_t vram_handle, uint32_t gart_handle) |
121 | { | 122 | { |
122 | struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE); | 123 | struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE); |
124 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
123 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 125 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
124 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
125 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | 126 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); |
126 | struct nouveau_channel *chan; | 127 | struct nouveau_channel *chan; |
127 | unsigned long flags; | 128 | unsigned long flags; |
@@ -189,20 +190,13 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
189 | if (dev_priv->card_type >= NV_50) | 190 | if (dev_priv->card_type >= NV_50) |
190 | chan->user_get_hi = 0x60; | 191 | chan->user_get_hi = 0x60; |
191 | 192 | ||
192 | /* disable the fifo caches */ | 193 | /* create fifo context */ |
193 | if (dev_priv->card_type < NV_50) | 194 | ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO); |
194 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
195 | |||
196 | /* Construct initial RAMFC for new channel */ | ||
197 | ret = pfifo->create_context(chan); | ||
198 | if (ret) { | 195 | if (ret) { |
199 | nouveau_channel_put(&chan); | 196 | nouveau_channel_put(&chan); |
200 | return ret; | 197 | return ret; |
201 | } | 198 | } |
202 | 199 | ||
203 | if (dev_priv->card_type < NV_50) | ||
204 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
205 | |||
206 | /* Insert NOPs for NOUVEAU_DMA_SKIPS */ | 200 | /* Insert NOPs for NOUVEAU_DMA_SKIPS */ |
207 | ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); | 201 | ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); |
208 | if (ret) { | 202 | if (ret) { |
@@ -288,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) | |||
288 | struct nouveau_channel *chan = *pchan; | 282 | struct nouveau_channel *chan = *pchan; |
289 | struct drm_device *dev = chan->dev; | 283 | struct drm_device *dev = chan->dev; |
290 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 284 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
291 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
292 | unsigned long flags; | 285 | unsigned long flags; |
293 | int i; | 286 | int i; |
294 | 287 | ||
@@ -305,22 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) | |||
305 | /* give it chance to idle */ | 298 | /* give it chance to idle */ |
306 | nouveau_channel_idle(chan); | 299 | nouveau_channel_idle(chan); |
307 | 300 | ||
308 | /* boot it off the hardware */ | ||
309 | if (dev_priv->card_type < NV_50) | ||
310 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
311 | |||
312 | /* destroy the engine specific contexts */ | 301 | /* destroy the engine specific contexts */ |
313 | for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) { | 302 | for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) { |
314 | if (chan->engctx[i]) | 303 | if (chan->engctx[i]) |
315 | dev_priv->eng[i]->context_del(chan, i); | 304 | dev_priv->eng[i]->context_del(chan, i); |
316 | /*XXX: clean this up later, order is important */ | ||
317 | if (i == NVOBJ_ENGINE_FENCE) | ||
318 | pfifo->destroy_context(chan); | ||
319 | } | 305 | } |
320 | 306 | ||
321 | if (dev_priv->card_type < NV_50) | ||
322 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
323 | |||
324 | /* aside from its resources, the channel should now be dead, | 307 | /* aside from its resources, the channel should now be dead, |
325 | * remove it from the channel list | 308 | * remove it from the channel list |
326 | */ | 309 | */ |
@@ -393,13 +376,15 @@ nouveau_channel_idle(struct nouveau_channel *chan) | |||
393 | void | 376 | void |
394 | nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) | 377 | nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) |
395 | { | 378 | { |
396 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 379 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); |
397 | struct nouveau_engine *engine = &dev_priv->engine; | ||
398 | struct nouveau_channel *chan; | 380 | struct nouveau_channel *chan; |
399 | int i; | 381 | int i; |
400 | 382 | ||
383 | if (!pfifo) | ||
384 | return; | ||
385 | |||
401 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); | 386 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); |
402 | for (i = 0; i < engine->fifo.channels; i++) { | 387 | for (i = 0; i < pfifo->channels; i++) { |
403 | chan = nouveau_channel_get(file_priv, i); | 388 | chan = nouveau_channel_get(file_priv, i); |
404 | if (IS_ERR(chan)) | 389 | if (IS_ERR(chan)) |
405 | continue; | 390 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 910b97e813fa..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@ | |||
33 | #include "nouveau_fb.h" | 33 | #include "nouveau_fb.h" |
34 | #include "nouveau_fbcon.h" | 34 | #include "nouveau_fbcon.h" |
35 | #include "nouveau_pm.h" | 35 | #include "nouveau_pm.h" |
36 | #include "nouveau_fifo.h" | ||
36 | #include "nv50_display.h" | 37 | #include "nv50_display.h" |
37 | 38 | ||
38 | #include "drm_pciids.h" | 39 | #include "drm_pciids.h" |
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
175 | struct drm_device *dev = pci_get_drvdata(pdev); | 176 | struct drm_device *dev = pci_get_drvdata(pdev); |
176 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 177 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
177 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 178 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
178 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 179 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); |
179 | struct nouveau_channel *chan; | 180 | struct nouveau_channel *chan; |
180 | struct drm_crtc *crtc; | 181 | struct drm_crtc *crtc; |
181 | int ret, i, e; | 182 | int ret, i, e; |
@@ -214,21 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
214 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | 215 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); |
215 | 216 | ||
216 | NV_INFO(dev, "Idling channels...\n"); | 217 | NV_INFO(dev, "Idling channels...\n"); |
217 | for (i = 0; i < pfifo->channels; i++) { | 218 | for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) { |
218 | chan = dev_priv->channels.ptr[i]; | 219 | chan = dev_priv->channels.ptr[i]; |
219 | 220 | ||
220 | if (chan && chan->pushbuf_bo) | 221 | if (chan && chan->pushbuf_bo) |
221 | nouveau_channel_idle(chan); | 222 | nouveau_channel_idle(chan); |
222 | } | 223 | } |
223 | 224 | ||
224 | if (dev_priv->card_type < NV_50) { | ||
225 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
226 | nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); | ||
227 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); | ||
228 | nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); | ||
229 | } | ||
230 | pfifo->unload_context(dev); | ||
231 | |||
232 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | 225 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { |
233 | if (!dev_priv->eng[e]) | 226 | if (!dev_priv->eng[e]) |
234 | continue; | 227 | continue; |
@@ -269,11 +262,6 @@ out_abort: | |||
269 | if (dev_priv->eng[e]) | 262 | if (dev_priv->eng[e]) |
270 | dev_priv->eng[e]->init(dev, e); | 263 | dev_priv->eng[e]->init(dev, e); |
271 | } | 264 | } |
272 | if (dev_priv->card_type < NV_50) { | ||
273 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | ||
274 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
275 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
276 | } | ||
277 | return ret; | 265 | return ret; |
278 | } | 266 | } |
279 | 267 | ||
@@ -281,6 +269,7 @@ int | |||
281 | nouveau_pci_resume(struct pci_dev *pdev) | 269 | nouveau_pci_resume(struct pci_dev *pdev) |
282 | { | 270 | { |
283 | struct drm_device *dev = pci_get_drvdata(pdev); | 271 | struct drm_device *dev = pci_get_drvdata(pdev); |
272 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
284 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 273 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
285 | struct nouveau_engine *engine = &dev_priv->engine; | 274 | struct nouveau_engine *engine = &dev_priv->engine; |
286 | struct drm_crtc *crtc; | 275 | struct drm_crtc *crtc; |
@@ -328,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
328 | if (dev_priv->eng[i]) | 317 | if (dev_priv->eng[i]) |
329 | dev_priv->eng[i]->init(dev, i); | 318 | dev_priv->eng[i]->init(dev, i); |
330 | } | 319 | } |
331 | engine->fifo.init(dev); | ||
332 | 320 | ||
333 | nouveau_irq_postinstall(dev); | 321 | nouveau_irq_postinstall(dev); |
334 | 322 | ||
@@ -337,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
337 | struct nouveau_channel *chan; | 325 | struct nouveau_channel *chan; |
338 | int j; | 326 | int j; |
339 | 327 | ||
340 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 328 | for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) { |
341 | chan = dev_priv->channels.ptr[i]; | 329 | chan = dev_priv->channels.ptr[i]; |
342 | if (!chan || !chan->pushbuf_bo) | 330 | if (!chan || !chan->pushbuf_bo) |
343 | continue; | 331 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9943ccf764ce..1ede35491f54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,9 +165,10 @@ enum nouveau_flags { | |||
165 | #define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG | 165 | #define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG |
166 | #define NVOBJ_ENGINE_BSP 6 | 166 | #define NVOBJ_ENGINE_BSP 6 |
167 | #define NVOBJ_ENGINE_VP 7 | 167 | #define NVOBJ_ENGINE_VP 7 |
168 | #define NVOBJ_ENGINE_FENCE 14 | 168 | #define NVOBJ_ENGINE_FIFO 14 |
169 | #define NVOBJ_ENGINE_DISPLAY 15 | 169 | #define NVOBJ_ENGINE_FENCE 15 |
170 | #define NVOBJ_ENGINE_NR 16 | 170 | #define NVOBJ_ENGINE_NR 16 |
171 | #define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/ | ||
171 | 172 | ||
172 | #define NVOBJ_FLAG_DONT_MAP (1 << 0) | 173 | #define NVOBJ_FLAG_DONT_MAP (1 << 0) |
173 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) | 174 | #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) |
@@ -248,8 +249,6 @@ struct nouveau_channel { | |||
248 | 249 | ||
249 | /* PFIFO context */ | 250 | /* PFIFO context */ |
250 | struct nouveau_gpuobj *ramfc; | 251 | struct nouveau_gpuobj *ramfc; |
251 | struct nouveau_gpuobj *cache; | ||
252 | void *fifo_priv; | ||
253 | 252 | ||
254 | /* Execution engine contexts */ | 253 | /* Execution engine contexts */ |
255 | void *engctx[NVOBJ_ENGINE_NR]; | 254 | void *engctx[NVOBJ_ENGINE_NR]; |
@@ -283,8 +282,6 @@ struct nouveau_channel { | |||
283 | int ib_put; | 282 | int ib_put; |
284 | } dma; | 283 | } dma; |
285 | 284 | ||
286 | uint32_t sw_subchannel[8]; | ||
287 | |||
288 | struct { | 285 | struct { |
289 | bool active; | 286 | bool active; |
290 | char name[32]; | 287 | char name[32]; |
@@ -347,23 +344,6 @@ struct nouveau_fb_engine { | |||
347 | void (*free_tile_region)(struct drm_device *dev, int i); | 344 | void (*free_tile_region)(struct drm_device *dev, int i); |
348 | }; | 345 | }; |
349 | 346 | ||
350 | struct nouveau_fifo_engine { | ||
351 | void *priv; | ||
352 | int channels; | ||
353 | |||
354 | struct nouveau_gpuobj *playlist[2]; | ||
355 | int cur_playlist; | ||
356 | |||
357 | int (*init)(struct drm_device *); | ||
358 | void (*takedown)(struct drm_device *); | ||
359 | |||
360 | int (*create_context)(struct nouveau_channel *); | ||
361 | void (*destroy_context)(struct nouveau_channel *); | ||
362 | int (*load_context)(struct nouveau_channel *); | ||
363 | int (*unload_context)(struct drm_device *); | ||
364 | void (*tlb_flush)(struct drm_device *dev); | ||
365 | }; | ||
366 | |||
367 | struct nouveau_display_engine { | 347 | struct nouveau_display_engine { |
368 | void *priv; | 348 | void *priv; |
369 | int (*early_init)(struct drm_device *); | 349 | int (*early_init)(struct drm_device *); |
@@ -571,7 +551,6 @@ struct nouveau_engine { | |||
571 | struct nouveau_mc_engine mc; | 551 | struct nouveau_mc_engine mc; |
572 | struct nouveau_timer_engine timer; | 552 | struct nouveau_timer_engine timer; |
573 | struct nouveau_fb_engine fb; | 553 | struct nouveau_fb_engine fb; |
574 | struct nouveau_fifo_engine fifo; | ||
575 | struct nouveau_display_engine display; | 554 | struct nouveau_display_engine display; |
576 | struct nouveau_gpio_engine gpio; | 555 | struct nouveau_gpio_engine gpio; |
577 | struct nouveau_pm_engine pm; | 556 | struct nouveau_pm_engine pm; |
@@ -1183,52 +1162,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display); | |||
1183 | extern int nvc0_fb_init(struct drm_device *); | 1162 | extern int nvc0_fb_init(struct drm_device *); |
1184 | extern void nvc0_fb_takedown(struct drm_device *); | 1163 | extern void nvc0_fb_takedown(struct drm_device *); |
1185 | 1164 | ||
1186 | /* nv04_fifo.c */ | ||
1187 | extern int nv04_fifo_init(struct drm_device *); | ||
1188 | extern void nv04_fifo_fini(struct drm_device *); | ||
1189 | extern int nv04_fifo_create_context(struct nouveau_channel *); | ||
1190 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); | ||
1191 | extern int nv04_fifo_load_context(struct nouveau_channel *); | ||
1192 | extern int nv04_fifo_unload_context(struct drm_device *); | ||
1193 | extern void nv04_fifo_isr(struct drm_device *); | ||
1194 | bool nv04_fifo_cache_pull(struct drm_device *, bool enable); | ||
1195 | |||
1196 | /* nv10_fifo.c */ | ||
1197 | extern int nv10_fifo_init(struct drm_device *); | ||
1198 | extern int nv10_fifo_create_context(struct nouveau_channel *); | ||
1199 | extern int nv10_fifo_load_context(struct nouveau_channel *); | ||
1200 | extern int nv10_fifo_unload_context(struct drm_device *); | ||
1201 | |||
1202 | /* nv40_fifo.c */ | ||
1203 | extern int nv40_fifo_init(struct drm_device *); | ||
1204 | extern int nv40_fifo_create_context(struct nouveau_channel *); | ||
1205 | extern int nv40_fifo_load_context(struct nouveau_channel *); | ||
1206 | extern int nv40_fifo_unload_context(struct drm_device *); | ||
1207 | |||
1208 | /* nv50_fifo.c */ | ||
1209 | extern int nv50_fifo_init(struct drm_device *); | ||
1210 | extern void nv50_fifo_takedown(struct drm_device *); | ||
1211 | extern int nv50_fifo_create_context(struct nouveau_channel *); | ||
1212 | extern void nv50_fifo_destroy_context(struct nouveau_channel *); | ||
1213 | extern int nv50_fifo_load_context(struct nouveau_channel *); | ||
1214 | extern int nv50_fifo_unload_context(struct drm_device *); | ||
1215 | extern void nv50_fifo_tlb_flush(struct drm_device *dev); | ||
1216 | |||
1217 | /* nvc0_fifo.c */ | ||
1218 | extern int nvc0_fifo_init(struct drm_device *); | ||
1219 | extern void nvc0_fifo_takedown(struct drm_device *); | ||
1220 | extern int nvc0_fifo_create_context(struct nouveau_channel *); | ||
1221 | extern void nvc0_fifo_destroy_context(struct nouveau_channel *); | ||
1222 | extern int nvc0_fifo_load_context(struct nouveau_channel *); | ||
1223 | extern int nvc0_fifo_unload_context(struct drm_device *); | ||
1224 | |||
1225 | /* nve0_fifo.c */ | ||
1226 | extern int nve0_fifo_init(struct drm_device *); | ||
1227 | extern void nve0_fifo_takedown(struct drm_device *); | ||
1228 | extern int nve0_fifo_create_context(struct nouveau_channel *); | ||
1229 | extern void nve0_fifo_destroy_context(struct nouveau_channel *); | ||
1230 | extern int nve0_fifo_unload_context(struct drm_device *); | ||
1231 | |||
1232 | /* nv04_graph.c */ | 1165 | /* nv04_graph.c */ |
1233 | extern int nv04_graph_create(struct drm_device *); | 1166 | extern int nv04_graph_create(struct drm_device *); |
1234 | extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); | 1167 | extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __NOUVEAU_FIFO_H__ | ||
2 | #define __NOUVEAU_FIFO_H__ | ||
3 | |||
4 | struct nouveau_fifo_priv { | ||
5 | struct nouveau_exec_engine base; | ||
6 | u32 channels; | ||
7 | }; | ||
8 | |||
9 | struct nouveau_fifo_chan { | ||
10 | }; | ||
11 | |||
12 | bool nv04_fifo_cache_pull(struct drm_device *, bool); | ||
13 | void nv04_fifo_context_del(struct nouveau_channel *, int); | ||
14 | int nv04_fifo_fini(struct drm_device *, int, bool); | ||
15 | int nv04_fifo_init(struct drm_device *, int); | ||
16 | void nv04_fifo_isr(struct drm_device *); | ||
17 | void nv04_fifo_destroy(struct drm_device *, int); | ||
18 | |||
19 | void nv50_fifo_playlist_update(struct drm_device *); | ||
20 | void nv50_fifo_destroy(struct drm_device *, int); | ||
21 | void nv50_fifo_tlb_flush(struct drm_device *, int); | ||
22 | |||
23 | int nv04_fifo_create(struct drm_device *); | ||
24 | int nv10_fifo_create(struct drm_device *); | ||
25 | int nv17_fifo_create(struct drm_device *); | ||
26 | int nv40_fifo_create(struct drm_device *); | ||
27 | int nv50_fifo_create(struct drm_device *); | ||
28 | int nv84_fifo_create(struct drm_device *); | ||
29 | int nvc0_fifo_create(struct drm_device *); | ||
30 | int nve0_fifo_create(struct drm_device *); | ||
31 | |||
32 | #endif | ||
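
With this header in place, PFIFO is driven through the same nouveau_exec_engine hooks as every other engine: each generation's fifo driver embeds nouveau_fifo_priv (and, through it, nouveau_exec_engine) as the first member of its private struct and publishes itself at dev_priv->eng[NVOBJ_ENGINE_FIFO]. A minimal sketch of how common code then reaches it, mirroring the lookups nouveau_channel.c and nouveau_object.c gain later in this patch (the local variable names here are illustrative only):

        /* nv_engine() is effectively a typed read of dev_priv->eng[engine];
         * the cast to nouveau_fifo_priv works because the fifo drivers place
         * it first in their private structs. */
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);

        if (!pfifo)             /* noaccel, or no fifo driver for this chipset */
                return;

        if (chid >= 0 && chid < pfifo->channels)
                chan = dev_priv->channels.ptr[chid];
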
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fd7273459ad6..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,7 @@ | |||
39 | #include "nouveau_pm.h" | 39 | #include "nouveau_pm.h" |
40 | #include "nouveau_mm.h" | 40 | #include "nouveau_mm.h" |
41 | #include "nouveau_vm.h" | 41 | #include "nouveau_vm.h" |
42 | #include "nouveau_fifo.h" | ||
42 | #include "nouveau_fence.h" | 43 | #include "nouveau_fence.h" |
43 | 44 | ||
44 | /* | 45 | /* |
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8e0b38f35975..d7e56ce410b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,6 +34,7 @@ | |||
34 | #include "drm.h" | 34 | #include "drm.h" |
35 | #include "nouveau_drv.h" | 35 | #include "nouveau_drv.h" |
36 | #include "nouveau_drm.h" | 36 | #include "nouveau_drm.h" |
37 | #include "nouveau_fifo.h" | ||
37 | #include "nouveau_ramht.h" | 38 | #include "nouveau_ramht.h" |
38 | #include "nouveau_software.h" | 39 | #include "nouveau_software.h" |
39 | #include "nouveau_vm.h" | 40 | #include "nouveau_vm.h" |
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, | |||
120 | u32 class, u32 mthd, u32 data) | 121 | u32 class, u32 mthd, u32 data) |
121 | { | 122 | { |
122 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 123 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
124 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
123 | struct nouveau_channel *chan = NULL; | 125 | struct nouveau_channel *chan = NULL; |
124 | unsigned long flags; | 126 | unsigned long flags; |
125 | int ret = -EINVAL; | 127 | int ret = -EINVAL; |
126 | 128 | ||
127 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 129 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
128 | if (chid >= 0 && chid < dev_priv->engine.fifo.channels) | 130 | if (chid >= 0 && chid < pfifo->channels) |
129 | chan = dev_priv->channels.ptr[chid]; | 131 | chan = dev_priv->channels.ptr[chid]; |
130 | if (chan) | 132 | if (chan) |
131 | ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); | 133 | ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 660a033b6ddf..e4e73a13a2b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,7 @@ | |||
39 | #include "nouveau_gpio.h" | 39 | #include "nouveau_gpio.h" |
40 | #include "nouveau_pm.h" | 40 | #include "nouveau_pm.h" |
41 | #include "nv50_display.h" | 41 | #include "nv50_display.h" |
42 | #include "nouveau_fifo.h" | ||
42 | #include "nouveau_fence.h" | 43 | #include "nouveau_fence.h" |
43 | #include "nouveau_software.h" | 44 | #include "nouveau_software.h" |
44 | 45 | ||
@@ -68,13 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
68 | engine->timer.takedown = nv04_timer_takedown; | 69 | engine->timer.takedown = nv04_timer_takedown; |
69 | engine->fb.init = nv04_fb_init; | 70 | engine->fb.init = nv04_fb_init; |
70 | engine->fb.takedown = nv04_fb_takedown; | 71 | engine->fb.takedown = nv04_fb_takedown; |
71 | engine->fifo.channels = 16; | ||
72 | engine->fifo.init = nv04_fifo_init; | ||
73 | engine->fifo.takedown = nv04_fifo_fini; | ||
74 | engine->fifo.create_context = nv04_fifo_create_context; | ||
75 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
76 | engine->fifo.load_context = nv04_fifo_load_context; | ||
77 | engine->fifo.unload_context = nv04_fifo_unload_context; | ||
78 | engine->display.early_init = nv04_display_early_init; | 72 | engine->display.early_init = nv04_display_early_init; |
79 | engine->display.late_takedown = nv04_display_late_takedown; | 73 | engine->display.late_takedown = nv04_display_late_takedown; |
80 | engine->display.create = nv04_display_create; | 74 | engine->display.create = nv04_display_create; |
@@ -108,13 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
108 | engine->fb.init_tile_region = nv10_fb_init_tile_region; | 102 | engine->fb.init_tile_region = nv10_fb_init_tile_region; |
109 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | 103 | engine->fb.set_tile_region = nv10_fb_set_tile_region; |
110 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | 104 | engine->fb.free_tile_region = nv10_fb_free_tile_region; |
111 | engine->fifo.channels = 32; | ||
112 | engine->fifo.init = nv10_fifo_init; | ||
113 | engine->fifo.takedown = nv04_fifo_fini; | ||
114 | engine->fifo.create_context = nv10_fifo_create_context; | ||
115 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
116 | engine->fifo.load_context = nv10_fifo_load_context; | ||
117 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
118 | engine->display.early_init = nv04_display_early_init; | 105 | engine->display.early_init = nv04_display_early_init; |
119 | engine->display.late_takedown = nv04_display_late_takedown; | 106 | engine->display.late_takedown = nv04_display_late_takedown; |
120 | engine->display.create = nv04_display_create; | 107 | engine->display.create = nv04_display_create; |
@@ -154,13 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
154 | engine->fb.init_tile_region = nv20_fb_init_tile_region; | 141 | engine->fb.init_tile_region = nv20_fb_init_tile_region; |
155 | engine->fb.set_tile_region = nv20_fb_set_tile_region; | 142 | engine->fb.set_tile_region = nv20_fb_set_tile_region; |
156 | engine->fb.free_tile_region = nv20_fb_free_tile_region; | 143 | engine->fb.free_tile_region = nv20_fb_free_tile_region; |
157 | engine->fifo.channels = 32; | ||
158 | engine->fifo.init = nv10_fifo_init; | ||
159 | engine->fifo.takedown = nv04_fifo_fini; | ||
160 | engine->fifo.create_context = nv10_fifo_create_context; | ||
161 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
162 | engine->fifo.load_context = nv10_fifo_load_context; | ||
163 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
164 | engine->display.early_init = nv04_display_early_init; | 144 | engine->display.early_init = nv04_display_early_init; |
165 | engine->display.late_takedown = nv04_display_late_takedown; | 145 | engine->display.late_takedown = nv04_display_late_takedown; |
166 | engine->display.create = nv04_display_create; | 146 | engine->display.create = nv04_display_create; |
@@ -196,13 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
196 | engine->fb.init_tile_region = nv30_fb_init_tile_region; | 176 | engine->fb.init_tile_region = nv30_fb_init_tile_region; |
197 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | 177 | engine->fb.set_tile_region = nv10_fb_set_tile_region; |
198 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | 178 | engine->fb.free_tile_region = nv30_fb_free_tile_region; |
199 | engine->fifo.channels = 32; | ||
200 | engine->fifo.init = nv10_fifo_init; | ||
201 | engine->fifo.takedown = nv04_fifo_fini; | ||
202 | engine->fifo.create_context = nv10_fifo_create_context; | ||
203 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
204 | engine->fifo.load_context = nv10_fifo_load_context; | ||
205 | engine->fifo.unload_context = nv10_fifo_unload_context; | ||
206 | engine->display.early_init = nv04_display_early_init; | 179 | engine->display.early_init = nv04_display_early_init; |
207 | engine->display.late_takedown = nv04_display_late_takedown; | 180 | engine->display.late_takedown = nv04_display_late_takedown; |
208 | engine->display.create = nv04_display_create; | 181 | engine->display.create = nv04_display_create; |
@@ -241,13 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
241 | engine->fb.init_tile_region = nv30_fb_init_tile_region; | 214 | engine->fb.init_tile_region = nv30_fb_init_tile_region; |
242 | engine->fb.set_tile_region = nv40_fb_set_tile_region; | 215 | engine->fb.set_tile_region = nv40_fb_set_tile_region; |
243 | engine->fb.free_tile_region = nv30_fb_free_tile_region; | 216 | engine->fb.free_tile_region = nv30_fb_free_tile_region; |
244 | engine->fifo.channels = 32; | ||
245 | engine->fifo.init = nv40_fifo_init; | ||
246 | engine->fifo.takedown = nv04_fifo_fini; | ||
247 | engine->fifo.create_context = nv40_fifo_create_context; | ||
248 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | ||
249 | engine->fifo.load_context = nv40_fifo_load_context; | ||
250 | engine->fifo.unload_context = nv40_fifo_unload_context; | ||
251 | engine->display.early_init = nv04_display_early_init; | 217 | engine->display.early_init = nv04_display_early_init; |
252 | engine->display.late_takedown = nv04_display_late_takedown; | 218 | engine->display.late_takedown = nv04_display_late_takedown; |
253 | engine->display.create = nv04_display_create; | 219 | engine->display.create = nv04_display_create; |
@@ -294,14 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
294 | engine->timer.takedown = nv04_timer_takedown; | 260 | engine->timer.takedown = nv04_timer_takedown; |
295 | engine->fb.init = nv50_fb_init; | 261 | engine->fb.init = nv50_fb_init; |
296 | engine->fb.takedown = nv50_fb_takedown; | 262 | engine->fb.takedown = nv50_fb_takedown; |
297 | engine->fifo.channels = 128; | ||
298 | engine->fifo.init = nv50_fifo_init; | ||
299 | engine->fifo.takedown = nv50_fifo_takedown; | ||
300 | engine->fifo.create_context = nv50_fifo_create_context; | ||
301 | engine->fifo.destroy_context = nv50_fifo_destroy_context; | ||
302 | engine->fifo.load_context = nv50_fifo_load_context; | ||
303 | engine->fifo.unload_context = nv50_fifo_unload_context; | ||
304 | engine->fifo.tlb_flush = nv50_fifo_tlb_flush; | ||
305 | engine->display.early_init = nv50_display_early_init; | 263 | engine->display.early_init = nv50_display_early_init; |
306 | engine->display.late_takedown = nv50_display_late_takedown; | 264 | engine->display.late_takedown = nv50_display_late_takedown; |
307 | engine->display.create = nv50_display_create; | 265 | engine->display.create = nv50_display_create; |
@@ -365,13 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
365 | engine->timer.takedown = nv04_timer_takedown; | 323 | engine->timer.takedown = nv04_timer_takedown; |
366 | engine->fb.init = nvc0_fb_init; | 324 | engine->fb.init = nvc0_fb_init; |
367 | engine->fb.takedown = nvc0_fb_takedown; | 325 | engine->fb.takedown = nvc0_fb_takedown; |
368 | engine->fifo.channels = 128; | ||
369 | engine->fifo.init = nvc0_fifo_init; | ||
370 | engine->fifo.takedown = nvc0_fifo_takedown; | ||
371 | engine->fifo.create_context = nvc0_fifo_create_context; | ||
372 | engine->fifo.destroy_context = nvc0_fifo_destroy_context; | ||
373 | engine->fifo.load_context = nvc0_fifo_load_context; | ||
374 | engine->fifo.unload_context = nvc0_fifo_unload_context; | ||
375 | engine->display.early_init = nv50_display_early_init; | 326 | engine->display.early_init = nv50_display_early_init; |
376 | engine->display.late_takedown = nv50_display_late_takedown; | 327 | engine->display.late_takedown = nv50_display_late_takedown; |
377 | engine->display.create = nv50_display_create; | 328 | engine->display.create = nv50_display_create; |
@@ -414,13 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
414 | engine->timer.takedown = nv04_timer_takedown; | 365 | engine->timer.takedown = nv04_timer_takedown; |
415 | engine->fb.init = nvc0_fb_init; | 366 | engine->fb.init = nvc0_fb_init; |
416 | engine->fb.takedown = nvc0_fb_takedown; | 367 | engine->fb.takedown = nvc0_fb_takedown; |
417 | engine->fifo.channels = 128; | ||
418 | engine->fifo.init = nvc0_fifo_init; | ||
419 | engine->fifo.takedown = nvc0_fifo_takedown; | ||
420 | engine->fifo.create_context = nvc0_fifo_create_context; | ||
421 | engine->fifo.destroy_context = nvc0_fifo_destroy_context; | ||
422 | engine->fifo.load_context = nvc0_fifo_load_context; | ||
423 | engine->fifo.unload_context = nvc0_fifo_unload_context; | ||
424 | engine->display.early_init = nouveau_stub_init; | 368 | engine->display.early_init = nouveau_stub_init; |
425 | engine->display.late_takedown = nouveau_stub_takedown; | 369 | engine->display.late_takedown = nouveau_stub_takedown; |
426 | engine->display.create = nvd0_display_create; | 370 | engine->display.create = nvd0_display_create; |
@@ -461,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
461 | engine->timer.takedown = nv04_timer_takedown; | 405 | engine->timer.takedown = nv04_timer_takedown; |
462 | engine->fb.init = nvc0_fb_init; | 406 | engine->fb.init = nvc0_fb_init; |
463 | engine->fb.takedown = nvc0_fb_takedown; | 407 | engine->fb.takedown = nvc0_fb_takedown; |
464 | engine->fifo.channels = 4096; | ||
465 | engine->fifo.init = nve0_fifo_init; | ||
466 | engine->fifo.takedown = nve0_fifo_takedown; | ||
467 | engine->fifo.create_context = nve0_fifo_create_context; | ||
468 | engine->fifo.destroy_context = nve0_fifo_destroy_context; | ||
469 | engine->fifo.load_context = nvc0_fifo_load_context; | ||
470 | engine->fifo.unload_context = nve0_fifo_unload_context; | ||
471 | engine->display.early_init = nouveau_stub_init; | 408 | engine->display.early_init = nouveau_stub_init; |
472 | engine->display.late_takedown = nouveau_stub_takedown; | 409 | engine->display.late_takedown = nouveau_stub_takedown; |
473 | engine->display.create = nvd0_display_create; | 410 | engine->display.create = nvd0_display_create; |
@@ -728,6 +665,38 @@ nouveau_card_init(struct drm_device *dev) | |||
728 | if (!dev_priv->noaccel) { | 665 | if (!dev_priv->noaccel) { |
729 | switch (dev_priv->card_type) { | 666 | switch (dev_priv->card_type) { |
730 | case NV_04: | 667 | case NV_04: |
668 | nv04_fifo_create(dev); | ||
669 | break; | ||
670 | case NV_10: | ||
671 | case NV_20: | ||
672 | case NV_30: | ||
673 | if (dev_priv->chipset < 0x17) | ||
674 | nv10_fifo_create(dev); | ||
675 | else | ||
676 | nv17_fifo_create(dev); | ||
677 | break; | ||
678 | case NV_40: | ||
679 | nv40_fifo_create(dev); | ||
680 | break; | ||
681 | case NV_50: | ||
682 | if (dev_priv->chipset == 0x50) | ||
683 | nv50_fifo_create(dev); | ||
684 | else | ||
685 | nv84_fifo_create(dev); | ||
686 | break; | ||
687 | case NV_C0: | ||
688 | case NV_D0: | ||
689 | nvc0_fifo_create(dev); | ||
690 | break; | ||
691 | case NV_E0: | ||
692 | nve0_fifo_create(dev); | ||
693 | break; | ||
694 | default: | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | switch (dev_priv->card_type) { | ||
699 | case NV_04: | ||
731 | nv04_fence_create(dev); | 700 | nv04_fence_create(dev); |
732 | break; | 701 | break; |
733 | case NV_10: | 702 | case NV_10: |
@@ -859,16 +828,11 @@ nouveau_card_init(struct drm_device *dev) | |||
859 | goto out_engine; | 828 | goto out_engine; |
860 | } | 829 | } |
861 | } | 830 | } |
862 | |||
863 | /* PFIFO */ | ||
864 | ret = engine->fifo.init(dev); | ||
865 | if (ret) | ||
866 | goto out_engine; | ||
867 | } | 831 | } |
868 | 832 | ||
869 | ret = nouveau_irq_init(dev); | 833 | ret = nouveau_irq_init(dev); |
870 | if (ret) | 834 | if (ret) |
871 | goto out_fifo; | 835 | goto out_engine; |
872 | 836 | ||
873 | ret = nouveau_display_create(dev); | 837 | ret = nouveau_display_create(dev); |
874 | if (ret) | 838 | if (ret) |
@@ -901,9 +865,6 @@ out_pm: | |||
901 | nouveau_display_destroy(dev); | 865 | nouveau_display_destroy(dev); |
902 | out_irq: | 866 | out_irq: |
903 | nouveau_irq_fini(dev); | 867 | nouveau_irq_fini(dev); |
904 | out_fifo: | ||
905 | if (!dev_priv->noaccel) | ||
906 | engine->fifo.takedown(dev); | ||
907 | out_engine: | 868 | out_engine: |
908 | if (!dev_priv->noaccel) { | 869 | if (!dev_priv->noaccel) { |
909 | for (e = e - 1; e >= 0; e--) { | 870 | for (e = e - 1; e >= 0; e--) { |
@@ -956,7 +917,6 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
956 | nouveau_display_destroy(dev); | 917 | nouveau_display_destroy(dev); |
957 | 918 | ||
958 | if (!dev_priv->noaccel) { | 919 | if (!dev_priv->noaccel) { |
959 | engine->fifo.takedown(dev); | ||
960 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { | 920 | for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { |
961 | if (dev_priv->eng[e]) { | 921 | if (dev_priv->eng[e]) { |
962 | dev_priv->eng[e]->fini(dev, e, false); | 922 | dev_priv->eng[e]->fini(dev, e, false); |
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 584c24d457f0..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ben Skeggs. | 2 | * Copyright (C) 2012 Ben Skeggs. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining | 5 | * Permission is hereby granted, free of charge, to any person obtaining |
@@ -27,21 +27,38 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_ramht.h" | 30 | #include "nouveau_fifo.h" |
31 | #include "nouveau_util.h" | 31 | #include "nouveau_util.h" |
32 | 32 | #include "nouveau_ramht.h" | |
33 | #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) | 33 | #include "nouveau_software.h" |
34 | #define NV04_RAMFC__SIZE 32 | 34 | |
35 | #define NV04_RAMFC_DMA_PUT 0x00 | 35 | static struct ramfc_desc { |
36 | #define NV04_RAMFC_DMA_GET 0x04 | 36 | unsigned bits:6; |
37 | #define NV04_RAMFC_DMA_INSTANCE 0x08 | 37 | unsigned ctxs:5; |
38 | #define NV04_RAMFC_DMA_STATE 0x0C | 38 | unsigned ctxp:8; |
39 | #define NV04_RAMFC_DMA_FETCH 0x10 | 39 | unsigned regs:5; |
40 | #define NV04_RAMFC_ENGINE 0x14 | 40 | unsigned regp; |
41 | #define NV04_RAMFC_PULL1_ENGINE 0x18 | 41 | } nv04_ramfc[] = { |
42 | 42 | { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, | |
43 | #define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) | 43 | { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, |
44 | #define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) | 44 | { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, |
45 | { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, | ||
46 | { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE }, | ||
47 | { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, | ||
48 | { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE }, | ||
49 | { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 }, | ||
50 | {} | ||
51 | }; | ||
52 | |||
53 | struct nv04_fifo_priv { | ||
54 | struct nouveau_fifo_priv base; | ||
55 | struct ramfc_desc *ramfc_desc; | ||
56 | }; | ||
57 | |||
58 | struct nv04_fifo_chan { | ||
59 | struct nouveau_fifo_chan base; | ||
60 | struct nouveau_gpuobj *ramfc; | ||
61 | }; | ||
45 | 62 | ||
46 | bool | 63 | bool |
47 | nv04_fifo_cache_pull(struct drm_device *dev, bool enable) | 64 | nv04_fifo_cache_pull(struct drm_device *dev, bool enable) |
@@ -58,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable) | |||
58 | * invalidate the most recently calculated instance. | 75 | * invalidate the most recently calculated instance. |
59 | */ | 76 | */ |
60 | if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, | 77 | if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, |
61 | NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) | 78 | NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) |
62 | NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); | 79 | NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); |
63 | 80 | ||
64 | if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & | 81 | if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & |
65 | NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) | 82 | NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) |
66 | nv_wr32(dev, NV03_PFIFO_INTR_0, | 83 | nv_wr32(dev, NV03_PFIFO_INTR_0, |
67 | NV_PFIFO_INTR_CACHE_ERROR); | 84 | NV_PFIFO_INTR_CACHE_ERROR); |
68 | 85 | ||
69 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); | 86 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); |
70 | } | 87 | } |
@@ -72,238 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable) | |||
72 | return pull & 1; | 89 | return pull & 1; |
73 | } | 90 | } |
74 | 91 | ||
75 | #ifdef __BIG_ENDIAN | 92 | static int |
76 | #define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN | 93 | nv04_fifo_context_new(struct nouveau_channel *chan, int engine) |
77 | #else | ||
78 | #define DMA_FETCH_ENDIANNESS 0 | ||
79 | #endif | ||
80 | |||
81 | int | ||
82 | nv04_fifo_create_context(struct nouveau_channel *chan) | ||
83 | { | 94 | { |
84 | struct drm_device *dev = chan->dev; | 95 | struct drm_device *dev = chan->dev; |
85 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 96 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
97 | struct nv04_fifo_priv *priv = nv_engine(dev, engine); | ||
98 | struct nv04_fifo_chan *fctx; | ||
86 | unsigned long flags; | 99 | unsigned long flags; |
87 | int ret; | 100 | int ret; |
88 | 101 | ||
89 | ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, | 102 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
90 | NV04_RAMFC__SIZE, | 103 | if (!fctx) |
91 | NVOBJ_FLAG_ZERO_ALLOC | | 104 | return -ENOMEM; |
92 | NVOBJ_FLAG_ZERO_FREE, | ||
93 | &chan->ramfc); | ||
94 | if (ret) | ||
95 | return ret; | ||
96 | 105 | ||
106 | /* map channel control registers */ | ||
97 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | 107 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + |
98 | NV03_USER(chan->id), PAGE_SIZE); | 108 | NV03_USER(chan->id), PAGE_SIZE); |
99 | if (!chan->user) | 109 | if (!chan->user) { |
100 | return -ENOMEM; | 110 | ret = -ENOMEM; |
101 | 111 | goto error; | |
102 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 112 | } |
103 | |||
104 | /* Setup initial state */ | ||
105 | RAMFC_WR(DMA_PUT, chan->pushbuf_base); | ||
106 | RAMFC_WR(DMA_GET, chan->pushbuf_base); | ||
107 | RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4); | ||
108 | RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | ||
109 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | ||
110 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | ||
111 | DMA_FETCH_ENDIANNESS)); | ||
112 | 113 | ||
113 | /* enable the fifo dma operation */ | 114 | /* initialise default fifo context */ |
114 | nv_wr32(dev, NV04_PFIFO_MODE, | 115 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst + |
115 | nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); | 116 | chan->id * 32, ~0, 32, |
117 | NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); | ||
118 | if (ret) | ||
119 | goto error; | ||
120 | |||
121 | nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base); | ||
122 | nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base); | ||
123 | nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4); | ||
124 | nv_wo32(fctx->ramfc, 0x0c, 0x00000000); | ||
125 | nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | ||
126 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | ||
127 | #ifdef __BIG_ENDIAN | ||
128 | NV_PFIFO_CACHE1_BIG_ENDIAN | | ||
129 | #endif | ||
130 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); | ||
131 | nv_wo32(fctx->ramfc, 0x14, 0x00000000); | ||
132 | nv_wo32(fctx->ramfc, 0x18, 0x00000000); | ||
133 | nv_wo32(fctx->ramfc, 0x1c, 0x00000000); | ||
116 | 134 | ||
135 | /* enable dma mode on the channel */ | ||
136 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
137 | nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id)); | ||
117 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 138 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
118 | return 0; | 139 | |
140 | error: | ||
141 | if (ret) | ||
142 | priv->base.base.context_del(chan, engine); | ||
143 | return ret; | ||
119 | } | 144 | } |
120 | 145 | ||
121 | void | 146 | void |
122 | nv04_fifo_destroy_context(struct nouveau_channel *chan) | 147 | nv04_fifo_context_del(struct nouveau_channel *chan, int engine) |
123 | { | 148 | { |
124 | struct drm_device *dev = chan->dev; | 149 | struct drm_device *dev = chan->dev; |
125 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 150 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
126 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 151 | struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine); |
152 | struct nv04_fifo_chan *fctx = chan->engctx[engine]; | ||
153 | struct ramfc_desc *c = priv->ramfc_desc; | ||
127 | unsigned long flags; | 154 | unsigned long flags; |
155 | int chid; | ||
128 | 156 | ||
157 | /* prevent fifo context switches */ | ||
129 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 158 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
130 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | 159 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); |
131 | 160 | ||
132 | /* Unload the context if it's the currently active one */ | 161 | /* if this channel is active, replace it with a null context */ |
133 | if ((nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0xf) == chan->id) { | 162 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels; |
163 | if (chid == chan->id) { | ||
134 | nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); | 164 | nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); |
135 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); | 165 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); |
136 | nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); | 166 | nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); |
137 | pfifo->unload_context(dev); | 167 | |
168 | do { | ||
169 | u32 mask = ((1ULL << c->bits) - 1) << c->regs; | ||
170 | nv_mask(dev, c->regp, mask, 0x00000000); | ||
171 | } while ((++c)->bits); | ||
172 | |||
173 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); | ||
174 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); | ||
175 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); | ||
138 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | 176 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); |
139 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | 177 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); |
140 | } | 178 | } |
141 | 179 | ||
142 | /* Keep it from being rescheduled */ | 180 | /* restore normal operation, after disabling dma mode */ |
143 | nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); | 181 | nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); |
144 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | 182 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); |
145 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 183 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
146 | 184 | ||
147 | /* Free the channel resources */ | 185 | /* clean up */ |
186 | nouveau_gpuobj_ref(NULL, &fctx->ramfc); | ||
187 | nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */ | ||
148 | if (chan->user) { | 188 | if (chan->user) { |
149 | iounmap(chan->user); | 189 | iounmap(chan->user); |
150 | chan->user = NULL; | 190 | chan->user = NULL; |
151 | } | 191 | } |
152 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | ||
153 | } | ||
154 | |||
155 | static void | ||
156 | nv04_fifo_do_load_context(struct drm_device *dev, int chid) | ||
157 | { | ||
158 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
159 | uint32_t fc = NV04_RAMFC(chid), tmp; | ||
160 | |||
161 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); | ||
162 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); | ||
163 | tmp = nv_ri32(dev, fc + 8); | ||
164 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); | ||
165 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); | ||
166 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12)); | ||
167 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16)); | ||
168 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20)); | ||
169 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24)); | ||
170 | |||
171 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); | ||
172 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); | ||
173 | } | 192 | } |
174 | 193 | ||
175 | int | 194 | int |
176 | nv04_fifo_load_context(struct nouveau_channel *chan) | 195 | nv04_fifo_init(struct drm_device *dev, int engine) |
177 | { | ||
178 | uint32_t tmp; | ||
179 | |||
180 | nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1, | ||
181 | NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); | ||
182 | nv04_fifo_do_load_context(chan->dev, chan->id); | ||
183 | nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); | ||
184 | |||
185 | /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ | ||
186 | tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); | ||
187 | nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | int | ||
193 | nv04_fifo_unload_context(struct drm_device *dev) | ||
194 | { | 196 | { |
195 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 197 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
196 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 198 | struct nv04_fifo_priv *priv = nv_engine(dev, engine); |
197 | struct nouveau_channel *chan = NULL; | 199 | int i; |
198 | uint32_t tmp; | ||
199 | int chid; | ||
200 | 200 | ||
201 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0xf; | 201 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0); |
202 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) | 202 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO); |
203 | return 0; | ||
204 | 203 | ||
205 | chan = dev_priv->channels.ptr[chid]; | 204 | nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff); |
206 | if (!chan) { | 205 | nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); |
207 | NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); | ||
212 | RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); | ||
213 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; | ||
214 | tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE); | ||
215 | RAMFC_WR(DMA_INSTANCE, tmp); | ||
216 | RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); | ||
217 | RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); | ||
218 | RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); | ||
219 | RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); | ||
220 | |||
221 | nv04_fifo_do_load_context(dev, pfifo->channels - 1); | ||
222 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static void | ||
227 | nv04_fifo_init_reset(struct drm_device *dev) | ||
228 | { | ||
229 | nv_wr32(dev, NV03_PMC_ENABLE, | ||
230 | nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); | ||
231 | nv_wr32(dev, NV03_PMC_ENABLE, | ||
232 | nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); | ||
233 | |||
234 | nv_wr32(dev, 0x003224, 0x000f0078); | ||
235 | nv_wr32(dev, 0x002044, 0x0101ffff); | ||
236 | nv_wr32(dev, 0x002040, 0x000000ff); | ||
237 | nv_wr32(dev, 0x002500, 0x00000000); | ||
238 | nv_wr32(dev, 0x003000, 0x00000000); | ||
239 | nv_wr32(dev, 0x003050, 0x00000000); | ||
240 | nv_wr32(dev, 0x003200, 0x00000000); | ||
241 | nv_wr32(dev, 0x003250, 0x00000000); | ||
242 | nv_wr32(dev, 0x003220, 0x00000000); | ||
243 | |||
244 | nv_wr32(dev, 0x003250, 0x00000000); | ||
245 | nv_wr32(dev, 0x003270, 0x00000000); | ||
246 | nv_wr32(dev, 0x003210, 0x00000000); | ||
247 | } | ||
248 | |||
249 | static void | ||
250 | nv04_fifo_init_ramxx(struct drm_device *dev) | ||
251 | { | ||
252 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
253 | 206 | ||
254 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | | 207 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | |
255 | ((dev_priv->ramht->bits - 9) << 16) | | 208 | ((dev_priv->ramht->bits - 9) << 16) | |
256 | (dev_priv->ramht->gpuobj->pinst >> 8)); | 209 | (dev_priv->ramht->gpuobj->pinst >> 8)); |
257 | nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); | 210 | nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); |
258 | nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); | 211 | nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); |
259 | } | ||
260 | 212 | ||
261 | static void | 213 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); |
262 | nv04_fifo_init_intr(struct drm_device *dev) | ||
263 | { | ||
264 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
265 | nv_wr32(dev, 0x002100, 0xffffffff); | ||
266 | nv_wr32(dev, 0x002140, 0xffffffff); | ||
267 | } | ||
268 | 214 | ||
269 | int | 215 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff); |
270 | nv04_fifo_init(struct drm_device *dev) | 216 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff); |
271 | { | ||
272 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
273 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
274 | int i; | ||
275 | 217 | ||
276 | nv04_fifo_init_reset(dev); | ||
277 | nv04_fifo_init_ramxx(dev); | ||
278 | |||
279 | nv04_fifo_do_load_context(dev, pfifo->channels - 1); | ||
280 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); | ||
281 | |||
282 | nv04_fifo_init_intr(dev); | ||
283 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | 218 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); |
284 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | 219 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); |
285 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | 220 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); |
286 | 221 | ||
287 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 222 | for (i = 0; i < priv->base.channels; i++) { |
288 | if (dev_priv->channels.ptr[i]) { | 223 | if (dev_priv->channels.ptr[i]) |
289 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | 224 | nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i)); |
290 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | ||
291 | } | ||
292 | } | 225 | } |
293 | 226 | ||
294 | return 0; | 227 | return 0; |
295 | } | 228 | } |
296 | 229 | ||
297 | void | 230 | int |
298 | nv04_fifo_fini(struct drm_device *dev) | 231 | nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend) |
299 | { | 232 | { |
300 | nv_wr32(dev, 0x2140, 0x00000000); | 233 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
301 | nouveau_irq_unregister(dev, 8); | 234 | struct nv04_fifo_priv *priv = nv_engine(dev, engine); |
235 | struct nouveau_channel *chan; | ||
236 | int chid; | ||
237 | |||
238 | /* prevent context switches and halt fifo operation */ | ||
239 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | ||
240 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); | ||
241 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); | ||
242 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0); | ||
243 | |||
244 | /* store current fifo context in ramfc */ | ||
245 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels; | ||
246 | chan = dev_priv->channels.ptr[chid]; | ||
247 | if (suspend && chid != priv->base.channels && chan) { | ||
248 | struct nv04_fifo_chan *fctx = chan->engctx[engine]; | ||
249 | struct nouveau_gpuobj *ctx = fctx->ramfc; | ||
250 | struct ramfc_desc *c = priv->ramfc_desc; | ||
251 | do { | ||
252 | u32 rm = ((1ULL << c->bits) - 1) << c->regs; | ||
253 | u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; | ||
254 | u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs; | ||
255 | u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm); | ||
256 | nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs)); | ||
257 | } while ((++c)->bits); | ||
258 | } | ||
259 | |||
260 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000); | ||
261 | return 0; | ||
302 | } | 262 | } |
303 | 263 | ||
304 | static bool | 264 | static bool |
305 | nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | 265 | nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) |
306 | { | 266 | { |
267 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
307 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 268 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
308 | struct nouveau_channel *chan = NULL; | 269 | struct nouveau_channel *chan = NULL; |
309 | struct nouveau_gpuobj *obj; | 270 | struct nouveau_gpuobj *obj; |
@@ -314,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | |||
314 | u32 engine; | 275 | u32 engine; |
315 | 276 | ||
316 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 277 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
317 | if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) | 278 | if (likely(chid >= 0 && chid < pfifo->channels)) |
318 | chan = dev_priv->channels.ptr[chid]; | 279 | chan = dev_priv->channels.ptr[chid]; |
319 | if (unlikely(!chan)) | 280 | if (unlikely(!chan)) |
320 | goto out; | 281 | goto out; |
@@ -325,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | |||
325 | if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) | 286 | if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) |
326 | break; | 287 | break; |
327 | 288 | ||
328 | chan->sw_subchannel[subc] = obj->class; | ||
329 | engine = 0x0000000f << (subc * 4); | 289 | engine = 0x0000000f << (subc * 4); |
330 | 290 | ||
331 | nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); | 291 | nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); |
@@ -336,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) | |||
336 | if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) | 296 | if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) |
337 | break; | 297 | break; |
338 | 298 | ||
339 | if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], | 299 | if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev), |
340 | mthd, data)) | 300 | mthd, data)) |
341 | handled = true; | 301 | handled = true; |
342 | break; | 302 | break; |
@@ -359,6 +319,7 @@ static const char *nv_dma_state_err(u32 state) | |||
359 | void | 319 | void |
360 | nv04_fifo_isr(struct drm_device *dev) | 320 | nv04_fifo_isr(struct drm_device *dev) |
361 | { | 321 | { |
322 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
362 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 323 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
363 | uint32_t status, reassign; | 324 | uint32_t status, reassign; |
364 | int cnt = 0; | 325 | int cnt = 0; |
@@ -369,8 +330,7 @@ nv04_fifo_isr(struct drm_device *dev) | |||
369 | 330 | ||
370 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); | 331 | nv_wr32(dev, NV03_PFIFO_CACHES, 0); |
371 | 332 | ||
372 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1); | 333 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels; |
373 | chid &= dev_priv->engine.fifo.channels - 1; | ||
374 | get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); | 334 | get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); |
375 | 335 | ||
376 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | 336 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { |
@@ -509,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev) | |||
509 | 469 | ||
510 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); | 470 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); |
511 | } | 471 | } |
472 | |||
473 | void | ||
474 | nv04_fifo_destroy(struct drm_device *dev, int engine) | ||
475 | { | ||
476 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
477 | struct nv04_fifo_priv *priv = nv_engine(dev, engine); | ||
478 | |||
479 | nouveau_irq_unregister(dev, 8); | ||
480 | |||
481 | dev_priv->eng[engine] = NULL; | ||
482 | kfree(priv); | ||
483 | } | ||
484 | |||
485 | int | ||
486 | nv04_fifo_create(struct drm_device *dev) | ||
487 | { | ||
488 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
489 | struct nv04_fifo_priv *priv; | ||
490 | |||
491 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
492 | if (!priv) | ||
493 | return -ENOMEM; | ||
494 | |||
495 | priv->base.base.destroy = nv04_fifo_destroy; | ||
496 | priv->base.base.init = nv04_fifo_init; | ||
497 | priv->base.base.fini = nv04_fifo_fini; | ||
498 | priv->base.base.context_new = nv04_fifo_context_new; | ||
499 | priv->base.base.context_del = nv04_fifo_context_del; | ||
500 | priv->base.channels = 15; | ||
501 | priv->ramfc_desc = nv04_ramfc; | ||
502 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
503 | |||
504 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
505 | return 0; | ||
506 | } | ||
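
The new nv04_fifo_fini() above saves the live PFIFO state of the active channel back into its RAMFC image by walking the ramfc_desc table: each entry names a field's width, its shift and byte offset inside RAMFC, and the MMIO register it mirrors. The following standalone sketch (not driver code; plain arrays stand in for nv_rd32()/nv_ro32()/nv_wo32(), and the example descriptor values are made up) models that pack loop:

/* Userspace model of the descriptor-driven RAMFC save loop. */
#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
	unsigned bits:6;   /* field width */
	unsigned ctxs:5;   /* shift inside the RAMFC word */
	unsigned ctxp:8;   /* byte offset of the RAMFC word */
	unsigned regs:5;   /* shift inside the MMIO register */
	unsigned regp;     /* MMIO register (array index here) */
};

static uint32_t mmio[4];    /* stand-in for PFIFO registers */
static uint32_t ramfc[8];   /* stand-in for the channel's RAMFC image */

static void save_context(const struct ramfc_desc *c)
{
	/* mirrors the do/while in nv04_fifo_fini(); the table ends with a
	 * zero-width sentinel entry */
	do {
		uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
		uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
		uint32_t rv = (mmio[c->regp] & rm) >> c->regs;
		uint32_t cv = ramfc[c->ctxp / 4] & ~cm;
		ramfc[c->ctxp / 4] = cv | (rv << c->ctxs);
	} while ((++c)->bits);
}

int main(void)
{
	static const struct ramfc_desc desc[] = {
		{ 32,  0, 0x00, 0, 0 },   /* full word, e.g. CACHE1_DMA_PUT */
		{ 16,  0, 0x04, 0, 1 },   /* low half of a packed word */
		{ 16, 16, 0x04, 0, 2 },   /* high half of the same word */
		{}
	};
	mmio[0] = 0x1000; mmio[1] = 0x1234; mmio[2] = 0xbeef;
	save_context(desc);
	printf("%08x %08x\n", ramfc[0], ramfc[1]);   /* 00001000 beef1234 */
	return 0;
}

The same table drives the restore direction elsewhere in the rework, which is the reason the layout is expressed as data rather than the old hand-written load/unload register sequences.
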
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 5b5f3ba85f5c..72f1a62903b3 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
@@ -356,12 +356,12 @@ static struct nouveau_channel * | |||
356 | nv04_graph_channel(struct drm_device *dev) | 356 | nv04_graph_channel(struct drm_device *dev) |
357 | { | 357 | { |
358 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 358 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
359 | int chid = dev_priv->engine.fifo.channels; | 359 | int chid = 15; |
360 | 360 | ||
361 | if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) | 361 | if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) |
362 | chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24; | 362 | chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24; |
363 | 363 | ||
364 | if (chid >= dev_priv->engine.fifo.channels) | 364 | if (chid > 15) |
365 | return NULL; | 365 | return NULL; |
366 | 366 | ||
367 | return dev_priv->channels.ptr[chid]; | 367 | return dev_priv->channels.ptr[chid]; |
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan) | |||
404 | static int | 404 | static int |
405 | nv04_graph_unload_context(struct drm_device *dev) | 405 | nv04_graph_unload_context(struct drm_device *dev) |
406 | { | 406 | { |
407 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
408 | struct nouveau_channel *chan = NULL; | 407 | struct nouveau_channel *chan = NULL; |
409 | struct graph_state *ctx; | 408 | struct graph_state *ctx; |
410 | uint32_t tmp; | 409 | uint32_t tmp; |
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev) | |||
420 | 419 | ||
421 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000); | 420 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000); |
422 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | 421 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; |
423 | tmp |= (dev_priv->engine.fifo.channels - 1) << 24; | 422 | tmp |= 15 << 24; |
424 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); | 423 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); |
425 | return 0; | 424 | return 0; |
426 | } | 425 | } |
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine, | |||
495 | static int | 494 | static int |
496 | nv04_graph_init(struct drm_device *dev, int engine) | 495 | nv04_graph_init(struct drm_device *dev, int engine) |
497 | { | 496 | { |
498 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
499 | uint32_t tmp; | 497 | uint32_t tmp; |
500 | 498 | ||
501 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | 499 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & |
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine) | |||
527 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); | 525 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); |
528 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); | 526 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); |
529 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | 527 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; |
530 | tmp |= (dev_priv->engine.fifo.channels - 1) << 24; | 528 | tmp |= 15 << 24; |
531 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); | 529 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); |
532 | 530 | ||
533 | /* These don't belong here, they're part of a per-channel context */ | 531 | /* These don't belong here, they're part of a per-channel context */ |
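
In nv04_graph.c the old dev_priv->engine.fifo.channels lookups become the literal 15: with the reworked FIFO module exposing 15 usable channels on NV04, that id doubles as the "no channel bound" value written into bits 24+ of NV04_PGRAPH_CTX_USER. A toy illustration (not driver code; the constant name is invented for the example) of how that field is read and written:

#include <stdint.h>
#include <assert.h>

#define NV04_GRAPH_NULL_CHID 15   /* assumed marker value, per the diff */

static int ctx_user_get_chid(uint32_t ctx_user)
{
	return ctx_user >> 24;                 /* channel id lives in bits 24+ */
}

static uint32_t ctx_user_set_chid(uint32_t ctx_user, int chid)
{
	return (ctx_user & 0x00ffffff) | ((uint32_t)chid << 24);
}

int main(void)
{
	uint32_t reg = ctx_user_set_chid(0x12345678, NV04_GRAPH_NULL_CHID);
	assert(ctx_user_get_chid(reg) == NV04_GRAPH_NULL_CHID);
	return 0;
}
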
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 1acc626f74b0..ef7a934a499a 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -1,6 +1,8 @@ | |||
1 | #include "drmP.h" | 1 | #include "drmP.h" |
2 | #include "drm.h" | 2 | #include "drm.h" |
3 | |||
3 | #include "nouveau_drv.h" | 4 | #include "nouveau_drv.h" |
5 | #include "nouveau_fifo.h" | ||
4 | #include "nouveau_ramht.h" | 6 | #include "nouveau_ramht.h" |
5 | 7 | ||
6 | /* returns the size of fifo context */ | 8 | /* returns the size of fifo context */ |
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev) | |||
10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 12 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
11 | 13 | ||
12 | if (dev_priv->chipset >= 0x40) | 14 | if (dev_priv->chipset >= 0x40) |
13 | return 128; | 15 | return 128 * 32; |
14 | else | 16 | else |
15 | if (dev_priv->chipset >= 0x17) | 17 | if (dev_priv->chipset >= 0x17) |
16 | return 64; | 18 | return 64 * 32; |
19 | else | ||
20 | if (dev_priv->chipset >= 0x10) | ||
21 | return 32 * 32; | ||
17 | 22 | ||
18 | return 32; | 23 | return 32 * 16; |
19 | } | 24 | } |
20 | 25 | ||
21 | int nv04_instmem_init(struct drm_device *dev) | 26 | int nv04_instmem_init(struct drm_device *dev) |
@@ -39,7 +44,7 @@ int nv04_instmem_init(struct drm_device *dev) | |||
39 | else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; | 44 | else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; |
40 | else rsvd = 0x4a40 * vs; | 45 | else rsvd = 0x4a40 * vs; |
41 | rsvd += 16 * 1024; | 46 | rsvd += 16 * 1024; |
42 | rsvd *= dev_priv->engine.fifo.channels; | 47 | rsvd *= 32; /* per-channel */ |
43 | 48 | ||
44 | rsvd += 512 * 1024; /* pci(e)gart table */ | 49 | rsvd += 512 * 1024; /* pci(e)gart table */ |
45 | rsvd += 512 * 1024; /* object storage */ | 50 | rsvd += 512 * 1024; /* object storage */ |
@@ -67,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev) | |||
67 | return ret; | 72 | return ret; |
68 | 73 | ||
69 | /* And RAMFC */ | 74 | /* And RAMFC */ |
70 | length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev); | 75 | length = nouveau_fifo_ctx_size(dev); |
71 | switch (dev_priv->card_type) { | 76 | switch (dev_priv->card_type) { |
72 | case NV_40: | 77 | case NV_40: |
73 | offset = 0x20000; | 78 | offset = 0x20000; |
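
nouveau_fifo_ctx_size() now returns the size of the whole RAMFC block rather than a single per-channel entry, so the channel-count multiplication disappears from the instmem caller. A minimal sketch of the new sizing (the helper name is made up; the chipset thresholds and sizes follow the diff):

#include <stdio.h>

static unsigned ramfc_total_size(unsigned chipset)
{
	if (chipset >= 0x40)
		return 128 * 32;   /* 128-byte entries, 32 channels */
	if (chipset >= 0x17)
		return 64 * 32;    /* 64-byte entries, 32 channels */
	if (chipset >= 0x10)
		return 32 * 32;    /* 32-byte entries, 32 channels */
	return 32 * 16;            /* NV04: 32-byte entries, 16 channels */
}

int main(void)
{
	printf("nv17 RAMFC block: %u bytes\n", ramfc_total_size(0x17));
	return 0;
}
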
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 476451c6f961..f1fe7d758241 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ben Skeggs. | 2 | * Copyright (C) 2012 Ben Skeggs. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining | 5 | * Permission is hereby granted, free of charge, to any person obtaining |
@@ -27,214 +27,112 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_fifo.h" | ||
31 | #include "nouveau_util.h" | ||
30 | #include "nouveau_ramht.h" | 32 | #include "nouveau_ramht.h" |
31 | 33 | ||
32 | #define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE)) | 34 | static struct ramfc_desc { |
33 | #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) | 35 | unsigned bits:6; |
34 | 36 | unsigned ctxs:5; | |
35 | int | 37 | unsigned ctxp:8; |
36 | nv10_fifo_create_context(struct nouveau_channel *chan) | 38 | unsigned regs:5; |
39 | unsigned regp; | ||
40 | } nv10_ramfc[] = { | ||
41 | { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, | ||
42 | { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, | ||
43 | { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, | ||
44 | { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, | ||
45 | { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, | ||
46 | { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE }, | ||
47 | { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, | ||
48 | { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE }, | ||
49 | { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 }, | ||
50 | {} | ||
51 | }; | ||
52 | |||
53 | struct nv10_fifo_priv { | ||
54 | struct nouveau_fifo_priv base; | ||
55 | struct ramfc_desc *ramfc_desc; | ||
56 | }; | ||
57 | |||
58 | struct nv10_fifo_chan { | ||
59 | struct nouveau_fifo_chan base; | ||
60 | struct nouveau_gpuobj *ramfc; | ||
61 | }; | ||
62 | |||
63 | static int | ||
64 | nv10_fifo_context_new(struct nouveau_channel *chan, int engine) | ||
37 | { | 65 | { |
38 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
39 | struct drm_device *dev = chan->dev; | 66 | struct drm_device *dev = chan->dev; |
40 | uint32_t fc = NV10_RAMFC(chan->id); | 67 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
68 | struct nv10_fifo_priv *priv = nv_engine(dev, engine); | ||
69 | struct nv10_fifo_chan *fctx; | ||
70 | unsigned long flags; | ||
41 | int ret; | 71 | int ret; |
42 | 72 | ||
43 | ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, | 73 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
44 | NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | | 74 | if (!fctx) |
45 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); | 75 | return -ENOMEM; |
46 | if (ret) | ||
47 | return ret; | ||
48 | 76 | ||
77 | /* map channel control registers */ | ||
49 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | 78 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + |
50 | NV03_USER(chan->id), PAGE_SIZE); | 79 | NV03_USER(chan->id), PAGE_SIZE); |
51 | if (!chan->user) | 80 | if (!chan->user) { |
52 | return -ENOMEM; | 81 | ret = -ENOMEM; |
82 | goto error; | ||
83 | } | ||
53 | 84 | ||
54 | /* Fill entries that are seen filled in dumps of nvidia driver just | 85 | /* initialise default fifo context */ |
55 | * after channel's is put into DMA mode | 86 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst + |
56 | */ | 87 | chan->id * 32, ~0, 32, |
57 | nv_wi32(dev, fc + 0, chan->pushbuf_base); | 88 | NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); |
58 | nv_wi32(dev, fc + 4, chan->pushbuf_base); | 89 | if (ret) |
59 | nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); | 90 | goto error; |
60 | nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 91 | |
61 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 92 | nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base); |
62 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | 93 | nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base); |
94 | nv_wo32(fctx->ramfc, 0x08, 0x00000000); | ||
95 | nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4); | ||
96 | nv_wo32(fctx->ramfc, 0x10, 0x00000000); | ||
97 | nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | ||
98 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | ||
63 | #ifdef __BIG_ENDIAN | 99 | #ifdef __BIG_ENDIAN |
64 | NV_PFIFO_CACHE1_BIG_ENDIAN | | 100 | NV_PFIFO_CACHE1_BIG_ENDIAN | |
65 | #endif | 101 | #endif |
66 | 0); | 102 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); |
67 | 103 | nv_wo32(fctx->ramfc, 0x18, 0x00000000); | |
68 | /* enable the fifo dma operation */ | 104 | nv_wo32(fctx->ramfc, 0x1c, 0x00000000); |
69 | nv_wr32(dev, NV04_PFIFO_MODE, | ||
70 | nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void | ||
75 | nv10_fifo_do_load_context(struct drm_device *dev, int chid) | ||
76 | { | ||
77 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
78 | uint32_t fc = NV10_RAMFC(chid), tmp; | ||
79 | |||
80 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); | ||
81 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); | ||
82 | nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); | ||
83 | 105 | ||
84 | tmp = nv_ri32(dev, fc + 12); | 106 | /* enable dma mode on the channel */ |
85 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); | 107 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
86 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); | 108 | nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id)); |
109 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
87 | 110 | ||
88 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16)); | 111 | error: |
89 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20)); | 112 | if (ret) |
90 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24)); | 113 | priv->base.base.context_del(chan, engine); |
91 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28)); | 114 | return ret; |
92 | |||
93 | if (dev_priv->chipset < 0x17) | ||
94 | goto out; | ||
95 | |||
96 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32)); | ||
97 | tmp = nv_ri32(dev, fc + 36); | ||
98 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp); | ||
99 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40)); | ||
100 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44)); | ||
101 | nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48)); | ||
102 | |||
103 | out: | ||
104 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); | ||
105 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); | ||
106 | } | ||
107 | |||
108 | int | ||
109 | nv10_fifo_load_context(struct nouveau_channel *chan) | ||
110 | { | ||
111 | struct drm_device *dev = chan->dev; | ||
112 | uint32_t tmp; | ||
113 | |||
114 | nv10_fifo_do_load_context(dev, chan->id); | ||
115 | |||
116 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, | ||
117 | NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); | ||
118 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); | ||
119 | |||
120 | /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ | ||
121 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); | ||
122 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); | ||
123 | |||
124 | return 0; | ||
125 | } | 115 | } |
126 | 116 | ||
127 | int | 117 | int |
128 | nv10_fifo_unload_context(struct drm_device *dev) | 118 | nv10_fifo_create(struct drm_device *dev) |
129 | { | 119 | { |
130 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 120 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
131 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 121 | struct nv10_fifo_priv *priv; |
132 | uint32_t fc, tmp; | ||
133 | int chid; | ||
134 | |||
135 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x1f; | ||
136 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) | ||
137 | return 0; | ||
138 | fc = NV10_RAMFC(chid); | ||
139 | 122 | ||
140 | nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); | 123 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
141 | nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); | 124 | if (!priv) |
142 | nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); | 125 | return -ENOMEM; |
143 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; | ||
144 | tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); | ||
145 | nv_wi32(dev, fc + 12, tmp); | ||
146 | nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); | ||
147 | nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); | ||
148 | nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); | ||
149 | nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); | ||
150 | |||
151 | if (dev_priv->chipset < 0x17) | ||
152 | goto out; | ||
153 | |||
154 | nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); | ||
155 | tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); | ||
156 | nv_wi32(dev, fc + 36, tmp); | ||
157 | nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); | ||
158 | nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE)); | ||
159 | nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); | ||
160 | |||
161 | out: | ||
162 | nv10_fifo_do_load_context(dev, pfifo->channels - 1); | ||
163 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static void | ||
168 | nv10_fifo_init_reset(struct drm_device *dev) | ||
169 | { | ||
170 | nv_wr32(dev, NV03_PMC_ENABLE, | ||
171 | nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); | ||
172 | nv_wr32(dev, NV03_PMC_ENABLE, | ||
173 | nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); | ||
174 | |||
175 | nv_wr32(dev, 0x003224, 0x000f0078); | ||
176 | nv_wr32(dev, 0x002044, 0x0101ffff); | ||
177 | nv_wr32(dev, 0x002040, 0x000000ff); | ||
178 | nv_wr32(dev, 0x002500, 0x00000000); | ||
179 | nv_wr32(dev, 0x003000, 0x00000000); | ||
180 | nv_wr32(dev, 0x003050, 0x00000000); | ||
181 | |||
182 | nv_wr32(dev, 0x003258, 0x00000000); | ||
183 | nv_wr32(dev, 0x003210, 0x00000000); | ||
184 | nv_wr32(dev, 0x003270, 0x00000000); | ||
185 | } | ||
186 | |||
187 | static void | ||
188 | nv10_fifo_init_ramxx(struct drm_device *dev) | ||
189 | { | ||
190 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
191 | 126 | ||
192 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | | 127 | priv->base.base.destroy = nv04_fifo_destroy; |
193 | ((dev_priv->ramht->bits - 9) << 16) | | 128 | priv->base.base.init = nv04_fifo_init; |
194 | (dev_priv->ramht->gpuobj->pinst >> 8)); | 129 | priv->base.base.fini = nv04_fifo_fini; |
195 | nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); | 130 | priv->base.base.context_new = nv10_fifo_context_new; |
131 | priv->base.base.context_del = nv04_fifo_context_del; | ||
132 | priv->base.channels = 31; | ||
133 | priv->ramfc_desc = nv10_ramfc; | ||
134 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
196 | 135 | ||
197 | if (dev_priv->chipset < 0x17) { | ||
198 | nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); | ||
199 | } else { | ||
200 | nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) | | ||
201 | (1 << 16) /* 64 Bytes entry*/); | ||
202 | /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ | ||
203 | } | ||
204 | } | ||
205 | |||
206 | static void | ||
207 | nv10_fifo_init_intr(struct drm_device *dev) | ||
208 | { | ||
209 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | 136 | nouveau_irq_register(dev, 8, nv04_fifo_isr); |
210 | nv_wr32(dev, 0x002100, 0xffffffff); | ||
211 | nv_wr32(dev, 0x002140, 0xffffffff); | ||
212 | } | ||
213 | |||
214 | int | ||
215 | nv10_fifo_init(struct drm_device *dev) | ||
216 | { | ||
217 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
218 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
219 | int i; | ||
220 | |||
221 | nv10_fifo_init_reset(dev); | ||
222 | nv10_fifo_init_ramxx(dev); | ||
223 | |||
224 | nv10_fifo_do_load_context(dev, pfifo->channels - 1); | ||
225 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); | ||
226 | |||
227 | nv10_fifo_init_intr(dev); | ||
228 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | ||
229 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
230 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
231 | |||
232 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | ||
233 | if (dev_priv->channels.ptr[i]) { | ||
234 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | ||
235 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | return 0; | 137 | return 0; |
240 | } | 138 | } |
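
The rewritten nv10_fifo_context_new() follows the error-handling shape shared by all the new per-engine context constructors: allocate the engine context, map the USER registers, build the RAMFC object, and on any failure fall through a single error label that hands the half-built state to context_del() for teardown. A rough standalone sketch of that shape (all types and helpers below are stubs for illustration, not nouveau APIs):

#include <stdlib.h>
#include <errno.h>

struct chan { void *engctx; void *user; void *ramfc; };

static void context_del(struct chan *chan)
{
	/* must be safe on a partially constructed context */
	free(chan->ramfc);
	free(chan->user);
	free(chan->engctx);
	chan->engctx = chan->user = chan->ramfc = NULL;
}

static int context_new(struct chan *chan)
{
	int ret = 0;

	chan->engctx = calloc(1, 64);          /* kzalloc(fctx) */
	if (!chan->engctx)
		return -ENOMEM;

	chan->user = calloc(1, 4096);          /* ioremap(USER regs) */
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	chan->ramfc = calloc(1, 32);           /* gpuobj_new_fake(RAMFC) */
	if (!chan->ramfc) {
		ret = -ENOMEM;
		goto error;
	}

	/* ... programme RAMFC defaults, flip the channel to DMA mode ... */

error:
	if (ret)
		context_del(chan);
	return ret;
}

int main(void)
{
	struct chan c = {0};
	return context_new(&c) ? 1 : (context_del(&c), 0);
}
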
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 10c0eb5d4233..fb1d88a951de 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
@@ -759,7 +759,6 @@ static int | |||
759 | nv10_graph_unload_context(struct drm_device *dev) | 759 | nv10_graph_unload_context(struct drm_device *dev) |
760 | { | 760 | { |
761 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 761 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
762 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
763 | struct nouveau_channel *chan; | 762 | struct nouveau_channel *chan; |
764 | struct graph_state *ctx; | 763 | struct graph_state *ctx; |
765 | uint32_t tmp; | 764 | uint32_t tmp; |
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev) | |||
782 | 781 | ||
783 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); | 782 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); |
784 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; | 783 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; |
785 | tmp |= (pfifo->channels - 1) << 24; | 784 | tmp |= 31 << 24; |
786 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); | 785 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); |
787 | return 0; | 786 | return 0; |
788 | } | 787 | } |
@@ -822,12 +821,12 @@ struct nouveau_channel * | |||
822 | nv10_graph_channel(struct drm_device *dev) | 821 | nv10_graph_channel(struct drm_device *dev) |
823 | { | 822 | { |
824 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 823 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
825 | int chid = dev_priv->engine.fifo.channels; | 824 | int chid = 31; |
826 | 825 | ||
827 | if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) | 826 | if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) |
828 | chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; | 827 | chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; |
829 | 828 | ||
830 | if (chid >= dev_priv->engine.fifo.channels) | 829 | if (chid >= 31) |
831 | return NULL; | 830 | return NULL; |
832 | 831 | ||
833 | return dev_priv->channels.ptr[chid]; | 832 | return dev_priv->channels.ptr[chid]; |
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine) | |||
948 | nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF); | 947 | nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF); |
949 | 948 | ||
950 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; | 949 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; |
951 | tmp |= (dev_priv->engine.fifo.channels - 1) << 24; | 950 | tmp |= 31 << 24; |
952 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); | 951 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); |
953 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); | 952 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); |
954 | nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); | 953 | nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); |
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c new file mode 100644 index 000000000000..d9e482e4abee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv17_fifo.c | |||
@@ -0,0 +1,177 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_fifo.h" | ||
31 | #include "nouveau_util.h" | ||
32 | #include "nouveau_ramht.h" | ||
33 | |||
34 | static struct ramfc_desc { | ||
35 | unsigned bits:6; | ||
36 | unsigned ctxs:5; | ||
37 | unsigned ctxp:8; | ||
38 | unsigned regs:5; | ||
39 | unsigned regp; | ||
40 | } nv17_ramfc[] = { | ||
41 | { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, | ||
42 | { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, | ||
43 | { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, | ||
44 | { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, | ||
45 | { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, | ||
46 | { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE }, | ||
47 | { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, | ||
48 | { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE }, | ||
49 | { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 }, | ||
50 | { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE }, | ||
51 | { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP }, | ||
52 | { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT }, | ||
53 | { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE }, | ||
54 | { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE }, | ||
55 | {} | ||
56 | }; | ||
57 | |||
58 | struct nv17_fifo_priv { | ||
59 | struct nouveau_fifo_priv base; | ||
60 | struct ramfc_desc *ramfc_desc; | ||
61 | }; | ||
62 | |||
63 | struct nv17_fifo_chan { | ||
64 | struct nouveau_fifo_chan base; | ||
65 | struct nouveau_gpuobj *ramfc; | ||
66 | }; | ||
67 | |||
68 | static int | ||
69 | nv17_fifo_context_new(struct nouveau_channel *chan, int engine) | ||
70 | { | ||
71 | struct drm_device *dev = chan->dev; | ||
72 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
73 | struct nv17_fifo_priv *priv = nv_engine(dev, engine); | ||
74 | struct nv17_fifo_chan *fctx; | ||
75 | unsigned long flags; | ||
76 | int ret; | ||
77 | |||
78 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); | ||
79 | if (!fctx) | ||
80 | return -ENOMEM; | ||
81 | |||
82 | /* map channel control registers */ | ||
83 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
84 | NV03_USER(chan->id), PAGE_SIZE); | ||
85 | if (!chan->user) { | ||
86 | ret = -ENOMEM; | ||
87 | goto error; | ||
88 | } | ||
89 | |||
90 | /* initialise default fifo context */ | ||
91 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst + | ||
92 | chan->id * 64, ~0, 64, | ||
93 | NVOBJ_FLAG_ZERO_ALLOC | | ||
94 | NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); | ||
95 | if (ret) | ||
96 | goto error; | ||
97 | |||
98 | nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base); | ||
99 | nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base); | ||
100 | nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4); | ||
101 | nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | ||
102 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | ||
103 | #ifdef __BIG_ENDIAN | ||
104 | NV_PFIFO_CACHE1_BIG_ENDIAN | | ||
105 | #endif | ||
106 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); | ||
107 | |||
108 | /* enable dma mode on the channel */ | ||
109 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
110 | nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id)); | ||
111 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
112 | |||
113 | error: | ||
114 | if (ret) | ||
115 | priv->base.base.context_del(chan, engine); | ||
116 | return ret; | ||
117 | } | ||
118 | |||
119 | static int | ||
120 | nv17_fifo_init(struct drm_device *dev, int engine) | ||
121 | { | ||
122 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
123 | struct nv17_fifo_priv *priv = nv_engine(dev, engine); | ||
124 | int i; | ||
125 | |||
126 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0); | ||
127 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO); | ||
128 | |||
129 | nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff); | ||
130 | nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); | ||
131 | |||
132 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | | ||
133 | ((dev_priv->ramht->bits - 9) << 16) | | ||
134 | (dev_priv->ramht->gpuobj->pinst >> 8)); | ||
135 | nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); | ||
136 | nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 | | ||
137 | dev_priv->ramfc->pinst >> 8); | ||
138 | |||
139 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); | ||
140 | |||
141 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff); | ||
142 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff); | ||
143 | |||
144 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | ||
145 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
146 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
147 | |||
148 | for (i = 0; i < priv->base.channels; i++) { | ||
149 | if (dev_priv->channels.ptr[i]) | ||
150 | nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i)); | ||
151 | } | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | int | ||
157 | nv17_fifo_create(struct drm_device *dev) | ||
158 | { | ||
159 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
160 | struct nv17_fifo_priv *priv; | ||
161 | |||
162 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
163 | if (!priv) | ||
164 | return -ENOMEM; | ||
165 | |||
166 | priv->base.base.destroy = nv04_fifo_destroy; | ||
167 | priv->base.base.init = nv17_fifo_init; | ||
168 | priv->base.base.fini = nv04_fifo_fini; | ||
169 | priv->base.base.context_new = nv17_fifo_context_new; | ||
170 | priv->base.base.context_del = nv04_fifo_context_del; | ||
171 | priv->base.channels = 31; | ||
172 | priv->ramfc_desc = nv17_ramfc; | ||
173 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
174 | |||
175 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
176 | return 0; | ||
177 | } | ||
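
The new nv17 module exists because its RAMFC layout extends nv10's with the ACQUIRE_VALUE/TIMESTAMP/TIMEOUT, SEMAPHORE and DMA_SUBROUTINE words, pushing the per-channel entry from 32 to 64 bytes (hence the 0x00010000 bit written to NV03_PFIFO_RAMFC in nv17_fifo_init()). A back-of-the-envelope check, not driver code, that derives those entry sizes from the highest RAMFC offset each descriptor table touches:

#include <stdio.h>

struct field { unsigned ctxp; };   /* byte offset inside the RAMFC entry */

static unsigned entry_bytes(const struct field *f, unsigned n, unsigned align)
{
	unsigned max = 0, i;

	for (i = 0; i < n; i++)
		if (f[i].ctxp + 4 > max)
			max = f[i].ctxp + 4;
	return (max + align - 1) & ~(align - 1);   /* pad to the slot size */
}

int main(void)
{
	static const struct field nv10[] = {
		{ 0x00 }, { 0x04 }, { 0x08 }, { 0x0c }, { 0x10 },
		{ 0x14 }, { 0x18 }, { 0x1c },
	};
	static const struct field nv17[] = {
		{ 0x00 }, { 0x04 }, { 0x08 }, { 0x0c }, { 0x10 },
		{ 0x14 }, { 0x18 }, { 0x1c }, { 0x20 }, { 0x24 },
		{ 0x28 }, { 0x2c }, { 0x30 },
	};
	printf("nv10 entry: %u bytes\n", entry_bytes(nv10, 8, 32));   /* 32 */
	printf("nv17 entry: %u bytes\n", entry_bytes(nv17, 13, 64));  /* 64 */
	return 0;
}
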
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 385e2b49a554..e34ea30758f6 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
@@ -43,8 +43,6 @@ struct nv20_graph_engine { | |||
43 | int | 43 | int |
44 | nv20_graph_unload_context(struct drm_device *dev) | 44 | nv20_graph_unload_context(struct drm_device *dev) |
45 | { | 45 | { |
46 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
47 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
48 | struct nouveau_channel *chan; | 46 | struct nouveau_channel *chan; |
49 | struct nouveau_gpuobj *grctx; | 47 | struct nouveau_gpuobj *grctx; |
50 | u32 tmp; | 48 | u32 tmp; |
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev) | |||
62 | 60 | ||
63 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); | 61 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); |
64 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; | 62 | tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; |
65 | tmp |= (pfifo->channels - 1) << 24; | 63 | tmp |= 31 << 24; |
66 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); | 64 | nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); |
67 | return 0; | 65 | return 0; |
68 | } | 66 | } |
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c index 6f06a0713f00..5f239bf658c4 100644 --- a/drivers/gpu/drm/nouveau/nv31_mpeg.c +++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_fifo.h" | ||
27 | #include "nouveau_ramht.h" | 28 | #include "nouveau_ramht.h" |
28 | 29 | ||
29 | struct nv31_mpeg_engine { | 30 | struct nv31_mpeg_engine { |
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) | |||
208 | static int | 209 | static int |
209 | nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) | 210 | nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) |
210 | { | 211 | { |
212 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
211 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 213 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
212 | struct nouveau_gpuobj *ctx; | 214 | struct nouveau_gpuobj *ctx; |
213 | unsigned long flags; | 215 | unsigned long flags; |
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst) | |||
218 | return 0; | 220 | return 0; |
219 | 221 | ||
220 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 222 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
221 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 223 | for (i = 0; i < pfifo->channels; i++) { |
222 | if (!dev_priv->channels.ptr[i]) | 224 | if (!dev_priv->channels.ptr[i]) |
223 | continue; | 225 | continue; |
224 | 226 | ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 8d346617f55f..cdc818479b0a 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ben Skeggs. | 2 | * Copyright (C) 2012 Ben Skeggs. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining | 5 | * Permission is hereby granted, free of charge, to any person obtaining |
@@ -25,215 +25,123 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | ||
28 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
29 | #include "nouveau_drm.h" | 30 | #include "nouveau_fifo.h" |
31 | #include "nouveau_util.h" | ||
30 | #include "nouveau_ramht.h" | 32 | #include "nouveau_ramht.h" |
31 | 33 | ||
32 | #define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE)) | 34 | static struct ramfc_desc { |
33 | #define NV40_RAMFC__SIZE 128 | 35 | unsigned bits:6; |
34 | 36 | unsigned ctxs:5; | |
35 | int | 37 | unsigned ctxp:8; |
36 | nv40_fifo_create_context(struct nouveau_channel *chan) | 38 | unsigned regs:5; |
39 | unsigned regp; | ||
40 | } nv40_ramfc[] = { | ||
41 | { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, | ||
42 | { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, | ||
43 | { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, | ||
44 | { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, | ||
45 | { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, | ||
46 | { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE }, | ||
47 | { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, | ||
48 | { 2, 28, 0x18, 28, 0x002058 }, | ||
49 | { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE }, | ||
50 | { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 }, | ||
51 | { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE }, | ||
52 | { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP }, | ||
53 | { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT }, | ||
54 | { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE }, | ||
55 | { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE }, | ||
56 | { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE }, | ||
57 | { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE }, | ||
58 | { 32, 0, 0x40, 0, 0x0032e4 }, | ||
59 | { 32, 0, 0x44, 0, 0x0032e8 }, | ||
60 | { 32, 0, 0x4c, 0, 0x002088 }, | ||
61 | { 32, 0, 0x50, 0, 0x003300 }, | ||
62 | { 32, 0, 0x54, 0, 0x00330c }, | ||
63 | {} | ||
64 | }; | ||
65 | |||
66 | struct nv40_fifo_priv { | ||
67 | struct nouveau_fifo_priv base; | ||
68 | struct ramfc_desc *ramfc_desc; | ||
69 | }; | ||
70 | |||
71 | struct nv40_fifo_chan { | ||
72 | struct nouveau_fifo_chan base; | ||
73 | struct nouveau_gpuobj *ramfc; | ||
74 | }; | ||
75 | |||
76 | static int | ||
77 | nv40_fifo_context_new(struct nouveau_channel *chan, int engine) | ||
37 | { | 78 | { |
38 | struct drm_device *dev = chan->dev; | 79 | struct drm_device *dev = chan->dev; |
39 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 80 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
40 | uint32_t fc = NV40_RAMFC(chan->id); | 81 | struct nv40_fifo_priv *priv = nv_engine(dev, engine); |
82 | struct nv40_fifo_chan *fctx; | ||
41 | unsigned long flags; | 83 | unsigned long flags; |
42 | int ret; | 84 | int ret; |
43 | 85 | ||
44 | ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, | 86 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
45 | NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | | 87 | if (!fctx) |
46 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); | ||
47 | if (ret) | ||
48 | return ret; | ||
49 | |||
50 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
51 | NV40_USER(chan->id), PAGE_SIZE); | ||
52 | if (!chan->user) | ||
53 | return -ENOMEM; | 88 | return -ENOMEM; |
54 | 89 | ||
55 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | 90 | /* map channel control registers */ |
91 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
92 | NV03_USER(chan->id), PAGE_SIZE); | ||
93 | if (!chan->user) { | ||
94 | ret = -ENOMEM; | ||
95 | goto error; | ||
96 | } | ||
56 | 97 | ||
57 | nv_wi32(dev, fc + 0, chan->pushbuf_base); | 98 | /* initialise default fifo context */ |
58 | nv_wi32(dev, fc + 4, chan->pushbuf_base); | 99 | ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst + |
59 | nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); | 100 | chan->id * 128, ~0, 128, |
60 | nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | 101 | NVOBJ_FLAG_ZERO_ALLOC | |
61 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | 102 | NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); |
62 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | | 103 | if (ret) |
104 | goto error; | ||
105 | |||
106 | nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base); | ||
107 | nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base); | ||
108 | nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4); | ||
109 | nv_wo32(fctx->ramfc, 0x18, 0x30000000 | | ||
110 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | | ||
111 | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | | ||
63 | #ifdef __BIG_ENDIAN | 112 | #ifdef __BIG_ENDIAN |
64 | NV_PFIFO_CACHE1_BIG_ENDIAN | | 113 | NV_PFIFO_CACHE1_BIG_ENDIAN | |
65 | #endif | 114 | #endif |
66 | 0x30000000 /* no idea.. */); | 115 | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); |
67 | nv_wi32(dev, fc + 60, 0x0001FFFF); | 116 | nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff); |
68 | |||
69 | /* enable the fifo dma operation */ | ||
70 | nv_wr32(dev, NV04_PFIFO_MODE, | ||
71 | nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); | ||
72 | 117 | ||
118 | /* enable dma mode on the channel */ | ||
119 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
120 | nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id)); | ||
73 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 121 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void | ||
78 | nv40_fifo_do_load_context(struct drm_device *dev, int chid) | ||
79 | { | ||
80 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
81 | uint32_t fc = NV40_RAMFC(chid), tmp, tmp2; | ||
82 | |||
83 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); | ||
84 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); | ||
85 | nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); | ||
86 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12)); | ||
87 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16)); | ||
88 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20)); | ||
89 | |||
90 | /* No idea what 0x2058 is.. */ | ||
91 | tmp = nv_ri32(dev, fc + 24); | ||
92 | tmp2 = nv_rd32(dev, 0x2058) & 0xFFF; | ||
93 | tmp2 |= (tmp & 0x30000000); | ||
94 | nv_wr32(dev, 0x2058, tmp2); | ||
95 | tmp &= ~0x30000000; | ||
96 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp); | ||
97 | |||
98 | nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28)); | ||
99 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32)); | ||
100 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36)); | ||
101 | tmp = nv_ri32(dev, fc + 40); | ||
102 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp); | ||
103 | nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44)); | ||
104 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48)); | ||
105 | nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52)); | ||
106 | nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56)); | ||
107 | |||
108 | /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ | ||
109 | tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; | ||
110 | tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF; | ||
111 | nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp); | ||
112 | 122 | ||
113 | nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64)); | 123 | /*XXX: remove this later, need fifo engine context commit hook */ |
114 | /* NVIDIA does this next line twice... */ | 124 | nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc); |
115 | nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68)); | ||
116 | nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); | ||
117 | nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); | ||
118 | nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84)); | ||
119 | 125 | ||
120 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); | 126 | error: |
121 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); | 127 | if (ret) |
122 | } | 128 | priv->base.base.context_del(chan, engine); |
123 | 129 | return ret; | |
124 | int | ||
125 | nv40_fifo_load_context(struct nouveau_channel *chan) | ||
126 | { | ||
127 | struct drm_device *dev = chan->dev; | ||
128 | uint32_t tmp; | ||
129 | |||
130 | nv40_fifo_do_load_context(dev, chan->id); | ||
131 | |||
132 | /* Set channel active, and in DMA mode */ | ||
133 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, | ||
134 | NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id); | ||
135 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1); | ||
136 | |||
137 | /* Reset DMA_CTL_AT_INFO to INVALID */ | ||
138 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31); | ||
139 | nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp); | ||
140 | |||
141 | return 0; | ||
142 | } | 130 | } |
143 | 131 | ||
144 | int | 132 | static int |
145 | nv40_fifo_unload_context(struct drm_device *dev) | 133 | nv40_fifo_init(struct drm_device *dev, int engine) |
146 | { | 134 | { |
147 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 135 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
148 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 136 | struct nv40_fifo_priv *priv = nv_engine(dev, engine); |
149 | uint32_t fc, tmp; | ||
150 | int chid; | ||
151 | |||
152 | chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x1f; | ||
153 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) | ||
154 | return 0; | ||
155 | fc = NV40_RAMFC(chid); | ||
156 | |||
157 | nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); | ||
158 | nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); | ||
159 | nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); | ||
160 | nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE)); | ||
161 | nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT)); | ||
162 | nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE)); | ||
163 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH); | ||
164 | tmp |= nv_rd32(dev, 0x2058) & 0x30000000; | ||
165 | nv_wi32(dev, fc + 24, tmp); | ||
166 | nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); | ||
167 | nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); | ||
168 | nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); | ||
169 | tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); | ||
170 | nv_wi32(dev, fc + 40, tmp); | ||
171 | nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); | ||
172 | nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE)); | ||
173 | /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something | ||
174 | * more involved depending on the value of 0x3228? | ||
175 | */ | ||
176 | nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); | ||
177 | nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE)); | ||
178 | nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff); | ||
179 | /* No idea what the below is for exactly, ripped from a mmio-trace */ | ||
180 | nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4)); | ||
181 | /* NVIDIA do this next line twice.. bug? */ | ||
182 | nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8)); | ||
183 | nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088)); | ||
184 | nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300)); | ||
185 | #if 0 /* no real idea which is PUT/GET in UNK_48.. */ | ||
186 | tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET); | ||
187 | tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); | ||
188 | nv_wi32(dev, fc + 72, tmp); | ||
189 | #endif | ||
190 | nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c)); | ||
191 | |||
192 | nv40_fifo_do_load_context(dev, pfifo->channels - 1); | ||
193 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, | ||
194 | NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1)); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | nv40_fifo_init_reset(struct drm_device *dev) | ||
200 | { | ||
201 | int i; | 137 | int i; |
202 | 138 | ||
203 | nv_wr32(dev, NV03_PMC_ENABLE, | 139 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0); |
204 | nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); | 140 | nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO); |
205 | nv_wr32(dev, NV03_PMC_ENABLE, | ||
206 | nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO); | ||
207 | 141 | ||
208 | nv_wr32(dev, 0x003224, 0x000f0078); | ||
209 | nv_wr32(dev, 0x003210, 0x00000000); | ||
210 | nv_wr32(dev, 0x003270, 0x00000000); | ||
211 | nv_wr32(dev, 0x003240, 0x00000000); | ||
212 | nv_wr32(dev, 0x003244, 0x00000000); | ||
213 | nv_wr32(dev, 0x003258, 0x00000000); | ||
214 | nv_wr32(dev, 0x002504, 0x00000000); | ||
215 | for (i = 0; i < 16; i++) | ||
216 | nv_wr32(dev, 0x002510 + (i * 4), 0x00000000); | ||
217 | nv_wr32(dev, 0x00250c, 0x0000ffff); | ||
218 | nv_wr32(dev, 0x002048, 0x00000000); | ||
219 | nv_wr32(dev, 0x003228, 0x00000000); | ||
220 | nv_wr32(dev, 0x0032e8, 0x00000000); | ||
221 | nv_wr32(dev, 0x002410, 0x00000000); | ||
222 | nv_wr32(dev, 0x002420, 0x00000000); | ||
223 | nv_wr32(dev, 0x002058, 0x00000001); | ||
224 | nv_wr32(dev, 0x00221c, 0x00000000); | ||
225 | /* something with 0x2084, read/modify/write, no change */ | ||
226 | nv_wr32(dev, 0x002040, 0x000000ff); | 142 | nv_wr32(dev, 0x002040, 0x000000ff); |
227 | nv_wr32(dev, 0x002500, 0x00000000); | 143 | nv_wr32(dev, 0x002044, 0x2101ffff); |
228 | nv_wr32(dev, 0x003200, 0x00000000); | 144 | nv_wr32(dev, 0x002058, 0x00000001); |
229 | |||
230 | nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); | ||
231 | } | ||
232 | |||
233 | static void | ||
234 | nv40_fifo_init_ramxx(struct drm_device *dev) | ||
235 | { | ||
236 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
237 | 145 | ||
238 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | | 146 | nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | |
239 | ((dev_priv->ramht->bits - 9) << 16) | | 147 | ((dev_priv->ramht->bits - 9) << 16) | |
@@ -244,65 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev) | |||
244 | case 0x47: | 152 | case 0x47: |
245 | case 0x49: | 153 | case 0x49: |
246 | case 0x4b: | 154 | case 0x4b: |
247 | nv_wr32(dev, 0x2230, 1); | 155 | nv_wr32(dev, 0x002230, 0x00000001); |
248 | break; | ||
249 | default: | ||
250 | break; | ||
251 | } | ||
252 | |||
253 | switch (dev_priv->chipset) { | ||
254 | case 0x40: | 156 | case 0x40: |
255 | case 0x41: | 157 | case 0x41: |
256 | case 0x42: | 158 | case 0x42: |
257 | case 0x43: | 159 | case 0x43: |
258 | case 0x45: | 160 | case 0x45: |
259 | case 0x47: | ||
260 | case 0x48: | 161 | case 0x48: |
261 | case 0x49: | 162 | nv_wr32(dev, 0x002220, 0x00030002); |
262 | case 0x4b: | ||
263 | nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002); | ||
264 | break; | 163 | break; |
265 | default: | 164 | default: |
266 | nv_wr32(dev, 0x2230, 0); | 165 | nv_wr32(dev, 0x002230, 0x00000000); |
267 | nv_wr32(dev, NV40_PFIFO_RAMFC, | 166 | nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 + |
268 | ((dev_priv->vram_size - 512 * 1024 + | 167 | dev_priv->ramfc->pinst) >> 16) | |
269 | dev_priv->ramfc->pinst) >> 16) | (3 << 16)); | 168 | 0x00030000); |
270 | break; | 169 | break; |
271 | } | 170 | } |
272 | } | ||
273 | |||
274 | static void | ||
275 | nv40_fifo_init_intr(struct drm_device *dev) | ||
276 | { | ||
277 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
278 | nv_wr32(dev, 0x002100, 0xffffffff); | ||
279 | nv_wr32(dev, 0x002140, 0xffffffff); | ||
280 | } | ||
281 | |||
282 | int | ||
283 | nv40_fifo_init(struct drm_device *dev) | ||
284 | { | ||
285 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
286 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
287 | int i; | ||
288 | 171 | ||
289 | nv40_fifo_init_reset(dev); | 172 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels); |
290 | nv40_fifo_init_ramxx(dev); | ||
291 | 173 | ||
292 | nv40_fifo_do_load_context(dev, pfifo->channels - 1); | 174 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff); |
293 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); | 175 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff); |
294 | 176 | ||
295 | nv40_fifo_init_intr(dev); | ||
296 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | 177 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); |
297 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | 178 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); |
298 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | 179 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); |
299 | 180 | ||
300 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 181 | for (i = 0; i < priv->base.channels; i++) { |
301 | if (dev_priv->channels.ptr[i]) { | 182 | if (dev_priv->channels.ptr[i]) |
302 | uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); | 183 | nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i)); |
303 | nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); | ||
304 | } | ||
305 | } | 184 | } |
306 | 185 | ||
307 | return 0; | 186 | return 0; |
308 | } | 187 | } |
188 | |||
189 | int | ||
190 | nv40_fifo_create(struct drm_device *dev) | ||
191 | { | ||
192 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
193 | struct nv40_fifo_priv *priv; | ||
194 | |||
195 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
196 | if (!priv) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | priv->base.base.destroy = nv04_fifo_destroy; | ||
200 | priv->base.base.init = nv40_fifo_init; | ||
201 | priv->base.base.fini = nv04_fifo_fini; | ||
202 | priv->base.base.context_new = nv40_fifo_context_new; | ||
203 | priv->base.base.context_del = nv04_fifo_context_del; | ||
204 | priv->base.channels = 31; | ||
205 | priv->ramfc_desc = nv40_ramfc; | ||
206 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
207 | |||
208 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
209 | return 0; | ||
210 | } | ||
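
In the default branch of the rewritten nv40_fifo_init(), register 0x002220 is programmed from the RAMFC instance offset taken relative to the last 512 KiB of VRAM, shifted down 16 bits and OR'd with 0x00030000 (presumably the same mode bits the listed chipsets receive via the fixed 0x00030002 value). A small arithmetic sketch of that computation with made-up VRAM and instance values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_size   = 256ULL << 20;   /* pretend 256 MiB of VRAM */
	uint32_t ramfc_pinst = 0x20000;        /* pretend RAMFC instance offset */
	uint32_t reg;

	reg = (uint32_t)((vram_size - 512 * 1024 + ramfc_pinst) >> 16)
	      | 0x00030000;
	printf("0x002220 <- 0x%08x\n", reg);   /* 0x00030ffa for these inputs */
	return 0;
}
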
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 03837200762d..aa9e2df64a26 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_fifo.h" | ||
30 | #include "nouveau_ramht.h" | 31 | #include "nouveau_ramht.h" |
31 | 32 | ||
32 | struct nv40_graph_engine { | 33 | struct nv40_graph_engine { |
@@ -345,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend) | |||
345 | static int | 346 | static int |
346 | nv40_graph_isr_chid(struct drm_device *dev, u32 inst) | 347 | nv40_graph_isr_chid(struct drm_device *dev, u32 inst) |
347 | { | 348 | { |
349 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
348 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 350 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
349 | struct nouveau_gpuobj *grctx; | 351 | struct nouveau_gpuobj *grctx; |
350 | unsigned long flags; | 352 | unsigned long flags; |
351 | int i; | 353 | int i; |
352 | 354 | ||
353 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 355 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
354 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 356 | for (i = 0; i < pfifo->channels; i++) { |
355 | if (!dev_priv->channels.ptr[i]) | 357 | if (!dev_priv->channels.ptr[i]) |
356 | continue; | 358 | continue; |
357 | grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; | 359 | grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR]; |
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index c7615381c5d9..e66273aff493 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "nouveau_bios.h" | 27 | #include "nouveau_bios.h" |
28 | #include "nouveau_pm.h" | 28 | #include "nouveau_pm.h" |
29 | #include "nouveau_hw.h" | 29 | #include "nouveau_hw.h" |
30 | #include "nouveau_fifo.h" | ||
30 | 31 | ||
31 | #define min2(a,b) ((a) < (b) ? (a) : (b)) | 32 | #define min2(a,b) ((a) < (b) ? (a) : (b)) |
32 | 33 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index bdd2afe29205..f1e4b9e07d14 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include "drm.h" | 2 | #include "drm.h" |
3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | #include "nouveau_fifo.h" | ||
5 | 6 | ||
6 | struct nv50_fb_priv { | 7 | struct nv50_fb_priv { |
7 | struct page *r100c08_page; | 8 | struct page *r100c08_page; |
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = { | |||
212 | void | 213 | void |
213 | nv50_fb_vm_trap(struct drm_device *dev, int display) | 214 | nv50_fb_vm_trap(struct drm_device *dev, int display) |
214 | { | 215 | { |
216 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
215 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 217 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
216 | const struct nouveau_enum *en, *cl; | 218 | const struct nouveau_enum *en, *cl; |
217 | unsigned long flags; | 219 | unsigned long flags; |
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display) | |||
236 | /* lookup channel id */ | 238 | /* lookup channel id */ |
237 | chinst = (trap[2] << 16) | trap[1]; | 239 | chinst = (trap[2] << 16) | trap[1]; |
238 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 240 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
239 | for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { | 241 | for (ch = 0; ch < pfifo->channels; ch++) { |
240 | struct nouveau_channel *chan = dev_priv->channels.ptr[ch]; | 242 | struct nouveau_channel *chan = dev_priv->channels.ptr[ch]; |
241 | 243 | ||
242 | if (!chan || !chan->ramin) | 244 | if (!chan || !chan->ramin) |
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 12e9e8270f45..55383b85db0b 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ben Skeggs. | 2 | * Copyright (C) 2012 Ben Skeggs. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining | 5 | * Permission is hereby granted, free of charge, to any person obtaining |
@@ -27,288 +27,135 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_fifo.h" | ||
30 | #include "nouveau_ramht.h" | 31 | #include "nouveau_ramht.h" |
31 | #include "nouveau_vm.h" | 32 | #include "nouveau_vm.h" |
32 | 33 | ||
33 | static void | 34 | struct nv50_fifo_priv { |
35 | struct nouveau_fifo_priv base; | ||
36 | struct nouveau_gpuobj *playlist[2]; | ||
37 | int cur_playlist; | ||
38 | }; | ||
39 | |||
40 | struct nv50_fifo_chan { | ||
41 | struct nouveau_fifo_chan base; | ||
42 | }; | ||
43 | |||
44 | void | ||
34 | nv50_fifo_playlist_update(struct drm_device *dev) | 45 | nv50_fifo_playlist_update(struct drm_device *dev) |
35 | { | 46 | { |
47 | struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 48 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
37 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
38 | struct nouveau_gpuobj *cur; | 49 | struct nouveau_gpuobj *cur; |
39 | int i, p; | 50 | int i, p; |
40 | 51 | ||
41 | NV_DEBUG(dev, "\n"); | 52 | cur = priv->playlist[priv->cur_playlist]; |
42 | 53 | priv->cur_playlist = !priv->cur_playlist; | |
43 | cur = pfifo->playlist[pfifo->cur_playlist]; | ||
44 | pfifo->cur_playlist = !pfifo->cur_playlist; | ||
45 | 54 | ||
46 | for (i = 0, p = 0; i < pfifo->channels; i++) { | 55 | for (i = 0, p = 0; i < priv->base.channels; i++) { |
47 | if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000) | 56 | if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000) |
48 | nv_wo32(cur, p++ * 4, i); | 57 | nv_wo32(cur, p++ * 4, i); |
49 | } | 58 | } |
50 | 59 | ||
51 | dev_priv->engine.instmem.flush(dev); | 60 | dev_priv->engine.instmem.flush(dev); |
52 | 61 | ||
53 | nv_wr32(dev, 0x32f4, cur->vinst >> 12); | 62 | nv_wr32(dev, 0x0032f4, cur->vinst >> 12); |
54 | nv_wr32(dev, 0x32ec, p); | 63 | nv_wr32(dev, 0x0032ec, p); |
55 | nv_wr32(dev, 0x2500, 0x101); | 64 | nv_wr32(dev, 0x002500, 0x00000101); |
56 | } | ||
57 | |||
58 | static void | ||
59 | nv50_fifo_channel_enable(struct drm_device *dev, int channel) | ||
60 | { | ||
61 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
62 | struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; | ||
63 | uint32_t inst; | ||
64 | |||
65 | NV_DEBUG(dev, "ch%d\n", channel); | ||
66 | |||
67 | if (dev_priv->chipset == 0x50) | ||
68 | inst = chan->ramfc->vinst >> 12; | ||
69 | else | ||
70 | inst = chan->ramfc->vinst >> 8; | ||
71 | |||
72 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | | ||
73 | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); | ||
74 | } | ||
75 | |||
76 | static void | ||
77 | nv50_fifo_channel_disable(struct drm_device *dev, int channel) | ||
78 | { | ||
79 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
80 | uint32_t inst; | ||
81 | |||
82 | NV_DEBUG(dev, "ch%d\n", channel); | ||
83 | |||
84 | if (dev_priv->chipset == 0x50) | ||
85 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; | ||
86 | else | ||
87 | inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; | ||
88 | nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); | ||
89 | } | ||
90 | |||
91 | static void | ||
92 | nv50_fifo_init_reset(struct drm_device *dev) | ||
93 | { | ||
94 | uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; | ||
95 | |||
96 | NV_DEBUG(dev, "\n"); | ||
97 | |||
98 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e); | ||
99 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e); | ||
100 | } | ||
101 | |||
102 | static void | ||
103 | nv50_fifo_init_intr(struct drm_device *dev) | ||
104 | { | ||
105 | NV_DEBUG(dev, "\n"); | ||
106 | |||
107 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
108 | nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); | ||
109 | nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); | ||
110 | } | ||
111 | |||
112 | static void | ||
113 | nv50_fifo_init_context_table(struct drm_device *dev) | ||
114 | { | ||
115 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
116 | int i; | ||
117 | |||
118 | NV_DEBUG(dev, "\n"); | ||
119 | |||
120 | for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { | ||
121 | if (dev_priv->channels.ptr[i]) | ||
122 | nv50_fifo_channel_enable(dev, i); | ||
123 | else | ||
124 | nv50_fifo_channel_disable(dev, i); | ||
125 | } | ||
126 | |||
127 | nv50_fifo_playlist_update(dev); | ||
128 | } | ||
129 | |||
130 | static void | ||
131 | nv50_fifo_init_regs__nv(struct drm_device *dev) | ||
132 | { | ||
133 | NV_DEBUG(dev, "\n"); | ||
134 | |||
135 | nv_wr32(dev, 0x250c, 0x6f3cfc34); | ||
136 | } | ||
137 | |||
138 | static void | ||
139 | nv50_fifo_init_regs(struct drm_device *dev) | ||
140 | { | ||
141 | NV_DEBUG(dev, "\n"); | ||
142 | |||
143 | nv_wr32(dev, 0x2500, 0); | ||
144 | nv_wr32(dev, 0x3250, 0); | ||
145 | nv_wr32(dev, 0x3220, 0); | ||
146 | nv_wr32(dev, 0x3204, 0); | ||
147 | nv_wr32(dev, 0x3210, 0); | ||
148 | nv_wr32(dev, 0x3270, 0); | ||
149 | nv_wr32(dev, 0x2044, 0x01003fff); | ||
150 | |||
151 | /* Enable dummy channels setup by nv50_instmem.c */ | ||
152 | nv50_fifo_channel_enable(dev, 0); | ||
153 | nv50_fifo_channel_enable(dev, 127); | ||
154 | } | ||
155 | |||
156 | int | ||
157 | nv50_fifo_init(struct drm_device *dev) | ||
158 | { | ||
159 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
160 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
161 | int ret; | ||
162 | |||
163 | NV_DEBUG(dev, "\n"); | ||
164 | |||
165 | if (pfifo->playlist[0]) { | ||
166 | pfifo->cur_playlist = !pfifo->cur_playlist; | ||
167 | goto just_reset; | ||
168 | } | ||
169 | |||
170 | ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, | ||
171 | NVOBJ_FLAG_ZERO_ALLOC, | ||
172 | &pfifo->playlist[0]); | ||
173 | if (ret) { | ||
174 | NV_ERROR(dev, "error creating playlist 0: %d\n", ret); | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, | ||
179 | NVOBJ_FLAG_ZERO_ALLOC, | ||
180 | &pfifo->playlist[1]); | ||
181 | if (ret) { | ||
182 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); | ||
183 | NV_ERROR(dev, "error creating playlist 1: %d\n", ret); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | just_reset: | ||
188 | nv50_fifo_init_reset(dev); | ||
189 | nv50_fifo_init_intr(dev); | ||
190 | nv50_fifo_init_context_table(dev); | ||
191 | nv50_fifo_init_regs__nv(dev); | ||
192 | nv50_fifo_init_regs(dev); | ||
193 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); | ||
194 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
195 | nv_wr32(dev, NV03_PFIFO_CACHES, 1); | ||
196 | |||
197 | return 0; | ||
198 | } | 65 | } |
199 | 66 | ||
200 | void | 67 | static int |
201 | nv50_fifo_takedown(struct drm_device *dev) | 68 | nv50_fifo_context_new(struct nouveau_channel *chan, int engine) |
202 | { | ||
203 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
204 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
205 | |||
206 | NV_DEBUG(dev, "\n"); | ||
207 | |||
208 | if (!pfifo->playlist[0]) | ||
209 | return; | ||
210 | |||
211 | nv_wr32(dev, 0x2140, 0x00000000); | ||
212 | nouveau_irq_unregister(dev, 8); | ||
213 | |||
214 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); | ||
215 | nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); | ||
216 | } | ||
217 | |||
218 | int | ||
219 | nv50_fifo_create_context(struct nouveau_channel *chan) | ||
220 | { | 69 | { |
70 | struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine); | ||
71 | struct nv50_fifo_chan *fctx; | ||
221 | struct drm_device *dev = chan->dev; | 72 | struct drm_device *dev = chan->dev; |
222 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 73 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
223 | struct nouveau_gpuobj *ramfc = NULL; | 74 | u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; |
224 | uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; | 75 | u64 instance = chan->ramin->vinst >> 12; |
225 | unsigned long flags; | 76 | unsigned long flags; |
226 | int ret; | 77 | int ret = 0, i; |
227 | 78 | ||
228 | NV_DEBUG(dev, "ch%d\n", chan->id); | 79 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
229 | 80 | if (!fctx) | |
230 | if (dev_priv->chipset == 0x50) { | 81 | return -ENOMEM; |
231 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, | 82 | atomic_inc(&chan->vm->engref[engine]); |
232 | chan->ramin->vinst, 0x100, | ||
233 | NVOBJ_FLAG_ZERO_ALLOC | | ||
234 | NVOBJ_FLAG_ZERO_FREE, | ||
235 | &chan->ramfc); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | |||
239 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400, | ||
240 | chan->ramin->vinst + 0x0400, | ||
241 | 4096, 0, &chan->cache); | ||
242 | if (ret) | ||
243 | return ret; | ||
244 | } else { | ||
245 | ret = nouveau_gpuobj_new(dev, chan, 0x100, 256, | ||
246 | NVOBJ_FLAG_ZERO_ALLOC | | ||
247 | NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); | ||
248 | if (ret) | ||
249 | return ret; | ||
250 | |||
251 | ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, | ||
252 | 0, &chan->cache); | ||
253 | if (ret) | ||
254 | return ret; | ||
255 | } | ||
256 | ramfc = chan->ramfc; | ||
257 | 83 | ||
258 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | 84 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + |
259 | NV50_USER(chan->id), PAGE_SIZE); | 85 | NV50_USER(chan->id), PAGE_SIZE); |
260 | if (!chan->user) | 86 | if (!chan->user) { |
261 | return -ENOMEM; | 87 | ret = -ENOMEM; |
262 | 88 | goto error; | |
263 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
264 | |||
265 | nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); | ||
266 | nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | ||
267 | (4 << 24) /* SEARCH_FULL */ | | ||
268 | (chan->ramht->gpuobj->cinst >> 4)); | ||
269 | nv_wo32(ramfc, 0x44, 0x01003fff); | ||
270 | nv_wo32(ramfc, 0x60, 0x7fffffff); | ||
271 | nv_wo32(ramfc, 0x40, 0x00000000); | ||
272 | nv_wo32(ramfc, 0x7c, 0x30000001); | ||
273 | nv_wo32(ramfc, 0x78, 0x00000000); | ||
274 | nv_wo32(ramfc, 0x3c, 0x403f6078); | ||
275 | nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset)); | ||
276 | nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) | | ||
277 | drm_order(chan->dma.ib_max + 1) << 16); | ||
278 | |||
279 | if (dev_priv->chipset != 0x50) { | ||
280 | nv_wo32(chan->ramin, 0, chan->id); | ||
281 | nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8); | ||
282 | |||
283 | nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10); | ||
284 | nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12); | ||
285 | } | 89 | } |
286 | 90 | ||
91 | for (i = 0; i < 0x100; i += 4) | ||
92 | nv_wo32(chan->ramin, i, 0x00000000); | ||
93 | nv_wo32(chan->ramin, 0x3c, 0x403f6078); | ||
94 | nv_wo32(chan->ramin, 0x40, 0x00000000); | ||
95 | nv_wo32(chan->ramin, 0x44, 0x01003fff); | ||
96 | nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4); | ||
97 | nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset)); | ||
98 | nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) | | ||
99 | drm_order(chan->dma.ib_max + 1) << 16); | ||
100 | nv_wo32(chan->ramin, 0x60, 0x7fffffff); | ||
101 | nv_wo32(chan->ramin, 0x78, 0x00000000); | ||
102 | nv_wo32(chan->ramin, 0x7c, 0x30000001); | ||
103 | nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) | | ||
104 | (4 << 24) /* SEARCH_FULL */ | | ||
105 | (chan->ramht->gpuobj->cinst >> 4)); | ||
106 | |||
287 | dev_priv->engine.instmem.flush(dev); | 107 | dev_priv->engine.instmem.flush(dev); |
288 | 108 | ||
289 | nv50_fifo_channel_enable(dev, chan->id); | 109 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); |
110 | nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance); | ||
290 | nv50_fifo_playlist_update(dev); | 111 | nv50_fifo_playlist_update(dev); |
291 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 112 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
292 | return 0; | 113 | |
114 | error: | ||
115 | if (ret) | ||
116 | priv->base.base.context_del(chan, engine); | ||
117 | return ret; | ||
293 | } | 118 | } |
294 | 119 | ||
295 | static bool | 120 | static bool |
296 | nv50_fifo_wait_kickoff(void *data) | 121 | nv50_fifo_kickoff(struct nouveau_channel *chan) |
297 | { | 122 | { |
298 | struct drm_nouveau_private *dev_priv = data; | 123 | struct drm_device *dev = chan->dev; |
299 | struct drm_device *dev = dev_priv->dev; | 124 | bool done = true; |
300 | 125 | u32 me; | |
301 | if (dev_priv->chipset == 0x50) { | 126 | |
302 | u32 me_enable = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001); | 127 | /* HW bug workaround: |
303 | nv_wr32(dev, 0x00b860, me_enable); | 128 | * |
129 | * PFIFO will hang forever if the connected engines don't report | ||
130 | * that they've processed the context switch request. | ||
131 | * | ||
132 | * In order for the kickoff to work, we need to ensure all the | ||
133 | * connected engines are in a state where they can answer. | ||
134 | * | ||
135 | * Newer chipsets don't seem to suffer from this issue, and there is | ||
136 | * also an "ignore these engines" bitmask register we can use if we | ||
137 | * ever hit the issue there. | ||
138 | */ | ||
139 | |||
140 | /* PME: make sure engine is enabled */ | ||
141 | me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001); | ||
142 | |||
143 | /* do the kickoff... */ | ||
144 | nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); | ||
145 | if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) { | ||
146 | NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); | ||
147 | done = false; | ||
304 | } | 148 | } |
305 | 149 | ||
306 | return nv_rd32(dev, 0x0032fc) != 0xffffffff; | 150 | /* restore any engine states we changed, and exit */ |
151 | nv_wr32(dev, 0x00b860, me); | ||
152 | return done; | ||
307 | } | 153 | } |
308 | 154 | ||
309 | void | 155 | static void |
310 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 156 | nv50_fifo_context_del(struct nouveau_channel *chan, int engine) |
311 | { | 157 | { |
158 | struct nv50_fifo_chan *fctx = chan->engctx[engine]; | ||
312 | struct drm_device *dev = chan->dev; | 159 | struct drm_device *dev = chan->dev; |
313 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 160 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
314 | unsigned long flags; | 161 | unsigned long flags; |
@@ -319,9 +166,7 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) | |||
319 | nv50_fifo_playlist_update(dev); | 166 | nv50_fifo_playlist_update(dev); |
320 | 167 | ||
321 | /* tell any engines on this channel to unload their contexts */ | 168 | /* tell any engines on this channel to unload their contexts */ |
322 | nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); | 169 | nv50_fifo_kickoff(chan); |
323 | if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv)) | ||
324 | NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); | ||
325 | 170 | ||
326 | nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); | 171 | nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); |
327 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | 172 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); |
@@ -332,41 +177,118 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) | |||
332 | chan->user = NULL; | 177 | chan->user = NULL; |
333 | } | 178 | } |
334 | 179 | ||
335 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | 180 | atomic_dec(&chan->vm->engref[engine]); |
336 | nouveau_gpuobj_ref(NULL, &chan->cache); | 181 | chan->engctx[engine] = NULL; |
182 | kfree(fctx); | ||
337 | } | 183 | } |
338 | 184 | ||
339 | int | 185 | static int |
340 | nv50_fifo_load_context(struct nouveau_channel *chan) | 186 | nv50_fifo_init(struct drm_device *dev, int engine) |
341 | { | 187 | { |
188 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
189 | u32 instance; | ||
190 | int i; | ||
191 | |||
192 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); | ||
193 | nv_mask(dev, 0x000200, 0x00000100, 0x00000100); | ||
194 | nv_wr32(dev, 0x00250c, 0x6f3cfc34); | ||
195 | nv_wr32(dev, 0x002044, 0x01003fff); | ||
196 | |||
197 | nv_wr32(dev, 0x002100, 0xffffffff); | ||
198 | nv_wr32(dev, 0x002140, 0xffffffff); | ||
199 | |||
200 | for (i = 0; i < 128; i++) { | ||
201 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; | ||
202 | if (chan && chan->engctx[engine]) | ||
203 | instance = 0x80000000 | chan->ramin->vinst >> 12; | ||
204 | else | ||
205 | instance = 0x00000000; | ||
206 | nv_wr32(dev, 0x002600 + (i * 4), instance); | ||
207 | } | ||
208 | |||
209 | nv50_fifo_playlist_update(dev); | ||
210 | |||
211 | nv_wr32(dev, 0x003200, 1); | ||
212 | nv_wr32(dev, 0x003250, 1); | ||
213 | nv_wr32(dev, 0x002500, 1); | ||
342 | return 0; | 214 | return 0; |
343 | } | 215 | } |
344 | 216 | ||
345 | int | 217 | static int |
346 | nv50_fifo_unload_context(struct drm_device *dev) | 218 | nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend) |
347 | { | 219 | { |
348 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 220 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
221 | struct nv50_fifo_priv *priv = nv_engine(dev, engine); | ||
349 | int i; | 222 | int i; |
350 | 223 | ||
351 | /* set playlist length to zero, fifo will unload context */ | 224 | /* set playlist length to zero, fifo will unload context */ |
352 | nv_wr32(dev, 0x0032ec, 0); | 225 | nv_wr32(dev, 0x0032ec, 0); |
353 | 226 | ||
354 | /* tell all connected engines to unload their contexts */ | 227 | /* tell all connected engines to unload their contexts */ |
355 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 228 | for (i = 0; i < priv->base.channels; i++) { |
356 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; | 229 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; |
357 | if (chan) | 230 | if (chan && !nv50_fifo_kickoff(chan)) |
358 | nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); | ||
359 | if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv)) { | ||
360 | NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i); | ||
361 | return -EBUSY; | 231 | return -EBUSY; |
362 | } | ||
363 | } | 232 | } |
364 | 233 | ||
234 | nv_wr32(dev, 0x002140, 0); | ||
365 | return 0; | 235 | return 0; |
366 | } | 236 | } |
367 | 237 | ||
368 | void | 238 | void |
369 | nv50_fifo_tlb_flush(struct drm_device *dev) | 239 | nv50_fifo_tlb_flush(struct drm_device *dev, int engine) |
370 | { | 240 | { |
371 | nv50_vm_flush_engine(dev, 5); | 241 | nv50_vm_flush_engine(dev, 5); |
372 | } | 242 | } |
243 | |||
244 | void | ||
245 | nv50_fifo_destroy(struct drm_device *dev, int engine) | ||
246 | { | ||
247 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
248 | struct nv50_fifo_priv *priv = nv_engine(dev, engine); | ||
249 | |||
250 | nouveau_irq_unregister(dev, 8); | ||
251 | |||
252 | nouveau_gpuobj_ref(NULL, &priv->playlist[0]); | ||
253 | nouveau_gpuobj_ref(NULL, &priv->playlist[1]); | ||
254 | |||
255 | dev_priv->eng[engine] = NULL; | ||
256 | kfree(priv); | ||
257 | } | ||
258 | |||
259 | int | ||
260 | nv50_fifo_create(struct drm_device *dev) | ||
261 | { | ||
262 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
263 | struct nv50_fifo_priv *priv; | ||
264 | int ret; | ||
265 | |||
266 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
267 | if (!priv) | ||
268 | return -ENOMEM; | ||
269 | |||
270 | priv->base.base.destroy = nv50_fifo_destroy; | ||
271 | priv->base.base.init = nv50_fifo_init; | ||
272 | priv->base.base.fini = nv50_fifo_fini; | ||
273 | priv->base.base.context_new = nv50_fifo_context_new; | ||
274 | priv->base.base.context_del = nv50_fifo_context_del; | ||
275 | priv->base.base.tlb_flush = nv50_fifo_tlb_flush; | ||
276 | priv->base.channels = 127; | ||
277 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
278 | |||
279 | ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000, | ||
280 | NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]); | ||
281 | if (ret) | ||
282 | goto error; | ||
283 | |||
284 | ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000, | ||
285 | NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]); | ||
286 | if (ret) | ||
287 | goto error; | ||
288 | |||
289 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
290 | error: | ||
291 | if (ret) | ||
292 | priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); | ||
293 | return ret; | ||
294 | } | ||
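
nv50_fifo_playlist_update() above double-buffers the playlist: each call fills the currently idle buffer with the ids of every channel whose context-table entry at 0x2600 + i*4 has bit 31 set, flips the cur_playlist index, and then points the hardware at the freshly written list (0x32f4 takes the address, 0x32ec the length, and a write to 0x2500 triggers the update). A self-contained sketch of that ping-pong scheme, with plain arrays and a struct standing in for the gpuobjs and registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHANNELS 127

struct playlist_state {
	uint32_t playlist[2][CHANNELS];   /* priv->playlist[0] / [1]             */
	int      cur;                     /* priv->cur_playlist                  */
	bool     chan_enabled[CHANNELS];  /* bit 31 of 0x2600 + i*4, per channel */
	const uint32_t *hw_list;          /* what would be written to 0x32f4     */
	uint32_t        hw_len;           /* what would be written to 0x32ec     */
};

/* Mirrors nv50_fifo_playlist_update(): fill the idle buffer, flip the index
 * for next time, then hand the new list and its length to the hardware. */
static void playlist_update_sketch(struct playlist_state *s)
{
	uint32_t *cur = s->playlist[s->cur];
	int i, p;

	s->cur = !s->cur;                 /* the other buffer is used next time  */

	for (i = 0, p = 0; i < CHANNELS; i++) {
		if (s->chan_enabled[i])
			cur[p++] = i;     /* nv_wo32(cur, p++ * 4, i)            */
	}

	s->hw_list = cur;                 /* nv_wr32(dev, 0x0032f4, ...)         */
	s->hw_len  = p;                   /* nv_wr32(dev, 0x0032ec, p)           */
	/* a write to 0x002500 would then kick the actual update                 */
}

int main(void)
{
	struct playlist_state s = { .cur = 0 };

	s.chan_enabled[2] = s.chan_enabled[5] = true;
	playlist_update_sketch(&s);
	printf("%u channels in the active playlist\n", (unsigned)s.hw_len);
	return 0;
}
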
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 46b2c95de4d0..636e22b1530c 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "drmP.h" | 27 | #include "drmP.h" |
28 | #include "drm.h" | 28 | #include "drm.h" |
29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
30 | #include "nouveau_fifo.h" | ||
30 | #include "nouveau_ramht.h" | 31 | #include "nouveau_ramht.h" |
31 | #include "nouveau_dma.h" | 32 | #include "nouveau_dma.h" |
32 | #include "nouveau_vm.h" | 33 | #include "nouveau_vm.h" |
@@ -710,13 +711,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid | |||
710 | int | 711 | int |
711 | nv50_graph_isr_chid(struct drm_device *dev, u64 inst) | 712 | nv50_graph_isr_chid(struct drm_device *dev, u64 inst) |
712 | { | 713 | { |
714 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
713 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 715 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
714 | struct nouveau_channel *chan; | 716 | struct nouveau_channel *chan; |
715 | unsigned long flags; | 717 | unsigned long flags; |
716 | int i; | 718 | int i; |
717 | 719 | ||
718 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 720 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
719 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 721 | for (i = 0; i < pfifo->channels; i++) { |
720 | chan = dev_priv->channels.ptr[i]; | 722 | chan = dev_priv->channels.ptr[i]; |
721 | if (!chan || !chan->ramin) | 723 | if (!chan || !chan->ramin) |
722 | continue; | 724 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 44fbac9c7d93..179bb42a635c 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
147 | { | 147 | { |
148 | struct drm_nouveau_private *dev_priv = vm->dev->dev_private; | 148 | struct drm_nouveau_private *dev_priv = vm->dev->dev_private; |
149 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 149 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
150 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
151 | int i; | 150 | int i; |
152 | 151 | ||
153 | pinstmem->flush(vm->dev); | 152 | pinstmem->flush(vm->dev); |
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
158 | return; | 157 | return; |
159 | } | 158 | } |
160 | 159 | ||
161 | pfifo->tlb_flush(vm->dev); | ||
162 | for (i = 0; i < NVOBJ_ENGINE_NR; i++) { | 160 | for (i = 0; i < NVOBJ_ENGINE_NR; i++) { |
163 | if (atomic_read(&vm->engref[i])) | 161 | if (atomic_read(&vm->engref[i])) |
164 | dev_priv->eng[i]->tlb_flush(vm->dev, i); | 162 | dev_priv->eng[i]->tlb_flush(vm->dev, i); |
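
With the explicit pfifo->tlb_flush() call removed from nv50_vm_flush(), the FIFO is now flushed the same way as every other engine: context_new()/context_del() adjust vm->engref[engine], and the flush loop only invokes eng[i]->tlb_flush() for engines whose count is non-zero. A compact model of that refcount-gated dispatch, using hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

#define ENGINE_NR 4

struct vm_sketch {
	atomic_int engref[ENGINE_NR];          /* vm->engref[i]                  */
};

struct engine_sketch {
	void (*tlb_flush)(int engine);         /* eng[i]->tlb_flush              */
};

static struct engine_sketch *engines[ENGINE_NR];

static void fifo_tlb_flush(int engine)
{
	printf("flushing TLB for engine %d\n", engine);
}

/* context_new()/context_del() keep the per-engine refcount in step */
static void ctx_new(struct vm_sketch *vm, int engine)
{
	atomic_fetch_add(&vm->engref[engine], 1);
}

static void ctx_del(struct vm_sketch *vm, int engine)
{
	atomic_fetch_sub(&vm->engref[engine], 1);
}

/* Mirrors the tail of nv50_vm_flush(): only engines that actually hold
 * contexts on this VM are asked to flush. */
static void vm_flush_sketch(struct vm_sketch *vm)
{
	for (int i = 0; i < ENGINE_NR; i++) {
		if (engines[i] && atomic_load(&vm->engref[i]))
			engines[i]->tlb_flush(i);
	}
}

int main(void)
{
	static struct engine_sketch fifo = { .tlb_flush = fifo_tlb_flush };
	struct vm_sketch vm;

	for (int i = 0; i < ENGINE_NR; i++)
		atomic_init(&vm.engref[i], 0);

	engines[0] = &fifo;
	ctx_new(&vm, 0);
	vm_flush_sketch(&vm);                  /* flushes engine 0               */
	ctx_del(&vm, 0);
	vm_flush_sketch(&vm);                  /* nothing left to flush          */
	return 0;
}
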
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 0ac98c0efc71..c2f889b0d340 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
28 | #include "nouveau_fifo.h" | ||
28 | #include "nouveau_ramht.h" | 29 | #include "nouveau_ramht.h" |
29 | #include "nouveau_fence.h" | 30 | #include "nouveau_fence.h" |
30 | 31 | ||
@@ -145,8 +146,8 @@ nv84_fence_destroy(struct drm_device *dev, int engine) | |||
145 | int | 146 | int |
146 | nv84_fence_create(struct drm_device *dev) | 147 | nv84_fence_create(struct drm_device *dev) |
147 | { | 148 | { |
149 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
148 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 150 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
149 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
150 | struct nv84_fence_priv *priv; | 151 | struct nv84_fence_priv *priv; |
151 | int ret; | 152 | int ret; |
152 | 153 | ||
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c new file mode 100644 index 000000000000..cc82d799fc3b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_fifo.c | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Ben Skeggs. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining | ||
6 | * a copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial | ||
15 | * portions of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
20 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "drmP.h" | ||
28 | #include "drm.h" | ||
29 | #include "nouveau_drv.h" | ||
30 | #include "nouveau_fifo.h" | ||
31 | #include "nouveau_ramht.h" | ||
32 | #include "nouveau_vm.h" | ||
33 | |||
34 | struct nv84_fifo_priv { | ||
35 | struct nouveau_fifo_priv base; | ||
36 | struct nouveau_gpuobj *playlist[2]; | ||
37 | int cur_playlist; | ||
38 | }; | ||
39 | |||
40 | struct nv84_fifo_chan { | ||
41 | struct nouveau_fifo_chan base; | ||
42 | struct nouveau_gpuobj *ramfc; | ||
43 | struct nouveau_gpuobj *cache; | ||
44 | }; | ||
45 | |||
46 | static int | ||
47 | nv84_fifo_context_new(struct nouveau_channel *chan, int engine) | ||
48 | { | ||
49 | struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine); | ||
50 | struct nv84_fifo_chan *fctx; | ||
51 | struct drm_device *dev = chan->dev; | ||
52 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
53 | u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4; | ||
54 | u64 instance; | ||
55 | unsigned long flags; | ||
56 | int ret; | ||
57 | |||
58 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); | ||
59 | if (!fctx) | ||
60 | return -ENOMEM; | ||
61 | atomic_inc(&chan->vm->engref[engine]); | ||
62 | |||
63 | chan->user = ioremap(pci_resource_start(dev->pdev, 0) + | ||
64 | NV50_USER(chan->id), PAGE_SIZE); | ||
65 | if (!chan->user) { | ||
66 | ret = -ENOMEM; | ||
67 | goto error; | ||
68 | } | ||
69 | |||
70 | ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC | | ||
71 | NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc); | ||
72 | if (ret) | ||
73 | goto error; | ||
74 | |||
75 | instance = fctx->ramfc->vinst >> 8; | ||
76 | |||
77 | ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache); | ||
78 | if (ret) | ||
79 | goto error; | ||
80 | |||
81 | nv_wo32(fctx->ramfc, 0x3c, 0x403f6078); | ||
82 | nv_wo32(fctx->ramfc, 0x40, 0x00000000); | ||
83 | nv_wo32(fctx->ramfc, 0x44, 0x01003fff); | ||
84 | nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4); | ||
85 | nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset)); | ||
86 | nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) | | ||
87 | drm_order(chan->dma.ib_max + 1) << 16); | ||
88 | nv_wo32(fctx->ramfc, 0x60, 0x7fffffff); | ||
89 | nv_wo32(fctx->ramfc, 0x78, 0x00000000); | ||
90 | nv_wo32(fctx->ramfc, 0x7c, 0x30000001); | ||
91 | nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | | ||
92 | (4 << 24) /* SEARCH_FULL */ | | ||
93 | (chan->ramht->gpuobj->cinst >> 4)); | ||
94 | nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10); | ||
95 | nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12); | ||
96 | |||
97 | nv_wo32(chan->ramin, 0x00, chan->id); | ||
98 | nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8); | ||
99 | |||
100 | dev_priv->engine.instmem.flush(dev); | ||
101 | |||
102 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
103 | nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance); | ||
104 | nv50_fifo_playlist_update(dev); | ||
105 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
106 | |||
107 | error: | ||
108 | if (ret) | ||
109 | priv->base.base.context_del(chan, engine); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static void | ||
114 | nv84_fifo_context_del(struct nouveau_channel *chan, int engine) | ||
115 | { | ||
116 | struct nv84_fifo_chan *fctx = chan->engctx[engine]; | ||
117 | struct drm_device *dev = chan->dev; | ||
118 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
119 | unsigned long flags; | ||
120 | |||
121 | /* remove channel from playlist, will context switch if active */ | ||
122 | spin_lock_irqsave(&dev_priv->context_switch_lock, flags); | ||
123 | nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); | ||
124 | nv50_fifo_playlist_update(dev); | ||
125 | |||
126 | /* tell any engines on this channel to unload their contexts */ | ||
127 | nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); | ||
128 | if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) | ||
129 | NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); | ||
130 | |||
131 | nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); | ||
132 | spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); | ||
133 | |||
134 | /* clean up */ | ||
135 | if (chan->user) { | ||
136 | iounmap(chan->user); | ||
137 | chan->user = NULL; | ||
138 | } | ||
139 | |||
140 | nouveau_gpuobj_ref(NULL, &fctx->ramfc); | ||
141 | nouveau_gpuobj_ref(NULL, &fctx->cache); | ||
142 | |||
143 | atomic_dec(&chan->vm->engref[engine]); | ||
144 | chan->engctx[engine] = NULL; | ||
145 | kfree(fctx); | ||
146 | } | ||
147 | |||
148 | static int | ||
149 | nv84_fifo_init(struct drm_device *dev, int engine) | ||
150 | { | ||
151 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
152 | struct nv84_fifo_chan *fctx; | ||
153 | u32 instance; | ||
154 | int i; | ||
155 | |||
156 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); | ||
157 | nv_mask(dev, 0x000200, 0x00000100, 0x00000100); | ||
158 | nv_wr32(dev, 0x00250c, 0x6f3cfc34); | ||
159 | nv_wr32(dev, 0x002044, 0x01003fff); | ||
160 | |||
161 | nv_wr32(dev, 0x002100, 0xffffffff); | ||
162 | nv_wr32(dev, 0x002140, 0xffffffff); | ||
163 | |||
164 | for (i = 0; i < 128; i++) { | ||
165 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; | ||
166 | if (chan && (fctx = chan->engctx[engine])) | ||
167 | instance = 0x80000000 | fctx->ramfc->vinst >> 8; | ||
168 | else | ||
169 | instance = 0x00000000; | ||
170 | nv_wr32(dev, 0x002600 + (i * 4), instance); | ||
171 | } | ||
172 | |||
173 | nv50_fifo_playlist_update(dev); | ||
174 | |||
175 | nv_wr32(dev, 0x003200, 1); | ||
176 | nv_wr32(dev, 0x003250, 1); | ||
177 | nv_wr32(dev, 0x002500, 1); | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static int | ||
182 | nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) | ||
183 | { | ||
184 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
185 | struct nv84_fifo_priv *priv = nv_engine(dev, engine); | ||
186 | int i; | ||
187 | |||
188 | /* set playlist length to zero, fifo will unload context */ | ||
189 | nv_wr32(dev, 0x0032ec, 0); | ||
190 | |||
191 | /* tell all connected engines to unload their contexts */ | ||
192 | for (i = 0; i < priv->base.channels; i++) { | ||
193 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; | ||
194 | if (chan) | ||
195 | nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); | ||
196 | if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) { | ||
197 | NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i); | ||
198 | return -EBUSY; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | nv_wr32(dev, 0x002140, 0); | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | int | ||
207 | nv84_fifo_create(struct drm_device *dev) | ||
208 | { | ||
209 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
210 | struct nv84_fifo_priv *priv; | ||
211 | int ret; | ||
212 | |||
213 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
214 | if (!priv) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | priv->base.base.destroy = nv50_fifo_destroy; | ||
218 | priv->base.base.init = nv84_fifo_init; | ||
219 | priv->base.base.fini = nv84_fifo_fini; | ||
220 | priv->base.base.context_new = nv84_fifo_context_new; | ||
221 | priv->base.base.context_del = nv84_fifo_context_del; | ||
222 | priv->base.base.tlb_flush = nv50_fifo_tlb_flush; | ||
223 | priv->base.channels = 127; | ||
224 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
225 | |||
226 | ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000, | ||
227 | NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]); | ||
228 | if (ret) | ||
229 | goto error; | ||
230 | |||
231 | ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000, | ||
232 | NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]); | ||
233 | if (ret) | ||
234 | goto error; | ||
235 | |||
236 | nouveau_irq_register(dev, 8, nv04_fifo_isr); | ||
237 | error: | ||
238 | if (ret) | ||
239 | priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); | ||
240 | return ret; | ||
241 | } | ||
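
Both the nv50 and nv84 context_new() paths write the pushbuf's indirect-buffer parameters into the context image: offset 0x50 takes the low 32 bits of ib_offset, and 0x54 packs the high bits together with drm_order(ib_max + 1) (the ceiling log2 of the ring size) in bits 16 and up. A small standalone check of that packing, with made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for drm_order(): smallest k such that (1 << k) >= size. */
static unsigned order(unsigned long size)
{
	unsigned k = 0;

	while ((1UL << k) < size)
		k++;
	return k;
}

int main(void)
{
	/* made-up example values for pushbuf_base + ib_base * 4 and ib_max */
	uint64_t ib_offset = 0x12341000ull;
	unsigned ib_max    = 0x1ff;                   /* 512-entry IB ring   */

	uint32_t word50 = (uint32_t)ib_offset;                /* lower_32_bits() */
	uint32_t word54 = (uint32_t)(ib_offset >> 32) |       /* upper_32_bits() */
	                  (uint32_t)(order(ib_max + 1) << 16);

	printf("0x50 = 0x%08x, 0x54 = 0x%08x (ring order %u)\n",
	       (unsigned)word50, (unsigned)word54, order(ib_max + 1));
	return 0;
}

For the example values this prints ring order 9, i.e. a 512-slot ring, which is the field the hardware reads back out of bits 16+ of the 0x54 word.
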
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c index 817228cd1a95..47ab388a606e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/drivers/gpu/drm/nouveau/nvc0_fence.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "drmP.h" | 25 | #include "drmP.h" |
26 | #include "nouveau_drv.h" | 26 | #include "nouveau_drv.h" |
27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
28 | #include "nouveau_fifo.h" | ||
28 | #include "nouveau_ramht.h" | 29 | #include "nouveau_ramht.h" |
29 | #include "nouveau_fence.h" | 30 | #include "nouveau_fence.h" |
30 | 31 | ||
@@ -148,8 +149,8 @@ nvc0_fence_destroy(struct drm_device *dev, int engine) | |||
148 | int | 149 | int |
149 | nvc0_fence_create(struct drm_device *dev) | 150 | nvc0_fence_create(struct drm_device *dev) |
150 | { | 151 | { |
152 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
151 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 153 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
152 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
153 | struct nvc0_fence_priv *priv; | 154 | struct nvc0_fence_priv *priv; |
154 | int ret; | 155 | int ret; |
155 | 156 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index 471723eaf8ad..7d85553d518c 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c | |||
@@ -26,10 +26,12 @@ | |||
26 | 26 | ||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_mm.h" | 28 | #include "nouveau_mm.h" |
29 | #include "nouveau_fifo.h" | ||
29 | 30 | ||
30 | static void nvc0_fifo_isr(struct drm_device *); | 31 | static void nvc0_fifo_isr(struct drm_device *); |
31 | 32 | ||
32 | struct nvc0_fifo_priv { | 33 | struct nvc0_fifo_priv { |
34 | struct nouveau_fifo_priv base; | ||
33 | struct nouveau_gpuobj *playlist[2]; | 35 | struct nouveau_gpuobj *playlist[2]; |
34 | int cur_playlist; | 36 | int cur_playlist; |
35 | struct nouveau_vma user_vma; | 37 | struct nouveau_vma user_vma; |
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv { | |||
37 | }; | 39 | }; |
38 | 40 | ||
39 | struct nvc0_fifo_chan { | 41 | struct nvc0_fifo_chan { |
42 | struct nouveau_fifo_chan base; | ||
40 | struct nouveau_gpuobj *user; | 43 | struct nouveau_gpuobj *user; |
41 | struct nouveau_gpuobj *ramfc; | ||
42 | }; | 44 | }; |
43 | 45 | ||
44 | static void | 46 | static void |
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev) | |||
46 | { | 48 | { |
47 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 49 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
48 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 50 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
49 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 51 | struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); |
50 | struct nvc0_fifo_priv *priv = pfifo->priv; | ||
51 | struct nouveau_gpuobj *cur; | 52 | struct nouveau_gpuobj *cur; |
52 | int i, p; | 53 | int i, p; |
53 | 54 | ||
@@ -69,31 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev) | |||
69 | NV_ERROR(dev, "PFIFO - playlist update failed\n"); | 70 | NV_ERROR(dev, "PFIFO - playlist update failed\n"); |
70 | } | 71 | } |
71 | 72 | ||
72 | int | 73 | static int |
73 | nvc0_fifo_create_context(struct nouveau_channel *chan) | 74 | nvc0_fifo_context_new(struct nouveau_channel *chan, int engine) |
74 | { | 75 | { |
75 | struct drm_device *dev = chan->dev; | 76 | struct drm_device *dev = chan->dev; |
76 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 77 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
77 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 78 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
78 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 79 | struct nvc0_fifo_priv *priv = nv_engine(dev, engine); |
79 | struct nvc0_fifo_priv *priv = pfifo->priv; | 80 | struct nvc0_fifo_chan *fctx; |
80 | struct nvc0_fifo_chan *fifoch; | ||
81 | u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; | 81 | u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; |
82 | int ret; | 82 | int ret, i; |
83 | 83 | ||
84 | chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); | 84 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
85 | if (!chan->fifo_priv) | 85 | if (!fctx) |
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | fifoch = chan->fifo_priv; | ||
88 | |||
89 | /* allocate vram for control regs, map into polling area */ | ||
90 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, | ||
91 | NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user); | ||
92 | if (ret) | ||
93 | goto error; | ||
94 | |||
95 | nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, | ||
96 | *(struct nouveau_mem **)fifoch->user->node); | ||
97 | 87 | ||
98 | chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + | 88 | chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + |
99 | priv->user_vma.offset + (chan->id * 0x1000), | 89 | priv->user_vma.offset + (chan->id * 0x1000), |
@@ -103,175 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan) | |||
103 | goto error; | 93 | goto error; |
104 | } | 94 | } |
105 | 95 | ||
106 | /* ramfc */ | 96 | /* allocate vram for control regs, map into polling area */ |
107 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, | 97 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, |
108 | chan->ramin->vinst, 0x100, | 98 | NVOBJ_FLAG_ZERO_ALLOC, &fctx->user); |
109 | NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc); | ||
110 | if (ret) | 99 | if (ret) |
111 | goto error; | 100 | goto error; |
112 | 101 | ||
113 | nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst)); | 102 | nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, |
114 | nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst)); | 103 | *(struct nouveau_mem **)fctx->user->node); |
115 | nv_wo32(fifoch->ramfc, 0x10, 0x0000face); | 104 | |
116 | nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); | 105 | for (i = 0; i < 0x100; i += 4) |
117 | nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); | 106 | nv_wo32(chan->ramin, i, 0x00000000); |
118 | nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | | 107 | nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst)); |
108 | nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst)); | ||
109 | nv_wo32(chan->ramin, 0x10, 0x0000face); | ||
110 | nv_wo32(chan->ramin, 0x30, 0xfffff902); | ||
111 | nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt)); | ||
112 | nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | | ||
119 | upper_32_bits(ib_virt)); | 113 | upper_32_bits(ib_virt)); |
120 | nv_wo32(fifoch->ramfc, 0x54, 0x00000002); | 114 | nv_wo32(chan->ramin, 0x54, 0x00000002); |
121 | nv_wo32(fifoch->ramfc, 0x84, 0x20400000); | 115 | nv_wo32(chan->ramin, 0x84, 0x20400000); |
122 | nv_wo32(fifoch->ramfc, 0x94, 0x30000001); | 116 | nv_wo32(chan->ramin, 0x94, 0x30000001); |
123 | nv_wo32(fifoch->ramfc, 0x9c, 0x00000100); | 117 | nv_wo32(chan->ramin, 0x9c, 0x00000100); |
124 | nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f); | 118 | nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f); |
125 | nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f); | 119 | nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f); |
126 | nv_wo32(fifoch->ramfc, 0xac, 0x0000001f); | 120 | nv_wo32(chan->ramin, 0xac, 0x0000001f); |
127 | nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000); | 121 | nv_wo32(chan->ramin, 0xb8, 0xf8000000); |
128 | nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */ | 122 | nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */ |
129 | nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */ | 123 | nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */ |
130 | pinstmem->flush(dev); | 124 | pinstmem->flush(dev); |
131 | 125 | ||
132 | nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 | | 126 | nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 | |
133 | (chan->ramin->vinst >> 12)); | 127 | (chan->ramin->vinst >> 12)); |
134 | nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001); | 128 | nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001); |
135 | nvc0_fifo_playlist_update(dev); | 129 | nvc0_fifo_playlist_update(dev); |
136 | return 0; | ||
137 | 130 | ||
138 | error: | 131 | error: |
139 | pfifo->destroy_context(chan); | 132 | if (ret) |
133 | priv->base.base.context_del(chan, engine); | ||
140 | return ret; | 134 | return ret; |
141 | } | 135 | } |
142 | 136 | ||
143 | void | 137 | static void |
144 | nvc0_fifo_destroy_context(struct nouveau_channel *chan) | 138 | nvc0_fifo_context_del(struct nouveau_channel *chan, int engine) |
145 | { | 139 | { |
140 | struct nvc0_fifo_chan *fctx = chan->engctx[engine]; | ||
146 | struct drm_device *dev = chan->dev; | 141 | struct drm_device *dev = chan->dev; |
147 | struct nvc0_fifo_chan *fifoch; | ||
148 | 142 | ||
149 | nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000); | 143 | nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000); |
150 | nv_wr32(dev, 0x002634, chan->id); | 144 | nv_wr32(dev, 0x002634, chan->id); |
151 | if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) | 145 | if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) |
152 | NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); | 146 | NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); |
153 | |||
154 | nvc0_fifo_playlist_update(dev); | 147 | nvc0_fifo_playlist_update(dev); |
155 | |||
156 | nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000); | 148 | nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000); |
157 | 149 | ||
150 | nouveau_gpuobj_ref(NULL, &fctx->user); | ||
158 | if (chan->user) { | 151 | if (chan->user) { |
159 | iounmap(chan->user); | 152 | iounmap(chan->user); |
160 | chan->user = NULL; | 153 | chan->user = NULL; |
161 | } | 154 | } |
162 | 155 | ||
163 | fifoch = chan->fifo_priv; | 156 | chan->engctx[engine] = NULL; |
164 | chan->fifo_priv = NULL; | 157 | kfree(fctx); |
165 | if (!fifoch) | ||
166 | return; | ||
167 | |||
168 | nouveau_gpuobj_ref(NULL, &fifoch->ramfc); | ||
169 | nouveau_gpuobj_ref(NULL, &fifoch->user); | ||
170 | kfree(fifoch); | ||
171 | } | ||
172 | |||
173 | int | ||
174 | nvc0_fifo_load_context(struct nouveau_channel *chan) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | int | ||
180 | nvc0_fifo_unload_context(struct drm_device *dev) | ||
181 | { | ||
182 | int i; | ||
183 | |||
184 | for (i = 0; i < 128; i++) { | ||
185 | if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1)) | ||
186 | continue; | ||
187 | |||
188 | nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000); | ||
189 | nv_wr32(dev, 0x002634, i); | ||
190 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | ||
191 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | ||
192 | i, nv_rd32(dev, 0x002634)); | ||
193 | return -EBUSY; | ||
194 | } | ||
195 | } | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static void | ||
201 | nvc0_fifo_destroy(struct drm_device *dev) | ||
202 | { | ||
203 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
204 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
205 | struct nvc0_fifo_priv *priv; | ||
206 | |||
207 | priv = pfifo->priv; | ||
208 | if (!priv) | ||
209 | return; | ||
210 | |||
211 | nouveau_vm_put(&priv->user_vma); | ||
212 | nouveau_gpuobj_ref(NULL, &priv->playlist[1]); | ||
213 | nouveau_gpuobj_ref(NULL, &priv->playlist[0]); | ||
214 | kfree(priv); | ||
215 | } | ||
216 | |||
217 | void | ||
218 | nvc0_fifo_takedown(struct drm_device *dev) | ||
219 | { | ||
220 | nv_wr32(dev, 0x002140, 0x00000000); | ||
221 | nvc0_fifo_destroy(dev); | ||
222 | } | 158 | } |
223 | 159 | ||
224 | static int | 160 | static int |
225 | nvc0_fifo_create(struct drm_device *dev) | 161 | nvc0_fifo_init(struct drm_device *dev, int engine) |
226 | { | ||
227 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
228 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
229 | struct nvc0_fifo_priv *priv; | ||
230 | int ret; | ||
231 | |||
232 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
233 | if (!priv) | ||
234 | return -ENOMEM; | ||
235 | pfifo->priv = priv; | ||
236 | |||
237 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0, | ||
238 | &priv->playlist[0]); | ||
239 | if (ret) | ||
240 | goto error; | ||
241 | |||
242 | ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0, | ||
243 | &priv->playlist[1]); | ||
244 | if (ret) | ||
245 | goto error; | ||
246 | |||
247 | ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000, | ||
248 | 12, NV_MEM_ACCESS_RW, &priv->user_vma); | ||
249 | if (ret) | ||
250 | goto error; | ||
251 | |||
252 | nouveau_irq_register(dev, 8, nvc0_fifo_isr); | ||
253 | return 0; | ||
254 | |||
255 | error: | ||
256 | nvc0_fifo_destroy(dev); | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | int | ||
261 | nvc0_fifo_init(struct drm_device *dev) | ||
262 | { | 162 | { |
263 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 163 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
264 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 164 | struct nvc0_fifo_priv *priv = nv_engine(dev, engine); |
265 | struct nouveau_channel *chan; | 165 | struct nouveau_channel *chan; |
266 | struct nvc0_fifo_priv *priv; | 166 | int i; |
267 | int ret, i; | ||
268 | |||
269 | if (!pfifo->priv) { | ||
270 | ret = nvc0_fifo_create(dev); | ||
271 | if (ret) | ||
272 | return ret; | ||
273 | } | ||
274 | priv = pfifo->priv; | ||
275 | 167 | ||
276 | /* reset PFIFO, enable all available PSUBFIFO areas */ | 168 | /* reset PFIFO, enable all available PSUBFIFO areas */ |
277 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); | 169 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); |
@@ -309,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev) | |||
309 | /* restore PFIFO context table */ | 201 | /* restore PFIFO context table */ |
310 | for (i = 0; i < 128; i++) { | 202 | for (i = 0; i < 128; i++) { |
311 | chan = dev_priv->channels.ptr[i]; | 203 | chan = dev_priv->channels.ptr[i]; |
312 | if (!chan || !chan->fifo_priv) | 204 | if (!chan || !chan->engctx[engine]) |
313 | continue; | 205 | continue; |
314 | 206 | ||
315 | nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 | | 207 | nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 | |
@@ -321,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev) | |||
321 | return 0; | 213 | return 0; |
322 | } | 214 | } |
323 | 215 | ||
216 | static int | ||
217 | nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend) | ||
218 | { | ||
219 | int i; | ||
220 | |||
221 | for (i = 0; i < 128; i++) { | ||
222 | if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1)) | ||
223 | continue; | ||
224 | |||
225 | nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000); | ||
226 | nv_wr32(dev, 0x002634, i); | ||
227 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | ||
228 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | ||
229 | i, nv_rd32(dev, 0x002634)); | ||
230 | return -EBUSY; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | nv_wr32(dev, 0x002140, 0x00000000); | ||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | |||
324 | struct nouveau_enum nvc0_fifo_fault_unit[] = { | 239 | struct nouveau_enum nvc0_fifo_fault_unit[] = { |
325 | { 0x00, "PGRAPH" }, | 240 | { 0x00, "PGRAPH" }, |
326 | { 0x03, "PEEPHOLE" }, | 241 | { 0x03, "PEEPHOLE" }, |
@@ -410,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit) | |||
410 | static int | 325 | static int |
411 | nvc0_fifo_page_flip(struct drm_device *dev, u32 chid) | 326 | nvc0_fifo_page_flip(struct drm_device *dev, u32 chid) |
412 | { | 327 | { |
328 | struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
413 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 329 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
414 | struct nouveau_channel *chan = NULL; | 330 | struct nouveau_channel *chan = NULL; |
415 | unsigned long flags; | 331 | unsigned long flags; |
416 | int ret = -EINVAL; | 332 | int ret = -EINVAL; |
417 | 333 | ||
418 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 334 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
419 | if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) { | 335 | if (likely(chid >= 0 && chid < priv->base.channels)) { |
420 | chan = dev_priv->channels.ptr[chid]; | 336 | chan = dev_priv->channels.ptr[chid]; |
421 | if (likely(chan)) | 337 | if (likely(chan)) |
422 | ret = nouveau_finish_page_flip(chan, NULL); | 338 | ret = nouveau_finish_page_flip(chan, NULL); |
@@ -505,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev) | |||
505 | nv_wr32(dev, 0x002140, 0); | 421 | nv_wr32(dev, 0x002140, 0); |
506 | } | 422 | } |
507 | } | 423 | } |
424 | |||
425 | static void | ||
426 | nvc0_fifo_destroy(struct drm_device *dev, int engine) | ||
427 | { | ||
428 | struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
429 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
430 | |||
431 | nouveau_vm_put(&priv->user_vma); | ||
432 | nouveau_gpuobj_ref(NULL, &priv->playlist[1]); | ||
433 | nouveau_gpuobj_ref(NULL, &priv->playlist[0]); | ||
434 | |||
435 | dev_priv->eng[engine] = NULL; | ||
436 | kfree(priv); | ||
437 | } | ||
438 | |||
439 | int | ||
440 | nvc0_fifo_create(struct drm_device *dev) | ||
441 | { | ||
442 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
443 | struct nvc0_fifo_priv *priv; | ||
444 | int ret; | ||
445 | |||
446 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
447 | if (!priv) | ||
448 | return -ENOMEM; | ||
449 | |||
450 | priv->base.base.destroy = nvc0_fifo_destroy; | ||
451 | priv->base.base.init = nvc0_fifo_init; | ||
452 | priv->base.base.fini = nvc0_fifo_fini; | ||
453 | priv->base.base.context_new = nvc0_fifo_context_new; | ||
454 | priv->base.base.context_del = nvc0_fifo_context_del; | ||
455 | priv->base.channels = 128; | ||
456 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
457 | |||
458 | ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]); | ||
459 | if (ret) | ||
460 | goto error; | ||
461 | |||
462 | ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]); | ||
463 | if (ret) | ||
464 | goto error; | ||
465 | |||
466 | ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000, | ||
467 | 12, NV_MEM_ACCESS_RW, &priv->user_vma); | ||
468 | if (ret) | ||
469 | goto error; | ||
470 | |||
471 | nouveau_irq_register(dev, 8, nvc0_fifo_isr); | ||
472 | error: | ||
473 | if (ret) | ||
474 | priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); | ||
475 | return ret; | ||
476 | } | ||
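
Tearing down a Fermi channel (nvc0_fifo_context_del() and nvc0_fifo_fini() above) uses a kick-and-poll handshake: clear the channel's enable bit, write its id to 0x2634, then wait for that register to read the id back, warning or bailing out on timeout. A self-contained model of that handshake with a fake register in place of MMIO; all names here are placeholders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "register file": in the real driver these are MMIO accesses. */
static uint32_t reg_2634;

static void     fake_wr32(uint32_t val) { reg_2634 = val; }
static uint32_t fake_rd32(void)         { return reg_2634; }

/* Stands in for the hardware acknowledging the kick; here it is immediate,
 * on real hardware it happens once the channel has actually been unloaded. */
static void fake_hw_ack(uint32_t chid) { reg_2634 = chid; }

/* Mirrors nv_wait(dev, 0x002634, 0xffffffff, chid): poll until the masked
 * register equals the expected value, or give up after a bounded number of
 * polls (the real helper uses a time limit instead). */
static bool wait_eq(uint32_t mask, uint32_t val, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if ((fake_rd32() & mask) == val)
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t chid = 7;

	fake_wr32(chid);          /* nv_wr32(dev, 0x002634, chan->id)          */
	fake_hw_ack(chid);        /* hardware finishes unloading the channel   */

	if (!wait_eq(0xffffffff, chid, 1000))
		printf("0x2634 != chid: 0x%08x\n", fake_rd32());
	else
		printf("channel %u kicked off cleanly\n", chid);
	return 0;
}
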
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 9066102d1159..2a01e6e47724 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include "nouveau_drv.h" | 30 | #include "nouveau_drv.h" |
31 | #include "nouveau_mm.h" | 31 | #include "nouveau_mm.h" |
32 | #include "nouveau_fifo.h" | ||
32 | 33 | ||
33 | #include "nvc0_graph.h" | 34 | #include "nvc0_graph.h" |
34 | #include "nvc0_grhub.fuc.h" | 35 | #include "nvc0_grhub.fuc.h" |
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine) | |||
620 | int | 621 | int |
621 | nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) | 622 | nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) |
622 | { | 623 | { |
624 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
623 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 625 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
624 | struct nouveau_channel *chan; | 626 | struct nouveau_channel *chan; |
625 | unsigned long flags; | 627 | unsigned long flags; |
626 | int i; | 628 | int i; |
627 | 629 | ||
628 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 630 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
629 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 631 | for (i = 0; i < pfifo->channels; i++) { |
630 | chan = dev_priv->channels.ptr[i]; | 632 | chan = dev_priv->channels.ptr[i]; |
631 | if (!chan || !chan->ramin) | 633 | if (!chan || !chan->ramin) |
632 | continue; | 634 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c index 52c54e0fdcee..1855ecbd843b 100644 --- a/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/drivers/gpu/drm/nouveau/nve0_fifo.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_mm.h" | 28 | #include "nouveau_mm.h" |
29 | #include "nouveau_fifo.h" | ||
29 | 30 | ||
30 | #define NVE0_FIFO_ENGINE_NUM 32 | 31 | #define NVE0_FIFO_ENGINE_NUM 32 |
31 | 32 | ||
@@ -37,6 +38,7 @@ struct nve0_fifo_engine { | |||
37 | }; | 38 | }; |
38 | 39 | ||
39 | struct nve0_fifo_priv { | 40 | struct nve0_fifo_priv { |
41 | struct nouveau_fifo_priv base; | ||
40 | struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM]; | 42 | struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM]; |
41 | struct { | 43 | struct { |
42 | struct nouveau_gpuobj *mem; | 44 | struct nouveau_gpuobj *mem; |
@@ -46,7 +48,7 @@ struct nve0_fifo_priv { | |||
46 | }; | 48 | }; |
47 | 49 | ||
48 | struct nve0_fifo_chan { | 50 | struct nve0_fifo_chan { |
49 | struct nouveau_gpuobj *ramfc; | 51 | struct nouveau_fifo_chan base; |
50 | u32 engine; | 52 | u32 engine; |
51 | }; | 53 | }; |
52 | 54 | ||
@@ -55,8 +57,7 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine) | |||
55 | { | 57 | { |
56 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 58 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
57 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 59 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
58 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 60 | struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); |
59 | struct nve0_fifo_priv *priv = pfifo->priv; | ||
60 | struct nve0_fifo_engine *peng = &priv->engine[engine]; | 61 | struct nve0_fifo_engine *peng = &priv->engine[engine]; |
61 | struct nouveau_gpuobj *cur; | 62 | struct nouveau_gpuobj *cur; |
62 | u32 match = (engine << 16) | 0x00000001; | 63 | u32 match = (engine << 16) | 0x00000001; |
@@ -75,7 +76,7 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine) | |||
75 | 76 | ||
76 | peng->cur_playlist = !peng->cur_playlist; | 77 | peng->cur_playlist = !peng->cur_playlist; |
77 | 78 | ||
78 | for (i = 0, p = 0; i < pfifo->channels; i++) { | 79 | for (i = 0, p = 0; i < priv->base.channels; i++) { |
79 | u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001; | 80 | u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001; |
80 | if (ctrl != match) | 81 | if (ctrl != match) |
81 | continue; | 82 | continue; |
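[editor's note] The playlist loop above decides which channels belong to an engine's playlist by masking the per-channel control word. Written out as a stand-alone helper purely for illustration (no such helper exists in the patch):

    /* A channel is included when its enable bit (bit 0) is set and the
     * engine field in bits 20:16 matches the playlist being rebuilt. */
    static bool
    nve0_fifo_chan_on_engine(struct drm_device *dev, int chid, u32 engine)
    {
    	u32 ctrl = nv_rd32(dev, 0x800004 + (chid * 8)) & 0x001f0001;
    	return ctrl == ((engine << 16) | 0x00000001);
    }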
@@ -91,24 +92,23 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine) | |||
91 | NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine); | 92 | NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine); |
92 | } | 93 | } |
93 | 94 | ||
94 | int | 95 | static int |
95 | nve0_fifo_create_context(struct nouveau_channel *chan) | 96 | nve0_fifo_context_new(struct nouveau_channel *chan, int engine) |
96 | { | 97 | { |
97 | struct drm_device *dev = chan->dev; | 98 | struct drm_device *dev = chan->dev; |
98 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 99 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
99 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 100 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
100 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 101 | struct nve0_fifo_priv *priv = nv_engine(dev, engine); |
101 | struct nve0_fifo_priv *priv = pfifo->priv; | 102 | struct nve0_fifo_chan *fctx; |
102 | struct nve0_fifo_chan *fifoch; | ||
103 | u64 usermem = priv->user.mem->vinst + chan->id * 512; | 103 | u64 usermem = priv->user.mem->vinst + chan->id * 512; |
104 | u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; | 104 | u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; |
105 | int ret; | 105 | int ret = 0, i; |
106 | 106 | ||
107 | chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); | 107 | fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); |
108 | if (!chan->fifo_priv) | 108 | if (!fctx) |
109 | return -ENOMEM; | 109 | return -ENOMEM; |
110 | fifoch = chan->fifo_priv; | 110 | |
111 | fifoch->engine = 0; /* PGRAPH */ | 111 | fctx->engine = 0; /* PGRAPH */ |
112 | 112 | ||
113 | /* allocate vram for control regs, map into polling area */ | 113 | /* allocate vram for control regs, map into polling area */ |
114 | chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + | 114 | chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + |
@@ -118,56 +118,48 @@ nve0_fifo_create_context(struct nouveau_channel *chan) | |||
118 | goto error; | 118 | goto error; |
119 | } | 119 | } |
120 | 120 | ||
121 | /* ramfc */ | 121 | for (i = 0; i < 0x100; i += 4) |
122 | ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, | 122 | nv_wo32(chan->ramin, i, 0x00000000); |
123 | chan->ramin->vinst, 0x100, | 123 | nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem)); |
124 | NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc); | 124 | nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem)); |
125 | if (ret) | 125 | nv_wo32(chan->ramin, 0x10, 0x0000face); |
126 | goto error; | 126 | nv_wo32(chan->ramin, 0x30, 0xfffff902); |
127 | 127 | nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt)); | |
128 | nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(usermem)); | 128 | nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | |
129 | nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(usermem)); | ||
130 | nv_wo32(fifoch->ramfc, 0x10, 0x0000face); | ||
131 | nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); | ||
132 | nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); | ||
133 | nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | | ||
134 | upper_32_bits(ib_virt)); | 129 | upper_32_bits(ib_virt)); |
135 | nv_wo32(fifoch->ramfc, 0x84, 0x20400000); | 130 | nv_wo32(chan->ramin, 0x84, 0x20400000); |
136 | nv_wo32(fifoch->ramfc, 0x94, 0x30000001); | 131 | nv_wo32(chan->ramin, 0x94, 0x30000001); |
137 | nv_wo32(fifoch->ramfc, 0x9c, 0x00000100); | 132 | nv_wo32(chan->ramin, 0x9c, 0x00000100); |
138 | nv_wo32(fifoch->ramfc, 0xac, 0x0000001f); | 133 | nv_wo32(chan->ramin, 0xac, 0x0000001f); |
139 | nv_wo32(fifoch->ramfc, 0xe4, 0x00000000); | 134 | nv_wo32(chan->ramin, 0xe4, 0x00000000); |
140 | nv_wo32(fifoch->ramfc, 0xe8, chan->id); | 135 | nv_wo32(chan->ramin, 0xe8, chan->id); |
141 | nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */ | 136 | nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */ |
142 | nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */ | 137 | nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */ |
143 | pinstmem->flush(dev); | 138 | pinstmem->flush(dev); |
144 | 139 | ||
145 | nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 | | 140 | nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 | |
146 | (chan->ramin->vinst >> 12)); | 141 | (chan->ramin->vinst >> 12)); |
147 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); | 142 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); |
148 | nve0_fifo_playlist_update(dev, fifoch->engine); | 143 | nve0_fifo_playlist_update(dev, fctx->engine); |
149 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); | 144 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400); |
150 | return 0; | ||
151 | 145 | ||
152 | error: | 146 | error: |
153 | pfifo->destroy_context(chan); | 147 | if (ret) |
148 | priv->base.base.context_del(chan, engine); | ||
154 | return ret; | 149 | return ret; |
155 | } | 150 | } |
156 | 151 | ||
157 | void | 152 | static void |
158 | nve0_fifo_destroy_context(struct nouveau_channel *chan) | 153 | nve0_fifo_context_del(struct nouveau_channel *chan, int engine) |
159 | { | 154 | { |
160 | struct nve0_fifo_chan *fifoch = chan->fifo_priv; | 155 | struct nve0_fifo_chan *fctx = chan->engctx[engine]; |
161 | struct drm_device *dev = chan->dev; | 156 | struct drm_device *dev = chan->dev; |
162 | 157 | ||
163 | if (!fifoch) | ||
164 | return; | ||
165 | |||
166 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800); | 158 | nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800); |
167 | nv_wr32(dev, 0x002634, chan->id); | 159 | nv_wr32(dev, 0x002634, chan->id); |
168 | if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) | 160 | if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) |
169 | NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); | 161 | NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); |
170 | nve0_fifo_playlist_update(dev, fifoch->engine); | 162 | nve0_fifo_playlist_update(dev, fctx->engine); |
171 | nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000); | 163 | nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000); |
172 | 164 | ||
173 | if (chan->user) { | 165 | if (chan->user) { |
@@ -175,118 +167,17 @@ nve0_fifo_destroy_context(struct nouveau_channel *chan) | |||
175 | chan->user = NULL; | 167 | chan->user = NULL; |
176 | } | 168 | } |
177 | 169 | ||
178 | nouveau_gpuobj_ref(NULL, &fifoch->ramfc); | 170 | chan->engctx[NVOBJ_ENGINE_FIFO] = NULL; |
179 | chan->fifo_priv = NULL; | 171 | kfree(fctx); |
180 | kfree(fifoch); | ||
181 | } | ||
182 | |||
183 | int | ||
184 | nve0_fifo_load_context(struct nouveau_channel *chan) | ||
185 | { | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | int | ||
190 | nve0_fifo_unload_context(struct drm_device *dev) | ||
191 | { | ||
192 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
193 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
194 | int i; | ||
195 | |||
196 | for (i = 0; i < pfifo->channels; i++) { | ||
197 | if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1)) | ||
198 | continue; | ||
199 | |||
200 | nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800); | ||
201 | nv_wr32(dev, 0x002634, i); | ||
202 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | ||
203 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | ||
204 | i, nv_rd32(dev, 0x002634)); | ||
205 | return -EBUSY; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static void | ||
213 | nve0_fifo_destroy(struct drm_device *dev) | ||
214 | { | ||
215 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
216 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
217 | struct nve0_fifo_priv *priv; | ||
218 | int i; | ||
219 | |||
220 | priv = pfifo->priv; | ||
221 | if (!priv) | ||
222 | return; | ||
223 | |||
224 | nouveau_vm_put(&priv->user.bar); | ||
225 | nouveau_gpuobj_ref(NULL, &priv->user.mem); | ||
226 | |||
227 | for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) { | ||
228 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]); | ||
229 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]); | ||
230 | } | ||
231 | kfree(priv); | ||
232 | } | ||
233 | |||
234 | void | ||
235 | nve0_fifo_takedown(struct drm_device *dev) | ||
236 | { | ||
237 | nv_wr32(dev, 0x002140, 0x00000000); | ||
238 | nve0_fifo_destroy(dev); | ||
239 | } | 172 | } |
240 | 173 | ||
241 | static int | 174 | static int |
242 | nve0_fifo_create(struct drm_device *dev) | 175 | nve0_fifo_init(struct drm_device *dev, int engine) |
243 | { | ||
244 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
245 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
246 | struct nve0_fifo_priv *priv; | ||
247 | int ret; | ||
248 | |||
249 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
250 | if (!priv) | ||
251 | return -ENOMEM; | ||
252 | pfifo->priv = priv; | ||
253 | |||
254 | ret = nouveau_gpuobj_new(dev, NULL, pfifo->channels * 512, 0x1000, | ||
255 | NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); | ||
256 | if (ret) | ||
257 | goto error; | ||
258 | |||
259 | ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size, | ||
260 | 12, NV_MEM_ACCESS_RW, &priv->user.bar); | ||
261 | if (ret) | ||
262 | goto error; | ||
263 | |||
264 | nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node); | ||
265 | |||
266 | nouveau_irq_register(dev, 8, nve0_fifo_isr); | ||
267 | return 0; | ||
268 | |||
269 | error: | ||
270 | nve0_fifo_destroy(dev); | ||
271 | return ret; | ||
272 | } | ||
273 | |||
274 | int | ||
275 | nve0_fifo_init(struct drm_device *dev) | ||
276 | { | 176 | { |
277 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 177 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
278 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | 178 | struct nve0_fifo_priv *priv = nv_engine(dev, engine); |
279 | struct nouveau_channel *chan; | 179 | struct nve0_fifo_chan *fctx; |
280 | struct nve0_fifo_chan *fifoch; | 180 | int i; |
281 | struct nve0_fifo_priv *priv; | ||
282 | int ret, i; | ||
283 | |||
284 | if (!pfifo->priv) { | ||
285 | ret = nve0_fifo_create(dev); | ||
286 | if (ret) | ||
287 | return ret; | ||
288 | } | ||
289 | priv = pfifo->priv; | ||
290 | 181 | ||
291 | /* reset PFIFO, enable all available PSUBFIFO areas */ | 182 | /* reset PFIFO, enable all available PSUBFIFO areas */ |
292 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); | 183 | nv_mask(dev, 0x000200, 0x00000100, 0x00000000); |
@@ -310,22 +201,44 @@ nve0_fifo_init(struct drm_device *dev) | |||
310 | nv_wr32(dev, 0x002140, 0xbfffffff); | 201 | nv_wr32(dev, 0x002140, 0xbfffffff); |
311 | 202 | ||
312 | /* restore PFIFO context table */ | 203 | /* restore PFIFO context table */ |
313 | for (i = 0; i < pfifo->channels; i++) { | 204 | for (i = 0; i < priv->base.channels; i++) { |
314 | chan = dev_priv->channels.ptr[i]; | 205 | struct nouveau_channel *chan = dev_priv->channels.ptr[i]; |
315 | if (!chan || !chan->fifo_priv) | 206 | if (!chan || !(fctx = chan->engctx[engine])) |
316 | continue; | 207 | continue; |
317 | fifoch = chan->fifo_priv; | ||
318 | 208 | ||
319 | nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 | | 209 | nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 | |
320 | (chan->ramin->vinst >> 12)); | 210 | (chan->ramin->vinst >> 12)); |
321 | nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); | 211 | nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); |
322 | nve0_fifo_playlist_update(dev, fifoch->engine); | 212 | nve0_fifo_playlist_update(dev, fctx->engine); |
323 | nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); | 213 | nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400); |
324 | } | 214 | } |
325 | 215 | ||
326 | return 0; | 216 | return 0; |
327 | } | 217 | } |
328 | 218 | ||
219 | static int | ||
220 | nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend) | ||
221 | { | ||
222 | struct nve0_fifo_priv *priv = nv_engine(dev, engine); | ||
223 | int i; | ||
224 | |||
225 | for (i = 0; i < priv->base.channels; i++) { | ||
226 | if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1)) | ||
227 | continue; | ||
228 | |||
229 | nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800); | ||
230 | nv_wr32(dev, 0x002634, i); | ||
231 | if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { | ||
232 | NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", | ||
233 | i, nv_rd32(dev, 0x002634)); | ||
234 | return -EBUSY; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | nv_wr32(dev, 0x002140, 0x00000000); | ||
239 | return 0; | ||
240 | } | ||
241 | |||
329 | struct nouveau_enum nve0_fifo_fault_unit[] = { | 242 | struct nouveau_enum nve0_fifo_fault_unit[] = { |
330 | {} | 243 | {} |
331 | }; | 244 | }; |
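[editor's note] nve0_fifo_fini() above and nve0_fifo_context_del() earlier share the same per-channel kick sequence; condensed into one hypothetical helper (not introduced by the patch, shown only to make the handshake explicit):

    /* Block the channel, request PFIFO to unload it, wait for the ack. */
    static int
    nve0_fifo_kick(struct drm_device *dev, int chid)
    {
    	nv_mask(dev, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
    	nv_wr32(dev, 0x002634, chid);
    	if (!nv_wait(dev, 0x002634, 0xffffffff, chid))
    		return -EBUSY;
    	return 0;
    }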
@@ -451,3 +364,60 @@ nve0_fifo_isr(struct drm_device *dev) | |||
451 | nv_wr32(dev, 0x002140, 0); | 364 | nv_wr32(dev, 0x002140, 0); |
452 | } | 365 | } |
453 | } | 366 | } |
367 | |||
368 | static void | ||
369 | nve0_fifo_destroy(struct drm_device *dev, int engine) | ||
370 | { | ||
371 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
372 | struct nve0_fifo_priv *priv = nv_engine(dev, engine); | ||
373 | int i; | ||
374 | |||
375 | nouveau_vm_put(&priv->user.bar); | ||
376 | nouveau_gpuobj_ref(NULL, &priv->user.mem); | ||
377 | |||
378 | for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) { | ||
379 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]); | ||
380 | nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]); | ||
381 | } | ||
382 | |||
383 | dev_priv->eng[engine] = NULL; | ||
384 | kfree(priv); | ||
385 | } | ||
386 | |||
387 | int | ||
388 | nve0_fifo_create(struct drm_device *dev) | ||
389 | { | ||
390 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
391 | struct nve0_fifo_priv *priv; | ||
392 | int ret; | ||
393 | |||
394 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
395 | if (!priv) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | priv->base.base.destroy = nve0_fifo_destroy; | ||
399 | priv->base.base.init = nve0_fifo_init; | ||
400 | priv->base.base.fini = nve0_fifo_fini; | ||
401 | priv->base.base.context_new = nve0_fifo_context_new; | ||
402 | priv->base.base.context_del = nve0_fifo_context_del; | ||
403 | priv->base.channels = 4096; | ||
404 | dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base; | ||
405 | |||
406 | ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000, | ||
407 | NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); | ||
408 | if (ret) | ||
409 | goto error; | ||
410 | |||
411 | ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size, | ||
412 | 12, NV_MEM_ACCESS_RW, &priv->user.bar); | ||
413 | if (ret) | ||
414 | goto error; | ||
415 | |||
416 | nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node); | ||
417 | |||
418 | nouveau_irq_register(dev, 8, nve0_fifo_isr); | ||
419 | error: | ||
420 | if (ret) | ||
421 | priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO); | ||
422 | return ret; | ||
423 | } | ||
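[editor's note] The most visible functional change in nve0_fifo.c is that the RAMFC is no longer a fake gpuobj layered over the channel's instance block; context_new() now writes the words straight into chan->ramin. The densest of those words is the one at offset 0x4c, which packs the indirect-buffer ring size (as a power of two) next to the high bits of its virtual address. A worked example with hypothetical values (a 512-entry IB, i.e. chan->dma.ib_max == 511):

    u64 ib_virt = 0x1000200000ULL;            /* hypothetical pushbuf_base + ib_base * 4 */
    u32 ib_entries = 511 + 1;                 /* chan->dma.ib_max + 1 */

    /* drm_order(512) == 9, so the ring size lands in bits 31:16 and the
     * IB address bits 39:32 land in the low half of the word. */
    u32 word_4c = drm_order(ib_entries) << 16 | upper_32_bits(ib_virt);
    /* word_4c == 0x00090010 */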
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c index 9cc3cf246e09..41356274ca61 100644 --- a/drivers/gpu/drm/nouveau/nve0_graph.c +++ b/drivers/gpu/drm/nouveau/nve0_graph.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include "nouveau_drv.h" | 30 | #include "nouveau_drv.h" |
31 | #include "nouveau_mm.h" | 31 | #include "nouveau_mm.h" |
32 | #include "nouveau_fifo.h" | ||
32 | 33 | ||
33 | #include "nve0_graph.h" | 34 | #include "nve0_graph.h" |
34 | 35 | ||
@@ -548,13 +549,14 @@ nve0_graph_init(struct drm_device *dev, int engine) | |||
548 | int | 549 | int |
549 | nve0_graph_isr_chid(struct drm_device *dev, u64 inst) | 550 | nve0_graph_isr_chid(struct drm_device *dev, u64 inst) |
550 | { | 551 | { |
552 | struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); | ||
551 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 553 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
552 | struct nouveau_channel *chan; | 554 | struct nouveau_channel *chan; |
553 | unsigned long flags; | 555 | unsigned long flags; |
554 | int i; | 556 | int i; |
555 | 557 | ||
556 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | 558 | spin_lock_irqsave(&dev_priv->channels.lock, flags); |
557 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 559 | for (i = 0; i < pfifo->channels; i++) { |
558 | chan = dev_priv->channels.ptr[i]; | 560 | chan = dev_priv->channels.ptr[i]; |
559 | if (!chan || !chan->ramin) | 561 | if (!chan || !chan->ramin) |
560 | continue; | 562 | continue; |