author     Ben Skeggs <bskeggs@redhat.com>    2012-05-01 02:33:37 -0400
committer  Ben Skeggs <bskeggs@redhat.com>    2012-05-24 02:56:05 -0400
commit     03bd6efa1468830d1dc9380654229d427aa487d7 (patch)
tree       3a264d0a0e7503d4a50a1d388396c0e726e244bd /drivers/gpu/drm/nouveau
parent     71af5e62db5d7d6348e838d0f79533653e2f8cfe (diff)
drm/nv50/fifo: use hardware channel kickoff functionality
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c |   8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c     |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c       | 197
3 files changed, 43 insertions, 166 deletions
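
For readers skimming the hunks below: rather than hand-saving a channel's RAMFC state through the PFIFO CACHE1 registers, the driver now drops the channel from the playlist and asks the hardware to kick it off by writing the channel's instance address to 0x0032fc, then polling until that register reads back something other than 0xffffffff. The sketch that follows is condensed from the nv50_fifo.c changes; the wrapper name nv50_fifo_kick_channel is hypothetical (the patch open-codes this sequence in nv50_fifo_destroy_context and nv50_fifo_unload_context), and nv_wait_cb is assumed to re-run the callback until it returns true or a timeout expires.

/* Condensed sketch of the hardware kickoff path added by this patch.
 * nv50_fifo_kick_channel() is a hypothetical wrapper; the real change
 * inlines this sequence in destroy_context/unload_context below. */
static bool
nv50_fifo_wait_kickoff(void *data)
{
	struct drm_nouveau_private *dev_priv = data;
	struct drm_device *dev = dev_priv->dev;

	/* original NV50 needs the ME poked while waiting (see the diff) */
	if (dev_priv->chipset == 0x50) {
		u32 me_enable = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
		nv_wr32(dev, 0x00b860, me_enable);
	}

	/* 0x0032fc reads back != 0xffffffff once the kickoff has completed */
	return nv_rd32(dev, 0x0032fc) != 0xffffffff;
}

static void
nv50_fifo_kick_channel(struct drm_device *dev, struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* drop the channel from the playlist; PFIFO context switches away
	 * from it if it happens to be the active one */
	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(dev);

	/* tell the engines on this channel to unload their contexts and
	 * wait for the hardware to acknowledge the kickoff */
	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
	if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv))
		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
}
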
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9f4a5c5d5903..a1f566758e7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -190,7 +190,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	chan->user_get_hi = 0x60;
 
 	/* disable the fifo caches */
-	if (dev_priv->card_type < NV_C0)
+	if (dev_priv->card_type < NV_50)
 		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
 
 	/* Construct initial RAMFC for new channel */
@@ -200,7 +200,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 	}
 
-	if (dev_priv->card_type < NV_C0)
+	if (dev_priv->card_type < NV_50)
 		nv_wr32(dev, NV03_PFIFO_CACHES, 1);
 
 	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
@@ -306,7 +306,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	nouveau_channel_idle(chan);
 
 	/* boot it off the hardware */
-	if (dev_priv->card_type < NV_C0)
+	if (dev_priv->card_type < NV_50)
 		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
 
 	/* destroy the engine specific contexts */
@@ -318,7 +318,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 		pfifo->destroy_context(chan);
 	}
 
-	if (dev_priv->card_type < NV_C0)
+	if (dev_priv->card_type < NV_50)
 		nv_wr32(dev, NV03_PFIFO_CACHES, 1);
 
 	/* aside from its resources, the channel should now be dead,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index adc6502d296c..910b97e813fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -221,7 +221,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 		nouveau_channel_idle(chan);
 	}
 
-	if (dev_priv->card_type < NV_C0) {
+	if (dev_priv->card_type < NV_50) {
 		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
 		nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
 		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
@@ -269,7 +269,7 @@ out_abort:
 		if (dev_priv->eng[e])
 			dev_priv->eng[e]->init(dev, e);
 	}
-	if (dev_priv->card_type < NV_C0) {
+	if (dev_priv->card_type < NV_50) {
 		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
 		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
 		nv_wr32(dev, NV03_PFIFO_CACHES, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index f041b197dd77..12e9e8270f45 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -292,44 +292,46 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
 	return 0;
 }
 
+static bool
+nv50_fifo_wait_kickoff(void *data)
+{
+	struct drm_nouveau_private *dev_priv = data;
+	struct drm_device *dev = dev_priv->dev;
+
+	if (dev_priv->chipset == 0x50) {
+		u32 me_enable = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+		nv_wr32(dev, 0x00b860, me_enable);
+	}
+
+	return nv_rd32(dev, 0x0032fc) != 0xffffffff;
+}
+
 void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	unsigned long flags;
 
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
+	/* remove channel from playlist, will context switch if active */
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-	/* Unload the context if it's the currently active one */
-	if ((nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x7f) == chan->id) {
-		nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
-		nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-		pfifo->unload_context(dev);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
-		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-	}
-
-	nv50_fifo_channel_disable(dev, chan->id);
-
-	/* Dummy channel, also used on ch 127 */
-	if (chan->id == 0)
-		nv50_fifo_channel_disable(dev, 127);
+	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
 	nv50_fifo_playlist_update(dev);
 
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+	/* tell any engines on this channel to unload their contexts */
+	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+	if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv))
+		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+
+	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	/* Free the channel resources */
+	/* clean up */
 	if (chan->user) {
 		iounmap(chan->user);
 		chan->user = NULL;
 	}
+
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->cache);
 }
@@ -337,68 +339,6 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
 int
 nv50_fifo_load_context(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramfc = chan->ramfc;
-	struct nouveau_gpuobj *cache = chan->cache;
-	int ptr, cnt;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
-	nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
-	nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
-	nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
-	nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
-	nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
-	nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
-	nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
-	nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
-	nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
-	nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
-	nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
-	nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
-	nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
-	nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
-	nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
-	nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
-	nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
-	nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
-	nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
-	nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
-	nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
-	nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
-	nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
-	nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
-	nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
-	nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
-	nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
-	nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
-	nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
-	nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
-	nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
-	nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
-
-	cnt = nv_ro32(ramfc, 0x84);
-	for (ptr = 0; ptr < cnt; ptr++) {
-		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
-			nv_ro32(cache, (ptr * 8) + 0));
-		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
-			nv_ro32(cache, (ptr * 8) + 4));
-	}
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-
-	/* guessing that all the 0x34xx regs aren't on NV50 */
-	if (dev_priv->chipset != 0x50) {
-		nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
-		nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
-		nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
-		nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
-		nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
-	}
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
 	return 0;
 }
 
@@ -406,85 +346,22 @@ int
 nv50_fifo_unload_context(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramfc, *cache;
-	struct nouveau_channel *chan = NULL;
-	int chid, get, put, ptr;
-
-	NV_DEBUG(dev, "\n");
-
-	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & 0x7f;
-	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
-		return 0;
-
-	chan = dev_priv->channels.ptr[chid];
-	if (!chan) {
-		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
-		return -EINVAL;
-	}
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-	ramfc = chan->ramfc;
-	cache = chan->cache;
-
-	nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
-	nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
-	nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
-	nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
-	nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
-	nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
-	nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
-	nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
-	nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
-	nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
-	nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
-	nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
-	nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
-	nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
-	nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
-	nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
-	nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
-	nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
-	nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
-	nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
-	nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
-	nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
-	nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
-	nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
-	nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
-	nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
-	nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
-	nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
-	nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
-	nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
-	nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
-	nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
-	nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
-
-	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
-	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
-	ptr = 0;
-	while (put != get) {
-		nv_wo32(cache, ptr + 0,
-			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
-		nv_wo32(cache, ptr + 4,
-			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
-		get = (get + 1) & 0x1ff;
-		ptr += 8;
-	}
+	int i;
 
-	/* guessing that all the 0x34xx regs aren't on NV50 */
-	if (dev_priv->chipset != 0x50) {
-		nv_wo32(ramfc, 0x84, ptr >> 3);
-		nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
-		nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
-		nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
-		nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
-		nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
+	/* set playlist length to zero, fifo will unload context */
+	nv_wr32(dev, 0x0032ec, 0);
+
+	/* tell all connected engines to unload their contexts */
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (chan)
+			nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+		if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv)) {
+			NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
+			return -EBUSY;
+		}
 	}
 
-	dev_priv->engine.instmem.flush(dev);
-
-	/*XXX: probably reload ch127 (NULL) state back too */
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
 	return 0;
 }
 