path: root/drivers/gpu/drm/nouveau/nouveau_object.c
author	Ben Skeggs <bskeggs@redhat.com>	2010-11-15 20:50:09 -0500
committer	Ben Skeggs <bskeggs@redhat.com>	2010-12-03 00:11:59 -0500
commit	7f4a195fcbd8b16f25f1de7f1419414d7505daa5 (patch)
tree	d54405e52a42c41f6e88ff3ae3685afe2aa57f34 /drivers/gpu/drm/nouveau/nouveau_object.c
parent	6d6c5a157af45a5bd50ab913b07d826811a9ea0a (diff)
drm/nouveau: tidy up and extend dma object creation interfaces
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_object.c	235
1 file changed, 136 insertions(+), 99 deletions(-)
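
The reworked entry point takes a base/size pair plus NV_MEM_ACCESS_* / NV_MEM_TARGET_* enums and hides both the per-generation ctxdma layout (pre-NV50 vs. NV50) and the old nouveau_gpuobj_gart_dma_new() special case. Below is a minimal sketch of how a caller uses the new interface, mirroring the nouveau_gpuobj_channel_init() changes in the later hunks; the wrapper function name example_channel_ctxdmas is hypothetical and the snippet assumes the usual nouveau driver context (nouveau_drv.h).

	/* Sketch only: illustrates the caller-facing interface added by this
	 * patch; not code from the patch itself. */
	static int example_channel_ctxdmas(struct nouveau_channel *chan)
	{
		struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
		struct nouveau_gpuobj *vram = NULL, *tt = NULL;
		int ret;

		/* Full-VRAM DMA object; on NV50+ the callers below use
		 * NV_MEM_TARGET_VM to cover the whole channel VM instead. */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret)
			return ret;

		/* GART aperture; the AGP vs. SGDMA distinction is now resolved
		 * inside nouveau_gpuobj_dma_new() rather than by the caller. */
		return nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					      0, dev_priv->gart_info.aper_size,
					      NV_MEM_ACCESS_RW,
					      NV_MEM_TARGET_GART, &tt);
	}
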
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e8c74de905ec..924653c30783 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -404,113 +404,157 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-		       uint64_t offset, uint64_t size, int access,
-		       int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+		     u64 base, u64 size, int target, int access,
+		     u32 type, u32 comp)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int ret;
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	u32 flags0;
 
-	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-		 chan->id, class, offset, size);
-	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+	flags0  = (comp << 29) | (type << 22) | class;
+	flags0 |= 0x00100000;
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+	case NV_MEM_ACCESS_RW:
+	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+	default:
+		break;
+	}
 
 	switch (target) {
-	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->gart_info.aper_base;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
 		break;
+	case NV_MEM_TARGET_GART:
+		base += dev_priv->vm_gart_base;
 	default:
+		flags0 &= ~0x00100000;
 		break;
 	}
 
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-		return ret;
-	}
+	/* convert to base + limit */
+	size = (base + size) - 1;
 
-	if (dev_priv->card_type < NV_50) {
-		uint32_t frame, adjust, pte_flags = 0;
-
-		if (access != NV_DMA_ACCESS_RO)
-			pte_flags |= (1<<1);
-		adjust = offset &  0x00000fff;
-		frame  = offset & ~0x00000fff;
-
-		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
-				      (access << 14) | (target << 16) |
-				      class));
-		nv_wo32(*gpuobj,  4, size - 1);
-		nv_wo32(*gpuobj,  8, frame | pte_flags);
-		nv_wo32(*gpuobj, 12, frame | pte_flags);
-	} else {
-		uint64_t limit = offset + size - 1;
-		uint32_t flags0, flags5;
+	nv_wo32(obj, offset + 0x00, flags0);
+	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+				    upper_32_bits(base));
+	nv_wo32(obj, offset + 0x10, 0x00000000);
+	nv_wo32(obj, offset + 0x14, 0x00000000);
 
-		if (target == NV_DMA_TARGET_VIDMEM) {
-			flags0 = 0x00190000;
-			flags5 = 0x00010000;
-		} else {
-			flags0 = 0x7fc00000;
-			flags5 = 0x00080000;
-		}
+	pinstmem->flush(obj->dev);
+}
 
-		nv_wo32(*gpuobj, 0, flags0 | class);
-		nv_wo32(*gpuobj, 4, lower_32_bits(limit));
-		nv_wo32(*gpuobj, 8, lower_32_bits(offset));
-		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-				      (upper_32_bits(offset) & 0xff));
-		nv_wo32(*gpuobj, 20, flags5);
-	}
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+		    int target, int access, u32 type, u32 comp,
+		    struct nouveau_gpuobj **pobj)
+{
+	struct drm_device *dev = chan->dev;
+	int ret;
 
-	instmem->flush(dev);
+	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, pobj);
+	if (ret)
+		return ret;
 
-	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
-	(*gpuobj)->class = class;
+	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+			     access, type, comp);
 	return 0;
 }
 
 int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-			    uint64_t offset, uint64_t size, int access,
-			    struct nouveau_gpuobj **gpuobj,
-			    uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+		       u64 size, int access, int target,
+		       struct nouveau_gpuobj **pobj)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *obj;
+	u32 page_addr, flags0, flags2;
 	int ret;
 
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-	    (dev_priv->card_type >= NV_50 &&
-	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     offset + dev_priv->vm_gart_base,
-					     size, access, NV_DMA_TARGET_AGP,
-					     gpuobj);
-		if (o_ret)
-			*o_ret = 0;
-	} else
-	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-		if (offset & ~0xffffffffULL) {
-			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-			return -EINVAL;
+	if (dev_priv->card_type >= NV_50) {
+		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+		return nv50_gpuobj_dma_new(chan, class, base, size,
+					   target, access, type, comp, pobj);
+	}
+
+	if (target == NV_MEM_TARGET_GART) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			target = NV_MEM_TARGET_PCI_NOSNOOP;
+			base  += dev_priv->gart_info.aper_base;
+		} else
+		if (base != 0) {
+			ret = nouveau_sgdma_get_page(dev, base, &page_addr);
+			if (ret)
+				return ret;
+
+			target = NV_MEM_TARGET_PCI;
+			base   = page_addr;
+		} else {
+			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+			return 0;
 		}
-		if (o_ret)
-			*o_ret = (uint32_t)offset;
-		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		return -EINVAL;
 	}
 
-	return ret;
+	flags0  = class;
+	flags0 |= 0x00003000; /* PT present, PT linear */
+	flags2  = 0;
+
+	switch (target) {
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
+		break;
+	}
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	default:
+		flags2 |= 0x00000002;
+		break;
+	}
+
+	flags0 |= (base & 0x00000fff) << 20;
+	flags2 |= (base & 0xfffff000);
+
+	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->card_type >= NV_40) ?
+				 32 : 16, 16, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, flags0);
+	nv_wo32(obj, 0x04, size - 1);
+	nv_wo32(obj, 0x08, flags2);
+	nv_wo32(obj, 0x0c, flags2);
+
+	obj->engine = NVOBJ_ENGINE_SW;
+	obj->class  = class;
+	*pobj = obj;
+	return 0;
 }
 
 /* Context objects in the instance RAM have the following structure.
@@ -806,8 +850,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -815,8 +859,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	} else {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -834,20 +878,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &tt);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else
-	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RW, &tt, NULL);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &tt);
 	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		ret = -EINVAL;
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_GART, &tt);
 	}
 
 	if (ret) {