author    Ben Skeggs <bskeggs@redhat.com>    2010-11-15 20:50:09 -0500
committer Ben Skeggs <bskeggs@redhat.com>    2010-12-03 00:11:59 -0500
commit    7f4a195fcbd8b16f25f1de7f1419414d7505daa5 (patch)
tree      d54405e52a42c41f6e88ff3ae3685afe2aa57f34
parent    6d6c5a157af45a5bd50ab913b07d826811a9ea0a (diff)

drm/nouveau: tidy up and extend dma object creation interfaces

Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c  |  23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h      |  26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c    |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c |  29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c   | 235
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h      |  11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c    |   7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c    |   9

8 files changed, 184 insertions, 160 deletions
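
For orientation before the per-file hunks: callers that previously went through the separate nouveau_gpuobj_gart_dma_new() helper for GART-backed ctxdmas now use the single nouveau_gpuobj_dma_new() entry point and name the memory target explicitly. A before/after sketch of one call site, lifted from the push-buffer hunk below (shown only as a reading aid, not part of the patch itself):

	/* before: GART ctxdmas had their own helper */
	ret = nouveau_gpuobj_gart_dma_new(chan, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RO, &pushbuf, NULL);

	/* after: one entry point, access and target passed as NV_MEM_* flags */
	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
				     dev_priv->gart_info.aper_size,
				     NV_MEM_ACCESS_RO,
				     NV_MEM_TARGET_GART, &pushbuf);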
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 11b2370e16da..0f33132fba3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_AGP, &pushbuf);
+					     dev_priv->vm_end, NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VM, &pushbuf);
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RO, &pushbuf,
-						  NULL);
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_GART, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VRAM, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 		 * VRAM.
 		 */
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev,
-					     1),
+					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_PCI, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_PCI, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d76d2c09049d..a52b1da32031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -886,12 +886,14 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
-				       uint64_t offset, uint64_t size,
-				       int access, struct nouveau_gpuobj **,
-				       uint32_t *o_ret);
 extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
 				 struct nouveau_gpuobj **);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+			       u64 size, int target, int access, u32 type,
+			       u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+				 int class, u64 base, u64 size, int target,
+				 int access, u32 type, u32 comp);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -1545,6 +1547,22 @@ nv_match_device(struct drm_device *dev, unsigned device,
 	       dev->pdev->subsystem_device == sub_device;
 }
 
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_VM 4
+
+#define NV_MEM_TARGET_VRAM 0
+#define NV_MEM_TARGET_PCI 1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM 3
+#define NV_MEM_TARGET_GART 4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
 #define NV_SW 0x0000506e
 #define NV_SW_DMA_SEMAPHORE 0x00000060
 #define NV_SW_SEMAPHORE_OFFSET 0x00000064
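
The NV_MEM_ACCESS_*/NV_MEM_TARGET_* values above deliberately do not match hardware bits; nouveau_gpuobj_dma_new() translates them per card generation. On NV50+ the request is simply forwarded to the new low-level helper, roughly as in the nouveau_object.c hunk further down (repeated here only as a reading aid):

	/* NV50+: VM-target ctxdmas get the special VM type/compression values */
	u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
	u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

	return nv50_gpuobj_dma_new(chan, class, base, size,
				   target, access, type, comp, pobj);

Pre-NV50 cards keep building the four-word ctxdma directly in nouveau_gpuobj_dma_new().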
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 91aa6c54cc96..2579fc69d182 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -459,8 +459,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
 					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2c5a1f66f7f0..a050b7b69782 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 		       int size, uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *nobj = NULL;
 	struct drm_mm_node *mem;
 	uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
-		target = NV_DMA_TARGET_VIDMEM;
-	} else
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
-		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
-		    dev_priv->card_type < NV_50) {
-			ret = nouveau_sgdma_get_page(dev, offset, &offset);
-			if (ret)
-				return ret;
-			target = NV_DMA_TARGET_PCI;
-		} else {
-			target = NV_DMA_TARGET_AGP;
-			if (dev_priv->card_type >= NV_50)
-				offset += dev_priv->vm_gart_base;
-		}
-	} else {
-		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
-			 chan->notifier_bo->bo.mem.mem_type);
-		return -EINVAL;
-	}
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		target = NV_MEM_TARGET_VRAM;
+	else
+		target = NV_MEM_TARGET_GART;
+	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	offset += mem->start;
 
 	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-				     mem->size, NV_DMA_ACCESS_RW, target,
+				     mem->size, NV_MEM_ACCESS_RW, target,
 				     &nobj);
 	if (ret) {
 		drm_mm_put_block(mem);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e8c74de905ec..924653c30783 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -404,113 +404,157 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-		       uint64_t offset, uint64_t size, int access,
-		       int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+		     u64 base, u64 size, int target, int access,
+		     u32 type, u32 comp)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int ret;
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	u32 flags0;
 
-	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-		 chan->id, class, offset, size);
-	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+	flags0 = (comp << 29) | (type << 22) | class;
+	flags0 |= 0x00100000;
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+	case NV_MEM_ACCESS_RW:
+	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+	default:
+		break;
+	}
 
 	switch (target) {
-	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->gart_info.aper_base;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
 		break;
+	case NV_MEM_TARGET_GART:
+		base += dev_priv->vm_gart_base;
 	default:
+		flags0 &= ~0x00100000;
 		break;
 	}
 
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-		return ret;
-	}
+	/* convert to base + limit */
+	size = (base + size) - 1;
 
-	if (dev_priv->card_type < NV_50) {
-		uint32_t frame, adjust, pte_flags = 0;
-
-		if (access != NV_DMA_ACCESS_RO)
-			pte_flags |= (1<<1);
-		adjust = offset & 0x00000fff;
-		frame = offset & ~0x00000fff;
-
-		nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
-				     (access << 14) | (target << 16) |
-				     class));
-		nv_wo32(*gpuobj, 4, size - 1);
-		nv_wo32(*gpuobj, 8, frame | pte_flags);
-		nv_wo32(*gpuobj, 12, frame | pte_flags);
-	} else {
-		uint64_t limit = offset + size - 1;
-		uint32_t flags0, flags5;
+	nv_wo32(obj, offset + 0x00, flags0);
+	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+				    upper_32_bits(base));
+	nv_wo32(obj, offset + 0x10, 0x00000000);
+	nv_wo32(obj, offset + 0x14, 0x00000000);
 
-		if (target == NV_DMA_TARGET_VIDMEM) {
-			flags0 = 0x00190000;
-			flags5 = 0x00010000;
-		} else {
-			flags0 = 0x7fc00000;
-			flags5 = 0x00080000;
-		}
+	pinstmem->flush(obj->dev);
+}
 
-		nv_wo32(*gpuobj, 0, flags0 | class);
-		nv_wo32(*gpuobj, 4, lower_32_bits(limit));
-		nv_wo32(*gpuobj, 8, lower_32_bits(offset));
-		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-				     (upper_32_bits(offset) & 0xff));
-		nv_wo32(*gpuobj, 20, flags5);
-	}
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+		    int target, int access, u32 type, u32 comp,
+		    struct nouveau_gpuobj **pobj)
+{
+	struct drm_device *dev = chan->dev;
+	int ret;
 
-	instmem->flush(dev);
+	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, pobj);
+	if (ret)
+		return ret;
 
-	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
-	(*gpuobj)->class = class;
+	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+			     access, type, comp);
 	return 0;
 }
 
 int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-			    uint64_t offset, uint64_t size, int access,
-			    struct nouveau_gpuobj **gpuobj,
-			    uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+		       u64 size, int access, int target,
+		       struct nouveau_gpuobj **pobj)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *obj;
+	u32 page_addr, flags0, flags2;
 	int ret;
 
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-	    (dev_priv->card_type >= NV_50 &&
-	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     offset + dev_priv->vm_gart_base,
-					     size, access, NV_DMA_TARGET_AGP,
-					     gpuobj);
-		if (o_ret)
-			*o_ret = 0;
-	} else
-	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-		if (offset & ~0xffffffffULL) {
-			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-			return -EINVAL;
+	if (dev_priv->card_type >= NV_50) {
+		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+		return nv50_gpuobj_dma_new(chan, class, base, size,
+					   target, access, type, comp, pobj);
+	}
+
+	if (target == NV_MEM_TARGET_GART) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			target = NV_MEM_TARGET_PCI_NOSNOOP;
+			base += dev_priv->gart_info.aper_base;
+		} else
+		if (base != 0) {
+			ret = nouveau_sgdma_get_page(dev, base, &page_addr);
+			if (ret)
+				return ret;
+
+			target = NV_MEM_TARGET_PCI;
+			base = page_addr;
+		} else {
+			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+			return 0;
 		}
-		if (o_ret)
-			*o_ret = (uint32_t)offset;
-		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		return -EINVAL;
 	}
 
-	return ret;
+	flags0 = class;
+	flags0 |= 0x00003000; /* PT present, PT linear */
+	flags2 = 0;
+
+	switch (target) {
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
+		break;
+	}
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	default:
+		flags2 |= 0x00000002;
+		break;
+	}
+
+	flags0 |= (base & 0x00000fff) << 20;
+	flags2 |= (base & 0xfffff000);
+
+	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->card_type >= NV_40) ?
+				 32 : 16, 16, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, flags0);
+	nv_wo32(obj, 0x04, size - 1);
+	nv_wo32(obj, 0x08, flags2);
+	nv_wo32(obj, 0x0c, flags2);
+
+	obj->engine = NVOBJ_ENGINE_SW;
+	obj->class = class;
+	*pobj = obj;
+	return 0;
 }
 
 /* Context objects in the instance RAM have the following structure.
@@ -806,8 +850,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -815,8 +859,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	} else {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -834,20 +878,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &tt);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else
-	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RW, &tt, NULL);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &tt);
 	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		ret = -EINVAL;
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_GART, &tt);
 	}
 
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index df3a87e792f2..04e8fb795269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -79,17 +79,6 @@
 # define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
 # define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
 
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI 2
-#define NV_DMA_TARGET_AGP 3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
 /* Some object classes we care about in the drm */
 #define NV_CLASS_DMA_FROM_MEMORY 0x00000002
 #define NV_CLASS_DMA_TO_MEMORY 0x00000003
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 54af7608d45c..db32644f6114 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -247,14 +247,11 @@ nouveau_sgdma_init(struct drm_device *dev)
 	 */
 	gpuobj->cinst = gpuobj->pinst;
 
-	/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-	 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
-	 * on those cards? */
 	nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 			   (1 << 12) /* PT present */ |
 			   (0 << 13) /* PT *not* linear */ |
-			   (NV_DMA_ACCESS_RW << 14) |
-			   (NV_DMA_TARGET_PCI << 16));
+			   (0 << 14) /* RW */ |
+			   (2 << 16) /* PCI */);
 	nv_wo32(gpuobj, 4, aper_size - 1);
 	for (i = 2; i < 2 + (aper_size >> 12); i++)
 		nv_wo32(gpuobj, i * 4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 35b28406caf6..e779e9320453 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -536,7 +536,7 @@ nouveau_card_init_channel(struct drm_device *dev)
 
 	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
 				     0, dev_priv->vram_size,
-				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 				     &gpuobj);
 	if (ret)
 		goto out_err;
@@ -546,9 +546,10 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		goto out_err;
 
-	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
-					  dev_priv->gart_info.aper_size,
-					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
+	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+				     0, dev_priv->gart_info.aper_size,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+				     &gpuobj);
 	if (ret)
 		goto out_err;
 