author     Ben Skeggs <bskeggs@redhat.com>  2010-09-01 01:24:29 -0400
committer  Ben Skeggs <bskeggs@redhat.com>  2010-09-24 02:20:00 -0400
commit     b3beb167af0de6d7cb03aed0687eca645cfd06a6
tree       2699384f75536511e57a862b206c83b4405fa197  /drivers/gpu/drm/nouveau/nouveau_object.c
parent     479dcaea09bf17e8de7005015345e4266723666d
drm/nouveau: modify object accessors, offset in bytes rather than dwords
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 92
1 file changed, 53 insertions(+), 39 deletions(-)
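A quick orientation before the diff: the accessors used to take the drm_device plus a dword index into the object, and now take only the gpuobj (which carries its drm_device) plus a byte offset. The prototypes below are a rough sketch paraphrased from the call sites and the new helper definitions in this patch; the real declarations live in the driver headers, which this file-limited view does not show, so parameter names and exact types may differ.

    /* old style: dword index, drm_device passed explicitly */
    u32  nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, u32 index);
    void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, u32 index, u32 val);

    /* new style: byte offset, drm_device taken from gpuobj->dev */
    u32  nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset);
    void nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val);

    /* example from this patch: writing what used to be dword 5
     *   old: nv_wo32(dev, *gpuobj, 5, flags5);
     *   new: nv_wo32(*gpuobj, 20, flags5);    20 == 5 * 4 bytes
     */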
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e658aa2dbe67..52db13cd75b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -88,6 +88,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
         if (!gpuobj)
                 return -ENOMEM;
         NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+        gpuobj->dev = dev;
         gpuobj->flags = flags;
         gpuobj->im_channel = chan;
 
@@ -134,7 +135,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                 int i;
 
                 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                        nv_wo32(dev, gpuobj, i/4, 0);
+                        nv_wo32(gpuobj, i, 0);
                 engine->instmem.flush(dev);
         }
 
@@ -224,7 +225,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 
         if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                        nv_wo32(dev, gpuobj, i/4, 0);
+                        nv_wo32(gpuobj, i, 0);
                 engine->instmem.flush(dev);
         }
 
@@ -435,6 +436,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
         if (!gpuobj)
                 return -ENOMEM;
         NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+        gpuobj->dev = dev;
         gpuobj->im_channel = NULL;
         gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
 
@@ -458,7 +460,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 
         if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                        nv_wo32(dev, gpuobj, i/4, 0);
+                        nv_wo32(gpuobj, i, 0);
                 dev_priv->engine.instmem.flush(dev);
         }
 
@@ -555,14 +557,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                 adjust = offset &  0x00000fff;
                 frame  = offset & ~0x00000fff;
 
-                nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
-                                          (adjust << 20) |
-                                          (access << 14) |
-                                          (target << 16) |
-                                          class));
-                nv_wo32(dev, *gpuobj, 1, size - 1);
-                nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
-                nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+                nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
+                                     (access << 14) | (target << 16) |
+                                      class));
+                nv_wo32(*gpuobj, 4, size - 1);
+                nv_wo32(*gpuobj, 8, frame | pte_flags);
+                nv_wo32(*gpuobj, 12, frame | pte_flags);
         } else {
                 uint64_t limit = offset + size - 1;
                 uint32_t flags0, flags5;
@@ -575,12 +575,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                         flags5 = 0x00080000;
                 }
 
-                nv_wo32(dev, *gpuobj, 0, flags0 | class);
-                nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
-                nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
-                nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
-                        (upper_32_bits(offset) & 0xff));
-                nv_wo32(dev, *gpuobj, 5, flags5);
+                nv_wo32(*gpuobj, 0, flags0 | class);
+                nv_wo32(*gpuobj, 4, lower_32_bits(limit));
+                nv_wo32(*gpuobj, 8, lower_32_bits(offset));
+                nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
+                        (upper_32_bits(offset) & 0xff));
+                nv_wo32(*gpuobj, 20, flags5);
         }
 
         instmem->flush(dev);
@@ -699,25 +699,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
         }
 
         if (dev_priv->card_type >= NV_50) {
-                nv_wo32(dev, *gpuobj, 0, class);
-                nv_wo32(dev, *gpuobj, 5, 0x00010000);
+                nv_wo32(*gpuobj, 0, class);
+                nv_wo32(*gpuobj, 20, 0x00010000);
         } else {
                 switch (class) {
                 case NV_CLASS_NULL:
-                        nv_wo32(dev, *gpuobj, 0, 0x00001030);
-                        nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+                        nv_wo32(*gpuobj, 0, 0x00001030);
+                        nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
                         break;
                 default:
                         if (dev_priv->card_type >= NV_40) {
-                                nv_wo32(dev, *gpuobj, 0, class);
+                                nv_wo32(*gpuobj, 0, class);
 #ifdef __BIG_ENDIAN
-                                nv_wo32(dev, *gpuobj, 2, 0x01000000);
+                                nv_wo32(*gpuobj, 8, 0x01000000);
 #endif
                         } else {
 #ifdef __BIG_ENDIAN
-                                nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+                                nv_wo32(*gpuobj, 0, class | 0x00080000);
 #else
-                                nv_wo32(dev, *gpuobj, 0, class);
+                                nv_wo32(*gpuobj, 0, class);
 #endif
                         }
                 }
@@ -836,21 +836,20 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                 if (ret)
                         return ret;
                 for (i = 0; i < 0x4000; i += 8) {
-                        nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
-                        nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+                        nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+                        nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
                 }
 
-                pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
+                pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
                 ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
                                              dev_priv->gart_info.sg_ctxdma,
                                              &chan->vm_gart_pt);
                 if (ret)
                         return ret;
-                nv_wo32(dev, chan->vm_pd, pde++,
-                        chan->vm_gart_pt->instance | 0x03);
-                nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+                nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->instance | 3);
+                nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
 
-                pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+                pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
                 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
                         ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
                                                      dev_priv->vm_vram_pt[i],
@@ -858,9 +857,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                         if (ret)
                                 return ret;
 
-                        nv_wo32(dev, chan->vm_pd, pde++,
+                        nv_wo32(chan->vm_pd, pde + 0,
                                 chan->vm_vram_pt[i]->instance | 0x61);
-                        nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+                        nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
+                        pde += 8;
                 }
 
                 instmem->flush(dev);
@@ -996,8 +996,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
                         return -ENOMEM;
                 }
 
-                for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-                        gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+                for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+                        gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
         }
 
         return 0;
@@ -1042,8 +1042,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
                 if (!gpuobj->im_backing_suspend)
                         continue;
 
-                for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-                        nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+                for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+                        nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
                 dev_priv->engine.instmem.flush(dev);
         }
 
@@ -1120,3 +1120,17 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 
         return 0;
 }
+
+u32
+nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
+{
+        struct drm_device *dev = gpuobj->dev;
+        return nv_ri32(dev, gpuobj->im_pramin->start + offset);
+}
+
+void
+nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
+{
+        struct drm_device *dev = gpuobj->dev;
+        nv_wi32(dev, gpuobj->im_pramin->start + offset, val);
+}
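As a usage illustration only (not part of the patch), a caller clearing an object with the new interface steps the offset in bytes itself, mirroring the NVOBJ_FLAG_ZERO_ALLOC/ZERO_FREE loops above; the example_zero_gpuobj() name is made up for this sketch, and the instmem flush the patch performs after each loop is left out:

    /* illustrative sketch: zero a gpuobj's PRAMIN backing using byte offsets */
    static void example_zero_gpuobj(struct nouveau_gpuobj *gpuobj)
    {
            u32 i;

            /* im_pramin->size is in bytes; nv_wo32() now takes a byte offset */
            for (i = 0; i < gpuobj->im_pramin->size; i += 4)
                    nv_wo32(gpuobj, i, 0);
    }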