about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/nouveau/nouveau_object.c
diff options
context:
space:
mode:
author	Ben Skeggs <bskeggs@redhat.com>	2010-11-14 20:54:21 -0500
committer	Ben Skeggs <bskeggs@redhat.com>	2010-12-07 22:48:13 -0500
commit	4c1361429841344ce4d164492ee7620cf3286eb7 (patch)
tree	7cd23e9e99299b3265b2e59d49e3aa5b77a465f0 /drivers/gpu/drm/nouveau/nouveau_object.c
parent	f869ef882382a4b6cb42d259e399aeec3781d4bb (diff)
drm/nv50: implement global channel address space on new VM code
As of this commit, it's guaranteed that if an object is in VRAM, its GPU virtual address will be constant.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_object.c	| 47
1 file changed, 10 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd1859f7d8b0..573fd7316d63 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,7 @@
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_ramht.h" 37#include "nouveau_ramht.h"
38#include "nouveau_vm.h"
38 39
39struct nouveau_gpuobj_method { 40struct nouveau_gpuobj_method {
40 struct list_head head; 41 struct list_head head;
@@ -770,9 +771,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
770{ 771{
771 struct drm_device *dev = chan->dev; 772 struct drm_device *dev = chan->dev;
772 struct drm_nouveau_private *dev_priv = dev->dev_private; 773 struct drm_nouveau_private *dev_priv = dev->dev_private;
773 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
774 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 774 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
775 int ret, i; 775 int ret;
776 776
777 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 777 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
778 778
@@ -783,16 +783,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
783 return ret; 783 return ret;
784 } 784 }
785 785
786 /* NV50 VM 786 /* NV50/NVC0 VM
787 * - Allocate per-channel page-directory 787 * - Allocate per-channel page-directory
788 * - Map GART and VRAM into the channel's address space at the 788 * - Link with shared channel VM
789 * locations determined during init.
790 */ 789 */
791 if (dev_priv->card_type >= NV_50) { 790 if (dev_priv->chan_vm) {
792 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 791 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
793 u64 vm_vinst = chan->ramin->vinst + pgd_offs; 792 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
794 u32 vm_pinst = chan->ramin->pinst; 793 u32 vm_pinst = chan->ramin->pinst;
795 u32 pde;
796 794
797 if (vm_pinst != ~0) 795 if (vm_pinst != ~0)
798 vm_pinst += pgd_offs; 796 vm_pinst += pgd_offs;
@@ -801,29 +799,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
801 0, &chan->vm_pd); 799 0, &chan->vm_pd);
802 if (ret) 800 if (ret)
803 return ret; 801 return ret;
804 for (i = 0; i < 0x4000; i += 8) {
805 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
806 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
807 }
808
809 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
810 &chan->vm_gart_pt);
811 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
812 nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
813 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
814
815 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
816 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
817 nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
818 &chan->vm_vram_pt[i]);
819
820 nv_wo32(chan->vm_pd, pde + 0,
821 chan->vm_vram_pt[i]->vinst | 0x61);
822 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
823 pde += 8;
824 }
825 802
826 instmem->flush(dev); 803 nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
804 chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
827 } 805 }
828 806
829 /* RAMHT */ 807 /* RAMHT */
@@ -846,8 +824,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
846 /* VRAM ctxdma */ 824 /* VRAM ctxdma */
847 if (dev_priv->card_type >= NV_50) { 825 if (dev_priv->card_type >= NV_50) {
848 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 826 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
849 0, dev_priv->vm_end, 827 0, (1ULL << 40), NV_MEM_ACCESS_RW,
850 NV_MEM_ACCESS_RW,
851 NV_MEM_TARGET_VM, &vram); 828 NV_MEM_TARGET_VM, &vram);
852 if (ret) { 829 if (ret) {
853 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 830 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
@@ -874,8 +851,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
874 /* TT memory ctxdma */ 851 /* TT memory ctxdma */
875 if (dev_priv->card_type >= NV_50) { 852 if (dev_priv->card_type >= NV_50) {
876 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 853 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
877 0, dev_priv->vm_end, 854 0, (1ULL << 40), NV_MEM_ACCESS_RW,
878 NV_MEM_ACCESS_RW,
879 NV_MEM_TARGET_VM, &tt); 855 NV_MEM_TARGET_VM, &tt);
880 } else { 856 } else {
881 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 857 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -902,9 +878,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
902void 878void
903nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 879nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
904{ 880{
905 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
906 struct drm_device *dev = chan->dev; 881 struct drm_device *dev = chan->dev;
907 int i;
908 882
909 NV_DEBUG(dev, "ch%d\n", chan->id); 883 NV_DEBUG(dev, "ch%d\n", chan->id);
910 884
@@ -913,10 +887,9 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
913 887
914 nouveau_ramht_ref(NULL, &chan->ramht, chan); 888 nouveau_ramht_ref(NULL, &chan->ramht, chan);
915 889
890 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
916 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 891 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
917 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); 892 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
918 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
919 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
920 893
921 if (chan->ramin_heap.free_stack.next) 894 if (chan->ramin_heap.free_stack.next)
922 drm_mm_takedown(&chan->ramin_heap); 895 drm_mm_takedown(&chan->ramin_heap);