author     Ben Skeggs <bskeggs@redhat.com>  2011-02-09 21:22:52 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2011-02-24 15:45:55 -0500
commit     d5f423947a11103c43ad26ebb680d049c2d8edd6 (patch)
tree       8ca591a7ea0de3c957f34d0e63f8b9f0a5addaa0
parent     b5e2f0769a64046cefbfc307cbe6f7fa40dddf10 (diff)
drm/nouveau: rename nouveau_vram to nouveau_mem
This structure will also be used for GART in the near future.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c   | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  |  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h   |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c   | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h   | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c      |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c    | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c      |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c    | 26
11 files changed, 70 insertions(+), 70 deletions(-)
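For orientation, a minimal sketch of the renamed structure, reconstructed only from the fields this diff touches; member order and the comments are assumptions, not the authoritative definition in nouveau_drv.h:

struct nouveau_mem {
	struct drm_device *dev;		/* owning device */

	struct nouveau_vma bar_vma;	/* BAR1 mapping set up in nouveau_ttm_io_mem_reserve() */
	struct nouveau_vma tmp_vma;	/* temporary mapping used during buffer moves */
	u8 page_shift;			/* page size of the VM mapping (queried on NV_C0) */

	struct list_head regions;	/* nouveau_mm_node allocations backing this object */
	u32 memtype;			/* storage/tile type passed to the allocator */
	u64 offset;			/* byte offset of the first region */
	u64 size;			/* size in 4 KiB pages (callers shift by 12) */
};

None of these members is VRAM-specific, which is why the same structure can later describe GART allocations as well.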
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4e74957ef2c..dcb1d72f3dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -509,7 +509,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 
 	src_offset = old_mem->start << PAGE_SHIFT;
 	if (old_mem->mem_type == TTM_PL_VRAM) {
-		struct nouveau_vram *node = old_mem->mm_node;
+		struct nouveau_mem *node = old_mem->mm_node;
 		src_offset = node->tmp_vma.offset;
 	} else {
 		src_offset += dev_priv->gart_info.aper_base;
@@ -562,7 +562,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 
 	src_offset = old_mem->start << PAGE_SHIFT;
 	if (old_mem->mem_type == TTM_PL_VRAM) {
-		struct nouveau_vram *node = old_mem->mm_node;
+		struct nouveau_mem *node = old_mem->mm_node;
 		src_offset = node->tmp_vma.offset;
 	} else {
 		src_offset += dev_priv->gart_info.aper_base;
@@ -729,7 +729,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	 * up after ttm destroys the ttm_mem_reg
 	 */
 	if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
-		struct nouveau_vram *node = old_mem->mm_node;
+		struct nouveau_mem *node = old_mem->mm_node;
 
 		ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
 				     nvbo->vma.node->type, NV_MEM_ACCESS_RO,
@@ -972,7 +972,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		break;
 	case TTM_PL_VRAM:
 	{
-		struct nouveau_vram *vram = mem->mm_node;
+		struct nouveau_mem *node = mem->mm_node;
 		u8 page_shift;
 
 		if (!dev_priv->bar1_vm) {
@@ -983,23 +983,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		}
 
 		if (dev_priv->card_type == NV_C0)
-			page_shift = vram->page_shift;
+			page_shift = node->page_shift;
 		else
 			page_shift = 12;
 
 		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
 				     page_shift, NV_MEM_ACCESS_RW,
-				     &vram->bar_vma);
+				     &node->bar_vma);
 		if (ret)
 			return ret;
 
-		nouveau_vm_map(&vram->bar_vma, vram);
+		nouveau_vm_map(&node->bar_vma, node);
 		if (ret) {
-			nouveau_vm_put(&vram->bar_vma);
+			nouveau_vm_put(&node->bar_vma);
 			return ret;
 		}
 
-		mem->bus.offset = vram->bar_vma.offset;
+		mem->bus.offset = node->bar_vma.offset;
 		if (dev_priv->card_type == NV_50) /*XXX*/
 			mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1016,16 +1016,16 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct nouveau_vram *vram = mem->mm_node;
+	struct nouveau_mem *node = mem->mm_node;
 
 	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
 		return;
 
-	if (!vram->bar_vma.node)
+	if (!node->bar_vma.node)
 		return;
 
-	nouveau_vm_unmap(&vram->bar_vma);
-	nouveau_vm_put(&vram->bar_vma);
+	nouveau_vm_unmap(&node->bar_vma);
+	nouveau_vm_put(&node->bar_vma);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 45609ee447b..73cf214ba8c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
 #include "nouveau_util.h"
 
 struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
 #include "nouveau_vm.h"
 
 #define MAX_NUM_DCB_ENTRIES 16
@@ -65,7 +65,7 @@ struct nouveau_vram;
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-struct nouveau_vram {
+struct nouveau_mem {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
@@ -510,8 +510,8 @@ struct nouveau_crypt_engine {
 struct nouveau_vram_engine {
 	int (*init)(struct drm_device *);
 	int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
-		   u32 type, struct nouveau_vram **);
-	void (*put)(struct drm_device *, struct nouveau_vram **);
+		   u32 type, struct nouveau_mem **);
+	void (*put)(struct drm_device *, struct nouveau_mem **);
 
 	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b90383fd18f..ff5fe28b467 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -710,7 +710,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct nouveau_vram *node = mem->mm_node;
+	struct nouveau_mem *node = mem->mm_node;
 	struct drm_device *dev = dev_priv->dev;
 
 	if (node->tmp_vma.node) {
@@ -718,7 +718,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 		nouveau_vm_put(&node->tmp_vma);
 	}
 
-	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
 }
 
 static int
@@ -731,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_vram *node;
+	struct nouveau_mem *node;
 	u32 size_nc = 0;
 	int ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691..1f7483aae9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
 
 int nv50_vram_init(struct drm_device *);
 int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
-		  u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+		  u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
 bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
 
 int nvc0_vram_init(struct drm_device *);
 int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
-		  u32 memtype, struct nouveau_vram **);
+		  u32 memtype, struct nouveau_mem **);
 bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86..eeaecc3743c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
 #include "nouveau_vm.h"
 
 void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 {
 	struct nouveau_vm *vm = vma->vm;
 	struct nouveau_mm_node *r;
@@ -40,7 +40,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 	u32 max = 1 << (vm->pgt_bits - bits);
 	u32 end, len;
 
-	list_for_each_entry(r, &vram->regions, rl_entry) {
+	list_for_each_entry(r, &node->regions, rl_entry) {
 		u64 phys = (u64)r->offset << 12;
 		u32 num = r->length >> bits;
 
@@ -52,7 +52,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 			end = max;
 		len = end - pte;
 
-		vm->map(vma, pgt, vram, pte, len, phys);
+		vm->map(vma, pgt, node, pte, len, phys);
 
 		num -= len;
 		pte += len;
@@ -67,9 +67,9 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 }
 
 void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 {
-	nouveau_vm_map_at(vma, 0, vram);
+	nouveau_vm_map_at(vma, 0, node);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771..ace7269b89f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,7 +67,7 @@ struct nouveau_vm {
 	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
 			struct nouveau_gpuobj *pgt[2]);
 	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
-		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+		    struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		       u32 pte, dma_addr_t *, u32 cnt);
 	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
@@ -82,8 +82,8 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
 int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
 		   u32 access, struct nouveau_vma *);
 void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
@@ -93,7 +93,7 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		     struct nouveau_gpuobj *pgt[2]);
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    u32 pte, dma_addr_t *, u32 cnt);
 void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
@@ -104,7 +104,7 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
 void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		     struct nouveau_gpuobj *pgt[2]);
 void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    u32 pte, dma_addr_t *, u32 cnt);
 void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 300285ae8e9..306d4b1f585 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
 }
 
 struct nv50_gpuobj_node {
-	struct nouveau_vram *vram;
+	struct nouveau_mem *vram;
 	struct nouveau_vma chan_vma;
 	u32 align;
 };
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 03c1a63b24f..d5e03a4a8ea 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -84,7 +84,7 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
 {
 	u32 block;
 	int i;
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad3634..ff6cbae40e5 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,42 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 }
 
 void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *this;
-	struct nouveau_vram *vram;
+	struct nouveau_mem *mem;
 
-	vram = *pvram;
-	*pvram = NULL;
-	if (unlikely(vram == NULL))
+	mem = *pmem;
+	*pmem = NULL;
+	if (unlikely(mem == NULL))
 		return;
 
 	mutex_lock(&mm->mutex);
-	while (!list_empty(&vram->regions)) {
-		this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	while (!list_empty(&mem->regions)) {
+		this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
 
 		list_del(&this->rl_entry);
 		nouveau_mm_put(mm, this);
 	}
 	mutex_unlock(&mm->mutex);
 
-	kfree(vram);
+	kfree(mem);
 }
 
 int
 nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
-	      u32 type, struct nouveau_vram **pvram)
+	      u32 type, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
-	struct nouveau_vram *vram;
+	struct nouveau_mem *mem;
 	int ret;
 
 	if (!types[type])
@@ -92,32 +92,32 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
 	align >>= 12;
 	size_nc >>= 12;
 
-	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-	if (!vram)
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&vram->regions);
-	vram->dev = dev_priv->dev;
-	vram->memtype = type;
-	vram->size = size;
+	INIT_LIST_HEAD(&mem->regions);
+	mem->dev = dev_priv->dev;
+	mem->memtype = type;
+	mem->size = size;
 
 	mutex_lock(&mm->mutex);
 	do {
 		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
 		if (ret) {
 			mutex_unlock(&mm->mutex);
-			nv50_vram_del(dev, &vram);
+			nv50_vram_del(dev, &mem);
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &vram->regions);
+		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
 	mutex_unlock(&mm->mutex);
 
-	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-	vram->offset = (u64)r->offset << 12;
-	*pvram = vram;
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5..2a06cb86312 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 
 void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
 {
 	u32 next = 1 << (vma->node->type - 8);
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index b2ef210ae54..6d777a2a04d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -58,46 +58,46 @@ nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 
 int
 nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
-	      u32 type, struct nouveau_vram **pvram)
+	      u32 type, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
-	struct nouveau_vram *vram;
+	struct nouveau_mem *mem;
 	int ret;
 
 	size >>= 12;
 	align >>= 12;
 	ncmin >>= 12;
 
-	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-	if (!vram)
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&vram->regions);
-	vram->dev = dev_priv->dev;
-	vram->memtype = type;
-	vram->size = size;
+	INIT_LIST_HEAD(&mem->regions);
+	mem->dev = dev_priv->dev;
+	mem->memtype = type;
+	mem->size = size;
 
 	mutex_lock(&mm->mutex);
 	do {
 		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
 		if (ret) {
 			mutex_unlock(&mm->mutex);
-			nv50_vram_del(dev, &vram);
+			nv50_vram_del(dev, &mem);
 			return ret;
 		}
 
-		list_add_tail(&r->rl_entry, &vram->regions);
+		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
 	mutex_unlock(&mm->mutex);
 
-	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-	vram->offset = (u64)r->offset << 12;
-	*pvram = vram;
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
 	return 0;
 }
 