author    Ben Skeggs <bskeggs@redhat.com>  2012-07-19 03:54:21 -0400
committer Ben Skeggs <bskeggs@redhat.com>  2012-10-02 23:12:54 -0400
commit    bc9e7b9a61e9e92ddb58920cb2cb5c2e2825ca8a (patch)
tree      d7384189e54e2d8fa4630c05e2abc1ac7c7900e5
parent    a73c5c526a8a39b2e61709c753d44be597c9a4c0 (diff)
drm/nouveau: move some more code around to more appropriate places
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  | 111
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 332
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 229
4 files changed, 336 insertions(+), 342 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 9f5696a1fbb..c3e66ae04c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,10 +36,115 @@
 #include <core/mm.h>
 #include "nouveau_fence.h"
 #include <core/ramht.h>
+#include <engine/fifo.h>
 
 #include <linux/log2.h>
 #include <linux/slab.h>
 
+/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_bo_update_tile_region(struct drm_device *dev,
+                           struct nouveau_tile_reg *tilereg, uint32_t addr,
+                           uint32_t size, uint32_t pitch, uint32_t flags)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        int i = tilereg - dev_priv->tile.reg, j;
+        struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
+        unsigned long save;
+
+        nouveau_fence_unref(&tilereg->fence);
+
+        if (tile->pitch)
+                nvfb_tile_fini(dev, i);
+
+        if (pitch)
+                nvfb_tile_init(dev, i, addr, size, pitch, flags);
+
+        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
+        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+        nv04_fifo_cache_pull(dev, false);
+
+        nouveau_wait_for_idle(dev);
+
+        nvfb_tile_prog(dev, i);
+        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
+                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
+                        dev_priv->eng[j]->set_tile_region(dev, i);
+        }
+
+        nv04_fifo_cache_pull(dev, true);
+        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+}
+
+static struct nouveau_tile_reg *
+nv10_bo_get_tile_region(struct drm_device *dev, int i)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+        spin_lock(&dev_priv->tile.lock);
+
+        if (!tile->used &&
+            (!tile->fence || nouveau_fence_done(tile->fence)))
+                tile->used = true;
+        else
+                tile = NULL;
+
+        spin_unlock(&dev_priv->tile.lock);
+        return tile;
+}
+
+static void
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+                        struct nouveau_fence *fence)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+        if (tile) {
+                spin_lock(&dev_priv->tile.lock);
+                if (fence) {
+                        /* Mark it as pending. */
+                        tile->fence = fence;
+                        nouveau_fence_ref(fence);
+                }
+
+                tile->used = false;
+                spin_unlock(&dev_priv->tile.lock);
+        }
+}
+
+static struct nouveau_tile_reg *
+nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+                   uint32_t pitch, uint32_t flags)
+{
+        struct nouveau_tile_reg *tile, *found = NULL;
+        int i;
+
+        for (i = 0; i < nvfb_tile_nr(dev); i++) {
+                tile = nv10_bo_get_tile_region(dev, i);
+
+                if (pitch && !found) {
+                        found = tile;
+                        continue;
+
+                } else if (tile && nvfb_tile(dev, i)->pitch) {
+                        /* Kill an unused tile region. */
+                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
+                }
+
+                nv10_bo_put_tile_region(dev, tile, NULL);
+        }
+
+        if (found)
+                nv10_bo_update_tile_region(dev, found, addr, size,
+                                           pitch, flags);
+        return found;
+}
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
@@ -50,7 +155,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
 
@@ -1075,7 +1180,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		return 0;
 
 	if (dev_priv->card_type >= NV_10) {
-		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
 					       nvbo->tile_mode,
 					       nvbo->tile_flags);
 	}
@@ -1091,7 +1196,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = dev_priv->dev;
 
-	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
 	*old_tile = new_tile;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 42ea8ad5b91..08ce60be3f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -596,12 +596,6 @@ extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
 extern void nouveau_mem_timing_read(struct drm_device *,
                                     struct nouveau_pm_memtiming *);
 extern int nouveau_mem_vbios_type(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(
-        struct drm_device *dev, uint32_t addr, uint32_t size,
-        uint32_t pitch, uint32_t flags);
-extern void nv10_mem_put_tile_region(struct drm_device *dev,
-                                     struct nouveau_tile_reg *tile,
-                                     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
 extern const struct ttm_mem_type_manager_func nv04_gart_manager;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 48131ceeeb8..73176bcd1b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -42,110 +42,6 @@
42#include "nouveau_fence.h" 42#include "nouveau_fence.h"
43 43
44/* 44/*
45 * NV10-NV40 tiling helpers
46 */
47
48static void
49nv10_mem_update_tile_region(struct drm_device *dev,
50 struct nouveau_tile_reg *tilereg, uint32_t addr,
51 uint32_t size, uint32_t pitch, uint32_t flags)
52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 int i = tilereg - dev_priv->tile.reg, j;
55 struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
56 unsigned long save;
57
58 nouveau_fence_unref(&tilereg->fence);
59
60 if (tile->pitch)
61 nvfb_tile_fini(dev, i);
62
63 if (pitch)
64 nvfb_tile_init(dev, i, addr, size, pitch, flags);
65
66 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
67 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
68 nv04_fifo_cache_pull(dev, false);
69
70 nouveau_wait_for_idle(dev);
71
72 nvfb_tile_prog(dev, i);
73 for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
74 if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
75 dev_priv->eng[j]->set_tile_region(dev, i);
76 }
77
78 nv04_fifo_cache_pull(dev, true);
79 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
81}
82
83static struct nouveau_tile_reg *
84nv10_mem_get_tile_region(struct drm_device *dev, int i)
85{
86 struct drm_nouveau_private *dev_priv = dev->dev_private;
87 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
88
89 spin_lock(&dev_priv->tile.lock);
90
91 if (!tile->used &&
92 (!tile->fence || nouveau_fence_done(tile->fence)))
93 tile->used = true;
94 else
95 tile = NULL;
96
97 spin_unlock(&dev_priv->tile.lock);
98 return tile;
99}
100
101void
102nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
103 struct nouveau_fence *fence)
104{
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106
107 if (tile) {
108 spin_lock(&dev_priv->tile.lock);
109 if (fence) {
110 /* Mark it as pending. */
111 tile->fence = fence;
112 nouveau_fence_ref(fence);
113 }
114
115 tile->used = false;
116 spin_unlock(&dev_priv->tile.lock);
117 }
118}
119
120struct nouveau_tile_reg *
121nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
122 uint32_t pitch, uint32_t flags)
123{
124 struct nouveau_tile_reg *tile, *found = NULL;
125 int i;
126
127 for (i = 0; i < nvfb_tile_nr(dev); i++) {
128 tile = nv10_mem_get_tile_region(dev, i);
129
130 if (pitch && !found) {
131 found = tile;
132 continue;
133
134 } else if (tile && nvfb_tile(dev, i)->pitch) {
135 /* Kill an unused tile region. */
136 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
137 }
138
139 nv10_mem_put_tile_region(dev, tile, NULL);
140 }
141
142 if (found)
143 nv10_mem_update_tile_region(dev, found, addr, size,
144 pitch, flags);
145 return found;
146}
147
148/*
149 * Cleanup everything 45 * Cleanup everything
150 */ 46 */
151void 47void
@@ -897,231 +793,3 @@ nouveau_mem_vbios_type(struct drm_device *dev)
 	}
 	return NV_MEM_TYPE_UNKNOWN;
 }
-
-static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-        /* nothing to do */
-        return 0;
-}
-
-static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
-{
-        /* nothing to do */
-        return 0;
-}
-
-static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
-{
-        if (node->vma[0].node) {
-                nouveau_vm_unmap(&node->vma[0]);
-                nouveau_vm_put(&node->vma[0]);
-        }
-
-        if (node->vma[1].node) {
-                nouveau_vm_unmap(&node->vma[1]);
-                nouveau_vm_put(&node->vma[1]);
-        }
-}
-
-static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-        struct drm_device *dev = dev_priv->dev;
-
-        nouveau_mem_node_cleanup(mem->mm_node);
-        nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
-}
-
-static int
-nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
-                         struct ttm_buffer_object *bo,
-                         struct ttm_placement *placement,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-        struct drm_device *dev = dev_priv->dev;
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nouveau_mem *node;
-        u32 size_nc = 0;
-        int ret;
-
-        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-                size_nc = 1 << nvbo->page_shift;
-
-        ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
-                            mem->page_alignment << PAGE_SHIFT, size_nc,
-                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
-        if (ret) {
-                mem->mm_node = NULL;
-                return (ret == -ENOSPC) ? 0 : ret;
-        }
-
-        node->page_shift = nvbo->page_shift;
-
-        mem->mm_node = node;
-        mem->start = node->offset >> PAGE_SHIFT;
-        return 0;
-}
-
-void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-        struct nouveau_mm *mm = man->priv;
-        struct nouveau_mm_node *r;
-        u32 total = 0, free = 0;
-
-        mutex_lock(&mm->mutex);
-        list_for_each_entry(r, &mm->nodes, nl_entry) {
-                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-                       prefix, r->type, ((u64)r->offset << 12),
-                       (((u64)r->offset + r->length) << 12));
-
-                total += r->length;
-                if (!r->type)
-                        free += r->length;
-        }
-        mutex_unlock(&mm->mutex);
-
-        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
-               prefix, (u64)total << 12, (u64)free << 12);
-        printk(KERN_DEBUG "%s block: 0x%08x\n",
-               prefix, mm->block_size << 12);
-}
-
-const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-        nouveau_vram_manager_init,
-        nouveau_vram_manager_fini,
-        nouveau_vram_manager_new,
-        nouveau_vram_manager_del,
-        nouveau_vram_manager_debug
-};
-
-static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-        return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-        return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
-{
-        nouveau_mem_node_cleanup(mem->mm_node);
-        kfree(mem->mm_node);
-        mem->mm_node = NULL;
-}
-
-static int
-nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
-                         struct ttm_buffer_object *bo,
-                         struct ttm_placement *placement,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct nouveau_mem *node;
-
-        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
-                     dev_priv->gart_info.aper_size))
-                return -ENOMEM;
-
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-        node->page_shift = 12;
-
-        mem->mm_node = node;
-        mem->start = 0;
-        return 0;
-}
-
-void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-}
-
-const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-        nouveau_gart_manager_init,
-        nouveau_gart_manager_fini,
-        nouveau_gart_manager_new,
-        nouveau_gart_manager_del,
-        nouveau_gart_manager_debug
-};
-
-static int
-nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-        struct drm_device *dev = dev_priv->dev;
-        man->priv = nv04vm_ref(dev);
-        return (man->priv != NULL) ? 0 : -ENODEV;
-}
-
-static int
-nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-        struct nouveau_vm *vm = man->priv;
-        nouveau_vm_ref(NULL, &vm, NULL);
-        man->priv = NULL;
-        return 0;
-}
-
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
-{
-        struct nouveau_mem *node = mem->mm_node;
-        if (node->vma[0].node)
-                nouveau_vm_put(&node->vma[0]);
-        kfree(mem->mm_node);
-        mem->mm_node = NULL;
-}
-
-static int
-nv04_gart_manager_new(struct ttm_mem_type_manager *man,
-                      struct ttm_buffer_object *bo,
-                      struct ttm_placement *placement,
-                      struct ttm_mem_reg *mem)
-{
-        struct nouveau_mem *node;
-        int ret;
-
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-
-        node->page_shift = 12;
-
-        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
-                             NV_MEM_ACCESS_RW, &node->vma[0]);
-        if (ret) {
-                kfree(node);
-                return ret;
-        }
-
-        mem->mm_node = node;
-        mem->start = node->vma[0].offset >> PAGE_SHIFT;
-        return 0;
-}
-
-void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-}
-
-const struct ttm_mem_type_manager_func nv04_gart_manager = {
-        nv04_gart_manager_init,
-        nv04_gart_manager_fini,
-        nv04_gart_manager_new,
-        nv04_gart_manager_del,
-        nv04_gart_manager_debug
-};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bd35f930568..e729535e9b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,234 @@
 
 #include "nouveau_drv.h"
 
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        /* nothing to do */
+        return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+        /* nothing to do */
+        return 0;
+}
+
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+        if (node->vma[0].node) {
+                nouveau_vm_unmap(&node->vma[0]);
+                nouveau_vm_put(&node->vma[0]);
+        }
+
+        if (node->vma[1].node) {
+                nouveau_vm_unmap(&node->vma[1]);
+                nouveau_vm_put(&node->vma[1]);
+        }
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+
+        nouveau_mem_node_cleanup(mem->mm_node);
+        nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_mem *node;
+        u32 size_nc = 0;
+        int ret;
+
+        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+                size_nc = 1 << nvbo->page_shift;
+
+        ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
+                            mem->page_alignment << PAGE_SHIFT, size_nc,
+                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
+        if (ret) {
+                mem->mm_node = NULL;
+                return (ret == -ENOSPC) ? 0 : ret;
+        }
+
+        node->page_shift = nvbo->page_shift;
+
+        mem->mm_node = node;
+        mem->start = node->offset >> PAGE_SHIFT;
+        return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+        struct nouveau_mm *mm = man->priv;
+        struct nouveau_mm_node *r;
+        u32 total = 0, free = 0;
+
+        mutex_lock(&mm->mutex);
+        list_for_each_entry(r, &mm->nodes, nl_entry) {
+                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+                       prefix, r->type, ((u64)r->offset << 12),
+                       (((u64)r->offset + r->length) << 12));
+
+                total += r->length;
+                if (!r->type)
+                        free += r->length;
+        }
+        mutex_unlock(&mm->mutex);
+
+        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+               prefix, (u64)total << 12, (u64)free << 12);
+        printk(KERN_DEBUG "%s block: 0x%08x\n",
+               prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+        nouveau_vram_manager_init,
+        nouveau_vram_manager_fini,
+        nouveau_vram_manager_new,
+        nouveau_vram_manager_del,
+        nouveau_vram_manager_debug
+};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+        return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem)
+{
+        nouveau_mem_node_cleanup(mem->mm_node);
+        kfree(mem->mm_node);
+        mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_mem *node;
+
+        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+                     dev_priv->gart_info.aper_size))
+                return -ENOMEM;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+        node->page_shift = 12;
+
+        mem->mm_node = node;
+        mem->start = 0;
+        return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+        nouveau_gart_manager_init,
+        nouveau_gart_manager_fini,
+        nouveau_gart_manager_new,
+        nouveau_gart_manager_del,
+        nouveau_gart_manager_debug
+};
+
+static int
+nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+        struct drm_device *dev = dev_priv->dev;
+        man->priv = nv04vm_ref(dev);
+        return (man->priv != NULL) ? 0 : -ENODEV;
+}
+
+static int
+nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+        struct nouveau_vm *vm = man->priv;
+        nouveau_vm_ref(NULL, &vm, NULL);
+        man->priv = NULL;
+        return 0;
+}
+
+static void
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+{
+        struct nouveau_mem *node = mem->mm_node;
+        if (node->vma[0].node)
+                nouveau_vm_put(&node->vma[0]);
+        kfree(mem->mm_node);
+        mem->mm_node = NULL;
+}
+
+static int
+nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+                      struct ttm_buffer_object *bo,
+                      struct ttm_placement *placement,
+                      struct ttm_mem_reg *mem)
+{
+        struct nouveau_mem *node;
+        int ret;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+
+        node->page_shift = 12;
+
+        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+                             NV_MEM_ACCESS_RW, &node->vma[0]);
+        if (ret) {
+                kfree(node);
+                return ret;
+        }
+
+        mem->mm_node = node;
+        mem->start = node->vma[0].offset >> PAGE_SHIFT;
+        return 0;
+}
+
+void
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nv04_gart_manager = {
+        nv04_gart_manager_init,
+        nv04_gart_manager_fini,
+        nv04_gart_manager_new,
+        nv04_gart_manager_del,
+        nv04_gart_manager_debug
+};
+
 int
 nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -100,4 +328,3 @@ nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
 	drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
 	dev_priv->ttm.mem_global_ref.release = NULL;
 }
-