author	Ben Skeggs <bskeggs@redhat.com>	2010-12-14 20:04:39 -0500
committer	Ben Skeggs <bskeggs@redhat.com>	2010-12-21 02:17:01 -0500
commit	3ee0128140eed7d32b785a335099a2ec38258283 (patch)
tree	073b55ee965a27e521ad4d4ae6ac7deecbb903c9 /drivers/gpu
parent	147dc38e976f4dd6d888d585649e724a3e82a9b2 (diff)
drm/nouveau: modify vm to accommodate dual page tables for nvc0
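
Each PDE previously tracked a single page table object, with its page size
recorded in nouveau_vm_pgt.page_shift. nvc0 needs a small-page and a
large-page table to be able to coexist under the same PDE, so nouveau_vm_pgt
now carries two slots (obj[2] / refcount[2]) indexed by page size, and
map_pgt() is handed both slots so it can (re)write the PDE in one call,
replacing the separate unmap_pgt() hook. For nv50 the page shifts (12/16)
and pgt_bits (29) move from nouveau_vm_new() arguments into the per-card
setup.

A minimal sketch of the new per-PDE bookkeeping and the "big" index the
callers use; the pgt_for_vma() helper is illustrative only, not part of
this patch:

	struct nouveau_vm_pgt {
		struct nouveau_gpuobj *obj[2];	/* [0] = small pages, [1] = large pages */
		u32 refcount[2];
	};

	static struct nouveau_gpuobj *
	pgt_for_vma(struct nouveau_vm *vm, struct nouveau_vma *vma, u32 pde)
	{
		/* use slot 1 ("big") when the vma's page size differs from
		 * the vm's small-page shift, slot 0 otherwise */
		int big = (vma->node->type != vm->spg_shift);
		return vm->pgt[pde - vm->fpde].obj[big];
	}
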
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.c	59
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_vm.h	16
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_instmem.c	16
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_vm.c	49
4 files changed, 65 insertions, 75 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 07ab1749cf7d..b023a64c27d8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -32,6 +32,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 {
 	struct nouveau_vm *vm = vma->vm;
 	struct nouveau_mm_node *r;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
@@ -44,7 +45,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 		u32 num = r->length >> bits;
 
 		while (num) {
-			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
@@ -76,6 +77,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		   dma_addr_t *list)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -85,7 +87,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -110,6 +112,7 @@ void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -119,7 +122,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -146,7 +149,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
 }
 
 static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 {
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_vm_pgt *vpgt;
@@ -155,16 +158,16 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
 
 	for (pde = fpde; pde <= lpde; pde++) {
 		vpgt = &vm->pgt[pde - vm->fpde];
-		if (--vpgt->refcount)
+		if (--vpgt->refcount[big])
 			continue;
 
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;
+
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			vm->unmap_pgt(vpgd->obj, pde);
+			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		pgt = vpgt->obj;
-		vpgt->obj = NULL;
-
 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&vm->mm->mutex);
@@ -177,6 +180,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_gpuobj *pgt;
+	int big = (type != vm->spg_shift);
 	u32 pgt_size;
 	int ret;
 
@@ -191,19 +195,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount++)) {
+	if (unlikely(vpgt->refcount[big]++)) {
 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&vm->mm->mutex);
 		return 0;
 	}
 
+	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		vm->map_pgt(vpgd->obj, type, pde, pgt);
+		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}
 
-	vpgt->page_shift = type;
-	vpgt->obj = pgt;
 	return 0;
 }
 
@@ -227,16 +230,17 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vm->spg_shift);
 
-		if (likely(vpgt->refcount)) {
-			vpgt->refcount++;
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
 			continue;
 		}
 
 		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
 		if (ret) {
 			if (pde != fpde)
-				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nouveau_mm_put(vm->mm, vma->node);
 			mutex_unlock(&vm->mm->mutex);
 			vma->node = NULL;
@@ -263,21 +267,20 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
 	mutex_lock(&vm->mm->mutex);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
 	nouveau_mm_put(vm->mm, vma->node);
 	vma->node = NULL;
-	nouveau_vm_unmap_pgt(vm, fpde, lpde);
 	mutex_unlock(&vm->mm->mutex);
 }
 
 int
 nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
-	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
 	       struct nouveau_vm **pvm)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
-	u32 block;
+	u32 block, pgt_bits;
 	int ret;
 
 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
@@ -286,11 +289,13 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
 
 	if (dev_priv->card_type == NV_50) {
 		vm->map_pgt = nv50_vm_map_pgt;
-		vm->unmap_pgt = nv50_vm_unmap_pgt;
 		vm->map = nv50_vm_map;
 		vm->map_sg = nv50_vm_map_sg;
 		vm->unmap = nv50_vm_unmap;
 		vm->flush = nv50_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 16;
+		pgt_bits = 29;
 	} else {
 		kfree(vm);
 		return -ENOSYS;
@@ -308,8 +313,6 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
 	vm->dev = dev;
 	vm->refcount = 1;
 	vm->pgt_bits = pgt_bits - 12;
-	vm->spg_shift = spg_shift;
-	vm->lpg_shift = lpg_shift;
 
 	block = (1 << pgt_bits);
 	if (length < block)
@@ -342,16 +345,8 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
 	mutex_lock(&vm->mm->mutex);
-	for (i = vm->fpde; i <= vm->lpde; i++) {
-		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
-
-		if (!vpgt->obj) {
-			vm->unmap_pgt(pgd, i);
-			continue;
-		}
-
-		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
-	}
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
 	mutex_unlock(&vm->mm->mutex);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index b6755cfa7b71..105b6f65f19d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -31,9 +31,8 @@
 #include "nouveau_mm.h"
 
 struct nouveau_vm_pgt {
-	struct nouveau_gpuobj *obj;
-	u32 page_shift;
-	u32 refcount;
+	struct nouveau_gpuobj *obj[2];
+	u32 refcount[2];
 };
 
 struct nouveau_vm_pgd {
@@ -65,9 +64,8 @@ struct nouveau_vm {
 	u8 spg_shift;
 	u8 lpg_shift;
 
-	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-			struct nouveau_gpuobj *pgt);
-	void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+			struct nouveau_gpuobj *pgt[2]);
 	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -78,7 +76,6 @@ struct nouveau_vm {
 
 /* nouveau_vm.c */
 int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
-		   u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
 		   struct nouveau_vm **);
 int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
 		   struct nouveau_gpuobj *pgd);
@@ -93,9 +90,8 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 		       dma_addr_t *);
 
 /* nv50_vm.c */
-void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-		     struct nouveau_gpuobj *pgt);
-void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
 		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index adac4da98f7e..2e1b1cd19a4b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -151,20 +151,19 @@ nv50_instmem_init(struct drm_device *dev)
 
 	/* BAR3 */
 	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
-			     29, 12, 16, &dev_priv->bar3_vm);
+			     &dev_priv->bar3_vm);
 	if (ret)
 		goto error;
 
 	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
 				 0x1000, NVOBJ_FLAG_DONT_MAP |
 				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &dev_priv->bar3_vm->pgt[0].obj);
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
 	if (ret)
 		goto error;
-	dev_priv->bar3_vm->pgt[0].page_shift = 12;
-	dev_priv->bar3_vm->pgt[0].refcount = 1;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
 
-	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj);
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
 
 	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
 	if (ret)
@@ -195,8 +194,7 @@ nv50_instmem_init(struct drm_device *dev)
 	nv_wo32(chan->ramin, 0, tmp);
 
 	/* BAR1 */
-	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE,
-			     29, 12, 16, &vm);
+	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
 	if (ret)
 		goto error;
 
@@ -220,7 +218,7 @@ nv50_instmem_init(struct drm_device *dev)
 	 * to catch "NULL pointer" references
 	 */
 	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
-			     29, 12, 16, &dev_priv->chan_vm);
+			     &dev_priv->chan_vm);
 	if (ret)
 		return ret;
 
@@ -258,7 +256,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 	dev_priv->channels.ptr[127] = 0;
 	nv50_channel_del(&dev_priv->channels.ptr[0]);
 
-	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj);
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
 	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
 
 	if (dev_priv->ramin_heap.free_stack.next)
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 7939387f7f80..38e523e10995 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -28,39 +28,40 @@
 #include "nouveau_vm.h"
 
 void
-nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
-		struct nouveau_gpuobj *pgt)
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		struct nouveau_gpuobj *pgt[2])
 {
 	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
-	u32 coverage = (pgt->size >> 3) << type;
-	u64 phys;
+	u64 phys = 0xdeadcafe00000000ULL;
+	u32 coverage = 0;
 
-	phys = pgt->vinst;
-	phys |= 0x01; /* present */
-	phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
+	if (pgt[0]) {
+		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		coverage = (pgt[0]->size >> 3) << 12;
+	} else
+	if (pgt[1]) {
+		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		coverage = (pgt[1]->size >> 3) << 16;
 	}
 
-	if (coverage <= 32 * 1024 * 1024)
-		phys |= 0x60;
-	else if (coverage <= 64 * 1024 * 1024)
-		phys |= 0x40;
-	else if (coverage < 128 * 1024 * 1024)
-		phys |= 0x20;
+	if (phys & 1) {
+		if (dev_priv->vram_sys_base) {
+			phys += dev_priv->vram_sys_base;
+			phys |= 0x30;
+		}
+
+		if (coverage <= 32 * 1024 * 1024)
+			phys |= 0x60;
+		else if (coverage <= 64 * 1024 * 1024)
+			phys |= 0x40;
+		else if (coverage < 128 * 1024 * 1024)
+			phys |= 0x20;
+	}
 
 	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
 	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
 }
 
-void
-nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
-{
-	nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
-	nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
-}
-
 static inline u64
 nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	     u64 phys, u32 memtype, u32 target)