 drivers/gpu/drm/nouveau/nouveau_mem.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 44 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 04885d2fb15f..6832c4c969a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -291,31 +291,17 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
         pages = size >> 16;
 
         dev_priv->engine.instmem.prepare_access(dev, true);
-        if (flags & 0x80000000) {
-                while (pages--) {
-                        struct nouveau_gpuobj *pt =
-                                dev_priv->vm_vram_pt[virt >> 29];
-                        unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
+        while (pages--) {
+                struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt[virt >> 29];
+                unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
+                unsigned offset_h = upper_32_bits(phys) & 0xff;
+                unsigned offset_l = lower_32_bits(phys);
 
-                        nv_wo32(dev, pt, pte++, 0x00000000);
-                        nv_wo32(dev, pt, pte++, 0x00000000);
+                nv_wo32(dev, pt, pte++, offset_l | 1);
+                nv_wo32(dev, pt, pte++, offset_h | flags);
 
-                        virt += (1 << 16);
-                }
-        } else {
-                while (pages--) {
-                        struct nouveau_gpuobj *pt =
-                                dev_priv->vm_vram_pt[virt >> 29];
-                        unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
-                        unsigned offset_h = upper_32_bits(phys) & 0xff;
-                        unsigned offset_l = lower_32_bits(phys);
-
-                        nv_wo32(dev, pt, pte++, offset_l | 1);
-                        nv_wo32(dev, pt, pte++, offset_h | flags);
-
-                        phys += (1 << 16);
-                        virt += (1 << 16);
-                }
+                phys += (1 << 16);
+                virt += (1 << 16);
         }
         dev_priv->engine.instmem.finish_access(dev);
 
@@ -339,7 +325,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-        nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_gpuobj *pgt;
+        unsigned pages, pte, end;
+
+        virt -= dev_priv->vm_vram_base;
+        pages = (size >> 16) << 1;
+
+        dev_priv->engine.instmem.prepare_access(dev, true);
+        while (pages) {
+                pgt = dev_priv->vm_vram_pt[virt >> 29];
+                pte = (virt & 0x1ffe0000ULL) >> 15;
+
+                end = pte + pages;
+                if (end > 16384)
+                        end = 16384;
+                pages -= (end - pte);
+                virt += (end - pte) << 15;
+
+                while (pte < end)
+                        nv_wo32(dev, pgt, pte++, 0);
+        }
+        dev_priv->engine.instmem.finish_access(dev);
+
+        nv_wr32(dev, 0x100c80, 0x00050001);
+        if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+                NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+                NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+                return;
+        }
+
+        nv_wr32(dev, 0x100c80, 0x00000001);
+        if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+                NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+                NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+        }
 }
 
 /*
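
Note on the address arithmetic (illustration only, not part of the commit): both hunks assume one page table per 512 MiB of GPU virtual address space, 64 KiB pages, and two 32-bit words per PTE, which is where the 16384-word clamp in the unbind loop comes from. The standalone sketch below only reproduces that index math; the names pt_index, pte_word and words_per_table are invented for the example and do not appear in the driver.

/* Illustration only: the PTE indexing implied by the hunks above.
 * Assumes 512 MiB per page table, 64 KiB pages, two words per PTE. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t virt = 0x20050000ULL;          /* example GPU virtual address */

        unsigned pt_index = virt >> 29;                          /* which 512 MiB page table */
        unsigned pte_word = ((virt & 0x1fffffffULL) >> 16) << 1; /* first of the two PTE words */
        unsigned words_per_table = ((512u << 20) >> 16) * 2;     /* 16384, the unbind clamp */

        printf("page table %u, PTE words %u..%u of %u\n",
               pt_index, pte_word, pte_word + 1, words_per_table);
        return 0;
}

For the example address this prints "page table 1, PTE words 10..11 of 16384", matching the pte computation in nv50_mem_vm_bind_linear() above.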