diff options
| -rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_mem.c | 43 |
1 file changed, 13 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 8f3a12f614ed..04885d2fb15f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -285,53 +285,36 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
| 285 | uint32_t flags, uint64_t phys) | 285 | uint32_t flags, uint64_t phys) |
| 286 | { | 286 | { |
| 287 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 287 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 288 | struct nouveau_gpuobj **pgt; | 288 | unsigned pages; |
| 289 | unsigned psz, pfl, pages; | ||
| 290 | |||
| 291 | if (virt >= dev_priv->vm_gart_base && | ||
| 292 | (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { | ||
| 293 | psz = 12; | ||
| 294 | pgt = &dev_priv->gart_info.sg_ctxdma; | ||
| 295 | pfl = 0x21; | ||
| 296 | virt -= dev_priv->vm_gart_base; | ||
| 297 | } else | ||
| 298 | if (virt >= dev_priv->vm_vram_base && | ||
| 299 | (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { | ||
| 300 | psz = 16; | ||
| 301 | pgt = dev_priv->vm_vram_pt; | ||
| 302 | pfl = 0x01; | ||
| 303 | virt -= dev_priv->vm_vram_base; | ||
| 304 | } else { | ||
| 305 | NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", | ||
| 306 | virt, virt + size - 1); | ||
| 307 | return -EINVAL; | ||
| 308 | } | ||
| 309 | 289 | ||
| 310 | pages = size >> psz; | 290 | virt -= dev_priv->vm_vram_base; |
| 291 | pages = size >> 16; | ||
| 311 | 292 | ||
| 312 | dev_priv->engine.instmem.prepare_access(dev, true); | 293 | dev_priv->engine.instmem.prepare_access(dev, true); |
| 313 | if (flags & 0x80000000) { | 294 | if (flags & 0x80000000) { |
| 314 | while (pages--) { | 295 | while (pages--) { |
| 315 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | 296 | struct nouveau_gpuobj *pt = |
| 316 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | 297 | dev_priv->vm_vram_pt[virt >> 29]; |
| 298 | unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1; | ||
| 317 | 299 | ||
| 318 | nv_wo32(dev, pt, pte++, 0x00000000); | 300 | nv_wo32(dev, pt, pte++, 0x00000000); |
| 319 | nv_wo32(dev, pt, pte++, 0x00000000); | 301 | nv_wo32(dev, pt, pte++, 0x00000000); |
| 320 | 302 | ||
| 321 | virt += (1 << psz); | 303 | virt += (1 << 16); |
| 322 | } | 304 | } |
| 323 | } else { | 305 | } else { |
| 324 | while (pages--) { | 306 | while (pages--) { |
| 325 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | 307 | struct nouveau_gpuobj *pt = |
| 326 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | 308 | dev_priv->vm_vram_pt[virt >> 29]; |
| 309 | unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1; | ||
| 327 | unsigned offset_h = upper_32_bits(phys) & 0xff; | 310 | unsigned offset_h = upper_32_bits(phys) & 0xff; |
| 328 | unsigned offset_l = lower_32_bits(phys); | 311 | unsigned offset_l = lower_32_bits(phys); |
| 329 | 312 | ||
| 330 | nv_wo32(dev, pt, pte++, offset_l | pfl); | 313 | nv_wo32(dev, pt, pte++, offset_l | 1); |
| 331 | nv_wo32(dev, pt, pte++, offset_h | flags); | 314 | nv_wo32(dev, pt, pte++, offset_h | flags); |
| 332 | 315 | ||
| 333 | phys += (1 << psz); | 316 | phys += (1 << 16); |
| 334 | virt += (1 << psz); | 317 | virt += (1 << 16); |
| 335 | } | 318 | } |
| 336 | } | 319 | } |
| 337 | dev_priv->engine.instmem.finish_access(dev); | 320 | dev_priv->engine.instmem.finish_access(dev); |
