Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  426
1 file changed, 201 insertions(+), 225 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b42..69044eb104bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 /*
  * NV10-NV40 tiling helpers
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-                           uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+                            struct nouveau_tile_reg *tile, uint32_t addr,
+                            uint32_t size, uint32_t pitch, uint32_t flags)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
         struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
         struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-        struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+        int i = tile - dev_priv->tile.reg;
+        unsigned long save;
 
-        tile->addr = addr;
-        tile->size = size;
-        tile->used = !!pitch;
-        nouveau_fence_unref((void **)&tile->fence);
+        nouveau_fence_unref(&tile->fence);
 
+        if (tile->pitch)
+                pfb->free_tile_region(dev, i);
+
+        if (pitch)
+                pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
         pfifo->reassign(dev, false);
         pfifo->cache_pull(dev, false);
 
         nouveau_wait_for_idle(dev);
 
-        pgraph->set_region_tiling(dev, i, addr, size, pitch);
-        pfb->set_region_tiling(dev, i, addr, size, pitch);
+        pfb->set_tile_region(dev, i);
+        pgraph->set_tile_region(dev, i);
 
         pfifo->cache_pull(dev, true);
         pfifo->reassign(dev, true);
+        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
 }
 
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-                    uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-        struct nouveau_tile_reg *found = NULL;
-        unsigned long i, flags;
-
-        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-        for (i = 0; i < pfb->num_tiles; i++) {
-                struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-                if (tile->used)
-                        /* Tile region in use. */
-                        continue;
+        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-                if (tile->fence &&
-                    !nouveau_fence_signalled(tile->fence, NULL))
-                        /* Pending tile region. */
-                        continue;
-
-                if (max(tile->addr, addr) <
-                    min(tile->addr + tile->size, addr + size))
-                        /* Kill an intersecting tile region. */
-                        nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
-                if (pitch && !found) {
-                        /* Free tile region. */
-                        nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-                        found = tile;
-                }
-        }
+        spin_lock(&dev_priv->tile.lock);
 
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+        if (!tile->used &&
+            (!tile->fence || nouveau_fence_signalled(tile->fence)))
+                tile->used = true;
+        else
+                tile = NULL;
 
-        return found;
+        spin_unlock(&dev_priv->tile.lock);
+        return tile;
 }
 
 void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-                       struct nouveau_fence *fence)
-{
-        if (fence) {
-                /* Mark it as pending. */
-                tile->fence = fence;
-                nouveau_fence_ref(fence);
-        }
-
-        tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-                        uint32_t flags, uint64_t phys)
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+                         struct nouveau_fence *fence)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_gpuobj *pgt;
-        unsigned block;
-        int i;
 
-        virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-        size = (size >> 16) << 1;
-
-        phys |= ((uint64_t)flags << 32);
-        phys |= 1;
-        if (dev_priv->vram_sys_base) {
-                phys += dev_priv->vram_sys_base;
-                phys |= 0x30;
-        }
-
-        while (size) {
-                unsigned offset_h = upper_32_bits(phys);
-                unsigned offset_l = lower_32_bits(phys);
-                unsigned pte, end;
-
-                for (i = 7; i >= 0; i--) {
-                        block = 1 << (i + 1);
-                        if (size >= block && !(virt & (block - 1)))
-                                break;
+        if (tile) {
+                spin_lock(&dev_priv->tile.lock);
+                if (fence) {
+                        /* Mark it as pending. */
+                        tile->fence = fence;
+                        nouveau_fence_ref(fence);
                 }
-                offset_l |= (i << 7);
-
-                phys += block << 15;
-                size -= block;
-
-                while (block) {
-                        pgt = dev_priv->vm_vram_pt[virt >> 14];
-                        pte = virt & 0x3ffe;
-
-                        end = pte + block;
-                        if (end > 16384)
-                                end = 16384;
-                        block -= (end - pte);
-                        virt += (end - pte);
-
-                        while (pte < end) {
-                                nv_wo32(pgt, (pte * 4) + 0, offset_l);
-                                nv_wo32(pgt, (pte * 4) + 4, offset_h);
-                                pte += 2;
-                        }
-                }
-        }
 
-        dev_priv->engine.instmem.flush(dev);
-        dev_priv->engine.fifo.tlb_flush(dev);
-        dev_priv->engine.graph.tlb_flush(dev);
-        nv50_vm_flush(dev, 6);
-        return 0;
+                tile->used = false;
+                spin_unlock(&dev_priv->tile.lock);
+        }
 }
 
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+                    uint32_t pitch, uint32_t flags)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_gpuobj *pgt;
-        unsigned pages, pte, end;
-
-        virt -= dev_priv->vm_vram_base;
-        pages = (size >> 16) << 1;
+        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+        struct nouveau_tile_reg *tile, *found = NULL;
+        int i;
 
-        while (pages) {
-                pgt = dev_priv->vm_vram_pt[virt >> 29];
-                pte = (virt & 0x1ffe0000ULL) >> 15;
+        for (i = 0; i < pfb->num_tiles; i++) {
+                tile = nv10_mem_get_tile_region(dev, i);
 
-                end = pte + pages;
-                if (end > 16384)
-                        end = 16384;
-                pages -= (end - pte);
-                virt += (end - pte) << 15;
+                if (pitch && !found) {
+                        found = tile;
+                        continue;
 
-                while (pte < end) {
-                        nv_wo32(pgt, (pte * 4), 0);
-                        pte++;
-                }
+                } else if (tile && tile->pitch) {
+                        /* Kill an unused tile region. */
+                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+                }
+
+                nv10_mem_put_tile_region(dev, tile, NULL);
         }
 
-        dev_priv->engine.instmem.flush(dev);
-        dev_priv->engine.fifo.tlb_flush(dev);
-        dev_priv->engine.graph.tlb_flush(dev);
-        nv50_vm_flush(dev, 6);
+        if (found)
+                nv10_mem_update_tile_region(dev, found, addr, size,
+                                            pitch, flags);
+        return found;
 }
 
 /*
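
Note on the hunk above: the old nv10_mem_set_tiling() scanned every tile register inline while holding context_switch_lock with IRQs disabled. The rewrite splits that into a reserve/program/release lifecycle (nv10_mem_get_tile_region(), nv10_mem_update_tile_region(), nv10_mem_put_tile_region()) serialized by the new, lighter dev_priv->tile.lock; context_switch_lock is now taken only around the actual PFIFO/PFB/PGRAPH reprogramming. A minimal sketch of how a caller might drive the new API (illustrative only, not part of this patch):

        /* Reserve and program a tile region; "flags" is the memtype
         * argument introduced by this patch (0 = no special layout). */
        struct nouveau_tile_reg *tile =
                nv10_mem_set_tiling(dev, addr, size, pitch, 0);

        /* ... use the region ... */

        /* Release it; a non-NULL fence defers reuse until the GPU is done. */
        nv10_mem_put_tile_region(dev, tile, fence);
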
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
 	return 0;
 }
 
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        int i, parts, colbits, rowbitsa, rowbitsb, banks;
-        u64 rowsize, predicted;
-        u32 r0, r4, rt, ru;
-
-        r0 = nv_rd32(dev, 0x100200);
-        r4 = nv_rd32(dev, 0x100204);
-        rt = nv_rd32(dev, 0x100250);
-        ru = nv_rd32(dev, 0x001540);
-        NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-        for (i = 0, parts = 0; i < 8; i++) {
-                if (ru & (0x00010000 << i))
-                        parts++;
-        }
-
-        colbits = (r4 & 0x0000f000) >> 12;
-        rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-        rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-        banks = ((r4 & 0x01000000) ? 8 : 4);
-
-        rowsize = parts * banks * (1 << colbits) * 8;
-        predicted = rowsize << rowbitsa;
-        if (r0 & 0x00000004)
-                predicted += rowsize << rowbitsb;
-
-        if (predicted != dev_priv->vram_size) {
-                NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
-                        (u32)(dev_priv->vram_size >> 20));
-                NV_WARN(dev, "we calculated %dMiB VRAM\n",
-                        (u32)(predicted >> 20));
-        }
-
-        dev_priv->vram_rblock_size = rowsize >> 12;
-        if (rt & 1)
-                dev_priv->vram_rblock_size *= 3;
-
-        NV_DEBUG(dev, "rblock %lld bytes\n",
-                 (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        /* To our knowledge, there's no large scale reordering of pages
-         * that occurs on IGP chipsets.
-         */
-        dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
 nouveau_mem_detect(struct drm_device *dev)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
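
The two preinit helpers deleted above are not lost: judging from the dev_priv->engine.vram.init() call added to nouveau_mem_vram_init() later in this patch, the per-chipset row-block sizing moves behind a new VRAM engine abstraction. nouveau_mem_detect() also drops its static qualifier, so the remaining NV04-NV4x path can be reached from outside this file. The shape of the new engine hooks, inferred only from the calls visible in this diff (the real definition lives elsewhere in the patch series and may differ):

        struct nouveau_vram_engine {
                int  (*init)(struct drm_device *);
                int  (*get)(struct drm_device *, u64 size, u32 align,
                            u32 size_nc, u32 type, struct nouveau_vram **);
                void (*put)(struct drm_device *, struct nouveau_vram **);
        };
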
@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
 	if (dev_priv->card_type < NV_50) {
 		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-	} else
-	if (dev_priv->card_type < NV_C0) {
-		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
-		dev_priv->vram_size &= 0xffffffff00ll;
-
-		switch (dev_priv->chipset) {
-		case 0xaa:
-		case 0xac:
-		case 0xaf:
-			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
-			dev_priv->vram_sys_base <<= 12;
-			nvaa_vram_preinit(dev);
-			break;
-		default:
-			nv50_vram_preinit(dev);
-			break;
-		}
-	} else {
-		dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
-		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
-	}
-
-	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
-	if (dev_priv->vram_sys_base) {
-		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-			dev_priv->vram_sys_base);
 	}
 
 	if (dev_priv->vram_size)
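
With the NV50/NVC0 branches gone, nouveau_mem_detect() handles only pre-NV50 chips. The deleted sizing logic presumably reappears inside the per-chipset vram.init() hook; a hypothetical NV50 version (name and placement assumed here, body taken from the lines removed above) might look like:

        int
        nv50_vram_init(struct drm_device *dev)  /* hypothetical name */
        {
                struct drm_nouveau_private *dev_priv = dev->dev_private;

                dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
                dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
                dev_priv->vram_size &= 0xffffffff00ll;
                return 0;
        }
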
@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
 	return -ENOMEM;
 }
 
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+		return true;
+
+	return false;
+}
+
 #if __OS_HAS_AGP
 static unsigned long
 get_agp_mode(struct drm_device *dev, unsigned long mode)
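
As added here, nouveau_mem_flags_valid() accepts exactly those tile_flags with no layout bits set, so it reduces to a one-liner; the long form with an explicit "return false;" tail is presumably kept so chipset-specific cases can be slotted in later (the dev argument is unused for now). Equivalent, behaviour-identical form:

        bool
        nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
        {
                return !(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK);
        }
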
@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_detect(dev);
-	if (ret)
-		return ret;
-
 	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
 		return ret;
 	}
 
-	dev_priv->fb_available_size = dev_priv->vram_size;
-	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-		dev_priv->fb_mappable_pages =
-			pci_resource_len(dev->pdev, 1);
-	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
 	/* reserve space at end of VRAM for PRAMIN */
 	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
 	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	else
 		dev_priv->ramin_rsvd_vram = (512 * 1024);
 
+	ret = dev_priv->engine.vram.init(dev);
+	if (ret)
+		return ret;
+
+	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+	if (dev_priv->vram_sys_base) {
+		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+			dev_priv->vram_sys_base);
+	}
+
+	dev_priv->fb_available_size = dev_priv->vram_size;
+	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
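
The hunk above is mostly code motion: the "Detected ... VRAM" report and the fb_available_size/fb_mappable_pages setup deleted earlier in the patch now run after dev_priv->engine.vram.init(), which is what fills in vram_size on NV50+ once nouveau_mem_detect() stops doing so. Worked example of the clamping (assuming 4 KiB pages; sizes chosen for illustration):

        /* 512 MiB of VRAM behind a 256 MiB BAR1 aperture: */
        fb_mappable_pages = min(512 MiB, 256 MiB) >> PAGE_SHIFT
                          = 0x10000000 >> 12
                          = 65536 pages
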
@@ -799,3 +660,118 @@ nouveau_mem_timing_fini(struct drm_device *dev)
 
 	kfree(mem->timing);
 }
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_mm *mm;
+	u32 b_size;
+	int ret;
+
+	p_size = (p_size << PAGE_SHIFT) >> 12;
+	b_size = dev_priv->vram_rblock_size >> 12;
+
+	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+	if (ret)
+		return ret;
+
+	man->priv = mm;
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_mm *mm = man->priv;
+	int ret;
+
+	ret = nouveau_mm_fini(&mm);
+	if (ret)
+		return ret;
+
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+
+	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vram *node;
+	u32 size_nc = 0;
+	int ret;
+
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->vma.node->type;
+
+	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+			mem->page_alignment << PAGE_SHIFT, size_nc,
+			(nvbo->tile_flags >> 8) & 0xff, &node);
+	if (ret)
+		return ret;
+
+	node->page_shift = 12;
+	if (nvbo->vma.node)
+		node->page_shift = nvbo->vma.node->type;
+
+	mem->mm_node = node;
+	mem->start = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+	int i;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+		       prefix, r->free ? "free" : "used", r->type,
+		       ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+		total += r->length;
+		ttotal[r->type] += r->length;
+		if (r->free)
+			tfree[r->type] += r->length;
+		else
+			tused[r->type] += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
+	for (i = 0; i < 3; i++) {
+		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+		       "used 0x%010llx, free 0x%010llx\n", prefix,
+		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+	}
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
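
The new nouveau_vram_manager table relies on positional initialization, so the function order must match the member order of TTM's struct ttm_mem_type_manager_func. An equivalent, more change-resistant form would use designated initializers; the member names below are assumed from the TTM API of this kernel generation (init/takedown/get_node/put_node/debug) and should be checked against ttm_bo_driver.h:

        const struct ttm_mem_type_manager_func nouveau_vram_manager = {
                .init     = nouveau_vram_manager_init,
                .takedown = nouveau_vram_manager_fini,
                .get_node = nouveau_vram_manager_new,
                .put_node = nouveau_vram_manager_del,
                .debug    = nouveau_vram_manager_debug,
        };
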
