 drivers/gpu/drm/nouveau/nouveau_bo.c       |  18
 drivers/gpu/drm/nouveau/nouveau_channel.c  |   6
 drivers/gpu/drm/nouveau/nouveau_notifier.c |   2
 drivers/gpu/drm/nouveau/nouveau_sgdma.c    |   4
 drivers/gpu/drm/nouveau/nv50_crtc.c        |   3
 drivers/gpu/drm/nouveau/nv50_display.c     |   2
 drivers/gpu/drm/nouveau/nv50_instmem.c     |   2
 drivers/gpu/drm/nouveau/nvc0_instmem.c     |   2
 drivers/gpu/drm/radeon/radeon_object.c     |   6
 drivers/gpu/drm/radeon/radeon_ttm.c        |  16
 drivers/gpu/drm/ttm/Makefile               |   3
 drivers/gpu/drm/ttm/ttm_agp_backend.c      |   3
 drivers/gpu/drm/ttm/ttm_bo.c               | 100
 drivers/gpu/drm/ttm/ttm_bo_manager.c       | 148
 drivers/gpu/drm/ttm/ttm_bo_util.c          |   3
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c     |   3
 include/drm/ttm/ttm_bo_api.h               |   3
 include/drm/ttm/ttm_bo_driver.h            |  21
 18 files changed, 229 insertions(+), 116 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f685f392c226..80353e2b8409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -381,6 +381,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
@@ -392,6 +393,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -494,8 +496,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	u64 src_offset, dst_offset;
 	int ret;
 
-	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+	src_offset = old_mem->start << PAGE_SHIFT;
+	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset += dev_priv->vm_vram_base;
@@ -597,8 +599,8 @@ static int
 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+	u32 src_offset = old_mem->start << PAGE_SHIFT;
+	u32 dst_offset = new_mem->start << PAGE_SHIFT;
 	u32 page_count = new_mem->num_pages;
 	int ret;
 
@@ -746,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		return 0;
 	}
 
-	offset = new_mem->mm_node->start << PAGE_SHIFT;
+	offset = new_mem->start << PAGE_SHIFT;
 
 	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
@@ -860,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = dev_priv->gart_info.aper_base;
 			mem->bus.is_iomem = true;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 		break;
@@ -897,7 +899,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	}
 
 	/* make sure bo is in mappable vram */
-	if (bo->mem.mm_node->start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
 		return 0;
 
 
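
Across these driver hunks the conversion is mechanical: ttm_mem_reg::mm_node becomes an opaque cookie owned by the memory-type manager, and the allocation's first page moves into the new ttm_mem_reg::start field. A minimal before/after sketch of the pattern (the helper names here are hypothetical, for illustration only):

	/* Before this patch: drivers dereferenced the drm_mm_node directly. */
	static u64 gpu_offset_old(struct ttm_mem_reg *mem)
	{
		return (u64)mem->mm_node->start << PAGE_SHIFT;	/* typed pointer */
	}

	/* After this patch: mm_node is void *, so drivers read the cached
	 * mem->start that the memory-type manager filled in at get_node() time. */
	static u64 gpu_offset_new(struct ttm_mem_reg *mem)
	{
		return (u64)mem->start << PAGE_SHIFT;
	}
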
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 5eb4c966273f..373950e34814 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 						     dev_priv->gart_info.aper_size,
 						     NV_DMA_ACCESS_RO, &pushbuf,
 						     NULL);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_VIDMEM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :) PCI access to cmdbuf in
@@ -67,7 +67,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 22b86189b7bb..2cc59f8c658b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -113,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
 		target = NV_DMA_TARGET_VIDMEM;
 	} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 7f028fee7a58..288bacac7e5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
 
-	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
 	nvbe->pte_start = pte;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 1686f8291b6d..3f2fb4ec63ab 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
 				NV50_EVO_CRTC_CLUT_MODE_OFF :
 				NV50_EVO_CRTC_CLUT_MODE_ON);
-		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
-			       PAGE_SHIFT) >> 8);
+		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
 		if (dev_priv->chipset != 0x50) {
 			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NvEvoVRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 11d366ad4036..55c9663ef2bf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -345,7 +345,7 @@ nv50_display_init(struct drm_device *dev)
 
 	/* initialise fifo */
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f5800f21a9dc..a53fc974332b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -347,7 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 		return ret;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT;
+	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 6a41d644e044..13a0f78a9088 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -50,7 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 		return ret;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT;
+	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e62347d..c26106066ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.mem.start << PAGE_SHIFT,
 			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rdev = rbo->rdev;
 	if (bo->mem.mem_type == TTM_PL_VRAM) {
 		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		offset = bo->mem.start << PAGE_SHIFT;
 		if ((offset + size) > rdev->mc.visible_vram_size) {
 			/* hurrah the memory is not visible ! */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
 			if (unlikely(r != 0))
 				return r;
-			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			offset = bo->mem.start << PAGE_SHIFT;
 			/* this should not happen */
 			if ((offset + size) > rdev->mc.visible_vram_size)
 				return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cc19aba9bb74..0921910698d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = rdev->mc.gtt_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = rdev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		return r;
 	}
-	old_start = old_mem->mm_node->start << PAGE_SHIFT;
-	new_start = new_mem->mm_node->start << PAGE_SHIFT;
+	old_start = old_mem->start << PAGE_SHIFT;
+	new_start = new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -435,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 #if __OS_HAS_AGP
 		if (rdev->flags & RADEON_IS_AGP) {
 			/* RADEON_IS_AGP is set only if AGP is active */
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = rdev->mc.agp_base;
 			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		/* check if it's visible */
 		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
 			return -EINVAL;
@@ -685,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 	int r;
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
 	}
@@ -784,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
 
 	}
 	/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem = agp_be->mem;
 	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
 	int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+	ret = agp_bind_memory(mem, node->start);
 	if (ret)
 		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
 
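
The AGP backend is the one consumer that still needs the typed node: bo_mem->mm_node is now void *, so it is pulled into a local struct drm_mm_node * before use. This is valid only on the assumption that the memory type backing AGP is managed by the stock range manager (ttm_bo_manager_func), which stores a drm_mm_node in the cookie, as every driver hunk in this patch does:

	/* Implicit void * downcast; node->start is the same page offset the
	 * manager also cached in bo_mem->start. */
	struct drm_mm_node *node = bo_mem->mm_node;
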
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 80d37b460a8c..af7b57a47fbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	       man->available_caching);
 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
 	       man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM) {
-		spin_lock(&bdev->glob->lru_lock);
-		drm_mm_debug_table(&man->manager, TTM_PFX);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ moved:
 
 	if (bo->mem.mm_node) {
 		spin_lock(&bo->lock);
-		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 			     bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
@@ -724,52 +721,12 @@ retry:
 	return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-				struct ttm_mem_type_manager *man,
-				struct ttm_placement *placement,
-				struct ttm_mem_reg *mem,
-				struct drm_mm_node **node)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	unsigned long lpfn;
-	int ret;
-
-	lpfn = placement->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-	*node = NULL;
-	do {
-		ret = drm_mm_pre_get(&man->manager);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&glob->lru_lock);
-		*node = drm_mm_search_free_in_range(&man->manager,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(*node == NULL)) {
-			spin_unlock(&glob->lru_lock);
-			return 0;
-		}
-		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-						      mem->page_alignment,
-						      placement->fpfn,
-						      lpfn);
-		spin_unlock(&glob->lru_lock);
-	} while (*node == NULL);
-	return 0;
-}
-
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
-		mem->mm_node = NULL;
-	}
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
@@ -788,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_mm_node *node;
 	int ret;
 
 	do {
-		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		ret = (*man->func->get_node)(man, bo, placement, mem);
 		if (unlikely(ret != 0))
 			return ret;
-		if (node)
+		if (mem->mm_node)
 			break;
 		spin_lock(&glob->lru_lock);
 		if (list_empty(&man->lru)) {
@@ -808,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (node == NULL)
+	if (mem->mm_node == NULL)
 		return -ENOMEM;
-	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
 }
@@ -884,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_erestartsys = false;
-	struct drm_mm_node *node = NULL;
 	int i, ret;
 
 	mem->mm_node = NULL;
@@ -918,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = ttm_bo_man_get_node(bo, man, placement, mem,
-						&node);
+			ret = (*man->func->get_node)(man, bo, placement, mem);
 			if (unlikely(ret))
 				return ret;
 		}
-		if (node)
+		if (mem->mm_node)
 			break;
 	}
 
-	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-		mem->mm_node = node;
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
 		return 0;
@@ -998,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			bool interruptible, bool no_wait_reserve,
 			bool no_wait_gpu)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -1026,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-	if (ret && mem.mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-	}
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
 
@@ -1038,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			      struct ttm_mem_reg *mem)
 {
 	int i;
-	struct drm_mm_node *node = mem->mm_node;
 
-	if (node && placement->lpfn != 0 &&
-	    (node->start < placement->fpfn ||
-	     node->start + node->size > placement->lpfn))
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
 		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
@@ -1286,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
@@ -1309,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, mem_type, false);
 
-		spin_lock(&glob->lru_lock);
-		if (drm_mm_clean(&man->manager))
-			drm_mm_takedown(&man->manager);
-		else
-			ret = -EBUSY;
-
-		spin_unlock(&glob->lru_lock);
+		ret = (*man->func->takedown)(man);
 	}
 
 	return ret;
@@ -1366,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
+	man->bdev = bdev;
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
@@ -1375,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			   type);
 		return ret;
 	}
-	ret = drm_mm_init(&man->manager, 0, p_size);
+
+	ret = (*man->func->init)(man, p_size);
 	if (ret)
 		return ret;
 	}
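
The eviction loop in ttm_bo_mem_force_space() (hunk above) now relies on a three-way convention from get_node(). Annotated sketch, condensed from the hunks above rather than new code:

	/* get_node() return convention the core depends on:
	 *   ret != 0                  hard error, propagate to the caller
	 *   ret == 0, !mem->mm_node   type is full; evict one LRU buffer, retry
	 *   ret == 0,  mem->mm_node   success; mem->start holds the page offset
	 */
	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;			/* space found */
		/* ... ttm_mem_evict_first() drops one buffer from man->lru ... */
	} while (1);
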
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..7410c190c891
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		node = drm_mm_search_free_in_range(mm,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct drm_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	ret = drm_mm_init(mm, 0, p_size);
+	if (ret) {
+		kfree(mm);
+		return ret;
+	}
+
+	man->priv = mm;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		kfree(mm);
+		man->priv = NULL;
+	} else
+		ret = -EBUSY;
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(mm, prefix);
+	spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	ttm_bo_man_init,
+	ttm_bo_man_takedown,
+	ttm_bo_man_get_node,
+	ttm_bo_man_put_node,
+	ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
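
The function table above is the entire plug-in surface. To illustrate what an alternate manager must provide, here is a hypothetical, deliberately dumb bump allocator; it is not part of this patch, and every name in it is invented for illustration:

	struct dumb_manager {
		spinlock_t lock;
		unsigned long next;	/* next free page offset */
		unsigned long size;	/* managed size in pages */
	};

	static int dumb_man_init(struct ttm_mem_type_manager *man,
				 unsigned long p_size)
	{
		struct dumb_manager *dm = kzalloc(sizeof(*dm), GFP_KERNEL);

		if (!dm)
			return -ENOMEM;
		spin_lock_init(&dm->lock);
		dm->size = p_size;
		man->priv = dm;
		return 0;
	}

	static int dumb_man_takedown(struct ttm_mem_type_manager *man)
	{
		kfree(man->priv);
		man->priv = NULL;
		return 0;
	}

	static int dumb_man_get_node(struct ttm_mem_type_manager *man,
				     struct ttm_buffer_object *bo,
				     struct ttm_placement *placement,
				     struct ttm_mem_reg *mem)
	{
		struct dumb_manager *dm = man->priv;

		spin_lock(&dm->lock);
		if (dm->next + mem->num_pages > dm->size) {
			spin_unlock(&dm->lock);
			return 0;	/* full: mm_node stays NULL, core may evict */
		}
		mem->start = dm->next;	/* page offset consumed via mem->start */
		mem->mm_node = dm;	/* any non-NULL cookie marks success */
		dm->next += mem->num_pages;
		spin_unlock(&dm->lock);
		return 0;
	}

	static void dumb_man_put_node(struct ttm_mem_type_manager *man,
				      struct ttm_mem_reg *mem)
	{
		mem->mm_node = NULL;	/* bump allocator never reclaims */
	}

	static void dumb_man_debug(struct ttm_mem_type_manager *man,
				   const char *prefix)
	{
	}

	/* Positional initializers, matching ttm_mem_type_manager_func order. */
	static const struct ttm_mem_type_manager_func dumb_manager_func = {
		dumb_man_init,
		dumb_man_takedown,
		dumb_man_get_node,
		dumb_man_put_node,
		dumb_man_debug,
	};

A real manager would honor placement->fpfn/lpfn and mem->page_alignment and actually free space in put_node(); the drm_mm-backed ttm_bo_manager.c above shows the full treatment.
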
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0ebfe0d94931..c9d2d4d8d066 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -256,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}
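
For context on the hunk above: when source and destination live in the same memory type and their windows can overlap, ttm_bo_move_memcpy() walks the pages backwards. A condensed sketch of the direction choice, mirroring the code above:

	/* Copy low-to-high by default; if the destination starts inside the
	 * source window, walk high-to-low so no page is overwritten before
	 * it has been read. */
	int dir = 1;
	unsigned long add = 0;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}
	/* the copy loop then visits page (add + i * dir) for each i */
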
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..1b3bd8c6c67e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -147,6 +147,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_MASK_CACHING;
@@ -203,7 +204,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 		/* System memory */
 		return 0;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = dev_priv->vram_start;
 		mem->bus.is_iomem = true;
 		break;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 267a86c74e2e..49b43c23636a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
  */
 
 struct ttm_mem_reg {
-	struct drm_mm_node *mm_node;
+	void *mm_node;
+	unsigned long start;
 	unsigned long size;
 	unsigned long num_pages;
 	uint32_t page_alignment;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6c694d86e03d..e3371dbe6a10 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -203,7 +203,22 @@ struct ttm_tt {
  * It's set up by the ttm_bo_driver::init_mem_type method.
  */
 
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+	int  (*takedown)(struct ttm_mem_type_manager *man);
+	int  (*get_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem);
+	void (*put_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem);
+	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
 struct ttm_mem_type_manager {
+	struct ttm_bo_device *bdev;
 
 	/*
 	 * No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
 	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
 	 * Plays ill with list removal, though.
 	 */
-
-	struct drm_mm manager;
+	const struct ttm_mem_type_manager_func *func;
+	void *priv;
 	struct list_head lru;
 };
 
@@ -895,6 +910,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  */
 extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
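
To tie the pieces together, here is a sketch of how a driver opts in after this patch, modeled on the nouveau/radeon/vmwgfx hunks above; the driver name and surrounding field choices are illustrative, not prescriptive:

	/* In the driver's init_mem_type() hook, point each managed memory
	 * type at the stock range manager (or a driver-private
	 * ttm_mem_type_manager_func). */
	static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				       struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			man->func = &ttm_bo_manager_func;	/* pluggable manager */
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			break;
		}
		return 0;
	}

	/* ttm_bo_init_mm() then runs, in order (per the ttm_bo.c hunks above):
	 *
	 *	bdev->driver->init_mem_type(bdev, type, man);  // driver sets man->func
	 *	man->bdev = bdev;                              // back pointer for managers
	 *	(*man->func->init)(man, p_size);               // manager allocates man->priv
	 */
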