| author | Ben Skeggs <bskeggs@redhat.com> | 2010-08-04 20:48:18 -0400 |
|---|---|---|
| committer | Ben Skeggs <bskeggs@redhat.com> | 2010-10-04 20:01:20 -0400 |
| commit | d961db75ce86a84f1f04e91ad1014653ed7d9f46 (patch) | |
| tree | a827b77524fdc0c37da70936fbb0627ac7e4b492 /drivers/gpu/drm/radeon/radeon_ttm.c | |
| parent | 42311ff90dc8746bd81427b2ed6efda9af791b77 (diff) | |
drm/ttm: restructure to allow driver to plug in alternate memory manager
Nouveau will need this on GeForce 8 and up to account for the GPU
reordering physical VRAM for some memory types.
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
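The key idea behind the restructuring is that each TTM memory type now exposes its allocator through a function table (`man->func`): radeon plugs in the generic range manager, `ttm_bo_manager_func`, for both GTT and VRAM, while a driver such as nouveau can later supply its own manager for memory types whose VRAM layout the GPU reorders. Below is a minimal, self-contained C sketch of that function-table pattern; the names (`mem_type_manager`, `range_manager_funcs`, and so on) are illustrative stand-ins, not the real TTM API.

```c
#include <stdio.h>

/* Hypothetical model of a per-memory-type manager with a pluggable
 * function table, mirroring the idea behind man->func in TTM. */
struct mem_reg {
	unsigned long start;		/* page offset chosen by the manager */
	unsigned long num_pages;
	void *mm_node;			/* manager-private allocation cookie */
};

struct mem_type_manager;

struct mem_manager_funcs {
	int  (*get_node)(struct mem_type_manager *man, struct mem_reg *reg);
	void (*put_node)(struct mem_type_manager *man, struct mem_reg *reg);
};

struct mem_type_manager {
	const struct mem_manager_funcs *func;	/* the "plug-in" point */
	unsigned long size;			/* managed size, in pages */
	unsigned long next_free;		/* trivial bump-allocator state */
};

/* Default range manager: hands out linearly increasing page offsets. */
static int range_get_node(struct mem_type_manager *man, struct mem_reg *reg)
{
	if (man->next_free + reg->num_pages > man->size)
		return -1;
	reg->start = man->next_free;	/* the driver reads reg->start, not mm_node */
	man->next_free += reg->num_pages;
	return 0;
}

static void range_put_node(struct mem_type_manager *man, struct mem_reg *reg)
{
	(void)man;
	reg->mm_node = NULL;		/* a real manager would free its node here */
}

static const struct mem_manager_funcs range_manager_funcs = {
	.get_node = range_get_node,
	.put_node = range_put_node,
};

int main(void)
{
	/* "Driver init": pick the generic manager, as radeon does for VRAM/GTT. */
	struct mem_type_manager vram = { .func = &range_manager_funcs, .size = 1024 };
	struct mem_reg reg = { .num_pages = 16 };

	if (vram.func->get_node(&vram, &reg) == 0)
		printf("allocated 16 pages at page offset %lu\n", reg.start);
	vram.func->put_node(&vram, &reg);
	return 0;
}
```

The point of the indirection is that common code only ever allocates and frees space through `man->func`, so a GPU-specific allocator can be swapped in without touching the shared placement and eviction paths.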
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_ttm.c | 16
1 file changed, 9 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cc19aba9bb74..0921910698d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
         man->default_caching = TTM_PL_FLAG_CACHED;
         break;
     case TTM_PL_TT:
+        man->func = &ttm_bo_manager_func;
         man->gpu_offset = rdev->mc.gtt_start;
         man->available_caching = TTM_PL_MASK_CACHING;
         man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
         break;
     case TTM_PL_VRAM:
         /* "On-card" video ram */
+        man->func = &ttm_bo_manager_func;
         man->gpu_offset = rdev->mc.vram_start;
         man->flags = TTM_MEMTYPE_FLAG_FIXED |
                  TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
     if (unlikely(r)) {
         return r;
     }
-    old_start = old_mem->mm_node->start << PAGE_SHIFT;
-    new_start = new_mem->mm_node->start << PAGE_SHIFT;
+    old_start = old_mem->start << PAGE_SHIFT;
+    new_start = new_mem->start << PAGE_SHIFT;
 
     switch (old_mem->mem_type) {
     case TTM_PL_VRAM:
@@ -435,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 #if __OS_HAS_AGP
         if (rdev->flags & RADEON_IS_AGP) {
             /* RADEON_IS_AGP is set only if AGP is active */
-            mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+            mem->bus.offset = mem->start << PAGE_SHIFT;
             mem->bus.base = rdev->mc.agp_base;
             mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
         }
 #endif
         break;
     case TTM_PL_VRAM:
-        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+        mem->bus.offset = mem->start << PAGE_SHIFT;
         /* check if it's visible */
         if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
             return -EINVAL;
@@ -685,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
     int r;
 
     gtt = container_of(backend, struct radeon_ttm_backend, backend);
-    gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+    gtt->offset = bo_mem->start << PAGE_SHIFT;
     if (!gtt->num_pages) {
         WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
     }
@@ -784,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
         radeon_mem_types_list[i].show = &radeon_mm_dump_table;
         radeon_mem_types_list[i].driver_features = 0;
         if (i == 0)
-            radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+            radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
         else
-            radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+            radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
 
     }
     /* Add ttm page pool to debugfs */
```
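The remaining hunks are the driver-side fallout of the same restructuring: once the allocator sits behind `man->func`, the node it returns need not be a `drm_mm_node` the driver can dereference, so radeon reads the page offset from the generic `mem->start` field instead of `mem->mm_node->start`, and the debugfs entries now point at the manager's private data (`priv`) rather than an embedded allocator. A tiny self-contained sketch of that driver-side convention; apart from `PAGE_SHIFT`, the names are hypothetical stand-ins:

```c
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages, as on x86 */

/* Hypothetical stand-in for a memory region after this change: the driver
 * reads the generic 'start' field; 'mm_node' is an opaque manager cookie. */
struct mem_reg {
	unsigned long start;		/* page offset filled in by the manager */
	void *mm_node;			/* private to the manager, never dereferenced here */
};

int main(void)
{
	struct mem_reg reg = { .start = 256, .mm_node = NULL };

	/* Same arithmetic as the radeon hunks: gtt->offset and bus.offset
	 * become (page offset << PAGE_SHIFT), i.e. a byte offset. */
	printf("byte offset = %lu\n", reg.start << PAGE_SHIFT);
	return 0;
}
```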