author     Jerome Glisse <jglisse@redhat.com>    2011-11-02 23:59:28 -0400
committer  Dave Airlie <airlied@redhat.com>      2011-12-06 05:39:24 -0500
commit     b1e5f172325547270f35e7d1e42416a606e1dbd2
tree       03fc21fd5f74add89441308008b45987d09cfbc6 /drivers
parent     649bf3ca77343e3be1e0af8e21356fa569b1abd9
drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
Move the page allocation and freeing to driver callbacks and provide TTM helper functions for those.

The most intrusive change is that we now only fully populate an object; this simplifies some of the code designed around the page-fault design.

V2 Rebase on top of memory accounting overhaul
V3 New rebase on top of more memory accounting changes
V4 Rebase on top of no memory accounting changes (where/when is my delorean when i need it ?)

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
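For readers skimming the hunks below, the shape of the change is: struct ttm_bo_driver gains two hooks, ttm_tt_populate and ttm_tt_unpopulate, and the TTM core (ttm_tt_bind, ttm_bo_move_memcpy, ttm_bo_kmap_ttm, the fault handler and swap-out) now calls them instead of the old per-page ttm_tt_get_page()/ttm_tt_populate() path. A driver with no special page-allocation needs simply forwards both hooks to the pool helpers now exported by ttm_page_alloc.c, exactly as the nouveau, radeon and vmwgfx hunks do. The sketch below is illustrative only: "foo" is a hypothetical driver, and the hook prototypes are inferred from the call sites in this patch (the actual declarations are added to include/drm/ttm/ttm_bo_driver.h, which falls outside this drivers-only diffstat).

/*
 * Sketch, not part of the patch.  Assumed hook prototypes, inferred from
 * how the core invokes them in the hunks below:
 *
 *      int  (*ttm_tt_populate)(struct ttm_tt *ttm);      returns 0 or -errno
 *      void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"   /* ttm_pool_populate() / ttm_pool_unpopulate() */

static struct ttm_bo_driver foo_bo_driver = {
        /* .ttm_tt_create stays driver specific, exactly as before */
        .ttm_tt_populate   = &ttm_pool_populate,     /* allocate and account every page up front */
        .ttm_tt_unpopulate = &ttm_pool_unpopulate,   /* free and unaccount them again */
        /* invalidate_caches, init_mem_type, evict_flags, ... unchanged */
};

ttm_pool_populate() walks ttm->num_pages, allocating and accounting each page (and swapping contents back in when TTM_PAGE_FLAG_SWAPPED is set) before marking the ttm tt_unbound; ttm_pool_unpopulate() undoes both steps and returns the ttm to tt_unpopulated. That is what the ttm_page_alloc.c hunk below adds.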
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c      |  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c       |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           |  9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      | 57
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c              | 91
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c    |  3
7 files changed, 94 insertions, 102 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b060fa48135c..f19ac42578bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
  */
 
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -1050,6 +1051,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 
 struct ttm_bo_driver nouveau_bo_driver = {
         .ttm_tt_create = &nouveau_ttm_tt_create,
+        .ttm_tt_populate = &ttm_pool_populate,
+        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
         .invalidate_caches = nouveau_bo_invalidate_caches,
         .init_mem_type = nouveau_bo_init_mem_type,
         .evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index af4d5f258afb..b1768cb971c6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -581,6 +581,8 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 
 static struct ttm_bo_driver radeon_bo_driver = {
         .ttm_tt_create = &radeon_ttm_tt_create,
+        .ttm_tt_populate = &ttm_pool_populate,
+        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
         .invalidate_caches = &radeon_invalidate_caches,
         .init_mem_type = &radeon_init_mem_type,
         .evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcaea583f..60f204d67dbb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                 unsigned long page,
                                 pgprot_t prot)
 {
-        struct page *d = ttm_tt_get_page(ttm, page);
+        struct page *d = ttm->pages[page];
         void *dst;
 
         if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                 unsigned long page,
                                 pgprot_t prot)
 {
-        struct page *s = ttm_tt_get_page(ttm, page);
+        struct page *s = ttm->pages[page];
         void *src;
 
         if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         if (old_iomap == NULL && ttm == NULL)
                 goto out2;
 
+        if (ttm->state == tt_unpopulated) {
+                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+                if (ret)
+                        goto out1;
+        }
+
         add = 0;
         dir = 1;
 
@@ -502,10 +508,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
         struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
         struct ttm_tt *ttm = bo->ttm;
-        struct page *d;
-        int i;
+        int ret;
 
         BUG_ON(!ttm);
+
+        if (ttm->state == tt_unpopulated) {
+                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+                if (ret)
+                        return ret;
+        }
+
         if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                 /*
                  * We're mapping a single page, and the desired
@@ -513,18 +525,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                  */
 
                 map->bo_kmap_type = ttm_bo_map_kmap;
-                map->page = ttm_tt_get_page(ttm, start_page);
+                map->page = ttm->pages[start_page];
                 map->virtual = kmap(map->page);
         } else {
-                /*
-                 * Populate the part we're mapping;
-                 */
-                for (i = start_page; i < start_page + num_pages; ++i) {
-                        d = ttm_tt_get_page(ttm, i);
-                        if (!d)
-                                return -ENOMEM;
-                }
-
                 /*
                  * We need to use vmap to get the desired page protection
                  * or to make the buffer object look contiguous.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924acebe..54412848de88 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                     vm_get_page_prot(vma->vm_flags) :
                     ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+                /* Allocate all page at once, most common usage */
+                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+                        retval = VM_FAULT_OOM;
+                        goto out_io_unlock;
+                }
         }
 
         /*
          * Speculatively prefault a number of pages. Only error on
          * first page.
          */
-
         for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                 if (bo->mem.bus.is_iomem)
                         pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                 else {
-                        page = ttm_tt_get_page(ttm, page_offset);
+                        page = ttm->pages[page_offset];
                         if (unlikely(!page && i == 0)) {
                                 retval = VM_FAULT_OOM;
                                 goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0f3e6d2395b3..8d6267e434ab 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -855,6 +855,63 @@ void ttm_page_alloc_fini(void)
         _manager = NULL;
 }
 
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+        unsigned i;
+        int ret;
+
+        if (ttm->state != tt_unpopulated)
+                return 0;
+
+        for (i = 0; i < ttm->num_pages; ++i) {
+                ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
+                                    ttm->caching_state, 1,
+                                    &ttm->dma_address[i]);
+                if (ret != 0) {
+                        ttm_pool_unpopulate(ttm);
+                        return -ENOMEM;
+                }
+
+                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+                                                false, false);
+                if (unlikely(ret != 0)) {
+                        ttm_pool_unpopulate(ttm);
+                        return -ENOMEM;
+                }
+        }
+
+        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+                ret = ttm_tt_swapin(ttm);
+                if (unlikely(ret != 0)) {
+                        ttm_pool_unpopulate(ttm);
+                        return ret;
+                }
+        }
+
+        ttm->state = tt_unbound;
+        return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+        unsigned i;
+
+        for (i = 0; i < ttm->num_pages; ++i) {
+                if (ttm->pages[i]) {
+                        ttm_mem_global_free_page(ttm->glob->mem_glob,
+                                                 ttm->pages[i]);
+                        ttm_put_pages(&ttm->pages[i], 1,
+                                      ttm->page_flags,
+                                      ttm->caching_state,
+                                      ttm->dma_address);
+                }
+        }
+        ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
         struct ttm_page_pool *p;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index fbc90dce1de8..77f0e6f79f30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,8 +43,6 @@
 #include "ttm/ttm_placement.h"
 #include "ttm/ttm_page_alloc.h"
 
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
@@ -63,69 +61,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
         ttm->dma_address = NULL;
 }
 
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-        struct page *p;
-        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-        int ret;
-
-        if (NULL == (p = ttm->pages[index])) {
-
-                ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
-                                    &ttm->dma_address[index]);
-                if (ret != 0)
-                        return NULL;
-
-                ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-                if (unlikely(ret != 0))
-                        goto out_err;
-
-                ttm->pages[index] = p;
-        }
-        return p;
-out_err:
-        ttm_put_pages(&p, 1, ttm->page_flags,
-                      ttm->caching_state, &ttm->dma_address[index]);
-        return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-        int ret;
-
-        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-                ret = ttm_tt_swapin(ttm);
-                if (unlikely(ret != 0))
-                        return NULL;
-        }
-        return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
-        struct page *page;
-        unsigned long i;
-        int ret;
-
-        if (ttm->state != tt_unpopulated)
-                return 0;
-
-        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-                ret = ttm_tt_swapin(ttm);
-                if (unlikely(ret != 0))
-                        return ret;
-        }
-
-        for (i = 0; i < ttm->num_pages; ++i) {
-                page = __ttm_tt_get_page(ttm, i);
-                if (!page)
-                        return -ENOMEM;
-        }
-        ttm->state = tt_unbound;
-        return 0;
-}
-EXPORT_SYMBOL(ttm_tt_populate);
-
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
                                           enum ttm_caching_state c_old,
@@ -227,21 +162,6 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
-        unsigned i;
-
-        for (i = 0; i < ttm->num_pages; ++i) {
-                if (ttm->pages[i]) {
-                        ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                 ttm->pages[i]);
-                        ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
-                                      ttm->caching_state, &ttm->dma_address[i]);
-                }
-        }
-        ttm->state = tt_unpopulated;
-}
-
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
         if (unlikely(ttm == NULL))
@@ -252,7 +172,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
         }
 
         if (likely(ttm->pages != NULL)) {
-                ttm_tt_free_alloced_pages(ttm);
+                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
                 ttm_tt_free_page_directory(ttm);
         }
 
@@ -307,7 +227,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
         if (ttm->state == tt_bound)
                 return 0;
 
-        ret = ttm_tt_populate(ttm);
+        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
         if (ret)
                 return ret;
 
@@ -321,7 +241,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 }
 EXPORT_SYMBOL(ttm_tt_bind);
 
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
 {
         struct address_space *swap_space;
         struct file *swap_storage;
@@ -343,7 +263,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
                         ret = PTR_ERR(from_page);
                         goto out_err;
                 }
-                to_page = __ttm_tt_get_page(ttm, i);
+                to_page = ttm->pages[i];
                 if (unlikely(to_page == NULL))
                         goto out_err;
 
@@ -364,7 +284,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
         return 0;
 out_err:
-        ttm_tt_free_alloced_pages(ttm);
         return ret;
 }
 
@@ -416,7 +335,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                 page_cache_release(to_page);
         }
 
-        ttm_tt_free_alloced_pages(ttm);
+        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
         ttm->swap_storage = swap_storage;
         ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
         if (persistent_swap_storage)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cc7243592425..3986d7468232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
         TTM_PL_FLAG_CACHED;
@@ -334,6 +335,8 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 
 struct ttm_bo_driver vmw_bo_driver = {
         .ttm_tt_create = &vmw_ttm_tt_create,
+        .ttm_tt_populate = &ttm_pool_populate,
+        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
         .invalidate_caches = vmw_invalidate_caches,
         .init_mem_type = vmw_init_mem_type,
         .evict_flags = vmw_evict_flags,