author	Thomas Hellstrom <thellstrom@vmware.com>	2009-08-18 10:51:56 -0400
committer	Dave Airlie <airlied@linux.ie>	2009-08-19 02:10:34 -0400
commit	a987fcaa805fcb24ba885c2e29fd4fdb6816f08f (patch)
tree	561b6dd8e002e2eb1a75132b1edbd303782dc2fb
parent	5fd9cbad3a4ae82c83c55b9c621d156c326724ef (diff)
ttm: Make parts of a struct ttm_bo_device global.
Common resources, like memory accounting and swap lists, should be global
and not per device. Introduce a struct ttm_bo_global to accommodate this,
and register it with sysfs. Add a small sysfs interface to return the
number of active buffer objects.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.h	1
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	33
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	292
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	4
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c	12
-rw-r--r--	include/drm/ttm/ttm_bo_api.h	1
-rw-r--r--	include/drm/ttm/ttm_bo_driver.h	94
7 files changed, 296 insertions(+), 141 deletions(-)
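For a driver adopting this interface, the wiring mirrors the radeon changes
below: take a reference on the memory-accounting global first, then on the
buffer-object global (whose init callback consumes it), and hand the
resulting object to ttm_bo_device_init(). What follows is a minimal sketch,
not code from this patch: the foo_* names, the driver's mem-global callbacks
and FOO_FILE_PAGE_OFFSET are hypothetical stand-ins, while the ttm_* calls
and types are the ones this patch introduces.

/* Driver-side setup/teardown sketch; all foo_* identifiers are hypothetical. */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_module.h"

#define FOO_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

/* Driver-provided callbacks for the TTM_GLOBAL_TTM_MEM item (assumed). */
extern int foo_ttm_mem_global_init(struct ttm_global_reference *ref);
extern void foo_ttm_mem_global_release(struct ttm_global_reference *ref);
extern struct ttm_bo_driver foo_bo_driver;

struct foo_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
};

static int foo_ttm_global_init(struct foo_mman *mman)
{
	struct ttm_global_reference *global_ref;
	int r;

	/* The memory accounting global must be referenced first. */
	global_ref = &mman->mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &foo_ttm_mem_global_init;
	global_ref->release = &foo_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0)
		return r;

	/* ttm_bo_global_init() picks the mem global up through bo_global_ref. */
	mman->bo_global_ref.mem_glob = mman->mem_global_ref.object;
	global_ref = &mman->bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		ttm_global_item_unref(&mman->mem_global_ref);
		return r;
	}

	/* The device now binds to the shared BO global, not the mem global. */
	return ttm_bo_device_init(&mman->bdev,
				  mman->bo_global_ref.ref.object,
				  &foo_bo_driver, FOO_FILE_PAGE_OFFSET);
}

static void foo_ttm_global_fini(struct foo_mman *mman)
{
	/* Drop references in reverse order of acquisition. */
	ttm_global_item_unref(&mman->bo_global_ref.ref);
	ttm_global_item_unref(&mman->mem_global_ref);
}

Once the globals are referenced, the kobject registered in
ttm_bo_global_init() exposes the live object count through the read-only
bo_count attribute in a "buffer_objects" directory under the TTM sysfs
kobject.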
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 473e4775dc5a..10e8af6bb456 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -37,6 +37,7 @@
  * TTM.
  */
 struct radeon_mman {
+	struct ttm_bo_global_ref	bo_global_ref;
 	struct ttm_global_reference	mem_global_ref;
 	bool				mem_global_referenced;
 	struct ttm_bo_device		bdev;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1227a97f5169..343b6d6b99c6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -77,9 +77,25 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
 	global_ref->release = &radeon_ttm_mem_global_release;
 	r = ttm_global_item_ref(global_ref);
 	if (r != 0) {
-		DRM_ERROR("Failed referencing a global TTM memory object.\n");
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
 		return r;
 	}
+
+	rdev->mman.bo_global_ref.mem_glob =
+		rdev->mman.mem_global_ref.object;
+	global_ref = &rdev->mman.bo_global_ref.ref;
+	global_ref->global_type = TTM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = ttm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		ttm_global_item_unref(&rdev->mman.mem_global_ref);
+		return r;
+	}
+
 	rdev->mman.mem_global_referenced = true;
 	return 0;
 }
@@ -87,6 +103,7 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
 static void radeon_ttm_global_fini(struct radeon_device *rdev)
 {
 	if (rdev->mman.mem_global_referenced) {
+		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
 		ttm_global_item_unref(&rdev->mman.mem_global_ref);
 		rdev->mman.mem_global_referenced = false;
 	}
@@ -286,9 +303,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
-		spin_lock(&rdev->mman.bdev.lru_lock);
+		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&rdev->mman.bdev.lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return r;
 	}
 	return r;
@@ -323,9 +342,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	}
 out_cleanup:
 	if (tmp_mem.mm_node) {
-		spin_lock(&rdev->mman.bdev.lru_lock);
+		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&rdev->mman.bdev.lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return r;
 	}
 	return r;
@@ -441,7 +462,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	}
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
-			       rdev->mman.mem_global_ref.object,
+			       rdev->mman.bo_global_ref.ref.object,
 			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f16909ceec93..0d0b1b7afbcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,39 @@
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+	.name = "bo_count",
+	.mode = S_IRUGO
+};
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buffer)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+	&ttm_bo_count,
+	NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+	.show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type = {
+	.release = &ttm_bo_global_kobj_release,
+	.sysfs_ops = &ttm_bo_global_ops,
+	.default_attrs = ttm_bo_global_attrs
+};
+
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
@@ -67,10 +100,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
-		ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
+		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
 		kfree(bo);
 	}
 }
@@ -107,7 +141,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		kref_get(&bo->list_kref);
 
 		if (bo->ttm != NULL) {
-			list_add_tail(&bo->swap, &bdev->swap_lru);
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -142,7 +176,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
 	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@@ -154,9 +188,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
@@ -182,16 +216,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
 				    sequence);
 	if (likely(ret == 0))
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -201,13 +235,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
@@ -218,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
 
@@ -230,14 +265,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 	case ttm_bo_type_kernel:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags, bdev->dummy_read_page);
+					page_flags, glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
 	case ttm_bo_type_user:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
-					bdev->dummy_read_page);
+					glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
@@ -355,6 +390,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver = bdev->driver;
 	int ret;
 
@@ -366,7 +402,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 	spin_unlock(&bo->lock);
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
 	BUG_ON(ret);
 	if (bo->ttm)
@@ -381,7 +417,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		bo->mem.mm_node = NULL;
 	}
 	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	atomic_set(&bo->reserved, 0);
 
@@ -391,14 +427,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		return 0;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
 		void *sync_obj_arg = bo->sync_obj_arg;
 
 		kref_get(&bo->list_kref);
 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 
 		if (sync_obj)
@@ -408,7 +444,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		ret = 0;
 
 	} else {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 		ret = -EBUSY;
 	}
@@ -423,11 +459,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry, *nentry;
 	struct list_head *list, *next;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	list_for_each_safe(list, next, &bdev->ddestroy) {
 		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
 		nentry = NULL;
@@ -444,16 +481,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 		kref_get(&entry->list_kref);
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (nentry) {
 			bool next_onlist = !list_empty(next);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			/*
 			 * Someone might have raced us and removed the
 			 * next entry from the list. We don't bother restarting
@@ -467,7 +504,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			break;
 	}
 	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return ret;
 }
@@ -517,6 +554,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 {
 	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	uint32_t proposed_placement;
 
@@ -565,12 +603,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 		goto out;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (evict_mem.mm_node) {
 		drm_mm_put_block(evict_mem.mm_node);
 		evict_mem.mm_node = NULL;
 	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	bo->evicted = true;
 out:
 	return ret;
@@ -585,6 +623,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
 				  uint32_t mem_type,
 				  bool interruptible, bool no_wait)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct drm_mm_node *node;
 	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -598,7 +637,7 @@ retry_pre_get:
 	if (unlikely(ret != 0))
 		return ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	do {
 		node = drm_mm_search_free(&man->manager, num_pages,
 					  mem->page_alignment, 1);
@@ -619,7 +658,7 @@ retry_pre_get:
 		if (likely(ret == 0))
 			put_count = ttm_bo_del_from_lru(entry);
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 
 		if (unlikely(ret != 0))
 			return ret;
@@ -635,21 +674,21 @@ retry_pre_get:
 		if (ret)
 			return ret;
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	} while (1);
 
 	if (!node) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return -ENOMEM;
 	}
 
 	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
 	if (unlikely(!node)) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		goto retry_pre_get;
 	}
 
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -697,6 +736,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
 
 	uint32_t num_prios = bdev->driver->num_mem_type_prio;
@@ -733,20 +773,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			if (unlikely(ret))
 				return ret;
 
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			node = drm_mm_search_free(&man->manager,
 						  mem->num_pages,
 						  mem->page_alignment,
 						  1);
 			if (unlikely(!node)) {
-				spin_unlock(&bdev->lru_lock);
+				spin_unlock(&glob->lru_lock);
 				break;
 			}
 			node = drm_mm_get_block_atomic(node,
 						       mem->num_pages,
 						       mem->
 						       page_alignment);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 		} while (!node);
 	}
 	if (node)
@@ -816,7 +856,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			  uint32_t proposed_placement,
 			  bool interruptible, bool no_wait)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -852,9 +892,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 out_unlock:
 	if (ret && mem.mm_node) {
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
@@ -990,6 +1030,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	bo->bdev = bdev;
+	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -1002,6 +1043,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->seq_valid = false;
 	bo->persistant_swap_storage = persistant_swap_storage;
 	bo->acc_size = acc_size;
+	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, flags, 0ULL);
 	if (unlikely(ret != 0))
@@ -1040,13 +1082,13 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_buffer_object_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
 {
 	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
 	    PAGE_MASK;
 
-	return bdev->ttm_bo_size + 2 * page_array_size;
+	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -1061,10 +1103,10 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 {
 	struct ttm_buffer_object *bo;
 	int ret;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 
 	size_t acc_size =
-	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1118,6 +1160,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 				       struct list_head *head,
 				       unsigned mem_type, bool allow_errors)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry;
 	int ret;
 	int put_count;
@@ -1126,30 +1169,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 
 	while (!list_empty(head)) {
 		entry = list_first_entry(head, struct ttm_buffer_object, lru);
 		kref_get(&entry->list_kref);
 		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
 		put_count = ttm_bo_del_from_lru(entry);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		while (put_count--)
 			kref_put(&entry->list_kref, ttm_bo_ref_bug);
 		BUG_ON(ret);
 		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
 		ttm_bo_unreserve(entry);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	}
 
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return 0;
 }
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret = -EINVAL;
 
@@ -1171,13 +1215,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
 			drm_mm_takedown(&man->manager);
 		else
 			ret = -EBUSY;
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 
 	return ret;
@@ -1251,11 +1295,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	__free_page(glob->dummy_read_page);
+	kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+
+	mutex_init(&glob->device_list_mutex);
+	spin_lock_init(&glob->lru_lock);
+	glob->mem_glob = bo_ref->mem_glob;
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR TTM_PFX
+		       "Could not register buffer object swapout.\n");
+		goto out_no_shrink;
+	}
+
+	glob->ttm_bo_extra_size =
+		ttm_round_pot(sizeof(struct ttm_tt)) +
+		ttm_round_pot(sizeof(struct ttm_backend));
+
+	glob->ttm_bo_size = glob->ttm_bo_extra_size +
+		ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+	atomic_set(&glob->bo_count, 0);
+
+	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	if (unlikely(ret != 0))
+		kobject_put(&glob->kobj);
+	return ret;
+out_no_shrink:
+	__free_page(glob->dummy_read_page);
+out_no_drp:
+	kfree(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
 	int ret = 0;
 	unsigned i = TTM_NUM_MEM_TYPES;
 	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;
 
 	while (i--) {
 		man = &bdev->man[i];
@@ -1271,98 +1387,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		}
 	}
 
+	mutex_lock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
 	if (!cancel_delayed_work(&bdev->wq))
 		flush_scheduled_work();
 
 	while (ttm_bo_delayed_delete(bdev, true))
 		;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
 	if (list_empty(&bdev->man[0].lru))
 		TTM_DEBUG("Swap list was clean\n");
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
-	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
 	write_lock(&bdev->vm_lock);
 	drm_mm_takedown(&bdev->addr_space_mm);
 	write_unlock(&bdev->vm_lock);
 
-	__free_page(bdev->dummy_read_page);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_global *mem_glob,
-		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset)
 {
 	int ret = -EINVAL;
 
-	bdev->dummy_read_page = NULL;
 	rwlock_init(&bdev->vm_lock);
-	spin_lock_init(&bdev->lru_lock);
+	spin_lock_init(&glob->lru_lock);
 
 	bdev->driver = driver;
-	bdev->mem_glob = mem_glob;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
 
-	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-	if (unlikely(bdev->dummy_read_page == NULL)) {
-		ret = -ENOMEM;
-		goto out_err0;
-	}
-
 	/*
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
 	if (unlikely(ret != 0))
-		goto out_err1;
+		goto out_no_sys;
 
 	bdev->addr_space_rb = RB_ROOT;
 	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 	if (unlikely(ret != 0))
-		goto out_err2;
+		goto out_no_addr_mm;
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	bdev->nice_mode = true;
 	INIT_LIST_HEAD(&bdev->ddestroy);
-	INIT_LIST_HEAD(&bdev->swap_lru);
 	bdev->dev_mapping = NULL;
-	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-	if (unlikely(ret != 0)) {
-		printk(KERN_ERR TTM_PFX
-		       "Could not register buffer object swapout.\n");
-		goto out_err2;
-	}
+	bdev->glob = glob;
 
-	bdev->ttm_bo_extra_size =
-		ttm_round_pot(sizeof(struct ttm_tt)) +
-		ttm_round_pot(sizeof(struct ttm_backend));
-
-	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-		ttm_round_pot(sizeof(struct ttm_buffer_object));
+	mutex_lock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	mutex_unlock(&glob->device_list_mutex);
 
 	return 0;
-out_err2:
+out_no_addr_mm:
 	ttm_bo_clean_mm(bdev, 0);
-out_err1:
-	__free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1607,21 +1699,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-	struct ttm_bo_device *bdev =
-	    container_of(shrink, struct ttm_bo_device, shrink);
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
 	int put_count;
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&bdev->swap_lru))) {
-			spin_unlock(&bdev->lru_lock);
+		if (unlikely(list_empty(&glob->swap_lru))) {
+			spin_unlock(&glob->lru_lock);
 			return -EBUSY;
 		}
 
-		bo = list_first_entry(&bdev->swap_lru,
+		bo = list_first_entry(&glob->swap_lru,
 				      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
@@ -1633,16 +1725,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			ttm_bo_wait_unreserved(bo, false);
 			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 		}
 	}
 
 	BUG_ON(ret != 0);
 	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1696,6 +1788,6 @@ out:
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-	while (ttm_bo_swapout(&bdev->shrink) == 0)
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bdec583901eb..12cd47aa18ce 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (old_mem->mm_node) {
-		spin_lock(&bo->bdev->lru_lock);
+		spin_lock(&bo->glob->lru_lock);
 		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->bdev->lru_lock);
+		spin_unlock(&bo->glob->lru_lock);
 	}
 	old_mem->mm_node = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e1e2566d519..b0f73096d372 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 			set_page_dirty_lock(page);
 
 		ttm->pages[i] = NULL;
-		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
+		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
 		put_page(page);
 	}
 	ttm->state = tt_unpopulated;
@@ -177,8 +177,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
-	struct ttm_bo_device *bdev = ttm->bdev;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
@@ -348,7 +347,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			printk(KERN_ERR TTM_PFX
 			       "Erroneous page count. "
 			       "Leaking pages.\n");
-			ttm_mem_global_free_page(ttm->bdev->mem_glob,
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 cur_page);
 			__free_page(cur_page);
 		}
@@ -394,7 +393,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
 	struct mm_struct *mm = tsk->mm;
 	int ret;
 	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 
 	BUG_ON(num_pages != ttm->num_pages);
 	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@@ -439,8 +438,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 	if (!ttm)
 		return NULL;
 
-	ttm->bdev = bdev;
-
+	ttm->glob = bdev->glob;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 99dc521aa1a9..491146170522 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -155,6 +155,7 @@ struct ttm_buffer_object {
 	 * Members constant at init.
 	 */
 
+	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
 	unsigned long buffer_start;
 	enum ttm_bo_type type;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 62ed733c52a2..9dc32f70b9a2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -32,6 +32,7 @@
 
 #include "ttm/ttm_bo_api.h"
 #include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
 #include "drm_mm.h"
 #include "linux/workqueue.h"
 #include "linux/fs.h"
@@ -160,7 +161,7 @@ struct ttm_tt {
 	long last_lomem_page;
 	uint32_t page_flags;
 	unsigned long num_pages;
-	struct ttm_bo_device *bdev;
+	struct ttm_bo_global *glob;
 	struct ttm_backend *be;
 	struct task_struct *tsk;
 	unsigned long start;
@@ -355,24 +356,73 @@ struct ttm_bo_driver {
 	void *(*sync_obj_ref) (void *sync_obj);
 };
 
-#define TTM_NUM_MEM_TYPES 8
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+
+struct ttm_bo_global_ref {
+	struct ttm_global_reference ref;
+	struct ttm_mem_global *mem_glob;
+};
 
-#define TTM_BO_PRIV_FLAG_MOVING 0	/* Buffer object is moving and needs
-					   idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX 1
 /**
- * struct ttm_bo_device - Buffer object driver device-specific data.
+ * struct ttm_bo_global - Buffer object driver global data.
  *
  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @count: Current number of buffer object.
- * @pages: Current number of pinned pages.
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
- * @shrink: A shrink callback object used for buffre object swap.
+ * @shrink: A shrink callback object used for buffer object swap.
  * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
  * used by a buffer object. This is excluding page arrays and backing pages.
  * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+
+struct ttm_bo_global {
+
+	/**
+	 * Constant after init.
+	 */
+
+	struct kobject kobj;
+	struct ttm_mem_global *mem_glob;
+	struct page *dummy_read_page;
+	struct ttm_mem_shrink shrink;
+	size_t ttm_bo_extra_size;
+	size_t ttm_bo_size;
+	struct mutex device_list_mutex;
+	spinlock_t lru_lock;
+
+	/**
+	 * Protected by device_list_mutex.
+	 */
+	struct list_head device_list;
+
+	/**
+	 * Protected by the lru_lock.
+	 */
+	struct list_head swap_lru;
+
+	/**
+	 * Internal protection.
+	 */
+	atomic_t bo_count;
+};
+
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING 0	/* Buffer object is moving and needs
+					   idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
@@ -390,32 +440,21 @@ struct ttm_bo_device {
 	/*
 	 * Constant after bo device init / atomic.
 	 */
-
-	struct ttm_mem_global *mem_glob;
+	struct list_head device_list;
+	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
-	struct page *dummy_read_page;
-	struct ttm_mem_shrink shrink;
-
-	size_t ttm_bo_extra_size;
-	size_t ttm_bo_size;
-
 	rwlock_t vm_lock;
+	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	/*
 	 * Protected by the vm lock.
 	 */
-	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	struct rb_root addr_space_rb;
 	struct drm_mm addr_space_mm;
 
 	/*
-	 * Might want to change this to one lock per manager.
-	 */
-	spinlock_t lru_lock;
-	/*
-	 * Protected by the lru lock.
+	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
-	struct list_head swap_lru;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
@@ -629,6 +668,9 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
 			     unsigned long *bus_offset,
 			     unsigned long *bus_size);
 
+extern void ttm_bo_global_release(struct ttm_global_reference *ref);
+extern int ttm_bo_global_init(struct ttm_global_reference *ref);
+
 extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
 /**
@@ -646,7 +688,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * !0: Failure.
  */
 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
-			      struct ttm_mem_global *mem_glob,
+			      struct ttm_bo_global *glob,
 			      struct ttm_bo_driver *driver,
 			      uint64_t file_page_offset);
 