Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	292
1 files changed, 192 insertions, 100 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f16909ceec93..0d0b1b7afbcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,39 @@
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+	.name = "bo_count",
+	.mode = S_IRUGO
+};
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buffer)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+	&ttm_bo_count,
+	NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+	.show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type = {
+	.release = &ttm_bo_global_kobj_release,
+	.sysfs_ops = &ttm_bo_global_ops,
+	.default_attrs = ttm_bo_global_attrs
+};
+
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
@@ -67,10 +100,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
-		ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
+		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
 		kfree(bo);
 	}
 }
@@ -107,7 +141,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		kref_get(&bo->list_kref);
 
 		if (bo->ttm != NULL) {
-			list_add_tail(&bo->swap, &bdev->swap_lru);
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -142,7 +176,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
 	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@@ -154,9 +188,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
@@ -182,16 +216,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
 				    sequence);
 	if (likely(ret == 0))
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -201,13 +235,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
@@ -218,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
 
@@ -230,14 +265,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 	case ttm_bo_type_kernel:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags, bdev->dummy_read_page);
+					page_flags, glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
 	case ttm_bo_type_user:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
-					bdev->dummy_read_page);
+					glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
@@ -355,6 +390,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver = bdev->driver;
 	int ret;
 
@@ -366,7 +402,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 		spin_unlock(&bo->lock);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
 		BUG_ON(ret);
 		if (bo->ttm)
@@ -381,7 +417,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 			bo->mem.mm_node = NULL;
 		}
 		put_count = ttm_bo_del_from_lru(bo);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 
 		atomic_set(&bo->reserved, 0);
 
@@ -391,14 +427,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		return 0;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
 		void *sync_obj_arg = bo->sync_obj_arg;
 
 		kref_get(&bo->list_kref);
 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 
 		if (sync_obj)
@@ -408,7 +444,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		ret = 0;
 
 	} else {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 		ret = -EBUSY;
 	}
@@ -423,11 +459,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry, *nentry;
 	struct list_head *list, *next;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	list_for_each_safe(list, next, &bdev->ddestroy) {
 		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
 		nentry = NULL;
@@ -444,16 +481,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 		kref_get(&entry->list_kref);
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (nentry) {
 			bool next_onlist = !list_empty(next);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			/*
 			 * Someone might have raced us and removed the
 			 * next entry from the list. We don't bother restarting
@@ -467,7 +504,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			break;
 	}
 	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return ret;
 }
@@ -517,6 +554,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 {
 	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	uint32_t proposed_placement;
 
@@ -565,12 +603,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 		goto out;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (evict_mem.mm_node) {
 		drm_mm_put_block(evict_mem.mm_node);
 		evict_mem.mm_node = NULL;
 	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	bo->evicted = true;
 out:
 	return ret;
@@ -585,6 +623,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
 				  uint32_t mem_type,
 				  bool interruptible, bool no_wait)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct drm_mm_node *node;
 	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -598,7 +637,7 @@ retry_pre_get:
 	if (unlikely(ret != 0))
 		return ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	do {
 		node = drm_mm_search_free(&man->manager, num_pages,
 					  mem->page_alignment, 1);
@@ -619,7 +658,7 @@ retry_pre_get:
 		if (likely(ret == 0))
 			put_count = ttm_bo_del_from_lru(entry);
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 
 		if (unlikely(ret != 0))
 			return ret;
@@ -635,21 +674,21 @@ retry_pre_get:
 		if (ret)
 			return ret;
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	} while (1);
 
 	if (!node) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return -ENOMEM;
 	}
 
 	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
 	if (unlikely(!node)) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		goto retry_pre_get;
 	}
 
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -697,6 +736,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
 
 	uint32_t num_prios = bdev->driver->num_mem_type_prio;
@@ -733,20 +773,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			if (unlikely(ret))
 				return ret;
 
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			node = drm_mm_search_free(&man->manager,
 						  mem->num_pages,
 						  mem->page_alignment,
 						  1);
 			if (unlikely(!node)) {
-				spin_unlock(&bdev->lru_lock);
+				spin_unlock(&glob->lru_lock);
 				break;
 			}
 			node = drm_mm_get_block_atomic(node,
 						       mem->num_pages,
 						       mem->
 						       page_alignment);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 		} while (!node);
 	}
 	if (node)
@@ -816,7 +856,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		       uint32_t proposed_placement,
 		       bool interruptible, bool no_wait)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -852,9 +892,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 out_unlock:
 	if (ret && mem.mm_node) {
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
@@ -990,6 +1030,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	bo->bdev = bdev;
+	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -1002,6 +1043,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->seq_valid = false;
 	bo->persistant_swap_storage = persistant_swap_storage;
 	bo->acc_size = acc_size;
+	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, flags, 0ULL);
 	if (unlikely(ret != 0))
@@ -1040,13 +1082,13 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_buffer_object_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
 {
 	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
 	    PAGE_MASK;
 
-	return bdev->ttm_bo_size + 2 * page_array_size;
+	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -1061,10 +1103,10 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 {
 	struct ttm_buffer_object *bo;
 	int ret;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 
 	size_t acc_size =
-	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1118,6 +1160,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 				   struct list_head *head,
 				   unsigned mem_type, bool allow_errors)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry;
 	int ret;
 	int put_count;
@@ -1126,30 +1169,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 
 	while (!list_empty(head)) {
 		entry = list_first_entry(head, struct ttm_buffer_object, lru);
 		kref_get(&entry->list_kref);
 		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
 		put_count = ttm_bo_del_from_lru(entry);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		while (put_count--)
 			kref_put(&entry->list_kref, ttm_bo_ref_bug);
 		BUG_ON(ret);
 		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
 		ttm_bo_unreserve(entry);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	}
 
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return 0;
 }
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret = -EINVAL;
 
@@ -1171,13 +1215,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
 			drm_mm_takedown(&man->manager);
 		else
 			ret = -EBUSY;
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 
 	return ret;
@@ -1251,11 +1295,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	__free_page(glob->dummy_read_page);
+	kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+
+	mutex_init(&glob->device_list_mutex);
+	spin_lock_init(&glob->lru_lock);
+	glob->mem_glob = bo_ref->mem_glob;
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR TTM_PFX
+		       "Could not register buffer object swapout.\n");
+		goto out_no_shrink;
+	}
+
+	glob->ttm_bo_extra_size =
+		ttm_round_pot(sizeof(struct ttm_tt)) +
+		ttm_round_pot(sizeof(struct ttm_backend));
+
+	glob->ttm_bo_size = glob->ttm_bo_extra_size +
+		ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+	atomic_set(&glob->bo_count, 0);
+
+	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	if (unlikely(ret != 0))
+		kobject_put(&glob->kobj);
+	return ret;
+out_no_shrink:
+	__free_page(glob->dummy_read_page);
+out_no_drp:
+	kfree(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
 	int ret = 0;
 	unsigned i = TTM_NUM_MEM_TYPES;
 	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;
 
 	while (i--) {
 		man = &bdev->man[i];
@@ -1271,98 +1387,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		}
 	}
 
+	mutex_lock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
 	if (!cancel_delayed_work(&bdev->wq))
 		flush_scheduled_work();
 
 	while (ttm_bo_delayed_delete(bdev, true))
 		;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
 	if (list_empty(&bdev->man[0].lru))
 		TTM_DEBUG("Swap list was clean\n");
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
-	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
 	write_lock(&bdev->vm_lock);
 	drm_mm_takedown(&bdev->addr_space_mm);
 	write_unlock(&bdev->vm_lock);
 
-	__free_page(bdev->dummy_read_page);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_global *mem_glob,
-		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset)
 {
 	int ret = -EINVAL;
 
-	bdev->dummy_read_page = NULL;
 	rwlock_init(&bdev->vm_lock);
-	spin_lock_init(&bdev->lru_lock);
+	spin_lock_init(&glob->lru_lock);
 
 	bdev->driver = driver;
-	bdev->mem_glob = mem_glob;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
 
-	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-	if (unlikely(bdev->dummy_read_page == NULL)) {
-		ret = -ENOMEM;
-		goto out_err0;
-	}
-
 	/*
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
 	if (unlikely(ret != 0))
-		goto out_err1;
+		goto out_no_sys;
 
 	bdev->addr_space_rb = RB_ROOT;
 	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 	if (unlikely(ret != 0))
-		goto out_err2;
+		goto out_no_addr_mm;
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	bdev->nice_mode = true;
 	INIT_LIST_HEAD(&bdev->ddestroy);
-	INIT_LIST_HEAD(&bdev->swap_lru);
 	bdev->dev_mapping = NULL;
-	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-	if (unlikely(ret != 0)) {
-		printk(KERN_ERR TTM_PFX
-		       "Could not register buffer object swapout.\n");
-		goto out_err2;
-	}
+	bdev->glob = glob;
 
-	bdev->ttm_bo_extra_size =
-		ttm_round_pot(sizeof(struct ttm_tt)) +
-		ttm_round_pot(sizeof(struct ttm_backend));
-
-	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-		ttm_round_pot(sizeof(struct ttm_buffer_object));
+	mutex_lock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	mutex_unlock(&glob->device_list_mutex);
 
 	return 0;
-out_err2:
+out_no_addr_mm:
 	ttm_bo_clean_mm(bdev, 0);
-out_err1:
-	__free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1607,21 +1699,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-	struct ttm_bo_device *bdev =
-	    container_of(shrink, struct ttm_bo_device, shrink);
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
 	int put_count;
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&bdev->swap_lru))) {
-			spin_unlock(&bdev->lru_lock);
+		if (unlikely(list_empty(&glob->swap_lru))) {
+			spin_unlock(&glob->lru_lock);
 			return -EBUSY;
 		}
 
-		bo = list_first_entry(&bdev->swap_lru,
+		bo = list_first_entry(&glob->swap_lru,
 				      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
@@ -1633,16 +1725,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			ttm_bo_wait_unreserved(bo, false);
 			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 		}
 	}
 
 	BUG_ON(ret != 0);
 	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1696,6 +1788,6 @@ out:
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-	while (ttm_bo_swapout(&bdev->shrink) == 0)
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
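
For reference, a minimal sketch of how a driver would be expected to wire up the shared object after this change: take a reference on the struct ttm_bo_global through the ttm_global_reference init/release hooks exported above (ttm_bo_global_init / ttm_bo_global_release), then pass the resulting object to the new ttm_bo_device_init() signature. The ttm_global_item_ref() helper and TTM_GLOBAL_TTM_BO type are assumed from the companion ttm_global infrastructure of the same series; the my_drm_private structure, my_bo_driver, and MY_FILE_PAGE_OFFSET are hypothetical placeholders.

/* Sketch only: assumes priv->mem_global_ref was referenced first. */
static int my_bo_global_init(struct my_drm_private *priv)
{
	struct ttm_global_reference *global_ref = &priv->bo_global_ref.ref;
	int ret;

	/* The bo global shares the driver's already-referenced mem global. */
	priv->bo_global_ref.mem_glob = priv->mem_global_ref.object;

	global_ref->global_type = TTM_GLOBAL_TTM_BO;	/* assumed constant */
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;		/* exported above */
	global_ref->release = &ttm_bo_global_release;
	ret = ttm_global_item_ref(global_ref);		/* assumed helper */
	if (unlikely(ret != 0))
		return ret;

	/* ttm_bo_device_init() now takes the shared glob, not a mem_glob. */
	return ttm_bo_device_init(&priv->bdev, global_ref->object,
				  &my_bo_driver, MY_FILE_PAGE_OFFSET);
}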