Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  77
1 file changed, 52 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2eea5e88..2920f9a279e1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
 	return -EINVAL;
 }
 
-static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
-					struct ttm_mem_type_manager *man)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
 	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
 	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
 	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
-	printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
+	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
 		man->available_caching);
 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
 		man->default_caching);
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(&man->manager, TTM_PFX);
-	spin_unlock(&glob->lru_lock);
+	if (mem_type != TTM_PL_SYSTEM) {
+		spin_lock(&bdev->glob->lru_lock);
+		drm_mm_debug_table(&man->manager, TTM_PFX);
+		spin_unlock(&bdev->glob->lru_lock);
+	}
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 					struct ttm_placement *placement)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_mem_type_manager *man;
 	int i, ret, mem_type;
 
-	printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
 		bo, bo->mem.num_pages, bo->mem.size >> 10,
 		bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 					      &mem_type);
 		if (ret)
 			return;
-		man = &bdev->man[mem_type];
 		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
 			i, placement->placement[i], mem_type);
-		ttm_mem_type_manager_debug(glob, man);
+		ttm_mem_type_debug(bo->bdev, mem_type);
 	}
 }
 
@@ -465,6 +464,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	spin_unlock(&bo->lock);
 
 	spin_lock(&glob->lru_lock);
+	put_count = ttm_bo_del_from_lru(bo);
+
 	ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
 	BUG_ON(ret);
 	if (bo->ttm)
@@ -472,20 +473,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 	if (!list_empty(&bo->ddestroy)) {
 		list_del_init(&bo->ddestroy);
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		++put_count;
 	}
 	if (bo->mem.mm_node) {
 		bo->mem.mm_node->private = NULL;
 		drm_mm_put_block(bo->mem.mm_node);
 		bo->mem.mm_node = NULL;
 	}
-	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
 	atomic_set(&bo->reserved, 0);
 
 	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 	return 0;
 }
@@ -684,19 +684,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_buffer_object *bo;
 	int ret, put_count = 0;
 
+retry:
 	spin_lock(&glob->lru_lock);
+	if (list_empty(&man->lru)) {
+		spin_unlock(&glob->lru_lock);
+		return -EBUSY;
+	}
+
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
-	if (likely(ret == 0))
-		put_count = ttm_bo_del_from_lru(bo);
+
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+	if (unlikely(ret == -EBUSY)) {
+		spin_unlock(&glob->lru_lock);
+		if (likely(!no_wait))
+			ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+
+		/**
+		 * We *need* to retry after releasing the lru lock.
+		 */
+
+		if (unlikely(ret != 0))
+			return ret;
+		goto retry;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		return ret;
+
+	BUG_ON(ret != 0);
+
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
 	ret = ttm_bo_evict(bo, interruptible, no_wait);
 	ttm_bo_unreserve(bo);
+
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
@@ -849,7 +875,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	int i, ret;
 
 	mem->mm_node = NULL;
-	for (i = 0; i <= placement->num_placement; ++i) {
+	for (i = 0; i < placement->num_placement; ++i) {
 		ret = ttm_mem_type_from_flags(placement->placement[i],
 					      &mem_type);
 		if (ret)
@@ -900,8 +926,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	if (!type_found)
 		return -EINVAL;
 
-	for (i = 0; i <= placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
 					      &mem_type);
 		if (ret)
 			return ret;
@@ -911,7 +937,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!ttm_bo_mt_compatible(man,
 					  bo->type == ttm_bo_type_user,
 					  mem_type,
-					  placement->placement[i],
+					  placement->busy_placement[i],
 					  &cur_flags))
 			continue;
 
@@ -921,7 +947,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
 
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
@@ -1115,6 +1141,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
+	bo->mem.size = num_pages << PAGE_SHIFT;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
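
The behavioural core of the patch is the retry loop added to ttm_mem_evict_first(): when the buffer at the head of the LRU is already reserved by another thread, the function now drops the LRU lock, waits for the reservation to clear, and restarts from the top, because the list may have changed while the lock was not held (hence the new list_empty() check and the goto retry). Below is a minimal user-space sketch of that lock-drop-and-retry pattern built on pthreads; it is not TTM code, and the names struct buf, lru_head, buf_unreserved and evict_first() are hypothetical stand-ins chosen purely for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a buffer object on an LRU list. */
struct buf {
	struct buf *next;
	bool reserved;		/* held by another thread while true */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t buf_unreserved = PTHREAD_COND_INITIALIZER;
static struct buf *lru_head;	/* head = least recently used */

/*
 * Evict the first buffer on the LRU, mirroring the retry structure of
 * ttm_mem_evict_first(): if the head is reserved we cannot evict it
 * yet, so we sleep until it is released and then start over from the
 * top -- pthread_cond_wait() drops lru_lock while sleeping, so the
 * list may have changed and the old head must not be trusted.  (The
 * real code also takes a list_kref on the object before waiting so it
 * cannot vanish; that is omitted here for brevity.)
 */
static int evict_first(void)
{
	struct buf *bo;

retry:
	pthread_mutex_lock(&lru_lock);
	if (!lru_head) {
		pthread_mutex_unlock(&lru_lock);
		return -EBUSY;		/* nothing left to evict */
	}

	bo = lru_head;
	if (bo->reserved) {
		while (bo->reserved)
			pthread_cond_wait(&buf_unreserved, &lru_lock);
		pthread_mutex_unlock(&lru_lock);
		/* We *need* to retry after the lock was dropped. */
		goto retry;
	}

	/* Head is ours: unlink it and "evict". */
	lru_head = bo->next;
	pthread_mutex_unlock(&lru_lock);

	printf("evicting %p\n", (void *)bo);
	return 0;
}

int main(void)
{
	struct buf a = { .next = NULL, .reserved = false };

	lru_head = &a;
	return evict_first() ? 1 : 0;
}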