Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c       | 69
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  |  9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c     |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c   |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c       | 23
5 files changed, 58 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..1a3e909b7bba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
-	}
+	} else
+		bo->offset = 0;
 
 	return 0;
 
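This hunk fixes a stale-offset bug: moving a buffer into an unmanaged (system) placement previously left bo->offset at its old GPU address. With the new else branch, the tail of the move path presumably reads as below (reconstructed from the hunk; the mm_node line is surrounding context of this era, not shown in the diff):

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	} else
		bo->offset = 0;	/* no managed node, no GPU offset */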
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry, *nentry;
-	struct list_head *list, *next;
-	int ret;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_safe(list, next, &bdev->ddestroy) {
-		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-		nentry = NULL;
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
 
-		/*
-		 * Protect the next list entry from destruction while we
-		 * unlock the lru_lock.
-		 */
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	kref_get(&entry->list_kref);
 
-		if (next != &bdev->ddestroy) {
-			nentry = list_entry(next, struct ttm_buffer_object,
-					    ddestroy);
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
 			kref_get(&nentry->list_kref);
 		}
-		kref_get(&entry->list_kref);
 
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
 
 		spin_lock(&glob->lru_lock);
-		if (nentry) {
-			bool next_onlist = !list_empty(next);
-			spin_unlock(&glob->lru_lock);
-			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			/*
-			 * Someone might have raced us and removed the
-			 * next entry from the list. We don't bother restarting
-			 * list traversal.
-			 */
-
-			if (!next_onlist)
-				break;
-		}
-		if (ret)
+		if (list_empty(&entry->ddestroy))
 			break;
 	}
-	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&glob->lru_lock);
 
+out_unlock:
+	spin_unlock(&glob->lru_lock);
+out:
+	if (entry)
+		kref_put(&entry->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
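The rewrite above closes a race in delayed destruction. The old loop cached the next pointer, dropped lru_lock to run ttm_bo_cleanup_refs(), and afterwards relied on the next_onlist heuristic; once the reference on nentry was dropped, the cached pointers could refer to a freed buffer. The new code walks the list hand-over-hand: it always holds a kref on the entry it is about to process, takes a kref on the successor before dropping the lock, and re-checks list membership under the lock each iteration. The bare pattern, as a self-contained sketch (hypothetical item types and helpers, not the TTM code):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {				/* hypothetical element */
		struct kref kref;
		struct list_head node;
	};

	extern void item_release(struct kref *kref);	/* frees the item */
	extern void item_process(struct item *it);	/* may sleep/unlink */

	static void walk_hand_over_hand(struct list_head *head, spinlock_t *lock)
	{
		struct item *cur;

		spin_lock(lock);
		if (list_empty(head)) {
			spin_unlock(lock);
			return;
		}
		cur = list_first_entry(head, struct item, node);
		kref_get(&cur->kref);		/* pin before unlocking */

		for (;;) {
			struct item *next = NULL;

			if (cur->node.next != head) {
				next = list_first_entry(&cur->node,
							struct item, node);
				kref_get(&next->kref);	/* pin successor */
			}
			spin_unlock(lock);

			item_process(cur);	/* safe: reference held */
			kref_put(&cur->kref, item_release);
			cur = next;
			if (!cur)
				return;

			spin_lock(lock);
			if (list_empty(&cur->node)) {	/* raced: unlinked */
				spin_unlock(lock);
				kref_put(&cur->kref, item_release);
				return;
			}
		}
	}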
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);
 
+
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
+			mem->placement = cur_flags;
+			mem->mm_node = NULL;
+			return 0;
+		}
+
 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
 						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
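The added early return recognizes that TTM_PL_SYSTEM is not backed by a range manager: there is no allocation to perform, so the system placement can be reported immediately with mem->mm_node = NULL rather than falling through to ttm_bo_mem_force_space() and its eviction logic. The rest of TTM already treats a NULL mm_node as "nothing to release", roughly along these lines (a sketch of the era's cleanup idiom, not a verbatim quote):

	if (mem->mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem->mm_node);	/* return the range */
		spin_unlock(&glob->lru_lock);
		mem->mm_node = NULL;
	}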
@@ -1844,6 +1845,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * anyone tries to access a ttm page.
 	 */
 
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
@@ -1864,3 +1868,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);
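Two driver-facing additions close out the ttm_bo.c changes: swap_notify gives drivers a callback just before a buffer's pages are swapped out, and ttm_bo_swapout_all() is now exported so a driver can force the swapout path itself (typically on suspend). A hypothetical driver hook, with all foo_* names invented; only the swap_notify member and its call site above are from the patch:

	static void foo_swap_notify(struct ttm_buffer_object *bo)
	{
		/* drop driver-private kernel mappings of bo's pages
		 * before TTM hands them to shmem swap storage */
		foo_bo_unmap(container_of(bo, struct foo_bo, base));
	}

	static struct ttm_bo_driver foo_bo_driver = {
		/* ... other callbacks ... */
		.swap_notify = foo_swap_notify,
	};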
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f6..5ca37a58a98c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	uint32_t save_flags = old_mem->placement;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 				TTM_PL_MASK_MEM);
 		old_mem->mem_type = TTM_PL_SYSTEM;
-		save_flags = old_mem->placement;
 	}
 
 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	unsigned long i;
 	unsigned long page;
 	unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
 		ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
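Every ttm_bo_util.c hunk removes the same piece of dead code: save_flags was computed with ttm_flag_masked() and never read again, so the masked result was simply discarded. For reference, ttm_flag_masked() merges into *old only the bits of new selected by mask; its definition in the TTM headers of this era is presumably the usual bit-merge idiom:

	static inline void ttm_flag_masked(uint32_t *old, uint32_t new,
					   uint32_t mask)
	{
		*old ^= (*old ^ new) & mask;
	}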
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
+EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
 	wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
+EXPORT_SYMBOL(ttm_suspend_lock);
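Exporting ttm_suspend_lock() and ttm_suspend_unlock() lets modular drivers bracket power-management transitions with the TTM suspend lock. A hypothetical usage sketch; the foo_* structures are invented and only the exported TTM calls are real:

	static int foo_suspend(struct drm_device *dev, pm_message_t state)
	{
		struct foo_private *priv = dev->dev_private;

		ttm_suspend_lock(&priv->ttm_lock);  /* block new validations */
		ttm_bo_swapout_all(&priv->bdev);    /* make pages swappable */
		return 0;
	}

	static int foo_resume(struct drm_device *dev)
	{
		struct foo_private *priv = dev->dev_private;

		ttm_suspend_unlock(&priv->ttm_lock);
		return 0;
	}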
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824b..75e9d6f86ba4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
-	struct ttm_base_object *obj;
 	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
 	struct ttm_object_file *tfile;
 };
 
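Swapping obj and ref_type is a structure-packing fix rather than a behavioral change: the 4-byte kref left a 4-byte hole before the 8-byte obj pointer, and the 4-byte enum then opened a second hole before tfile. Moving the enum up fills the first hole. Assuming x86-64 with 4-byte enums (offsets illustrative):

	struct ttm_ref_object {
		struct drm_hash_item hash;	/* offset  0, size 24 */
		struct list_head head;		/* offset 24, size 16 */
		struct kref kref;		/* offset 40, size  4 */
		enum ttm_ref_type ref_type;	/* offset 44, fills hole */
		struct ttm_base_object *obj;	/* offset 48, size  8 */
		struct ttm_object_file *tfile;	/* offset 56, size  8 */
	};					/* 64 bytes; 72 before */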
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba5..e2123af7775a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -198,17 +198,26 @@ EXPORT_SYMBOL(ttm_tt_populate);
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_state)
 {
+	int ret = 0;
+
 	if (PageHighMem(p))
 		return 0;
 
-	switch (c_state) {
-	case tt_cached:
-		return set_pages_wb(p, 1);
-	case tt_wc:
-		return set_memory_wc((unsigned long) page_address(p), 1);
-	default:
-		return set_pages_uc(p, 1);
-	}
+	if (get_page_memtype(p) != -1) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+
+		ret = set_pages_wb(p, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (c_state == tt_wc)
+		ret = set_memory_wc((unsigned long) page_address(p), 1);
+	else if (c_state == tt_uncached)
+		ret = set_pages_uc(p, 1);
+
+	return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
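The rewritten ttm_tt_set_page_caching() encodes an x86 PAT rule: a page whose memtype is reserved as WC or UC must first be returned to writeback, which releases the memtype, before a different one can be claimed. get_page_memtype() returning -1 means the page is in the default WB state with no reservation. The states being switched between are (as declared in the TTM headers of this era):

	enum ttm_caching_state {
		tt_uncached,	/* strongly uncached */
		tt_wc,		/* write-combined */
		tt_cached	/* normal write-back */
	};

In effect, a tt_cached request on a WC page now performs just the WB transition, and a tt_wc request on a UC page goes UC -> WB -> WC, where the old switch statement attempted the direct, PAT-invalid UC -> WC flip.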