-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            |  32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  | 124
-rw-r--r--  include/drm/ttm/ttm_bo_api.h            |  38
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h         |  14
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h      |   6
5 files changed, 186 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322d8f5d..a586378b1b2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
         }
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
         int put_count = 0;
 
@@ -267,6 +263,15 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
         BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                         bool never_free)
+{
+        while (count--)
+                kref_put(&bo->list_kref,
+                         (never_free || (count >= 0)) ? ttm_bo_ref_bug :
+                         ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
                    bool interruptible,
                    bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,8 +287,7 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
         put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return ret;
 }
@@ -496,8 +500,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 spin_unlock(&glob->lru_lock);
                 ttm_bo_cleanup_memtype_use(bo);
 
-                while (put_count--)
-                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
+                ttm_bo_list_ref_sub(bo, put_count, true);
 
                 return;
         } else {
@@ -580,8 +583,7 @@ retry:
         spin_unlock(&glob->lru_lock);
         ttm_bo_cleanup_memtype_use(bo);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return 0;
 }
@@ -802,8 +804,7 @@ retry:
 
         BUG_ON(ret != 0);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
         ttm_bo_unreserve(bo);
@@ -1783,8 +1784,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         /**
          * Wait for GPU, then move to system cached.
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..201a71d111ec 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,6 +32,72 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+                if (!entry->reserved)
+                        continue;
+
+                if (entry->removed) {
+                        ttm_bo_add_to_lru(bo);
+                        entry->removed = false;
+
+                }
+                entry->reserved = false;
+                atomic_set(&bo->reserved, 0);
+                wake_up_all(&bo->event_queue);
+        }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+                if (!entry->reserved)
+                        continue;
+
+                if (!entry->removed) {
+                        entry->put_count = ttm_bo_del_from_lru(bo);
+                        entry->removed = true;
+                }
+        }
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+
+                if (entry->put_count) {
+                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
+                        entry->put_count = 0;
+                }
+        }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+                                         struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+        int ret;
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ret = ttm_bo_wait_unreserved(bo, true);
+        spin_lock(&glob->lru_lock);
+        if (unlikely(ret != 0))
+                ttm_eu_backoff_reservation_locked(list);
+        return ret;
+}
+
+
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
         struct ttm_validate_buffer *entry;
@@ -61,35 +127,71 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
 {
+        struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
 
+        if (list_empty(list))
+                return 0;
+
+        list_for_each_entry(entry, list, head) {
+                entry->reserved = false;
+                entry->put_count = 0;
+                entry->removed = false;
+        }
+
+        entry = list_first_entry(list, struct ttm_validate_buffer, head);
+        glob = entry->bo->glob;
+
 retry:
+        spin_lock(&glob->lru_lock);
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
-                entry->reserved = false;
-                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-                if (ret != 0) {
-                        ttm_eu_backoff_reservation(list);
-                        if (ret == -EAGAIN) {
-                                ret = ttm_bo_wait_unreserved(bo, true);
-                                if (unlikely(ret != 0))
-                                        return ret;
-                                goto retry;
-                        } else
+retry_this_bo:
+                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+                switch (ret) {
+                case 0:
+                        break;
+                case -EBUSY:
+                        ret = ttm_eu_wait_unreserved_locked(list, bo);
+                        if (unlikely(ret != 0)) {
+                                spin_unlock(&glob->lru_lock);
+                                ttm_eu_list_ref_sub(list);
+                                return ret;
+                        }
+                        goto retry_this_bo;
+                case -EAGAIN:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        ret = ttm_bo_wait_unreserved(bo, true);
+                        if (unlikely(ret != 0))
                                 return ret;
+                        goto retry;
+                default:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        return ret;
                 }
 
                 entry->reserved = true;
                 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                        ttm_eu_backoff_reservation(list);
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
                         ret = ttm_bo_wait_cpu(bo, false);
                         if (ret)
                                 return ret;
                         goto retry;
                 }
         }
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ttm_eu_list_ref_sub(list);
+
         return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
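
For orientation, here is a hedged driver-side sketch of how the reworked list reservation is meant to be used; it is not part of the patch. The example_* names and the command-emission stub are illustrative assumptions. Only the ttm_eu_* entry points, the ttm_validate_buffer fields, and ttm_eu_fence_buffer_objects() from the same header (not shown in this diff) come from the TTM code itself.

#include <linux/list.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Placeholder for the driver's actual command-stream emission. */
static int example_emit_commands(struct ttm_validate_buffer *bufs,
                                 unsigned int n)
{
        return 0;
}

/*
 * Hypothetical submission path: bufs[i].bo is assumed to have been set up
 * by the caller.  The reserved/removed/put_count fields are (re)initialized
 * by ttm_eu_reserve_buffers() itself, so they need no setup here.
 */
static int example_submit(struct ttm_validate_buffer *bufs, unsigned int n,
                          uint32_t val_seq, void *sync_obj)
{
        LIST_HEAD(validate_list);
        unsigned int i;
        int ret;

        for (i = 0; i < n; i++) {
                /* new_sync_obj_arg is driver-specific; unused in this sketch. */
                bufs[i].new_sync_obj_arg = NULL;
                list_add_tail(&bufs[i].head, &validate_list);
        }

        /*
         * Reserve every buffer on the list, or none: on failure the
         * function has already backed off whatever it had reserved.
         */
        ret = ttm_eu_reserve_buffers(&validate_list, val_seq);
        if (ret)
                return ret;

        ret = example_emit_commands(bufs, n);
        if (ret) {
                /* Submission failed: drop all reservations explicitly. */
                ttm_eu_backoff_reservation(&validate_list);
                return ret;
        }

        /* Attach the fence to every buffer and unreserve the list. */
        ttm_eu_fence_buffer_objects(&validate_list, sync_obj);
        return 0;
}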
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a535..b0fc9c12554b 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -364,6 +364,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref.
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                                bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6..95068e6024db 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -864,6 +864,20 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                           bool interruptible,
                           bool no_wait, bool use_sequence, uint32_t sequence);
 
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * Similar to ttm_bo_reserve, but must be called with the glob::lru_lock
+ * spinlock held, and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ */
+
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+                                 bool interruptible,
+                                 bool no_wait, bool use_sequence,
+                                 uint32_t sequence);
+
 /**
  * ttm_bo_unreserve
  *
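
To make the new calling convention concrete, the following is a hedged single-buffer sketch of how ttm_bo_reserve_locked() combines with the newly exported LRU helpers, mirroring what ttm_eu_reserve_buffers() does per list entry. The example_ name is hypothetical, and the -EAGAIN case (sequence already reserved) is simply propagated here rather than retried as the list code does.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/*
 * Hypothetical illustration: reserve a single bo without sleeping while
 * glob->lru_lock is held, then take it off the LRU lists and drop the
 * list references outside the spinlock.
 */
static int example_reserve_off_lru(struct ttm_buffer_object *bo,
                                   uint32_t val_seq)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret;

        spin_lock(&glob->lru_lock);
        for (;;) {
                /* no_wait == true: never sleep with the spinlock held. */
                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
                if (ret != -EBUSY)
                        break;

                /* Contended: drop the lock, sleep, then try again. */
                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, true);
                if (ret)
                        return ret;
                spin_lock(&glob->lru_lock);
        }
        if (ret) {
                /* -EAGAIN (already reserved with this sequence) or error. */
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        /* Reserved: detach from the LRU lists while still under the lock. */
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        /* Drop the LRU list references outside the spinlock; never_free
         * is true here, mirroring the call sites in this patch. */
        ttm_bo_list_ref_sub(bo, put_count, true);
        return 0;
}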
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..fd09b8438977 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,9 @@
  * @bo: refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @reservied: Indicates whether @bo has been reserved for validation.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
  */
 
 struct ttm_validate_buffer {
@@ -49,6 +51,8 @@ struct ttm_validate_buffer {
         struct ttm_buffer_object *bo;
         void *new_sync_obj_arg;
         bool reserved;
+        bool removed;
+        int put_count;
 };
 
 /**
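
Finally, a small hedged sketch of how a driver might carry the extended ttm_validate_buffer: the struct is typically embedded in a driver-private per-buffer wrapper and linked onto the list handed to ttm_eu_reserve_buffers(). The example_ names and the extra field are illustrative only; the new reserved/removed/put_count members are managed entirely by the ttm_eu_* helpers added above.

#include <linux/list.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical driver wrapper around one buffer to be validated. */
struct example_validate_entry {
        struct ttm_validate_buffer tv;  /* list linkage + ttm_eu_* state */
        uint64_t gpu_offset;            /* driver-private, illustrative only */
};

/*
 * Queue one buffer for validation.  Only head, bo and new_sync_obj_arg
 * need to be filled in; reserved, removed and put_count are initialized
 * and maintained by ttm_eu_reserve_buffers() and its helpers.
 */
static void example_queue_bo(struct list_head *validate_list,
                             struct example_validate_entry *entry,
                             struct ttm_buffer_object *bo,
                             void *new_sync_obj_arg)
{
        entry->tv.bo = bo;
        entry->tv.new_sync_obj_arg = new_sync_obj_arg;
        list_add_tail(&entry->tv.head, validate_list);
}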