path: root/drivers/gpu/drm/ttm
author		Maarten Lankhorst <m.b.lankhorst@gmail.com>	2013-06-27 07:48:17 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-06-27 22:02:20 -0400
commit		ecff665f5e3f1c6909353e00b9420e45ae23d995 (patch)
tree		8fed7d4570ec707427e954c6d2695d1549e08364 /drivers/gpu/drm/ttm
parent		786d7257e537da0674c02e16e3b30a44665d1cee (diff)
drm/ttm: make ttm reservation calls behave like reservation calls
This commit converts the source of the val_seq counter to the ww_mutex api. The reservation objects are converted later, because there is still a lockdep splat in nouveau that has to be resolved first.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
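For readers unfamiliar with the API the ticket comes from: the ww_acquire_ctx that replaces the raw val_seq number below belongs to the kernel's wound/wait mutex interface. The sketch that follows shows the generic acquire pattern this patch is aligning TTM with. It is illustrative only: the class, object type and function names are made up for this example, they are not part of the patch, and the error handling is reduced to the minimum.

	/* Minimal wound/wait locking sketch (illustrative names, not TTM's). */
	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(example_ww_class);

	struct example_obj {
		struct ww_mutex lock;	/* assumed set up with ww_mutex_init(&obj->lock, &example_ww_class) */
	};

	static int example_lock_pair(struct example_obj *a, struct example_obj *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &example_ww_class);	/* hands out the stamp ("ticket") */

		ret = ww_mutex_lock(&a->lock, &ctx);		/* 0 or -EDEADLK */
		if (ret)
			goto out_fini;				/* caller may retry with a new context */

		ret = ww_mutex_lock(&b->lock, &ctx);
		if (ret) {
			/* -EDEADLK: an older ticket owns b, so we back off,
			 * sleep on the contended lock, then retake the rest. */
			ww_mutex_unlock(&a->lock);
			ww_mutex_lock_slow(&b->lock, &ctx);
			ret = ww_mutex_lock(&a->lock, &ctx);
			if (ret) {
				/* give up instead of looping, for brevity */
				ww_mutex_unlock(&b->lock);
				goto out_fini;
			}
		}

		ww_acquire_done(&ctx);		/* no further locks will be taken */

		/* ... work on a and b ... */

		ww_mutex_unlock(&b->lock);
		ww_mutex_unlock(&a->lock);
		ret = 0;
	out_fini:
		ww_acquire_fini(&ctx);
		return ret;
	}

The TTM functions changed below do not call ww_mutex yet; they only switch to the ticket and its stamp so the actual reservation objects can be converted in a later patch.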
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		50
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	58
2 files changed, 62 insertions, 46 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d44a58..b912375b9c18 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			 bool interruptible,
-			 bool no_wait, bool use_sequence, uint32_t sequence)
+			 bool no_wait, bool use_ticket,
+			 struct ww_acquire_ctx *ticket)
 {
 	int ret;
 
@@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 	/**
 	 * Deadlock avoidance for multi-bo reserving.
 	 */
-	if (use_sequence && bo->seq_valid) {
+	if (use_ticket && bo->seq_valid) {
 		/**
 		 * We've already reserved this one.
 		 */
-		if (unlikely(sequence == bo->val_seq))
+		if (unlikely(ticket->stamp == bo->val_seq))
 			return -EDEADLK;
 		/**
 		 * Already reserved by a thread that will not back
 		 * off for us. We need to back off.
 		 */
-		if (unlikely(sequence - bo->val_seq < (1 << 31)))
+		if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
 			return -EAGAIN;
 	}
 
@@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	if (use_sequence) {
+	if (use_ticket) {
 		bool wake_up = false;
+
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
+		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
 			     || !bo->seq_valid))
 			wake_up = true;
 
@@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		 * written before val_seq was, and just means some slightly
 		 * increased cpu usage
 		 */
-		bo->val_seq = sequence;
+		bo->val_seq = ticket->stamp;
 		bo->seq_valid = true;
 		if (wake_up)
 			wake_up_all(&bo->event_queue);
@@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
-		   bool no_wait, bool use_sequence, uint32_t sequence)
+		   bool no_wait, bool use_ticket,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
-				   sequence);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
+				   ticket);
 	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible, uint32_t sequence)
+				  bool interruptible,
+				  struct ww_acquire_ctx *ticket)
 {
 	bool wake_up = false;
 	int ret;
 
 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
 
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
 
@@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
 		wake_up = true;
 
 	/**
 	 * Wake up waiters that may need to recheck for deadlock,
 	 * if we decreased the sequence number.
 	 */
-	bo->val_seq = sequence;
+	bo->val_seq = ticket->stamp;
 	bo->seq_valid = true;
 	if (wake_up)
 		wake_up_all(&bo->event_queue);
@@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-			    bool interruptible, uint32_t sequence)
+			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count, ret;
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
 	if (likely(!ret)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
@@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_unreserve_locked(bo);
+	ttm_bo_unreserve_ticket_locked(bo, NULL);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
+void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	ttm_bo_unreserve_ticket_locked(bo, ticket);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
+
 /*
  * Call bo->mutex locked.
  */
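A note on the ticket->stamp - bo->val_seq <= LONG_MAX checks above: val_seq and the ww_acquire_ctx stamp are full unsigned longs, so the old 32-bit half-range test against (1 << 31) is replaced by the equivalent wrap-safe comparison against LONG_MAX ("the holder's stamp is not newer than ours, so it will not back off and we must"). The small userspace sketch below is not kernel code and the helper name is invented; it only demonstrates what the modular comparison computes:

	#include <assert.h>
	#include <limits.h>

	/* True when stamp 'a' was issued at the same time as or after 'b',
	 * even across counter wrap-around; mirrors "a - b <= LONG_MAX". */
	static int stamp_is_not_older(unsigned long a, unsigned long b)
	{
		return (a - b) <= LONG_MAX;
	}

	int main(void)
	{
		assert(stamp_is_not_older(5, 3));		/* 5 issued after 3 */
		assert(!stamp_is_not_older(3, 5));		/* 3 issued before 5 */
		assert(stamp_is_not_older(1, ULONG_MAX));	/* still true across wrap-around */
		return 0;
	}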
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7b90def15674..efcb734e5543 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,8 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list,
+					      struct ww_acquire_ctx *ticket)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,14 +42,15 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		entry->reserved = false;
 		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_unreserve_ticket_locked(bo, ticket);
 			entry->removed = false;
 
+		} else {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
 		}
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
 	}
 }
 
@@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
+	ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
@@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
+	ww_acquire_init(ticket, &reservation_ww_class);
 	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
 
 retry:
 	list_for_each_entry(entry, list, head) {
@@ -140,7 +144,7 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
 		switch (ret) {
 		case 0:
 			break;
@@ -148,8 +152,9 @@ retry:
 			ttm_eu_del_from_lru_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, val_seq);
+						   true, ticket);
 			spin_lock(&glob->lru_lock);
+
 			if (!ret)
 				break;
 
@@ -158,21 +163,13 @@ retry:
 
 			/* fallthrough */
 		case -EAGAIN:
-			ttm_eu_backoff_reservation_locked(list);
-
-			/*
-			 * temporarily increase sequence number every retry,
-			 * to prevent us from seeing our old reservation
-			 * sequence when someone else reserved the buffer,
-			 * but hasn't updated the seq_valid/seqno members yet.
-			 */
-			val_seq = entry->bo->bdev->val_seq++;
-
+			ttm_eu_backoff_reservation_locked(list, ticket);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
 			if (unlikely(ret != 0))
-				return ret;
+				goto err_fini;
+
 			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +188,25 @@ retry:
 		}
 	}
 
+	ww_acquire_done(ticket);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
-
 	return 0;
 
 err:
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
+err_fini:
+	ww_acquire_done(ticket);
+	ww_acquire_fini(ticket);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		ttm_bo_unreserve_locked(bo);
+		ttm_bo_unreserve_ticket_locked(bo, ticket);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
+	ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
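With this change an execbuf-style caller threads one ww_acquire_ctx through the whole reserve/fence cycle. A rough driver-side sketch of the new calling convention follows, based only on the signatures in the diff above; my_submit(), my_do_submit(), the validate list and the fence object are placeholders, not code from this patch:

	#include <drm/ttm/ttm_execbuf_util.h>

	static int my_do_submit(void);	/* placeholder for the driver's real work */

	static int my_submit(struct list_head *validate_list, void *fence)
	{
		struct ww_acquire_ctx ticket;
		int ret;

		/* Reserves every buffer on the list; ww_acquire_init() is done
		 * internally and the ticket is already finished on failure. */
		ret = ttm_eu_reserve_buffers(&ticket, validate_list);
		if (ret)
			return ret;

		ret = my_do_submit();
		if (ret) {
			/* Error: unreserve everything and ww_acquire_fini() the ticket. */
			ttm_eu_backoff_reservation(&ticket, validate_list);
			return ret;
		}

		/* Success: attach the fence, unreserve, and ww_acquire_fini() the ticket. */
		ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
		return 0;
	}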