author	Maarten Lankhorst <m.b.lankhorst@gmail.com>	2013-06-27 07:48:17 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-06-27 22:02:20 -0400
commit	ecff665f5e3f1c6909353e00b9420e45ae23d995 (patch)
tree	8fed7d4570ec707427e954c6d2695d1549e08364 /drivers/gpu/drm/ttm/ttm_execbuf_util.c
parent	786d7257e537da0674c02e16e3b30a44665d1cee (diff)
drm/ttm: make ttm reservation calls behave like reservation calls
This commit converts the source of the val_seq counter to the ww_mutex api. The reservation objects are converted later, because there is still a lockdep splat in nouveau that has to be resolved first.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	58
1 file changed, 30 insertions(+), 28 deletions(-)
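For context, a minimal sketch of how a driver-side caller would use the ticket-based entry points after this change. The names example_validate_and_fence, validate_list and my_fence are illustrative assumptions, not part of the patch; only the ttm_eu_* functions and the ww_acquire_ctx ticket parameter come from this commit.

#include <drm/ttm/ttm_execbuf_util.h>

static int example_validate_and_fence(struct list_head *validate_list,
				      void *my_fence)
{
	struct ww_acquire_ctx ticket;	/* tracks this reservation attempt */
	int ret;

	/* Reserve every buffer on the list under one ww_mutex acquire ctx. */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list);
	if (ret)
		return ret;

	/* ... validate buffers / build and submit the command stream ... */
	if (0 /* hypothetical submission failure */) {
		/* Drop all reservations and release the ticket. */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return -EFAULT;
	}

	/* Attach the fence, unreserve the buffers and finish the ticket. */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, my_fence);
	return 0;
}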
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 7b90def15674..efcb734e5543 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,8 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list,
+					      struct ww_acquire_ctx *ticket)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,14 +42,15 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		entry->reserved = false;
 		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_unreserve_ticket_locked(bo, ticket);
 			entry->removed = false;
 
+		} else {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
 		}
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
 	}
 }
 
@@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
+	ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
@@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
+	ww_acquire_init(ticket, &reservation_ww_class);
 	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
 
 retry:
 	list_for_each_entry(entry, list, head) {
@@ -140,7 +144,7 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
 		switch (ret) {
 		case 0:
 			break;
@@ -148,8 +152,9 @@ retry:
 			ttm_eu_del_from_lru_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, val_seq);
+						   true, ticket);
 			spin_lock(&glob->lru_lock);
+
 			if (!ret)
 				break;
 
@@ -158,21 +163,13 @@ retry:
 
 			/* fallthrough */
 		case -EAGAIN:
-			ttm_eu_backoff_reservation_locked(list);
-
-			/*
-			 * temporarily increase sequence number every retry,
-			 * to prevent us from seeing our old reservation
-			 * sequence when someone else reserved the buffer,
-			 * but hasn't updated the seq_valid/seqno members yet.
-			 */
-			val_seq = entry->bo->bdev->val_seq++;
-
+			ttm_eu_backoff_reservation_locked(list, ticket);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
 			if (unlikely(ret != 0))
-				return ret;
+				goto err_fini;
+
 			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +188,25 @@ retry:
 		}
 	}
 
+	ww_acquire_done(ticket);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
-
 	return 0;
 
 err:
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
+err_fini:
+	ww_acquire_done(ticket);
+	ww_acquire_fini(ticket);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		ttm_bo_unreserve_locked(bo);
+		ttm_bo_unreserve_ticket_locked(bo, ticket);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
+	ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)