Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  |  23
1 file changed, 21 insertions(+), 2 deletions(-)
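
The change below relies on struct ttm_validate_buffer (declared in include/drm/ttm/ttm_execbuf_util.h) carrying a per-entry reservation flag. As a minimal sketch, limited to the members this diff actually references; the real structure of this kernel generation has further fields that are omitted here:

/* Sketch only: members of struct ttm_validate_buffer used by this diff. */
struct ttm_validate_buffer {
	struct list_head head;		/* link into the caller's validate list */
	struct ttm_buffer_object *bo;	/* buffer object to reserve */
	bool reserved;			/* set once reserved via the slowpath */
};
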
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d323657798..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
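
For orientation, a hedged usage sketch of how a driver of this period typically drives this helper: the retry and slowpath handling above stays internal to ttm_eu_reserve_buffers(), so callers only see a fully reserved list or an error. The ttm_eu_* names match drivers/gpu/drm/ttm/ttm_execbuf_util.c of this era; example_submit() and its sync_obj argument are illustrative placeholders, not part of the patch.

#include <drm/ttm/ttm_execbuf_util.h>

/* Illustrative caller (not from this patch): reserve, validate, fence. */
static int example_submit(struct list_head *list, void *sync_obj)
{
	int ret;

	/* Reserves every ttm_validate_buffer on the list; -EAGAIN retries
	 * and the slowpath reservation are handled inside this call. */
	ret = ttm_eu_reserve_buffers(list);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * ... validate buffer placement and apply relocations here; on
	 * failure, drop the reservations with ttm_eu_backoff_reservation(list).
	 */

	/* Hand the reservations over to the fence and unreserve the list. */
	ttm_eu_fence_buffer_objects(list, sync_obj);
	return 0;
}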