author	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:57:10 -0500
committer	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 08:57:10 -0500
commit	f2d476a110bc24fde008698ae9018c99e803e25c (patch)
tree	2d33f2036764ac5a6d0ee4c45104f1d90530f5ba /drivers
parent	5e45d7dfd74100d622f9cdc70bfd1f9fae1671de (diff)
drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2
This requires re-use of the seqno, which increases fairness slightly.
Instead of spinning with a new seqno every time, we keep the current one,
but still drop all other reservations we hold. Only when we succeed do we
try to get back our other reservations again. This should increase
fairness slightly as well.

Changes since v1:
- Increase val_seq before calling ttm_bo_reserve_slowpath_nolru and
  retrying to take all entries, to prevent a race.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
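
The retry pattern the message describes can be sketched in a few lines of plain C. This is only a simplified illustrative sketch, not the TTM code itself: every name in it (reserve_all, try_reserve, reserve_slowpath, backoff_all, struct buf) is a hypothetical stand-in, and the helpers are only declared, not implemented. It shows the shape of the algorithm: bump the sequence number once per retry, back everything off on contention, slowpath-reserve the contended entry, and skip it on the next pass.

#include <stdbool.h>
#include <stddef.h>

/*
 * Hypothetical stand-ins for the real reservation primitives.
 * try_reserve()/reserve_slowpath() return 0 on success or a negative
 * errno; backoff_all() releases every entry whose ->reserved flag is
 * set and clears that flag.
 */
struct buf {
	bool reserved;		/* currently held by us */
};

int try_reserve(struct buf *b, unsigned int seq);
int reserve_slowpath(struct buf *b, unsigned int seq);
void backoff_all(struct buf *bufs, size_t n);

int reserve_all(struct buf *bufs, size_t n, unsigned int *val_seq)
{
	unsigned int seq = (*val_seq)++;
	size_t i;

retry:
	for (i = 0; i < n; i++) {
		int ret;

		/* already slowpath reserved on a previous pass? */
		if (bufs[i].reserved)
			continue;

		ret = try_reserve(&bufs[i], seq);
		if (ret == 0) {
			bufs[i].reserved = true;
			continue;
		}

		/* contention: drop everything we hold ... */
		backoff_all(bufs, n);
		/* ... bump the seqno for this retry ... */
		seq = (*val_seq)++;
		/* ... block until the contended entry can be taken ... */
		ret = reserve_slowpath(&bufs[i], seq);
		if (ret != 0)
			return ret;
		bufs[i].reserved = true;
		/* ... and take all the other entries again */
		goto retry;
	}
	return 0;
}
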
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d323657798..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;