author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2013-01-15 08:56:48 -0500
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>    2013-01-15 08:56:48 -0500
commit    7a1863084c9d90ce4b67d645bf9b0f1612e68f62 (patch)
tree      f888cffea1079ef31e4e27bf5d7215c92b325756 /drivers/gpu/drm/ttm/ttm_execbuf_util.c
parent    63d0a4195560362e2e00a3ad38fc331d34e1da9b (diff)
drm/ttm: cleanup ttm_eu_reserve_buffers handling
With the lru lock no longer required for protecting reservations, we can
just do a ttm_bo_reserve_nolru on -EBUSY, and handle all errors in a
single path.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
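In isolation, the pattern the commit moves to can be sketched outside the kernel. The stand-alone program below is not the TTM API: struct buf, try_reserve(), reserve_all(), and the pthread mutex are hypothetical stand-ins that only mirror the shape of the new code, i.e. a non-blocking reserve first, a blocking retry on -EBUSY with the shared lock dropped, and one err: label that backs everything off.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	bool reserved;
	bool busy;		/* simulates a reservation held elsewhere */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for ttm_bo_reserve_nolru(): fails with -EBUSY
 * when the buffer is contended unless asked to block, in which case it
 * pretends the contender has released the buffer. */
static int try_reserve(struct buf *b, bool block)
{
	if (b->busy && !block)
		return -EBUSY;
	b->busy = false;
	b->reserved = true;
	return 0;
}

static int reserve_all(struct buf *bufs, int n)
{
	int i, ret;

	pthread_mutex_lock(&lru_lock);
	for (i = 0; i < n; i++) {
		ret = try_reserve(&bufs[i], false);
		if (ret == -EBUSY) {
			/* Drop the shared lock only while blocking. */
			pthread_mutex_unlock(&lru_lock);
			ret = try_reserve(&bufs[i], true);
			pthread_mutex_lock(&lru_lock);
		}
		if (ret)
			goto err;	/* every failure funnels here */
	}
	pthread_mutex_unlock(&lru_lock);
	return 0;

err:
	/* Single backoff path: undo the reservations taken so far. */
	while (--i >= 0)
		bufs[i].reserved = false;
	pthread_mutex_unlock(&lru_lock);
	return ret;
}

int main(void)
{
	struct buf bufs[3] = { { 0 }, { .busy = true }, { 0 } };

	printf("reserve_all: %d\n", reserve_all(bufs, 3));	/* prints 0 */
	return 0;
}

The real ttm_eu_reserve_buffers() is more involved, since on -EAGAIN it must back off the whole list and restart, but the -EBUSY retry and the single error path in the diff below follow this shape.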
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 53
1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index bd37b5cb8553..c7d323657798 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
 				return ret;
 			goto retry;
 		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
+			goto err;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
 
@@ -194,6 +177,12 @@ retry_this_bo:
 	ttm_eu_list_ref_sub(list);
 
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 