author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2014-01-09 05:03:08 -0500
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>    2014-09-01 04:18:03 -0400
commit    1f0dc9a59afeccb96a35ebec36661266260f5eee (patch)
tree      24f8dfbf51ce943412a60dd82f738a333ab905f4 /drivers/gpu/drm/ttm/ttm_execbuf_util.c
parent    58b4d720c1620bbf09e42b4f218dcb2d0d8cdf3e (diff)
drm/ttm: kill off some members to ttm_validate_buffer
This reorders the validation list while reserving, so that the list itself
tracks which buffers are reserved: every entry before the one currently
being processed is reserved, and everything from that entry onwards is not.

This gets rid of the per-entry bookkeeping (the reserved, removed and
put_count members) that is no longer needed, and simplifies the code somewhat.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
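
Note (not part of the commit message): the list itself becomes the bookkeeping.
Buffers are reserved front to back, so everything before the entry currently
being processed is reserved and everything after it is not. A minimal sketch of
the resulting pattern, pieced together from the hunks below and not taken
verbatim from the patch (entry, list, intr and ticket are assumed to be the
usual locals of ttm_eu_reserve_buffers):

        /* Illustrative only: reserve in list order, back off in reverse on failure. */
        list_for_each_entry(entry, list, head) {
                ret = __ttm_bo_reserve(entry->bo, intr, (ticket == NULL), true, ticket);
                if (ret) {
                        /* the entries before 'entry' are exactly the reserved ones */
                        ttm_eu_backoff_reservation_reverse(list, entry);
                        break;  /* or slow-lock entry->bo and retry, as the patch does */
                }
        }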
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 142
1 file changed, 50 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 87d7deefc806..108730e9147b 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,20 +32,12 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+                                               struct ttm_validate_buffer *entry)
 {
-        struct ttm_validate_buffer *entry;
-
-        list_for_each_entry(entry, list, head) {
+        list_for_each_entry_continue_reverse(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
-                if (!entry->reserved)
-                        continue;
 
-                entry->reserved = false;
-                if (entry->removed) {
-                        ttm_bo_add_to_lru(bo);
-                        entry->removed = false;
-                }
                 __ttm_bo_unreserve(bo);
         }
 }
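
The new helper relies on list_for_each_entry_continue_reverse(), which starts
at the entry *preceding* the given cursor and walks towards the list head, so
the buffer that failed to reserve is itself skipped. A short sketch of the
semantics, not taken from the patch ('failed' is an assumed name for the
entry whose reservation failed):

        /* Sketch: given a validation list A -> B -> C -> D where reserving C
         * failed, this visits B, then A - never C or D.
         */
        struct ttm_validate_buffer *entry = failed;     /* cursor = failing entry */

        list_for_each_entry_continue_reverse(entry, list, head) {
                __ttm_bo_unreserve(entry->bo);          /* undo earlier reservations */
        }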
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
 
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
-                if (!entry->reserved)
-                        continue;
+                unsigned put_count = ttm_bo_del_from_lru(bo);
 
-                if (!entry->removed) {
-                        entry->put_count = ttm_bo_del_from_lru(bo);
-                        entry->removed = true;
-                }
-        }
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-        struct ttm_validate_buffer *entry;
-
-        list_for_each_entry(entry, list, head) {
-                struct ttm_buffer_object *bo = entry->bo;
-
-                if (entry->put_count) {
-                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
-                        entry->put_count = 0;
-                }
+                ttm_bo_list_ref_sub(bo, put_count, true);
         }
 }
 
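Pieced together from the context and added lines of the hunk above, the helper
after this patch reads roughly as follows (the declaration of entry falls
outside the hunk and is assumed unchanged):

        static void ttm_eu_del_from_lru_locked(struct list_head *list)
        {
                struct ttm_validate_buffer *entry;

                list_for_each_entry(entry, list, head) {
                        struct ttm_buffer_object *bo = entry->bo;
                        /* drop the BO from the LRU and release the LRU references
                         * right away, instead of stashing a per-entry put_count */
                        unsigned put_count = ttm_bo_del_from_lru(bo);

                        ttm_bo_list_ref_sub(bo, put_count, true);
                }
        }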
@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
         entry = list_first_entry(list, struct ttm_validate_buffer, head);
         glob = entry->bo->glob;
+
         spin_lock(&glob->lru_lock);
-        ttm_eu_backoff_reservation_locked(list);
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+
+                ttm_bo_add_to_lru(bo);
+                __ttm_bo_unreserve(bo);
+        }
+        spin_unlock(&glob->lru_lock);
+
         if (ticket)
                 ww_acquire_fini(ticket);
-        spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
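Put together from the hunk above, the backoff path after this patch reads
roughly as below. The function prologue (second parameter, local declarations
and the empty-list early return) lies outside the hunk and is assumed here:

        void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                        struct list_head *list)
        {
                struct ttm_validate_buffer *entry;
                struct ttm_bo_global *glob;

                /* ... early return for an empty list, outside this hunk ... */

                entry = list_first_entry(list, struct ttm_validate_buffer, head);
                glob = entry->bo->glob;

                spin_lock(&glob->lru_lock);
                list_for_each_entry(entry, list, head) {
                        struct ttm_buffer_object *bo = entry->bo;

                        /* every entry on the list is reserved here, so no
                         * per-entry reserved/removed checks are needed */
                        ttm_bo_add_to_lru(bo);
                        __ttm_bo_unreserve(bo);
                }
                spin_unlock(&glob->lru_lock);

                /* the ticket teardown no longer happens under the LRU lock */
                if (ticket)
                        ww_acquire_fini(ticket);
        }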
@@ -121,64 +102,55 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
         if (list_empty(list))
                 return 0;
 
-        list_for_each_entry(entry, list, head) {
-                entry->reserved = false;
-                entry->put_count = 0;
-                entry->removed = false;
-        }
-
         entry = list_first_entry(list, struct ttm_validate_buffer, head);
         glob = entry->bo->glob;
 
         if (ticket)
                 ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
-                /* already slowpath reserved? */
-                if (entry->reserved)
-                        continue;
-
                 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
                                        ticket);
+                if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+                        __ttm_bo_unreserve(bo);
 
-                if (ret == -EDEADLK) {
-                        /* uh oh, we lost out, drop every reservation and try
-                         * to only reserve this buffer, then start over if
-                         * this succeeds.
-                         */
-                        BUG_ON(ticket == NULL);
-                        spin_lock(&glob->lru_lock);
-                        ttm_eu_backoff_reservation_locked(list);
-                        spin_unlock(&glob->lru_lock);
-                        ttm_eu_list_ref_sub(list);
-
-                        if (intr) {
-                                ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-                                                                       ticket);
-                                if (unlikely(ret != 0)) {
-                                        if (ret == -EINTR)
-                                                ret = -ERESTARTSYS;
-                                        goto err_fini;
-                                }
-                        } else
-                                ww_mutex_lock_slow(&bo->resv->lock, ticket);
-
-                        entry->reserved = true;
-                        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                                ret = -EBUSY;
-                                goto err;
-                        }
-                        goto retry;
-                } else if (ret)
-                        goto err;
-
-                entry->reserved = true;
-                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                         ret = -EBUSY;
-                        goto err;
                 }
+
+                if (!ret)
+                        continue;
+
+                /* uh oh, we lost out, drop every reservation and try
+                 * to only reserve this buffer, then start over if
+                 * this succeeds.
+                 */
+                ttm_eu_backoff_reservation_reverse(list, entry);
+
+                if (ret == -EDEADLK && intr) {
+                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+                                                               ticket);
+                } else if (ret == -EDEADLK) {
+                        ww_mutex_lock_slow(&bo->resv->lock, ticket);
+                        ret = 0;
+                }
+
+                if (unlikely(ret != 0)) {
+                        if (ret == -EINTR)
+                                ret = -ERESTARTSYS;
+                        if (ticket) {
+                                ww_acquire_done(ticket);
+                                ww_acquire_fini(ticket);
+                        }
+                        return ret;
+                }
+
+                /* move this item to the front of the list,
+                 * forces correct iteration of the loop without keeping track
+                 */
+                list_del(&entry->head);
+                list_add(&entry->head, list);
         }
 
         if (ticket)
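
The final list_del()/list_add() pair is the subtle part of this hunk: after the
slow-path lock succeeds, the entry is moved to the front of the list, and since
list_for_each_entry() advances from the entry's current position, the loop
effectively restarts at the old first element while the just-locked buffer is
already accounted for. A sketch of the effect, not taken from the patch:

        /* Sketch: suppose the list is A -> B -> C and reserving C hit -EDEADLK.
         * After backing off A and B and slow-locking C, the patch does:
         */
        list_del(&entry->head);         /* unlink C ...                            */
        list_add(&entry->head, list);   /* ... and put it at the head: C -> A -> B */

        /* list_for_each_entry() then steps to the node after C's new position,
         * which is A, so A and B are reserved again on the following iterations
         * while C (already locked via the slow path) is never revisited.
         */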
@@ -186,20 +158,7 @@ retry:
         spin_lock(&glob->lru_lock);
         ttm_eu_del_from_lru_locked(list);
         spin_unlock(&glob->lru_lock);
-        ttm_eu_list_ref_sub(list);
         return 0;
-
-err:
-        spin_lock(&glob->lru_lock);
-        ttm_eu_backoff_reservation_locked(list);
-        spin_unlock(&glob->lru_lock);
-        ttm_eu_list_ref_sub(list);
-err_fini:
-        if (ticket) {
-                ww_acquire_done(ticket);
-                ww_acquire_fini(ticket);
-        }
-        return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
@@ -228,7 +187,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                 bo->sync_obj = driver->sync_obj_ref(sync_obj);
                 ttm_bo_add_to_lru(bo);
                 __ttm_bo_unreserve(bo);
-                entry->reserved = false;
         }
         spin_unlock(&glob->lru_lock);
         if (ticket)
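
For context, a driver execbuf path is expected to use these helpers roughly as
in the sketch below. This is an illustration against the API as it stands after
this patch, not driver code from the tree: driver_validate_buffers() and
sync_obj are placeholders for the driver-specific validation step and sync
object, and validate_list is assumed to already hold struct ttm_validate_buffer
entries:

        struct ww_acquire_ctx ticket;
        struct list_head validate_list;         /* list of struct ttm_validate_buffer */
        int ret;

        /* Reserve every BO on the list; on failure everything is already
         * unreserved and the ticket finalized, so only a return is needed. */
        ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true);
        if (ret)
                return ret;

        ret = driver_validate_buffers(&validate_list);  /* driver-specific step */
        if (ret) {
                /* undo the reservations and put the BOs back on the LRU */
                ttm_eu_backoff_reservation(&ticket, &validate_list);
                return ret;
        }

        /* attach the sync object to every BO, re-add them to the LRU, unreserve */
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, sync_obj);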