author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-14 03:39:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-14 03:39:08 -0400
commit     2d65a9f48fcdf7866aab6457bc707ca233e0c791 (patch)
tree       f93e5838d6ac2e59434367f4ff905f7d9c45fc2b  /drivers/gpu/drm/ttm/ttm_execbuf_util.c
parent     da92da3638a04894afdca8b99e973ddd20268471 (diff)
parent     dfda0df3426483cf5fc7441f23f318edbabecb03 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main git pull for the drm, I pretty much froze major
  pulls at -rc5/6 time, and haven't had much fallout, so will probably
  continue doing that.  Lots of changes all over, big internal header
  cleanup to make it clear drm features are legacy things and what are
  things that modern KMS drivers should be using.  Also big move to use
  the new generic fences in all the TTM drivers.

  core:
   - atomic prep work
   - vblank rework changes, allows immediate vblank disables
   - major header reworking and cleanups to better delineate legacy
     interfaces from what KMS drivers should be using
   - cursor planes locking fixes

  ttm:
   - move to generic fences (affects all TTM drivers)
   - ppc64 caching fixes

  radeon:
   - userptr support
   - uvd for old asics
   - reset rework for fence changes
   - better buffer placement changes
   - dpm feature enablement
   - hdmi audio support fixes

  intel:
   - Cherryview work
   - 180 degree rotation
   - skylake prep work
   - execlist command submission
   - full ppgtt prep work
   - cursor improvements
   - edid caching
   - vdd handling improvements

  nouveau:
   - fence reworking
   - kepler memory clock work
   - gt21x clock work
   - fan control improvements
   - hdmi infoframe fixes
   - DP audio

  ast:
   - ppc64 fixes
   - caching fix

  rcar:
   - rcar-du DT support

  ipuv3:
   - prep work for capture support

  msm:
   - LVDS support for mdp4, new panel, gpu refactoring

  exynos:
   - exynos3250 SoC support, drop bad mmap interface, mipi dsi changes,
     and component match support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
  drm/mst: rework payload table allocation to conform better.
  drm/ast: Fix HW cursor image
  drm/radeon/kv: add uvd/vce info to dpm debugfs output
  drm/radeon/ci: add uvd/vce info to dpm debugfs output
  drm/radeon: export reservation_object from dmabuf to ttm
  drm/radeon: cope with foreign fences inside the reservation object
  drm/radeon: cope with foreign fences inside display
  drm/core: use helper to check driver features
  drm/radeon/cik: write gfx ucode version to ucode addr reg
  drm/radeon/si: print full CS when we hit a packet 0
  drm/radeon: remove unecessary includes
  drm/radeon/combios: declare legacy_connector_convert as static
  drm/radeon/atombios: declare connector convert tables as static
  drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
  drm/radeon/dpm: drop clk/voltage dependency filters for BTC
  drm/radeon/dpm: drop clk/voltage dependency filters for CI
  drm/radeon/dpm: drop clk/voltage dependency filters for SI
  drm/radeon/dpm: drop clk/voltage dependency filters for NI
  drm/radeon: disable audio when we disable hdmi (v2)
  drm/radeon: split audio enable between eg and r600 (v2)
  ...
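For the file below, the fence move reworks the driver-facing execbuf helpers:
ttm_eu_reserve_buffers() gains an explicit "intr" argument and
ttm_eu_fence_buffer_objects() takes a struct fence * instead of a
driver-private void *sync_obj.  As orientation, a minimal sketch of a driver
submission path of this era; submit_cs() and cs_validate_buffers() are
hypothetical stand-ins, only the ttm_eu_* calls and their signatures come
from this diff:

	#include <linux/fence.h>
	#include <drm/ttm/ttm_execbuf_util.h>

	/* Hypothetical command-submission path; each list entry is a
	 * struct ttm_validate_buffer with .bo and .shared filled in. */
	static int submit_cs(struct list_head *validate_list,
			     struct fence *fence)
	{
		struct ww_acquire_ctx ticket;
		int ret;

		/* New third parameter: true requests interruptible waits. */
		ret = ttm_eu_reserve_buffers(&ticket, validate_list, true);
		if (ret)
			return ret;

		ret = cs_validate_buffers(validate_list); /* hypothetical */
		if (ret) {
			/* Puts every BO back on the LRU and unreserves it. */
			ttm_eu_backoff_reservation(&ticket, validate_list);
			return ret;
		}

		/* The generic fence replaces the old void *sync_obj: it is
		 * added to each buffer's reservation_object as shared or
		 * exclusive, depending on entry->shared. */
		ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
		return 0;
	}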
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  160
1 file changed, 63 insertions, 97 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e8dac8758528..8ce508e76208 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,20 +32,12 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+					      struct ttm_validate_buffer *entry)
 {
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
+	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
 
-		entry->reserved = false;
-		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
-			entry->removed = false;
-		}
 		__ttm_bo_unreserve(bo);
 	}
 }
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
+		unsigned put_count = ttm_bo_del_from_lru(bo);
 
-		if (!entry->removed) {
-			entry->put_count = ttm_bo_del_from_lru(bo);
-			entry->removed = true;
-		}
-	}
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		if (entry->put_count) {
-			ttm_bo_list_ref_sub(bo, entry->put_count, true);
-			entry->put_count = 0;
-		}
+		ttm_bo_list_ref_sub(bo, put_count, true);
 	}
 }
 
@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
+
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
+	}
+	spin_unlock(&glob->lru_lock);
+
 	if (ticket)
 		ww_acquire_fini(ticket);
-	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
@@ -112,7 +93,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-			   struct list_head *list)
+			   struct list_head *list, bool intr)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -121,60 +102,64 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	if (list_empty(list))
 		return 0;
 
-	list_for_each_entry(entry, list, head) {
-		entry->reserved = false;
-		entry->put_count = 0;
-		entry->removed = false;
-	}
-
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		/* already slowpath reserved? */
-		if (entry->reserved)
-			continue;
-
-		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
 				       ticket);
+		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			__ttm_bo_unreserve(bo);
+
+			ret = -EBUSY;
+		}
 
-		if (ret == -EDEADLK) {
-			/* uh oh, we lost out, drop every reservation and try
-			 * to only reserve this buffer, then start over if
-			 * this succeeds.
-			 */
-			BUG_ON(ticket == NULL);
-			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
+			if (!ret)
+				continue;
+		}
+
+		/* uh oh, we lost out, drop every reservation and try
+		 * to only reserve this buffer, then start over if
+		 * this succeeds.
+		 */
+		ttm_eu_backoff_reservation_reverse(list, entry);
+
+		if (ret == -EDEADLK && intr) {
 			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
 							       ticket);
-			if (unlikely(ret != 0)) {
-				if (ret == -EINTR)
-					ret = -ERESTARTSYS;
-				goto err_fini;
-			}
+		} else if (ret == -EDEADLK) {
+			ww_mutex_lock_slow(&bo->resv->lock, ticket);
+			ret = 0;
+		}
 
-			entry->reserved = true;
-			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-				ret = -EBUSY;
-				goto err;
-			}
-			goto retry;
-		} else if (ret)
-			goto err;
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
 
-		entry->reserved = true;
-		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ret = -EBUSY;
-			goto err;
+		if (unlikely(ret != 0)) {
+			if (ret == -EINTR)
+				ret = -ERESTARTSYS;
+			if (ticket) {
+				ww_acquire_done(ticket);
+				ww_acquire_fini(ticket);
+			}
+			return ret;
 		}
+
+		/* move this item to the front of the list,
+		 * forces correct iteration of the loop without keeping track
+		 */
+		list_del(&entry->head);
+		list_add(&entry->head, list);
 	}
 
 	if (ticket)
@@ -182,25 +167,12 @@ retry:
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
 	return 0;
-
-err:
-	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
-err_fini:
-	if (ticket) {
-		ww_acquire_done(ticket);
-		ww_acquire_fini(ticket);
-	}
-	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
 void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-				 struct list_head *list, void *sync_obj)
+				 struct list_head *list, struct fence *fence)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -217,24 +189,18 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj = bo->sync_obj;
-		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
-		entry->reserved = false;
 	}
-	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
-
-	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj)
-			driver->sync_obj_unref(&entry->old_sync_obj);
-	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
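
A note on the reserve loop above: the retry: label and the per-entry
reserved/removed/put_count bookkeeping disappear because each successfully
reserved buffer is moved to the front of the list, so on contention
ttm_eu_backoff_reservation_reverse() can unreserve exactly the already-held
buffers by walking backwards from the failing entry, after which the
contended lock is taken on the ww_mutex slow path.  A minimal sketch of that
wait/wound pattern on two bare ww_mutexes; demo_ww_class stands in for TTM's
reservation_ww_class and lock_a/lock_b for the per-buffer bo->resv->lock, so
this is an illustration of the locking discipline, not code from the patch:

	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(demo_ww_class);

	/* Lock two ww_mutexes in arbitrary order without deadlocking. */
	static int lock_pair(struct ww_acquire_ctx *ctx,
			     struct ww_mutex *lock_a, struct ww_mutex *lock_b)
	{
		int ret;

		ww_acquire_init(ctx, &demo_ww_class);

		ret = ww_mutex_lock_interruptible(lock_a, ctx);
		if (ret)
			goto fail;

		ret = ww_mutex_lock_interruptible(lock_b, ctx);
		if (ret == -EDEADLK) {
			/* We would deadlock against an older transaction:
			 * back off everything we hold, then block on the
			 * contended lock.  Sleeping here is safe precisely
			 * because we no longer own anything the other side
			 * could be waiting for. */
			ww_mutex_unlock(lock_a);
			ret = ww_mutex_lock_slow_interruptible(lock_b, ctx);
			if (!ret) {
				/* Retry with lock_b already held; another
				 * -EDEADLK would repeat the same dance. */
				ret = ww_mutex_lock_interruptible(lock_a, ctx);
				if (ret)
					ww_mutex_unlock(lock_b);
			}
		} else if (ret) {
			ww_mutex_unlock(lock_a);
		}

		if (!ret) {
			ww_acquire_done(ctx);
			return 0;
		}
	fail:
		ww_acquire_fini(ctx);
		return ret;
	}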