author     Maarten Lankhorst <maarten.lankhorst@canonical.com>   2014-07-01 06:58:00 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>       2014-07-08 16:41:08 -0400
commit     3c3b177a9369b26890ced004867fb32708e8ef5b (patch)
tree       d7f1840cd62b8c0d427ea56b203485c2e27bb29e
parent     04a5faa8cbe5a8eaf152cb88959ba6360c26e702 (diff)
reservation: add support for read-only access using rcu
This adds some extra functions to deal with rcu.

reservation_object_get_fences_rcu() will obtain the list of shared
and exclusive fences without obtaining the ww_mutex.

reservation_object_wait_timeout_rcu() will wait on all fences of the
reservation_object, without obtaining the ww_mutex.

reservation_object_test_signaled_rcu() will test if all fences of the
reservation_object are signaled without using the ww_mutex.

reservation_object_get_excl and reservation_object_get_list require
the reservation object to be held, updating requires
write_seqcount_begin/end. If only the exclusive fence is needed,
rcu_dereference followed by fence_get_rcu can be used, if the shared
fences are needed it's recommended to use the supplied functions.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Reviewed-By: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
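As a usage illustration (not part of this patch): a driver-side wrapper combining the new entry points might look as follows. driver_bo_wait_idle() and the 10-second timeout are hypothetical; the reservation_object_*_rcu() calls are the ones added below.

	/* Wait for a buffer to go idle without taking the ww_mutex. */
	static int driver_bo_wait_idle(struct reservation_object *resv)
	{
		long ret;

		/* fast path: everything already signaled? */
		if (reservation_object_test_signaled_rcu(resv, true))
			return 0;

		/* wait on all fences, interruptibly, 10 second timeout */
		ret = reservation_object_wait_timeout_rcu(resv, true, true,
							  msecs_to_jiffies(10000));
		if (ret < 0)
			return ret;	/* e.g. -ERESTARTSYS */

		return ret ? 0 : -ETIME;	/* 0 jiffies left means timeout */
	}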
-rw-r--r--  drivers/dma-buf/dma-buf.c      |  47
-rw-r--r--  drivers/dma-buf/fence.c        |   2
-rw-r--r--  drivers/dma-buf/reservation.c  | 336
-rw-r--r--  include/linux/fence.h          |  17
-rw-r--r--  include/linux/reservation.h    |  52
5 files changed, 400 insertions, 54 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cb8379dfeed5..f3014c448e1e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -137,7 +137,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct reservation_object_list *fobj;
 	struct fence *fence_excl;
 	unsigned long events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -151,14 +151,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-	ww_mutex_lock(&resv->lock, NULL);
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
 
-	fobj = resv->fence;
-	if (!fobj)
-		goto out;
-
-	shared_count = fobj->shared_count;
-	fence_excl = resv->fence_excl;
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
 
 	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
@@ -176,14 +182,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_add_callback(fence_excl, &dcb->cb,
+			if (!fence_get_rcu(fence_excl)) {
+				/* force a recheck */
+				events &= ~pevents;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			} else if (!fence_add_callback(fence_excl, &dcb->cb,
 						       dma_buf_poll_cb)) {
 				events &= ~pevents;
+				fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
+				fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -205,13 +217,26 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = fobj->shared[i];
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
+			if (!fence_get_rcu(fence)) {
+				/*
+				 * fence refcount dropped to zero, this means
+				 * that fobj has been freed
+				 *
+				 * call dma_buf_poll_cb and force a recheck!
+				 */
+				events &= ~POLLOUT;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+				break;
+			}
 			if (!fence_add_callback(fence, &dcb->cb,
 						dma_buf_poll_cb)) {
+				fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
+			fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */
@@ -220,7 +245,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	}
 
 out:
-	ww_mutex_unlock(&resv->lock);
+	rcu_read_unlock();
 	return events;
 }
 
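The read pattern dma_buf_poll() now uses, shown in isolation as a sketch (driver_resv_is_busy() is a hypothetical helper, not part of this patch): take a seqcount snapshot, dereference under the RCU read lock, and restart if a writer raced in between.

	static bool driver_resv_is_busy(struct reservation_object *resv)
	{
		struct reservation_object_list *fobj;
		struct fence *excl;
		unsigned shared_count, seq;

	retry:
		seq = read_seqcount_begin(&resv->seq);
		rcu_read_lock();

		fobj = rcu_dereference(resv->fence);
		shared_count = fobj ? fobj->shared_count : 0;
		excl = rcu_dereference(resv->fence_excl);

		if (read_seqcount_retry(&resv->seq, seq)) {
			/* a writer published new fences meanwhile: restart */
			rcu_read_unlock();
			goto retry;
		}
		rcu_read_unlock();

		/* pointers are only tested, not dereferenced, after unlock */
		return excl || shared_count;
	}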
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 948bf00d955e..4222cb2aa96a 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(fence_release);
 
 void fence_free(struct fence *fence)
 {
-	kfree(fence);
+	kfree_rcu(fence, rcu);
 }
 EXPORT_SYMBOL(fence_free);
 
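The switch to kfree_rcu() is what makes fence_get_rcu() (added to fence.h below) safe: a reader may still be probing the refcount of a fence whose last reference was just dropped, so the memory has to survive a grace period. A driver with its own release callback would defer freeing the same way; driver_fence_release() is a hypothetical sketch.

	static void driver_fence_release(struct fence *fence)
	{
		/* driver-specific teardown would go here */

		/* defer the actual kfree past an rcu grace period */
		fence_free(fence);
	}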
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index e6166723a9ae..3c97c8fa8d02 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -38,6 +38,11 @@
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
 /*
  * Reserve space to add a shared fence to a reservation_object,
  * must be called with obj->lock held.
@@ -82,27 +87,37 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 {
 	u32 i;
 
+	fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
 	for (i = 0; i < fobj->shared_count; ++i) {
-		if (fobj->shared[i]->context == fence->context) {
-			struct fence *old_fence = fobj->shared[i];
+		struct fence *old_fence;
 
-			fence_get(fence);
+		old_fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
 
-			fobj->shared[i] = fence;
+		if (old_fence->context == fence->context) {
+			/* memory barrier is added by write_seqcount_begin */
+			RCU_INIT_POINTER(fobj->shared[i], fence);
+			write_seqcount_end(&obj->seq);
+			preempt_enable();
 
 			fence_put(old_fence);
 			return;
 		}
 	}
 
-	fence_get(fence);
-	fobj->shared[fobj->shared_count] = fence;
 	/*
-	 * make the new fence visible before incrementing
-	 * fobj->shared_count
+	 * memory barrier is added by write_seqcount_begin,
+	 * fobj->shared_count is protected by this lock too
 	 */
-	smp_wmb();
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
 	fobj->shared_count++;
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 }
 
 static void
@@ -112,11 +127,12 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 			struct fence *fence)
 {
 	unsigned i;
+	struct fence *old_fence = NULL;
 
 	fence_get(fence);
 
 	if (!old) {
-		fobj->shared[0] = fence;
+		RCU_INIT_POINTER(fobj->shared[0], fence);
 		fobj->shared_count = 1;
 		goto done;
 	}
@@ -130,19 +146,38 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count = old->shared_count;
 
 	for (i = 0; i < old->shared_count; ++i) {
-		if (fence && old->shared[i]->context == fence->context) {
-			fence_put(old->shared[i]);
-			fobj->shared[i] = fence;
-			fence = NULL;
+		struct fence *check;
+
+		check = rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj));
+
+		if (!old_fence && check->context == fence->context) {
+			old_fence = check;
+			RCU_INIT_POINTER(fobj->shared[i], fence);
 		} else
-			fobj->shared[i] = old->shared[i];
+			RCU_INIT_POINTER(fobj->shared[i], check);
+	}
+	if (!old_fence) {
+		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+		fobj->shared_count++;
 	}
-	if (fence)
-		fobj->shared[fobj->shared_count++] = fence;
 
 done:
-	obj->fence = fobj;
-	kfree(old);
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/*
+	 * RCU_INIT_POINTER can be used here,
+	 * seqcount provides the necessary barriers
+	 */
+	RCU_INIT_POINTER(obj->fence, fobj);
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	if (old)
+		kfree_rcu(old, rcu);
+
+	if (old_fence)
+		fence_put(old_fence);
 }
 
 /*
@@ -158,7 +193,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	obj->staged = NULL;
 
 	if (!fobj) {
-		BUG_ON(old->shared_count == old->shared_max);
+		BUG_ON(old->shared_count >= old->shared_max);
 		reservation_object_add_shared_inplace(obj, old, fence);
 	} else
 		reservation_object_add_shared_replace(obj, old, fobj, fence);
@@ -168,26 +203,275 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
-	struct fence *old_fence = obj->fence_excl;
+	struct fence *old_fence = reservation_object_get_excl(obj);
 	struct reservation_object_list *old;
 	u32 i = 0;
 
 	old = reservation_object_get_list(obj);
-	if (old) {
+	if (old)
 		i = old->shared_count;
-		old->shared_count = 0;
-	}
 
 	if (fence)
 		fence_get(fence);
 
-	obj->fence_excl = fence;
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
+	if (old)
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
 
 	/* inplace update, no shared fences */
 	while (i--)
-		fence_put(old->shared[i]);
+		fence_put(rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj)));
 
 	if (old_fence)
 		fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
+
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct fence ***pshared)
+{
+	unsigned shared_count = 0;
+	unsigned retry = 1;
+	struct fence **shared = NULL, *fence_excl = NULL;
+	int ret = 0;
+
+	while (retry) {
+		struct reservation_object_list *fobj;
+		unsigned seq;
+
+		seq = read_seqcount_begin(&obj->seq);
+
+		rcu_read_lock();
+
+		fobj = rcu_dereference(obj->fence);
+		if (fobj) {
+			struct fence **nshared;
+			size_t sz = sizeof(*shared) * fobj->shared_max;
+
+			nshared = krealloc(shared, sz,
+					   GFP_NOWAIT | __GFP_NOWARN);
+			if (!nshared) {
+				rcu_read_unlock();
+				nshared = krealloc(shared, sz, GFP_KERNEL);
+				if (nshared) {
+					shared = nshared;
+					continue;
+				}
+
+				ret = -ENOMEM;
+				shared_count = 0;
+				break;
+			}
+			shared = nshared;
+			memcpy(shared, fobj->shared, sz);
+			shared_count = fobj->shared_count;
+		} else
+			shared_count = 0;
+		fence_excl = rcu_dereference(obj->fence_excl);
+
+		retry = read_seqcount_retry(&obj->seq, seq);
+		if (retry)
+			goto unlock;
+
+		if (!fence_excl || fence_get_rcu(fence_excl)) {
+			unsigned i;
+
+			for (i = 0; i < shared_count; ++i) {
+				if (fence_get_rcu(shared[i]))
+					continue;
+
+				/* uh oh, refcount failed, abort and retry */
+				while (i--)
+					fence_put(shared[i]);
+
+				if (fence_excl) {
+					fence_put(fence_excl);
+					fence_excl = NULL;
+				}
+
+				retry = 1;
+				break;
+			}
+		} else
+			retry = 1;
+
+unlock:
+		rcu_read_unlock();
+	}
+	*pshared_count = shared_count;
+	if (shared_count)
+		*pshared = shared;
+	else {
+		*pshared = NULL;
+		kfree(shared);
+	}
+	*pfence_excl = fence_excl;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout)
+{
+	struct fence *fence;
+	unsigned seq, shared_count, i = 0;
+	long ret = timeout;
+
+retry:
+	fence = NULL;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (wait_all) {
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+				continue;
+
+			if (!fence_get_rcu(lfence))
+				goto unlock_retry;
+
+			if (fence_is_signaled(lfence)) {
+				fence_put(lfence);
+				continue;
+			}
+
+			fence = lfence;
+			break;
+		}
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl &&
+		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
+			if (!fence_get_rcu(fence_excl))
+				goto unlock_retry;
+
+			if (fence_is_signaled(fence_excl))
+				fence_put(fence_excl);
+			else
+				fence = fence_excl;
+		}
+	}
+
+	rcu_read_unlock();
+	if (fence) {
+		ret = fence_wait_timeout(fence, intr, ret);
+		fence_put(fence);
+		if (ret > 0 && wait_all && (i + 1 < shared_count))
+			goto retry;
+	}
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+
+
+static inline int
+reservation_object_test_signaled_single(struct fence *passed_fence)
+{
+	struct fence *fence, *lfence = passed_fence;
+	int ret = 1;
+
+	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		int ret;
+
+		fence = fence_get_rcu(lfence);
+		if (!fence)
+			return -1;
+
+		ret = !!fence_is_signaled(fence);
+		fence_put(fence);
+	}
+	return ret;
+}
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all)
+{
+	unsigned seq, shared_count;
+	int ret = true;
+
+retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (test_all) {
+		unsigned i;
+
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
+
+			ret = reservation_object_test_signaled_single(fence);
+			if (ret < 0)
+				goto unlock_retry;
+			else if (!ret)
+				break;
+		}
+
+		/*
+		 * There could be a read_seqcount_retry here, but nothing cares
+		 * about whether it's the old or newer fence pointers that are
+		 * signaled. That race could still have happened after checking
+		 * read_seqcount_retry. If you care, use ww_mutex_lock.
+		 */
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl) {
+			ret = reservation_object_test_signaled_single(fence_excl);
+			if (ret < 0)
+				goto unlock_retry;
+		}
+	}
+
+	rcu_read_unlock();
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
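Ownership rules for reservation_object_get_fences_rcu(), as a sketch: on success the caller holds a reference on every returned fence and owns the shared array, which is NULL when the count is zero. driver_bo_snapshot() is a hypothetical name.

	static int driver_bo_snapshot(struct reservation_object *resv)
	{
		struct fence *excl, **shared;
		unsigned count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv, &excl,
							&count, &shared);
		if (ret)
			return ret;	/* -ENOMEM */

		/* ... inspect or hand off the snapshot here ... */

		for (i = 0; i < count; ++i)
			fence_put(shared[i]);
		kfree(shared);		/* NULL when count == 0 */
		if (excl)
			fence_put(excl);

		return 0;
	}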
diff --git a/include/linux/fence.h b/include/linux/fence.h
index b935cc650123..d174585b874b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -28,6 +28,7 @@
 #include <linux/kref.h>
 #include <linux/sched.h>
 #include <linux/printk.h>
+#include <linux/rcupdate.h>
 
 struct fence;
 struct fence_ops;
@@ -37,6 +38,7 @@ struct fence_cb;
  * struct fence - software synchronization primitive
  * @refcount: refcount for this fence
  * @ops: fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
  * @cb_list: list of all callbacks to call
  * @lock: spin_lock_irqsave used for locking
  * @context: execution context this fence belongs to, returned by
@@ -70,6 +72,7 @@ struct fence_cb;
 struct fence {
 	struct kref refcount;
 	const struct fence_ops *ops;
+	struct rcu_head rcu;
 	struct list_head cb_list;
 	spinlock_t *lock;
 	unsigned context, seqno;
@@ -192,6 +195,20 @@ static inline struct fence *fence_get(struct fence *fence)
 }
 
 /**
+ * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct fence *fence_get_rcu(struct fence *fence)
+{
+	if (kref_get_unless_zero(&fence->refcount))
+		return fence;
+	else
+		return NULL;
+}
+
+/**
  * fence_put - decreases refcount of the fence
  * @fence: [in] fence to reduce refcount of
  */
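The commit message's exclusive-fence fast path (rcu_dereference followed by fence_get_rcu, no seqcount needed) as a sketch; get_excl_unlocked() is a hypothetical name.

	static struct fence *get_excl_unlocked(struct reservation_object *resv)
	{
		struct fence *fence;

		rcu_read_lock();
		fence = rcu_dereference(resv->fence_excl);
		if (fence)
			/* NULL if the refcount already dropped to zero */
			fence = fence_get_rcu(fence);
		rcu_read_unlock();

		return fence;
	}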
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 2affe67dea6e..5a0b64cf68b4 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -42,22 +42,29 @@
 #include <linux/ww_mutex.h>
 #include <linux/fence.h>
 #include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
 
 struct reservation_object_list {
+	struct rcu_head rcu;
 	u32 shared_count, shared_max;
-	struct fence *shared[];
+	struct fence __rcu *shared[];
 };
 
 struct reservation_object {
 	struct ww_mutex lock;
+	seqcount_t seq;
 
-	struct fence *fence_excl;
-	struct reservation_object_list *fence;
+	struct fence __rcu *fence_excl;
+	struct reservation_object_list __rcu *fence;
 	struct reservation_object_list *staged;
 };
 
+#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define reservation_object_assert_held(obj) \
 	lockdep_assert_held(&(obj)->lock.base)
 
@@ -66,8 +73,9 @@ reservation_object_init(struct reservation_object *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
 
-	obj->fence_excl = NULL;
-	obj->fence = NULL;
+	__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
+	RCU_INIT_POINTER(obj->fence, NULL);
+	RCU_INIT_POINTER(obj->fence_excl, NULL);
 	obj->staged = NULL;
 }
 
@@ -76,18 +84,20 @@ reservation_object_fini(struct reservation_object *obj)
 {
 	int i;
 	struct reservation_object_list *fobj;
+	struct fence *excl;
 
 	/*
 	 * This object should be dead and all references must have
-	 * been released to it.
+	 * been released to it, so no need to be protected with rcu.
 	 */
-	if (obj->fence_excl)
-		fence_put(obj->fence_excl);
+	excl = rcu_dereference_protected(obj->fence_excl, 1);
+	if (excl)
+		fence_put(excl);
 
-	fobj = obj->fence;
+	fobj = rcu_dereference_protected(obj->fence, 1);
 	if (fobj) {
 		for (i = 0; i < fobj->shared_count; ++i)
-			fence_put(fobj->shared[i]);
+			fence_put(rcu_dereference_protected(fobj->shared[i], 1));
 
 		kfree(fobj);
 	}
@@ -99,17 +109,15 @@ reservation_object_fini(struct reservation_object *obj)
 static inline struct reservation_object_list *
 reservation_object_get_list(struct reservation_object *obj)
 {
-	reservation_object_assert_held(obj);
-
-	return obj->fence;
+	return rcu_dereference_protected(obj->fence,
+					 reservation_object_held(obj));
 }
 
 static inline struct fence *
 reservation_object_get_excl(struct reservation_object *obj)
 {
-	reservation_object_assert_held(obj);
-
-	return obj->fence_excl;
+	return rcu_dereference_protected(obj->fence_excl,
+					 reservation_object_held(obj));
 }
 
 int reservation_object_reserve_shared(struct reservation_object *obj);
@@ -119,4 +127,16 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence);
 
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct fence ***pshared);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout);
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all);
+
 #endif /* _LINUX_RESERVATION_H */
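The write side is unchanged for callers: fences are still attached under the ww_mutex, and the seqcount write sections are handled inside the add functions. A minimal sketch, with driver_attach_fence() hypothetical:

	static int driver_attach_fence(struct reservation_object *resv,
				       struct fence *fence, bool exclusive)
	{
		int ret = 0;

		ww_mutex_lock(&resv->lock, NULL);

		if (exclusive) {
			reservation_object_add_excl_fence(resv, fence);
		} else {
			ret = reservation_object_reserve_shared(resv);
			if (!ret)
				reservation_object_add_shared_fence(resv, fence);
		}

		ww_mutex_unlock(&resv->lock);
		return ret;
	}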