path: root/drivers/dma-buf/dma-buf.c
author	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2014-07-01 06:58:00 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-07-08 16:41:08 -0400
commit	3c3b177a9369b26890ced004867fb32708e8ef5b (patch)
tree	d7f1840cd62b8c0d427ea56b203485c2e27bb29e /drivers/dma-buf/dma-buf.c
parent	04a5faa8cbe5a8eaf152cb88959ba6360c26e702 (diff)
reservation: add support for read-only access using rcu
This adds some extra functions to deal with RCU.

reservation_object_get_fences_rcu() will obtain the list of shared and
exclusive fences without obtaining the ww_mutex.

reservation_object_wait_timeout_rcu() will wait on all fences of the
reservation_object, without obtaining the ww_mutex.

reservation_object_test_signaled_rcu() will test if all fences of the
reservation_object are signaled, without using the ww_mutex.

reservation_object_get_excl and reservation_object_get_list require the
reservation object to be held; updates additionally require
write_seqcount_begin/end. If only the exclusive fence is needed,
rcu_dereference() followed by fence_get_rcu() can be used; if the shared
fences are needed as well, it's recommended to use the supplied
functions.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Reviewed-By: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
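[Editor's note] From a caller's point of view, the new read side looks
roughly like the sketch below. This is a minimal illustration of the
helpers named in the commit message (their bodies live in
drivers/dma-buf/reservation.c, outside the diff shown here); "resv" is a
hypothetical struct reservation_object pointer and error handling is
abbreviated, so treat it as a sketch, not code from this patch:

	struct fence *excl, **shared;
	unsigned shared_count, i;
	long lret;
	int ret;

	/* Snapshot all fences without taking the ww_mutex. */
	ret = reservation_object_get_fences_rcu(resv, &excl,
						&shared_count, &shared);
	if (ret)
		return ret;

	/* ... inspect the fences ..., then drop the snapshot's references. */
	for (i = 0; i < shared_count; ++i)
		fence_put(shared[i]);
	kfree(shared);
	if (excl)
		fence_put(excl);

	/* Or block until every fence signals, still without the ww_mutex. */
	lret = reservation_object_wait_timeout_rcu(resv, true, true,
						   MAX_SCHEDULE_TIMEOUT);
	if (lret < 0)
		return lret;

reservation_object_test_signaled_rcu() follows the same calling
convention and answers the question non-blockingly.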
Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--	drivers/dma-buf/dma-buf.c	47
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cb8379dfeed5..f3014c448e1e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -137,7 +137,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct reservation_object_list *fobj;
 	struct fence *fence_excl;
 	unsigned long events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -151,14 +151,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-	ww_mutex_lock(&resv->lock, NULL);
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
 
-	fobj = resv->fence;
-	if (!fobj)
-		goto out;
-
-	shared_count = fobj->shared_count;
-	fence_excl = resv->fence_excl;
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
 
 	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
@@ -176,14 +182,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_add_callback(fence_excl, &dcb->cb,
+			if (!fence_get_rcu(fence_excl)) {
+				/* force a recheck */
+				events &= ~pevents;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			} else if (!fence_add_callback(fence_excl, &dcb->cb,
 						       dma_buf_poll_cb)) {
 				events &= ~pevents;
+				fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
+				fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -205,13 +217,26 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = fobj->shared[i];
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
+			if (!fence_get_rcu(fence)) {
+				/*
+				 * fence refcount dropped to zero, this means
+				 * that fobj has been freed
+				 *
+				 * call dma_buf_poll_cb and force a recheck!
+				 */
+				events &= ~POLLOUT;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+				break;
+			}
 			if (!fence_add_callback(fence, &dcb->cb,
 						dma_buf_poll_cb)) {
+				fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
+			fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */
@@ -220,7 +245,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	}
 
 out:
-	ww_mutex_unlock(&resv->lock);
+	rcu_read_unlock();
 	return events;
 }
 
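[Editor's note] The conversion above is the classic seqcount + RCU read
pattern: sample the write generation with read_seqcount_begin(),
dereference the pointers under rcu_read_lock(), pin each fence with
fence_get_rcu() (a kref_get_unless_zero(), which fails once the refcount
has already hit zero and the fence is being freed), and start over if
read_seqcount_retry() reports that a writer ran write_seqcount_begin/end
in between. A minimal standalone sketch, assuming only the exclusive
fence is wanted (the helper name is hypothetical and does not exist in
this patch):

	static struct fence *sample_excl_fence(struct reservation_object *resv)
	{
		struct fence *fence_excl;
		unsigned seq;

	retry:
		/* Note the write generation before reading any pointers. */
		seq = read_seqcount_begin(&resv->seq);
		rcu_read_lock();

		fence_excl = rcu_dereference(resv->fence_excl);
		if (fence_excl && !fence_get_rcu(fence_excl)) {
			/*
			 * The refcount already dropped to zero, so the
			 * pointer we read is stale; resample.
			 */
			rcu_read_unlock();
			goto retry;
		}
		rcu_read_unlock();

		/* A writer raced with us: drop the reference and retry. */
		if (read_seqcount_retry(&resv->seq, seq)) {
			if (fence_excl)
				fence_put(fence_excl);
			goto retry;
		}
		return fence_excl;	/* caller drops it with fence_put() */
	}

dma_buf_poll() does the same thing, except that it must also snapshot
the shared-fence list consistently with the exclusive fence, which is
why it reads both under one seqcount sample before taking any
references.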