path: root/drivers/dma-buf/dma-buf.c
author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2014-07-01 06:57:43 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>        2014-07-08 16:36:52 -0400
commit    9b495a5887994a6d74d5c261d012083a92b94738 (patch)
tree      513e525cdc0bbce66f76632058bf6379c368184e /drivers/dma-buf/dma-buf.c
parent    0ba6b8fb91fc051535c7612f6241c8197d92323b (diff)
dma-buf: add poll support, v3
Thanks to Fengguang Wu for spotting a missing static cast.

v2:
- Kill unused variable need_shared.
v3:
- Clarify the BUG() in dma_buf_release some more. (Rob Clark)

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--    drivers/dma-buf/dma-buf.c    108
1 file changed, 108 insertions, 0 deletions
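With this change a dma-buf file descriptor can be waited on with ordinary poll(2)/select(2): request POLLIN to wait for a pending exclusive (write) fence, or POLLOUT to also wait for the shared (read) fences. A minimal userspace sketch follows; how the descriptor is obtained (for example from a DRM PRIME export) and the one-second timeout are illustrative assumptions, not part of this patch.

#include <poll.h>
#include <stdio.h>

/*
 * Block until the buffer is safe to read, i.e. any pending write
 * (the exclusive fence) has completed.  dmabuf_fd is assumed to come
 * from an exporter, e.g. a DRM PRIME handle-to-fd ioctl.
 */
static int wait_for_read_access(int dmabuf_fd)
{
	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
	int ret = poll(&pfd, 1, 1000);		/* illustrative 1s timeout */

	if (ret < 0) {
		perror("poll(dma-buf)");
		return -1;
	}
	if (ret == 0) {
		fprintf(stderr, "dma-buf still busy after 1s\n");
		return -1;
	}
	return 0;				/* POLLIN set in pfd.revents */
}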
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cd40ca22911f..25e8c4165936 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -30,6 +30,7 @@
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
 #include <linux/reservation.h>
 
 static inline int is_dma_buf_file(struct file *);
@@ -52,6 +53,16 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 
 	BUG_ON(dmabuf->vmapping_counter);
 
+	/*
+	 * Any fences that a dma-buf poll can wait on should be signaled
+	 * before releasing dma-buf. This is the responsibility of each
+	 * driver that uses the reservation objects.
+	 *
+	 * If you hit this BUG() it means someone dropped their ref to the
+	 * dma-buf while still having pending operation to the buffer.
+	 */
+	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+
 	dmabuf->ops->release(dmabuf);
 
 	mutex_lock(&db_list.lock);
@@ -108,10 +119,103 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 	return base + offset;
 }
 
+static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcb->poll->lock, flags);
+	wake_up_locked_poll(dcb->poll, dcb->active);
+	dcb->active = 0;
+	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+}
+
+static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+{
+	struct dma_buf *dmabuf;
+	struct reservation_object *resv;
+	unsigned long events;
+
+	dmabuf = file->private_data;
+	if (!dmabuf || !dmabuf->resv)
+		return POLLERR;
+
+	resv = dmabuf->resv;
+
+	poll_wait(file, &dmabuf->poll, poll);
+
+	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+	if (!events)
+		return 0;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	if (resv->fence_excl && (!(events & POLLOUT) ||
+				 resv->fence_shared_count == 0)) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
+		unsigned long pevents = POLLIN;
+
+		if (resv->fence_shared_count == 0)
+			pevents |= POLLOUT;
+
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active) {
+			dcb->active |= pevents;
+			events &= ~pevents;
+		} else
+			dcb->active = pevents;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (events & pevents) {
+			if (!fence_add_callback(resv->fence_excl,
+						&dcb->cb, dma_buf_poll_cb))
+				events &= ~pevents;
+			else
+				/*
+				 * No callback queued, wake up any additional
+				 * waiters.
+				 */
+				dma_buf_poll_cb(NULL, &dcb->cb);
+		}
+	}
+
+	if ((events & POLLOUT) && resv->fence_shared_count > 0) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
+		int i;
+
+		/* Only queue a new callback if no event has fired yet */
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active)
+			events &= ~POLLOUT;
+		else
+			dcb->active = POLLOUT;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (!(events & POLLOUT))
+			goto out;
+
+		for (i = 0; i < resv->fence_shared_count; ++i)
+			if (!fence_add_callback(resv->fence_shared[i],
+						&dcb->cb, dma_buf_poll_cb)) {
+				events &= ~POLLOUT;
+				break;
+			}
+
+		/* No callback queued, wake up any additional waiters. */
+		if (i == resv->fence_shared_count)
+			dma_buf_poll_cb(NULL, &dcb->cb);
+	}
+
+out:
+	ww_mutex_unlock(&resv->lock);
+	return events;
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
 	.llseek		= dma_buf_llseek,
+	.poll		= dma_buf_poll,
 };
 
 /*
@@ -171,6 +275,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 	dmabuf->ops = ops;
 	dmabuf->size = size;
 	dmabuf->exp_name = exp_name;
+	init_waitqueue_head(&dmabuf->poll);
+	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
+	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+
 	if (!resv) {
 		resv = (struct reservation_object *)&dmabuf[1];
 		reservation_object_init(resv);
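The bookkeeping above relies on a small callback wrapper embedded in struct dma_buf. The matching header change is outside this diffstat, so the following layout is inferred from the code rather than quoted from the patch: dma_buf_poll_cb() casts the struct fence_cb pointer back to the wrapper, so cb must be its first member, and the export path points both wrappers at the buffer's wait queue.

/*
 * Inferred sketch of the poll state added to struct dma_buf; the
 * authoritative definition is the companion change to
 * include/linux/dma-buf.h, which this diffstat does not show.
 */
struct dma_buf_poll_cb_t {
	struct fence_cb cb;		/* must be first: dma_buf_poll_cb() casts &cb back */
	wait_queue_head_t *poll;	/* points at the owning dma_buf's wait queue */
	unsigned long active;		/* POLLIN/POLLOUT bits a wake-up is still armed for */
};

struct dma_buf {
	/* existing members (ops, size, resv, ...) elided in this sketch */
	wait_queue_head_t poll;			/* woken from dma_buf_poll_cb() */
	struct dma_buf_poll_cb_t cb_excl;	/* armed for the exclusive fence */
	struct dma_buf_poll_cb_t cb_shared;	/* armed for the shared fences */
};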