author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 17:34:03 -0500
commit    a9a08845e9acbd224e4ee466f5c1275ed50054e8 (patch)
tree      415d6e6a82e001c65e6b161539411f54ba5fe8ce /drivers/dma-buf/dma-buf.c
parent    ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c (diff)
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

with de-mangling cleanups yet to come.

NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do.  But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.

The next patch from Al will sort out the final differences, and we
should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
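As a quick illustration (not part of the commit), the claimed
near-identity of the two constant families can be checked from
userspace with glibc's <poll.h> and <sys/epoll.h>:

    /* Userspace sketch, not from this commit: print a few POLL/EPOLL
     * pairs to check the "same values on almost all architectures"
     * claim on a given machine (they match on x86; some bits differ
     * on Sparc et al., which Al's follow-up patch addresses). */
    #include <stdio.h>
    #include <poll.h>
    #include <sys/epoll.h>

    int main(void)
    {
            printf("POLLIN  = %#06x  EPOLLIN  = %#06x\n",
                   (unsigned)POLLIN, (unsigned)EPOLLIN);
            printf("POLLOUT = %#06x  EPOLLOUT = %#06x\n",
                   (unsigned)POLLOUT, (unsigned)EPOLLOUT);
            printf("POLLERR = %#06x  EPOLLERR = %#06x\n",
                   (unsigned)POLLERR, (unsigned)EPOLLERR);
            return 0;
    }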
Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--  drivers/dma-buf/dma-buf.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 539450713838..d78d5fc173dc 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -135,10 +135,10 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
  *
- * - Checking for POLLIN, i.e. read access, can be use to query the state of the
+ * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the
  *   most recent write or exclusive fence.
  *
- * - Checking for POLLOUT, i.e. write access, can be used to query the state of
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
  *   all attached fences, shared and exclusive ones.
  *
  * Note that this only signals the completion of the respective fences, i.e. the
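The comment above spells out the userspace contract; a minimal sketch
of it (hypothetical helper, not from this patch; dmabuf_fd is assumed
to be a dma-buf file descriptor obtained from an exporting driver):

    #include <poll.h>

    /* Hypothetical helper: block until the dma-buf's most recent
     * write/exclusive fence signals, i.e. until read access is safe. */
    static int wait_for_read_access(int dmabuf_fd, int timeout_ms)
    {
            struct pollfd pfd = {
                    .fd     = dmabuf_fd,
                    .events = POLLIN,  /* exclusive fence completion */
            };

            /* >0: fence signalled, 0: timeout, <0: error */
            return poll(&pfd, 1, timeout_ms);
    }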
@@ -168,13 +168,13 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
-		return POLLERR;
+		return EPOLLERR;
 
 	resv = dmabuf->resv;
 
 	poll_wait(file, &dmabuf->poll, poll);
 
-	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 	if (!events)
 		return 0;
 
@@ -193,12 +193,12 @@ retry:
 		goto retry;
 	}
 
-	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = POLLIN;
+		__poll_t pevents = EPOLLIN;
 
 		if (shared_count == 0)
-			pevents |= POLLOUT;
+			pevents |= EPOLLOUT;
 
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active) {
@@ -228,19 +228,19 @@ retry:
 		}
 	}
 
-	if ((events & POLLOUT) && shared_count > 0) {
+	if ((events & EPOLLOUT) && shared_count > 0) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
 		int i;
 
 		/* Only queue a new callback if no event has fired yet */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~POLLOUT;
+			events &= ~EPOLLOUT;
 		else
-			dcb->active = POLLOUT;
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & POLLOUT))
+		if (!(events & EPOLLOUT))
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
@@ -253,14 +253,14 @@ retry:
 			 *
 			 * call dma_buf_poll_cb and force a recheck!
 			 */
-			events &= ~POLLOUT;
+			events &= ~EPOLLOUT;
 			dma_buf_poll_cb(NULL, &dcb->cb);
 			break;
 		}
 		if (!dma_fence_add_callback(fence, &dcb->cb,
 					    dma_buf_poll_cb)) {
 			dma_fence_put(fence);
-			events &= ~POLLOUT;
+			events &= ~EPOLLOUT;
 			break;
 		}
 		dma_fence_put(fence);
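Since the whole point of the rename is epoll correctness, a companion
sketch of the same wait through epoll may be useful (again hypothetical,
not from this patch; the EPOLL* bits here are the values this commit
switches the kernel side to):

    #include <sys/epoll.h>
    #include <unistd.h>

    /* Hypothetical companion to the poll() helper above.  Returns >0
     * when the exclusive fence has signalled, 0 on timeout, -1 on
     * error. */
    static int epoll_wait_read_access(int dmabuf_fd, int timeout_ms)
    {
            struct epoll_event ev = { .events = EPOLLIN };
            struct epoll_event out;
            int epfd = epoll_create1(0);
            int ret;

            if (epfd < 0)
                    return -1;
            if (epoll_ctl(epfd, EPOLL_CTL_ADD, dmabuf_fd, &ev) < 0) {
                    close(epfd);
                    return -1;
            }
            ret = epoll_wait(epfd, &out, 1, timeout_ms);
            close(epfd);
            return ret;
    }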