author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/aio.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 367 +++++++++++++++++++++++++++++-------------------------------
 1 files changed, 186 insertions, 181 deletions
@@ -13,7 +13,7 @@
 #include <linux/errno.h>
 #include <linux/time.h>
 #include <linux/aio_abi.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
@@ -56,6 +56,13 @@ static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
+/* Used for rare fput completion. */
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
+
+static DEFINE_SPINLOCK(fput_lock);
+static LIST_HEAD(fput_head);
+
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -86,8 +93,9 @@ static void aio_free_ring(struct kioctx *ctx)
                put_page(info->ring_pages[i]);
 
        if (info->mmap_size) {
-               BUG_ON(ctx->mm != current->mm);
-               vm_munmap(info->mmap_base, info->mmap_size);
+               down_write(&ctx->mm->mmap_sem);
+               do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
+               up_write(&ctx->mm->mmap_sem);
        }
 
        if (info->ring_pages && info->ring_pages != info->internal_pages)
@@ -127,9 +135,9 @@ static int aio_setup_ring(struct kioctx *ctx)
        info->mmap_size = nr_pages * PAGE_SIZE;
        dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&ctx->mm->mmap_sem);
-       info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
-                                       PROT_READ|PROT_WRITE,
-                                       MAP_ANONYMOUS|MAP_PRIVATE, 0);
+       info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
+                                 PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
+                                 0);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
@@ -152,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        info->nr = nr_events;   /* trusted copy */
 
-       ring = kmap_atomic(info->ring_pages[0]);
+       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
        ring->nr = nr_events;   /* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
@@ -160,38 +168,47 @@ static int aio_setup_ring(struct kioctx *ctx)
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
-       kunmap_atomic(ring);
+       kunmap_atomic(ring, KM_USER0);
 
        return 0;
 }
 
 
 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic().  Release the pointer with put_aio_ring_event();
+ * kmap_atomic(, km).  Release the pointer with put_aio_ring_event();
  */
 #define AIO_EVENTS_PER_PAGE    (PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE  ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET      (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr) ({ \
+#define aio_ring_event(info, nr, km) ({ \
        unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
        struct io_event *__event; \
        __event = kmap_atomic( \
-               (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
+               (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
        __event += pos % AIO_EVENTS_PER_PAGE; \
        __event; \
 })
 
-#define put_aio_ring_event(event) do { \
+#define put_aio_ring_event(event, km) do { \
        struct io_event *__event = (event); \
        (void)__event; \
-       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
+       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
 static void ctx_rcu_free(struct rcu_head *head)
 {
        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+       unsigned nr_events = ctx->max_reqs;
+
        kmem_cache_free(kioctx_cachep, ctx);
+
+       if (nr_events) {
+               spin_lock(&aio_nr_lock);
+               BUG_ON(aio_nr - nr_events > aio_nr);
+               aio_nr -= nr_events;
+               spin_unlock(&aio_nr_lock);
+       }
 }
 
 /* __put_ioctx
@@ -200,23 +217,23 @@ static void ctx_rcu_free(struct rcu_head *head)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
-       unsigned nr_events = ctx->max_reqs;
        BUG_ON(ctx->reqs_active);
 
-       cancel_delayed_work_sync(&ctx->wq);
+       cancel_delayed_work(&ctx->wq);
+       cancel_work_sync(&ctx->wq.work);
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
-       if (nr_events) {
-               spin_lock(&aio_nr_lock);
-               BUG_ON(aio_nr - nr_events > aio_nr);
-               aio_nr -= nr_events;
-               spin_unlock(&aio_nr_lock);
-       }
        pr_debug("__put_ioctx: freeing %p\n", ctx);
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
+static inline void get_ioctx(struct kioctx *kioctx)
+{
+       BUG_ON(atomic_read(&kioctx->users) <= 0);
+       atomic_inc(&kioctx->users);
+}
+
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
        return atomic_inc_not_zero(&kioctx->users);
@@ -236,7 +253,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
        struct mm_struct *mm;
        struct kioctx *ctx;
-       int err = -ENOMEM;
+       int did_sync = 0;
 
        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
@@ -245,7 +262,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                return ERR_PTR(-EINVAL);
        }
 
-       if (!nr_events || (unsigned long)nr_events > aio_max_nr)
+       if ((unsigned long)nr_events > aio_max_nr)
                return ERR_PTR(-EAGAIN);
 
        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -256,7 +273,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);
 
-       atomic_set(&ctx->users, 2);
+       atomic_set(&ctx->users, 1);
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->ring_info.ring_lock);
        init_waitqueue_head(&ctx->wait);
@@ -269,14 +286,25 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                goto out_freectx;
 
        /* limit the number of system wide aios */
-       spin_lock(&aio_nr_lock);
-       if (aio_nr + nr_events > aio_max_nr ||
-           aio_nr + nr_events < aio_nr) {
-               spin_unlock(&aio_nr_lock);
+       do {
+               spin_lock_bh(&aio_nr_lock);
+               if (aio_nr + nr_events > aio_max_nr ||
+                   aio_nr + nr_events < aio_nr)
+                       ctx->max_reqs = 0;
+               else
+                       aio_nr += ctx->max_reqs;
+               spin_unlock_bh(&aio_nr_lock);
+               if (ctx->max_reqs || did_sync)
+                       break;
+
+               /* wait for rcu callbacks to have completed before giving up */
+               synchronize_rcu();
+               did_sync = 1;
+               ctx->max_reqs = nr_events;
+       } while (1);
+
+       if (ctx->max_reqs == 0)
                goto out_cleanup;
-       }
-       aio_nr += ctx->max_reqs;
-       spin_unlock(&aio_nr_lock);
 
        /* now link into global list. */
        spin_lock(&mm->ioctx_lock);
@@ -288,27 +316,27 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        return ctx;
 
 out_cleanup:
-       err = -EAGAIN;
-       aio_free_ring(ctx);
+       __put_ioctx(ctx);
+       return ERR_PTR(-EAGAIN);
+
 out_freectx:
        mmdrop(mm);
        kmem_cache_free(kioctx_cachep, ctx);
-       dprintk("aio: error allocating ioctx %d\n", err);
-       return ERR_PTR(err);
+       ctx = ERR_PTR(-ENOMEM);
+
+       dprintk("aio: error allocating ioctx %p\n", ctx);
+       return ctx;
 }
 
-/* kill_ctx
+/* aio_cancel_all
  * Cancels all outstanding aio requests on an aio context.  Used
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ctx(struct kioctx *ctx)
+static void aio_cancel_all(struct kioctx *ctx)
 {
        int (*cancel)(struct kiocb *, struct io_event *);
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
        struct io_event res;
-
        spin_lock_irq(&ctx->ctx_lock);
        ctx->dead = 1;
        while (!list_empty(&ctx->active_reqs)) {
@@ -324,7 +352,15 @@ static void kill_ctx(struct kioctx *ctx)
                        spin_lock_irq(&ctx->ctx_lock);
                }
        }
+       spin_unlock_irq(&ctx->ctx_lock);
+}
+
+static void wait_for_all_aios(struct kioctx *ctx)
+{
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
 
+       spin_lock_irq(&ctx->ctx_lock);
        if (!ctx->reqs_active)
                goto out;
 
@@ -374,24 +410,19 @@ void exit_aio(struct mm_struct *mm)
                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
                hlist_del_rcu(&ctx->list);
 
-               kill_ctx(ctx);
+               aio_cancel_all(ctx);
+
+               wait_for_all_aios(ctx);
+               /*
+                * Ensure we don't leave the ctx on the aio_wq
+                */
+               cancel_work_sync(&ctx->wq.work);
 
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
                                "exit_aio:ioctx still alive: %d %d %d\n",
                                atomic_read(&ctx->users), ctx->dead,
                                ctx->reqs_active);
-               /*
-                * We don't need to bother with munmap() here -
-                * exit_mmap(mm) is coming and it'll unmap everything.
-                * Since aio_free_ring() uses non-zero ->mmap_size
-                * as indicator that it needs to unmap the area,
-                * just set it to 0; aio_free_ring() is the only
-                * place that uses ->mmap_size, so it's safe.
-                * That way we get all munmap done to current->mm -
-                * all other callers have ctx->mm == current->mm.
-                */
-               ctx->ring_info.mmap_size = 0;
                put_ioctx(ctx);
        }
 }
@@ -409,6 +440,8 @@ void exit_aio(struct mm_struct *mm)
 static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
        struct kiocb *req = NULL;
+       struct aio_ring *ring;
+       int okay = 0;
 
        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
        if (unlikely(!req))
@@ -426,105 +459,39 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
        INIT_LIST_HEAD(&req->ki_run_list);
        req->ki_eventfd = NULL;
 
-       return req;
-}
-
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE       32L
-struct kiocb_batch {
-       struct list_head head;
-       long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
-{
-       INIT_LIST_HEAD(&batch->head);
-       batch->count = total;
-}
-
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-       struct kiocb *req, *n;
-
-       if (list_empty(&batch->head))
-               return;
-
-       spin_lock_irq(&ctx->ctx_lock);
-       list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-               list_del(&req->ki_batch);
-               list_del(&req->ki_list);
-               kmem_cache_free(kiocb_cachep, req);
-               ctx->reqs_active--;
-       }
-       if (unlikely(!ctx->reqs_active && ctx->dead))
-               wake_up_all(&ctx->wait);
-       spin_unlock_irq(&ctx->ctx_lock);
-}
-
-/*
- * Allocate a batch of kiocbs.  This avoids taking and dropping the
- * context lock a lot during setup.
- */
-static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-       unsigned short allocated, to_alloc;
-       long avail;
-       struct kiocb *req, *n;
-       struct aio_ring *ring;
-
-       to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
-       for (allocated = 0; allocated < to_alloc; allocated++) {
-               req = __aio_get_req(ctx);
-               if (!req)
-                       /* allocation failed, go with what we've got */
-                       break;
-               list_add(&req->ki_batch, &batch->head);
-       }
-
-       if (allocated == 0)
-               goto out;
-
+       /* Check if the completion queue has enough free space to
+        * accept an event from this io.
+        */
        spin_lock_irq(&ctx->ctx_lock);
-       ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
-
-       avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
-       BUG_ON(avail < 0);
-       if (avail < allocated) {
-               /* Trim back the number of requests. */
-               list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-                       list_del(&req->ki_batch);
-                       kmem_cache_free(kiocb_cachep, req);
-                       if (--allocated <= avail)
-                               break;
-               }
-       }
-
-       batch->count -= allocated;
-       list_for_each_entry(req, &batch->head, ki_batch) {
+       ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
+       if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
                list_add(&req->ki_list, &ctx->active_reqs);
                ctx->reqs_active++;
+               okay = 1;
        }
-
-       kunmap_atomic(ring);
+       kunmap_atomic(ring, KM_USER0);
        spin_unlock_irq(&ctx->ctx_lock);
 
-out:
-       return allocated;
+       if (!okay) {
+               kmem_cache_free(kiocb_cachep, req);
+               req = NULL;
+       }
+
+       return req;
 }
 
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
-                                       struct kiocb_batch *batch)
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
        struct kiocb *req;
-
-       if (list_empty(&batch->head))
-               if (kiocb_batch_refill(ctx, batch) == 0)
-                       return NULL;
-       req = list_first_entry(&batch->head, struct kiocb, ki_batch);
-       list_del(&req->ki_batch);
+       /* Handle a potential starvation case -- should be exceedingly rare as
+        * requests will be stuck on fput_head only if the aio_fput_routine is
+        * delayed and the requests were the last user of the struct file.
+        */
+       req = __aio_get_req(ctx);
+       if (unlikely(NULL == req)) {
+               aio_fput_routine(NULL);
+               req = __aio_get_req(ctx);
+       }
        return req;
 }
 
@@ -545,6 +512,31 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
                wake_up_all(&ctx->wait);
 }
 
+static void aio_fput_routine(struct work_struct *data)
+{
+       spin_lock_irq(&fput_lock);
+       while (likely(!list_empty(&fput_head))) {
+               struct kiocb *req = list_kiocb(fput_head.next);
+               struct kioctx *ctx = req->ki_ctx;
+
+               list_del(&req->ki_list);
+               spin_unlock_irq(&fput_lock);
+
+               /* Complete the fput(s) */
+               if (req->ki_filp != NULL)
+                       fput(req->ki_filp);
+
+               /* Link the iocb into the context's free list */
+               spin_lock_irq(&ctx->ctx_lock);
+               really_put_req(ctx, req);
+               spin_unlock_irq(&ctx->ctx_lock);
+
+               put_ioctx(ctx);
+               spin_lock_irq(&fput_lock);
+       }
+       spin_unlock_irq(&fput_lock);
+}
+
 /* __aio_put_req
  * Returns true if this put was the last user of the request.
  */
@@ -563,9 +555,22 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
        req->ki_cancel = NULL;
        req->ki_retry = NULL;
 
-       fput(req->ki_filp);
-       req->ki_filp = NULL;
-       really_put_req(ctx, req);
+       /*
+        * Try to optimize the aio and eventfd file* puts, by avoiding to
+        * schedule work in case it is not final fput() time. In normal cases,
+        * we would not be holding the last reference to the file*, so
+        * this function will be executed w/out any aio kthread wakeup.
+        */
+       if (unlikely(!fput_atomic(req->ki_filp))) {
+               get_ioctx(ctx);
+               spin_lock(&fput_lock);
+               list_add(&req->ki_list, &fput_head);
+               spin_unlock(&fput_lock);
+               schedule_work(&fput_work);
+       } else {
+               req->ki_filp = NULL;
+               really_put_req(ctx, req);
+       }
        return 1;
 }
 
@@ -835,7 +840,7 @@ static void aio_kick_handler(struct work_struct *work)
        unuse_mm(mm);
        set_fs(oldfs);
        /*
-        * we're in a worker thread already; no point using non-zero delay
+        * we're in a worker thread already, don't use queue_delayed_work,
         */
        if (requeue)
                queue_delayed_work(aio_wq, &ctx->wq, 0);
@@ -934,10 +939,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        if (kiocbIsCancelled(iocb))
                goto put_rq;
 
-       ring = kmap_atomic(info->ring_pages[0]);
+       ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
 
        tail = info->tail;
-       event = aio_ring_event(info, tail);
+       event = aio_ring_event(info, tail, KM_IRQ0);
        if (++tail >= info->nr)
                tail = 0;
 
@@ -958,8 +963,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        info->tail = tail;
        ring->tail = tail;
 
-       put_aio_ring_event(event);
-       kunmap_atomic(ring);
+       put_aio_ring_event(event, KM_IRQ0);
+       kunmap_atomic(ring, KM_IRQ1);
 
        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 
@@ -1004,7 +1009,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
        unsigned long head;
        int ret = 0;
 
-       ring = kmap_atomic(info->ring_pages[0]);
+       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                 (unsigned long)ring->head, (unsigned long)ring->tail,
                 (unsigned long)ring->nr);
@@ -1016,18 +1021,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 
        head = ring->head % info->nr;
        if (head != ring->tail) {
-               struct io_event *evp = aio_ring_event(info, head);
+               struct io_event *evp = aio_ring_event(info, head, KM_USER1);
                *ent = *evp;
                head = (head + 1) % info->nr;
                smp_mb(); /* finish reading the event before updatng the head */
                ring->head = head;
                ret = 1;
-               put_aio_ring_event(evp);
+               put_aio_ring_event(evp, KM_USER1);
        }
        spin_unlock(&info->ring_lock);
 
 out:
-       kunmap_atomic(ring);
+       kunmap_atomic(ring, KM_USER0);
        dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
        return ret;
@@ -1205,7 +1210,8 @@ static void io_destroy(struct kioctx *ioctx)
        if (likely(!was_dead))
                put_ioctx(ioctx);       /* twice for the list */
 
-       kill_ctx(ioctx);
+       aio_cancel_all(ioctx);
+       wait_for_all_aios(ioctx);
 
        /*
         * Wake up any waiters.  The setting of ctx->dead must be seen
@@ -1213,6 +1219,7 @@ static void io_destroy(struct kioctx *ioctx)
         * locking done by the above calls to ensure this consistency.
         */
        wake_up_all(&ioctx->wait);
+       put_ioctx(ioctx);       /* once for the lookup */
 }
 
 /* sys_io_setup:
@@ -1249,9 +1256,11 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
-               if (ret)
-                       io_destroy(ioctx);
-               put_ioctx(ioctx);
+               if (!ret)
+                       return 0;
+
+               get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+               io_destroy(ioctx);
        }
 
 out:
@@ -1269,7 +1278,6 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
                io_destroy(ioctx);
-               put_ioctx(ioctx);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1389,10 +1397,6 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
        if (ret < 0)
                goto out;
 
-       ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
-       if (ret < 0)
-               goto out;
-
        kiocb->ki_nr_segs = kiocb->ki_nbytes;
        kiocb->ki_cur_seg = 0;
        /* ki_nbytes/left now reflect bytes instead of segs */
@@ -1404,17 +1408,11 @@ out:
        return ret;
 }
 
-static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
 {
-       int bytes;
-
-       bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
-       if (bytes < 0)
-               return bytes;
-
        kiocb->ki_iovec = &kiocb->ki_inline_vec;
        kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-       kiocb->ki_iovec->iov_len = bytes;
+       kiocb->ki_iovec->iov_len = kiocb->ki_left;
        kiocb->ki_nr_segs = 1;
        kiocb->ki_cur_seg = 0;
        return 0;
@@ -1439,7 +1437,10 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
                        kiocb->ki_left)))
                        break;
-               ret = aio_setup_single_vector(READ, file, kiocb);
+               ret = security_file_permission(file, MAY_READ);
+               if (unlikely(ret))
+                       break;
+               ret = aio_setup_single_vector(kiocb);
                if (ret)
                        break;
                ret = -EINVAL;
@@ -1454,7 +1455,10 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
                        kiocb->ki_left)))
                        break;
-               ret = aio_setup_single_vector(WRITE, file, kiocb);
+               ret = security_file_permission(file, MAY_WRITE);
+               if (unlikely(ret))
+                       break;
+               ret = aio_setup_single_vector(kiocb);
                if (ret)
                        break;
                ret = -EINVAL;
@@ -1465,6 +1469,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                ret = -EBADF;
                if (unlikely(!(file->f_mode & FMODE_READ)))
                        break;
+               ret = security_file_permission(file, MAY_READ);
+               if (unlikely(ret))
+                       break;
                ret = aio_setup_vectored_rw(READ, kiocb, compat);
                if (ret)
                        break;
@@ -1476,6 +1483,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                ret = -EBADF;
                if (unlikely(!(file->f_mode & FMODE_WRITE)))
                        break;
+               ret = security_file_permission(file, MAY_WRITE);
+               if (unlikely(ret))
+                       break;
                ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
                if (ret)
                        break;
@@ -1505,8 +1515,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-                        struct iocb *iocb, struct kiocb_batch *batch,
-                        bool compat)
+                        struct iocb *iocb, bool compat)
 {
        struct kiocb *req;
        struct file *file;
@@ -1532,7 +1541,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        if (unlikely(!file))
                return -EBADF;
 
-       req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
+       req = aio_get_req(ctx);         /* returns with 2 references to req */
        if (unlikely(!req)) {
                fput(file);
                return -EAGAIN;
@@ -1612,9 +1621,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 {
        struct kioctx *ctx;
        long ret = 0;
-       int i = 0;
+       int i;
        struct blk_plug plug;
-       struct kiocb_batch batch;
 
        if (unlikely(nr < 0))
                return -EINVAL;
@@ -1631,8 +1639,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
                return -EINVAL;
        }
 
-       kiocb_batch_init(&batch, nr);
-
        blk_start_plug(&plug);
 
        /*
@@ -1653,13 +1659,12 @@ long do_io_submit(aio_context_t ctx_id, long nr,
                        break;
                }
 
-               ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
+               ret = io_submit_one(ctx, user_iocb, &tmp, compat);
                if (ret)
                        break;
        }
        blk_finish_plug(&plug);
 
-       kiocb_batch_free(ctx, &batch);
        put_ioctx(ctx);
        return i ? i : ret;
 }