Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 51 ++++++++++++++++-----------------------------------
 1 file changed, 16 insertions(+), 35 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 94766599db00..ee20fc4240e0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -47,19 +47,19 @@ unsigned long aio_nr; /* current system wide number of aio requests */
 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
-static kmem_cache_t	*kiocb_cachep;
-static kmem_cache_t	*kioctx_cachep;
+static struct kmem_cache	*kiocb_cachep;
+static struct kmem_cache	*kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
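
This hunk tracks the 2.6.20 workqueue API conversion: work handlers now
receive the struct work_struct * itself instead of a void * context, and
DECLARE_WORK() loses its third (data) argument. A minimal sketch of the
converted pattern, using hypothetical names (my_work, my_handler) not taken
from this patch:

	#include <linux/workqueue.h>

	/* new-style handler: gets the work item, not opaque data */
	static void my_handler(struct work_struct *work)
	{
		/* per-object state is recovered with container_of(),
		 * as aio_kick_handler() does later in this diff */
	}

	/* two arguments now; formerly DECLARE_WORK(my_work, my_handler, NULL) */
	static DECLARE_WORK(my_work, my_handler);
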
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
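
The switch to INIT_DELAYED_WORK() implies that kioctx.wq changed type from
struct work_struct to struct delayed_work in the companion header change,
which this file-limited view doesn't show; roughly:

	/* assumed shape of the matching include/linux/aio.h change */
	struct kioctx {
		/* ... */
		struct delayed_work	wq;	/* was: struct work_struct wq; */
	};
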
@@ -367,8 +367,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 {
 	unsigned nr_events = ctx->max_reqs;
 
-	if (unlikely(ctx->reqs_active))
-		BUG();
+	BUG_ON(ctx->reqs_active);
 
 	cancel_delayed_work(&ctx->wq);
 	flush_workqueue(aio_wq);
@@ -470,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 		wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
 	spin_lock_irq(&fput_lock);
 	while (likely(!list_empty(&fput_head))) {
@@ -505,8 +504,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	assert_spin_locked(&ctx->ctx_lock);
 
 	req->ki_users --;
-	if (unlikely(req->ki_users < 0))
-		BUG();
+	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
 		return 0;
 	list_del(&req->ki_list);		/* remove from active_reqs */
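
Both BUG() conversions in this patch are behavior-preserving: BUG_ON()
already wraps its condition in unlikely(), its generic definition being
roughly:

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
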
@@ -588,7 +586,7 @@ static void use_mm(struct mm_struct *mm)
 	 * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
 	 * it won't work. Update it accordingly if you change it here
 	 */
-	activate_mm(active_mm, mm);
+	switch_mm(active_mm, mm, tsk);
 	task_unlock(tsk);
 
 	mmdrop(active_mm);
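
switch_mm() names the destination task explicitly, which fits a kernel
thread temporarily borrowing a user mm; activate_mm() is the exec-time
variant, and the two are not interchangeable on every architecture (see the
UML caveat in the comment above). On i386 of this era, for instance, the
relationship was roughly:

	/* include/asm-i386/mmu_context.h, approximately */
	#define activate_mm(prev, next) \
		switch_mm((prev), (next), NULL)
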
@@ -601,9 +599,6 @@ static void use_mm(struct mm_struct *mm)
  * by the calling kernel thread
  * (Note: this routine is intended to be called only
  * from a kernel thread context)
- *
- * Comments: Called with ctx->ctx_lock held. This nests
- * task_lock instead ctx_lock.
  */
 static void unuse_mm(struct mm_struct *mm)
 {
@@ -668,17 +663,6 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
 	ssize_t (*retry)(struct kiocb *);
 	ssize_t ret;
 
-	if (iocb->ki_retried++ > 1024*1024) {
-		printk("Maximal retry count. Bytes done %Zd\n",
-			iocb->ki_nbytes - iocb->ki_left);
-		return -EAGAIN;
-	}
-
-	if (!(iocb->ki_retried & 0xff)) {
-		pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-	}
-
 	if (!(retry = iocb->ki_retry)) {
 		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 		return 0;
@@ -859,24 +843,26 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
  * space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-	struct kioctx *ctx = data;
+	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
+	struct mm_struct *mm;
 	int requeue;
 
 	set_fs(USER_DS);
 	use_mm(ctx->mm);
 	spin_lock_irq(&ctx->ctx_lock);
 	requeue =__aio_run_iocbs(ctx);
-	unuse_mm(ctx->mm);
+	mm = ctx->mm;
 	spin_unlock_irq(&ctx->ctx_lock);
+	unuse_mm(mm);
 	set_fs(oldfs);
 	/*
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
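
The handler now recovers its kioctx with container_of(): ctx->wq is a
struct delayed_work whose first member is the embedded struct work_struct,
and a delayed work item must be requeued via queue_delayed_work() (a delay
of 0 preserves the old immediate queue_work() behavior). Note also that
unuse_mm() moves outside ctx_lock, via the mm local, consistent with
deleting the "Called with ctx->ctx_lock held" comment earlier in this diff.
A self-contained userspace illustration of the container_of() arithmetic,
using hypothetical demo_* names:

	#include <stddef.h>
	#include <stdio.h>

	/* same arithmetic as the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };
	struct delayed_work { struct work_struct work; long timer; };

	struct demo_ctx {
		int id;
		struct delayed_work wq;		/* embedded, like kioctx.wq */
	};

	/* handler sees only the work_struct and recovers the container */
	static void demo_handler(struct work_struct *work)
	{
		struct demo_ctx *ctx =
			container_of(work, struct demo_ctx, wq.work);
		printf("ctx->id = %d\n", ctx->id);	/* prints: ctx->id = 42 */
	}

	int main(void)
	{
		struct demo_ctx ctx = { .id = 42 };
		demo_handler(&ctx.wq.work);
		return 0;
	}
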
@@ -1007,9 +993,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-	pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1415,7 +1398,6 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
 	kiocb->ki_iovec->iov_len = kiocb->ki_left;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
-	kiocb->ki_nbytes = kiocb->ki_left;
 	return 0;
 }
 
@@ -1593,7 +1575,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	req->ki_opcode = iocb->aio_lio_opcode;
 	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
 	INIT_LIST_HEAD(&req->ki_wait.task_list);
-	req->ki_retried = 0;
 
 	ret = aio_setup_iocb(req);
 