diff options
Diffstat (limited to 'fs/aio.c')
-rw-r--r-- | fs/aio.c | 59 |
1 file changed, 16 insertions, 43 deletions
@@ -40,9 +40,6 @@ | |||
40 | #define dprintk(x...) do { ; } while (0) | 40 | #define dprintk(x...) do { ; } while (0) |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static long aio_run = 0; /* for testing only */ | ||
44 | static long aio_wakeups = 0; /* for testing only */ | ||
45 | |||
46 | /*------ sysctl variables----*/ | 43 | /*------ sysctl variables----*/ |
47 | atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */ | 44 | atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */ |
48 | unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | 45 | unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ |
@@ -405,7 +402,6 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) | |||
405 | req->ki_ctx = ctx; | 402 | req->ki_ctx = ctx; |
406 | req->ki_cancel = NULL; | 403 | req->ki_cancel = NULL; |
407 | req->ki_retry = NULL; | 404 | req->ki_retry = NULL; |
408 | req->ki_obj.user = NULL; | ||
409 | req->ki_dtor = NULL; | 405 | req->ki_dtor = NULL; |
410 | req->private = NULL; | 406 | req->private = NULL; |
411 | INIT_LIST_HEAD(&req->ki_run_list); | 407 | INIT_LIST_HEAD(&req->ki_run_list); |
@@ -451,11 +447,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
451 | { | 447 | { |
452 | if (req->ki_dtor) | 448 | if (req->ki_dtor) |
453 | req->ki_dtor(req); | 449 | req->ki_dtor(req); |
454 | req->ki_ctx = NULL; | ||
455 | req->ki_filp = NULL; | ||
456 | req->ki_obj.user = NULL; | ||
457 | req->ki_dtor = NULL; | ||
458 | req->private = NULL; | ||
459 | kmem_cache_free(kiocb_cachep, req); | 450 | kmem_cache_free(kiocb_cachep, req); |
460 | ctx->reqs_active--; | 451 | ctx->reqs_active--; |
461 | 452 | ||
@@ -623,7 +614,6 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb) | |||
623 | if (list_empty(&iocb->ki_run_list)) { | 614 | if (list_empty(&iocb->ki_run_list)) { |
624 | list_add_tail(&iocb->ki_run_list, | 615 | list_add_tail(&iocb->ki_run_list, |
625 | &ctx->run_list); | 616 | &ctx->run_list); |
626 | iocb->ki_queued++; | ||
627 | return 1; | 617 | return 1; |
628 | } | 618 | } |
629 | return 0; | 619 | return 0; |
@@ -664,10 +654,8 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) | |||
664 | } | 654 | } |
665 | 655 | ||
666 | if (!(iocb->ki_retried & 0xff)) { | 656 | if (!(iocb->ki_retried & 0xff)) { |
667 | pr_debug("%ld retry: %d of %d (kick %ld, Q %ld run %ld, wake %ld)\n", | 657 | pr_debug("%ld retry: %d of %d\n", iocb->ki_retried, |
668 | iocb->ki_retried, | 658 | iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); |
669 | iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, | ||
670 | iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); | ||
671 | } | 659 | } |
672 | 660 | ||
673 | if (!(retry = iocb->ki_retry)) { | 661 | if (!(retry = iocb->ki_retry)) { |
@@ -774,7 +762,6 @@ out: | |||
774 | static int __aio_run_iocbs(struct kioctx *ctx) | 762 | static int __aio_run_iocbs(struct kioctx *ctx) |
775 | { | 763 | { |
776 | struct kiocb *iocb; | 764 | struct kiocb *iocb; |
777 | int count = 0; | ||
778 | LIST_HEAD(run_list); | 765 | LIST_HEAD(run_list); |
779 | 766 | ||
780 | list_splice_init(&ctx->run_list, &run_list); | 767 | list_splice_init(&ctx->run_list, &run_list); |
@@ -789,9 +776,7 @@ static int __aio_run_iocbs(struct kioctx *ctx) | |||
789 | aio_run_iocb(iocb); | 776 | aio_run_iocb(iocb); |
790 | if (__aio_put_req(ctx, iocb)) /* drop extra ref */ | 777 | if (__aio_put_req(ctx, iocb)) /* drop extra ref */ |
791 | put_ioctx(ctx); | 778 | put_ioctx(ctx); |
792 | count++; | ||
793 | } | 779 | } |
794 | aio_run++; | ||
795 | if (!list_empty(&ctx->run_list)) | 780 | if (!list_empty(&ctx->run_list)) |
796 | return 1; | 781 | return 1; |
797 | return 0; | 782 | return 0; |
@@ -890,10 +875,8 @@ static void queue_kicked_iocb(struct kiocb *iocb) | |||
890 | spin_lock_irqsave(&ctx->ctx_lock, flags); | 875 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
891 | run = __queue_kicked_iocb(iocb); | 876 | run = __queue_kicked_iocb(iocb); |
892 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | 877 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
893 | if (run) { | 878 | if (run) |
894 | aio_queue_work(ctx); | 879 | aio_queue_work(ctx); |
895 | aio_wakeups++; | ||
896 | } | ||
897 | } | 880 | } |
898 | 881 | ||
899 | /* | 882 | /* |
@@ -913,7 +896,6 @@ void fastcall kick_iocb(struct kiocb *iocb) | |||
913 | return; | 896 | return; |
914 | } | 897 | } |
915 | 898 | ||
916 | iocb->ki_kicked++; | ||
917 | /* If its already kicked we shouldn't queue it again */ | 899 | /* If its already kicked we shouldn't queue it again */ |
918 | if (!kiocbTryKick(iocb)) { | 900 | if (!kiocbTryKick(iocb)) { |
919 | queue_kicked_iocb(iocb); | 901 | queue_kicked_iocb(iocb); |
@@ -984,7 +966,8 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) | |||
984 | 966 | ||
985 | tail = info->tail; | 967 | tail = info->tail; |
986 | event = aio_ring_event(info, tail, KM_IRQ0); | 968 | event = aio_ring_event(info, tail, KM_IRQ0); |
987 | tail = (tail + 1) % info->nr; | 969 | if (++tail >= info->nr) |
970 | tail = 0; | ||
988 | 971 | ||
989 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; | 972 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; |
990 | event->data = iocb->ki_user_data; | 973 | event->data = iocb->ki_user_data; |
@@ -1008,10 +991,8 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) | |||
1008 | 991 | ||
1009 | pr_debug("added to ring %p at [%lu]\n", iocb, tail); | 992 | pr_debug("added to ring %p at [%lu]\n", iocb, tail); |
1010 | 993 | ||
1011 | pr_debug("%ld retries: %d of %d (kicked %ld, Q %ld run %ld wake %ld)\n", | 994 | pr_debug("%ld retries: %d of %d\n", iocb->ki_retried, |
1012 | iocb->ki_retried, | 995 | iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); |
1013 | iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, | ||
1014 | iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); | ||
1015 | put_rq: | 996 | put_rq: |
1016 | /* everything turned out well, dispose of the aiocb. */ | 997 | /* everything turned out well, dispose of the aiocb. */ |
1017 | ret = __aio_put_req(ctx, iocb); | 998 | ret = __aio_put_req(ctx, iocb); |
@@ -1119,7 +1100,6 @@ static int read_events(struct kioctx *ctx, | |||
1119 | int i = 0; | 1100 | int i = 0; |
1120 | struct io_event ent; | 1101 | struct io_event ent; |
1121 | struct aio_timeout to; | 1102 | struct aio_timeout to; |
1122 | int event_loop = 0; /* testing only */ | ||
1123 | int retry = 0; | 1103 | int retry = 0; |
1124 | 1104 | ||
1125 | /* needed to zero any padding within an entry (there shouldn't be | 1105 | /* needed to zero any padding within an entry (there shouldn't be |
@@ -1186,7 +1166,6 @@ retry: | |||
1186 | if (to.timed_out) /* Only check after read evt */ | 1166 | if (to.timed_out) /* Only check after read evt */ |
1187 | break; | 1167 | break; |
1188 | schedule(); | 1168 | schedule(); |
1189 | event_loop++; | ||
1190 | if (signal_pending(tsk)) { | 1169 | if (signal_pending(tsk)) { |
1191 | ret = -EINTR; | 1170 | ret = -EINTR; |
1192 | break; | 1171 | break; |
@@ -1214,9 +1193,6 @@ retry: | |||
1214 | if (timeout) | 1193 | if (timeout) |
1215 | clear_timeout(&to); | 1194 | clear_timeout(&to); |
1216 | out: | 1195 | out: |
1217 | pr_debug("event loop executed %d times\n", event_loop); | ||
1218 | pr_debug("aio_run %ld\n", aio_run); | ||
1219 | pr_debug("aio_wakeups %ld\n", aio_wakeups); | ||
1220 | return i ? i : ret; | 1196 | return i ? i : ret; |
1221 | } | 1197 | } |
1222 | 1198 | ||
@@ -1515,8 +1491,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1515 | } | 1491 | } |
1516 | 1492 | ||
1517 | req->ki_filp = file; | 1493 | req->ki_filp = file; |
1518 | iocb->aio_key = req->ki_key; | 1494 | ret = put_user(req->ki_key, &user_iocb->aio_key); |
1519 | ret = put_user(iocb->aio_key, &user_iocb->aio_key); | ||
1520 | if (unlikely(ret)) { | 1495 | if (unlikely(ret)) { |
1521 | dprintk("EFAULT: aio_key\n"); | 1496 | dprintk("EFAULT: aio_key\n"); |
1522 | goto out_put_req; | 1497 | goto out_put_req; |
@@ -1531,13 +1506,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1531 | req->ki_opcode = iocb->aio_lio_opcode; | 1506 | req->ki_opcode = iocb->aio_lio_opcode; |
1532 | init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); | 1507 | init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); |
1533 | INIT_LIST_HEAD(&req->ki_wait.task_list); | 1508 | INIT_LIST_HEAD(&req->ki_wait.task_list); |
1534 | req->ki_run_list.next = req->ki_run_list.prev = NULL; | ||
1535 | req->ki_retry = NULL; | ||
1536 | req->ki_retried = 0; | 1509 | req->ki_retried = 0; |
1537 | req->ki_kicked = 0; | ||
1538 | req->ki_queued = 0; | ||
1539 | aio_run = 0; | ||
1540 | aio_wakeups = 0; | ||
1541 | 1510 | ||
1542 | ret = aio_setup_iocb(req); | 1511 | ret = aio_setup_iocb(req); |
1543 | 1512 | ||
@@ -1545,10 +1514,14 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1545 | goto out_put_req; | 1514 | goto out_put_req; |
1546 | 1515 | ||
1547 | spin_lock_irq(&ctx->ctx_lock); | 1516 | spin_lock_irq(&ctx->ctx_lock); |
1548 | list_add_tail(&req->ki_run_list, &ctx->run_list); | 1517 | if (likely(list_empty(&ctx->run_list))) { |
1549 | /* drain the run list */ | 1518 | aio_run_iocb(req); |
1550 | while (__aio_run_iocbs(ctx)) | 1519 | } else { |
1551 | ; | 1520 | list_add_tail(&req->ki_run_list, &ctx->run_list); |
1521 | /* drain the run list */ | ||
1522 | while (__aio_run_iocbs(ctx)) | ||
1523 | ; | ||
1524 | } | ||
1552 | spin_unlock_irq(&ctx->ctx_lock); | 1525 | spin_unlock_irq(&ctx->ctx_lock); |
1553 | aio_put_req(req); /* drop extra ref to req */ | 1526 | aio_put_req(req); /* drop extra ref to req */ |
1554 | return 0; | 1527 | return 0; |