Diffstat (limited to 'fs/aio.c'):
 fs/aio.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
 	struct kioctx_cpu *kcpu;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	kcpu->reqs_available += nr;
+
 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
 		kcpu->reqs_available -= ctx->req_batch;
 		atomic_add(ctx->req_batch, &ctx->reqs_available);
 	}
 
+	local_irq_restore(flags);
 	preempt_enable();
 }
 
845 | 849 | ||
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	if (!kcpu->reqs_available) {
 		int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
 	ret = true;
 	kcpu->reqs_available--;
 out:
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
@@ -1021,6 +1028,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
 	/* everything turned out well, dispose of the aiocb. */
 	kiocb_free(iocb);
+	put_reqs_available(ctx, 1);
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -1062,6 +1070,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	if (head == tail)
 		goto out;
 
+	head %= ctx->nr_events;
+	tail %= ctx->nr_events;
+
 	while (ret < nr) {
 		long avail;
 		struct io_event *ev;
@@ -1100,8 +1111,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, tail);
-
-	put_reqs_available(ctx, ret);
 out:
 	mutex_unlock(&ctx->ring_lock);
 