 fs/aio.c | 42 +++++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -406,7 +406,8 @@ static void free_ioctx(struct work_struct *work)
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
 	struct kiocb *req;
-	unsigned cpu, head, avail;
+	unsigned cpu, avail;
+	DEFINE_WAIT(wait);
 
 	spin_lock_irq(&ctx->ctx_lock);
 
@@ -427,22 +428,24 @@ static void free_ioctx(struct work_struct *work)
 		kcpu->reqs_available = 0;
 	}
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
-	head = ring->head;
-	kunmap_atomic(ring);
+	while (1) {
+		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-	while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
-		wait_event(ctx->wait,
-			   (head != ctx->tail) ||
-			   (atomic_read(&ctx->reqs_available) >=
-			    ctx->nr_events - 1));
-
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+		ring = kmap_atomic(ctx->ring_pages[0]);
+		avail = (ring->head <= ring->tail)
+			? ring->tail - ring->head
+			: ctx->nr_events - ring->head + ring->tail;
 
 		atomic_add(avail, &ctx->reqs_available);
-		head += avail;
-		head %= ctx->nr_events;
+		ring->head = ring->tail;
+		kunmap_atomic(ring);
+
+		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+			break;
+
+		schedule();
 	}
+	finish_wait(&ctx->wait, &wait);
 
 	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
 
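The rewritten free_ioctx() loop above reclaims whatever completion events are still sitting in the ring by counting the occupied slots between head and tail, with wrap-around handled explicitly. A minimal userspace sketch of that index arithmetic, assuming a ring of nr slots where head == tail means empty (ring_avail() and the sample values are illustrative, not part of fs/aio.c):

#include <assert.h>
#include <stdio.h>

/*
 * Occupied slots in a ring of nr entries: head is the consumer index,
 * tail the producer index, head == tail means empty. Mirrors the
 * "avail = (ring->head <= ring->tail) ? ..." computation in the hunk above.
 */
static unsigned ring_avail(unsigned head, unsigned tail, unsigned nr)
{
	return head <= tail ? tail - head : nr - head + tail;
}

int main(void)
{
	assert(ring_avail(0, 0, 8) == 0);	/* empty */
	assert(ring_avail(2, 5, 8) == 3);	/* no wrap: slots 2, 3, 4 */
	assert(ring_avail(6, 1, 8) == 3);	/* wrapped: slots 6, 7, 0 */
	printf("ring_avail checks passed\n");
	return 0;
}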
@@ -869,7 +872,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 				 struct io_event __user *event, long nr)
 {
 	struct aio_ring *ring;
-	unsigned head, pos;
+	unsigned head, tail, pos;
 	long ret = 0;
 	int copy_ret;
 
@@ -877,11 +880,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
 	ring = kmap_atomic(ctx->ring_pages[0]);
 	head = ring->head;
+	tail = ring->tail;
 	kunmap_atomic(ring);
 
-	pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
+	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
 
-	if (head == ctx->tail)
+	if (head == tail)
 		goto out;
 
 	while (ret < nr) {
@@ -889,8 +893,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		struct io_event *ev;
 		struct page *page;
 
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
-		if (head == ctx->tail)
+		avail = (head <= tail ? tail : ctx->nr_events) - head;
+		if (head == tail)
 			break;
 
 		avail = min(avail, nr - ret);
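The copy loop in aio_read_events_ring() consumes events in contiguous runs, cutting each run at the wrap point so a single copy never crosses the end of the ring; that is what "avail = (head <= tail ? tail : ctx->nr_events) - head" computes. A hedged userspace sketch of the same slicing, with consume_runs() and NR_SLOTS made up for illustration:

#include <stdio.h>

#define NR_SLOTS 8

/*
 * Walk the occupied region [head, tail) of a NR_SLOTS ring in contiguous
 * runs: each run ends either at tail or at the end of the array, matching
 * the avail computation in the hunk above.
 */
static void consume_runs(unsigned head, unsigned tail)
{
	while (head != tail) {
		unsigned run = (head <= tail ? tail : NR_SLOTS) - head;

		printf("copy %u event(s) starting at slot %u\n", run, head);
		head = (head + run) % NR_SLOTS;
	}
}

int main(void)
{
	consume_runs(6, 2);	/* wraps: slots 6-7 first, then slots 0-1 */
	return 0;
}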
@@ -921,7 +925,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
-	pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
+	pr_debug("%li h%u t%u\n", ret, head, tail);
 
 	put_reqs_available(ctx, ret);
 out:
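Taken together, the patch replaces the wait_event() call in free_ioctx() with the open-coded prepare_to_wait()/schedule()/finish_wait() idiom, so the condition is re-evaluated (and the ring drained) after every wakeup. A kernel-style sketch of that idiom, assuming an atomic counter as the condition; wait_for_count() is a hypothetical helper, not something this patch adds:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

/*
 * Sleep until *count reaches target. The condition is checked after
 * prepare_to_wait(), so a wakeup arriving between the check and
 * schedule() is never lost; same structure as the free_ioctx() loop above.
 */
static void wait_for_count(wait_queue_head_t *wq, atomic_t *count, int target)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(count) >= target)
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}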
