author		Kent Overstreet <koverstreet@google.com>	2013-05-07 19:18:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-07 21:38:29 -0400
commit		21b40200cfe961b1428a529c63c33b1f1e1b4738 (patch)
tree		377ec268cbd783868d8308b5d98e01f3cf14d0f5
parent		a31ad380bed817aa25f8830ad23e1a0480fef797 (diff)
aio: use flush_dcache_page()
This wasn't causing problems before because it's not needed on x86, but it is needed on other architectures.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/aio.c	45
1 file changed, 17 insertions, 28 deletions
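For context, the pattern this patch applies throughout fs/aio.c is: write to a ring page through a temporary kernel mapping, drop the mapping, then call flush_dcache_page() so user-space mappings of the same page see the store on architectures with aliasing data caches (on x86 the call is a no-op). Below is a minimal illustrative sketch of that sequence, not part of the patch; the helper name update_ring_tail is made up, while the ring_pages[] layout and the struct names are taken from the patch context.

/*
 * Illustrative sketch only -- shows the kmap_atomic / store /
 * kunmap_atomic / flush_dcache_page sequence the patch adds around
 * every write to the aio ring pages.
 */
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), flush_dcache_page() */

static void update_ring_tail(struct aio_ring_info *info, unsigned tail)
{
	struct aio_ring *ring;

	ring = kmap_atomic(info->ring_pages[0]);	/* map the ring header page */
	ring->tail = tail;				/* store through the kernel alias */
	kunmap_atomic(ring);				/* drop the temporary mapping */

	/*
	 * The same page is mmap()ed into user space; on architectures with
	 * virtually indexed caches the user alias may not observe the store
	 * until the kernel alias is written back, hence the flush.
	 */
	flush_dcache_page(info->ring_pages[0]);
}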
diff --git a/fs/aio.c b/fs/aio.c
index 3f348a45e23b..aea060d8c1e8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -211,33 +211,15 @@ static int aio_setup_ring(struct kioctx *ctx)
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
 	return 0;
 }
 
-
-/* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(). Release the pointer with put_aio_ring_event();
- */
 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr) ({ \
-	unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
-	struct io_event *__event; \
-	__event = kmap_atomic( \
-			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
-	__event += pos % AIO_EVENTS_PER_PAGE; \
-	__event; \
-})
-
-#define put_aio_ring_event(event) do { \
-	struct io_event *__event = (event); \
-	(void)__event; \
-	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
-} while(0)
-
 static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
 			struct io_event *res)
 {
@@ -649,9 +631,9 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	struct kioctx *ctx = iocb->ki_ctx;
 	struct aio_ring_info *info;
 	struct aio_ring *ring;
-	struct io_event *event;
+	struct io_event *ev_page, *event;
 	unsigned long flags;
-	unsigned long tail;
+	unsigned tail, pos;
 
 	/*
 	 * Special case handling for sync iocbs:
@@ -690,19 +672,24 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	if (kiocbIsCancelled(iocb))
 		goto put_rq;
 
-	ring = kmap_atomic(info->ring_pages[0]);
-
 	tail = info->tail;
-	event = aio_ring_event(info, tail);
+	pos = tail + AIO_EVENTS_OFFSET;
+
 	if (++tail >= info->nr)
 		tail = 0;
 
+	ev_page = kmap_atomic(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
+
 	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
 	event->data = iocb->ki_user_data;
 	event->res = res;
 	event->res2 = res2;
 
-	pr_debug("%p[%lu]: %p: %p %Lx %lx %lx\n",
+	kunmap_atomic(ev_page);
+	flush_dcache_page(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+
+	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
 		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
 		res, res2);
 
@@ -712,12 +699,13 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	smp_wmb();	/* make event visible before updating tail */
 
 	info->tail = tail;
-	ring->tail = tail;
 
-	put_aio_ring_event(event);
+	ring = kmap_atomic(info->ring_pages[0]);
+	ring->tail = tail;
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
-	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+	pr_debug("added to ring %p at [%u]\n", iocb, tail);
 
 	/*
 	 * Check if the user asked us to deliver the result through an
@@ -807,6 +795,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	ring = kmap_atomic(info->ring_pages[0]);
 	ring->head = head;
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, info->tail);
 out: