-rw-r--r--  drivers/usb/gadget/function/f_fs.c |  5
-rw-r--r--  drivers/usb/gadget/legacy/inode.c  |  5
-rw-r--r--  fs/aio.c                           | 94
-rw-r--r--  fs/direct-io.c                     |  4
-rw-r--r--  fs/fuse/file.c                     |  2
-rw-r--r--  fs/nfs/direct.c                    |  2
-rw-r--r--  include/linux/aio.h                | 46
7 files changed, 81 insertions(+), 77 deletions(-)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 175c9956cbe3..b64538b498dc 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -655,9 +655,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
 		unuse_mm(io_data->mm);
 	}
 
-	aio_complete(io_data->kiocb, ret, ret);
+	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-	if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
+	if (io_data->ffs->ffs_eventfd &&
+	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
 	usb_ep_free_request(io_data->ep, io_data->req);
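
This is the pattern the whole series repeats: drivers stop calling the aio-specific aio_complete() and instead invoke whatever completion callback the submitter stored in the kiocb itself, so the same driver path serves any kind of submitter. A minimal userspace model of that dispatch (a sketch only; the struct and names below are pared down, not the real kernel definitions):

    #include <stdio.h>
    #include <stddef.h>

    /* illustrative stand-in for the reworked struct kiocb */
    struct kiocb {
    	long pos;
    	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
    };

    static void aio_complete(struct kiocb *iocb, long ret, long ret2)
    {
    	printf("async completion: ret=%ld ret2=%ld\n", ret, ret2);
    }

    /* a driver completion path: no aio-specific call, just the callback */
    static void driver_io_done(struct kiocb *iocb, long bytes)
    {
    	if (iocb->ki_complete)		/* async: submitter installed it */
    		iocb->ki_complete(iocb, bytes, bytes);
    	/* sync callers (ki_complete == NULL) handle the result inline */
    }

    int main(void)
    {
    	struct kiocb req = { .pos = 0, .ki_complete = aio_complete };
    	driver_io_done(&req, 512);
    	return 0;
    }
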
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 200f9a584064..a4a80694f607 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -469,7 +469,7 @@ static void ep_user_copy_worker(struct work_struct *work)
 		ret = -EFAULT;
 
 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
-	aio_complete(iocb, ret, ret);
+	iocb->ki_complete(iocb, ret, ret);
 
 	kfree(priv->buf);
 	kfree(priv->to_free);
@@ -497,7 +497,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 		kfree(priv);
 		iocb->private = NULL;
 		/* aio_complete() reports bytes-transferred _and_ faults */
-		aio_complete(iocb, req->actual ? req->actual : req->status,
+
+		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
 			req->status);
 	} else {
 		/* ep_copy_to_user() won't report both; we hide some faults */
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -151,6 +151,38 @@ struct kioctx {
 	unsigned		id;
 };
 
+/*
+ * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
+ * cancelled or completed (this makes a certain amount of sense because
+ * successful cancellation - io_cancel() - does deliver the completion to
+ * userspace).
+ *
+ * And since most things don't implement kiocb cancellation and we'd really like
+ * kiocb completion to be lockless when possible, we use ki_cancel to
+ * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
+ * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
+ */
+#define KIOCB_CANCELLED		((void *) (~0ULL))
+
+struct aio_kiocb {
+	struct kiocb		common;
+
+	struct kioctx		*ki_ctx;
+	kiocb_cancel_fn		*ki_cancel;
+
+	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
+	__u64			ki_user_data;	/* user's data for completion */
+
+	struct list_head	ki_list;	/* the aio core uses this
+						 * for cancellation */
+
+	/*
+	 * If the aio_resfd field of the userspace iocb is not zero,
+	 * this is the underlying eventfd context to deliver events to.
+	 */
+	struct eventfd_ctx	*ki_eventfd;
+};
+
 /*------ sysctl variables----*/
 static DEFINE_SPINLOCK(aio_nr_lock);
 unsigned long aio_nr;		/* current system wide number of aio requests */
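
This is the key design move of the patch: struct aio_kiocb embeds the generic struct kiocb as its "common" member, and aio-private code recovers its own state with container_of(). A self-contained sketch of that round-trip, with a local container_of standing in for the kernel macro:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct kiocb {
    	long ki_pos;
    };

    struct aio_kiocb {
    	struct kiocb common;		/* embedded generic part */
    	unsigned long long ki_user_data;
    };

    int main(void)
    {
    	struct aio_kiocb req = { .common = { .ki_pos = 0 },
    				 .ki_user_data = 42 };
    	struct kiocb *iocb = &req.common;	/* what drivers see */

    	/* the aio core gets its private state back: */
    	struct aio_kiocb *back = container_of(iocb, struct aio_kiocb, common);
    	printf("user data: %llu\n", back->ki_user_data);
    	return 0;
    }
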
@@ -220,7 +252,7 @@ static int __init aio_setup(void)
 	if (IS_ERR(aio_mnt))
 		panic("Failed to create aio fs mount.");
 
-	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
@@ -480,8 +512,9 @@ static int aio_setup_ring(struct kioctx *ctx)
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
+void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
 {
+	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
 	struct kioctx *ctx = req->ki_ctx;
 	unsigned long flags;
 
@@ -496,7 +529,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kiocb *kiocb)
+static int kiocb_cancel(struct aio_kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 
@@ -514,7 +547,7 @@ static int kiocb_cancel(struct kiocb *kiocb)
 		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
 	} while (cancel != old);
 
-	return cancel(kiocb);
+	return cancel(&kiocb->common);
 }
 
 static void free_ioctx(struct work_struct *work)
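
As the comment block added above explains, ki_cancel doubles as the synchronization point: whoever swings it to KIOCB_CANCELLED with cmpxchg() owns the request, so cancellation and completion never race even without a lock. A userspace model of that claim step, using GCC's __sync_val_compare_and_swap() in place of the kernel's cmpxchg() (illustrative only; the error value is simplified):

    #include <stdio.h>

    typedef int (kiocb_cancel_fn)(void *);

    #define KIOCB_CANCELLED	((kiocb_cancel_fn *)(~0ULL))

    static int my_cancel(void *req) { return 0; }

    static int kiocb_cancel(kiocb_cancel_fn **slot, void *req)
    {
    	kiocb_cancel_fn *old, *cancel;

    	/* loop until we atomically replace the current value with
    	 * KIOCB_CANCELLED; whoever swaps first owns the request */
    	cancel = *slot;
    	do {
    		old = cancel;
    		if (!old || old == KIOCB_CANCELLED)
    			return -1;	/* never set, or already claimed */
    		cancel = __sync_val_compare_and_swap(slot, old,
    						     KIOCB_CANCELLED);
    	} while (cancel != old);

    	return cancel(req);
    }

    int main(void)
    {
    	kiocb_cancel_fn *slot = my_cancel;
    	printf("cancel: %d\n", kiocb_cancel(&slot, NULL));
    	printf("again:  %d\n", kiocb_cancel(&slot, NULL));
    	return 0;
    }
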
@@ -550,13 +583,13 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 static void free_ioctx_users(struct percpu_ref *ref)
 {
 	struct kioctx *ctx = container_of(ref, struct kioctx, users);
-	struct kiocb *req;
+	struct aio_kiocb *req;
 
 	spin_lock_irq(&ctx->ctx_lock);
 
 	while (!list_empty(&ctx->active_reqs)) {
 		req = list_first_entry(&ctx->active_reqs,
-				       struct kiocb, ki_list);
+				       struct aio_kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
 		kiocb_cancel(req);
@@ -932,9 +965,9 @@ static void user_refill_reqs_available(struct kioctx *ctx)
  * Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
  */
-static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
-	struct kiocb *req;
+	struct aio_kiocb *req;
 
 	if (!get_reqs_available(ctx)) {
 		user_refill_reqs_available(ctx);
@@ -955,10 +988,10 @@ out_put:
 	return NULL;
 }
 
-static void kiocb_free(struct kiocb *req)
+static void kiocb_free(struct aio_kiocb *req)
 {
-	if (req->ki_filp)
-		fput(req->ki_filp);
+	if (req->common.ki_filp)
+		fput(req->common.ki_filp);
 	if (req->ki_eventfd != NULL)
 		eventfd_ctx_put(req->ki_eventfd);
 	kmem_cache_free(kiocb_cachep, req);
@@ -994,8 +1027,9 @@ out:
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
-void aio_complete(struct kiocb *iocb, long res, long res2)
+static void aio_complete(struct kiocb *kiocb, long res, long res2)
 {
+	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring	*ring;
 	struct io_event	*ev_page, *event;
@@ -1009,7 +1043,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	 *    ref, no other paths have a way to get another ref
 	 *  - the sync task helpfully left a reference to itself in the iocb
 	 */
-	BUG_ON(is_sync_kiocb(iocb));
+	BUG_ON(is_sync_kiocb(kiocb));
 
 	if (iocb->ki_list.next) {
 		unsigned long flags;
@@ -1035,7 +1069,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
+	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
 	event->data = iocb->ki_user_data;
 	event->res = res;
 	event->res2 = res2;
@@ -1044,7 +1078,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
 	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
 		 res, res2);
 
 	/* after flagging the request as done, we
@@ -1091,7 +1125,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
 	percpu_ref_put(&ctx->reqs);
 }
-EXPORT_SYMBOL(aio_complete);
 
 /* aio_read_events_ring
  *	Pull an event off of the ioctx's event ring.  Returns the number of
@@ -1480,7 +1513,7 @@ rw_common:
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 struct iocb *iocb, bool compat)
 {
-	struct kiocb *req;
+	struct aio_kiocb *req;
 	ssize_t ret;
 
 	/* enforce forwards compatibility on users */
@@ -1503,11 +1536,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	if (unlikely(!req))
 		return -EAGAIN;
 
-	req->ki_filp = fget(iocb->aio_fildes);
-	if (unlikely(!req->ki_filp)) {
+	req->common.ki_filp = fget(iocb->aio_fildes);
+	if (unlikely(!req->common.ki_filp)) {
 		ret = -EBADF;
 		goto out_put_req;
 	}
+	req->common.ki_pos = iocb->aio_offset;
+	req->common.ki_complete = aio_complete;
+	req->common.ki_flags = 0;
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
@@ -1522,6 +1558,8 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			req->ki_eventfd = NULL;
 			goto out_put_req;
 		}
+
+		req->common.ki_flags |= IOCB_EVENTFD;
 	}
 
 	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
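
Since ki_eventfd is now aio-private, drivers such as f_fs.c above can no longer test it; instead, submission records the eventfd's existence as a bit in the generic ki_flags. The handshake, reduced to a sketch (the function names here are illustrative, not kernel symbols):

    #include <stdio.h>

    #define IOCB_EVENTFD	(1 << 0)

    struct kiocb {
    	int ki_flags;
    };

    /* submission side: record that a result eventfd is attached */
    static void submit(struct kiocb *iocb, int has_resfd)
    {
    	iocb->ki_flags = 0;
    	if (has_resfd)
    		iocb->ki_flags |= IOCB_EVENTFD;
    }

    /* driver side: decide whether to ring its own eventfd without
     * touching aio-private state */
    static void complete(struct kiocb *iocb)
    {
    	if (!(iocb->ki_flags & IOCB_EVENTFD))
    		printf("signal driver-local eventfd\n");
    	else
    		printf("aio core will signal the user's eventfd\n");
    }

    int main(void)
    {
    	struct kiocb req;
    	submit(&req, 1);
    	complete(&req);
    	return 0;
    }
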
@@ -1530,11 +1568,10 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		goto out_put_req;
 	}
 
-	req->ki_obj.user = user_iocb;
+	req->ki_user_iocb = user_iocb;
 	req->ki_user_data = iocb->aio_data;
-	req->ki_pos = iocb->aio_offset;
 
-	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
+	ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
 			   (char __user *)(unsigned long)iocb->aio_buf,
 			   iocb->aio_nbytes,
 			   compat);
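
Submission thus wires up the generic fields — ki_pos, ki_complete, ki_flags — and hands only &req->common down the stack, so nothing below the aio core ever sees struct aio_kiocb. In miniature (run_iocb() and submit_one() are hypothetical stand-ins for aio_run_iocb() and io_submit_one()):

    #include <stdio.h>

    struct kiocb {
    	long ki_pos;
    	void (*ki_complete)(struct kiocb *, long, long);
    	int ki_flags;
    };

    struct aio_kiocb {
    	struct kiocb common;
    	unsigned long long ki_user_data;
    };

    static void aio_complete(struct kiocb *iocb, long res, long res2)
    {
    	printf("completed at pos %ld: %ld\n", iocb->ki_pos, res);
    }

    /* stand-in for the opcode dispatch in aio_run_iocb() */
    static long run_iocb(struct kiocb *iocb, long nbytes)
    {
    	iocb->ki_complete(iocb, nbytes, 0);
    	return 0;
    }

    static long submit_one(struct aio_kiocb *req, long offset, long nbytes)
    {
    	req->common.ki_pos = offset;
    	req->common.ki_complete = aio_complete;	/* aio-specific behavior */
    	req->common.ki_flags = 0;
    	return run_iocb(&req->common, nbytes);	/* generic part only */
    }

    int main(void)
    {
    	struct aio_kiocb req = { .ki_user_data = 1 };
    	return (int)submit_one(&req, 0, 4096);
    }
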
@@ -1623,10 +1660,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 /* lookup_kiocb
  *	Finds a given iocb for cancellation.
  */
-static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
-				  u32 key)
+static struct aio_kiocb *
+lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
 {
-	struct list_head *pos;
+	struct aio_kiocb *kiocb;
 
 	assert_spin_locked(&ctx->ctx_lock);
 
@@ -1634,9 +1671,8 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 		return NULL;
 
 	/* TODO: use a hash or array, this sucks. */
-	list_for_each(pos, &ctx->active_reqs) {
-		struct kiocb *kiocb = list_kiocb(pos);
-		if (kiocb->ki_obj.user == iocb)
+	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+		if (kiocb->ki_user_iocb == iocb)
 			return kiocb;
 	}
 	return NULL;
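
This hunk also removes the last user of list_kiocb(): list_for_each_entry() walks the containing structures directly rather than iterating bare list_head nodes and converting each one by hand. The idiom, self-contained in userspace with local reimplementations of the <linux/list.h> macros:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *next, *prev; };

    #define list_for_each_entry(pos, head, member)                           \
    	for (pos = container_of((head)->next, __typeof__(*pos), member); \
    	     &pos->member != (head);                                      \
    	     pos = container_of(pos->member.next, __typeof__(*pos), member))

    struct aio_kiocb {
    	void *ki_user_iocb;
    	struct list_head ki_list;
    };

    int main(void)
    {
    	struct list_head reqs = { &reqs, &reqs };
    	struct aio_kiocb a = { .ki_user_iocb = (void *)0x1 };
    	struct aio_kiocb b = { .ki_user_iocb = (void *)0x2 };
    	struct aio_kiocb *kiocb;

    	/* manual list insert: reqs -> a -> b -> reqs */
    	a.ki_list = (struct list_head){ &b.ki_list, &reqs };
    	b.ki_list = (struct list_head){ &reqs, &a.ki_list };
    	reqs.next = &a.ki_list;
    	reqs.prev = &b.ki_list;

    	list_for_each_entry(kiocb, &reqs, ki_list)
    		printf("entry %p\n", kiocb->ki_user_iocb);
    	return 0;
    }
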
@@ -1656,7 +1692,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
 	struct kioctx *ctx;
-	struct kiocb *kiocb;
+	struct aio_kiocb *kiocb;
 	u32 key;
 	int ret;
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index e181b6b2e297..c38b460776e6 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -265,7 +265,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
 				ret = err;
 		}
 
-		aio_complete(dio->iocb, ret, 0);
+		dio->iocb->ki_complete(dio->iocb, ret, 0);
 	}
 
 	kmem_cache_free(dio_cache, dio);
@@ -1056,7 +1056,7 @@ static inline int drop_refcount(struct dio *dio)
 	 * operation.  AIO can if it was a broken operation described above or
 	 * in fact if all the bios race to complete before we get here.  In
 	 * that case dio_complete() translates the EIOCBQUEUED into the proper
-	 * return code that the caller will hand to aio_complete().
+	 * return code that the caller will hand to ->complete().
 	 *
 	 * This is managed by the bio_lock instead of being an atomic_t so that
 	 * completion paths can drop their ref and use the remaining count to
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f81d83eb9758..a5c5e38b3ff8 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -584,7 +584,7 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 			spin_unlock(&fc->lock);
 		}
 
-		aio_complete(io->iocb, res, 0);
+		io->iocb->ki_complete(io->iocb, res, 0);
 		kfree(io);
 	}
 }
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 27cebf164070..5db3385fc108 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -393,7 +393,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 		long res = (long) dreq->error;
 		if (!res)
 			res = (long) dreq->count;
-		aio_complete(dreq->iocb, res, 0);
+		dreq->iocb->ki_complete(dreq->iocb, res, 0);
 	}
 
 	complete_all(&dreq->completion);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index f8516430490d..5c40b61285ac 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -14,67 +14,38 @@ struct kiocb;
 
 #define KIOCB_KEY		0
 
-/*
- * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
- * cancelled or completed (this makes a certain amount of sense because
- * successful cancellation - io_cancel() - does deliver the completion to
- * userspace).
- *
- * And since most things don't implement kiocb cancellation and we'd really like
- * kiocb completion to be lockless when possible, we use ki_cancel to
- * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
- * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
- */
-#define KIOCB_CANCELLED		((void *) (~0ULL))
-
 typedef int (kiocb_cancel_fn)(struct kiocb *);
 
+#define IOCB_EVENTFD		(1 << 0)
+
 struct kiocb {
 	struct file		*ki_filp;
-	struct kioctx		*ki_ctx;	/* NULL for sync ops */
-	kiocb_cancel_fn		*ki_cancel;
-	void			*private;
-
-	union {
-		void __user		*user;
-	} ki_obj;
-
-	__u64			ki_user_data;	/* user's data for completion */
 	loff_t			ki_pos;
-
-	struct list_head	ki_list;	/* the aio core uses this
-						 * for cancellation */
-
-	/*
-	 * If the aio_resfd field of the userspace iocb is not zero,
-	 * this is the underlying eventfd context to deliver events to.
-	 */
-	struct eventfd_ctx	*ki_eventfd;
+	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+	void			*private;
+	int			ki_flags;
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
 {
-	return kiocb->ki_ctx == NULL;
+	return kiocb->ki_complete == NULL;
 }
 
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
 	*kiocb = (struct kiocb) {
-		.ki_ctx = NULL,
 		.ki_filp = filp,
 	};
 }
 
 /* prototypes */
 #ifdef CONFIG_AIO
-extern void aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
 			struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
-static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
 struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
 static inline long do_io_submit(aio_context_t ctx_id, long nr,
@@ -84,11 +55,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
 					kiocb_cancel_fn *cancel) { }
 #endif /* CONFIG_AIO */
 
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
-	return list_entry(h, struct kiocb, ki_list);
-}
-
 /* for sysctl: */
 extern unsigned long aio_nr;
 extern unsigned long aio_max_nr;
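
With ki_ctx gone from struct kiocb, "synchronous or not" is now keyed purely on whether a completion callback was installed, which is exactly what init_sync_kiocb() leaves NULL. A sketch of the dispatch that generic I/O code can build on this (simplified types; -529 stands in for -EIOCBQUEUED):

    #include <stdio.h>
    #include <string.h>

    struct kiocb {
    	void *ki_filp;
    	long ki_pos;
    	void (*ki_complete)(struct kiocb *, long, long);
    };

    static int is_sync_kiocb(struct kiocb *kiocb)
    {
    	return kiocb->ki_complete == NULL;
    }

    static void init_sync_kiocb(struct kiocb *kiocb, void *filp)
    {
    	memset(kiocb, 0, sizeof(*kiocb));
    	kiocb->ki_filp = filp;	/* ki_complete stays NULL: synchronous */
    }

    /* a generic completion helper a filesystem might use */
    static long io_done(struct kiocb *iocb, long res)
    {
    	if (is_sync_kiocb(iocb))
    		return res;			/* caller is waiting */
    	iocb->ki_complete(iocb, res, 0);	/* fire-and-forget */
    	return -529;	/* like -EIOCBQUEUED: result arrives later */
    }

    int main(void)
    {
    	struct kiocb sync_req;
    	init_sync_kiocb(&sync_req, NULL);
    	printf("sync result: %ld\n", io_done(&sync_req, 100));
    	return 0;
    }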