author    Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 23:49:51 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 23:49:51 -0400
commit    5af43c24ca59a448c9312dd4a4a51d27ec3b9a73
tree      65288caabc91fc04242acace38789a6dd5b86ed4 /fs
parent    9affd6becbfb2c3f0d04e554bb87234761b37aba
parent    a27bb332c04cec8c4afd7912df0dc7890db27560
Merge branch 'akpm' (incoming from Andrew)
Merge more incoming from Andrew Morton:

 - Various fixes which were stalled or which I picked up recently

 - A large rotorooting of the AIO code. Allegedly to improve
   performance but I don't really have good performance numbers (I
   might have lost the email) and I can't raise Kent today. I held
   this out of 3.9 and we could give it another cycle if it's all too
   late/scary.

I ended up taking only the first two thirds of the AIO rotorooting. I
left the percpu parts and the batch completion for later.

        - Linus

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (33 commits)
  aio: don't include aio.h in sched.h
  aio: kill ki_retry
  aio: kill ki_key
  aio: give shared kioctx fields their own cachelines
  aio: kill struct aio_ring_info
  aio: kill batch allocation
  aio: change reqs_active to include unreaped completions
  aio: use cancellation list lazily
  aio: use flush_dcache_page()
  aio: make aio_read_evt() more efficient, convert to hrtimers
  wait: add wait_event_hrtimeout()
  aio: refcounting cleanup
  aio: make aio_put_req() lockless
  aio: do fget() after aio_get_req()
  aio: dprintk() -> pr_debug()
  aio: move private stuff out of aio.h
  aio: add kiocb_cancel()
  aio: kill return value of aio_complete()
  char: add aio_{read,write} to /dev/{null,zero}
  aio: remove retry-based AIO
  ...
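For orientation, the kernel interface being rotorooted here is the
io_setup()/io_submit()/io_getevents()/io_destroy() syscall family. A minimal,
hypothetical userspace sketch (not part of this commit; file name and sizes
arbitrary) that exercises the paths touched by the diff below — io_setup()
creates the kioctx and event ring, io_submit() allocates a kiocb,
io_getevents() reaps a struct io_event from the ring:

    /* Hypothetical userspace exerciser for the native AIO syscalls. */
    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        aio_context_t ctx = 0;
        struct iocb cb, *cbs[1] = { &cb };
        struct io_event ev;
        char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */

        if (fd < 0 || syscall(SYS_io_setup, 128, &ctx))
            return 1;

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_buf = (unsigned long)buf;
        cb.aio_nbytes = sizeof(buf);

        if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)   /* allocates a kiocb */
            return 1;

        /* reaps one completion event off the ring buffer */
        if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
            printf("read %lld bytes\n", (long long)ev.res);

        syscall(SYS_io_destroy, ctx);   /* kill_ioctx() path in the diff */
        return 0;
    }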
Diffstat (limited to 'fs')
-rw-r--r--fs/9p/vfs_addr.c1
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/aio.c1578
-rw-r--r--fs/bio.c1
-rw-r--r--fs/block_dev.c1
-rw-r--r--fs/btrfs/file.c1
-rw-r--r--fs/btrfs/inode.c1
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/compat.c1
-rw-r--r--fs/direct-io.c1
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ext2/inode.c1
-rw-r--r--fs/ext3/inode.c1
-rw-r--r--fs/ext4/file.c1
-rw-r--r--fs/ext4/indirect.c1
-rw-r--r--fs/ext4/inode.c1
-rw-r--r--fs/ext4/page-io.c1
-rw-r--r--fs/f2fs/data.c1
-rw-r--r--fs/fat/inode.c1
-rw-r--r--fs/fuse/cuse.c1
-rw-r--r--fs/fuse/dev.c1
-rw-r--r--fs/fuse/file.c1
-rw-r--r--fs/gfs2/aops.c1
-rw-r--r--fs/gfs2/file.c1
-rw-r--r--fs/hfs/inode.c1
-rw-r--r--fs/hfsplus/inode.c1
-rw-r--r--fs/hugetlbfs/inode.c24
-rw-r--r--fs/jfs/inode.c1
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/ntfs/file.c1
-rw-r--r--fs/ntfs/inode.c1
-rw-r--r--fs/ocfs2/aops.h2
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/pipe.c1
-rw-r--r--fs/read_write.c35
-rw-r--r--fs/reiserfs/inode.c1
-rw-r--r--fs/ubifs/file.c1
-rw-r--r--fs/udf/inode.c1
-rw-r--r--fs/xfs/xfs_aops.c1
-rw-r--r--fs/xfs/xfs_file.c1
41 files changed, 602 insertions, 1077 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 0ad61c6a65a5..055562c580b4 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -33,6 +33,7 @@
33#include <linux/pagemap.h> 33#include <linux/pagemap.h>
34#include <linux/idr.h> 34#include <linux/idr.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/aio.h>
36#include <net/9p/9p.h> 37#include <net/9p/9p.h>
37#include <net/9p/client.h> 38#include <net/9p/client.h>
38 39
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 7e03eadb40c0..a890db4b9898 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -14,6 +14,7 @@
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/writeback.h> 15#include <linux/writeback.h>
16#include <linux/pagevec.h> 16#include <linux/pagevec.h>
17#include <linux/aio.h>
17#include "internal.h" 18#include "internal.h"
18 19
19static int afs_write_back_from_locked_page(struct afs_writeback *wb, 20static int afs_write_back_from_locked_page(struct afs_writeback *wb,
diff --git a/fs/aio.c b/fs/aio.c
index 351afe7ac78e..c5b1a8c10411 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -8,6 +8,8 @@
8 * 8 *
9 * See ../COPYING for licensing terms. 9 * See ../COPYING for licensing terms.
10 */ 10 */
11#define pr_fmt(fmt) "%s: " fmt, __func__
12
11#include <linux/kernel.h> 13#include <linux/kernel.h>
12#include <linux/init.h> 14#include <linux/init.h>
13#include <linux/errno.h> 15#include <linux/errno.h>
@@ -18,8 +20,6 @@
18#include <linux/backing-dev.h> 20#include <linux/backing-dev.h>
19#include <linux/uio.h> 21#include <linux/uio.h>
20 22
21#define DEBUG 0
22
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/file.h> 25#include <linux/file.h>
@@ -39,11 +39,76 @@
39#include <asm/kmap_types.h> 39#include <asm/kmap_types.h>
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41 41
42#if DEBUG > 1 42#define AIO_RING_MAGIC 0xa10a10a1
43#define dprintk printk 43#define AIO_RING_COMPAT_FEATURES 1
44#else 44#define AIO_RING_INCOMPAT_FEATURES 0
45#define dprintk(x...) do { ; } while (0) 45struct aio_ring {
46#endif 46 unsigned id; /* kernel internal index number */
47 unsigned nr; /* number of io_events */
48 unsigned head;
49 unsigned tail;
50
51 unsigned magic;
52 unsigned compat_features;
53 unsigned incompat_features;
54 unsigned header_length; /* size of aio_ring */
55
56
57 struct io_event io_events[0];
58}; /* 128 bytes + ring size */
59
60#define AIO_RING_PAGES 8
61
62struct kioctx {
63 atomic_t users;
64 atomic_t dead;
65
66 /* This needs improving */
67 unsigned long user_id;
68 struct hlist_node list;
69
70 /*
71 * This is what userspace passed to io_setup(), it's not used for
72 * anything but counting against the global max_reqs quota.
73 *
74 * The real limit is nr_events - 1, which will be larger (see
75 * aio_setup_ring())
76 */
77 unsigned max_reqs;
78
79 /* Size of ringbuffer, in units of struct io_event */
80 unsigned nr_events;
81
82 unsigned long mmap_base;
83 unsigned long mmap_size;
84
85 struct page **ring_pages;
86 long nr_pages;
87
88 struct rcu_head rcu_head;
89 struct work_struct rcu_work;
90
91 struct {
92 atomic_t reqs_active;
93 } ____cacheline_aligned_in_smp;
94
95 struct {
96 spinlock_t ctx_lock;
97 struct list_head active_reqs; /* used for cancellation */
98 } ____cacheline_aligned_in_smp;
99
100 struct {
101 struct mutex ring_lock;
102 wait_queue_head_t wait;
103 } ____cacheline_aligned_in_smp;
104
105 struct {
106 unsigned tail;
107 spinlock_t completion_lock;
108 } ____cacheline_aligned_in_smp;
109
110 struct page *internal_pages[AIO_RING_PAGES];
111};
47 112
48/*------ sysctl variables----*/ 113/*------ sysctl variables----*/
49static DEFINE_SPINLOCK(aio_nr_lock); 114static DEFINE_SPINLOCK(aio_nr_lock);
@@ -54,11 +119,6 @@ unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio request
54static struct kmem_cache *kiocb_cachep; 119static struct kmem_cache *kiocb_cachep;
55static struct kmem_cache *kioctx_cachep; 120static struct kmem_cache *kioctx_cachep;
56 121
57static struct workqueue_struct *aio_wq;
58
59static void aio_kick_handler(struct work_struct *);
60static void aio_queue_work(struct kioctx *);
61
62/* aio_setup 122/* aio_setup
63 * Creates the slab caches used by the aio routines, panic on 123 * Creates the slab caches used by the aio routines, panic on
64 * failure as this is done early during the boot sequence. 124 * failure as this is done early during the boot sequence.
@@ -68,10 +128,7 @@ static int __init aio_setup(void)
68 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 128 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
69 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 129 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
70 130
71 aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */ 131 pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
72 BUG_ON(!aio_wq);
73
74 pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
75 132
76 return 0; 133 return 0;
77} 134}
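The two hunks above replace the file-local DEBUG/dprintk() machinery with the
standard pr_fmt()/pr_debug() convention. A hedged sketch of how that idiom
behaves (standard kernel convention, not code from this commit; the function
name is hypothetical):

    /* pr_fmt() must be defined before any printk header is included; every
     * pr_debug() in the file then gets a "function_name: " prefix. */
    #define pr_fmt(fmt) "%s: " fmt, __func__

    #include <linux/printk.h>

    static void example(void *ctx)
    {
        /* With dynamic debug enabled this prints "example: freeing <ptr>" */
        pr_debug("freeing %p\n", ctx);
    }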
@@ -79,28 +136,23 @@ __initcall(aio_setup);
79 136
80static void aio_free_ring(struct kioctx *ctx) 137static void aio_free_ring(struct kioctx *ctx)
81{ 138{
82 struct aio_ring_info *info = &ctx->ring_info;
83 long i; 139 long i;
84 140
85 for (i=0; i<info->nr_pages; i++) 141 for (i = 0; i < ctx->nr_pages; i++)
86 put_page(info->ring_pages[i]); 142 put_page(ctx->ring_pages[i]);
87 143
88 if (info->mmap_size) { 144 if (ctx->mmap_size)
89 BUG_ON(ctx->mm != current->mm); 145 vm_munmap(ctx->mmap_base, ctx->mmap_size);
90 vm_munmap(info->mmap_base, info->mmap_size);
91 }
92 146
93 if (info->ring_pages && info->ring_pages != info->internal_pages) 147 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
94 kfree(info->ring_pages); 148 kfree(ctx->ring_pages);
95 info->ring_pages = NULL;
96 info->nr = 0;
97} 149}
98 150
99static int aio_setup_ring(struct kioctx *ctx) 151static int aio_setup_ring(struct kioctx *ctx)
100{ 152{
101 struct aio_ring *ring; 153 struct aio_ring *ring;
102 struct aio_ring_info *info = &ctx->ring_info;
103 unsigned nr_events = ctx->max_reqs; 154 unsigned nr_events = ctx->max_reqs;
155 struct mm_struct *mm = current->mm;
104 unsigned long size, populate; 156 unsigned long size, populate;
105 int nr_pages; 157 int nr_pages;
106 158
@@ -116,46 +168,44 @@ static int aio_setup_ring(struct kioctx *ctx)
116 168
117 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); 169 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
118 170
119 info->nr = 0; 171 ctx->nr_events = 0;
120 info->ring_pages = info->internal_pages; 172 ctx->ring_pages = ctx->internal_pages;
121 if (nr_pages > AIO_RING_PAGES) { 173 if (nr_pages > AIO_RING_PAGES) {
122 info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); 174 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
123 if (!info->ring_pages) 175 GFP_KERNEL);
176 if (!ctx->ring_pages)
124 return -ENOMEM; 177 return -ENOMEM;
125 } 178 }
126 179
127 info->mmap_size = nr_pages * PAGE_SIZE; 180 ctx->mmap_size = nr_pages * PAGE_SIZE;
128 dprintk("attempting mmap of %lu bytes\n", info->mmap_size); 181 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
129 down_write(&ctx->mm->mmap_sem); 182 down_write(&mm->mmap_sem);
130 info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 183 ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size,
131 PROT_READ|PROT_WRITE, 184 PROT_READ|PROT_WRITE,
132 MAP_ANONYMOUS|MAP_PRIVATE, 0, 185 MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate);
133 &populate); 186 if (IS_ERR((void *)ctx->mmap_base)) {
134 if (IS_ERR((void *)info->mmap_base)) { 187 up_write(&mm->mmap_sem);
135 up_write(&ctx->mm->mmap_sem); 188 ctx->mmap_size = 0;
136 info->mmap_size = 0;
137 aio_free_ring(ctx); 189 aio_free_ring(ctx);
138 return -EAGAIN; 190 return -EAGAIN;
139 } 191 }
140 192
141 dprintk("mmap address: 0x%08lx\n", info->mmap_base); 193 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
142 info->nr_pages = get_user_pages(current, ctx->mm, 194 ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
143 info->mmap_base, nr_pages, 195 1, 0, ctx->ring_pages, NULL);
144 1, 0, info->ring_pages, NULL); 196 up_write(&mm->mmap_sem);
145 up_write(&ctx->mm->mmap_sem);
146 197
147 if (unlikely(info->nr_pages != nr_pages)) { 198 if (unlikely(ctx->nr_pages != nr_pages)) {
148 aio_free_ring(ctx); 199 aio_free_ring(ctx);
149 return -EAGAIN; 200 return -EAGAIN;
150 } 201 }
151 if (populate) 202 if (populate)
152 mm_populate(info->mmap_base, populate); 203 mm_populate(ctx->mmap_base, populate);
153 204
154 ctx->user_id = info->mmap_base; 205 ctx->user_id = ctx->mmap_base;
206 ctx->nr_events = nr_events; /* trusted copy */
155 207
156 info->nr = nr_events; /* trusted copy */ 208 ring = kmap_atomic(ctx->ring_pages[0]);
157
158 ring = kmap_atomic(info->ring_pages[0]);
159 ring->nr = nr_events; /* user copy */ 209 ring->nr = nr_events; /* user copy */
160 ring->id = ctx->user_id; 210 ring->id = ctx->user_id;
161 ring->head = ring->tail = 0; 211 ring->head = ring->tail = 0;
@@ -164,72 +214,133 @@ static int aio_setup_ring(struct kioctx *ctx)
164 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; 214 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
165 ring->header_length = sizeof(struct aio_ring); 215 ring->header_length = sizeof(struct aio_ring);
166 kunmap_atomic(ring); 216 kunmap_atomic(ring);
217 flush_dcache_page(ctx->ring_pages[0]);
167 218
168 return 0; 219 return 0;
169} 220}
170 221
171
172/* aio_ring_event: returns a pointer to the event at the given index from
173 * kmap_atomic(). Release the pointer with put_aio_ring_event();
174 */
175#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) 222#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
176#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) 223#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
177#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) 224#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
178 225
179#define aio_ring_event(info, nr) ({ \ 226void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
180 unsigned pos = (nr) + AIO_EVENTS_OFFSET; \ 227{
181 struct io_event *__event; \ 228 struct kioctx *ctx = req->ki_ctx;
182 __event = kmap_atomic( \ 229 unsigned long flags;
183 (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \ 230
184 __event += pos % AIO_EVENTS_PER_PAGE; \ 231 spin_lock_irqsave(&ctx->ctx_lock, flags);
185 __event; \ 232
186}) 233 if (!req->ki_list.next)
187 234 list_add(&req->ki_list, &ctx->active_reqs);
188#define put_aio_ring_event(event) do { \ 235
189 struct io_event *__event = (event); \ 236 req->ki_cancel = cancel;
190 (void)__event; \ 237
191 kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \ 238 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
192} while(0) 239}
193 240EXPORT_SYMBOL(kiocb_set_cancel_fn);
194static void ctx_rcu_free(struct rcu_head *head) 241
242static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
243 struct io_event *res)
244{
245 kiocb_cancel_fn *old, *cancel;
246 int ret = -EINVAL;
247
248 /*
249 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
250 * actually has a cancel function, hence the cmpxchg()
251 */
252
253 cancel = ACCESS_ONCE(kiocb->ki_cancel);
254 do {
255 if (!cancel || cancel == KIOCB_CANCELLED)
256 return ret;
257
258 old = cancel;
259 cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
260 } while (cancel != old);
261
262 atomic_inc(&kiocb->ki_users);
263 spin_unlock_irq(&ctx->ctx_lock);
264
265 memset(res, 0, sizeof(*res));
266 res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
267 res->data = kiocb->ki_user_data;
268 ret = cancel(kiocb, res);
269
270 spin_lock_irq(&ctx->ctx_lock);
271
272 return ret;
273}
274
275static void free_ioctx_rcu(struct rcu_head *head)
195{ 276{
196 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); 277 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
197 kmem_cache_free(kioctx_cachep, ctx); 278 kmem_cache_free(kioctx_cachep, ctx);
198} 279}
199 280
200/* __put_ioctx 281/*
201 * Called when the last user of an aio context has gone away, 282 * When this function runs, the kioctx has been removed from the "hash table"
202 * and the struct needs to be freed. 283 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
284 * now it's safe to cancel any that need to be.
203 */ 285 */
204static void __put_ioctx(struct kioctx *ctx) 286static void free_ioctx(struct kioctx *ctx)
205{ 287{
206 unsigned nr_events = ctx->max_reqs; 288 struct aio_ring *ring;
207 BUG_ON(ctx->reqs_active); 289 struct io_event res;
290 struct kiocb *req;
291 unsigned head, avail;
208 292
209 cancel_delayed_work_sync(&ctx->wq); 293 spin_lock_irq(&ctx->ctx_lock);
210 aio_free_ring(ctx); 294
211 mmdrop(ctx->mm); 295 while (!list_empty(&ctx->active_reqs)) {
212 ctx->mm = NULL; 296 req = list_first_entry(&ctx->active_reqs,
213 if (nr_events) { 297 struct kiocb, ki_list);
214 spin_lock(&aio_nr_lock); 298
215 BUG_ON(aio_nr - nr_events > aio_nr); 299 list_del_init(&req->ki_list);
216 aio_nr -= nr_events; 300 kiocb_cancel(ctx, req, &res);
217 spin_unlock(&aio_nr_lock);
218 } 301 }
219 pr_debug("__put_ioctx: freeing %p\n", ctx);
220 call_rcu(&ctx->rcu_head, ctx_rcu_free);
221}
222 302
223static inline int try_get_ioctx(struct kioctx *kioctx) 303 spin_unlock_irq(&ctx->ctx_lock);
224{ 304
225 return atomic_inc_not_zero(&kioctx->users); 305 ring = kmap_atomic(ctx->ring_pages[0]);
306 head = ring->head;
307 kunmap_atomic(ring);
308
309 while (atomic_read(&ctx->reqs_active) > 0) {
310 wait_event(ctx->wait, head != ctx->tail);
311
312 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
313
314 atomic_sub(avail, &ctx->reqs_active);
315 head += avail;
316 head %= ctx->nr_events;
317 }
318
319 WARN_ON(atomic_read(&ctx->reqs_active) < 0);
320
321 aio_free_ring(ctx);
322
323 spin_lock(&aio_nr_lock);
324 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
325 aio_nr -= ctx->max_reqs;
326 spin_unlock(&aio_nr_lock);
327
328 pr_debug("freeing %p\n", ctx);
329
330 /*
331 * Here the call_rcu() is between the wait_event() for reqs_active to
332 * hit 0, and freeing the ioctx.
333 *
334 * aio_complete() decrements reqs_active, but it has to touch the ioctx
335 * after to issue a wakeup so we use rcu.
336 */
337 call_rcu(&ctx->rcu_head, free_ioctx_rcu);
226} 338}
227 339
228static inline void put_ioctx(struct kioctx *kioctx) 340static void put_ioctx(struct kioctx *ctx)
229{ 341{
230 BUG_ON(atomic_read(&kioctx->users) <= 0); 342 if (unlikely(atomic_dec_and_test(&ctx->users)))
231 if (unlikely(atomic_dec_and_test(&kioctx->users))) 343 free_ioctx(ctx);
232 __put_ioctx(kioctx);
233} 344}
234 345
235/* ioctx_alloc 346/* ioctx_alloc
@@ -237,7 +348,7 @@ static inline void put_ioctx(struct kioctx *kioctx)
237 */ 348 */
238static struct kioctx *ioctx_alloc(unsigned nr_events) 349static struct kioctx *ioctx_alloc(unsigned nr_events)
239{ 350{
240 struct mm_struct *mm; 351 struct mm_struct *mm = current->mm;
241 struct kioctx *ctx; 352 struct kioctx *ctx;
242 int err = -ENOMEM; 353 int err = -ENOMEM;
243 354
@@ -256,17 +367,15 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
256 return ERR_PTR(-ENOMEM); 367 return ERR_PTR(-ENOMEM);
257 368
258 ctx->max_reqs = nr_events; 369 ctx->max_reqs = nr_events;
259 mm = ctx->mm = current->mm;
260 atomic_inc(&mm->mm_count);
261 370
262 atomic_set(&ctx->users, 2); 371 atomic_set(&ctx->users, 2);
372 atomic_set(&ctx->dead, 0);
263 spin_lock_init(&ctx->ctx_lock); 373 spin_lock_init(&ctx->ctx_lock);
264 spin_lock_init(&ctx->ring_info.ring_lock); 374 spin_lock_init(&ctx->completion_lock);
375 mutex_init(&ctx->ring_lock);
265 init_waitqueue_head(&ctx->wait); 376 init_waitqueue_head(&ctx->wait);
266 377
267 INIT_LIST_HEAD(&ctx->active_reqs); 378 INIT_LIST_HEAD(&ctx->active_reqs);
268 INIT_LIST_HEAD(&ctx->run_list);
269 INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
270 379
271 if (aio_setup_ring(ctx) < 0) 380 if (aio_setup_ring(ctx) < 0)
272 goto out_freectx; 381 goto out_freectx;
@@ -286,64 +395,56 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
286 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); 395 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
287 spin_unlock(&mm->ioctx_lock); 396 spin_unlock(&mm->ioctx_lock);
288 397
289 dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", 398 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
290 ctx, ctx->user_id, current->mm, ctx->ring_info.nr); 399 ctx, ctx->user_id, mm, ctx->nr_events);
291 return ctx; 400 return ctx;
292 401
293out_cleanup: 402out_cleanup:
294 err = -EAGAIN; 403 err = -EAGAIN;
295 aio_free_ring(ctx); 404 aio_free_ring(ctx);
296out_freectx: 405out_freectx:
297 mmdrop(mm);
298 kmem_cache_free(kioctx_cachep, ctx); 406 kmem_cache_free(kioctx_cachep, ctx);
299 dprintk("aio: error allocating ioctx %d\n", err); 407 pr_debug("error allocating ioctx %d\n", err);
300 return ERR_PTR(err); 408 return ERR_PTR(err);
301} 409}
302 410
303/* kill_ctx 411static void kill_ioctx_work(struct work_struct *work)
304 * Cancels all outstanding aio requests on an aio context. Used
305 * when the processes owning a context have all exited to encourage
306 * the rapid destruction of the kioctx.
307 */
308static void kill_ctx(struct kioctx *ctx)
309{ 412{
310 int (*cancel)(struct kiocb *, struct io_event *); 413 struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);
311 struct task_struct *tsk = current;
312 DECLARE_WAITQUEUE(wait, tsk);
313 struct io_event res;
314 414
315 spin_lock_irq(&ctx->ctx_lock); 415 wake_up_all(&ctx->wait);
316 ctx->dead = 1; 416 put_ioctx(ctx);
317 while (!list_empty(&ctx->active_reqs)) { 417}
318 struct list_head *pos = ctx->active_reqs.next;
319 struct kiocb *iocb = list_kiocb(pos);
320 list_del_init(&iocb->ki_list);
321 cancel = iocb->ki_cancel;
322 kiocbSetCancelled(iocb);
323 if (cancel) {
324 iocb->ki_users++;
325 spin_unlock_irq(&ctx->ctx_lock);
326 cancel(iocb, &res);
327 spin_lock_irq(&ctx->ctx_lock);
328 }
329 }
330 418
331 if (!ctx->reqs_active) 419static void kill_ioctx_rcu(struct rcu_head *head)
332 goto out; 420{
421 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
333 422
334 add_wait_queue(&ctx->wait, &wait); 423 INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
335 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 424 schedule_work(&ctx->rcu_work);
336 while (ctx->reqs_active) { 425}
337 spin_unlock_irq(&ctx->ctx_lock);
338 io_schedule();
339 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
340 spin_lock_irq(&ctx->ctx_lock);
341 }
342 __set_task_state(tsk, TASK_RUNNING);
343 remove_wait_queue(&ctx->wait, &wait);
344 426
345out: 427/* kill_ioctx
346 spin_unlock_irq(&ctx->ctx_lock); 428 * Cancels all outstanding aio requests on an aio context. Used
429 * when the processes owning a context have all exited to encourage
430 * the rapid destruction of the kioctx.
431 */
432static void kill_ioctx(struct kioctx *ctx)
433{
434 if (!atomic_xchg(&ctx->dead, 1)) {
435 hlist_del_rcu(&ctx->list);
436 /* Between hlist_del_rcu() and dropping the initial ref */
437 synchronize_rcu();
438
439 /*
440 * We can't punt to workqueue here because put_ioctx() ->
441 * free_ioctx() will unmap the ringbuffer, and that has to be
442 * done in the original process's context. kill_ioctx_rcu/work()
443 * exist for exit_aio(), as in that path free_ioctx() won't do
444 * the unmap.
445 */
446 kill_ioctx_work(&ctx->rcu_work);
447 }
347} 448}
348 449
349/* wait_on_sync_kiocb: 450/* wait_on_sync_kiocb:
@@ -351,9 +452,9 @@ out:
351 */ 452 */
352ssize_t wait_on_sync_kiocb(struct kiocb *iocb) 453ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
353{ 454{
354 while (iocb->ki_users) { 455 while (atomic_read(&iocb->ki_users)) {
355 set_current_state(TASK_UNINTERRUPTIBLE); 456 set_current_state(TASK_UNINTERRUPTIBLE);
356 if (!iocb->ki_users) 457 if (!atomic_read(&iocb->ki_users))
357 break; 458 break;
358 io_schedule(); 459 io_schedule();
359 } 460 }
@@ -362,28 +463,26 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
362} 463}
363EXPORT_SYMBOL(wait_on_sync_kiocb); 464EXPORT_SYMBOL(wait_on_sync_kiocb);
364 465
365/* exit_aio: called when the last user of mm goes away. At this point, 466/*
366 * there is no way for any new requests to be submited or any of the 467 * exit_aio: called when the last user of mm goes away. At this point, there is
367 * io_* syscalls to be called on the context. However, there may be 468 * no way for any new requests to be submited or any of the io_* syscalls to be
368 * outstanding requests which hold references to the context; as they 469 * called on the context.
369 * go away, they will call put_ioctx and release any pinned memory 470 *
370 * associated with the request (held via struct page * references). 471 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
472 * them.
371 */ 473 */
372void exit_aio(struct mm_struct *mm) 474void exit_aio(struct mm_struct *mm)
373{ 475{
374 struct kioctx *ctx; 476 struct kioctx *ctx;
477 struct hlist_node *n;
375 478
376 while (!hlist_empty(&mm->ioctx_list)) { 479 hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
377 ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
378 hlist_del_rcu(&ctx->list);
379
380 kill_ctx(ctx);
381
382 if (1 != atomic_read(&ctx->users)) 480 if (1 != atomic_read(&ctx->users))
383 printk(KERN_DEBUG 481 printk(KERN_DEBUG
384 "exit_aio:ioctx still alive: %d %d %d\n", 482 "exit_aio:ioctx still alive: %d %d %d\n",
385 atomic_read(&ctx->users), ctx->dead, 483 atomic_read(&ctx->users),
386 ctx->reqs_active); 484 atomic_read(&ctx->dead),
485 atomic_read(&ctx->reqs_active));
387 /* 486 /*
388 * We don't need to bother with munmap() here - 487 * We don't need to bother with munmap() here -
389 * exit_mmap(mm) is coming and it'll unmap everything. 488 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -391,150 +490,53 @@ void exit_aio(struct mm_struct *mm)
391 * as indicator that it needs to unmap the area, 490 * as indicator that it needs to unmap the area,
392 * just set it to 0; aio_free_ring() is the only 491 * just set it to 0; aio_free_ring() is the only
393 * place that uses ->mmap_size, so it's safe. 492 * place that uses ->mmap_size, so it's safe.
394 * That way we get all munmap done to current->mm -
395 * all other callers have ctx->mm == current->mm.
396 */ 493 */
397 ctx->ring_info.mmap_size = 0; 494 ctx->mmap_size = 0;
398 put_ioctx(ctx); 495
496 if (!atomic_xchg(&ctx->dead, 1)) {
497 hlist_del_rcu(&ctx->list);
498 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
499 }
399 } 500 }
400} 501}
401 502
402/* aio_get_req 503/* aio_get_req
403 * Allocate a slot for an aio request. Increments the users count 504 * Allocate a slot for an aio request. Increments the ki_users count
404 * of the kioctx so that the kioctx stays around until all requests are 505 * of the kioctx so that the kioctx stays around until all requests are
405 * complete. Returns NULL if no requests are free. 506 * complete. Returns NULL if no requests are free.
406 * 507 *
407 * Returns with kiocb->users set to 2. The io submit code path holds 508 * Returns with kiocb->ki_users set to 2. The io submit code path holds
408 * an extra reference while submitting the i/o. 509 * an extra reference while submitting the i/o.
409 * This prevents races between the aio code path referencing the 510 * This prevents races between the aio code path referencing the
410 * req (after submitting it) and aio_complete() freeing the req. 511 * req (after submitting it) and aio_complete() freeing the req.
411 */ 512 */
412static struct kiocb *__aio_get_req(struct kioctx *ctx) 513static inline struct kiocb *aio_get_req(struct kioctx *ctx)
413{ 514{
414 struct kiocb *req = NULL; 515 struct kiocb *req;
415 516
416 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); 517 if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
417 if (unlikely(!req))
418 return NULL; 518 return NULL;
419 519
420 req->ki_flags = 0; 520 if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
421 req->ki_users = 2; 521 goto out_put;
422 req->ki_key = 0;
423 req->ki_ctx = ctx;
424 req->ki_cancel = NULL;
425 req->ki_retry = NULL;
426 req->ki_dtor = NULL;
427 req->private = NULL;
428 req->ki_iovec = NULL;
429 INIT_LIST_HEAD(&req->ki_run_list);
430 req->ki_eventfd = NULL;
431
432 return req;
433}
434
435/*
436 * struct kiocb's are allocated in batches to reduce the number of
437 * times the ctx lock is acquired and released.
438 */
439#define KIOCB_BATCH_SIZE 32L
440struct kiocb_batch {
441 struct list_head head;
442 long count; /* number of requests left to allocate */
443};
444
445static void kiocb_batch_init(struct kiocb_batch *batch, long total)
446{
447 INIT_LIST_HEAD(&batch->head);
448 batch->count = total;
449}
450
451static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
452{
453 struct kiocb *req, *n;
454
455 if (list_empty(&batch->head))
456 return;
457
458 spin_lock_irq(&ctx->ctx_lock);
459 list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
460 list_del(&req->ki_batch);
461 list_del(&req->ki_list);
462 kmem_cache_free(kiocb_cachep, req);
463 ctx->reqs_active--;
464 }
465 if (unlikely(!ctx->reqs_active && ctx->dead))
466 wake_up_all(&ctx->wait);
467 spin_unlock_irq(&ctx->ctx_lock);
468}
469
470/*
471 * Allocate a batch of kiocbs. This avoids taking and dropping the
472 * context lock a lot during setup.
473 */
474static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
475{
476 unsigned short allocated, to_alloc;
477 long avail;
478 struct kiocb *req, *n;
479 struct aio_ring *ring;
480
481 to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
482 for (allocated = 0; allocated < to_alloc; allocated++) {
483 req = __aio_get_req(ctx);
484 if (!req)
485 /* allocation failed, go with what we've got */
486 break;
487 list_add(&req->ki_batch, &batch->head);
488 }
489
490 if (allocated == 0)
491 goto out;
492
493 spin_lock_irq(&ctx->ctx_lock);
494 ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
495
496 avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
497 BUG_ON(avail < 0);
498 if (avail < allocated) {
499 /* Trim back the number of requests. */
500 list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
501 list_del(&req->ki_batch);
502 kmem_cache_free(kiocb_cachep, req);
503 if (--allocated <= avail)
504 break;
505 }
506 }
507
508 batch->count -= allocated;
509 list_for_each_entry(req, &batch->head, ki_batch) {
510 list_add(&req->ki_list, &ctx->active_reqs);
511 ctx->reqs_active++;
512 }
513 522
514 kunmap_atomic(ring); 523 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
515 spin_unlock_irq(&ctx->ctx_lock); 524 if (unlikely(!req))
516 525 goto out_put;
517out:
518 return allocated;
519}
520 526
521static inline struct kiocb *aio_get_req(struct kioctx *ctx, 527 atomic_set(&req->ki_users, 2);
522 struct kiocb_batch *batch) 528 req->ki_ctx = ctx;
523{
524 struct kiocb *req;
525 529
526 if (list_empty(&batch->head))
527 if (kiocb_batch_refill(ctx, batch) == 0)
528 return NULL;
529 req = list_first_entry(&batch->head, struct kiocb, ki_batch);
530 list_del(&req->ki_batch);
531 return req; 530 return req;
531out_put:
532 atomic_dec(&ctx->reqs_active);
533 return NULL;
532} 534}
533 535
534static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) 536static void kiocb_free(struct kiocb *req)
535{ 537{
536 assert_spin_locked(&ctx->ctx_lock); 538 if (req->ki_filp)
537 539 fput(req->ki_filp);
538 if (req->ki_eventfd != NULL) 540 if (req->ki_eventfd != NULL)
539 eventfd_ctx_put(req->ki_eventfd); 541 eventfd_ctx_put(req->ki_eventfd);
540 if (req->ki_dtor) 542 if (req->ki_dtor)
@@ -542,48 +544,12 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
542 if (req->ki_iovec != &req->ki_inline_vec) 544 if (req->ki_iovec != &req->ki_inline_vec)
543 kfree(req->ki_iovec); 545 kfree(req->ki_iovec);
544 kmem_cache_free(kiocb_cachep, req); 546 kmem_cache_free(kiocb_cachep, req);
545 ctx->reqs_active--;
546
547 if (unlikely(!ctx->reqs_active && ctx->dead))
548 wake_up_all(&ctx->wait);
549} 547}
550 548
551/* __aio_put_req 549void aio_put_req(struct kiocb *req)
552 * Returns true if this put was the last user of the request.
553 */
554static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
555{ 550{
556 dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", 551 if (atomic_dec_and_test(&req->ki_users))
557 req, atomic_long_read(&req->ki_filp->f_count)); 552 kiocb_free(req);
558
559 assert_spin_locked(&ctx->ctx_lock);
560
561 req->ki_users--;
562 BUG_ON(req->ki_users < 0);
563 if (likely(req->ki_users))
564 return 0;
565 list_del(&req->ki_list); /* remove from active_reqs */
566 req->ki_cancel = NULL;
567 req->ki_retry = NULL;
568
569 fput(req->ki_filp);
570 req->ki_filp = NULL;
571 really_put_req(ctx, req);
572 return 1;
573}
574
575/* aio_put_req
576 * Returns true if this put was the last user of the kiocb,
577 * false if the request is still in use.
578 */
579int aio_put_req(struct kiocb *req)
580{
581 struct kioctx *ctx = req->ki_ctx;
582 int ret;
583 spin_lock_irq(&ctx->ctx_lock);
584 ret = __aio_put_req(ctx, req);
585 spin_unlock_irq(&ctx->ctx_lock);
586 return ret;
587} 553}
588EXPORT_SYMBOL(aio_put_req); 554EXPORT_SYMBOL(aio_put_req);
589 555
@@ -595,13 +561,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
595 rcu_read_lock(); 561 rcu_read_lock();
596 562
597 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { 563 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
598 /* 564 if (ctx->user_id == ctx_id) {
599 * RCU protects us against accessing freed memory but 565 atomic_inc(&ctx->users);
600 * we have to be careful not to get a reference when the
601 * reference count already dropped to 0 (ctx->dead test
602 * is unreliable because of races).
603 */
604 if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
605 ret = ctx; 566 ret = ctx;
606 break; 567 break;
607 } 568 }
@@ -611,295 +572,16 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
611 return ret; 572 return ret;
612} 573}
613 574
614/*
615 * Queue up a kiocb to be retried. Assumes that the kiocb
616 * has already been marked as kicked, and places it on
617 * the retry run list for the corresponding ioctx, if it
618 * isn't already queued. Returns 1 if it actually queued
619 * the kiocb (to tell the caller to activate the work
620 * queue to process it), or 0, if it found that it was
621 * already queued.
622 */
623static inline int __queue_kicked_iocb(struct kiocb *iocb)
624{
625 struct kioctx *ctx = iocb->ki_ctx;
626
627 assert_spin_locked(&ctx->ctx_lock);
628
629 if (list_empty(&iocb->ki_run_list)) {
630 list_add_tail(&iocb->ki_run_list,
631 &ctx->run_list);
632 return 1;
633 }
634 return 0;
635}
636
637/* aio_run_iocb
638 * This is the core aio execution routine. It is
639 * invoked both for initial i/o submission and
640 * subsequent retries via the aio_kick_handler.
641 * Expects to be invoked with iocb->ki_ctx->lock
642 * already held. The lock is released and reacquired
643 * as needed during processing.
644 *
645 * Calls the iocb retry method (already setup for the
646 * iocb on initial submission) for operation specific
647 * handling, but takes care of most of common retry
648 * execution details for a given iocb. The retry method
649 * needs to be non-blocking as far as possible, to avoid
650 * holding up other iocbs waiting to be serviced by the
651 * retry kernel thread.
652 *
653 * The trickier parts in this code have to do with
654 * ensuring that only one retry instance is in progress
655 * for a given iocb at any time. Providing that guarantee
656 * simplifies the coding of individual aio operations as
657 * it avoids various potential races.
658 */
659static ssize_t aio_run_iocb(struct kiocb *iocb)
660{
661 struct kioctx *ctx = iocb->ki_ctx;
662 ssize_t (*retry)(struct kiocb *);
663 ssize_t ret;
664
665 if (!(retry = iocb->ki_retry)) {
666 printk("aio_run_iocb: iocb->ki_retry = NULL\n");
667 return 0;
668 }
669
670 /*
671 * We don't want the next retry iteration for this
672 * operation to start until this one has returned and
673 * updated the iocb state. However, wait_queue functions
674 * can trigger a kick_iocb from interrupt context in the
675 * meantime, indicating that data is available for the next
676 * iteration. We want to remember that and enable the
677 * next retry iteration _after_ we are through with
678 * this one.
679 *
680 * So, in order to be able to register a "kick", but
681 * prevent it from being queued now, we clear the kick
682 * flag, but make the kick code *think* that the iocb is
683 * still on the run list until we are actually done.
684 * When we are done with this iteration, we check if
685 * the iocb was kicked in the meantime and if so, queue
686 * it up afresh.
687 */
688
689 kiocbClearKicked(iocb);
690
691 /*
692 * This is so that aio_complete knows it doesn't need to
693 * pull the iocb off the run list (We can't just call
694 * INIT_LIST_HEAD because we don't want a kick_iocb to
695 * queue this on the run list yet)
696 */
697 iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
698 spin_unlock_irq(&ctx->ctx_lock);
699
700 /* Quit retrying if the i/o has been cancelled */
701 if (kiocbIsCancelled(iocb)) {
702 ret = -EINTR;
703 aio_complete(iocb, ret, 0);
704 /* must not access the iocb after this */
705 goto out;
706 }
707
708 /*
709 * Now we are all set to call the retry method in async
710 * context.
711 */
712 ret = retry(iocb);
713
714 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
715 /*
716 * There's no easy way to restart the syscall since other AIO's
717 * may be already running. Just fail this IO with EINTR.
718 */
719 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
720 ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
721 ret = -EINTR;
722 aio_complete(iocb, ret, 0);
723 }
724out:
725 spin_lock_irq(&ctx->ctx_lock);
726
727 if (-EIOCBRETRY == ret) {
728 /*
729 * OK, now that we are done with this iteration
730 * and know that there is more left to go,
731 * this is where we let go so that a subsequent
732 * "kick" can start the next iteration
733 */
734
735 /* will make __queue_kicked_iocb succeed from here on */
736 INIT_LIST_HEAD(&iocb->ki_run_list);
737 /* we must queue the next iteration ourselves, if it
738 * has already been kicked */
739 if (kiocbIsKicked(iocb)) {
740 __queue_kicked_iocb(iocb);
741
742 /*
743 * __queue_kicked_iocb will always return 1 here, because
744 * iocb->ki_run_list is empty at this point so it should
745 * be safe to unconditionally queue the context into the
746 * work queue.
747 */
748 aio_queue_work(ctx);
749 }
750 }
751 return ret;
752}
753
754/*
755 * __aio_run_iocbs:
756 * Process all pending retries queued on the ioctx
757 * run list.
758 * Assumes it is operating within the aio issuer's mm
759 * context.
760 */
761static int __aio_run_iocbs(struct kioctx *ctx)
762{
763 struct kiocb *iocb;
764 struct list_head run_list;
765
766 assert_spin_locked(&ctx->ctx_lock);
767
768 list_replace_init(&ctx->run_list, &run_list);
769 while (!list_empty(&run_list)) {
770 iocb = list_entry(run_list.next, struct kiocb,
771 ki_run_list);
772 list_del(&iocb->ki_run_list);
773 /*
774 * Hold an extra reference while retrying i/o.
775 */
776 iocb->ki_users++; /* grab extra reference */
777 aio_run_iocb(iocb);
778 __aio_put_req(ctx, iocb);
779 }
780 if (!list_empty(&ctx->run_list))
781 return 1;
782 return 0;
783}
784
785static void aio_queue_work(struct kioctx * ctx)
786{
787 unsigned long timeout;
788 /*
789 * if someone is waiting, get the work started right
790 * away, otherwise, use a longer delay
791 */
792 smp_mb();
793 if (waitqueue_active(&ctx->wait))
794 timeout = 1;
795 else
796 timeout = HZ/10;
797 queue_delayed_work(aio_wq, &ctx->wq, timeout);
798}
799
800/*
801 * aio_run_all_iocbs:
802 * Process all pending retries queued on the ioctx
803 * run list, and keep running them until the list
804 * stays empty.
805 * Assumes it is operating within the aio issuer's mm context.
806 */
807static inline void aio_run_all_iocbs(struct kioctx *ctx)
808{
809 spin_lock_irq(&ctx->ctx_lock);
810 while (__aio_run_iocbs(ctx))
811 ;
812 spin_unlock_irq(&ctx->ctx_lock);
813}
814
815/*
816 * aio_kick_handler:
817 * Work queue handler triggered to process pending
818 * retries on an ioctx. Takes on the aio issuer's
819 * mm context before running the iocbs, so that
820 * copy_xxx_user operates on the issuer's address
821 * space.
822 * Run on aiod's context.
823 */
824static void aio_kick_handler(struct work_struct *work)
825{
826 struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
827 mm_segment_t oldfs = get_fs();
828 struct mm_struct *mm;
829 int requeue;
830
831 set_fs(USER_DS);
832 use_mm(ctx->mm);
833 spin_lock_irq(&ctx->ctx_lock);
834 requeue =__aio_run_iocbs(ctx);
835 mm = ctx->mm;
836 spin_unlock_irq(&ctx->ctx_lock);
837 unuse_mm(mm);
838 set_fs(oldfs);
839 /*
840 * we're in a worker thread already; no point using non-zero delay
841 */
842 if (requeue)
843 queue_delayed_work(aio_wq, &ctx->wq, 0);
844}
845
846
847/*
848 * Called by kick_iocb to queue the kiocb for retry
849 * and if required activate the aio work queue to process
850 * it
851 */
852static void try_queue_kicked_iocb(struct kiocb *iocb)
853{
854 struct kioctx *ctx = iocb->ki_ctx;
855 unsigned long flags;
856 int run = 0;
857
858 spin_lock_irqsave(&ctx->ctx_lock, flags);
859 /* set this inside the lock so that we can't race with aio_run_iocb()
860 * testing it and putting the iocb on the run list under the lock */
861 if (!kiocbTryKick(iocb))
862 run = __queue_kicked_iocb(iocb);
863 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
864 if (run)
865 aio_queue_work(ctx);
866}
867
868/*
869 * kick_iocb:
870 * Called typically from a wait queue callback context
871 * to trigger a retry of the iocb.
872 * The retry is usually executed by aio workqueue
873 * threads (See aio_kick_handler).
874 */
875void kick_iocb(struct kiocb *iocb)
876{
877 /* sync iocbs are easy: they can only ever be executing from a
878 * single context. */
879 if (is_sync_kiocb(iocb)) {
880 kiocbSetKicked(iocb);
881 wake_up_process(iocb->ki_obj.tsk);
882 return;
883 }
884
885 try_queue_kicked_iocb(iocb);
886}
887EXPORT_SYMBOL(kick_iocb);
888
889/* aio_complete 575/* aio_complete
890 * Called when the io request on the given iocb is complete. 576 * Called when the io request on the given iocb is complete.
891 * Returns true if this is the last user of the request. The
892 * only other user of the request can be the cancellation code.
893 */ 577 */
894int aio_complete(struct kiocb *iocb, long res, long res2) 578void aio_complete(struct kiocb *iocb, long res, long res2)
895{ 579{
896 struct kioctx *ctx = iocb->ki_ctx; 580 struct kioctx *ctx = iocb->ki_ctx;
897 struct aio_ring_info *info;
898 struct aio_ring *ring; 581 struct aio_ring *ring;
899 struct io_event *event; 582 struct io_event *ev_page, *event;
900 unsigned long flags; 583 unsigned long flags;
901 unsigned long tail; 584 unsigned tail, pos;
902 int ret;
903 585
904 /* 586 /*
905 * Special case handling for sync iocbs: 587 * Special case handling for sync iocbs:
@@ -909,61 +591,81 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
909 * - the sync task helpfully left a reference to itself in the iocb 591 * - the sync task helpfully left a reference to itself in the iocb
910 */ 592 */
911 if (is_sync_kiocb(iocb)) { 593 if (is_sync_kiocb(iocb)) {
912 BUG_ON(iocb->ki_users != 1); 594 BUG_ON(atomic_read(&iocb->ki_users) != 1);
913 iocb->ki_user_data = res; 595 iocb->ki_user_data = res;
914 iocb->ki_users = 0; 596 atomic_set(&iocb->ki_users, 0);
915 wake_up_process(iocb->ki_obj.tsk); 597 wake_up_process(iocb->ki_obj.tsk);
916 return 1; 598 return;
917 } 599 }
918 600
919 info = &ctx->ring_info; 601 /*
920 602 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
921 /* add a completion event to the ring buffer. 603 * need to issue a wakeup after decrementing reqs_active.
922 * must be done holding ctx->ctx_lock to prevent
923 * other code from messing with the tail
924 * pointer since we might be called from irq
925 * context.
926 */ 604 */
927 spin_lock_irqsave(&ctx->ctx_lock, flags); 605 rcu_read_lock();
928 606
929 if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list)) 607 if (iocb->ki_list.next) {
930 list_del_init(&iocb->ki_run_list); 608 unsigned long flags;
609
610 spin_lock_irqsave(&ctx->ctx_lock, flags);
611 list_del(&iocb->ki_list);
612 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
613 }
931 614
932 /* 615 /*
933 * cancelled requests don't get events, userland was given one 616 * cancelled requests don't get events, userland was given one
934 * when the event got cancelled. 617 * when the event got cancelled.
935 */ 618 */
936 if (kiocbIsCancelled(iocb)) 619 if (unlikely(xchg(&iocb->ki_cancel,
620 KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
621 atomic_dec(&ctx->reqs_active);
622 /* Still need the wake_up in case free_ioctx is waiting */
937 goto put_rq; 623 goto put_rq;
624 }
938 625
939 ring = kmap_atomic(info->ring_pages[0]); 626 /*
627 * Add a completion event to the ring buffer. Must be done holding
628 * ctx->ctx_lock to prevent other code from messing with the tail
629 * pointer since we might be called from irq context.
630 */
631 spin_lock_irqsave(&ctx->completion_lock, flags);
940 632
941 tail = info->tail; 633 tail = ctx->tail;
942 event = aio_ring_event(info, tail); 634 pos = tail + AIO_EVENTS_OFFSET;
943 if (++tail >= info->nr) 635
636 if (++tail >= ctx->nr_events)
944 tail = 0; 637 tail = 0;
945 638
639 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
640 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
641
946 event->obj = (u64)(unsigned long)iocb->ki_obj.user; 642 event->obj = (u64)(unsigned long)iocb->ki_obj.user;
947 event->data = iocb->ki_user_data; 643 event->data = iocb->ki_user_data;
948 event->res = res; 644 event->res = res;
949 event->res2 = res2; 645 event->res2 = res2;
950 646
951 dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n", 647 kunmap_atomic(ev_page);
952 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, 648 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
953 res, res2); 649
650 pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
651 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
652 res, res2);
954 653
955 /* after flagging the request as done, we 654 /* after flagging the request as done, we
956 * must never even look at it again 655 * must never even look at it again
957 */ 656 */
958 smp_wmb(); /* make event visible before updating tail */ 657 smp_wmb(); /* make event visible before updating tail */
959 658
960 info->tail = tail; 659 ctx->tail = tail;
961 ring->tail = tail;
962 660
963 put_aio_ring_event(event); 661 ring = kmap_atomic(ctx->ring_pages[0]);
662 ring->tail = tail;
964 kunmap_atomic(ring); 663 kunmap_atomic(ring);
664 flush_dcache_page(ctx->ring_pages[0]);
665
666 spin_unlock_irqrestore(&ctx->completion_lock, flags);
965 667
966 pr_debug("added to ring %p at [%lu]\n", iocb, tail); 668 pr_debug("added to ring %p at [%u]\n", iocb, tail);
967 669
968 /* 670 /*
969 * Check if the user asked us to deliver the result through an 671 * Check if the user asked us to deliver the result through an
@@ -975,7 +677,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
975 677
976put_rq: 678put_rq:
977 /* everything turned out well, dispose of the aiocb. */ 679 /* everything turned out well, dispose of the aiocb. */
978 ret = __aio_put_req(ctx, iocb); 680 aio_put_req(iocb);
979 681
980 /* 682 /*
981 * We have to order our ring_info tail store above and test 683 * We have to order our ring_info tail store above and test
@@ -988,233 +690,133 @@ put_rq:
988 if (waitqueue_active(&ctx->wait)) 690 if (waitqueue_active(&ctx->wait))
989 wake_up(&ctx->wait); 691 wake_up(&ctx->wait);
990 692
991 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 693 rcu_read_unlock();
992 return ret;
993} 694}
994EXPORT_SYMBOL(aio_complete); 695EXPORT_SYMBOL(aio_complete);
995 696
996/* aio_read_evt 697/* aio_read_events
997 * Pull an event off of the ioctx's event ring. Returns the number of 698 * Pull an event off of the ioctx's event ring. Returns the number of
998 * events fetched (0 or 1 ;-) 699 * events fetched
999 * FIXME: make this use cmpxchg.
1000 * TODO: make the ringbuffer user mmap()able (requires FIXME).
1001 */ 700 */
1002static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) 701static long aio_read_events_ring(struct kioctx *ctx,
702 struct io_event __user *event, long nr)
1003{ 703{
1004 struct aio_ring_info *info = &ioctx->ring_info;
1005 struct aio_ring *ring; 704 struct aio_ring *ring;
1006 unsigned long head; 705 unsigned head, pos;
1007 int ret = 0; 706 long ret = 0;
1008 707 int copy_ret;
1009 ring = kmap_atomic(info->ring_pages[0]);
1010 dprintk("in aio_read_evt h%lu t%lu m%lu\n",
1011 (unsigned long)ring->head, (unsigned long)ring->tail,
1012 (unsigned long)ring->nr);
1013
1014 if (ring->head == ring->tail)
1015 goto out;
1016 708
1017 spin_lock(&info->ring_lock); 709 mutex_lock(&ctx->ring_lock);
1018
1019 head = ring->head % info->nr;
1020 if (head != ring->tail) {
1021 struct io_event *evp = aio_ring_event(info, head);
1022 *ent = *evp;
1023 head = (head + 1) % info->nr;
1024 smp_mb(); /* finish reading the event before updatng the head */
1025 ring->head = head;
1026 ret = 1;
1027 put_aio_ring_event(evp);
1028 }
1029 spin_unlock(&info->ring_lock);
1030 710
1031out: 711 ring = kmap_atomic(ctx->ring_pages[0]);
1032 dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, 712 head = ring->head;
1033 (unsigned long)ring->head, (unsigned long)ring->tail);
1034 kunmap_atomic(ring); 713 kunmap_atomic(ring);
1035 return ret;
1036}
1037 714
1038struct aio_timeout { 715 pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
1039 struct timer_list timer;
1040 int timed_out;
1041 struct task_struct *p;
1042};
1043 716
1044static void timeout_func(unsigned long data) 717 if (head == ctx->tail)
1045{ 718 goto out;
1046 struct aio_timeout *to = (struct aio_timeout *)data;
1047 719
1048 to->timed_out = 1; 720 while (ret < nr) {
1049 wake_up_process(to->p); 721 long avail;
1050} 722 struct io_event *ev;
723 struct page *page;
1051 724
1052static inline void init_timeout(struct aio_timeout *to) 725 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
1053{ 726 if (head == ctx->tail)
1054 setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to); 727 break;
1055 to->timed_out = 0;
1056 to->p = current;
1057}
1058 728
1059static inline void set_timeout(long start_jiffies, struct aio_timeout *to, 729 avail = min(avail, nr - ret);
1060 const struct timespec *ts) 730 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
1061{ 731 ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));
1062 to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
1063 if (time_after(to->timer.expires, jiffies))
1064 add_timer(&to->timer);
1065 else
1066 to->timed_out = 1;
1067}
1068 732
1069static inline void clear_timeout(struct aio_timeout *to) 733 pos = head + AIO_EVENTS_OFFSET;
1070{ 734 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1071 del_singleshot_timer_sync(&to->timer); 735 pos %= AIO_EVENTS_PER_PAGE;
1072}
1073 736
1074static int read_events(struct kioctx *ctx, 737 ev = kmap(page);
1075 long min_nr, long nr, 738 copy_ret = copy_to_user(event + ret, ev + pos,
1076 struct io_event __user *event, 739 sizeof(*ev) * avail);
1077 struct timespec __user *timeout) 740 kunmap(page);
1078{
1079 long start_jiffies = jiffies;
1080 struct task_struct *tsk = current;
1081 DECLARE_WAITQUEUE(wait, tsk);
1082 int ret;
1083 int i = 0;
1084 struct io_event ent;
1085 struct aio_timeout to;
1086 int retry = 0;
1087
1088 /* needed to zero any padding within an entry (there shouldn't be
1089 * any, but C is fun!
1090 */
1091 memset(&ent, 0, sizeof(ent));
1092retry:
1093 ret = 0;
1094 while (likely(i < nr)) {
1095 ret = aio_read_evt(ctx, &ent);
1096 if (unlikely(ret <= 0))
1097 break;
1098
1099 dprintk("read event: %Lx %Lx %Lx %Lx\n",
1100 ent.data, ent.obj, ent.res, ent.res2);
1101 741
1102 /* Could we split the check in two? */ 742 if (unlikely(copy_ret)) {
1103 ret = -EFAULT; 743 ret = -EFAULT;
1104 if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { 744 goto out;
1105 dprintk("aio: lost an event due to EFAULT.\n");
1106 break;
1107 } 745 }
1108 ret = 0;
1109 746
1110 /* Good, event copied to userland, update counts. */ 747 ret += avail;
1111 event ++; 748 head += avail;
1112 i ++; 749 head %= ctx->nr_events;
1113 } 750 }
1114 751
1115 if (min_nr <= i) 752 ring = kmap_atomic(ctx->ring_pages[0]);
1116 return i; 753 ring->head = head;
1117 if (ret) 754 kunmap_atomic(ring);
1118 return ret; 755 flush_dcache_page(ctx->ring_pages[0]);
1119 756
1120 /* End fast path */ 757 pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
1121 758
1122 /* racey check, but it gets redone */ 759 atomic_sub(ret, &ctx->reqs_active);
1123 if (!retry && unlikely(!list_empty(&ctx->run_list))) { 760out:
1124 retry = 1; 761 mutex_unlock(&ctx->ring_lock);
1125 aio_run_all_iocbs(ctx);
1126 goto retry;
1127 }
1128 762
1129 init_timeout(&to); 763 return ret;
1130 if (timeout) { 764}
1131 struct timespec ts;
1132 ret = -EFAULT;
1133 if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1134 goto out;
1135 765
1136 set_timeout(start_jiffies, &to, &ts); 766static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1137 } 767 struct io_event __user *event, long *i)
768{
769 long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1138 770
1139 while (likely(i < nr)) { 771 if (ret > 0)
1140 add_wait_queue_exclusive(&ctx->wait, &wait); 772 *i += ret;
1141 do {
1142 set_task_state(tsk, TASK_INTERRUPTIBLE);
1143 ret = aio_read_evt(ctx, &ent);
1144 if (ret)
1145 break;
1146 if (min_nr <= i)
1147 break;
1148 if (unlikely(ctx->dead)) {
1149 ret = -EINVAL;
1150 break;
1151 }
1152 if (to.timed_out) /* Only check after read evt */
1153 break;
1154 /* Try to only show up in io wait if there are ops
1155 * in flight */
1156 if (ctx->reqs_active)
1157 io_schedule();
1158 else
1159 schedule();
1160 if (signal_pending(tsk)) {
1161 ret = -EINTR;
1162 break;
1163 }
1164 /*ret = aio_read_evt(ctx, &ent);*/
1165 } while (1) ;
1166
1167 set_task_state(tsk, TASK_RUNNING);
1168 remove_wait_queue(&ctx->wait, &wait);
1169
1170 if (unlikely(ret <= 0))
1171 break;
1172 773
1173 ret = -EFAULT; 774 if (unlikely(atomic_read(&ctx->dead)))
1174 if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) { 775 ret = -EINVAL;
1175 dprintk("aio: lost an event due to EFAULT.\n");
1176 break;
1177 }
1178 776
1179 /* Good, event copied to userland, update counts. */ 777 if (!*i)
1180 event ++; 778 *i = ret;
1181 i ++;
1182 }
1183 779
1184 if (timeout) 780 return ret < 0 || *i >= min_nr;
1185 clear_timeout(&to);
1186out:
1187 destroy_timer_on_stack(&to.timer);
1188 return i ? i : ret;
1189} 781}
1190 782
1191/* Take an ioctx and remove it from the list of ioctx's. Protects 783static long read_events(struct kioctx *ctx, long min_nr, long nr,
1192 * against races with itself via ->dead. 784 struct io_event __user *event,
1193 */ 785 struct timespec __user *timeout)
1194static void io_destroy(struct kioctx *ioctx)
1195{ 786{
1196 struct mm_struct *mm = current->mm; 787 ktime_t until = { .tv64 = KTIME_MAX };
1197 int was_dead; 788 long ret = 0;
1198 789
1199 /* delete the entry from the list is someone else hasn't already */ 790 if (timeout) {
1200 spin_lock(&mm->ioctx_lock); 791 struct timespec ts;
1201 was_dead = ioctx->dead;
1202 ioctx->dead = 1;
1203 hlist_del_rcu(&ioctx->list);
1204 spin_unlock(&mm->ioctx_lock);
1205 792
1206 dprintk("aio_release(%p)\n", ioctx); 793 if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1207 if (likely(!was_dead)) 794 return -EFAULT;
1208 put_ioctx(ioctx); /* twice for the list */
1209 795
1210 kill_ctx(ioctx); 796 until = timespec_to_ktime(ts);
797 }
1211 798
1212 /* 799 /*
1213 * Wake up any waiters. The setting of ctx->dead must be seen 800 * Note that aio_read_events() is being called as the conditional - i.e.
1214 * by other CPUs at this point. Right now, we rely on the 801 * we're calling it after prepare_to_wait() has set task state to
1215 * locking done by the above calls to ensure this consistency. 802 * TASK_INTERRUPTIBLE.
803 *
804 * But aio_read_events() can block, and if it blocks it's going to flip
805 * the task state back to TASK_RUNNING.
806 *
807 * This should be ok, provided it doesn't flip the state back to
808 * TASK_RUNNING and return 0 too much - that causes us to spin. That
809 * will only happen if the mutex_lock() call blocks, and we then find
810 * the ringbuffer empty. So in practice we should be ok, but it's
811 * something to be aware of when touching this code.
1216 */ 812 */
1217 wake_up_all(&ioctx->wait); 813 wait_event_interruptible_hrtimeout(ctx->wait,
814 aio_read_events(ctx, min_nr, nr, event, &ret), until);
815
816 if (!ret && signal_pending(current))
817 ret = -EINTR;
818
819 return ret;
1218} 820}
1219 821
1220/* sys_io_setup: 822/* sys_io_setup:
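The new aio_read_events_ring() above drains the ring with plain head/tail
arithmetic: "avail" is the contiguous run of events readable before either
catching up with the tail or hitting the end of the ring, after which the head
wraps. A small userspace model of just that wraparound logic (hypothetical
helper; the actual event copy is elided):

    /* Consume up to 'want' events from a circular buffer of 'nr' slots;
     * returns how many were taken.  Mirrors the avail computation in
     * aio_read_events_ring(): read up to the tail, or up to the end of
     * the ring, whichever comes first, then wrap the head. */
    static long ring_consume(unsigned *head, unsigned tail, unsigned nr,
                             long want)
    {
        long taken = 0;

        while (taken < want && *head != tail) {
            long avail = (*head <= tail ? tail : nr) - *head;

            if (avail > want - taken)
                avail = want - taken;

            /* ... copy 'avail' events starting at *head out here ... */

            taken += avail;
            *head = (*head + avail) % nr;
        }
        return taken;
    }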
@@ -1252,7 +854,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1252 if (!IS_ERR(ioctx)) { 854 if (!IS_ERR(ioctx)) {
1253 ret = put_user(ioctx->user_id, ctxp); 855 ret = put_user(ioctx->user_id, ctxp);
1254 if (ret) 856 if (ret)
1255 io_destroy(ioctx); 857 kill_ioctx(ioctx);
1256 put_ioctx(ioctx); 858 put_ioctx(ioctx);
1257 } 859 }
1258 860
@@ -1270,7 +872,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
-		io_destroy(ioctx);
+		kill_ioctx(ioctx);
 		put_ioctx(ioctx);
 		return 0;
 	}
@@ -1301,30 +903,21 @@ static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
 	BUG_ON(ret > 0 && iocb->ki_left == 0);
 }
 
-static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
+typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
+			    unsigned long, loff_t);
+
+static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
-			 unsigned long, loff_t);
 	ssize_t ret = 0;
-	unsigned short opcode;
-
-	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
-		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
-		rw_op = file->f_op->aio_read;
-		opcode = IOCB_CMD_PREADV;
-	} else {
-		rw_op = file->f_op->aio_write;
-		opcode = IOCB_CMD_PWRITEV;
-	}
 
 	/* This matches the pread()/pwrite() logic */
 	if (iocb->ki_pos < 0)
 		return -EINVAL;
 
-	if (opcode == IOCB_CMD_PWRITEV)
+	if (rw == WRITE)
 		file_start_write(file);
 	do {
 		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
@@ -1336,9 +929,9 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 	/* retry all partial writes.  retry partial reads as long as its a
 	 * regular file. */
 	} while (ret > 0 && iocb->ki_left > 0 &&
-		 (opcode == IOCB_CMD_PWRITEV ||
+		 (rw == WRITE ||
 		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
-	if (opcode == IOCB_CMD_PWRITEV)
+	if (rw == WRITE)
 		file_end_write(file);
 
 	/* This means we must have transferred all that we could */
@@ -1348,81 +941,49 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 
 	/* If we managed to write some out we return that, rather than
 	 * the eventual error. */
-	if (opcode == IOCB_CMD_PWRITEV
-	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
+	if (rw == WRITE
+	    && ret < 0 && ret != -EIOCBQUEUED
 	    && iocb->ki_nbytes - iocb->ki_left)
 		ret = iocb->ki_nbytes - iocb->ki_left;
 
 	return ret;
 }
 
-static ssize_t aio_fdsync(struct kiocb *iocb)
-{
-	struct file *file = iocb->ki_filp;
-	ssize_t ret = -EINVAL;
-
-	if (file->f_op->aio_fsync)
-		ret = file->f_op->aio_fsync(iocb, 1);
-	return ret;
-}
-
-static ssize_t aio_fsync(struct kiocb *iocb)
-{
-	struct file *file = iocb->ki_filp;
-	ssize_t ret = -EINVAL;
-
-	if (file->f_op->aio_fsync)
-		ret = file->f_op->aio_fsync(iocb, 0);
-	return ret;
-}
-
-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
 {
 	ssize_t ret;
 
+	kiocb->ki_nr_segs = kiocb->ki_nbytes;
+
 #ifdef CONFIG_COMPAT
 	if (compat)
-		ret = compat_rw_copy_check_uvector(type,
+		ret = compat_rw_copy_check_uvector(rw,
 				(struct compat_iovec __user *)kiocb->ki_buf,
-				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
 				&kiocb->ki_iovec);
 	else
 #endif
-		ret = rw_copy_check_uvector(type,
+		ret = rw_copy_check_uvector(rw,
 				(struct iovec __user *)kiocb->ki_buf,
-				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
 				&kiocb->ki_iovec);
 	if (ret < 0)
-		goto out;
-
-	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
-	if (ret < 0)
-		goto out;
+		return ret;
 
-	kiocb->ki_nr_segs = kiocb->ki_nbytes;
-	kiocb->ki_cur_seg = 0;
-	/* ki_nbytes/left now reflect bytes instead of segs */
+	/* ki_nbytes now reflect bytes instead of segs */
 	kiocb->ki_nbytes = ret;
-	kiocb->ki_left = ret;
-
-	ret = 0;
-out:
-	return ret;
+	return 0;
 }
 
-static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
 {
-	int bytes;
-
-	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
-	if (bytes < 0)
-		return bytes;
+	if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes)))
+		return -EFAULT;
 
 	kiocb->ki_iovec = &kiocb->ki_inline_vec;
 	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-	kiocb->ki_iovec->iov_len = bytes;
+	kiocb->ki_iovec->iov_len = kiocb->ki_nbytes;
 	kiocb->ki_nr_segs = 1;
-	kiocb->ki_cur_seg = 0;
 	return 0;
 }
 
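One subtlety in the new aio_setup_single_vector() above is the access_ok(!rw, ...) test. rw is READ (0) or WRITE (1), while access_ok() wants VERIFY_READ (0) or VERIFY_WRITE (1), and the data direction is the opposite of the check direction: a read stores into the user buffer, a write loads from it. The negation therefore maps READ to VERIFY_WRITE and WRITE to VERIFY_READ. A sketch of the invariant this leans on, assuming the historical constant values (illustrative, not in the commit):

	/* READ/WRITE come from <linux/fs.h>, VERIFY_* from <asm/uaccess.h> */
	BUILD_BUG_ON(READ != 0 || WRITE != 1);
	/* rw == READ  (0) -> !rw == 1 == VERIFY_WRITE: we write the buffer */
	/* rw == WRITE (1) -> !rw == 0 == VERIFY_READ:  we read the buffer  */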
@@ -1431,96 +992,95 @@ static ssize_t aio_setup_single_vector(int type, struct file * file, struct kioc
  * Performs the initial checks and aio retry method
  *	setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+static ssize_t aio_run_iocb(struct kiocb *req, bool compat)
 {
-	struct file *file = kiocb->ki_filp;
-	ssize_t ret = 0;
+	struct file *file = req->ki_filp;
+	ssize_t ret;
+	int rw;
+	fmode_t mode;
+	aio_rw_op *rw_op;
 
-	switch (kiocb->ki_opcode) {
+	switch (req->ki_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_READ)))
-			break;
-		ret = -EFAULT;
-		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
-			kiocb->ki_left)))
-			break;
-		ret = aio_setup_single_vector(READ, file, kiocb);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_read)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
-	case IOCB_CMD_PWRITE:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_WRITE)))
-			break;
-		ret = -EFAULT;
-		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
-			kiocb->ki_left)))
-			break;
-		ret = aio_setup_single_vector(WRITE, file, kiocb);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_write)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
 	case IOCB_CMD_PREADV:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_READ)))
-			break;
-		ret = aio_setup_vectored_rw(READ, kiocb, compat);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_read)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
+		mode = FMODE_READ;
+		rw = READ;
+		rw_op = file->f_op->aio_read;
+		goto rw_common;
+
+	case IOCB_CMD_PWRITE:
 	case IOCB_CMD_PWRITEV:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_WRITE)))
-			break;
-		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
+		mode = FMODE_WRITE;
+		rw = WRITE;
+		rw_op = file->f_op->aio_write;
+		goto rw_common;
+rw_common:
+		if (unlikely(!(file->f_mode & mode)))
+			return -EBADF;
+
+		if (!rw_op)
+			return -EINVAL;
+
+		ret = (req->ki_opcode == IOCB_CMD_PREADV ||
+		       req->ki_opcode == IOCB_CMD_PWRITEV)
+			? aio_setup_vectored_rw(rw, req, compat)
+			: aio_setup_single_vector(rw, req);
 		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_write)
-			kiocb->ki_retry = aio_rw_vect_retry;
+			return ret;
+
+		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+		if (ret < 0)
+			return ret;
+
+		req->ki_nbytes = ret;
+		req->ki_left = ret;
+
+		ret = aio_rw_vect_retry(req, rw, rw_op);
 		break;
+
 	case IOCB_CMD_FDSYNC:
-		ret = -EINVAL;
-		if (file->f_op->aio_fsync)
-			kiocb->ki_retry = aio_fdsync;
+		if (!file->f_op->aio_fsync)
+			return -EINVAL;
+
+		ret = file->f_op->aio_fsync(req, 1);
 		break;
+
 	case IOCB_CMD_FSYNC:
-		ret = -EINVAL;
-		if (file->f_op->aio_fsync)
-			kiocb->ki_retry = aio_fsync;
+		if (!file->f_op->aio_fsync)
+			return -EINVAL;
+
+		ret = file->f_op->aio_fsync(req, 0);
 		break;
+
 	default:
-		dprintk("EINVAL: io_submit: no operation provided\n");
-		ret = -EINVAL;
+		pr_debug("EINVAL: no operation provided\n");
+		return -EINVAL;
 	}
 
-	if (!kiocb->ki_retry)
-		return ret;
+	if (ret != -EIOCBQUEUED) {
+		/*
+		 * There's no easy way to restart the syscall since other AIO's
+		 * may be already running. Just fail this IO with EINTR.
+		 */
+		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+			     ret == -ERESTARTNOHAND ||
+			     ret == -ERESTART_RESTARTBLOCK))
+			ret = -EINTR;
+		aio_complete(req, ret, 0);
+	}
 
 	return 0;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct kiocb_batch *batch,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
-	struct file *file;
 	ssize_t ret;
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
-		pr_debug("EINVAL: io_submit: reserve field set\n");
+		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
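The tail of aio_run_iocb() above is the new completion rule: any result other than -EIOCBQUEUED means the operation already finished, so it is delivered through aio_complete() immediately, and the -ERESTART* family is rewritten to -EINTR because io_submit() may have queued earlier iocbs and cannot be restarted wholesale. Seen from userspace, an interrupted request simply completes with res == -EINTR in its io_event. A raw-syscall sketch of the submit side (illustrative, not part of the commit):

	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Submit one iocb; a later io_getevents() returns an io_event whose
	 * res field holds the transfer size or a negated errno such as -EINTR. */
	static int submit_one(aio_context_t ctx, struct iocb *cb)
	{
		struct iocb *list[1] = { cb };

		return syscall(__NR_io_submit, ctx, 1, list);
	}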
@@ -1534,16 +1094,16 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		return -EINVAL;
 	}
 
-	file = fget(iocb->aio_fildes);
-	if (unlikely(!file))
-		return -EBADF;
-
-	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
-	if (unlikely(!req)) {
-		fput(file);
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
 		return -EAGAIN;
+
+	req->ki_filp = fget(iocb->aio_fildes);
+	if (unlikely(!req->ki_filp)) {
+		ret = -EBADF;
+		goto out_put_req;
 	}
-	req->ki_filp = file;
+
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -1559,9 +1119,9 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		}
 	}
 
-	ret = put_user(req->ki_key, &user_iocb->aio_key);
+	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
 	if (unlikely(ret)) {
-		dprintk("EFAULT: aio_key\n");
+		pr_debug("EFAULT: aio_key\n");
 		goto out_put_req;
 	}
 
@@ -1573,41 +1133,14 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
 	req->ki_opcode = iocb->aio_lio_opcode;
 
-	ret = aio_setup_iocb(req, compat);
-
+	ret = aio_run_iocb(req, compat);
 	if (ret)
 		goto out_put_req;
 
-	spin_lock_irq(&ctx->ctx_lock);
-	/*
-	 * We could have raced with io_destroy() and are currently holding a
-	 * reference to ctx which should be destroyed. We cannot submit IO
-	 * since ctx gets freed as soon as io_submit() puts its reference. The
-	 * check here is reliable: io_destroy() sets ctx->dead before waiting
-	 * for outstanding IO and the barrier between these two is realized by
-	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
-	 * increment ctx->reqs_active before checking for ctx->dead and the
-	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
-	 * don't see ctx->dead set here, io_destroy() waits for our IO to
-	 * finish.
-	 */
-	if (ctx->dead) {
-		spin_unlock_irq(&ctx->ctx_lock);
-		ret = -EINVAL;
-		goto out_put_req;
-	}
-	aio_run_iocb(req);
-	if (!list_empty(&ctx->run_list)) {
-		/* drain the run list */
-		while (__aio_run_iocbs(ctx))
-			;
-	}
-	spin_unlock_irq(&ctx->ctx_lock);
-
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
-
 out_put_req:
+	atomic_dec(&ctx->reqs_active);
 	aio_put_req(req);	/* drop extra ref to req */
 	aio_put_req(req);	/* drop i/o ref to req */
 	return ret;
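Note the inverted setup order in io_submit_one() above: the kiocb is now allocated before fget(), so a bad descriptor can take the common out_put_req exit instead of needing a special-case fput(). A sketch of what that exit has to undo (a paraphrase; the exact teardown lives in aio_get_req()/aio_put_req()):

	out_put_req:
		atomic_dec(&ctx->reqs_active);	/* return the slot aio_get_req() charged */
		aio_put_req(req);		/* extra submission-side reference */
		aio_put_req(req);		/* i/o reference; the final put frees the
						 * request and drops ki_filp if it was set */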
@@ -1620,7 +1153,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 	long ret = 0;
 	int i = 0;
 	struct blk_plug plug;
-	struct kiocb_batch batch;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1633,12 +1165,10 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 
 	ctx = lookup_ioctx(ctx_id);
 	if (unlikely(!ctx)) {
-		pr_debug("EINVAL: io_submit: invalid context id\n");
+		pr_debug("EINVAL: invalid context id\n");
 		return -EINVAL;
 	}
 
-	kiocb_batch_init(&batch, nr);
-
 	blk_start_plug(&plug);
 
 	/*
@@ -1659,13 +1189,12 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
 
-	kiocb_batch_free(ctx, &batch);
 	put_ioctx(ctx);
 	return i ? i : ret;
 }
@@ -1698,10 +1227,13 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 
 	assert_spin_locked(&ctx->ctx_lock);
 
+	if (key != KIOCB_KEY)
+		return NULL;
+
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each(pos, &ctx->active_reqs) {
 		struct kiocb *kiocb = list_kiocb(pos);
-		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
+		if (kiocb->ki_obj.user == iocb)
 			return kiocb;
 	}
 	return NULL;
@@ -1720,7 +1252,7 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	int (*cancel)(struct kiocb *iocb, struct io_event *res);
+	struct io_event res;
 	struct kioctx *ctx;
 	struct kiocb *kiocb;
 	u32 key;
@@ -1735,32 +1267,22 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		return -EINVAL;
 
 	spin_lock_irq(&ctx->ctx_lock);
-	ret = -EAGAIN;
+
 	kiocb = lookup_kiocb(ctx, iocb, key);
-	if (kiocb && kiocb->ki_cancel) {
-		cancel = kiocb->ki_cancel;
-		kiocb->ki_users ++;
-		kiocbSetCancelled(kiocb);
-	} else
-		cancel = NULL;
+	if (kiocb)
+		ret = kiocb_cancel(ctx, kiocb, &res);
+	else
+		ret = -EINVAL;
+
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	if (NULL != cancel) {
-		struct io_event tmp;
-		pr_debug("calling cancel\n");
-		memset(&tmp, 0, sizeof(tmp));
-		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
-		tmp.data = kiocb->ki_user_data;
-		ret = cancel(kiocb, &tmp);
-		if (!ret) {
-			/* Cancellation succeeded -- copy the result
-			 * into the user's buffer.
-			 */
-			if (copy_to_user(result, &tmp, sizeof(tmp)))
-				ret = -EFAULT;
-		}
-	} else
-		ret = -EINVAL;
+	if (!ret) {
+		/* Cancellation succeeded -- copy the result
+		 * into the user's buffer.
+		 */
+		if (copy_to_user(result, &res, sizeof(res)))
+			ret = -EFAULT;
+	}
 
 	put_ioctx(ctx);
 
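With ki_key gone, KIOCB_KEY (zero) is the only cookie io_submit() ever writes to userspace's aio_key field, so lookup_kiocb() can reject any other key up front and match on the iocb pointer alone, while kiocb_cancel() now encapsulates the ki_users/kiocbSetCancelled() bookkeeping that io_cancel() used to open-code. For reference, the userspace side of the call is unchanged (raw-syscall sketch, not part of the commit):

	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* 0 and a filled *ev on successful cancellation; -1 with errno set
	 * (typically EINVAL) if the iocb is unknown or cannot be cancelled. */
	static int cancel_one(aio_context_t ctx, struct iocb *cb,
			      struct io_event *ev)
	{
		return syscall(__NR_io_cancel, ctx, cb, ev);
	}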
diff --git a/fs/bio.c b/fs/bio.c
index b96fc6ce4855..954d73124b41 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/uio.h>
 #include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3823d3ffb760..d9871c1f0894 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -27,6 +27,7 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/cleancache.h>
+#include <linux/aio.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index bb8b7a0e28a6..bc4d54c465a0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 09c58a35b429..898da0a01e04 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -32,6 +32,7 @@
 #include <linux/writeback.h>
 #include <linux/statfs.h>
 #include <linux/compat.h>
+#include <linux/aio.h>
 #include <linux/bit_spinlock.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d70830c66833..656e16907430 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -7,6 +7,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 
 #include "super.h"
 #include "mds_client.h"
diff --git a/fs/compat.c b/fs/compat.c
index 93f7d021b716..fc3b55dce184 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -47,6 +47,7 @@
 #include <linux/fs_struct.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
diff --git a/fs/direct-io.c b/fs/direct-io.c
index cfb816dc6d9f..51d16e067d68 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,6 +37,7 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
+#include <linux/aio.h>
 
 /*
  * How many user pages to map in one call to get_user_pages(). This determines
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 63b1f54b6a1f..201f0a0d6b0a 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/compat.h>
 #include <linux/fs_stack.h>
+#include <linux/aio.h>
 #include "ecryptfs_kernel.h"
 
 /**
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fe60cc1117d8..0a87bb10998d 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -31,6 +31,7 @@
 #include <linux/mpage.h>
 #include <linux/fiemap.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include "ext2.h"
 #include "acl.h"
 #include "xip.h"
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index d706dbfa6220..23c712825640 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 64848b595b24..4959e29573b6 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -23,6 +23,7 @@
 #include <linux/jbd2.h>
 #include <linux/mount.h>
 #include <linux/path.h>
+#include <linux/aio.h>
 #include <linux/quotaops.h>
 #include <linux/pagevec.h>
 #include "ext4.h"
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 98be6f697463..b8d5d351e24f 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -20,6 +20,7 @@
  *	(sct@redhat.com), 1993, 1998
  */
 
+#include <linux/aio.h>
 #include "ext4_jbd2.h"
 #include "truncate.h"
 #include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 793d44b84d7f..0723774bdfb5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/aio.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5929cd0baa20..19599bded62a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -18,6 +18,7 @@
 #include <linux/pagevec.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
 #include <linux/workqueue.h>
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7bd22a201125..d0ed4ba4b61b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -12,6 +12,7 @@
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 4ff901632b26..dfce656ddb33 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -19,6 +19,7 @@
 #include <linux/mpage.h>
 #include <linux/buffer_head.h>
 #include <linux/mount.h>
+#include <linux/aio.h>
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/uio.h>
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b3aaf7b3578b..aef34b1e635e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -38,6 +38,7 @@
 #include <linux/device.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a6c1664e330b..1d55f9465400 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,7 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
+#include <linux/aio.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4655e59d545b..d1c9b85b3f58 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9883694f1e7c..0bad69ed6336 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -20,6 +20,7 @@
 #include <linux/swap.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/backing-dev.h>
+#include <linux/aio.h>
 
 #include "gfs2.h"
 #include "incore.h"
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index d79c2dadc536..acd16764b133 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -25,6 +25,7 @@
 #include <asm/uaccess.h>
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
+#include <linux/aio.h>
 
 #include "gfs2.h"
 #include "incore.h"
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 716e1aafb2e2..f9299d8a64e3 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
+#include <linux/aio.h>
 
 #include "hfs_fs.h"
 #include "btree.h"
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 7faaa964968e..f833d35630ab 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
+#include <linux/aio.h>
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 523464e62849..a3f868ae3fd4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -909,11 +909,8 @@ static int can_do_hugetlb_shm(void)
 
 static int get_hstate_idx(int page_size_log)
 {
-	struct hstate *h;
+	struct hstate *h = hstate_sizelog(page_size_log);
 
-	if (!page_size_log)
-		return default_hstate_idx;
-	h = size_to_hstate(1 << page_size_log);
 	if (!h)
 		return -1;
 	return h - hstates;
@@ -929,9 +926,12 @@ static struct dentry_operations anon_ops = {
 	.d_dname = hugetlb_dname
 };
 
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
-				size_t size, vm_flags_t acctflag,
-				struct user_struct **user,
+/*
+ * Note that size should be aligned to proper hugepage size in caller side,
+ * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
+ */
+struct file *hugetlb_file_setup(const char *name, size_t size,
+				vm_flags_t acctflag, struct user_struct **user,
 				int creat_flags, int page_size_log)
 {
 	struct file *file = ERR_PTR(-ENOMEM);
@@ -939,8 +939,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
 	struct path path;
 	struct super_block *sb;
 	struct qstr quick_string;
-	struct hstate *hstate;
-	unsigned long num_pages;
 	int hstate_idx;
 
 	hstate_idx = get_hstate_idx(page_size_log);
@@ -980,12 +978,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
 	if (!inode)
 		goto out_dentry;
 
-	hstate = hstate_inode(inode);
-	size += addr & ~huge_page_mask(hstate);
-	num_pages = ALIGN(size, huge_page_size(hstate)) >>
-			huge_page_shift(hstate);
 	file = ERR_PTR(-ENOMEM);
-	if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
+	if (hugetlb_reserve_pages(inode, 0,
+			size >> huge_page_shift(hstate_inode(inode)), NULL,
+			acctflag))
 		goto out_inode;
 
 	d_instantiate(path.dentry, inode);
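The new comment above hugetlb_file_setup() moves the rounding burden to its callers: with the addr-based fixup deleted, an unaligned size now reserves one huge page too few. A caller-side sketch of the expected rounding, using the hstate_sizelog() helper this diff switches get_hstate_idx() to (illustrative, not a quote from the tree):

	struct hstate *hs = hstate_sizelog(page_size_log);

	/* round the request up before asking for the reservation */
	size = ALIGN(size, huge_page_size(hs));
	file = hugetlb_file_setup(name, size, acctflag, &user,
				  creat_flags, page_size_log);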
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77554b61d124..730f24e282a6 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -23,6 +23,7 @@
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_filsys.h"
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cf02f5530713..689fb608648e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -25,7 +25,7 @@
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/writeback.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include "nilfs.h"
 #include "btnode.h"
 #include "segment.h"
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 1da4b81e6f76..c5670b8d198c 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/uio.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d3e118cc6ffa..2778b0255dc6 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -28,6 +28,7 @@
 #include <linux/quotaops.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
+#include <linux/aio.h>
 
 #include "aops.h"
 #include "attrib.h"
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index ffb2da370a99..f671e49beb34 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,6 +22,8 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
+#include <linux/aio.h>
+
 handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 				      struct page *page,
 				      unsigned from,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 12ae194ac943..3a44a648dae7 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2322,7 +2322,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
 		status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
 					      arg_flags, subclass, _RET_IP_);
 		if (status < 0) {
-			if (status != -EAGAIN && status != -EIOCBRETRY)
+			if (status != -EAGAIN)
 				mlog_errno(status);
 			goto bail;
 		}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 88924a3133fa..621fc73bf23d 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -147,8 +147,6 @@ void ocfs2_refresh_inode(struct inode *inode,
147int ocfs2_mark_inode_dirty(handle_t *handle, 147int ocfs2_mark_inode_dirty(handle_t *handle,
148 struct inode *inode, 148 struct inode *inode,
149 struct buffer_head *bh); 149 struct buffer_head *bh);
150int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
151int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb);
152struct buffer_head *ocfs2_bread(struct inode *inode, 150struct buffer_head *ocfs2_bread(struct inode *inode,
153 int block, int *err, int reada); 151 int block, int *err, int reada);
154 152
diff --git a/fs/pipe.c b/fs/pipe.c
index a029a14bacf1..d2c45e14e6d8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -21,6 +21,7 @@
 #include <linux/audit.h>
 #include <linux/syscalls.h>
 #include <linux/fcntl.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
diff --git a/fs/read_write.c b/fs/read_write.c
index 90ba3b350e50..03430008704e 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -9,6 +9,7 @@
 #include <linux/fcntl.h>
 #include <linux/file.h>
 #include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/fsnotify.h>
 #include <linux/security.h>
 #include <linux/export.h>
@@ -329,16 +330,6 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
 	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
-static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
-{
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	if (!kiocbIsKicked(iocb))
-		schedule();
-	else
-		kiocbClearKicked(iocb);
-	__set_current_state(TASK_RUNNING);
-}
-
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
 	struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -350,13 +341,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 	if (-EIOCBQUEUED == ret)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
@@ -406,13 +391,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
 	if (-EIOCBQUEUED == ret)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
@@ -592,13 +571,7 @@ static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
 	if (ret == -EIOCBQUEUED)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
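With -EIOCBRETRY retired, each synchronous wrapper in fs/read_write.c collapses from a retry loop to a single call plus at most one wait. The surviving shape of do_sync_read(), condensed from the post-patch code for reference (the write and readv/writev variants differ only in the method called):

	ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len,
			     loff_t *ppos)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct kiocb kiocb;
		ssize_t ret;

		init_sync_kiocb(&kiocb, filp);
		kiocb.ki_pos = *ppos;
		kiocb.ki_left = len;
		kiocb.ki_nbytes = len;

		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
		if (-EIOCBQUEUED == ret)
			ret = wait_on_sync_kiocb(&kiocb);	/* only wait left */
		*ppos = kiocb.ki_pos;
		return ret;
	}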
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ea5061fd4f3e..77d6d47abc83 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/quotaops.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index f12189d2db1d..14374530784c 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -50,6 +50,7 @@
  */
 
 #include "ubifs.h"
+#include <linux/aio.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/slab.h>
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7a12e48ad819..b6d15d349810 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/crc-itu-t.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3244c988d379..2b2691b73428 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -31,6 +31,7 @@
 #include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 054d60c0ac57..a5f2042aec8b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -36,6 +36,7 @@
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
 
+#include <linux/aio.h>
 #include <linux/dcache.h>
 #include <linux/falloc.h>
 #include <linux/pagevec.h>