author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-01-09 02:38:23 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-01-09 02:38:23 -0500
commit     da733563be5a9da26fe81d9f007262d00b846e22 (patch)
tree       db28291df94a2043af2123911984c5c173da4e6f /fs/direct-io.c
parent     6ccbcf2cb41131f8d56ef0723bf3f7c1f8486076 (diff)
parent     dab78d7924598ea4031663dd10db814e2e324928 (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'fs/direct-io.c')
-rw-r--r--  fs/direct-io.c | 646
1 file changed, 337 insertions(+), 309 deletions(-)
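In outline, this merge brings in the direct-io rework that splits the old all-in-one struct dio: state used only while building and submitting bios moves into a new struct dio_submit on the caller's stack, while struct dio keeps the fields the completion path needs and is now allocated from a dedicated slab cache. A condensed sketch, abbreviated from the full definitions in the hunks below (field lists shortened, not verbatim):

        /* Sketch only -- see the diff below for the complete definitions. */
        struct dio_submit {                     /* submission-path state, on the stack */
                struct bio *bio;                /* bio under assembly */
                get_block_t *get_block;         /* block mapping function */
                /* ... block cursors, deferred-page and page-fetching state ... */
        };

        struct dio {                            /* state shared with end_io/AIO completion */
                struct inode *inode;
                dio_iodone_t *end_io;           /* IO completion function */
                void *private;                  /* copy of map_bh->b_private for end_io */
                /* ... bio_lock, refcount, bio_list, iocb, result, pages[] ... */
        } ____cacheline_aligned_in_smp;

        static struct kmem_cache *dio_cache __read_mostly;     /* replaces kmalloc()/kfree() */
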
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 44a360ca8046..d740ab67ff6e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -39,7 +39,7 @@
 
 /*
  * How many user pages to map in one call to get_user_pages(). This determines
- * the size of a structure on the stack.
+ * the size of a structure in the slab cache
  */
 #define DIO_PAGES       64
 
@@ -55,13 +55,10 @@
  * blocksize.
  */
 
-struct dio {
-        /* BIO submission state */
+/* dio_state only used in the submission path */
+
+struct dio_submit {
         struct bio *bio;                /* bio under assembly */
-        struct inode *inode;
-        int rw;
-        loff_t i_size;                  /* i_size when submitted */
-        int flags;                      /* doesn't change */
         unsigned blkbits;               /* doesn't change */
         unsigned blkfactor;             /* When we're using an alignment which
                                            is finer than the filesystem's soft
@@ -76,18 +73,17 @@ struct dio {
         sector_t block_in_file;         /* Current offset into the underlying
                                            file in dio_block units. */
         unsigned blocks_available;      /* At block_in_file. changes */
+        int reap_counter;               /* rate limit reaping */
         sector_t final_block_in_request;/* doesn't change */
         unsigned first_block_in_page;   /* doesn't change, Used only once */
         int boundary;                   /* prev block is at a boundary */
-        int reap_counter;               /* rate limit reaping */
         get_block_t *get_block;         /* block mapping function */
-        dio_iodone_t *end_io;           /* IO completion function */
         dio_submit_t *submit_io;        /* IO submition function */
+
         loff_t logical_offset_in_bio;   /* current first logical block in bio */
         sector_t final_block_in_bio;    /* current final block in bio + 1 */
         sector_t next_block_for_io;     /* next block to be put under IO,
                                            in dio_blocks units */
-        struct buffer_head map_bh;      /* last get_block() result */
 
         /*
          * Deferred addition of a page to the dio. These variables are
@@ -100,18 +96,6 @@ struct dio {
         sector_t cur_page_block;        /* Where it starts */
         loff_t cur_page_fs_offset;      /* Offset in file */
 
-        /* BIO completion state */
-        spinlock_t bio_lock;            /* protects BIO fields below */
-        unsigned long refcount;         /* direct_io_worker() and bios */
-        struct bio *bio_list;           /* singly linked via bi_private */
-        struct task_struct *waiter;     /* waiting task (NULL if none) */
-
-        /* AIO related stuff */
-        struct kiocb *iocb;             /* kiocb */
-        int is_async;                   /* is IO async ? */
-        int io_error;                   /* IO error in completion path */
-        ssize_t result;                 /* IO result */
-
         /*
          * Page fetching state. These variables belong to dio_refill_pages().
          */
@@ -125,7 +109,30 @@ struct dio {
          */
         unsigned head;                  /* next page to process */
         unsigned tail;                  /* last valid page + 1 */
+};
+
+/* dio_state communicated between submission path and end_io */
+struct dio {
+        int flags;                      /* doesn't change */
+        int rw;
+        struct inode *inode;
+        loff_t i_size;                  /* i_size when submitted */
+        dio_iodone_t *end_io;           /* IO completion function */
+
+        void *private;                  /* copy from map_bh.b_private */
+
+        /* BIO completion state */
+        spinlock_t bio_lock;            /* protects BIO fields below */
         int page_errors;                /* errno from get_user_pages() */
+        int is_async;                   /* is IO async ? */
+        int io_error;                   /* IO error in completion path */
+        unsigned long refcount;         /* direct_io_worker() and bios */
+        struct bio *bio_list;           /* singly linked via bi_private */
+        struct task_struct *waiter;     /* waiting task (NULL if none) */
+
+        /* AIO related stuff */
+        struct kiocb *iocb;             /* kiocb */
+        ssize_t result;                 /* IO result */
 
         /*
          * pages[] (and any fields placed after it) are not zeroed out at
@@ -133,7 +140,9 @@ struct dio {
          * wish that they not be zeroed.
          */
         struct page *pages[DIO_PAGES];  /* page buffer */
-};
+} ____cacheline_aligned_in_smp;
+
+static struct kmem_cache *dio_cache __read_mostly;
 
 static void __inode_dio_wait(struct inode *inode)
 {
@@ -182,27 +191,27 @@ EXPORT_SYMBOL_GPL(inode_dio_done);
 /*
  * How many pages are in the queue?
  */
-static inline unsigned dio_pages_present(struct dio *dio)
+static inline unsigned dio_pages_present(struct dio_submit *sdio)
 {
-        return dio->tail - dio->head;
+        return sdio->tail - sdio->head;
 }
 
 /*
  * Go grab and pin some userspace pages. Typically we'll get 64 at a time.
  */
-static int dio_refill_pages(struct dio *dio)
+static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 {
         int ret;
         int nr_pages;
 
-        nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
+        nr_pages = min(sdio->total_pages - sdio->curr_page, DIO_PAGES);
         ret = get_user_pages_fast(
-                dio->curr_user_address,         /* Where from? */
+                sdio->curr_user_address,        /* Where from? */
                 nr_pages,                       /* How many pages? */
                 dio->rw == READ,                /* Write to memory? */
                 &dio->pages[0]);                /* Put results here */
 
-        if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
+        if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
                 struct page *page = ZERO_PAGE(0);
                 /*
                  * A memory fault, but the filesystem has some outstanding
@@ -213,17 +222,17 @@ static int dio_refill_pages(struct dio *dio)
                 dio->page_errors = ret;
                 page_cache_get(page);
                 dio->pages[0] = page;
-                dio->head = 0;
-                dio->tail = 1;
+                sdio->head = 0;
+                sdio->tail = 1;
                 ret = 0;
                 goto out;
         }
 
         if (ret >= 0) {
-                dio->curr_user_address += ret * PAGE_SIZE;
-                dio->curr_page += ret;
-                dio->head = 0;
-                dio->tail = ret;
+                sdio->curr_user_address += ret * PAGE_SIZE;
+                sdio->curr_page += ret;
+                sdio->head = 0;
+                sdio->tail = ret;
                 ret = 0;
         }
 out:
@@ -236,17 +245,18 @@ out:
  * decent number of pages, less frequently. To provide nicer use of the
  * L1 cache.
  */
-static struct page *dio_get_page(struct dio *dio)
+static inline struct page *dio_get_page(struct dio *dio,
+                                        struct dio_submit *sdio)
 {
-        if (dio_pages_present(dio) == 0) {
+        if (dio_pages_present(sdio) == 0) {
                 int ret;
 
-                ret = dio_refill_pages(dio);
+                ret = dio_refill_pages(dio, sdio);
                 if (ret)
                         return ERR_PTR(ret);
-                BUG_ON(dio_pages_present(dio) == 0);
+                BUG_ON(dio_pages_present(sdio) == 0);
         }
-        return dio->pages[dio->head++];
+        return dio->pages[sdio->head++];
 }
 
 /**
@@ -292,7 +302,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
 
         if (dio->end_io && dio->result) {
                 dio->end_io(dio->iocb, offset, transferred,
-                            dio->map_bh.b_private, ret, is_async);
+                            dio->private, ret, is_async);
         } else {
                 if (is_async)
                         aio_complete(dio->iocb, ret, 0);
@@ -323,7 +333,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
 
         if (remaining == 0) {
                 dio_complete(dio, dio->iocb->ki_pos, 0, true);
-                kfree(dio);
+                kmem_cache_free(dio_cache, dio);
         }
 }
 
@@ -367,9 +377,10 @@ void dio_end_io(struct bio *bio, int error)
 }
 EXPORT_SYMBOL_GPL(dio_end_io);
 
-static void
-dio_bio_alloc(struct dio *dio, struct block_device *bdev,
-              sector_t first_sector, int nr_vecs)
+static inline void
+dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
+              struct block_device *bdev,
+              sector_t first_sector, int nr_vecs)
 {
         struct bio *bio;
 
@@ -386,8 +397,8 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
         else
                 bio->bi_end_io = dio_bio_end_io;
 
-        dio->bio = bio;
-        dio->logical_offset_in_bio = dio->cur_page_fs_offset;
+        sdio->bio = bio;
+        sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
 
 /*
@@ -397,9 +408,9 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
  *
  * bios hold a dio reference between submit_bio and ->end_io.
  */
-static void dio_bio_submit(struct dio *dio)
+static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 {
-        struct bio *bio = dio->bio;
+        struct bio *bio = sdio->bio;
         unsigned long flags;
 
         bio->bi_private = dio;
@@ -411,24 +422,24 @@ static void dio_bio_submit(struct dio *dio)
         if (dio->is_async && dio->rw == READ)
                 bio_set_pages_dirty(bio);
 
-        if (dio->submit_io)
-                dio->submit_io(dio->rw, bio, dio->inode,
-                               dio->logical_offset_in_bio);
+        if (sdio->submit_io)
+                sdio->submit_io(dio->rw, bio, dio->inode,
+                                sdio->logical_offset_in_bio);
         else
                 submit_bio(dio->rw, bio);
 
-        dio->bio = NULL;
-        dio->boundary = 0;
-        dio->logical_offset_in_bio = 0;
+        sdio->bio = NULL;
+        sdio->boundary = 0;
+        sdio->logical_offset_in_bio = 0;
 }
 
 /*
  * Release any resources in case of a failure
  */
-static void dio_cleanup(struct dio *dio)
+static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
 {
-        while (dio_pages_present(dio))
-                page_cache_release(dio_get_page(dio));
+        while (dio_pages_present(sdio))
+                page_cache_release(dio_get_page(dio, sdio));
 }
 
 /*
@@ -518,11 +529,11 @@ static void dio_await_completion(struct dio *dio)
  *
  * This also helps to limit the peak amount of pinned userspace memory.
  */
-static int dio_bio_reap(struct dio *dio)
+static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
 {
         int ret = 0;
 
-        if (dio->reap_counter++ >= 64) {
+        if (sdio->reap_counter++ >= 64) {
                 while (dio->bio_list) {
                         unsigned long flags;
                         struct bio *bio;
@@ -536,14 +547,14 @@ static int dio_bio_reap(struct dio *dio)
                         if (ret == 0)
                                 ret = ret2;
                 }
-                dio->reap_counter = 0;
+                sdio->reap_counter = 0;
         }
         return ret;
 }
 
 /*
  * Call into the fs to map some more disk blocks. We record the current number
- * of available blocks at dio->blocks_available. These are in units of the
+ * of available blocks at sdio->blocks_available. These are in units of the
  * fs blocksize, (1 << inode->i_blkbits).
  *
  * The fs is allowed to map lots of blocks at once. If it wants to do that,
@@ -564,10 +575,10 @@ static int dio_bio_reap(struct dio *dio)
  * buffer_mapped(). However the direct-io code will only process holes one
  * block at a time - it will repeatedly call get_block() as it walks the hole.
  */
-static int get_more_blocks(struct dio *dio)
+static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
+                           struct buffer_head *map_bh)
 {
         int ret;
-        struct buffer_head *map_bh = &dio->map_bh;
         sector_t fs_startblk;   /* Into file, in filesystem-sized blocks */
         unsigned long fs_count; /* Number of filesystem-sized blocks */
         unsigned long dio_count;/* Number of dio_block-sized blocks */
@@ -580,11 +591,11 @@ static int get_more_blocks(struct dio *dio)
          */
         ret = dio->page_errors;
         if (ret == 0) {
-                BUG_ON(dio->block_in_file >= dio->final_block_in_request);
-                fs_startblk = dio->block_in_file >> dio->blkfactor;
-                dio_count = dio->final_block_in_request - dio->block_in_file;
-                fs_count = dio_count >> dio->blkfactor;
-                blkmask = (1 << dio->blkfactor) - 1;
+                BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
+                fs_startblk = sdio->block_in_file >> sdio->blkfactor;
+                dio_count = sdio->final_block_in_request - sdio->block_in_file;
+                fs_count = dio_count >> sdio->blkfactor;
+                blkmask = (1 << sdio->blkfactor) - 1;
                 if (dio_count & blkmask)
                         fs_count++;
 
@@ -604,13 +615,16 @@ static int get_more_blocks(struct dio *dio)
                  */
                 create = dio->rw & WRITE;
                 if (dio->flags & DIO_SKIP_HOLES) {
-                        if (dio->block_in_file < (i_size_read(dio->inode) >>
-                                                        dio->blkbits))
+                        if (sdio->block_in_file < (i_size_read(dio->inode) >>
+                                                        sdio->blkbits))
                                 create = 0;
                 }
 
-                ret = (*dio->get_block)(dio->inode, fs_startblk,
+                ret = (*sdio->get_block)(dio->inode, fs_startblk,
                                         map_bh, create);
+
+                /* Store for completion */
+                dio->private = map_bh->b_private;
         }
         return ret;
 }
@@ -618,20 +632,21 @@ static int get_more_blocks(struct dio *dio)
 /*
  * There is no bio. Make one now.
  */
-static int dio_new_bio(struct dio *dio, sector_t start_sector)
+static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
+                sector_t start_sector, struct buffer_head *map_bh)
 {
         sector_t sector;
         int ret, nr_pages;
 
-        ret = dio_bio_reap(dio);
+        ret = dio_bio_reap(dio, sdio);
         if (ret)
                 goto out;
-        sector = start_sector << (dio->blkbits - 9);
-        nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+        sector = start_sector << (sdio->blkbits - 9);
+        nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev));
         nr_pages = min(nr_pages, BIO_MAX_PAGES);
         BUG_ON(nr_pages <= 0);
-        dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
-        dio->boundary = 0;
+        dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
+        sdio->boundary = 0;
 out:
         return ret;
 }
@@ -643,21 +658,21 @@ out:
  *
  * Return zero on success. Non-zero means the caller needs to start a new BIO.
  */
-static int dio_bio_add_page(struct dio *dio)
+static inline int dio_bio_add_page(struct dio_submit *sdio)
 {
         int ret;
 
-        ret = bio_add_page(dio->bio, dio->cur_page,
-                        dio->cur_page_len, dio->cur_page_offset);
-        if (ret == dio->cur_page_len) {
+        ret = bio_add_page(sdio->bio, sdio->cur_page,
+                        sdio->cur_page_len, sdio->cur_page_offset);
+        if (ret == sdio->cur_page_len) {
                 /*
                  * Decrement count only, if we are done with this page
                  */
-                if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
-                        dio->pages_in_io--;
-                page_cache_get(dio->cur_page);
-                dio->final_block_in_bio = dio->cur_page_block +
-                        (dio->cur_page_len >> dio->blkbits);
+                if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
+                        sdio->pages_in_io--;
+                page_cache_get(sdio->cur_page);
+                sdio->final_block_in_bio = sdio->cur_page_block +
+                        (sdio->cur_page_len >> sdio->blkbits);
                 ret = 0;
         } else {
                 ret = 1;
@@ -675,14 +690,15 @@ static int dio_bio_add_page(struct dio *dio)
  * The caller of this function is responsible for removing cur_page from the
  * dio, and for dropping the refcount which came from that presence.
  */
-static int dio_send_cur_page(struct dio *dio)
+static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
+                struct buffer_head *map_bh)
 {
         int ret = 0;
 
-        if (dio->bio) {
-                loff_t cur_offset = dio->cur_page_fs_offset;
-                loff_t bio_next_offset = dio->logical_offset_in_bio +
-                        dio->bio->bi_size;
+        if (sdio->bio) {
+                loff_t cur_offset = sdio->cur_page_fs_offset;
+                loff_t bio_next_offset = sdio->logical_offset_in_bio +
+                        sdio->bio->bi_size;
 
                 /*
                  * See whether this new request is contiguous with the old.
@@ -698,28 +714,28 @@ static int dio_send_cur_page(struct dio *dio)
                  * be the next logical offset in the bio, submit the bio we
                  * have.
                  */
-                if (dio->final_block_in_bio != dio->cur_page_block ||
+                if (sdio->final_block_in_bio != sdio->cur_page_block ||
                     cur_offset != bio_next_offset)
-                        dio_bio_submit(dio);
+                        dio_bio_submit(dio, sdio);
                 /*
                  * Submit now if the underlying fs is about to perform a
                  * metadata read
                  */
-                else if (dio->boundary)
-                        dio_bio_submit(dio);
+                else if (sdio->boundary)
+                        dio_bio_submit(dio, sdio);
         }
 
-        if (dio->bio == NULL) {
-                ret = dio_new_bio(dio, dio->cur_page_block);
+        if (sdio->bio == NULL) {
+                ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
                 if (ret)
                         goto out;
         }
 
-        if (dio_bio_add_page(dio) != 0) {
-                dio_bio_submit(dio);
-                ret = dio_new_bio(dio, dio->cur_page_block);
+        if (dio_bio_add_page(sdio) != 0) {
+                dio_bio_submit(dio, sdio);
+                ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
                 if (ret == 0) {
-                        ret = dio_bio_add_page(dio);
+                        ret = dio_bio_add_page(sdio);
                         BUG_ON(ret != 0);
                 }
         }
@@ -744,9 +760,10 @@ out:
  * If that doesn't work out then we put the old page into the bio and add this
  * page to the dio instead.
  */
-static int
-submit_page_section(struct dio *dio, struct page *page,
-                unsigned offset, unsigned len, sector_t blocknr)
+static inline int
+submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
+                unsigned offset, unsigned len, sector_t blocknr,
+                struct buffer_head *map_bh)
 {
         int ret = 0;
 
@@ -760,20 +777,20 @@ submit_page_section(struct dio *dio, struct page *page,
         /*
          * Can we just grow the current page's presence in the dio?
          */
-        if ( (dio->cur_page == page) &&
-                (dio->cur_page_offset + dio->cur_page_len == offset) &&
-                (dio->cur_page_block +
-                (dio->cur_page_len >> dio->blkbits) == blocknr)) {
-                dio->cur_page_len += len;
+        if (sdio->cur_page == page &&
+            sdio->cur_page_offset + sdio->cur_page_len == offset &&
+            sdio->cur_page_block +
+            (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
+                sdio->cur_page_len += len;
 
                 /*
-                 * If dio->boundary then we want to schedule the IO now to
+                 * If sdio->boundary then we want to schedule the IO now to
                  * avoid metadata seeks.
                  */
-                if (dio->boundary) {
-                        ret = dio_send_cur_page(dio);
-                        page_cache_release(dio->cur_page);
-                        dio->cur_page = NULL;
+                if (sdio->boundary) {
+                        ret = dio_send_cur_page(dio, sdio, map_bh);
+                        page_cache_release(sdio->cur_page);
+                        sdio->cur_page = NULL;
                 }
                 goto out;
         }
@@ -781,20 +798,20 @@ submit_page_section(struct dio *dio, struct page *page,
         /*
          * If there's a deferred page already there then send it.
          */
-        if (dio->cur_page) {
-                ret = dio_send_cur_page(dio);
-                page_cache_release(dio->cur_page);
-                dio->cur_page = NULL;
+        if (sdio->cur_page) {
+                ret = dio_send_cur_page(dio, sdio, map_bh);
+                page_cache_release(sdio->cur_page);
+                sdio->cur_page = NULL;
                 if (ret)
                         goto out;
         }
 
         page_cache_get(page);           /* It is in dio */
-        dio->cur_page = page;
-        dio->cur_page_offset = offset;
-        dio->cur_page_len = len;
-        dio->cur_page_block = blocknr;
-        dio->cur_page_fs_offset = dio->block_in_file << dio->blkbits;
+        sdio->cur_page = page;
+        sdio->cur_page_offset = offset;
+        sdio->cur_page_len = len;
+        sdio->cur_page_block = blocknr;
+        sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
 out:
         return ret;
 }
@@ -804,16 +821,16 @@ out:
  * file blocks. Only called for S_ISREG files - blockdevs do not set
  * buffer_new
  */
-static void clean_blockdev_aliases(struct dio *dio)
+static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
 {
         unsigned i;
         unsigned nblocks;
 
-        nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
+        nblocks = map_bh->b_size >> dio->inode->i_blkbits;
 
         for (i = 0; i < nblocks; i++) {
-                unmap_underlying_metadata(dio->map_bh.b_bdev,
-                                        dio->map_bh.b_blocknr + i);
+                unmap_underlying_metadata(map_bh->b_bdev,
+                                          map_bh->b_blocknr + i);
         }
 }
 
@@ -826,19 +843,20 @@ static void clean_blockdev_aliases(struct dio *dio)
  * `end' is zero if we're doing the start of the IO, 1 at the end of the
  * IO.
  */
-static void dio_zero_block(struct dio *dio, int end)
+static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
+                int end, struct buffer_head *map_bh)
 {
         unsigned dio_blocks_per_fs_block;
         unsigned this_chunk_blocks;     /* In dio_blocks */
         unsigned this_chunk_bytes;
         struct page *page;
 
-        dio->start_zero_done = 1;
-        if (!dio->blkfactor || !buffer_new(&dio->map_bh))
+        sdio->start_zero_done = 1;
+        if (!sdio->blkfactor || !buffer_new(map_bh))
                 return;
 
-        dio_blocks_per_fs_block = 1 << dio->blkfactor;
-        this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
+        dio_blocks_per_fs_block = 1 << sdio->blkfactor;
+        this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);
 
         if (!this_chunk_blocks)
                 return;
@@ -850,14 +868,14 @@ static void dio_zero_block(struct dio *dio, int end)
         if (end)
                 this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
 
-        this_chunk_bytes = this_chunk_blocks << dio->blkbits;
+        this_chunk_bytes = this_chunk_blocks << sdio->blkbits;
 
         page = ZERO_PAGE(0);
-        if (submit_page_section(dio, page, 0, this_chunk_bytes,
-                                dio->next_block_for_io))
+        if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
+                                sdio->next_block_for_io, map_bh))
                 return;
 
-        dio->next_block_for_io += this_chunk_blocks;
+        sdio->next_block_for_io += this_chunk_blocks;
 }
 
 /*
@@ -876,20 +894,20 @@ static void dio_zero_block(struct dio *dio, int end)
  * it should set b_size to PAGE_SIZE or more inside get_block(). This gives
  * fine alignment but still allows this function to work in PAGE_SIZE units.
  */
-static int do_direct_IO(struct dio *dio)
+static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
+                struct buffer_head *map_bh)
 {
-        const unsigned blkbits = dio->blkbits;
+        const unsigned blkbits = sdio->blkbits;
         const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
         struct page *page;
         unsigned block_in_page;
-        struct buffer_head *map_bh = &dio->map_bh;
         int ret = 0;
 
         /* The I/O can start at any block offset within the first page */
-        block_in_page = dio->first_block_in_page;
+        block_in_page = sdio->first_block_in_page;
 
-        while (dio->block_in_file < dio->final_block_in_request) {
-                page = dio_get_page(dio);
+        while (sdio->block_in_file < sdio->final_block_in_request) {
+                page = dio_get_page(dio, sdio);
                 if (IS_ERR(page)) {
                         ret = PTR_ERR(page);
                         goto out;
@@ -901,14 +919,14 @@ static int do_direct_IO(struct dio *dio)
                         unsigned this_chunk_blocks;     /* # of blocks */
                         unsigned u;
 
-                        if (dio->blocks_available == 0) {
+                        if (sdio->blocks_available == 0) {
                                 /*
                                  * Need to go and map some more disk
                                  */
                                 unsigned long blkmask;
                                 unsigned long dio_remainder;
 
-                                ret = get_more_blocks(dio);
+                                ret = get_more_blocks(dio, sdio, map_bh);
                                 if (ret) {
                                         page_cache_release(page);
                                         goto out;
@@ -916,18 +934,18 @@ static int do_direct_IO(struct dio *dio)
                                 if (!buffer_mapped(map_bh))
                                         goto do_holes;
 
-                                dio->blocks_available =
-                                                map_bh->b_size >> dio->blkbits;
-                                dio->next_block_for_io =
-                                        map_bh->b_blocknr << dio->blkfactor;
+                                sdio->blocks_available =
+                                                map_bh->b_size >> sdio->blkbits;
+                                sdio->next_block_for_io =
+                                        map_bh->b_blocknr << sdio->blkfactor;
                                 if (buffer_new(map_bh))
-                                        clean_blockdev_aliases(dio);
+                                        clean_blockdev_aliases(dio, map_bh);
 
-                                if (!dio->blkfactor)
+                                if (!sdio->blkfactor)
                                         goto do_holes;
 
-                                blkmask = (1 << dio->blkfactor) - 1;
-                                dio_remainder = (dio->block_in_file & blkmask);
+                                blkmask = (1 << sdio->blkfactor) - 1;
+                                dio_remainder = (sdio->block_in_file & blkmask);
 
                                 /*
                                  * If we are at the start of IO and that IO
@@ -941,8 +959,8 @@ static int do_direct_IO(struct dio *dio)
                                  * on-disk
                                  */
                                 if (!buffer_new(map_bh))
-                                        dio->next_block_for_io += dio_remainder;
-                                dio->blocks_available -= dio_remainder;
+                                        sdio->next_block_for_io += dio_remainder;
+                                sdio->blocks_available -= dio_remainder;
                         }
 do_holes:
                         /* Handle holes */
@@ -961,7 +979,7 @@ do_holes:
                                  */
                                 i_size_aligned = ALIGN(i_size_read(dio->inode),
                                                         1 << blkbits);
-                                if (dio->block_in_file >=
+                                if (sdio->block_in_file >=
                                                 i_size_aligned >> blkbits) {
                                         /* We hit eof */
                                         page_cache_release(page);
@@ -969,7 +987,7 @@ do_holes:
                                 }
                                 zero_user(page, block_in_page << blkbits,
                                                 1 << blkbits);
-                                dio->block_in_file++;
+                                sdio->block_in_file++;
                                 block_in_page++;
                                 goto next_block;
                         }
@@ -979,38 +997,41 @@ do_holes:
                          * is finer than the underlying fs, go check to see if
                          * we must zero out the start of this block.
                          */
-                        if (unlikely(dio->blkfactor && !dio->start_zero_done))
-                                dio_zero_block(dio, 0);
+                        if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
+                                dio_zero_block(dio, sdio, 0, map_bh);
 
                         /*
                          * Work out, in this_chunk_blocks, how much disk we
                          * can add to this page
                          */
-                        this_chunk_blocks = dio->blocks_available;
+                        this_chunk_blocks = sdio->blocks_available;
                         u = (PAGE_SIZE - offset_in_page) >> blkbits;
                         if (this_chunk_blocks > u)
                                 this_chunk_blocks = u;
-                        u = dio->final_block_in_request - dio->block_in_file;
+                        u = sdio->final_block_in_request - sdio->block_in_file;
                         if (this_chunk_blocks > u)
                                 this_chunk_blocks = u;
                         this_chunk_bytes = this_chunk_blocks << blkbits;
                         BUG_ON(this_chunk_bytes == 0);
 
-                        dio->boundary = buffer_boundary(map_bh);
-                        ret = submit_page_section(dio, page, offset_in_page,
-                                this_chunk_bytes, dio->next_block_for_io);
+                        sdio->boundary = buffer_boundary(map_bh);
+                        ret = submit_page_section(dio, sdio, page,
+                                                  offset_in_page,
+                                                  this_chunk_bytes,
+                                                  sdio->next_block_for_io,
+                                                  map_bh);
                         if (ret) {
                                 page_cache_release(page);
                                 goto out;
                         }
-                        dio->next_block_for_io += this_chunk_blocks;
+                        sdio->next_block_for_io += this_chunk_blocks;
 
-                        dio->block_in_file += this_chunk_blocks;
+                        sdio->block_in_file += this_chunk_blocks;
                         block_in_page += this_chunk_blocks;
-                        dio->blocks_available -= this_chunk_blocks;
+                        sdio->blocks_available -= this_chunk_blocks;
 next_block:
-                        BUG_ON(dio->block_in_file > dio->final_block_in_request);
-                        if (dio->block_in_file == dio->final_block_in_request)
+                        BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
+                        if (sdio->block_in_file == sdio->final_block_in_request)
                                 break;
                 }
 
@@ -1022,135 +1043,10 @@ out:
         return ret;
 }
 
-static ssize_t
-direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
-        const struct iovec *iov, loff_t offset, unsigned long nr_segs,
-        unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
-        dio_submit_t submit_io, struct dio *dio)
+static inline int drop_refcount(struct dio *dio)
 {
-        unsigned long user_addr;
+        int ret2;
         unsigned long flags;
-        int seg;
-        ssize_t ret = 0;
-        ssize_t ret2;
-        size_t bytes;
-
-        dio->inode = inode;
-        dio->rw = rw;
-        dio->blkbits = blkbits;
-        dio->blkfactor = inode->i_blkbits - blkbits;
-        dio->block_in_file = offset >> blkbits;
-
-        dio->get_block = get_block;
-        dio->end_io = end_io;
-        dio->submit_io = submit_io;
-        dio->final_block_in_bio = -1;
-        dio->next_block_for_io = -1;
-
-        dio->iocb = iocb;
-        dio->i_size = i_size_read(inode);
-
-        spin_lock_init(&dio->bio_lock);
-        dio->refcount = 1;
-
-        /*
-         * In case of non-aligned buffers, we may need 2 more
-         * pages since we need to zero out first and last block.
-         */
-        if (unlikely(dio->blkfactor))
-                dio->pages_in_io = 2;
-
-        for (seg = 0; seg < nr_segs; seg++) {
-                user_addr = (unsigned long)iov[seg].iov_base;
-                dio->pages_in_io +=
-                        ((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
-                                - user_addr/PAGE_SIZE);
-        }
-
-        for (seg = 0; seg < nr_segs; seg++) {
-                user_addr = (unsigned long)iov[seg].iov_base;
-                dio->size += bytes = iov[seg].iov_len;
-
-                /* Index into the first page of the first block */
-                dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
-                dio->final_block_in_request = dio->block_in_file +
-                                                (bytes >> blkbits);
-                /* Page fetching state */
-                dio->head = 0;
-                dio->tail = 0;
-                dio->curr_page = 0;
-
-                dio->total_pages = 0;
-                if (user_addr & (PAGE_SIZE-1)) {
-                        dio->total_pages++;
-                        bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
-                }
-                dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
-                dio->curr_user_address = user_addr;
-
-                ret = do_direct_IO(dio);
-
-                dio->result += iov[seg].iov_len -
-                        ((dio->final_block_in_request - dio->block_in_file) <<
-                                        blkbits);
-
-                if (ret) {
-                        dio_cleanup(dio);
-                        break;
-                }
-        } /* end iovec loop */
-
-        if (ret == -ENOTBLK) {
-                /*
-                 * The remaining part of the request will be
-                 * be handled by buffered I/O when we return
-                 */
-                ret = 0;
-        }
-        /*
-         * There may be some unwritten disk at the end of a part-written
-         * fs-block-sized block. Go zero that now.
-         */
-        dio_zero_block(dio, 1);
-
-        if (dio->cur_page) {
-                ret2 = dio_send_cur_page(dio);
-                if (ret == 0)
-                        ret = ret2;
-                page_cache_release(dio->cur_page);
-                dio->cur_page = NULL;
-        }
-        if (dio->bio)
-                dio_bio_submit(dio);
-
-        /*
-         * It is possible that, we return short IO due to end of file.
-         * In that case, we need to release all the pages we got hold on.
-         */
-        dio_cleanup(dio);
-
-        /*
-         * All block lookups have been performed. For READ requests
-         * we can let i_mutex go now that its achieved its purpose
-         * of protecting us from looking up uninitialized blocks.
-         */
-        if (rw == READ && (dio->flags & DIO_LOCKING))
-                mutex_unlock(&dio->inode->i_mutex);
-
-        /*
-         * The only time we want to leave bios in flight is when a successful
-         * partial aio read or full aio write have been setup. In that case
-         * bio completion will call aio_complete. The only time it's safe to
-         * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
-         * This had *better* be the only place that raises -EIOCBQUEUED.
-         */
-        BUG_ON(ret == -EIOCBQUEUED);
-        if (dio->is_async && ret == 0 && dio->result &&
-            ((rw & READ) || (dio->result == dio->size)))
-                ret = -EIOCBQUEUED;
-
-        if (ret != -EIOCBQUEUED)
-                dio_await_completion(dio);
 
         /*
          * Sync will always be dropping the final ref and completing the
@@ -1166,14 +1062,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
         spin_lock_irqsave(&dio->bio_lock, flags);
         ret2 = --dio->refcount;
         spin_unlock_irqrestore(&dio->bio_lock, flags);
-
-        if (ret2 == 0) {
-                ret = dio_complete(dio, offset, ret, false);
-                kfree(dio);
-        } else
-                BUG_ON(ret != -EIOCBQUEUED);
-
-        return ret;
+        return ret2;
 }
 
 /*
@@ -1195,6 +1084,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
  * expected that filesystem provide exclusion between new direct I/O
  * and truncates. For DIO_LOCKING filesystems this is done by i_mutex,
  * but other filesystems need to take care of this on their own.
+ *
+ * NOTE: if you pass "sdio" to anything by pointer make sure that function
+ * is always inlined. Otherwise gcc is unable to split the structure into
+ * individual fields and will generate much worse code. This is important
+ * for the whole file.
  */
 ssize_t
 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
@@ -1211,6 +1105,10 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         ssize_t retval = -EINVAL;
         loff_t end = offset;
         struct dio *dio;
+        struct dio_submit sdio = { 0, };
+        unsigned long user_addr;
+        size_t bytes;
+        struct buffer_head map_bh = { 0, };
 
         if (rw & WRITE)
                 rw = WRITE_ODIRECT;
@@ -1244,7 +1142,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         if (rw == READ && end == offset)
                 return 0;
 
-        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+        dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
         retval = -ENOMEM;
         if (!dio)
                 goto out;
@@ -1268,7 +1166,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
                                               end - 1);
                 if (retval) {
                         mutex_unlock(&inode->i_mutex);
-                        kfree(dio);
+                        kmem_cache_free(dio_cache, dio);
                         goto out;
                 }
         }
@@ -1288,11 +1186,141 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
                 (end > i_size_read(inode)));
 
-        retval = direct_io_worker(rw, iocb, inode, iov, offset,
-                                nr_segs, blkbits, get_block, end_io,
-                                submit_io, dio);
+        retval = 0;
+
+        dio->inode = inode;
+        dio->rw = rw;
+        sdio.blkbits = blkbits;
+        sdio.blkfactor = inode->i_blkbits - blkbits;
+        sdio.block_in_file = offset >> blkbits;
+
+        sdio.get_block = get_block;
+        dio->end_io = end_io;
+        sdio.submit_io = submit_io;
+        sdio.final_block_in_bio = -1;
+        sdio.next_block_for_io = -1;
+
+        dio->iocb = iocb;
+        dio->i_size = i_size_read(inode);
+
+        spin_lock_init(&dio->bio_lock);
+        dio->refcount = 1;
+
+        /*
+         * In case of non-aligned buffers, we may need 2 more
+         * pages since we need to zero out first and last block.
+         */
+        if (unlikely(sdio.blkfactor))
+                sdio.pages_in_io = 2;
+
+        for (seg = 0; seg < nr_segs; seg++) {
+                user_addr = (unsigned long)iov[seg].iov_base;
+                sdio.pages_in_io +=
+                        ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
+                                PAGE_SIZE - user_addr / PAGE_SIZE);
+        }
+
+        for (seg = 0; seg < nr_segs; seg++) {
+                user_addr = (unsigned long)iov[seg].iov_base;
+                sdio.size += bytes = iov[seg].iov_len;
+
+                /* Index into the first page of the first block */
+                sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+                sdio.final_block_in_request = sdio.block_in_file +
+                                                (bytes >> blkbits);
+                /* Page fetching state */
+                sdio.head = 0;
+                sdio.tail = 0;
+                sdio.curr_page = 0;
+
+                sdio.total_pages = 0;
+                if (user_addr & (PAGE_SIZE-1)) {
+                        sdio.total_pages++;
+                        bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+                }
+                sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+                sdio.curr_user_address = user_addr;
+
+                retval = do_direct_IO(dio, &sdio, &map_bh);
+
+                dio->result += iov[seg].iov_len -
+                        ((sdio.final_block_in_request - sdio.block_in_file) <<
+                                        blkbits);
+
+                if (retval) {
+                        dio_cleanup(dio, &sdio);
+                        break;
+                }
+        } /* end iovec loop */
+
+        if (retval == -ENOTBLK) {
+                /*
+                 * The remaining part of the request will be
+                 * be handled by buffered I/O when we return
+                 */
+                retval = 0;
+        }
+        /*
+         * There may be some unwritten disk at the end of a part-written
+         * fs-block-sized block. Go zero that now.
+         */
+        dio_zero_block(dio, &sdio, 1, &map_bh);
+
+        if (sdio.cur_page) {
+                ssize_t ret2;
+
+                ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
+                if (retval == 0)
+                        retval = ret2;
+                page_cache_release(sdio.cur_page);
+                sdio.cur_page = NULL;
+        }
+        if (sdio.bio)
+                dio_bio_submit(dio, &sdio);
+
+        /*
+         * It is possible that, we return short IO due to end of file.
+         * In that case, we need to release all the pages we got hold on.
+         */
+        dio_cleanup(dio, &sdio);
+
+        /*
+         * All block lookups have been performed. For READ requests
+         * we can let i_mutex go now that its achieved its purpose
+         * of protecting us from looking up uninitialized blocks.
+         */
+        if (rw == READ && (dio->flags & DIO_LOCKING))
+                mutex_unlock(&dio->inode->i_mutex);
+
+        /*
+         * The only time we want to leave bios in flight is when a successful
+         * partial aio read or full aio write have been setup. In that case
+         * bio completion will call aio_complete. The only time it's safe to
+         * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
+         * This had *better* be the only place that raises -EIOCBQUEUED.
+         */
+        BUG_ON(retval == -EIOCBQUEUED);
+        if (dio->is_async && retval == 0 && dio->result &&
+            ((rw & READ) || (dio->result == sdio.size)))
+                retval = -EIOCBQUEUED;
+
+        if (retval != -EIOCBQUEUED)
+                dio_await_completion(dio);
+
+        if (drop_refcount(dio) == 0) {
+                retval = dio_complete(dio, offset, retval, false);
+                kmem_cache_free(dio_cache, dio);
+        } else
+                BUG_ON(retval != -EIOCBQUEUED);
 
 out:
         return retval;
 }
 EXPORT_SYMBOL(__blockdev_direct_IO);
+
+static __init int dio_init(void)
+{
+        dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
+        return 0;
+}
+module_init(dio_init)
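
Read together, the tail of the patched __blockdev_direct_IO() implements a simple refcount handoff: the submitter holds one reference and each in-flight bio holds another, and whichever side drops the last reference completes the request and frees the dio. A sketch assembled from the hunks above (dio_finish_sketch is a hypothetical name, and error paths are omitted):

        /*
         * Sketch of the completion protocol (hypothetical helper, not part of
         * the patch): sync I/O waits for its bios and then necessarily drops
         * the final reference; AIO returns -EIOCBQUEUED and lets
         * dio_bio_end_aio() complete and free the dio from bio context.
         */
        static ssize_t dio_finish_sketch(struct dio *dio, loff_t offset, ssize_t retval)
        {
                if (retval != -EIOCBQUEUED)
                        dio_await_completion(dio);      /* sync: wait for in-flight bios */

                if (drop_refcount(dio) == 0) {          /* we dropped the final ref */
                        retval = dio_complete(dio, offset, retval, false);
                        kmem_cache_free(dio_cache, dio);
                } else
                        BUG_ON(retval != -EIOCBQUEUED); /* AIO owns the remaining refs */
                return retval;
        }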