Diffstat (limited to 'fs/ext4/inode.c'):
 fs/ext4/inode.c | 586 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 509 insertions(+), 77 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 064746fad581..2c8caa51addb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <linux/workqueue.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -192,7 +193,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  * so before we call here everything must be consistently dirtied against
  * this transaction.
  */
- int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
+int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                  int nblocks)
 {
         int ret;
@@ -208,6 +209,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
         up_write(&EXT4_I(inode)->i_data_sem);
         ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
         down_write(&EXT4_I(inode)->i_data_sem);
+        ext4_discard_preallocations(inode);
 
         return ret;
 }
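Note: the restart helper above is used from ext4's truncate path via an
extend-or-restart idiom, and the new ext4_discard_preallocations() call is
needed because the restart momentarily drops i_data_sem. A minimal sketch of
the caller pattern (a hypothetical simplification, not part of this patch;
blocks_for_truncate(), EXT4_RESERVE_TRANS_BLOCKS and the ext4_handle_*
helpers are assumed from the surrounding tree):

        /* Extend the running handle if we can; otherwise commit and restart it. */
        static int extend_or_restart_transaction(handle_t *handle, struct inode *inode)
        {
                if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS + 1))
                        return 0;               /* enough credits left over */
                if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                        return 0;               /* handle extended in place */
                /* i_data_sem is dropped and retaken inside the helper */
                return ext4_truncate_restart_trans(handle, inode,
                                                   blocks_for_truncate(inode));
        }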
@@ -1145,6 +1147,64 @@ static int check_block_validity(struct inode *inode, const char *msg,
 }
 
 /*
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+                                    unsigned int max_pages)
+{
+        struct address_space *mapping = inode->i_mapping;
+        pgoff_t index;
+        struct pagevec pvec;
+        pgoff_t num = 0;
+        int i, nr_pages, done = 0;
+
+        if (max_pages == 0)
+                return 0;
+        pagevec_init(&pvec, 0);
+        while (!done) {
+                index = idx;
+                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                              PAGECACHE_TAG_DIRTY,
+                                              (pgoff_t)PAGEVEC_SIZE);
+                if (nr_pages == 0)
+                        break;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+                        struct buffer_head *bh, *head;
+
+                        lock_page(page);
+                        if (unlikely(page->mapping != mapping) ||
+                            !PageDirty(page) ||
+                            PageWriteback(page) ||
+                            page->index != idx) {
+                                done = 1;
+                                unlock_page(page);
+                                break;
+                        }
+                        if (page_has_buffers(page)) {
+                                bh = head = page_buffers(page);
+                                do {
+                                        if (!buffer_delay(bh) &&
+                                            !buffer_unwritten(bh))
+                                                done = 1;
+                                        bh = bh->b_this_page;
+                                } while (!done && (bh != head));
+                        }
+                        unlock_page(page);
+                        if (done)
+                                break;
+                        idx++;
+                        num++;
+                        if (num >= max_pages)
+                                break;
+                }
+                pagevec_release(&pvec);
+        }
+        return num;
+}
+
+/*
  * The ext4_get_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
  *
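As a concrete (hypothetical) illustration of the scan above: if pages 100
through 149 of an inode are dirty delalloc pages whose buffers are all
delayed or unwritten, and page 150 is clean, then

        pgoff_t nr = ext4_num_dirty_pages(inode, 100, 1024);    /* nr == 50 */

The walk also stops early at a page under writeback, at a gap in the dirty
range (page->index != idx), or at a page containing a buffer that is neither
delayed nor unwritten.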
@@ -1175,6 +1235,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
         clear_buffer_mapped(bh);
         clear_buffer_unwritten(bh);
 
+        ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u, "
+                  "logical block %lu\n", inode->i_ino, flags, max_blocks,
+                  (unsigned long)block);
         /*
          * Try to see if we can get the block without requesting a new
          * file system block.
@@ -1796,11 +1859,11 @@ repeat:
 
         if (ext4_claim_free_blocks(sbi, total)) {
                 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+                vfs_dq_release_reservation_block(inode, total);
                 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                         yield();
                         goto repeat;
                 }
-                vfs_dq_release_reservation_block(inode, total);
                 return -ENOSPC;
         }
         EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -2092,18 +2155,18 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 static void ext4_print_free_blocks(struct inode *inode)
 {
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-        printk(KERN_EMERG "Total free blocks count %lld\n",
+        printk(KERN_CRIT "Total free blocks count %lld\n",
                ext4_count_free_blocks(inode->i_sb));
-        printk(KERN_EMERG "Free/Dirty block details\n");
-        printk(KERN_EMERG "free_blocks=%lld\n",
-               (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
-        printk(KERN_EMERG "dirty_blocks=%lld\n",
-               (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
-        printk(KERN_EMERG "Block reservation details\n");
-        printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
+        printk(KERN_CRIT "Free/Dirty block details\n");
+        printk(KERN_CRIT "free_blocks=%lld\n",
+               (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
+        printk(KERN_CRIT "dirty_blocks=%lld\n",
+               (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+        printk(KERN_CRIT "Block reservation details\n");
+        printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
                EXT4_I(inode)->i_reserved_data_blocks);
-        printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
+        printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
                EXT4_I(inode)->i_reserved_meta_blocks);
         return;
 }
 
@@ -2189,14 +2252,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                          * writepage and writepages will again try to write
                          * the same.
                          */
-                        printk(KERN_EMERG "%s block allocation failed for inode %lu "
-                               "at logical offset %llu with max blocks "
-                               "%zd with error %d\n",
-                               __func__, mpd->inode->i_ino,
-                               (unsigned long long)next,
-                               mpd->b_size >> mpd->inode->i_blkbits, err);
-                        printk(KERN_EMERG "This should not happen.!! "
+                        ext4_msg(mpd->inode->i_sb, KERN_CRIT,
+                                 "delayed block allocation failed for inode %lu at "
+                                 "logical offset %llu with max blocks %zd with "
+                                 "error %d\n", mpd->inode->i_ino,
+                                 (unsigned long long) next,
+                                 mpd->b_size >> mpd->inode->i_blkbits, err);
+                        printk(KERN_CRIT "This should not happen!! "
                                "Data will be lost\n");
                         if (err == -ENOSPC) {
                                 ext4_print_free_blocks(mpd->inode);
                         }
@@ -2743,8 +2806,10 @@ static int ext4_da_writepages(struct address_space *mapping,
         int no_nrwrite_index_update;
         int pages_written = 0;
         long pages_skipped;
+        unsigned int max_pages;
         int range_cyclic, cycled = 1, io_done = 0;
-        int needed_blocks, ret = 0, nr_to_writebump = 0;
+        int needed_blocks, ret = 0;
+        long desired_nr_to_write, nr_to_writebump = 0;
         loff_t range_start = wbc->range_start;
         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
@@ -2771,16 +2836,6 @@ static int ext4_da_writepages(struct address_space *mapping,
         if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                 return -EROFS;
 
-        /*
-         * Make sure nr_to_write is >= sbi->s_mb_stream_request
-         * This make sure small files blocks are allocated in
-         * single attempt. This ensure that small files
-         * get less fragmented.
-         */
-        if (wbc->nr_to_write < sbi->s_mb_stream_request) {
-                nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
-                wbc->nr_to_write = sbi->s_mb_stream_request;
-        }
         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                 range_whole = 1;
 
@@ -2795,6 +2850,36 @@ static int ext4_da_writepages(struct address_space *mapping,
         } else
                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
+        /*
+         * This works around two forms of stupidity.  The first is in
+         * the writeback code, which caps the maximum number of pages
+         * written to be 1024 pages.  This is wrong on multiple
+         * levels; different architectures have a different page size,
+         * which changes the maximum amount of data which gets
+         * written.  Secondly, 4 megabytes is way too small.  XFS
+         * forces this value to be 16 megabytes by multiplying the
+         * nr_to_write parameter by four, and then relies on its
+         * allocator to allocate larger extents to make them
+         * contiguous.  Unfortunately this brings us to the second
+         * stupidity, which is that ext4's mballoc code only allocates
+         * at most 2048 blocks.  So we force contiguous writes up to
+         * the number of dirty blocks in the inode, or
+         * sbi->s_max_writeback_mb_bump, whichever is smaller.
+         */
+        max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+        if (!range_cyclic && range_whole)
+                desired_nr_to_write = wbc->nr_to_write * 8;
+        else
+                desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+                                                           max_pages);
+        if (desired_nr_to_write > max_pages)
+                desired_nr_to_write = max_pages;
+
+        if (wbc->nr_to_write < desired_nr_to_write) {
+                nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+                wbc->nr_to_write = desired_nr_to_write;
+        }
+
         mpd.wbc = wbc;
         mpd.inode = mapping->host;
 
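To make the sizing concrete, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12)
and a default s_max_writeback_mb_bump of 128 (the default used elsewhere in
this patch series; both values are assumptions, not shown in this hunk):

        max_pages = 128 << (20 - 12) = 128 * 256 = 32768 pages = 128 MiB

so nr_to_write is bumped to the contiguous dirty-page count (or eight times
the caller's value for a whole-file sync), but never beyond 32768 pages.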
@@ -2822,10 +2907,9 @@ retry:
                 handle = ext4_journal_start(inode, needed_blocks);
                 if (IS_ERR(handle)) {
                         ret = PTR_ERR(handle);
-                        printk(KERN_CRIT "%s: jbd2_start: "
-                               "%ld pages, ino %lu; err %d\n", __func__,
-                               wbc->nr_to_write, inode->i_ino, ret);
-                        dump_stack();
+                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
+                                 "%ld pages, ino %lu; err %d\n", __func__,
+                                 wbc->nr_to_write, inode->i_ino, ret);
                         goto out_writepages;
                 }
 
@@ -2897,9 +2981,10 @@ retry:
                         goto retry;
         }
         if (pages_skipped != wbc->pages_skipped)
-                printk(KERN_EMERG "This should not happen leaving %s "
-                       "with nr_to_write = %ld ret = %d\n",
-                       __func__, wbc->nr_to_write, ret);
+                ext4_msg(inode->i_sb, KERN_CRIT,
+                         "This should not happen leaving %s "
+                         "with nr_to_write = %ld ret = %d\n",
+                         __func__, wbc->nr_to_write, ret);
 
         /* Update index */
         index += pages_written;
@@ -2914,7 +2999,8 @@ retry:
 out_writepages:
         if (!no_nrwrite_index_update)
                 wbc->no_nrwrite_index_update = 0;
-        wbc->nr_to_write -= nr_to_writebump;
+        if (wbc->nr_to_write > nr_to_writebump)
+                wbc->nr_to_write -= nr_to_writebump;
         wbc->range_start = range_start;
         trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
         return ret;
@@ -3272,6 +3358,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 /*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
  * If the O_DIRECT write will extend the file then add this inode to the
  * orphan list. So recovery will truncate it back to the original size
  * if the machine crashes during the write.
@@ -3280,7 +3368,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                               const struct iovec *iov, loff_t offset,
                               unsigned long nr_segs)
 {
@@ -3291,6 +3379,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
         ssize_t ret;
         int orphan = 0;
         size_t count = iov_length(iov, nr_segs);
+        int retries = 0;
 
         if (rw == WRITE) {
                 loff_t final_size = offset + count;
@@ -3313,9 +3402,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                 }
         }
 
+retry:
         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs,
                                  ext4_get_block, NULL);
+        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+                goto retry;
 
         if (orphan) {
                 int err;
@@ -3354,6 +3446,364 @@ out:
         return ret;
 }
 
+static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+                                    struct buffer_head *bh_result, int create)
+{
+        handle_t *handle = NULL;
+        int ret = 0;
+        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+        int dio_credits;
+
+        ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+                   inode->i_ino, create);
+        /*
+         * The DIO VFS code passes create = 0 for writes to the
+         * middle of the file.  It does this to avoid block
+         * allocation for holes, to prevent exposing stale data
+         * when there is a parallel buffered read (which does not
+         * hold the i_mutex lock) while the direct IO write has
+         * not completed.  A DIO request on a hole therefore
+         * finally falls back to buffered IO.
+         *
+         * For an ext4 extent-based file, since we support
+         * fallocate, newly allocated extents are uninitialized;
+         * because we can fallocate blocks for holes, a parallel
+         * buffered read will zero out the page when it reads a
+         * hole while the parallel DIO write to the hole has not
+         * completed.
+         *
+         * When we get here, we know it is a direct IO write to
+         * the middle of the file (<i_size), so it is safe to
+         * override the create flag from the VFS.
+         */
+        create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+
+        if (max_blocks > DIO_MAX_BLOCKS)
+                max_blocks = DIO_MAX_BLOCKS;
+        dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+        handle = ext4_journal_start(inode, dio_credits);
+        if (IS_ERR(handle)) {
+                ret = PTR_ERR(handle);
+                goto out;
+        }
+        ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                              create);
+        if (ret > 0) {
+                bh_result->b_size = (ret << inode->i_blkbits);
+                ret = 0;
+        }
+        ext4_journal_stop(handle);
+out:
+        return ret;
+}
+
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+        BUG_ON(!io);
+        iput(io->inode);
+        kfree(io);
+}
+
+static void dump_aio_dio_list(struct inode *inode)
+{
+#ifdef EXT4_DEBUG
+        struct list_head *cur, *before, *after;
+        ext4_io_end_t *io, *io0, *io1;
+
+        if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+                ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+                return;
+        }
+
+        ext4_debug("Dump inode %lu aio_dio_completed_IO list\n", inode->i_ino);
+        list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list) {
+                cur = &io->list;
+                before = cur->prev;
+                io0 = container_of(before, ext4_io_end_t, list);
+                after = cur->next;
+                io1 = container_of(after, ext4_io_end_t, list);
+
+                ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+        }
+#endif
+}
+
+/*
+ * Check a range of space and convert unwritten extents to written.
+ */
+static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+{
+        struct inode *inode = io->inode;
+        loff_t offset = io->offset;
+        size_t size = io->size;
+        int ret = 0;
+
+        ext4_debug("ext4_end_aio_dio_nolock: io 0x%p from inode %lu, "
+                   "list->next 0x%p, list->prev 0x%p\n",
+                   io, inode->i_ino, io->list.next, io->list.prev);
+
+        if (list_empty(&io->list))
+                return ret;
+
+        if (io->flag != DIO_AIO_UNWRITTEN)
+                return ret;
+
+        if (offset + size <= i_size_read(inode))
+                ret = ext4_convert_unwritten_extents(inode, offset, size);
+
+        if (ret < 0) {
+                printk(KERN_EMERG "%s: failed to convert unwritten "
+                       "extents to written extents, error is %d; "
+                       "io is still on inode %lu aio dio list\n",
+                       __func__, ret, inode->i_ino);
+                return ret;
+        }
+
+        /* clear the DIO AIO unwritten flag */
+        io->flag = 0;
+        return ret;
+}
+
+/*
+ * Work on a completed aio dio IO, to convert unwritten extents to written.
+ */
+static void ext4_end_aio_dio_work(struct work_struct *work)
+{
+        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
+        struct inode *inode = io->inode;
+        int ret = 0;
+
+        mutex_lock(&inode->i_mutex);
+        ret = ext4_end_aio_dio_nolock(io);
+        if (ret >= 0) {
+                if (!list_empty(&io->list))
+                        list_del_init(&io->list);
+                ext4_free_io_end(io);
+        }
+        mutex_unlock(&inode->i_mutex);
+}
+
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When AIO DIO IO is completed, the work to convert unwritten
+ * extents to written is queued on a workqueue but may not get
+ * scheduled immediately.  When fsync is called, we need to ensure
+ * the conversion is complete before fsync returns.
+ * The inode keeps track of a list of completed AIO from the DIO
+ * path that might need the conversion.  This function walks through
+ * the list and converts the related unwritten extents to written.
+ */
+int flush_aio_dio_completed_IO(struct inode *inode)
+{
+        ext4_io_end_t *io;
+        int ret = 0;
+        int ret2 = 0;
+
+        if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+                return ret;
+
+        dump_aio_dio_list(inode);
+        while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+                io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+                                ext4_io_end_t, list);
+                /*
+                 * Call ext4_end_aio_dio_nolock() to convert the
+                 * completed IO to written.
+                 *
+                 * When ext4_sync_file() is called, run_queue() may
+                 * already be about to flush the work corresponding to
+                 * this io structure.  It will be upset if it finds
+                 * that the io structure related to the work to be
+                 * scheduled has been freed.
+                 *
+                 * Thus we need to keep the io structure valid here
+                 * after the conversion has finished.  The io structure
+                 * has a flag to avoid double conversion, from both
+                 * fsync and the background workqueue.
+                 */
+                ret = ext4_end_aio_dio_nolock(io);
+                if (ret < 0)
+                        ret2 = ret;
+                else
+                        list_del_init(&io->list);
+        }
+        return (ret2 < 0) ? ret2 : 0;
+}
+
+static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
+{
+        ext4_io_end_t *io = NULL;
+
+        io = kmalloc(sizeof(*io), GFP_NOFS);
+
+        if (io) {
+                igrab(inode);
+                io->inode = inode;
+                io->flag = 0;
+                io->offset = 0;
+                io->size = 0;
+                io->error = 0;
+                INIT_WORK(&io->work, ext4_end_aio_dio_work);
+                INIT_LIST_HEAD(&io->list);
+        }
+
+        return io;
+}
+
+static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+                            ssize_t size, void *private)
+{
+        ext4_io_end_t *io_end = iocb->private;
+        struct workqueue_struct *wq;
+
+        /* if not async direct IO or a dio with a 0 byte write, just return */
+        if (!io_end || !size)
+                return;
+
+        ext_debug("ext4_end_io_dio(): io_end 0x%p "
+                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
+                  iocb->private, io_end->inode->i_ino, iocb, offset,
+                  size);
+
+        /* if not aio dio with unwritten extents, just free io and return */
+        if (io_end->flag != DIO_AIO_UNWRITTEN) {
+                ext4_free_io_end(io_end);
+                iocb->private = NULL;
+                return;
+        }
+
+        io_end->offset = offset;
+        io_end->size = size;
+        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
+        /* queue the work to convert unwritten extents to written */
+        queue_work(wq, &io_end->work);
+
+        /* Add the io_end to the per-inode completed aio dio list */
+        list_add_tail(&io_end->list,
+                      &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+        iocb->private = NULL;
+}
+
+/*
+ * For ext4 extent files, ext4 will do direct-io writes to holes,
+ * to preallocated extents, and to writes that extend the file, with
+ * no need to fall back to buffered IO.
+ *
+ * For holes, we fallocate those blocks and mark them as uninitialized.
+ * If those blocks were preallocated, we make sure they are split, but
+ * still keep the range to write as uninitialized.
+ *
+ * The unwritten extents will be converted to written when the DIO is
+ * completed.  For async direct IO, since the IO may still be pending
+ * when we return, we set up an end_io callback function, which will
+ * do the conversion when the async direct IO is completed.
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ */
+static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+                                  const struct iovec *iov, loff_t offset,
+                                  unsigned long nr_segs)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file->f_mapping->host;
+        ssize_t ret;
+        size_t count = iov_length(iov, nr_segs);
+        loff_t final_size = offset + count;
+
+        if (rw == WRITE && final_size <= inode->i_size) {
+                /*
+                 * We could direct write to holes and fallocate.
+                 *
+                 * Blocks allocated to fill the hole are marked as
+                 * uninitialized to prevent a parallel buffered read
+                 * from exposing the stale data before the DIO
+                 * completes the data IO.
+                 *
+                 * As to previously fallocated extents, ext4 get_block
+                 * will just simply mark the buffer mapped but still
+                 * keep the extents uninitialized.
+                 *
+                 * For the non-AIO case, we will convert those
+                 * unwritten extents to written after returning from
+                 * blockdev_direct_IO.
+                 *
+                 * For async DIO, the conversion needs to be deferred
+                 * until the IO is completed.  The ext4 end_io callback
+                 * function will be called to take care of the
+                 * conversion work.  Here, for the async case, we
+                 * allocate an io_end structure to hook to the iocb.
+                 */
+                iocb->private = NULL;
+                EXT4_I(inode)->cur_aio_dio = NULL;
+                if (!is_sync_kiocb(iocb)) {
+                        iocb->private = ext4_init_io_end(inode);
+                        if (!iocb->private)
+                                return -ENOMEM;
+                        /*
+                         * We save the io structure for the current
+                         * async direct IO, so that later
+                         * ext4_get_blocks() can flag in the io
+                         * structure whether there are unwritten
+                         * extents that need to be converted when the
+                         * IO is completed.
+                         */
+                        EXT4_I(inode)->cur_aio_dio = iocb->private;
+                }
+
+                ret = blockdev_direct_IO(rw, iocb, inode,
+                                         inode->i_sb->s_bdev, iov,
+                                         offset, nr_segs,
+                                         ext4_get_block_dio_write,
+                                         ext4_end_io_dio);
+                if (iocb->private)
+                        EXT4_I(inode)->cur_aio_dio = NULL;
+                /*
+                 * The io_end structure takes a reference to the inode;
+                 * that structure needs to be destroyed and the
+                 * reference to the inode dropped when the IO is
+                 * complete, even for a 0 byte write, or on failure.
+                 *
+                 * In the successful AIO DIO case, the io_end structure
+                 * will be destroyed and the reference to the inode
+                 * will be dropped after the end_io callback function
+                 * is called.
+                 *
+                 * In the 0 byte write or error case, since the VFS
+                 * direct IO won't invoke the end_io callback function,
+                 * we need to free the io_end structure here.
+                 */
+                if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+                        ext4_free_io_end(iocb->private);
+                        iocb->private = NULL;
+                } else if (ret > 0 && (EXT4_I(inode)->i_state &
+                                       EXT4_STATE_DIO_UNWRITTEN)) {
+                        int err;
+                        /*
+                         * for the non-AIO case, since the IO is
+                         * already completed, we can do the conversion
+                         * right here
+                         */
+                        err = ext4_convert_unwritten_extents(inode,
+                                                             offset, ret);
+                        if (err < 0)
+                                ret = err;
+                        EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
+                }
+                return ret;
+        }
+
+        /* for writes to the end of the file, we fall back to the old way */
+        return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                              const struct iovec *iov, loff_t offset,
+                              unsigned long nr_segs)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file->f_mapping->host;
+
+        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+                return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+
+        return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
 /*
  * Pages can be marked dirty completely asynchronously from ext4's journalling
  * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
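For reference: the functions above manipulate an ext4_io_end_t object and a
per-inode i_aio_dio_complete_list that this file does not define; they come
from fs/ext4/ext4.h elsewhere in this patch series. A sketch of the shape
implied by the usage above (an approximation, not the authoritative
definition):

        typedef struct ext4_io_end {
                struct list_head        list;   /* per-inode completed-IO list */
                struct inode            *inode; /* file the IO runs against */
                unsigned int            flag;   /* DIO_AIO_UNWRITTEN or 0 */
                int                     error;  /* IO error code */
                loff_t                  offset; /* start of the written range */
                size_t                  size;   /* length of the written range */
                struct work_struct      work;   /* deferred conversion work */
        } ext4_io_end_t;

Each io_end pins its inode (igrab() in ext4_init_io_end(), iput() in
ext4_free_io_end()) so the deferred conversion can run safely after the
submitting task has returned. fsync drains the list synchronously via
flush_aio_dio_completed_IO() while the workqueue drains it in the background;
io->flag arbitrates which side actually performs the conversion.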
@@ -4551,8 +5001,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
  */
 static int ext4_do_update_inode(handle_t *handle,
                                 struct inode *inode,
-                                struct ext4_iloc *iloc,
-                                int do_sync)
+                                struct ext4_iloc *iloc)
 {
         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
         struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4653,22 +5102,10 @@ static int ext4_do_update_inode(handle_t *handle,
                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
         }
 
-        /*
-         * If we're not using a journal and we were called from
-         * ext4_write_inode() to sync the inode (making do_sync true),
-         * we can just use sync_dirty_buffer() directly to do our dirty
-         * work.  Testing s_journal here is a bit redundant but it's
-         * worth it to avoid potential future trouble.
-         */
-        if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) {
-                BUFFER_TRACE(bh, "call sync_dirty_buffer");
-                sync_dirty_buffer(bh);
-        } else {
-                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-                rc = ext4_handle_dirty_metadata(handle, inode, bh);
-                if (!err)
-                        err = rc;
-        }
+        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+        rc = ext4_handle_dirty_metadata(handle, inode, bh);
+        if (!err)
+                err = rc;
         ei->i_state &= ~EXT4_STATE_NEW;
 
 out_brelse:
@@ -4736,8 +5173,16 @@ int ext4_write_inode(struct inode *inode, int wait)
                 err = ext4_get_inode_loc(inode, &iloc);
                 if (err)
                         return err;
-                err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE,
-                                           inode, &iloc, wait);
+                if (wait)
+                        sync_dirty_buffer(iloc.bh);
+                if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+                        ext4_error(inode->i_sb, __func__,
+                                   "IO error syncing inode, "
+                                   "inode=%lu, block=%llu",
+                                   inode->i_ino,
+                                   (unsigned long long)iloc.bh->b_blocknr);
+                        err = -EIO;
+                }
         }
         return err;
 }
@@ -5033,7 +5478,7 @@ int ext4_mark_iloc_dirty(handle_t *handle,
         get_bh(iloc->bh);
 
         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
-        err = ext4_do_update_inode(handle, inode, iloc, 0);
+        err = ext4_do_update_inode(handle, inode, iloc);
         put_bh(iloc->bh);
         return err;
 }
@@ -5177,27 +5622,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  */
 void ext4_dirty_inode(struct inode *inode)
 {
-        handle_t *current_handle = ext4_journal_current_handle();
         handle_t *handle;
 
-        if (!ext4_handle_valid(current_handle)) {
-                ext4_mark_inode_dirty(current_handle, inode);
-                return;
-        }
-
         handle = ext4_journal_start(inode, 2);
         if (IS_ERR(handle))
                 goto out;
-        if (current_handle &&
-            current_handle->h_transaction != handle->h_transaction) {
-                /* This task has a transaction open against a different fs */
-                printk(KERN_EMERG "%s: transactions do not match!\n",
-                       __func__);
-        } else {
-                jbd_debug(5, "marking dirty. outer handle=%p\n",
-                          current_handle);
-                ext4_mark_inode_dirty(handle, inode);
-        }
+
+        ext4_mark_inode_dirty(handle, inode);
+
         ext4_journal_stop(handle);
 out:
         return;