Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/balloc.c           35
-rw-r--r--   fs/ext4/block_validity.c    4
-rw-r--r--   fs/ext4/dir.c              14
-rw-r--r--   fs/ext4/ext4.h            117
-rw-r--r--   fs/ext4/ext4_jbd2.c         4
-rw-r--r--   fs/ext4/ext4_jbd2.h        24
-rw-r--r--   fs/ext4/extents.c         281
-rw-r--r--   fs/ext4/file.c             13
-rw-r--r--   fs/ext4/fsync.c             2
-rw-r--r--   fs/ext4/ialloc.c           48
-rw-r--r--   fs/ext4/inode.c           564
-rw-r--r--   fs/ext4/ioctl.c            12
-rw-r--r--   fs/ext4/mballoc.c          81
-rw-r--r--   fs/ext4/mballoc.h           9
-rw-r--r--   fs/ext4/migrate.c          35
-rw-r--r--   fs/ext4/move_extent.c      36
-rw-r--r--   fs/ext4/namei.c            86
-rw-r--r--   fs/ext4/resize.c          102
-rw-r--r--   fs/ext4/super.c           361
-rw-r--r--   fs/ext4/xattr.c            64
20 files changed, 1181 insertions, 711 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 22bc7435d913..d2f37a5516c7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -97,8 +97,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
97 | /* If checksum is bad mark all blocks used to prevent allocation | 97 | /* If checksum is bad mark all blocks used to prevent allocation |
98 | * essentially implementing a per-group read-only flag. */ | 98 | * essentially implementing a per-group read-only flag. */ |
99 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { | 99 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { |
100 | ext4_error(sb, __func__, | 100 | ext4_error(sb, "Checksum bad for group %u", |
101 | "Checksum bad for group %u", block_group); | 101 | block_group); |
102 | ext4_free_blks_set(sb, gdp, 0); | 102 | ext4_free_blks_set(sb, gdp, 0); |
103 | ext4_free_inodes_set(sb, gdp, 0); | 103 | ext4_free_inodes_set(sb, gdp, 0); |
104 | ext4_itable_unused_set(sb, gdp, 0); | 104 | ext4_itable_unused_set(sb, gdp, 0); |
@@ -130,8 +130,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
130 | * to make sure we calculate the right free blocks | 130 | * to make sure we calculate the right free blocks |
131 | */ | 131 | */ |
132 | group_blocks = ext4_blocks_count(sbi->s_es) - | 132 | group_blocks = ext4_blocks_count(sbi->s_es) - |
133 | le32_to_cpu(sbi->s_es->s_first_data_block) - | 133 | ext4_group_first_block_no(sb, ngroups - 1); |
134 | (EXT4_BLOCKS_PER_GROUP(sb) * (ngroups - 1)); | ||
135 | } else { | 134 | } else { |
136 | group_blocks = EXT4_BLOCKS_PER_GROUP(sb); | 135 | group_blocks = EXT4_BLOCKS_PER_GROUP(sb); |
137 | } | 136 | } |
@@ -189,9 +188,6 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
189 | * when a file system is mounted (see ext4_fill_super). | 188 | * when a file system is mounted (see ext4_fill_super). |
190 | */ | 189 | */ |
191 | 190 | ||
192 | |||
193 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
194 | |||
195 | /** | 191 | /** |
196 | * ext4_get_group_desc() -- load group descriptor from disk | 192 | * ext4_get_group_desc() -- load group descriptor from disk |
197 | * @sb: super block | 193 | * @sb: super block |
@@ -210,10 +206,8 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, | |||
210 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 206 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
211 | 207 | ||
212 | if (block_group >= ngroups) { | 208 | if (block_group >= ngroups) { |
213 | ext4_error(sb, "ext4_get_group_desc", | 209 | ext4_error(sb, "block_group >= groups_count - block_group = %u," |
214 | "block_group >= groups_count - " | 210 | " groups_count = %u", block_group, ngroups); |
215 | "block_group = %u, groups_count = %u", | ||
216 | block_group, ngroups); | ||
217 | 211 | ||
218 | return NULL; | 212 | return NULL; |
219 | } | 213 | } |
@@ -221,8 +215,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, | |||
221 | group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); | 215 | group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); |
222 | offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); | 216 | offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); |
223 | if (!sbi->s_group_desc[group_desc]) { | 217 | if (!sbi->s_group_desc[group_desc]) { |
224 | ext4_error(sb, "ext4_get_group_desc", | 218 | ext4_error(sb, "Group descriptor not loaded - " |
225 | "Group descriptor not loaded - " | ||
226 | "block_group = %u, group_desc = %u, desc = %u", | 219 | "block_group = %u, group_desc = %u, desc = %u", |
227 | block_group, group_desc, offset); | 220 | block_group, group_desc, offset); |
228 | return NULL; | 221 | return NULL; |
@@ -282,9 +275,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb, | |||
282 | return 1; | 275 | return 1; |
283 | 276 | ||
284 | err_out: | 277 | err_out: |
285 | ext4_error(sb, __func__, | 278 | ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu", |
286 | "Invalid block bitmap - " | ||
287 | "block_group = %d, block = %llu", | ||
288 | block_group, bitmap_blk); | 279 | block_group, bitmap_blk); |
289 | return 0; | 280 | return 0; |
290 | } | 281 | } |
@@ -311,8 +302,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
311 | bitmap_blk = ext4_block_bitmap(sb, desc); | 302 | bitmap_blk = ext4_block_bitmap(sb, desc); |
312 | bh = sb_getblk(sb, bitmap_blk); | 303 | bh = sb_getblk(sb, bitmap_blk); |
313 | if (unlikely(!bh)) { | 304 | if (unlikely(!bh)) { |
314 | ext4_error(sb, __func__, | 305 | ext4_error(sb, "Cannot read block bitmap - " |
315 | "Cannot read block bitmap - " | ||
316 | "block_group = %u, block_bitmap = %llu", | 306 | "block_group = %u, block_bitmap = %llu", |
317 | block_group, bitmap_blk); | 307 | block_group, bitmap_blk); |
318 | return NULL; | 308 | return NULL; |
@@ -354,8 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
354 | set_bitmap_uptodate(bh); | 344 | set_bitmap_uptodate(bh); |
355 | if (bh_submit_read(bh) < 0) { | 345 | if (bh_submit_read(bh) < 0) { |
356 | put_bh(bh); | 346 | put_bh(bh); |
357 | ext4_error(sb, __func__, | 347 | ext4_error(sb, "Cannot read block bitmap - " |
358 | "Cannot read block bitmap - " | ||
359 | "block_group = %u, block_bitmap = %llu", | 348 | "block_group = %u, block_bitmap = %llu", |
360 | block_group, bitmap_blk); | 349 | block_group, bitmap_blk); |
361 | return NULL; | 350 | return NULL; |
@@ -419,8 +408,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | |||
419 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || | 408 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || |
420 | in_range(block + count - 1, ext4_inode_table(sb, desc), | 409 | in_range(block + count - 1, ext4_inode_table(sb, desc), |
421 | sbi->s_itb_per_group)) { | 410 | sbi->s_itb_per_group)) { |
422 | ext4_error(sb, __func__, | 411 | ext4_error(sb, "Adding blocks in system zones - " |
423 | "Adding blocks in system zones - " | ||
424 | "Block = %llu, count = %lu", | 412 | "Block = %llu, count = %lu", |
425 | block, count); | 413 | block, count); |
426 | goto error_return; | 414 | goto error_return; |
@@ -453,8 +441,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | |||
453 | BUFFER_TRACE(bitmap_bh, "clear bit"); | 441 | BUFFER_TRACE(bitmap_bh, "clear bit"); |
454 | if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), | 442 | if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), |
455 | bit + i, bitmap_bh->b_data)) { | 443 | bit + i, bitmap_bh->b_data)) { |
456 | ext4_error(sb, __func__, | 444 | ext4_error(sb, "bit already cleared for block %llu", |
457 | "bit already cleared for block %llu", | ||
458 | (ext4_fsblk_t)(block + i)); | 445 | (ext4_fsblk_t)(block + i)); |
459 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | 446 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); |
460 | } else { | 447 | } else { |
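The group_blocks hunk in ext4_init_block_bitmap() above, and a matching hunk in ext4_ext_find_goal() in extents.c further down, replace an open-coded "first block of group" computation with ext4_group_first_block_no(). The helper's definition is not part of this patch; judging purely from the expressions it replaces, it is equivalent to something like the following sketch (the exact types and casts are assumptions):

	/* Illustrative sketch only, reconstructed from the removed code:
	 * first data block of the filesystem plus 'group' whole groups. */
	static inline ext4_fsblk_t ext4_group_first_block_no(struct super_block *sb,
							     ext4_group_t group)
	{
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
			((ext4_fsblk_t)group * EXT4_BLOCKS_PER_GROUP(sb));
	}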
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index a60ab9aad57d..983f0e127493 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -205,14 +205,14 @@ void ext4_release_system_zone(struct super_block *sb) | |||
205 | entry = rb_entry(n, struct ext4_system_zone, node); | 205 | entry = rb_entry(n, struct ext4_system_zone, node); |
206 | kmem_cache_free(ext4_system_zone_cachep, entry); | 206 | kmem_cache_free(ext4_system_zone_cachep, entry); |
207 | if (!parent) | 207 | if (!parent) |
208 | EXT4_SB(sb)->system_blks.rb_node = NULL; | 208 | EXT4_SB(sb)->system_blks = RB_ROOT; |
209 | else if (parent->rb_left == n) | 209 | else if (parent->rb_left == n) |
210 | parent->rb_left = NULL; | 210 | parent->rb_left = NULL; |
211 | else if (parent->rb_right == n) | 211 | else if (parent->rb_right == n) |
212 | parent->rb_right = NULL; | 212 | parent->rb_right = NULL; |
213 | n = parent; | 213 | n = parent; |
214 | } | 214 | } |
215 | EXT4_SB(sb)->system_blks.rb_node = NULL; | 215 | EXT4_SB(sb)->system_blks = RB_ROOT; |
216 | } | 216 | } |
217 | 217 | ||
218 | /* | 218 | /* |
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 9dc93168e262..86cb6d86a048 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -83,10 +83,12 @@ int ext4_check_dir_entry(const char *function, struct inode *dir, | |||
83 | error_msg = "inode out of bounds"; | 83 | error_msg = "inode out of bounds"; |
84 | 84 | ||
85 | if (error_msg != NULL) | 85 | if (error_msg != NULL) |
86 | ext4_error(dir->i_sb, function, | 86 | __ext4_error(dir->i_sb, function, |
87 | "bad entry in directory #%lu: %s - " | 87 | "bad entry in directory #%lu: %s - block=%llu" |
88 | "offset=%u, inode=%u, rec_len=%d, name_len=%d", | 88 | "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", |
89 | dir->i_ino, error_msg, offset, | 89 | dir->i_ino, error_msg, |
90 | (unsigned long long) bh->b_blocknr, | ||
91 | (unsigned) (offset%bh->b_size), offset, | ||
90 | le32_to_cpu(de->inode), | 92 | le32_to_cpu(de->inode), |
91 | rlen, de->name_len); | 93 | rlen, de->name_len); |
92 | return error_msg == NULL ? 1 : 0; | 94 | return error_msg == NULL ? 1 : 0; |
@@ -150,7 +152,7 @@ static int ext4_readdir(struct file *filp, | |||
150 | */ | 152 | */ |
151 | if (!bh) { | 153 | if (!bh) { |
152 | if (!dir_has_error) { | 154 | if (!dir_has_error) { |
153 | ext4_error(sb, __func__, "directory #%lu " | 155 | ext4_error(sb, "directory #%lu " |
154 | "contains a hole at offset %Lu", | 156 | "contains a hole at offset %Lu", |
155 | inode->i_ino, | 157 | inode->i_ino, |
156 | (unsigned long long) filp->f_pos); | 158 | (unsigned long long) filp->f_pos); |
@@ -303,7 +305,7 @@ static void free_rb_tree_fname(struct rb_root *root) | |||
303 | kfree(old); | 305 | kfree(old); |
304 | } | 306 | } |
305 | if (!parent) | 307 | if (!parent) |
306 | root->rb_node = NULL; | 308 | *root = RB_ROOT; |
307 | else if (parent->rb_left == n) | 309 | else if (parent->rb_left == n) |
308 | parent->rb_left = NULL; | 310 | parent->rb_left = NULL; |
309 | else if (parent->rb_right == n) | 311 | else if (parent->rb_right == n) |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index af7b62699ea9..bf938cf7c5f0 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -53,6 +53,12 @@ | |||
53 | #define ext4_debug(f, a...) do {} while (0) | 53 | #define ext4_debug(f, a...) do {} while (0) |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define EXT4_ERROR_INODE(inode, fmt, a...) \ | ||
57 | ext4_error_inode(__func__, (inode), (fmt), ## a); | ||
58 | |||
59 | #define EXT4_ERROR_FILE(file, fmt, a...) \ | ||
60 | ext4_error_file(__func__, (file), (fmt), ## a); | ||
61 | |||
56 | /* data type for block offset of block group */ | 62 | /* data type for block offset of block group */ |
57 | typedef int ext4_grpblk_t; | 63 | typedef int ext4_grpblk_t; |
58 | 64 | ||
@@ -133,14 +139,14 @@ struct mpage_da_data { | |||
133 | int pages_written; | 139 | int pages_written; |
134 | int retval; | 140 | int retval; |
135 | }; | 141 | }; |
136 | #define DIO_AIO_UNWRITTEN 0x1 | 142 | #define EXT4_IO_UNWRITTEN 0x1 |
137 | typedef struct ext4_io_end { | 143 | typedef struct ext4_io_end { |
138 | struct list_head list; /* per-file finished AIO list */ | 144 | struct list_head list; /* per-file finished AIO list */ |
139 | struct inode *inode; /* file being written to */ | 145 | struct inode *inode; /* file being written to */ |
140 | unsigned int flag; /* unwritten or not */ | 146 | unsigned int flag; /* unwritten or not */ |
141 | int error; /* I/O error code */ | 147 | struct page *page; /* page struct for buffer write */ |
142 | ext4_lblk_t offset; /* offset in the file */ | 148 | loff_t offset; /* offset in the file */ |
143 | size_t size; /* size of the extent */ | 149 | ssize_t size; /* size of the extent */ |
144 | struct work_struct work; /* data work queue */ | 150 | struct work_struct work; /* data work queue */ |
145 | } ext4_io_end_t; | 151 | } ext4_io_end_t; |
146 | 152 | ||
@@ -284,10 +290,12 @@ struct flex_groups { | |||
284 | #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ | 290 | #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ |
285 | #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ | 291 | #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ |
286 | #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ | 292 | #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ |
293 | #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ | ||
294 | #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ | ||
287 | #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ | 295 | #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ |
288 | 296 | ||
289 | #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ | 297 | #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */ |
290 | #define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */ | 298 | #define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */ |
291 | 299 | ||
292 | /* Flags that should be inherited by new inodes from their parent. */ | 300 | /* Flags that should be inherited by new inodes from their parent. */ |
293 | #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ | 301 | #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ |
@@ -313,17 +321,6 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) | |||
313 | return flags & EXT4_OTHER_FLMASK; | 321 | return flags & EXT4_OTHER_FLMASK; |
314 | } | 322 | } |
315 | 323 | ||
316 | /* | ||
317 | * Inode dynamic state flags | ||
318 | */ | ||
319 | #define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */ | ||
320 | #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */ | ||
321 | #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */ | ||
322 | #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */ | ||
323 | #define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */ | ||
324 | #define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */ | ||
325 | #define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/ | ||
326 | |||
327 | /* Used to pass group descriptor data when online resize is done */ | 324 | /* Used to pass group descriptor data when online resize is done */ |
328 | struct ext4_new_group_input { | 325 | struct ext4_new_group_input { |
329 | __u32 group; /* Group number for this data */ | 326 | __u32 group; /* Group number for this data */ |
@@ -361,25 +358,23 @@ struct ext4_new_group_data { | |||
361 | so set the magic i_delalloc_reserve_flag after taking the | 358 | so set the magic i_delalloc_reserve_flag after taking the |
362 | inode allocation semaphore for */ | 359 | inode allocation semaphore for */ |
363 | #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 | 360 | #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 |
364 | /* Call ext4_da_update_reserve_space() after successfully | ||
365 | allocating the blocks */ | ||
366 | #define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008 | ||
367 | /* caller is from the direct IO path, request to creation of an | 361 | /* caller is from the direct IO path, request to creation of an |
368 | unitialized extents if not allocated, split the uninitialized | 362 | unitialized extents if not allocated, split the uninitialized |
369 | extent if blocks has been preallocated already*/ | 363 | extent if blocks has been preallocated already*/ |
370 | #define EXT4_GET_BLOCKS_DIO 0x0010 | 364 | #define EXT4_GET_BLOCKS_PRE_IO 0x0008 |
371 | #define EXT4_GET_BLOCKS_CONVERT 0x0020 | 365 | #define EXT4_GET_BLOCKS_CONVERT 0x0010 |
372 | #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ | 366 | #define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\ |
367 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | ||
368 | /* Convert extent to initialized after IO complete */ | ||
369 | #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ | ||
373 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | 370 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) |
374 | /* Convert extent to initialized after direct IO complete */ | ||
375 | #define EXT4_GET_BLOCKS_DIO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ | ||
376 | EXT4_GET_BLOCKS_DIO_CREATE_EXT) | ||
377 | 371 | ||
378 | /* | 372 | /* |
379 | * Flags used by ext4_free_blocks | 373 | * Flags used by ext4_free_blocks |
380 | */ | 374 | */ |
381 | #define EXT4_FREE_BLOCKS_METADATA 0x0001 | 375 | #define EXT4_FREE_BLOCKS_METADATA 0x0001 |
382 | #define EXT4_FREE_BLOCKS_FORGET 0x0002 | 376 | #define EXT4_FREE_BLOCKS_FORGET 0x0002 |
377 | #define EXT4_FREE_BLOCKS_VALIDATED 0x0004 | ||
383 | 378 | ||
384 | /* | 379 | /* |
385 | * ioctl commands | 380 | * ioctl commands |
@@ -633,7 +628,7 @@ struct ext4_inode_info { | |||
633 | * near to their parent directory's inode. | 628 | * near to their parent directory's inode. |
634 | */ | 629 | */ |
635 | ext4_group_t i_block_group; | 630 | ext4_group_t i_block_group; |
636 | __u32 i_state; /* Dynamic state flags for ext4 */ | 631 | unsigned long i_state_flags; /* Dynamic state flags */ |
637 | 632 | ||
638 | ext4_lblk_t i_dir_start_lookup; | 633 | ext4_lblk_t i_dir_start_lookup; |
639 | #ifdef CONFIG_EXT4_FS_XATTR | 634 | #ifdef CONFIG_EXT4_FS_XATTR |
@@ -711,8 +706,9 @@ struct ext4_inode_info { | |||
711 | qsize_t i_reserved_quota; | 706 | qsize_t i_reserved_quota; |
712 | #endif | 707 | #endif |
713 | 708 | ||
714 | /* completed async DIOs that might need unwritten extents handling */ | 709 | /* completed IOs that might need unwritten extents handling */ |
715 | struct list_head i_aio_dio_complete_list; | 710 | struct list_head i_completed_io_list; |
711 | spinlock_t i_completed_io_lock; | ||
716 | /* current io_end structure for async DIO write*/ | 712 | /* current io_end structure for async DIO write*/ |
717 | ext4_io_end_t *cur_aio_dio; | 713 | ext4_io_end_t *cur_aio_dio; |
718 | 714 | ||
@@ -763,6 +759,7 @@ struct ext4_inode_info { | |||
763 | #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ | 759 | #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ |
764 | #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ | 760 | #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ |
765 | #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ | 761 | #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ |
762 | #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ | ||
766 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ | 763 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ |
767 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ | 764 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ |
768 | #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ | 765 | #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ |
@@ -1017,7 +1014,7 @@ struct ext4_sb_info { | |||
1017 | atomic_t s_lock_busy; | 1014 | atomic_t s_lock_busy; |
1018 | 1015 | ||
1019 | /* locality groups */ | 1016 | /* locality groups */ |
1020 | struct ext4_locality_group *s_locality_groups; | 1017 | struct ext4_locality_group __percpu *s_locality_groups; |
1021 | 1018 | ||
1022 | /* for write statistics */ | 1019 | /* for write statistics */ |
1023 | unsigned long s_sectors_written_start; | 1020 | unsigned long s_sectors_written_start; |
@@ -1053,6 +1050,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
1053 | (ino >= EXT4_FIRST_INO(sb) && | 1050 | (ino >= EXT4_FIRST_INO(sb) && |
1054 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); | 1051 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); |
1055 | } | 1052 | } |
1053 | |||
1054 | /* | ||
1055 | * Inode dynamic state flags | ||
1056 | */ | ||
1057 | enum { | ||
1058 | EXT4_STATE_JDATA, /* journaled data exists */ | ||
1059 | EXT4_STATE_NEW, /* inode is newly created */ | ||
1060 | EXT4_STATE_XATTR, /* has in-inode xattrs */ | ||
1061 | EXT4_STATE_NO_EXPAND, /* No space for expansion */ | ||
1062 | EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ | ||
1063 | EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ | ||
1064 | EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ | ||
1065 | }; | ||
1066 | |||
1067 | static inline int ext4_test_inode_state(struct inode *inode, int bit) | ||
1068 | { | ||
1069 | return test_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
1070 | } | ||
1071 | |||
1072 | static inline void ext4_set_inode_state(struct inode *inode, int bit) | ||
1073 | { | ||
1074 | set_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
1075 | } | ||
1076 | |||
1077 | static inline void ext4_clear_inode_state(struct inode *inode, int bit) | ||
1078 | { | ||
1079 | clear_bit(bit, &EXT4_I(inode)->i_state_flags); | ||
1080 | } | ||
1056 | #else | 1081 | #else |
1057 | /* Assume that user mode programs are passing in an ext4fs superblock, not | 1082 | /* Assume that user mode programs are passing in an ext4fs superblock, not |
1058 | * a kernel struct super_block. This will allow us to call the feature-test | 1083 | * a kernel struct super_block. This will allow us to call the feature-test |
@@ -1129,6 +1154,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | |||
1129 | #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 | 1154 | #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 |
1130 | #define EXT4_FEATURE_INCOMPAT_MMP 0x0100 | 1155 | #define EXT4_FEATURE_INCOMPAT_MMP 0x0100 |
1131 | #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 | 1156 | #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 |
1157 | #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ | ||
1158 | #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ | ||
1132 | 1159 | ||
1133 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR | 1160 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR |
1134 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ | 1161 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ |
@@ -1419,7 +1446,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock, | |||
1419 | struct buffer_head *bh_result, int create); | 1446 | struct buffer_head *bh_result, int create); |
1420 | 1447 | ||
1421 | extern struct inode *ext4_iget(struct super_block *, unsigned long); | 1448 | extern struct inode *ext4_iget(struct super_block *, unsigned long); |
1422 | extern int ext4_write_inode(struct inode *, int); | 1449 | extern int ext4_write_inode(struct inode *, struct writeback_control *); |
1423 | extern int ext4_setattr(struct dentry *, struct iattr *); | 1450 | extern int ext4_setattr(struct dentry *, struct iattr *); |
1424 | extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1451 | extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1425 | struct kstat *stat); | 1452 | struct kstat *stat); |
@@ -1442,7 +1469,9 @@ extern int ext4_block_truncate_page(handle_t *handle, | |||
1442 | struct address_space *mapping, loff_t from); | 1469 | struct address_space *mapping, loff_t from); |
1443 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1470 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
1444 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); | 1471 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
1445 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1472 | extern int flush_completed_IO(struct inode *inode); |
1473 | extern void ext4_da_update_reserve_space(struct inode *inode, | ||
1474 | int used, int quota_claim); | ||
1446 | /* ioctl.c */ | 1475 | /* ioctl.c */ |
1447 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | 1476 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
1448 | extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); | 1477 | extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); |
@@ -1466,13 +1495,20 @@ extern int ext4_group_extend(struct super_block *sb, | |||
1466 | ext4_fsblk_t n_blocks_count); | 1495 | ext4_fsblk_t n_blocks_count); |
1467 | 1496 | ||
1468 | /* super.c */ | 1497 | /* super.c */ |
1469 | extern void ext4_error(struct super_block *, const char *, const char *, ...) | 1498 | extern void __ext4_error(struct super_block *, const char *, const char *, ...) |
1499 | __attribute__ ((format (printf, 3, 4))); | ||
1500 | #define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message) | ||
1501 | extern void ext4_error_inode(const char *, struct inode *, const char *, ...) | ||
1502 | __attribute__ ((format (printf, 3, 4))); | ||
1503 | extern void ext4_error_file(const char *, struct file *, const char *, ...) | ||
1470 | __attribute__ ((format (printf, 3, 4))); | 1504 | __attribute__ ((format (printf, 3, 4))); |
1471 | extern void __ext4_std_error(struct super_block *, const char *, int); | 1505 | extern void __ext4_std_error(struct super_block *, const char *, int); |
1472 | extern void ext4_abort(struct super_block *, const char *, const char *, ...) | 1506 | extern void ext4_abort(struct super_block *, const char *, const char *, ...) |
1473 | __attribute__ ((format (printf, 3, 4))); | 1507 | __attribute__ ((format (printf, 3, 4))); |
1474 | extern void ext4_warning(struct super_block *, const char *, const char *, ...) | 1508 | extern void __ext4_warning(struct super_block *, const char *, |
1509 | const char *, ...) | ||
1475 | __attribute__ ((format (printf, 3, 4))); | 1510 | __attribute__ ((format (printf, 3, 4))); |
1511 | #define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message) | ||
1476 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) | 1512 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) |
1477 | __attribute__ ((format (printf, 3, 4))); | 1513 | __attribute__ ((format (printf, 3, 4))); |
1478 | extern void ext4_grp_locked_error(struct super_block *, ext4_group_t, | 1514 | extern void ext4_grp_locked_error(struct super_block *, ext4_group_t, |
@@ -1745,7 +1781,7 @@ extern void ext4_ext_release(struct super_block *); | |||
1745 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, | 1781 | extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, |
1746 | loff_t len); | 1782 | loff_t len); |
1747 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 1783 | extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
1748 | loff_t len); | 1784 | ssize_t len); |
1749 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, | 1785 | extern int ext4_get_blocks(handle_t *handle, struct inode *inode, |
1750 | sector_t block, unsigned int max_blocks, | 1786 | sector_t block, unsigned int max_blocks, |
1751 | struct buffer_head *bh, int flags); | 1787 | struct buffer_head *bh, int flags); |
@@ -1757,6 +1793,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
1757 | __u64 len, __u64 *moved_len); | 1793 | __u64 len, __u64 *moved_len); |
1758 | 1794 | ||
1759 | 1795 | ||
1796 | /* BH_Uninit flag: blocks are allocated but uninitialized on disk */ | ||
1797 | enum ext4_state_bits { | ||
1798 | BH_Uninit /* blocks are allocated but uninitialized on disk */ | ||
1799 | = BH_JBDPrivateStart, | ||
1800 | }; | ||
1801 | |||
1802 | BUFFER_FNS(Uninit, uninit) | ||
1803 | TAS_BUFFER_FNS(Uninit, uninit) | ||
1804 | |||
1760 | /* | 1805 | /* |
1761 | * Add new method to test wether block and inode bitmaps are properly | 1806 | * Add new method to test wether block and inode bitmaps are properly |
1762 | * initialized. With uninit_bg reading the block from disk is not enough | 1807 | * initialized. With uninit_bg reading the block from disk is not enough |
@@ -1774,6 +1819,8 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) | |||
1774 | set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); | 1819 | set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); |
1775 | } | 1820 | } |
1776 | 1821 | ||
1822 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
1823 | |||
1777 | #endif /* __KERNEL__ */ | 1824 | #endif /* __KERNEL__ */ |
1778 | 1825 | ||
1779 | #endif /* _EXT4_H */ | 1826 | #endif /* _EXT4_H */ |
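The ext4.h hunks above convert the inode's dynamic state from a __u32 i_state bitmask to an unsigned long i_state_flags manipulated through ext4_test_inode_state(), ext4_set_inode_state() and ext4_clear_inode_state(), i.e. atomic test_bit()/set_bit()/clear_bit() operations. The extents.c hunks later in this patch perform the call-site conversion; schematically (an illustrative snippet, not itself part of the patch):

	/* before: non-atomic read-modify-write on the old __u32 i_state */
	EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;

	/* after: atomic bit operations on i_state_flags via the new helpers */
	ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
	if (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN))
		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);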
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b57e5c711b6d..53d2764d71ca 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -125,14 +125,14 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle, | |||
125 | ext4_journal_abort_handle(where, __func__, bh, | 125 | ext4_journal_abort_handle(where, __func__, bh, |
126 | handle, err); | 126 | handle, err); |
127 | } else { | 127 | } else { |
128 | if (inode && bh) | 128 | if (inode) |
129 | mark_buffer_dirty_inode(bh, inode); | 129 | mark_buffer_dirty_inode(bh, inode); |
130 | else | 130 | else |
131 | mark_buffer_dirty(bh); | 131 | mark_buffer_dirty(bh); |
132 | if (inode && inode_needs_sync(inode)) { | 132 | if (inode && inode_needs_sync(inode)) { |
133 | sync_dirty_buffer(bh); | 133 | sync_dirty_buffer(bh); |
134 | if (buffer_req(bh) && !buffer_uptodate(bh)) { | 134 | if (buffer_req(bh) && !buffer_uptodate(bh)) { |
135 | ext4_error(inode->i_sb, __func__, | 135 | ext4_error(inode->i_sb, |
136 | "IO error syncing inode, " | 136 | "IO error syncing inode, " |
137 | "inode=%lu, block=%llu", | 137 | "inode=%lu, block=%llu", |
138 | inode->i_ino, | 138 | inode->i_ino, |
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 05eca817d704..b79ad5126468 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -304,4 +304,28 @@ static inline int ext4_should_writeback_data(struct inode *inode) | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | ||
308 | * This function controls whether or not we should try to go down the | ||
309 | * dioread_nolock code paths, which makes it safe to avoid taking | ||
310 | * i_mutex for direct I/O reads. This only works for extent-based | ||
311 | * files, and it doesn't work for nobh or if data journaling is | ||
312 | * enabled, since the dioread_nolock code uses b_private to pass | ||
313 | * information back to the I/O completion handler, and this conflicts | ||
314 | * with the jbd's use of b_private. | ||
315 | */ | ||
316 | static inline int ext4_should_dioread_nolock(struct inode *inode) | ||
317 | { | ||
318 | if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) | ||
319 | return 0; | ||
320 | if (test_opt(inode->i_sb, NOBH)) | ||
321 | return 0; | ||
322 | if (!S_ISREG(inode->i_mode)) | ||
323 | return 0; | ||
324 | if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | ||
325 | return 0; | ||
326 | if (ext4_should_journal_data(inode)) | ||
327 | return 0; | ||
328 | return 1; | ||
329 | } | ||
330 | |||
307 | #endif /* _EXT4_JBD2_H */ | 331 | #endif /* _EXT4_JBD2_H */ |
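The comment block added above spells out when the dioread_nolock path is allowed (extent-mapped regular files, no nobh, no data journaling). The extents.c changes below consume the new helper when mapping blocks for a write; the pattern there is simply:

	/* From ext4_ext_get_blocks() later in this patch: tag the buffer so
	 * writeback goes through the uninitialized-extent path and converts
	 * the extent at I/O completion, keeping lockless reads safe. */
	if (ext4_should_dioread_nolock(inode))
		set_buffer_uninit(bh_result);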
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7d7b74e94687..94c8ee81f5e1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -195,8 +195,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
195 | if (S_ISREG(inode->i_mode)) | 195 | if (S_ISREG(inode->i_mode)) |
196 | block_group++; | 196 | block_group++; |
197 | } | 197 | } |
198 | bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + | 198 | bg_start = ext4_group_first_block_no(inode->i_sb, block_group); |
199 | le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); | ||
200 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; | 199 | last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; |
201 | 200 | ||
202 | /* | 201 | /* |
@@ -440,7 +439,7 @@ static int __ext4_ext_check(const char *function, struct inode *inode, | |||
440 | return 0; | 439 | return 0; |
441 | 440 | ||
442 | corrupted: | 441 | corrupted: |
443 | ext4_error(inode->i_sb, function, | 442 | __ext4_error(inode->i_sb, function, |
444 | "bad header/extent in inode #%lu: %s - magic %x, " | 443 | "bad header/extent in inode #%lu: %s - magic %x, " |
445 | "entries %u, max %u(%u), depth %u(%u)", | 444 | "entries %u, max %u(%u), depth %u(%u)", |
446 | inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), | 445 | inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), |
@@ -703,7 +702,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, | |||
703 | } | 702 | } |
704 | eh = ext_block_hdr(bh); | 703 | eh = ext_block_hdr(bh); |
705 | ppos++; | 704 | ppos++; |
706 | BUG_ON(ppos > depth); | 705 | if (unlikely(ppos > depth)) { |
706 | put_bh(bh); | ||
707 | EXT4_ERROR_INODE(inode, | ||
708 | "ppos %d > depth %d", ppos, depth); | ||
709 | goto err; | ||
710 | } | ||
707 | path[ppos].p_bh = bh; | 711 | path[ppos].p_bh = bh; |
708 | path[ppos].p_hdr = eh; | 712 | path[ppos].p_hdr = eh; |
709 | i--; | 713 | i--; |
@@ -749,7 +753,12 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | |||
749 | if (err) | 753 | if (err) |
750 | return err; | 754 | return err; |
751 | 755 | ||
752 | BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); | 756 | if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { |
757 | EXT4_ERROR_INODE(inode, | ||
758 | "logical %d == ei_block %d!", | ||
759 | logical, le32_to_cpu(curp->p_idx->ei_block)); | ||
760 | return -EIO; | ||
761 | } | ||
753 | len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; | 762 | len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx; |
754 | if (logical > le32_to_cpu(curp->p_idx->ei_block)) { | 763 | if (logical > le32_to_cpu(curp->p_idx->ei_block)) { |
755 | /* insert after */ | 764 | /* insert after */ |
@@ -779,9 +788,17 @@ int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | |||
779 | ext4_idx_store_pblock(ix, ptr); | 788 | ext4_idx_store_pblock(ix, ptr); |
780 | le16_add_cpu(&curp->p_hdr->eh_entries, 1); | 789 | le16_add_cpu(&curp->p_hdr->eh_entries, 1); |
781 | 790 | ||
782 | BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries) | 791 | if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) |
783 | > le16_to_cpu(curp->p_hdr->eh_max)); | 792 | > le16_to_cpu(curp->p_hdr->eh_max))) { |
784 | BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr)); | 793 | EXT4_ERROR_INODE(inode, |
794 | "logical %d == ei_block %d!", | ||
795 | logical, le32_to_cpu(curp->p_idx->ei_block)); | ||
796 | return -EIO; | ||
797 | } | ||
798 | if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { | ||
799 | EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); | ||
800 | return -EIO; | ||
801 | } | ||
785 | 802 | ||
786 | err = ext4_ext_dirty(handle, inode, curp); | 803 | err = ext4_ext_dirty(handle, inode, curp); |
787 | ext4_std_error(inode->i_sb, err); | 804 | ext4_std_error(inode->i_sb, err); |
@@ -819,7 +836,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
819 | 836 | ||
820 | /* if current leaf will be split, then we should use | 837 | /* if current leaf will be split, then we should use |
821 | * border from split point */ | 838 | * border from split point */ |
822 | BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr)); | 839 | if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { |
840 | EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); | ||
841 | return -EIO; | ||
842 | } | ||
823 | if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { | 843 | if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { |
824 | border = path[depth].p_ext[1].ee_block; | 844 | border = path[depth].p_ext[1].ee_block; |
825 | ext_debug("leaf will be split." | 845 | ext_debug("leaf will be split." |
@@ -860,7 +880,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
860 | 880 | ||
861 | /* initialize new leaf */ | 881 | /* initialize new leaf */ |
862 | newblock = ablocks[--a]; | 882 | newblock = ablocks[--a]; |
863 | BUG_ON(newblock == 0); | 883 | if (unlikely(newblock == 0)) { |
884 | EXT4_ERROR_INODE(inode, "newblock == 0!"); | ||
885 | err = -EIO; | ||
886 | goto cleanup; | ||
887 | } | ||
864 | bh = sb_getblk(inode->i_sb, newblock); | 888 | bh = sb_getblk(inode->i_sb, newblock); |
865 | if (!bh) { | 889 | if (!bh) { |
866 | err = -EIO; | 890 | err = -EIO; |
@@ -880,7 +904,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
880 | ex = EXT_FIRST_EXTENT(neh); | 904 | ex = EXT_FIRST_EXTENT(neh); |
881 | 905 | ||
882 | /* move remainder of path[depth] to the new leaf */ | 906 | /* move remainder of path[depth] to the new leaf */ |
883 | BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max); | 907 | if (unlikely(path[depth].p_hdr->eh_entries != |
908 | path[depth].p_hdr->eh_max)) { | ||
909 | EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", | ||
910 | path[depth].p_hdr->eh_entries, | ||
911 | path[depth].p_hdr->eh_max); | ||
912 | err = -EIO; | ||
913 | goto cleanup; | ||
914 | } | ||
884 | /* start copy from next extent */ | 915 | /* start copy from next extent */ |
885 | /* TODO: we could do it by single memmove */ | 916 | /* TODO: we could do it by single memmove */ |
886 | m = 0; | 917 | m = 0; |
@@ -927,7 +958,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
927 | 958 | ||
928 | /* create intermediate indexes */ | 959 | /* create intermediate indexes */ |
929 | k = depth - at - 1; | 960 | k = depth - at - 1; |
930 | BUG_ON(k < 0); | 961 | if (unlikely(k < 0)) { |
962 | EXT4_ERROR_INODE(inode, "k %d < 0!", k); | ||
963 | err = -EIO; | ||
964 | goto cleanup; | ||
965 | } | ||
931 | if (k) | 966 | if (k) |
932 | ext_debug("create %d intermediate indices\n", k); | 967 | ext_debug("create %d intermediate indices\n", k); |
933 | /* insert new index into current index block */ | 968 | /* insert new index into current index block */ |
@@ -964,8 +999,14 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
964 | 999 | ||
965 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, | 1000 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
966 | EXT_MAX_INDEX(path[i].p_hdr)); | 1001 | EXT_MAX_INDEX(path[i].p_hdr)); |
967 | BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) != | 1002 | if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
968 | EXT_LAST_INDEX(path[i].p_hdr)); | 1003 | EXT_LAST_INDEX(path[i].p_hdr))) { |
1004 | EXT4_ERROR_INODE(inode, | ||
1005 | "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", | ||
1006 | le32_to_cpu(path[i].p_ext->ee_block)); | ||
1007 | err = -EIO; | ||
1008 | goto cleanup; | ||
1009 | } | ||
969 | while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { | 1010 | while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { |
970 | ext_debug("%d: move %d:%llu in new index %llu\n", i, | 1011 | ext_debug("%d: move %d:%llu in new index %llu\n", i, |
971 | le32_to_cpu(path[i].p_idx->ei_block), | 1012 | le32_to_cpu(path[i].p_idx->ei_block), |
@@ -1203,7 +1244,10 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, | |||
1203 | struct ext4_extent *ex; | 1244 | struct ext4_extent *ex; |
1204 | int depth, ee_len; | 1245 | int depth, ee_len; |
1205 | 1246 | ||
1206 | BUG_ON(path == NULL); | 1247 | if (unlikely(path == NULL)) { |
1248 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | ||
1249 | return -EIO; | ||
1250 | } | ||
1207 | depth = path->p_depth; | 1251 | depth = path->p_depth; |
1208 | *phys = 0; | 1252 | *phys = 0; |
1209 | 1253 | ||
@@ -1217,15 +1261,33 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, | |||
1217 | ex = path[depth].p_ext; | 1261 | ex = path[depth].p_ext; |
1218 | ee_len = ext4_ext_get_actual_len(ex); | 1262 | ee_len = ext4_ext_get_actual_len(ex); |
1219 | if (*logical < le32_to_cpu(ex->ee_block)) { | 1263 | if (*logical < le32_to_cpu(ex->ee_block)) { |
1220 | BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); | 1264 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1265 | EXT4_ERROR_INODE(inode, | ||
1266 | "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", | ||
1267 | *logical, le32_to_cpu(ex->ee_block)); | ||
1268 | return -EIO; | ||
1269 | } | ||
1221 | while (--depth >= 0) { | 1270 | while (--depth >= 0) { |
1222 | ix = path[depth].p_idx; | 1271 | ix = path[depth].p_idx; |
1223 | BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); | 1272 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1273 | EXT4_ERROR_INODE(inode, | ||
1274 | "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", | ||
1275 | ix != NULL ? ix->ei_block : 0, | ||
1276 | EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? | ||
1277 | EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0, | ||
1278 | depth); | ||
1279 | return -EIO; | ||
1280 | } | ||
1224 | } | 1281 | } |
1225 | return 0; | 1282 | return 0; |
1226 | } | 1283 | } |
1227 | 1284 | ||
1228 | BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); | 1285 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1286 | EXT4_ERROR_INODE(inode, | ||
1287 | "logical %d < ee_block %d + ee_len %d!", | ||
1288 | *logical, le32_to_cpu(ex->ee_block), ee_len); | ||
1289 | return -EIO; | ||
1290 | } | ||
1229 | 1291 | ||
1230 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; | 1292 | *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
1231 | *phys = ext_pblock(ex) + ee_len - 1; | 1293 | *phys = ext_pblock(ex) + ee_len - 1; |
@@ -1251,7 +1313,10 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, | |||
1251 | int depth; /* Note, NOT eh_depth; depth from top of tree */ | 1313 | int depth; /* Note, NOT eh_depth; depth from top of tree */ |
1252 | int ee_len; | 1314 | int ee_len; |
1253 | 1315 | ||
1254 | BUG_ON(path == NULL); | 1316 | if (unlikely(path == NULL)) { |
1317 | EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); | ||
1318 | return -EIO; | ||
1319 | } | ||
1255 | depth = path->p_depth; | 1320 | depth = path->p_depth; |
1256 | *phys = 0; | 1321 | *phys = 0; |
1257 | 1322 | ||
@@ -1265,17 +1330,32 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, | |||
1265 | ex = path[depth].p_ext; | 1330 | ex = path[depth].p_ext; |
1266 | ee_len = ext4_ext_get_actual_len(ex); | 1331 | ee_len = ext4_ext_get_actual_len(ex); |
1267 | if (*logical < le32_to_cpu(ex->ee_block)) { | 1332 | if (*logical < le32_to_cpu(ex->ee_block)) { |
1268 | BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex); | 1333 | if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
1334 | EXT4_ERROR_INODE(inode, | ||
1335 | "first_extent(path[%d].p_hdr) != ex", | ||
1336 | depth); | ||
1337 | return -EIO; | ||
1338 | } | ||
1269 | while (--depth >= 0) { | 1339 | while (--depth >= 0) { |
1270 | ix = path[depth].p_idx; | 1340 | ix = path[depth].p_idx; |
1271 | BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr)); | 1341 | if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
1342 | EXT4_ERROR_INODE(inode, | ||
1343 | "ix != EXT_FIRST_INDEX *logical %d!", | ||
1344 | *logical); | ||
1345 | return -EIO; | ||
1346 | } | ||
1272 | } | 1347 | } |
1273 | *logical = le32_to_cpu(ex->ee_block); | 1348 | *logical = le32_to_cpu(ex->ee_block); |
1274 | *phys = ext_pblock(ex); | 1349 | *phys = ext_pblock(ex); |
1275 | return 0; | 1350 | return 0; |
1276 | } | 1351 | } |
1277 | 1352 | ||
1278 | BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len)); | 1353 | if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
1354 | EXT4_ERROR_INODE(inode, | ||
1355 | "logical %d < ee_block %d + ee_len %d!", | ||
1356 | *logical, le32_to_cpu(ex->ee_block), ee_len); | ||
1357 | return -EIO; | ||
1358 | } | ||
1279 | 1359 | ||
1280 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { | 1360 | if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { |
1281 | /* next allocated block in this leaf */ | 1361 | /* next allocated block in this leaf */ |
@@ -1414,8 +1494,12 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, | |||
1414 | 1494 | ||
1415 | eh = path[depth].p_hdr; | 1495 | eh = path[depth].p_hdr; |
1416 | ex = path[depth].p_ext; | 1496 | ex = path[depth].p_ext; |
1417 | BUG_ON(ex == NULL); | 1497 | |
1418 | BUG_ON(eh == NULL); | 1498 | if (unlikely(ex == NULL || eh == NULL)) { |
1499 | EXT4_ERROR_INODE(inode, | ||
1500 | "ex %p == NULL or eh %p == NULL", ex, eh); | ||
1501 | return -EIO; | ||
1502 | } | ||
1419 | 1503 | ||
1420 | if (depth == 0) { | 1504 | if (depth == 0) { |
1421 | /* there is no tree at all */ | 1505 | /* there is no tree at all */ |
@@ -1538,8 +1622,9 @@ int ext4_ext_try_to_merge(struct inode *inode, | |||
1538 | merge_done = 1; | 1622 | merge_done = 1; |
1539 | WARN_ON(eh->eh_entries == 0); | 1623 | WARN_ON(eh->eh_entries == 0); |
1540 | if (!eh->eh_entries) | 1624 | if (!eh->eh_entries) |
1541 | ext4_error(inode->i_sb, "ext4_ext_try_to_merge", | 1625 | ext4_error(inode->i_sb, |
1542 | "inode#%lu, eh->eh_entries = 0!", inode->i_ino); | 1626 | "inode#%lu, eh->eh_entries = 0!", |
1627 | inode->i_ino); | ||
1543 | } | 1628 | } |
1544 | 1629 | ||
1545 | return merge_done; | 1630 | return merge_done; |
@@ -1612,13 +1697,19 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |||
1612 | ext4_lblk_t next; | 1697 | ext4_lblk_t next; |
1613 | unsigned uninitialized = 0; | 1698 | unsigned uninitialized = 0; |
1614 | 1699 | ||
1615 | BUG_ON(ext4_ext_get_actual_len(newext) == 0); | 1700 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
1701 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); | ||
1702 | return -EIO; | ||
1703 | } | ||
1616 | depth = ext_depth(inode); | 1704 | depth = ext_depth(inode); |
1617 | ex = path[depth].p_ext; | 1705 | ex = path[depth].p_ext; |
1618 | BUG_ON(path[depth].p_hdr == NULL); | 1706 | if (unlikely(path[depth].p_hdr == NULL)) { |
1707 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | ||
1708 | return -EIO; | ||
1709 | } | ||
1619 | 1710 | ||
1620 | /* try to insert block into found extent and return */ | 1711 | /* try to insert block into found extent and return */ |
1621 | if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) | 1712 | if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) |
1622 | && ext4_can_extents_be_merged(inode, ex, newext)) { | 1713 | && ext4_can_extents_be_merged(inode, ex, newext)) { |
1623 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", | 1714 | ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", |
1624 | ext4_ext_is_uninitialized(newext), | 1715 | ext4_ext_is_uninitialized(newext), |
@@ -1739,7 +1830,7 @@ has_space: | |||
1739 | 1830 | ||
1740 | merge: | 1831 | merge: |
1741 | /* try to merge extents to the right */ | 1832 | /* try to merge extents to the right */ |
1742 | if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT) | 1833 | if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) |
1743 | ext4_ext_try_to_merge(inode, path, nearex); | 1834 | ext4_ext_try_to_merge(inode, path, nearex); |
1744 | 1835 | ||
1745 | /* try to merge extents to the left */ | 1836 | /* try to merge extents to the left */ |
@@ -1787,7 +1878,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
1787 | } | 1878 | } |
1788 | 1879 | ||
1789 | depth = ext_depth(inode); | 1880 | depth = ext_depth(inode); |
1790 | BUG_ON(path[depth].p_hdr == NULL); | 1881 | if (unlikely(path[depth].p_hdr == NULL)) { |
1882 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | ||
1883 | err = -EIO; | ||
1884 | break; | ||
1885 | } | ||
1791 | ex = path[depth].p_ext; | 1886 | ex = path[depth].p_ext; |
1792 | next = ext4_ext_next_allocated_block(path); | 1887 | next = ext4_ext_next_allocated_block(path); |
1793 | 1888 | ||
@@ -1838,7 +1933,11 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
1838 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; | 1933 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; |
1839 | } | 1934 | } |
1840 | 1935 | ||
1841 | BUG_ON(cbex.ec_len == 0); | 1936 | if (unlikely(cbex.ec_len == 0)) { |
1937 | EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); | ||
1938 | err = -EIO; | ||
1939 | break; | ||
1940 | } | ||
1842 | err = func(inode, path, &cbex, ex, cbdata); | 1941 | err = func(inode, path, &cbex, ex, cbdata); |
1843 | ext4_ext_drop_refs(path); | 1942 | ext4_ext_drop_refs(path); |
1844 | 1943 | ||
@@ -1952,7 +2051,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
1952 | 2051 | ||
1953 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && | 2052 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && |
1954 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); | 2053 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); |
1955 | if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) { | 2054 | if (in_range(block, cex->ec_block, cex->ec_len)) { |
1956 | ex->ee_block = cpu_to_le32(cex->ec_block); | 2055 | ex->ee_block = cpu_to_le32(cex->ec_block); |
1957 | ext4_ext_store_pblock(ex, cex->ec_start); | 2056 | ext4_ext_store_pblock(ex, cex->ec_start); |
1958 | ex->ee_len = cpu_to_le16(cex->ec_len); | 2057 | ex->ee_len = cpu_to_le16(cex->ec_len); |
@@ -1981,7 +2080,10 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, | |||
1981 | /* free index block */ | 2080 | /* free index block */ |
1982 | path--; | 2081 | path--; |
1983 | leaf = idx_pblock(path->p_idx); | 2082 | leaf = idx_pblock(path->p_idx); |
1984 | BUG_ON(path->p_hdr->eh_entries == 0); | 2083 | if (unlikely(path->p_hdr->eh_entries == 0)) { |
2084 | EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); | ||
2085 | return -EIO; | ||
2086 | } | ||
1985 | err = ext4_ext_get_access(handle, inode, path); | 2087 | err = ext4_ext_get_access(handle, inode, path); |
1986 | if (err) | 2088 | if (err) |
1987 | return err; | 2089 | return err; |
@@ -2119,8 +2221,10 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2119 | if (!path[depth].p_hdr) | 2221 | if (!path[depth].p_hdr) |
2120 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); | 2222 | path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); |
2121 | eh = path[depth].p_hdr; | 2223 | eh = path[depth].p_hdr; |
2122 | BUG_ON(eh == NULL); | 2224 | if (unlikely(path[depth].p_hdr == NULL)) { |
2123 | 2225 | EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); | |
2226 | return -EIO; | ||
2227 | } | ||
2124 | /* find where to start removing */ | 2228 | /* find where to start removing */ |
2125 | ex = EXT_LAST_EXTENT(eh); | 2229 | ex = EXT_LAST_EXTENT(eh); |
2126 | 2230 | ||
@@ -2983,7 +3087,7 @@ fix_extent_len: | |||
2983 | ext4_ext_dirty(handle, inode, path + depth); | 3087 | ext4_ext_dirty(handle, inode, path + depth); |
2984 | return err; | 3088 | return err; |
2985 | } | 3089 | } |
2986 | static int ext4_convert_unwritten_extents_dio(handle_t *handle, | 3090 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
2987 | struct inode *inode, | 3091 | struct inode *inode, |
2988 | struct ext4_ext_path *path) | 3092 | struct ext4_ext_path *path) |
2989 | { | 3093 | { |
@@ -3063,8 +3167,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3063 | flags, allocated); | 3167 | flags, allocated); |
3064 | ext4_ext_show_leaf(inode, path); | 3168 | ext4_ext_show_leaf(inode, path); |
3065 | 3169 | ||
3066 | /* DIO get_block() before submit the IO, split the extent */ | 3170 | /* get_block() before submit the IO, split the extent */ |
3067 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | 3171 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3068 | ret = ext4_split_unwritten_extents(handle, | 3172 | ret = ext4_split_unwritten_extents(handle, |
3069 | inode, path, iblock, | 3173 | inode, path, iblock, |
3070 | max_blocks, flags); | 3174 | max_blocks, flags); |
@@ -3074,14 +3178,16 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3074 | * completed | 3178 | * completed |
3075 | */ | 3179 | */ |
3076 | if (io) | 3180 | if (io) |
3077 | io->flag = DIO_AIO_UNWRITTEN; | 3181 | io->flag = EXT4_IO_UNWRITTEN; |
3078 | else | 3182 | else |
3079 | EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN; | 3183 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
3184 | if (ext4_should_dioread_nolock(inode)) | ||
3185 | set_buffer_uninit(bh_result); | ||
3080 | goto out; | 3186 | goto out; |
3081 | } | 3187 | } |
3082 | /* async DIO end_io complete, convert the filled extent to written */ | 3188 | /* IO end_io complete, convert the filled extent to written */ |
3083 | if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { | 3189 | if ((flags & EXT4_GET_BLOCKS_CONVERT)) { |
3084 | ret = ext4_convert_unwritten_extents_dio(handle, inode, | 3190 | ret = ext4_convert_unwritten_extents_endio(handle, inode, |
3085 | path); | 3191 | path); |
3086 | if (ret >= 0) | 3192 | if (ret >= 0) |
3087 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3193 | ext4_update_inode_fsync_trans(handle, inode, 1); |
@@ -3132,7 +3238,19 @@ out: | |||
3132 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, | 3238 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, |
3133 | newblock + max_blocks, | 3239 | newblock + max_blocks, |
3134 | allocated - max_blocks); | 3240 | allocated - max_blocks); |
3241 | allocated = max_blocks; | ||
3135 | } | 3242 | } |
3243 | |||
3244 | /* | ||
3245 | * If we have done fallocate with the offset that is already | ||
3246 | * delayed allocated, we would have block reservation | ||
3247 | * and quota reservation done in the delayed write path. | ||
3248 | * But fallocate would have already updated quota and block | ||
3249 | * count for this offset. So cancel these reservation | ||
3250 | */ | ||
3251 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | ||
3252 | ext4_da_update_reserve_space(inode, allocated, 0); | ||
3253 | |||
3136 | map_out: | 3254 | map_out: |
3137 | set_buffer_mapped(bh_result); | 3255 | set_buffer_mapped(bh_result); |
3138 | out1: | 3256 | out1: |
@@ -3173,7 +3291,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3173 | { | 3291 | { |
3174 | struct ext4_ext_path *path = NULL; | 3292 | struct ext4_ext_path *path = NULL; |
3175 | struct ext4_extent_header *eh; | 3293 | struct ext4_extent_header *eh; |
3176 | struct ext4_extent newex, *ex; | 3294 | struct ext4_extent newex, *ex, *last_ex; |
3177 | ext4_fsblk_t newblock; | 3295 | ext4_fsblk_t newblock; |
3178 | int err = 0, depth, ret, cache_type; | 3296 | int err = 0, depth, ret, cache_type; |
3179 | unsigned int allocated = 0; | 3297 | unsigned int allocated = 0; |
@@ -3225,10 +3343,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3225 | * this situation is possible, though, _during_ tree modification; | 3343 | * this situation is possible, though, _during_ tree modification; |
3226 | * this is why assert can't be put in ext4_ext_find_extent() | 3344 | * this is why assert can't be put in ext4_ext_find_extent() |
3227 | */ | 3345 | */ |
3228 | if (path[depth].p_ext == NULL && depth != 0) { | 3346 | if (unlikely(path[depth].p_ext == NULL && depth != 0)) { |
3229 | ext4_error(inode->i_sb, __func__, "bad extent address " | 3347 | EXT4_ERROR_INODE(inode, "bad extent address " |
3230 | "inode: %lu, iblock: %d, depth: %d", | 3348 | "iblock: %d, depth: %d pblock %lld", |
3231 | inode->i_ino, iblock, depth); | 3349 | iblock, depth, path[depth].p_block); |
3232 | err = -EIO; | 3350 | err = -EIO; |
3233 | goto out2; | 3351 | goto out2; |
3234 | } | 3352 | } |
@@ -3246,7 +3364,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3246 | */ | 3364 | */ |
3247 | ee_len = ext4_ext_get_actual_len(ex); | 3365 | ee_len = ext4_ext_get_actual_len(ex); |
3248 | /* if found extent covers block, simply return it */ | 3366 | /* if found extent covers block, simply return it */ |
3249 | if (iblock >= ee_block && iblock < ee_block + ee_len) { | 3367 | if (in_range(iblock, ee_block, ee_len)) { |
3250 | newblock = iblock - ee_block + ee_start; | 3368 | newblock = iblock - ee_block + ee_start; |
3251 | /* number of remaining blocks in the extent */ | 3369 | /* number of remaining blocks in the extent */ |
3252 | allocated = ee_len - (iblock - ee_block); | 3370 | allocated = ee_len - (iblock - ee_block); |
@@ -3338,21 +3456,35 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3338 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ | 3456 | if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ |
3339 | ext4_ext_mark_uninitialized(&newex); | 3457 | ext4_ext_mark_uninitialized(&newex); |
3340 | /* | 3458 | /* |
3341 | * io_end structure was created for every async | 3459 | * io_end structure was created for every IO write to an |
3342 | * direct IO write to the middle of the file. | 3460 | * uninitialized extent. To avoid unnecessary conversion, |
3343 | * To avoid unecessary convertion for every aio dio rewrite | 3461 | * here we flag the IO that really needs the conversion. |
3344 | * to the mid of file, here we flag the IO that is really | ||
3345 | * need the convertion. | ||
3346 | * For the non-async direct IO case, flag the inode state | 3462 | * For the non-async direct IO case, flag the inode state |
3347 | * that we need to perform conversion when IO is done. | 3463 | * that we need to perform conversion when IO is done. |
3348 | */ | 3464 | */ |
3349 | if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) { | 3465 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3350 | if (io) | 3466 | if (io) |
3351 | io->flag = DIO_AIO_UNWRITTEN; | 3467 | io->flag = EXT4_IO_UNWRITTEN; |
3352 | else | 3468 | else |
3353 | EXT4_I(inode)->i_state |= | 3469 | ext4_set_inode_state(inode, |
3354 | EXT4_STATE_DIO_UNWRITTEN;; | 3470 | EXT4_STATE_DIO_UNWRITTEN); |
3355 | } | 3471 | } |
3472 | if (ext4_should_dioread_nolock(inode)) | ||
3473 | set_buffer_uninit(bh_result); | ||
3474 | } | ||
3475 | |||
3476 | if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { | ||
3477 | if (unlikely(!eh->eh_entries)) { | ||
3478 | EXT4_ERROR_INODE(inode, | ||
3479 | "eh->eh_entries == 0 ee_block %d", | ||
3480 | ex->ee_block); | ||
3481 | err = -EIO; | ||
3482 | goto out2; | ||
3483 | } | ||
3484 | last_ex = EXT_LAST_EXTENT(eh); | ||
3485 | if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) | ||
3486 | + ext4_ext_get_actual_len(last_ex)) | ||
3487 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | ||
3356 | } | 3488 | } |
3357 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 3489 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
3358 | if (err) { | 3490 | if (err) { |
@@ -3368,9 +3500,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3368 | /* previous routine could use block we allocated */ | 3500 | /* previous routine could use block we allocated */ |
3369 | newblock = ext_pblock(&newex); | 3501 | newblock = ext_pblock(&newex); |
3370 | allocated = ext4_ext_get_actual_len(&newex); | 3502 | allocated = ext4_ext_get_actual_len(&newex); |
3503 | if (allocated > max_blocks) | ||
3504 | allocated = max_blocks; | ||
3371 | set_buffer_new(bh_result); | 3505 | set_buffer_new(bh_result); |
3372 | 3506 | ||
3373 | /* | 3507 | /* |
3508 | * Update reserved blocks/metadata blocks after successful | ||
3509 | * block allocation which had been deferred till now. | ||
3510 | */ | ||
3511 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | ||
3512 | ext4_da_update_reserve_space(inode, allocated, 1); | ||
3513 | |||
3514 | /* | ||
3374 | * Cache the extent and update transaction to commit on fdatasync only | 3515 | * Cache the extent and update transaction to commit on fdatasync only |
3375 | * when it is _not_ an uninitialized extent. | 3516 | * when it is _not_ an uninitialized extent. |
3376 | */ | 3517 | */ |
@@ -3478,6 +3619,13 @@ static void ext4_falloc_update_inode(struct inode *inode, | |||
3478 | i_size_write(inode, new_size); | 3619 | i_size_write(inode, new_size); |
3479 | if (new_size > EXT4_I(inode)->i_disksize) | 3620 | if (new_size > EXT4_I(inode)->i_disksize) |
3480 | ext4_update_i_disksize(inode, new_size); | 3621 | ext4_update_i_disksize(inode, new_size); |
3622 | } else { | ||
3623 | /* | ||
3624 | * Mark that we allocate beyond EOF so the subsequent truncate | ||
3625 | * can proceed even if the new size is the same as i_size. | ||
3626 | */ | ||
3627 | if (new_size > i_size_read(inode)) | ||
3628 | EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL; | ||
3481 | } | 3629 | } |
3482 | 3630 | ||
3483 | } | 3631 | } |
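
The new else-branch in ext4_falloc_update_inode() is reached when fallocate keeps the file size: blocks are allocated past EOF while i_size stays put, which is exactly the state EXT4_EOFBLOCKS_FL records so a later same-size truncate can still trim them. A user-space sketch that produces this state is below; the path is arbitrary, and FALLOC_FL_KEEP_SIZE is defined by hand in case the libc headers in use predate it.

/* Sketch of the keep-size case: preallocate 1 MiB beyond EOF while the
 * reported file size stays at 4 KiB. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#ifndef FALLOC_FL_KEEP_SIZE
#define FALLOC_FL_KEEP_SIZE 0x01
#endif

int main(void)
{
	struct stat st;
	int fd = open("/tmp/keep-size-demo", O_CREAT | O_TRUNC | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Give the file a 4 KiB size (a hole is enough for the demo). */
	if (ftruncate(fd, 4096) < 0)
		perror("ftruncate");
	/* Preallocate past EOF without moving i_size: the path where the
	 * patch sets EXT4_EOFBLOCKS_FL. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 4096, 1024 * 1024) < 0)
		perror("fallocate");
	if (fstat(fd, &st) == 0)
		printf("size=%lld blocks=%lld\n",
		       (long long)st.st_size, (long long)st.st_blocks);
	close(fd);
	return 0;
}
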
@@ -3582,7 +3730,7 @@ retry: | |||
3582 | * Returns 0 on success. | 3730 | * Returns 0 on success. |
3583 | */ | 3731 | */ |
3584 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | 3732 | int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, |
3585 | loff_t len) | 3733 | ssize_t len) |
3586 | { | 3734 | { |
3587 | handle_t *handle; | 3735 | handle_t *handle; |
3588 | ext4_lblk_t block; | 3736 | ext4_lblk_t block; |
@@ -3614,7 +3762,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, | |||
3614 | map_bh.b_state = 0; | 3762 | map_bh.b_state = 0; |
3615 | ret = ext4_get_blocks(handle, inode, block, | 3763 | ret = ext4_get_blocks(handle, inode, block, |
3616 | max_blocks, &map_bh, | 3764 | max_blocks, &map_bh, |
3617 | EXT4_GET_BLOCKS_DIO_CONVERT_EXT); | 3765 | EXT4_GET_BLOCKS_IO_CONVERT_EXT); |
3618 | if (ret <= 0) { | 3766 | if (ret <= 0) { |
3619 | WARN_ON(ret <= 0); | 3767 | WARN_ON(ret <= 0); |
3620 | printk(KERN_ERR "%s: ext4_ext_get_blocks " | 3768 | printk(KERN_ERR "%s: ext4_ext_get_blocks " |
@@ -3718,7 +3866,7 @@ static int ext4_xattr_fiemap(struct inode *inode, | |||
3718 | int error = 0; | 3866 | int error = 0; |
3719 | 3867 | ||
3720 | /* in-inode? */ | 3868 | /* in-inode? */ |
3721 | if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { | 3869 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
3722 | struct ext4_iloc iloc; | 3870 | struct ext4_iloc iloc; |
3723 | int offset; /* offset of xattr in inode */ | 3871 | int offset; /* offset of xattr in inode */ |
3724 | 3872 | ||
@@ -3746,7 +3894,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3746 | __u64 start, __u64 len) | 3894 | __u64 start, __u64 len) |
3747 | { | 3895 | { |
3748 | ext4_lblk_t start_blk; | 3896 | ext4_lblk_t start_blk; |
3749 | ext4_lblk_t len_blks; | ||
3750 | int error = 0; | 3897 | int error = 0; |
3751 | 3898 | ||
3752 | /* fallback to generic here if not in extents fmt */ | 3899 | /* fallback to generic here if not in extents fmt */ |
@@ -3760,8 +3907,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3760 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { | 3907 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
3761 | error = ext4_xattr_fiemap(inode, fieinfo); | 3908 | error = ext4_xattr_fiemap(inode, fieinfo); |
3762 | } else { | 3909 | } else { |
3910 | ext4_lblk_t len_blks; | ||
3911 | __u64 last_blk; | ||
3912 | |||
3763 | start_blk = start >> inode->i_sb->s_blocksize_bits; | 3913 | start_blk = start >> inode->i_sb->s_blocksize_bits; |
3764 | len_blks = len >> inode->i_sb->s_blocksize_bits; | 3914 | last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; |
3915 | if (last_blk >= EXT_MAX_BLOCK) | ||
3916 | last_blk = EXT_MAX_BLOCK-1; | ||
3917 | len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; | ||
3765 | 3918 | ||
3766 | /* | 3919 | /* |
3767 | * Walk the extent tree gathering extent information. | 3920 | * Walk the extent tree gathering extent information. |
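
The fiemap change above converts the byte range (start, len) into logical blocks and clamps the last block at EXT_MAX_BLOCK so len_blks cannot wrap past the 32-bit logical-block limit. The stand-alone model below reproduces only that arithmetic; EXT_MAX_BLOCK is written out by hand and a 4 KiB block size is assumed, so the numbers are purely illustrative.

/* Model of the new start_blk/last_blk/len_blks computation.  The
 * constant mirrors the 32-bit logical block limit; it is reproduced
 * here as an assumption rather than taken from a kernel header. */
#include <stdint.h>
#include <stdio.h>

#define EXT_MAX_BLOCK 0xffffffffULL

int main(void)
{
	unsigned blocksize_bits = 12;		/* 4096-byte blocks */
	uint64_t start = 0, len = 1ULL << 45;	/* oversized FIEMAP request */
	uint64_t start_blk, last_blk, len_blks;

	start_blk = start >> blocksize_bits;
	last_blk = (start + len - 1) >> blocksize_bits;
	if (last_blk >= EXT_MAX_BLOCK)		/* clamp, as in the patch */
		last_blk = EXT_MAX_BLOCK - 1;
	len_blks = last_blk - start_blk + 1;	/* fits in 32 bits now */

	printf("start_blk=%llu last_blk=%llu len_blks=%llu\n",
	       (unsigned long long)start_blk,
	       (unsigned long long)last_blk,
	       (unsigned long long)len_blks);
	return 0;
}
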
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 9630583cef28..d0776e410f34 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/jbd2.h> | 23 | #include <linux/jbd2.h> |
24 | #include <linux/mount.h> | 24 | #include <linux/mount.h> |
25 | #include <linux/path.h> | 25 | #include <linux/path.h> |
26 | #include <linux/quotaops.h> | ||
26 | #include "ext4.h" | 27 | #include "ext4.h" |
27 | #include "ext4_jbd2.h" | 28 | #include "ext4_jbd2.h" |
28 | #include "xattr.h" | 29 | #include "xattr.h" |
@@ -35,9 +36,9 @@ | |||
35 | */ | 36 | */ |
36 | static int ext4_release_file(struct inode *inode, struct file *filp) | 37 | static int ext4_release_file(struct inode *inode, struct file *filp) |
37 | { | 38 | { |
38 | if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) { | 39 | if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { |
39 | ext4_alloc_da_blocks(inode); | 40 | ext4_alloc_da_blocks(inode); |
40 | EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE; | 41 | ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); |
41 | } | 42 | } |
42 | /* if we are the last writer on the inode, drop the block reservation */ | 43 | /* if we are the last writer on the inode, drop the block reservation */ |
43 | if ((filp->f_mode & FMODE_WRITE) && | 44 | if ((filp->f_mode & FMODE_WRITE) && |
@@ -116,18 +117,16 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
116 | * devices or filesystem images. | 117 | * devices or filesystem images. |
117 | */ | 118 | */ |
118 | memset(buf, 0, sizeof(buf)); | 119 | memset(buf, 0, sizeof(buf)); |
119 | path.mnt = mnt->mnt_parent; | 120 | path.mnt = mnt; |
120 | path.dentry = mnt->mnt_mountpoint; | 121 | path.dentry = mnt->mnt_root; |
121 | path_get(&path); | ||
122 | cp = d_path(&path, buf, sizeof(buf)); | 122 | cp = d_path(&path, buf, sizeof(buf)); |
123 | path_put(&path); | ||
124 | if (!IS_ERR(cp)) { | 123 | if (!IS_ERR(cp)) { |
125 | memcpy(sbi->s_es->s_last_mounted, cp, | 124 | memcpy(sbi->s_es->s_last_mounted, cp, |
126 | sizeof(sbi->s_es->s_last_mounted)); | 125 | sizeof(sbi->s_es->s_last_mounted)); |
127 | sb->s_dirt = 1; | 126 | sb->s_dirt = 1; |
128 | } | 127 | } |
129 | } | 128 | } |
130 | return generic_file_open(inode, filp); | 129 | return dquot_file_open(inode, filp); |
131 | } | 130 | } |
132 | 131 | ||
133 | const struct file_operations ext4_file_operations = { | 132 | const struct file_operations ext4_file_operations = { |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 98bd140aad01..0d0c3239c1cd 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
@@ -63,7 +63,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
63 | if (inode->i_sb->s_flags & MS_RDONLY) | 63 | if (inode->i_sb->s_flags & MS_RDONLY) |
64 | return 0; | 64 | return 0; |
65 | 65 | ||
66 | ret = flush_aio_dio_completed_IO(inode); | 66 | ret = flush_completed_IO(inode); |
67 | if (ret < 0) | 67 | if (ret < 0) |
68 | return ret; | 68 | return ret; |
69 | 69 | ||
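
flush_completed_IO() (formerly flush_aio_dio_completed_IO()) is what makes fsync() wait for any queued unwritten-to-written conversions. The user-visible pattern that exercises it is a direct write into preallocated (unwritten) space followed by fsync(); the sketch below is a synchronous stand-in for the AIO case, with an arbitrary path and a 4 KiB alignment assumption for O_DIRECT.

/* Sketch: preallocate an unwritten extent, dirty it with an O_DIRECT
 * write, then fsync() so the conversion must complete before return. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/tmp/dio-demo",
		      O_CREAT | O_TRUNC | O_RDWR | O_DIRECT, 0644);

	if (fd < 0) {
		perror("open(O_DIRECT)");
		return 1;
	}
	/* Allocated but uninitialized: an unwritten extent. */
	if (fallocate(fd, 0, 0, 4096) < 0)
		perror("fallocate");
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	memset(buf, 'x', 4096);
	/* Direct write into the unwritten extent; its completion queues
	 * the unwritten->written conversion work. */
	if (write(fd, buf, 4096) != 4096)
		perror("write");
	/* fsync() must drain that work (flush_completed_IO). */
	if (fsync(fd) < 0)
		perror("fsync");
	free(buf);
	close(fd);
	return 0;
}
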
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f3624ead4f6c..361c0b9962a8 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -76,8 +76,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh, | |||
76 | /* If checksum is bad mark all blocks and inodes use to prevent | 76 | /* If checksum is bad mark all blocks and inodes use to prevent |
77 | * allocation, essentially implementing a per-group read-only flag. */ | 77 | * allocation, essentially implementing a per-group read-only flag. */ |
78 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { | 78 | if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { |
79 | ext4_error(sb, __func__, "Checksum bad for group %u", | 79 | ext4_error(sb, "Checksum bad for group %u", block_group); |
80 | block_group); | ||
81 | ext4_free_blks_set(sb, gdp, 0); | 80 | ext4_free_blks_set(sb, gdp, 0); |
82 | ext4_free_inodes_set(sb, gdp, 0); | 81 | ext4_free_inodes_set(sb, gdp, 0); |
83 | ext4_itable_unused_set(sb, gdp, 0); | 82 | ext4_itable_unused_set(sb, gdp, 0); |
@@ -111,8 +110,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
111 | bitmap_blk = ext4_inode_bitmap(sb, desc); | 110 | bitmap_blk = ext4_inode_bitmap(sb, desc); |
112 | bh = sb_getblk(sb, bitmap_blk); | 111 | bh = sb_getblk(sb, bitmap_blk); |
113 | if (unlikely(!bh)) { | 112 | if (unlikely(!bh)) { |
114 | ext4_error(sb, __func__, | 113 | ext4_error(sb, "Cannot read inode bitmap - " |
115 | "Cannot read inode bitmap - " | ||
116 | "block_group = %u, inode_bitmap = %llu", | 114 | "block_group = %u, inode_bitmap = %llu", |
117 | block_group, bitmap_blk); | 115 | block_group, bitmap_blk); |
118 | return NULL; | 116 | return NULL; |
@@ -153,8 +151,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
153 | set_bitmap_uptodate(bh); | 151 | set_bitmap_uptodate(bh); |
154 | if (bh_submit_read(bh) < 0) { | 152 | if (bh_submit_read(bh) < 0) { |
155 | put_bh(bh); | 153 | put_bh(bh); |
156 | ext4_error(sb, __func__, | 154 | ext4_error(sb, "Cannot read inode bitmap - " |
157 | "Cannot read inode bitmap - " | ||
158 | "block_group = %u, inode_bitmap = %llu", | 155 | "block_group = %u, inode_bitmap = %llu", |
159 | block_group, bitmap_blk); | 156 | block_group, bitmap_blk); |
160 | return NULL; | 157 | return NULL; |
@@ -217,10 +214,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
217 | * Note: we must free any quota before locking the superblock, | 214 | * Note: we must free any quota before locking the superblock, |
218 | * as writing the quota to disk may need the lock as well. | 215 | * as writing the quota to disk may need the lock as well. |
219 | */ | 216 | */ |
220 | vfs_dq_init(inode); | 217 | dquot_initialize(inode); |
221 | ext4_xattr_delete_inode(handle, inode); | 218 | ext4_xattr_delete_inode(handle, inode); |
222 | vfs_dq_free_inode(inode); | 219 | dquot_free_inode(inode); |
223 | vfs_dq_drop(inode); | 220 | dquot_drop(inode); |
224 | 221 | ||
225 | is_directory = S_ISDIR(inode->i_mode); | 222 | is_directory = S_ISDIR(inode->i_mode); |
226 | 223 | ||
@@ -229,8 +226,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
229 | 226 | ||
230 | es = EXT4_SB(sb)->s_es; | 227 | es = EXT4_SB(sb)->s_es; |
231 | if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { | 228 | if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { |
232 | ext4_error(sb, "ext4_free_inode", | 229 | ext4_error(sb, "reserved or nonexistent inode %lu", ino); |
233 | "reserved or nonexistent inode %lu", ino); | ||
234 | goto error_return; | 230 | goto error_return; |
235 | } | 231 | } |
236 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); | 232 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); |
@@ -248,8 +244,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) | |||
248 | cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), | 244 | cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), |
249 | bit, bitmap_bh->b_data); | 245 | bit, bitmap_bh->b_data); |
250 | if (!cleared) | 246 | if (!cleared) |
251 | ext4_error(sb, "ext4_free_inode", | 247 | ext4_error(sb, "bit already cleared for inode %lu", ino); |
252 | "bit already cleared for inode %lu", ino); | ||
253 | else { | 248 | else { |
254 | gdp = ext4_get_group_desc(sb, block_group, &bh2); | 249 | gdp = ext4_get_group_desc(sb, block_group, &bh2); |
255 | 250 | ||
@@ -736,8 +731,7 @@ static int ext4_claim_inode(struct super_block *sb, | |||
736 | if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || | 731 | if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || |
737 | ino > EXT4_INODES_PER_GROUP(sb)) { | 732 | ino > EXT4_INODES_PER_GROUP(sb)) { |
738 | ext4_unlock_group(sb, group); | 733 | ext4_unlock_group(sb, group); |
739 | ext4_error(sb, __func__, | 734 | ext4_error(sb, "reserved inode or inode > inodes count - " |
740 | "reserved inode or inode > inodes count - " | ||
741 | "block_group = %u, inode=%lu", group, | 735 | "block_group = %u, inode=%lu", group, |
742 | ino + group * EXT4_INODES_PER_GROUP(sb)); | 736 | ino + group * EXT4_INODES_PER_GROUP(sb)); |
743 | return 1; | 737 | return 1; |
@@ -904,7 +898,7 @@ repeat_in_this_group: | |||
904 | BUFFER_TRACE(inode_bitmap_bh, | 898 | BUFFER_TRACE(inode_bitmap_bh, |
905 | "call ext4_handle_dirty_metadata"); | 899 | "call ext4_handle_dirty_metadata"); |
906 | err = ext4_handle_dirty_metadata(handle, | 900 | err = ext4_handle_dirty_metadata(handle, |
907 | inode, | 901 | NULL, |
908 | inode_bitmap_bh); | 902 | inode_bitmap_bh); |
909 | if (err) | 903 | if (err) |
910 | goto fail; | 904 | goto fail; |
@@ -1029,15 +1023,16 @@ got: | |||
1029 | inode->i_generation = sbi->s_next_generation++; | 1023 | inode->i_generation = sbi->s_next_generation++; |
1030 | spin_unlock(&sbi->s_next_gen_lock); | 1024 | spin_unlock(&sbi->s_next_gen_lock); |
1031 | 1025 | ||
1032 | ei->i_state = EXT4_STATE_NEW; | 1026 | ei->i_state_flags = 0; |
1027 | ext4_set_inode_state(inode, EXT4_STATE_NEW); | ||
1033 | 1028 | ||
1034 | ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; | 1029 | ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; |
1035 | 1030 | ||
1036 | ret = inode; | 1031 | ret = inode; |
1037 | if (vfs_dq_alloc_inode(inode)) { | 1032 | dquot_initialize(inode); |
1038 | err = -EDQUOT; | 1033 | err = dquot_alloc_inode(inode); |
1034 | if (err) | ||
1039 | goto fail_drop; | 1035 | goto fail_drop; |
1040 | } | ||
1041 | 1036 | ||
1042 | err = ext4_init_acl(handle, inode, dir); | 1037 | err = ext4_init_acl(handle, inode, dir); |
1043 | if (err) | 1038 | if (err) |
@@ -1074,10 +1069,10 @@ really_out: | |||
1074 | return ret; | 1069 | return ret; |
1075 | 1070 | ||
1076 | fail_free_drop: | 1071 | fail_free_drop: |
1077 | vfs_dq_free_inode(inode); | 1072 | dquot_free_inode(inode); |
1078 | 1073 | ||
1079 | fail_drop: | 1074 | fail_drop: |
1080 | vfs_dq_drop(inode); | 1075 | dquot_drop(inode); |
1081 | inode->i_flags |= S_NOQUOTA; | 1076 | inode->i_flags |= S_NOQUOTA; |
1082 | inode->i_nlink = 0; | 1077 | inode->i_nlink = 0; |
1083 | unlock_new_inode(inode); | 1078 | unlock_new_inode(inode); |
@@ -1098,8 +1093,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
1098 | 1093 | ||
1099 | /* Error cases - e2fsck has already cleaned up for us */ | 1094 | /* Error cases - e2fsck has already cleaned up for us */ |
1100 | if (ino > max_ino) { | 1095 | if (ino > max_ino) { |
1101 | ext4_warning(sb, __func__, | 1096 | ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); |
1102 | "bad orphan ino %lu! e2fsck was run?", ino); | ||
1103 | goto error; | 1097 | goto error; |
1104 | } | 1098 | } |
1105 | 1099 | ||
@@ -1107,8 +1101,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
1107 | bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); | 1101 | bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); |
1108 | bitmap_bh = ext4_read_inode_bitmap(sb, block_group); | 1102 | bitmap_bh = ext4_read_inode_bitmap(sb, block_group); |
1109 | if (!bitmap_bh) { | 1103 | if (!bitmap_bh) { |
1110 | ext4_warning(sb, __func__, | 1104 | ext4_warning(sb, "inode bitmap error for orphan %lu", ino); |
1111 | "inode bitmap error for orphan %lu", ino); | ||
1112 | goto error; | 1105 | goto error; |
1113 | } | 1106 | } |
1114 | 1107 | ||
@@ -1140,8 +1133,7 @@ iget_failed: | |||
1140 | err = PTR_ERR(inode); | 1133 | err = PTR_ERR(inode); |
1141 | inode = NULL; | 1134 | inode = NULL; |
1142 | bad_orphan: | 1135 | bad_orphan: |
1143 | ext4_warning(sb, __func__, | 1136 | ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino); |
1144 | "bad orphan inode %lu! e2fsck was run?", ino); | ||
1145 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", | 1137 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", |
1146 | bit, (unsigned long long)bitmap_bh->b_blocknr, | 1138 | bit, (unsigned long long)bitmap_bh->b_blocknr, |
1147 | ext4_test_bit(bit, bitmap_bh->b_data)); | 1139 | ext4_test_bit(bit, bitmap_bh->b_data)); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c818972c8302..986120f30066 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/uio.h> | 38 | #include <linux/uio.h> |
39 | #include <linux/bio.h> | 39 | #include <linux/bio.h> |
40 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
41 | #include <linux/kernel.h> | ||
41 | 42 | ||
42 | #include "ext4_jbd2.h" | 43 | #include "ext4_jbd2.h" |
43 | #include "xattr.h" | 44 | #include "xattr.h" |
@@ -170,6 +171,9 @@ void ext4_delete_inode(struct inode *inode) | |||
170 | handle_t *handle; | 171 | handle_t *handle; |
171 | int err; | 172 | int err; |
172 | 173 | ||
174 | if (!is_bad_inode(inode)) | ||
175 | dquot_initialize(inode); | ||
176 | |||
173 | if (ext4_should_order_data(inode)) | 177 | if (ext4_should_order_data(inode)) |
174 | ext4_begin_ordered_truncate(inode, 0); | 178 | ext4_begin_ordered_truncate(inode, 0); |
175 | truncate_inode_pages(&inode->i_data, 0); | 179 | truncate_inode_pages(&inode->i_data, 0); |
@@ -194,7 +198,7 @@ void ext4_delete_inode(struct inode *inode) | |||
194 | inode->i_size = 0; | 198 | inode->i_size = 0; |
195 | err = ext4_mark_inode_dirty(handle, inode); | 199 | err = ext4_mark_inode_dirty(handle, inode); |
196 | if (err) { | 200 | if (err) { |
197 | ext4_warning(inode->i_sb, __func__, | 201 | ext4_warning(inode->i_sb, |
198 | "couldn't mark inode dirty (err %d)", err); | 202 | "couldn't mark inode dirty (err %d)", err); |
199 | goto stop_handle; | 203 | goto stop_handle; |
200 | } | 204 | } |
@@ -212,7 +216,7 @@ void ext4_delete_inode(struct inode *inode) | |||
212 | if (err > 0) | 216 | if (err > 0) |
213 | err = ext4_journal_restart(handle, 3); | 217 | err = ext4_journal_restart(handle, 3); |
214 | if (err != 0) { | 218 | if (err != 0) { |
215 | ext4_warning(inode->i_sb, __func__, | 219 | ext4_warning(inode->i_sb, |
216 | "couldn't extend journal (err %d)", err); | 220 | "couldn't extend journal (err %d)", err); |
217 | stop_handle: | 221 | stop_handle: |
218 | ext4_journal_stop(handle); | 222 | ext4_journal_stop(handle); |
@@ -323,8 +327,7 @@ static int ext4_block_to_path(struct inode *inode, | |||
323 | offsets[n++] = i_block & (ptrs - 1); | 327 | offsets[n++] = i_block & (ptrs - 1); |
324 | final = ptrs; | 328 | final = ptrs; |
325 | } else { | 329 | } else { |
326 | ext4_warning(inode->i_sb, "ext4_block_to_path", | 330 | ext4_warning(inode->i_sb, "block %lu > max in inode %lu", |
327 | "block %lu > max in inode %lu", | ||
328 | i_block + direct_blocks + | 331 | i_block + direct_blocks + |
329 | indirect_blocks + double_blocks, inode->i_ino); | 332 | indirect_blocks + double_blocks, inode->i_ino); |
330 | } | 333 | } |
@@ -344,7 +347,7 @@ static int __ext4_check_blockref(const char *function, struct inode *inode, | |||
344 | if (blk && | 347 | if (blk && |
345 | unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), | 348 | unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), |
346 | blk, 1))) { | 349 | blk, 1))) { |
347 | ext4_error(inode->i_sb, function, | 350 | __ext4_error(inode->i_sb, function, |
348 | "invalid block reference %u " | 351 | "invalid block reference %u " |
349 | "in inode #%lu", blk, inode->i_ino); | 352 | "in inode #%lu", blk, inode->i_ino); |
350 | return -EIO; | 353 | return -EIO; |
@@ -607,7 +610,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, | |||
607 | if (*err) | 610 | if (*err) |
608 | goto failed_out; | 611 | goto failed_out; |
609 | 612 | ||
610 | BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS); | 613 | if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) { |
614 | EXT4_ERROR_INODE(inode, | ||
615 | "current_block %llu + count %lu > %d!", | ||
616 | current_block, count, | ||
617 | EXT4_MAX_BLOCK_FILE_PHYS); | ||
618 | *err = -EIO; | ||
619 | goto failed_out; | ||
620 | } | ||
611 | 621 | ||
612 | target -= count; | 622 | target -= count; |
613 | /* allocate blocks for indirect blocks */ | 623 | /* allocate blocks for indirect blocks */ |
@@ -643,7 +653,14 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, | |||
643 | ar.flags = EXT4_MB_HINT_DATA; | 653 | ar.flags = EXT4_MB_HINT_DATA; |
644 | 654 | ||
645 | current_block = ext4_mb_new_blocks(handle, &ar, err); | 655 | current_block = ext4_mb_new_blocks(handle, &ar, err); |
646 | BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS); | 656 | if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) { |
657 | EXT4_ERROR_INODE(inode, | ||
658 | "current_block %llu + ar.len %d > %d!", | ||
659 | current_block, ar.len, | ||
660 | EXT4_MAX_BLOCK_FILE_PHYS); | ||
661 | *err = -EIO; | ||
662 | goto failed_out; | ||
663 | } | ||
647 | 664 | ||
648 | if (*err && (target == blks)) { | 665 | if (*err && (target == blks)) { |
649 | /* | 666 | /* |
@@ -1053,13 +1070,15 @@ static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) | |||
1053 | * Called with i_data_sem down, which is important since we can call | 1070 | * Called with i_data_sem down, which is important since we can call |
1054 | * ext4_discard_preallocations() from here. | 1071 | * ext4_discard_preallocations() from here. |
1055 | */ | 1072 | */ |
1056 | static void ext4_da_update_reserve_space(struct inode *inode, int used) | 1073 | void ext4_da_update_reserve_space(struct inode *inode, |
1074 | int used, int quota_claim) | ||
1057 | { | 1075 | { |
1058 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1076 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1059 | struct ext4_inode_info *ei = EXT4_I(inode); | 1077 | struct ext4_inode_info *ei = EXT4_I(inode); |
1060 | int mdb_free = 0; | 1078 | int mdb_free = 0, allocated_meta_blocks = 0; |
1061 | 1079 | ||
1062 | spin_lock(&ei->i_block_reservation_lock); | 1080 | spin_lock(&ei->i_block_reservation_lock); |
1081 | trace_ext4_da_update_reserve_space(inode, used); | ||
1063 | if (unlikely(used > ei->i_reserved_data_blocks)) { | 1082 | if (unlikely(used > ei->i_reserved_data_blocks)) { |
1064 | ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " | 1083 | ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " |
1065 | "with only %d reserved data blocks\n", | 1084 | "with only %d reserved data blocks\n", |
@@ -1073,6 +1092,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1073 | ei->i_reserved_data_blocks -= used; | 1092 | ei->i_reserved_data_blocks -= used; |
1074 | used += ei->i_allocated_meta_blocks; | 1093 | used += ei->i_allocated_meta_blocks; |
1075 | ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; | 1094 | ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; |
1095 | allocated_meta_blocks = ei->i_allocated_meta_blocks; | ||
1076 | ei->i_allocated_meta_blocks = 0; | 1096 | ei->i_allocated_meta_blocks = 0; |
1077 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); | 1097 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); |
1078 | 1098 | ||
@@ -1090,9 +1110,23 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1090 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1110 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
1091 | 1111 | ||
1092 | /* Update quota subsystem */ | 1112 | /* Update quota subsystem */ |
1093 | vfs_dq_claim_block(inode, used); | 1113 | if (quota_claim) { |
1094 | if (mdb_free) | 1114 | dquot_claim_block(inode, used); |
1095 | vfs_dq_release_reservation_block(inode, mdb_free); | 1115 | if (mdb_free) |
1116 | dquot_release_reservation_block(inode, mdb_free); | ||
1117 | } else { | ||
1118 | /* | ||
1119 | * We did fallocate at an offset that is already delayed | ||
1120 | * allocated, so on delayed-allocation writeback we should | ||
1121 | * not update the quota for the allocated blocks. But | ||
1122 | * converting a fallocated region to an initialized region would | ||
1123 | * have caused a metadata allocation, so claim quota for | ||
1124 | * that | ||
1125 | */ | ||
1126 | if (allocated_meta_blocks) | ||
1127 | dquot_claim_block(inode, allocated_meta_blocks); | ||
1128 | dquot_release_reservation_block(inode, mdb_free + used); | ||
1129 | } | ||
1096 | 1130 | ||
1097 | /* | 1131 | /* |
1098 | * If we have done all the pending block allocations and if | 1132 | * If we have done all the pending block allocations and if |
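
To make the two quota branches added to ext4_da_update_reserve_space() easier to follow, here is a plain user-space echo of the call pattern only; "used" already includes the allocated metadata blocks, exactly as in the hunk, and the values passed in main() are invented.

/* Echo of the quota_claim branch structure above.  This is not the
 * kernel API, just printf stand-ins for the dquot_* calls. */
#include <stdio.h>

static void da_update_reserve_space(long used, long allocated_meta_blocks,
				    long mdb_free, int quota_claim)
{
	if (quota_claim) {
		/* Delalloc writeback: convert the earlier reservation for
		 * the data (and its metadata) into a real quota claim. */
		printf("dquot_claim_block(inode, %ld)\n", used);
		if (mdb_free)
			printf("dquot_release_reservation_block(inode, %ld)\n",
			       mdb_free);
	} else {
		/* fallocate already charged the data blocks; only metadata
		 * allocated during conversion still needs claiming, the
		 * rest of the reservation is simply dropped. */
		if (allocated_meta_blocks)
			printf("dquot_claim_block(inode, %ld)\n",
			       allocated_meta_blocks);
		printf("dquot_release_reservation_block(inode, %ld)\n",
		       mdb_free + used);
	}
}

int main(void)
{
	da_update_reserve_space(9, 1, 2, 1);	/* ordinary writeback */
	da_update_reserve_space(9, 1, 2, 0);	/* range was fallocated */
	return 0;
}
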
@@ -1108,7 +1142,7 @@ static int check_block_validity(struct inode *inode, const char *msg, | |||
1108 | sector_t logical, sector_t phys, int len) | 1142 | sector_t logical, sector_t phys, int len) |
1109 | { | 1143 | { |
1110 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { | 1144 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { |
1111 | ext4_error(inode->i_sb, msg, | 1145 | __ext4_error(inode->i_sb, msg, |
1112 | "inode #%lu logical block %llu mapped to %llu " | 1146 | "inode #%lu logical block %llu mapped to %llu " |
1113 | "(size %d)", inode->i_ino, | 1147 | "(size %d)", inode->i_ino, |
1114 | (unsigned long long) logical, | 1148 | (unsigned long long) logical, |
@@ -1290,20 +1324,22 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
1290 | * i_data's format changing. Force the migrate | 1324 | * i_data's format changing. Force the migrate |
1291 | * to fail by clearing migrate flags | 1325 | * to fail by clearing migrate flags |
1292 | */ | 1326 | */ |
1293 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; | 1327 | ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
1294 | } | 1328 | } |
1295 | } | ||
1296 | 1329 | ||
1330 | /* | ||
1331 | * Update reserved blocks/metadata blocks after successful | ||
1332 | * block allocation which had been deferred till now. We don't | ||
1333 | * support fallocate for non extent files. So we can update | ||
1334 | * reserve space here. | ||
1335 | */ | ||
1336 | if ((retval > 0) && | ||
1337 | (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) | ||
1338 | ext4_da_update_reserve_space(inode, retval, 1); | ||
1339 | } | ||
1297 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | 1340 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
1298 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; | 1341 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; |
1299 | 1342 | ||
1300 | /* | ||
1301 | * Update reserved blocks/metadata blocks after successful | ||
1302 | * block allocation which had been deferred till now. | ||
1303 | */ | ||
1304 | if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE)) | ||
1305 | ext4_da_update_reserve_space(inode, retval); | ||
1306 | |||
1307 | up_write((&EXT4_I(inode)->i_data_sem)); | 1343 | up_write((&EXT4_I(inode)->i_data_sem)); |
1308 | if (retval > 0 && buffer_mapped(bh)) { | 1344 | if (retval > 0 && buffer_mapped(bh)) { |
1309 | int ret = check_block_validity(inode, "file system " | 1345 | int ret = check_block_validity(inode, "file system " |
@@ -1516,6 +1552,8 @@ static void ext4_truncate_failed_write(struct inode *inode) | |||
1516 | ext4_truncate(inode); | 1552 | ext4_truncate(inode); |
1517 | } | 1553 | } |
1518 | 1554 | ||
1555 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, | ||
1556 | struct buffer_head *bh_result, int create); | ||
1519 | static int ext4_write_begin(struct file *file, struct address_space *mapping, | 1557 | static int ext4_write_begin(struct file *file, struct address_space *mapping, |
1520 | loff_t pos, unsigned len, unsigned flags, | 1558 | loff_t pos, unsigned len, unsigned flags, |
1521 | struct page **pagep, void **fsdata) | 1559 | struct page **pagep, void **fsdata) |
@@ -1557,8 +1595,12 @@ retry: | |||
1557 | } | 1595 | } |
1558 | *pagep = page; | 1596 | *pagep = page; |
1559 | 1597 | ||
1560 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 1598 | if (ext4_should_dioread_nolock(inode)) |
1561 | ext4_get_block); | 1599 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, |
1600 | fsdata, ext4_get_block_write); | ||
1601 | else | ||
1602 | ret = block_write_begin(file, mapping, pos, len, flags, pagep, | ||
1603 | fsdata, ext4_get_block); | ||
1562 | 1604 | ||
1563 | if (!ret && ext4_should_journal_data(inode)) { | 1605 | if (!ret && ext4_should_journal_data(inode)) { |
1564 | ret = walk_page_buffers(handle, page_buffers(page), | 1606 | ret = walk_page_buffers(handle, page_buffers(page), |
@@ -1775,7 +1817,7 @@ static int ext4_journalled_write_end(struct file *file, | |||
1775 | new_i_size = pos + copied; | 1817 | new_i_size = pos + copied; |
1776 | if (new_i_size > inode->i_size) | 1818 | if (new_i_size > inode->i_size) |
1777 | i_size_write(inode, pos+copied); | 1819 | i_size_write(inode, pos+copied); |
1778 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; | 1820 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); |
1779 | if (new_i_size > EXT4_I(inode)->i_disksize) { | 1821 | if (new_i_size > EXT4_I(inode)->i_disksize) { |
1780 | ext4_update_i_disksize(inode, new_i_size); | 1822 | ext4_update_i_disksize(inode, new_i_size); |
1781 | ret2 = ext4_mark_inode_dirty(handle, inode); | 1823 | ret2 = ext4_mark_inode_dirty(handle, inode); |
@@ -1818,6 +1860,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) | |||
1818 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1860 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1819 | struct ext4_inode_info *ei = EXT4_I(inode); | 1861 | struct ext4_inode_info *ei = EXT4_I(inode); |
1820 | unsigned long md_needed, md_reserved; | 1862 | unsigned long md_needed, md_reserved; |
1863 | int ret; | ||
1821 | 1864 | ||
1822 | /* | 1865 | /* |
1823 | * recalculate the amount of metadata blocks to reserve | 1866 | * recalculate the amount of metadata blocks to reserve |
@@ -1828,6 +1871,7 @@ repeat: | |||
1828 | spin_lock(&ei->i_block_reservation_lock); | 1871 | spin_lock(&ei->i_block_reservation_lock); |
1829 | md_reserved = ei->i_reserved_meta_blocks; | 1872 | md_reserved = ei->i_reserved_meta_blocks; |
1830 | md_needed = ext4_calc_metadata_amount(inode, lblock); | 1873 | md_needed = ext4_calc_metadata_amount(inode, lblock); |
1874 | trace_ext4_da_reserve_space(inode, md_needed); | ||
1831 | spin_unlock(&ei->i_block_reservation_lock); | 1875 | spin_unlock(&ei->i_block_reservation_lock); |
1832 | 1876 | ||
1833 | /* | 1877 | /* |
@@ -1835,24 +1879,13 @@ repeat: | |||
1835 | * later. Real quota accounting is done at pages writeout | 1879 | * later. Real quota accounting is done at pages writeout |
1836 | * time. | 1880 | * time. |
1837 | */ | 1881 | */ |
1838 | if (vfs_dq_reserve_block(inode, md_needed + 1)) { | 1882 | ret = dquot_reserve_block(inode, md_needed + 1); |
1839 | /* | 1883 | if (ret) |
1840 | * We tend to badly over-estimate the amount of | 1884 | return ret; |
1841 | * metadata blocks which are needed, so if we have | ||
1842 | * reserved any metadata blocks, try to force out the | ||
1843 | * inode and see if we have any better luck. | ||
1844 | */ | ||
1845 | if (md_reserved && retries++ <= 3) | ||
1846 | goto retry; | ||
1847 | return -EDQUOT; | ||
1848 | } | ||
1849 | 1885 | ||
1850 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { | 1886 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { |
1851 | vfs_dq_release_reservation_block(inode, md_needed + 1); | 1887 | dquot_release_reservation_block(inode, md_needed + 1); |
1852 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1888 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1853 | retry: | ||
1854 | if (md_reserved) | ||
1855 | write_inode_now(inode, (retries == 3)); | ||
1856 | yield(); | 1889 | yield(); |
1857 | goto repeat; | 1890 | goto repeat; |
1858 | } | 1891 | } |
@@ -1908,7 +1941,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free) | |||
1908 | 1941 | ||
1909 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1942 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
1910 | 1943 | ||
1911 | vfs_dq_release_reservation_block(inode, to_free); | 1944 | dquot_release_reservation_block(inode, to_free); |
1912 | } | 1945 | } |
1913 | 1946 | ||
1914 | static void ext4_da_page_release_reservation(struct page *page, | 1947 | static void ext4_da_page_release_reservation(struct page *page, |
@@ -2085,6 +2118,8 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, | |||
2085 | } else if (buffer_mapped(bh)) | 2118 | } else if (buffer_mapped(bh)) |
2086 | BUG_ON(bh->b_blocknr != pblock); | 2119 | BUG_ON(bh->b_blocknr != pblock); |
2087 | 2120 | ||
2121 | if (buffer_uninit(exbh)) | ||
2122 | set_buffer_uninit(bh); | ||
2088 | cur_logical++; | 2123 | cur_logical++; |
2089 | pblock++; | 2124 | pblock++; |
2090 | } while ((bh = bh->b_this_page) != head); | 2125 | } while ((bh = bh->b_this_page) != head); |
@@ -2127,17 +2162,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, | |||
2127 | break; | 2162 | break; |
2128 | for (i = 0; i < nr_pages; i++) { | 2163 | for (i = 0; i < nr_pages; i++) { |
2129 | struct page *page = pvec.pages[i]; | 2164 | struct page *page = pvec.pages[i]; |
2130 | index = page->index; | 2165 | if (page->index > end) |
2131 | if (index > end) | ||
2132 | break; | 2166 | break; |
2133 | index++; | ||
2134 | |||
2135 | BUG_ON(!PageLocked(page)); | 2167 | BUG_ON(!PageLocked(page)); |
2136 | BUG_ON(PageWriteback(page)); | 2168 | BUG_ON(PageWriteback(page)); |
2137 | block_invalidatepage(page, 0); | 2169 | block_invalidatepage(page, 0); |
2138 | ClearPageUptodate(page); | 2170 | ClearPageUptodate(page); |
2139 | unlock_page(page); | 2171 | unlock_page(page); |
2140 | } | 2172 | } |
2173 | index = pvec.pages[nr_pages - 1]->index + 1; | ||
2174 | pagevec_release(&pvec); | ||
2141 | } | 2175 | } |
2142 | return; | 2176 | return; |
2143 | } | 2177 | } |
@@ -2213,10 +2247,12 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) | |||
2213 | * variables are updated after the blocks have been allocated. | 2247 | * variables are updated after the blocks have been allocated. |
2214 | */ | 2248 | */ |
2215 | new.b_state = 0; | 2249 | new.b_state = 0; |
2216 | get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | | 2250 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE; |
2217 | EXT4_GET_BLOCKS_DELALLOC_RESERVE); | 2251 | if (ext4_should_dioread_nolock(mpd->inode)) |
2252 | get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; | ||
2218 | if (mpd->b_state & (1 << BH_Delay)) | 2253 | if (mpd->b_state & (1 << BH_Delay)) |
2219 | get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; | 2254 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; |
2255 | |||
2220 | blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, | 2256 | blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, |
2221 | &new, get_blocks_flags); | 2257 | &new, get_blocks_flags); |
2222 | if (blks < 0) { | 2258 | if (blks < 0) { |
@@ -2624,11 +2660,14 @@ static int __ext4_journalled_writepage(struct page *page, | |||
2624 | ret = err; | 2660 | ret = err; |
2625 | 2661 | ||
2626 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); | 2662 | walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); |
2627 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; | 2663 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); |
2628 | out: | 2664 | out: |
2629 | return ret; | 2665 | return ret; |
2630 | } | 2666 | } |
2631 | 2667 | ||
2668 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); | ||
2669 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); | ||
2670 | |||
2632 | /* | 2671 | /* |
2633 | * Note that we don't need to start a transaction unless we're journaling data | 2672 | * Note that we don't need to start a transaction unless we're journaling data |
2634 | * because we should have holes filled from ext4_page_mkwrite(). We even don't | 2673 | * because we should have holes filled from ext4_page_mkwrite(). We even don't |
@@ -2676,7 +2715,7 @@ static int ext4_writepage(struct page *page, | |||
2676 | int ret = 0; | 2715 | int ret = 0; |
2677 | loff_t size; | 2716 | loff_t size; |
2678 | unsigned int len; | 2717 | unsigned int len; |
2679 | struct buffer_head *page_bufs; | 2718 | struct buffer_head *page_bufs = NULL; |
2680 | struct inode *inode = page->mapping->host; | 2719 | struct inode *inode = page->mapping->host; |
2681 | 2720 | ||
2682 | trace_ext4_writepage(inode, page); | 2721 | trace_ext4_writepage(inode, page); |
@@ -2752,7 +2791,11 @@ static int ext4_writepage(struct page *page, | |||
2752 | 2791 | ||
2753 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) | 2792 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) |
2754 | ret = nobh_writepage(page, noalloc_get_block_write, wbc); | 2793 | ret = nobh_writepage(page, noalloc_get_block_write, wbc); |
2755 | else | 2794 | else if (page_bufs && buffer_uninit(page_bufs)) { |
2795 | ext4_set_bh_endio(page_bufs, inode); | ||
2796 | ret = block_write_full_page_endio(page, noalloc_get_block_write, | ||
2797 | wbc, ext4_end_io_buffer_write); | ||
2798 | } else | ||
2756 | ret = block_write_full_page(page, noalloc_get_block_write, | 2799 | ret = block_write_full_page(page, noalloc_get_block_write, |
2757 | wbc); | 2800 | wbc); |
2758 | 2801 | ||
@@ -3032,7 +3075,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, | |||
3032 | loff_t pos, unsigned len, unsigned flags, | 3075 | loff_t pos, unsigned len, unsigned flags, |
3033 | struct page **pagep, void **fsdata) | 3076 | struct page **pagep, void **fsdata) |
3034 | { | 3077 | { |
3035 | int ret, retries = 0; | 3078 | int ret, retries = 0, quota_retries = 0; |
3036 | struct page *page; | 3079 | struct page *page; |
3037 | pgoff_t index; | 3080 | pgoff_t index; |
3038 | unsigned from, to; | 3081 | unsigned from, to; |
@@ -3091,6 +3134,22 @@ retry: | |||
3091 | 3134 | ||
3092 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 3135 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
3093 | goto retry; | 3136 | goto retry; |
3137 | |||
3138 | if ((ret == -EDQUOT) && | ||
3139 | EXT4_I(inode)->i_reserved_meta_blocks && | ||
3140 | (quota_retries++ < 3)) { | ||
3141 | /* | ||
3142 | * Since we often over-estimate the number of meta | ||
3143 | * data blocks required, we may sometimes get a | ||
3144 | * spurious out of quota error even though there would | ||
3145 | * be enough space once we write the data blocks and | ||
3146 | * find out how many meta data blocks were _really_ | ||
3147 | * required. So try forcing the inode write to see if | ||
3148 | * that helps. | ||
3149 | */ | ||
3150 | write_inode_now(inode, (quota_retries == 3)); | ||
3151 | goto retry; | ||
3152 | } | ||
3094 | out: | 3153 | out: |
3095 | return ret; | 3154 | return ret; |
3096 | } | 3155 | } |
@@ -3279,7 +3338,8 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) | |||
3279 | filemap_write_and_wait(mapping); | 3338 | filemap_write_and_wait(mapping); |
3280 | } | 3339 | } |
3281 | 3340 | ||
3282 | if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { | 3341 | if (EXT4_JOURNAL(inode) && |
3342 | ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { | ||
3283 | /* | 3343 | /* |
3284 | * This is a REALLY heavyweight approach, but the use of | 3344 | * This is a REALLY heavyweight approach, but the use of |
3285 | * bmap on dirty files is expected to be extremely rare: | 3345 | * bmap on dirty files is expected to be extremely rare: |
@@ -3298,7 +3358,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) | |||
3298 | * everything they get. | 3358 | * everything they get. |
3299 | */ | 3359 | */ |
3300 | 3360 | ||
3301 | EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; | 3361 | ext4_clear_inode_state(inode, EXT4_STATE_JDATA); |
3302 | journal = EXT4_JOURNAL(inode); | 3362 | journal = EXT4_JOURNAL(inode); |
3303 | jbd2_journal_lock_updates(journal); | 3363 | jbd2_journal_lock_updates(journal); |
3304 | err = jbd2_journal_flush(journal); | 3364 | err = jbd2_journal_flush(journal); |
@@ -3323,11 +3383,45 @@ ext4_readpages(struct file *file, struct address_space *mapping, | |||
3323 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); | 3383 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); |
3324 | } | 3384 | } |
3325 | 3385 | ||
3386 | static void ext4_free_io_end(ext4_io_end_t *io) | ||
3387 | { | ||
3388 | BUG_ON(!io); | ||
3389 | if (io->page) | ||
3390 | put_page(io->page); | ||
3391 | iput(io->inode); | ||
3392 | kfree(io); | ||
3393 | } | ||
3394 | |||
3395 | static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) | ||
3396 | { | ||
3397 | struct buffer_head *head, *bh; | ||
3398 | unsigned int curr_off = 0; | ||
3399 | |||
3400 | if (!page_has_buffers(page)) | ||
3401 | return; | ||
3402 | head = bh = page_buffers(page); | ||
3403 | do { | ||
3404 | if (offset <= curr_off && test_clear_buffer_uninit(bh) | ||
3405 | && bh->b_private) { | ||
3406 | ext4_free_io_end(bh->b_private); | ||
3407 | bh->b_private = NULL; | ||
3408 | bh->b_end_io = NULL; | ||
3409 | } | ||
3410 | curr_off = curr_off + bh->b_size; | ||
3411 | bh = bh->b_this_page; | ||
3412 | } while (bh != head); | ||
3413 | } | ||
3414 | |||
3326 | static void ext4_invalidatepage(struct page *page, unsigned long offset) | 3415 | static void ext4_invalidatepage(struct page *page, unsigned long offset) |
3327 | { | 3416 | { |
3328 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); | 3417 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); |
3329 | 3418 | ||
3330 | /* | 3419 | /* |
3420 | * free any io_end structure allocated for buffers to be discarded | ||
3421 | */ | ||
3422 | if (ext4_should_dioread_nolock(page->mapping->host)) | ||
3423 | ext4_invalidatepage_free_endio(page, offset); | ||
3424 | /* | ||
3331 | * If it's a full truncate we just forget about the pending dirtying | 3425 | * If it's a full truncate we just forget about the pending dirtying |
3332 | */ | 3426 | */ |
3333 | if (offset == 0) | 3427 | if (offset == 0) |
@@ -3398,7 +3492,14 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, | |||
3398 | } | 3492 | } |
3399 | 3493 | ||
3400 | retry: | 3494 | retry: |
3401 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 3495 | if (rw == READ && ext4_should_dioread_nolock(inode)) |
3496 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | ||
3497 | inode->i_sb->s_bdev, iov, | ||
3498 | offset, nr_segs, | ||
3499 | ext4_get_block, NULL); | ||
3500 | else | ||
3501 | ret = blockdev_direct_IO(rw, iocb, inode, | ||
3502 | inode->i_sb->s_bdev, iov, | ||
3402 | offset, nr_segs, | 3503 | offset, nr_segs, |
3403 | ext4_get_block, NULL); | 3504 | ext4_get_block, NULL); |
3404 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 3505 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
@@ -3414,6 +3515,9 @@ retry: | |||
3414 | * but cannot extend i_size. Bail out and pretend | 3515 | * but cannot extend i_size. Bail out and pretend |
3415 | * the write failed... */ | 3516 | * the write failed... */ |
3416 | ret = PTR_ERR(handle); | 3517 | ret = PTR_ERR(handle); |
3518 | if (inode->i_nlink) | ||
3519 | ext4_orphan_del(NULL, inode); | ||
3520 | |||
3417 | goto out; | 3521 | goto out; |
3418 | } | 3522 | } |
3419 | if (inode->i_nlink) | 3523 | if (inode->i_nlink) |
@@ -3441,75 +3545,63 @@ out: | |||
3441 | return ret; | 3545 | return ret; |
3442 | } | 3546 | } |
3443 | 3547 | ||
3444 | static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock, | 3548 | static int ext4_get_block_write(struct inode *inode, sector_t iblock, |
3445 | struct buffer_head *bh_result, int create) | 3549 | struct buffer_head *bh_result, int create) |
3446 | { | 3550 | { |
3447 | handle_t *handle = NULL; | 3551 | handle_t *handle = ext4_journal_current_handle(); |
3448 | int ret = 0; | 3552 | int ret = 0; |
3449 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 3553 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; |
3450 | int dio_credits; | 3554 | int dio_credits; |
3555 | int started = 0; | ||
3451 | 3556 | ||
3452 | ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n", | 3557 | ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", |
3453 | inode->i_ino, create); | 3558 | inode->i_ino, create); |
3454 | /* | 3559 | /* |
3455 | * DIO VFS code passes create = 0 flag for write to | 3560 | * ext4_get_block in prepare for a DIO write or buffer write. |
3456 | * the middle of file. It does this to avoid block | 3561 | * We allocate an uninitialized extent if blocks haven't been allocated. |
3457 | * allocation for holes, to prevent expose stale data | 3562 | * The extent will be converted to initialized after IO complete. |
3458 | * out when there is parallel buffered read (which does | ||
3459 | * not hold the i_mutex lock) while direct IO write has | ||
3460 | * not completed. DIO request on holes finally falls back | ||
3461 | * to buffered IO for this reason. | ||
3462 | * | ||
3463 | * For ext4 extent based file, since we support fallocate, | ||
3464 | * new allocated extent as uninitialized, for holes, we | ||
3465 | * could fallocate blocks for holes, thus parallel | ||
3466 | * buffered IO read will zero out the page when read on | ||
3467 | * a hole while parallel DIO write to the hole has not completed. | ||
3468 | * | ||
3469 | * when we come here, we know it's a direct IO write to | ||
3470 | * to the middle of file (<i_size) | ||
3471 | * so it's safe to override the create flag from VFS. | ||
3472 | */ | 3563 | */ |
3473 | create = EXT4_GET_BLOCKS_DIO_CREATE_EXT; | 3564 | create = EXT4_GET_BLOCKS_IO_CREATE_EXT; |
3474 | 3565 | ||
3475 | if (max_blocks > DIO_MAX_BLOCKS) | 3566 | if (!handle) { |
3476 | max_blocks = DIO_MAX_BLOCKS; | 3567 | if (max_blocks > DIO_MAX_BLOCKS) |
3477 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); | 3568 | max_blocks = DIO_MAX_BLOCKS; |
3478 | handle = ext4_journal_start(inode, dio_credits); | 3569 | dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); |
3479 | if (IS_ERR(handle)) { | 3570 | handle = ext4_journal_start(inode, dio_credits); |
3480 | ret = PTR_ERR(handle); | 3571 | if (IS_ERR(handle)) { |
3481 | goto out; | 3572 | ret = PTR_ERR(handle); |
3573 | goto out; | ||
3574 | } | ||
3575 | started = 1; | ||
3482 | } | 3576 | } |
3577 | |||
3483 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, | 3578 | ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, |
3484 | create); | 3579 | create); |
3485 | if (ret > 0) { | 3580 | if (ret > 0) { |
3486 | bh_result->b_size = (ret << inode->i_blkbits); | 3581 | bh_result->b_size = (ret << inode->i_blkbits); |
3487 | ret = 0; | 3582 | ret = 0; |
3488 | } | 3583 | } |
3489 | ext4_journal_stop(handle); | 3584 | if (started) |
3585 | ext4_journal_stop(handle); | ||
3490 | out: | 3586 | out: |
3491 | return ret; | 3587 | return ret; |
3492 | } | 3588 | } |
3493 | 3589 | ||
3494 | static void ext4_free_io_end(ext4_io_end_t *io) | 3590 | static void dump_completed_IO(struct inode * inode) |
3495 | { | ||
3496 | BUG_ON(!io); | ||
3497 | iput(io->inode); | ||
3498 | kfree(io); | ||
3499 | } | ||
3500 | static void dump_aio_dio_list(struct inode * inode) | ||
3501 | { | 3591 | { |
3502 | #ifdef EXT4_DEBUG | 3592 | #ifdef EXT4_DEBUG |
3503 | struct list_head *cur, *before, *after; | 3593 | struct list_head *cur, *before, *after; |
3504 | ext4_io_end_t *io, *io0, *io1; | 3594 | ext4_io_end_t *io, *io0, *io1; |
3595 | unsigned long flags; | ||
3505 | 3596 | ||
3506 | if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ | 3597 | if (list_empty(&EXT4_I(inode)->i_completed_io_list)){ |
3507 | ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino); | 3598 | ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino); |
3508 | return; | 3599 | return; |
3509 | } | 3600 | } |
3510 | 3601 | ||
3511 | ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino); | 3602 | ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino); |
3512 | list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){ | 3603 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); |
3604 | list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){ | ||
3513 | cur = &io->list; | 3605 | cur = &io->list; |
3514 | before = cur->prev; | 3606 | before = cur->prev; |
3515 | io0 = container_of(before, ext4_io_end_t, list); | 3607 | io0 = container_of(before, ext4_io_end_t, list); |
@@ -3519,32 +3611,31 @@ static void dump_aio_dio_list(struct inode * inode) | |||
3519 | ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", | 3611 | ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", |
3520 | io, inode->i_ino, io0, io1); | 3612 | io, inode->i_ino, io0, io1); |
3521 | } | 3613 | } |
3614 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
3522 | #endif | 3615 | #endif |
3523 | } | 3616 | } |
3524 | 3617 | ||
3525 | /* | 3618 | /* |
3526 | * check a range of space and convert unwritten extents to written. | 3619 | * check a range of space and convert unwritten extents to written. |
3527 | */ | 3620 | */ |
3528 | static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) | 3621 | static int ext4_end_io_nolock(ext4_io_end_t *io) |
3529 | { | 3622 | { |
3530 | struct inode *inode = io->inode; | 3623 | struct inode *inode = io->inode; |
3531 | loff_t offset = io->offset; | 3624 | loff_t offset = io->offset; |
3532 | size_t size = io->size; | 3625 | ssize_t size = io->size; |
3533 | int ret = 0; | 3626 | int ret = 0; |
3534 | 3627 | ||
3535 | ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p," | 3628 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," |
3536 | "list->prev 0x%p\n", | 3629 | "list->prev 0x%p\n", |
3537 | io, inode->i_ino, io->list.next, io->list.prev); | 3630 | io, inode->i_ino, io->list.next, io->list.prev); |
3538 | 3631 | ||
3539 | if (list_empty(&io->list)) | 3632 | if (list_empty(&io->list)) |
3540 | return ret; | 3633 | return ret; |
3541 | 3634 | ||
3542 | if (io->flag != DIO_AIO_UNWRITTEN) | 3635 | if (io->flag != EXT4_IO_UNWRITTEN) |
3543 | return ret; | 3636 | return ret; |
3544 | 3637 | ||
3545 | if (offset + size <= i_size_read(inode)) | 3638 | ret = ext4_convert_unwritten_extents(inode, offset, size); |
3546 | ret = ext4_convert_unwritten_extents(inode, offset, size); | ||
3547 | |||
3548 | if (ret < 0) { | 3639 | if (ret < 0) { |
3549 | printk(KERN_EMERG "%s: failed to convert unwritten" | 3640 | printk(KERN_EMERG "%s: failed to convert unwritten" |
3550 | "extents to written extents, error is %d" | 3641 | "extents to written extents, error is %d" |
@@ -3557,50 +3648,64 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io) | |||
3557 | io->flag = 0; | 3648 | io->flag = 0; |
3558 | return ret; | 3649 | return ret; |
3559 | } | 3650 | } |
3651 | |||
3560 | /* | 3652 | /* |
3561 | * work on completed aio dio IO, to convert unwritten extents to written extents | 3653 | * work on completed aio dio IO, to convert unwritten extents to written extents |
3562 | */ | 3654 | */ |
3563 | static void ext4_end_aio_dio_work(struct work_struct *work) | 3655 | static void ext4_end_io_work(struct work_struct *work) |
3564 | { | 3656 | { |
3565 | ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); | 3657 | ext4_io_end_t *io = container_of(work, ext4_io_end_t, work); |
3566 | struct inode *inode = io->inode; | 3658 | struct inode *inode = io->inode; |
3567 | int ret = 0; | 3659 | struct ext4_inode_info *ei = EXT4_I(inode); |
3660 | unsigned long flags; | ||
3661 | int ret; | ||
3568 | 3662 | ||
3569 | mutex_lock(&inode->i_mutex); | 3663 | mutex_lock(&inode->i_mutex); |
3570 | ret = ext4_end_aio_dio_nolock(io); | 3664 | ret = ext4_end_io_nolock(io); |
3571 | if (ret >= 0) { | 3665 | if (ret < 0) { |
3572 | if (!list_empty(&io->list)) | 3666 | mutex_unlock(&inode->i_mutex); |
3573 | list_del_init(&io->list); | 3667 | return; |
3574 | ext4_free_io_end(io); | ||
3575 | } | 3668 | } |
3669 | |||
3670 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | ||
3671 | if (!list_empty(&io->list)) | ||
3672 | list_del_init(&io->list); | ||
3673 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
3576 | mutex_unlock(&inode->i_mutex); | 3674 | mutex_unlock(&inode->i_mutex); |
3675 | ext4_free_io_end(io); | ||
3577 | } | 3676 | } |
3677 | |||
3578 | /* | 3678 | /* |
3579 | * This function is called from ext4_sync_file(). | 3679 | * This function is called from ext4_sync_file(). |
3580 | * | 3680 | * |
3581 | * When AIO DIO IO is completed, the work to convert unwritten | 3681 | * When IO is completed, the work to convert unwritten extents to |
3582 | * extents to written is queued on workqueue but may not get immediately | 3682 | * written is queued on workqueue but may not get immediately |
3583 | * scheduled. When fsync is called, we need to ensure the | 3683 | * scheduled. When fsync is called, we need to ensure the |
3584 | * conversion is complete before fsync returns. | 3684 | * conversion is complete before fsync returns. |
3585 | * The inode keeps track of a list of completed AIO from DIO path | 3685 | * The inode keeps track of a list of pending/completed IO that |
3586 | * that might need to do the conversion. This function walks through | 3686 | might need to do the conversion. This function walks through |
3587 | * the list and convert the related unwritten extents to written. | 3687 | * the list and convert the related unwritten extents for completed IO |
3688 | * to written. | ||
3689 | * The function returns the number of pending IOs on success. | ||
3588 | */ | 3690 | */ |
3589 | int flush_aio_dio_completed_IO(struct inode *inode) | 3691 | int flush_completed_IO(struct inode *inode) |
3590 | { | 3692 | { |
3591 | ext4_io_end_t *io; | 3693 | ext4_io_end_t *io; |
3694 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
3695 | unsigned long flags; | ||
3592 | int ret = 0; | 3696 | int ret = 0; |
3593 | int ret2 = 0; | 3697 | int ret2 = 0; |
3594 | 3698 | ||
3595 | if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) | 3699 | if (list_empty(&ei->i_completed_io_list)) |
3596 | return ret; | 3700 | return ret; |
3597 | 3701 | ||
3598 | dump_aio_dio_list(inode); | 3702 | dump_completed_IO(inode); |
3599 | while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){ | 3703 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
3600 | io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next, | 3704 | while (!list_empty(&ei->i_completed_io_list)){ |
3705 | io = list_entry(ei->i_completed_io_list.next, | ||
3601 | ext4_io_end_t, list); | 3706 | ext4_io_end_t, list); |
3602 | /* | 3707 | /* |
3603 | * Calling ext4_end_aio_dio_nolock() to convert completed | 3708 | * Calling ext4_end_io_nolock() to convert completed |
3604 | * IO to written. | 3709 | * IO to written. |
3605 | * | 3710 | * |
3606 | * When ext4_sync_file() is called, run_queue() may already | 3711 | * When ext4_sync_file() is called, run_queue() may already |
@@ -3613,20 +3718,23 @@ int flush_aio_dio_completed_IO(struct inode *inode) | |||
3613 | * avoid double converting from both fsync and background work | 3718 | * avoid double converting from both fsync and background work |
3614 | * queue work. | 3719 | * queue work. |
3615 | */ | 3720 | */ |
3616 | ret = ext4_end_aio_dio_nolock(io); | 3721 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
3722 | ret = ext4_end_io_nolock(io); | ||
3723 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); | ||
3617 | if (ret < 0) | 3724 | if (ret < 0) |
3618 | ret2 = ret; | 3725 | ret2 = ret; |
3619 | else | 3726 | else |
3620 | list_del_init(&io->list); | 3727 | list_del_init(&io->list); |
3621 | } | 3728 | } |
3729 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
3622 | return (ret2 < 0) ? ret2 : 0; | 3730 | return (ret2 < 0) ? ret2 : 0; |
3623 | } | 3731 | } |
3624 | 3732 | ||
3625 | static ext4_io_end_t *ext4_init_io_end (struct inode *inode) | 3733 | static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) |
3626 | { | 3734 | { |
3627 | ext4_io_end_t *io = NULL; | 3735 | ext4_io_end_t *io = NULL; |
3628 | 3736 | ||
3629 | io = kmalloc(sizeof(*io), GFP_NOFS); | 3737 | io = kmalloc(sizeof(*io), flags); |
3630 | 3738 | ||
3631 | if (io) { | 3739 | if (io) { |
3632 | igrab(inode); | 3740 | igrab(inode); |
@@ -3634,8 +3742,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode) | |||
3634 | io->flag = 0; | 3742 | io->flag = 0; |
3635 | io->offset = 0; | 3743 | io->offset = 0; |
3636 | io->size = 0; | 3744 | io->size = 0; |
3637 | io->error = 0; | 3745 | io->page = NULL; |
3638 | INIT_WORK(&io->work, ext4_end_aio_dio_work); | 3746 | INIT_WORK(&io->work, ext4_end_io_work); |
3639 | INIT_LIST_HEAD(&io->list); | 3747 | INIT_LIST_HEAD(&io->list); |
3640 | } | 3748 | } |
3641 | 3749 | ||
@@ -3647,6 +3755,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3647 | { | 3755 | { |
3648 | ext4_io_end_t *io_end = iocb->private; | 3756 | ext4_io_end_t *io_end = iocb->private; |
3649 | struct workqueue_struct *wq; | 3757 | struct workqueue_struct *wq; |
3758 | unsigned long flags; | ||
3759 | struct ext4_inode_info *ei; | ||
3650 | 3760 | ||
3651 | /* if not async direct IO or dio with 0 bytes write, just return */ | 3761 | /* if not async direct IO or dio with 0 bytes write, just return */ |
3652 | if (!io_end || !size) | 3762 | if (!io_end || !size) |
@@ -3658,7 +3768,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3658 | size); | 3768 | size); |
3659 | 3769 | ||
3660 | /* if not aio dio with unwritten extents, just free io and return */ | 3770 | /* if not aio dio with unwritten extents, just free io and return */ |
3661 | if (io_end->flag != DIO_AIO_UNWRITTEN){ | 3771 | if (io_end->flag != EXT4_IO_UNWRITTEN){ |
3662 | ext4_free_io_end(io_end); | 3772 | ext4_free_io_end(io_end); |
3663 | iocb->private = NULL; | 3773 | iocb->private = NULL; |
3664 | return; | 3774 | return; |
@@ -3666,16 +3776,85 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3666 | 3776 | ||
3667 | io_end->offset = offset; | 3777 | io_end->offset = offset; |
3668 | io_end->size = size; | 3778 | io_end->size = size; |
3779 | io_end->flag = EXT4_IO_UNWRITTEN; | ||
3669 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; | 3780 | wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; |
3670 | 3781 | ||
3671 | /* queue the work to convert unwritten extents to written */ | 3782 | /* queue the work to convert unwritten extents to written */ |
3672 | queue_work(wq, &io_end->work); | 3783 | queue_work(wq, &io_end->work); |
3673 | 3784 | ||
3674 | /* Add the io_end to per-inode completed aio dio list*/ | 3785 | /* Add the io_end to per-inode completed aio dio list*/ |
3675 | list_add_tail(&io_end->list, | 3786 | ei = EXT4_I(io_end->inode); |
3676 | &EXT4_I(io_end->inode)->i_aio_dio_complete_list); | 3787 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
3788 | list_add_tail(&io_end->list, &ei->i_completed_io_list); | ||
3789 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | ||
3677 | iocb->private = NULL; | 3790 | iocb->private = NULL; |
3678 | } | 3791 | } |
3792 | |||
3793 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) | ||
3794 | { | ||
3795 | ext4_io_end_t *io_end = bh->b_private; | ||
3796 | struct workqueue_struct *wq; | ||
3797 | struct inode *inode; | ||
3798 | unsigned long flags; | ||
3799 | |||
3800 | if (!test_clear_buffer_uninit(bh) || !io_end) | ||
3801 | goto out; | ||
3802 | |||
3803 | if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { | ||
3804 | printk("sb umounted, discard end_io request for inode %lu\n", | ||
3805 | io_end->inode->i_ino); | ||
3806 | ext4_free_io_end(io_end); | ||
3807 | goto out; | ||
3808 | } | ||
3809 | |||
3810 | io_end->flag = EXT4_IO_UNWRITTEN; | ||
3811 | inode = io_end->inode; | ||
3812 | |||
3813 | /* Add the io_end to per-inode completed io list*/ | ||
3814 | spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
3815 | list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); | ||
3816 | spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); | ||
3817 | |||
3818 | wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; | ||
3819 | /* queue the work to convert unwritten extents to written */ | ||
3820 | queue_work(wq, &io_end->work); | ||
3821 | out: | ||
3822 | bh->b_private = NULL; | ||
3823 | bh->b_end_io = NULL; | ||
3824 | clear_buffer_uninit(bh); | ||
3825 | end_buffer_async_write(bh, uptodate); | ||
3826 | } | ||
3827 | |||
3828 | static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) | ||
3829 | { | ||
3830 | ext4_io_end_t *io_end; | ||
3831 | struct page *page = bh->b_page; | ||
3832 | loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; | ||
3833 | size_t size = bh->b_size; | ||
3834 | |||
3835 | retry: | ||
3836 | io_end = ext4_init_io_end(inode, GFP_ATOMIC); | ||
3837 | if (!io_end) { | ||
3838 | if (printk_ratelimit()) | ||
3839 | printk(KERN_WARNING "%s: allocation fail\n", __func__); | ||
3840 | schedule(); | ||
3841 | goto retry; | ||
3842 | } | ||
3843 | io_end->offset = offset; | ||
3844 | io_end->size = size; | ||
3845 | /* | ||
3846 | * We need to hold a reference to the page to make sure it | ||
3847 | * doesn't get evicted before ext4_end_io_work() has a chance | ||
3848 | * to convert the extent from written to unwritten. | ||
3849 | */ | ||
3850 | io_end->page = page; | ||
3851 | get_page(io_end->page); | ||
3852 | |||
3853 | bh->b_private = io_end; | ||
3854 | bh->b_end_io = ext4_end_io_buffer_write; | ||
3855 | return 0; | ||
3856 | } | ||
3857 | |||
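
Taken together, the hunks above give each inode a small completion pipeline for writes into unwritten extents. The sketch below strings the pieces together in rough call order; it is a schematic assembled from the code shown here (same symbol names as in the patch), not a standalone translation unit.

    /* 1. Submission: allocate the tracking object.
     *    - async direct I/O:   ext4_init_io_end(inode, GFP_NOFS) in ext4_ext_direct_IO()
     *    - buffered writeback: ext4_init_io_end(inode, GFP_ATOMIC) in ext4_set_bh_endio(),
     *      which also points bh->b_end_io at ext4_end_io_buffer_write()
     */
    io_end = ext4_init_io_end(inode, gfp);

    /* 2. Completion (DIO completion callback or buffer end_io, possibly in irq
     *    context): mark the range unwritten, park it on the per-inode list,
     *    kick the per-sb workqueue.
     */
    io_end->flag = EXT4_IO_UNWRITTEN;
    spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
    list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
    spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
    queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io_end->work);

    /* 3. Conversion, under i_mutex: either ext4_end_io_work() from the
     *    workqueue or fsync via flush_completed_IO(), whichever runs first.
     *    flush_completed_IO() drops the spinlock around this call because
     *    ext4_convert_unwritten_extents() can block on the journal.
     */
    ret = ext4_end_io_nolock(io_end);
    if (ret >= 0) {
            list_del_init(&io_end->list);   /* under i_completed_io_lock */
            ext4_free_io_end(io_end);       /* freeing is the workqueue side's job */
    }
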
3679 | /* | 3858 | /* |
3680 | * For ext4 extent files, ext4 will do direct-io write to holes, | 3859 | * For ext4 extent files, ext4 will do direct-io write to holes, |
3681 | * preallocated extents, and those write extend the file, no need to | 3860 | * preallocated extents, and those write extend the file, no need to |
@@ -3729,7 +3908,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3729 | iocb->private = NULL; | 3908 | iocb->private = NULL; |
3730 | EXT4_I(inode)->cur_aio_dio = NULL; | 3909 | EXT4_I(inode)->cur_aio_dio = NULL; |
3731 | if (!is_sync_kiocb(iocb)) { | 3910 | if (!is_sync_kiocb(iocb)) { |
3732 | iocb->private = ext4_init_io_end(inode); | 3911 | iocb->private = ext4_init_io_end(inode, GFP_NOFS); |
3733 | if (!iocb->private) | 3912 | if (!iocb->private) |
3734 | return -ENOMEM; | 3913 | return -ENOMEM; |
3735 | /* | 3914 | /* |
@@ -3745,7 +3924,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3745 | ret = blockdev_direct_IO(rw, iocb, inode, | 3924 | ret = blockdev_direct_IO(rw, iocb, inode, |
3746 | inode->i_sb->s_bdev, iov, | 3925 | inode->i_sb->s_bdev, iov, |
3747 | offset, nr_segs, | 3926 | offset, nr_segs, |
3748 | ext4_get_block_dio_write, | 3927 | ext4_get_block_write, |
3749 | ext4_end_io_dio); | 3928 | ext4_end_io_dio); |
3750 | if (iocb->private) | 3929 | if (iocb->private) |
3751 | EXT4_I(inode)->cur_aio_dio = NULL; | 3930 | EXT4_I(inode)->cur_aio_dio = NULL; |
@@ -3766,8 +3945,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3766 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { | 3945 | if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { |
3767 | ext4_free_io_end(iocb->private); | 3946 | ext4_free_io_end(iocb->private); |
3768 | iocb->private = NULL; | 3947 | iocb->private = NULL; |
3769 | } else if (ret > 0 && (EXT4_I(inode)->i_state & | 3948 | } else if (ret > 0 && ext4_test_inode_state(inode, |
3770 | EXT4_STATE_DIO_UNWRITTEN)) { | 3949 | EXT4_STATE_DIO_UNWRITTEN)) { |
3771 | int err; | 3950 | int err; |
3772 | /* | 3951 | /* |
3773 | * for non AIO case, since the IO is already | 3952 | * for non AIO case, since the IO is already |
@@ -3777,7 +3956,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, | |||
3777 | offset, ret); | 3956 | offset, ret); |
3778 | if (err < 0) | 3957 | if (err < 0) |
3779 | ret = err; | 3958 | ret = err; |
3780 | EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN; | 3959 | ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
3781 | } | 3960 | } |
3782 | return ret; | 3961 | return ret; |
3783 | } | 3962 | } |
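
The repeated conversions from open-coded EXT4_I(inode)->i_state tests to ext4_test_inode_state()/ext4_set_inode_state()/ext4_clear_inode_state() in this file (and in migrate.c further down) go together with the ei->i_state to ei->i_state_flags rename a few hunks below. The helpers themselves live in ext4.h, outside the hunks shown here; they are presumably thin wrappers around the atomic bitops, roughly along these lines (an assumption, not quoted from the patch):

    static inline int ext4_test_inode_state(struct inode *inode, int bit)
    {
            return test_bit(bit, &EXT4_I(inode)->i_state_flags);
    }

    static inline void ext4_set_inode_state(struct inode *inode, int bit)
    {
            set_bit(bit, &EXT4_I(inode)->i_state_flags);
    }

    static inline void ext4_clear_inode_state(struct inode *inode, int bit)
    {
            clear_bit(bit, &EXT4_I(inode)->i_state_flags);
    }

Unlike ei->i_state |= FLAG, the bitops are atomic per bit, so two contexts updating different EXT4_STATE_* bits cannot lose each other's update.
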
@@ -4108,18 +4287,27 @@ no_top: | |||
4108 | * We release `count' blocks on disk, but (last - first) may be greater | 4287 | * We release `count' blocks on disk, but (last - first) may be greater |
4109 | * than `count' because there can be holes in there. | 4288 | * than `count' because there can be holes in there. |
4110 | */ | 4289 | */ |
4111 | static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | 4290 | static int ext4_clear_blocks(handle_t *handle, struct inode *inode, |
4112 | struct buffer_head *bh, | 4291 | struct buffer_head *bh, |
4113 | ext4_fsblk_t block_to_free, | 4292 | ext4_fsblk_t block_to_free, |
4114 | unsigned long count, __le32 *first, | 4293 | unsigned long count, __le32 *first, |
4115 | __le32 *last) | 4294 | __le32 *last) |
4116 | { | 4295 | { |
4117 | __le32 *p; | 4296 | __le32 *p; |
4118 | int flags = EXT4_FREE_BLOCKS_FORGET; | 4297 | int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; |
4119 | 4298 | ||
4120 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) | 4299 | if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
4121 | flags |= EXT4_FREE_BLOCKS_METADATA; | 4300 | flags |= EXT4_FREE_BLOCKS_METADATA; |
4122 | 4301 | ||
4302 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, | ||
4303 | count)) { | ||
4304 | ext4_error(inode->i_sb, "inode #%lu: " | ||
4305 | "attempt to clear blocks %llu len %lu, invalid", | ||
4306 | inode->i_ino, (unsigned long long) block_to_free, | ||
4307 | count); | ||
4308 | return 1; | ||
4309 | } | ||
4310 | |||
4123 | if (try_to_extend_transaction(handle, inode)) { | 4311 | if (try_to_extend_transaction(handle, inode)) { |
4124 | if (bh) { | 4312 | if (bh) { |
4125 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 4313 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); |
@@ -4138,6 +4326,7 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | |||
4138 | *p = 0; | 4326 | *p = 0; |
4139 | 4327 | ||
4140 | ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); | 4328 | ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); |
4329 | return 0; | ||
4141 | } | 4330 | } |
4142 | 4331 | ||
4143 | /** | 4332 | /** |
@@ -4193,9 +4382,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, | |||
4193 | } else if (nr == block_to_free + count) { | 4382 | } else if (nr == block_to_free + count) { |
4194 | count++; | 4383 | count++; |
4195 | } else { | 4384 | } else { |
4196 | ext4_clear_blocks(handle, inode, this_bh, | 4385 | if (ext4_clear_blocks(handle, inode, this_bh, |
4197 | block_to_free, | 4386 | block_to_free, count, |
4198 | count, block_to_free_p, p); | 4387 | block_to_free_p, p)) |
4388 | break; | ||
4199 | block_to_free = nr; | 4389 | block_to_free = nr; |
4200 | block_to_free_p = p; | 4390 | block_to_free_p = p; |
4201 | count = 1; | 4391 | count = 1; |
@@ -4219,7 +4409,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, | |||
4219 | if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) | 4409 | if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) |
4220 | ext4_handle_dirty_metadata(handle, inode, this_bh); | 4410 | ext4_handle_dirty_metadata(handle, inode, this_bh); |
4221 | else | 4411 | else |
4222 | ext4_error(inode->i_sb, __func__, | 4412 | ext4_error(inode->i_sb, |
4223 | "circular indirect block detected, " | 4413 | "circular indirect block detected, " |
4224 | "inode=%lu, block=%llu", | 4414 | "inode=%lu, block=%llu", |
4225 | inode->i_ino, | 4415 | inode->i_ino, |
@@ -4259,6 +4449,16 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, | |||
4259 | if (!nr) | 4449 | if (!nr) |
4260 | continue; /* A hole */ | 4450 | continue; /* A hole */ |
4261 | 4451 | ||
4452 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), | ||
4453 | nr, 1)) { | ||
4454 | ext4_error(inode->i_sb, | ||
4455 | "indirect mapped block in inode " | ||
4456 | "#%lu invalid (level %d, blk #%lu)", | ||
4457 | inode->i_ino, depth, | ||
4458 | (unsigned long) nr); | ||
4459 | break; | ||
4460 | } | ||
4461 | |||
4262 | /* Go read the buffer for the next level down */ | 4462 | /* Go read the buffer for the next level down */ |
4263 | bh = sb_bread(inode->i_sb, nr); | 4463 | bh = sb_bread(inode->i_sb, nr); |
4264 | 4464 | ||
@@ -4267,7 +4467,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, | |||
4267 | * (should be rare). | 4467 | * (should be rare). |
4268 | */ | 4468 | */ |
4269 | if (!bh) { | 4469 | if (!bh) { |
4270 | ext4_error(inode->i_sb, "ext4_free_branches", | 4470 | ext4_error(inode->i_sb, |
4271 | "Read failure, inode=%lu, block=%llu", | 4471 | "Read failure, inode=%lu, block=%llu", |
4272 | inode->i_ino, nr); | 4472 | inode->i_ino, nr); |
4273 | continue; | 4473 | continue; |
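
The two new ext4_data_block_valid() checks above also change ext4_clear_blocks() from void to int: a corrupted indirect block can no longer feed an arbitrary range into the free path, because ext4_free_data() and ext4_free_branches() stop walking as soon as a bad block number shows up. Condensed into one guard (same names as the hunks; handle, inode and the surrounding locals are assumed to be in scope as in the real function):

    if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, count)) {
            ext4_error(inode->i_sb, "inode #%lu: "
                       "attempt to clear blocks %llu len %lu, invalid",
                       inode->i_ino, (unsigned long long) block_to_free, count);
            return 1;                       /* caller aborts the truncate walk */
    }
    ext4_free_blocks(handle, inode, 0, block_to_free, count,
                     EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED);
    return 0;

EXT4_FREE_BLOCKS_VALIDATED tells ext4_free_blocks() (see the mballoc.c hunks further down) that the range has already been checked, so it is not re-validated a second time.
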
@@ -4411,8 +4611,10 @@ void ext4_truncate(struct inode *inode) | |||
4411 | if (!ext4_can_truncate(inode)) | 4611 | if (!ext4_can_truncate(inode)) |
4412 | return; | 4612 | return; |
4413 | 4613 | ||
4614 | EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; | ||
4615 | |||
4414 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) | 4616 | if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) |
4415 | ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; | 4617 | ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); |
4416 | 4618 | ||
4417 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { | 4619 | if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { |
4418 | ext4_ext_truncate(inode); | 4620 | ext4_ext_truncate(inode); |
@@ -4582,9 +4784,8 @@ static int __ext4_get_inode_loc(struct inode *inode, | |||
4582 | 4784 | ||
4583 | bh = sb_getblk(sb, block); | 4785 | bh = sb_getblk(sb, block); |
4584 | if (!bh) { | 4786 | if (!bh) { |
4585 | ext4_error(sb, "ext4_get_inode_loc", "unable to read " | 4787 | ext4_error(sb, "unable to read inode block - " |
4586 | "inode block - inode=%lu, block=%llu", | 4788 | "inode=%lu, block=%llu", inode->i_ino, block); |
4587 | inode->i_ino, block); | ||
4588 | return -EIO; | 4789 | return -EIO; |
4589 | } | 4790 | } |
4590 | if (!buffer_uptodate(bh)) { | 4791 | if (!buffer_uptodate(bh)) { |
@@ -4682,9 +4883,8 @@ make_io: | |||
4682 | submit_bh(READ_META, bh); | 4883 | submit_bh(READ_META, bh); |
4683 | wait_on_buffer(bh); | 4884 | wait_on_buffer(bh); |
4684 | if (!buffer_uptodate(bh)) { | 4885 | if (!buffer_uptodate(bh)) { |
4685 | ext4_error(sb, __func__, | 4886 | ext4_error(sb, "unable to read inode block - inode=%lu," |
4686 | "unable to read inode block - inode=%lu, " | 4887 | " block=%llu", inode->i_ino, block); |
4687 | "block=%llu", inode->i_ino, block); | ||
4688 | brelse(bh); | 4888 | brelse(bh); |
4689 | return -EIO; | 4889 | return -EIO; |
4690 | } | 4890 | } |
@@ -4698,7 +4898,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | |||
4698 | { | 4898 | { |
4699 | /* We have all inode data except xattrs in memory here. */ | 4899 | /* We have all inode data except xattrs in memory here. */ |
4700 | return __ext4_get_inode_loc(inode, iloc, | 4900 | return __ext4_get_inode_loc(inode, iloc, |
4701 | !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); | 4901 | !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); |
4702 | } | 4902 | } |
4703 | 4903 | ||
4704 | void ext4_set_inode_flags(struct inode *inode) | 4904 | void ext4_set_inode_flags(struct inode *inode) |
@@ -4792,7 +4992,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4792 | } | 4992 | } |
4793 | inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); | 4993 | inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); |
4794 | 4994 | ||
4795 | ei->i_state = 0; | 4995 | ei->i_state_flags = 0; |
4796 | ei->i_dir_start_lookup = 0; | 4996 | ei->i_dir_start_lookup = 0; |
4797 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); | 4997 | ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); |
4798 | /* We now have enough fields to check if the inode was active or not. | 4998 | /* We now have enough fields to check if the inode was active or not. |
@@ -4875,7 +5075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4875 | EXT4_GOOD_OLD_INODE_SIZE + | 5075 | EXT4_GOOD_OLD_INODE_SIZE + |
4876 | ei->i_extra_isize; | 5076 | ei->i_extra_isize; |
4877 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) | 5077 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) |
4878 | ei->i_state |= EXT4_STATE_XATTR; | 5078 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
4879 | } | 5079 | } |
4880 | } else | 5080 | } else |
4881 | ei->i_extra_isize = 0; | 5081 | ei->i_extra_isize = 0; |
@@ -4895,8 +5095,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4895 | ret = 0; | 5095 | ret = 0; |
4896 | if (ei->i_file_acl && | 5096 | if (ei->i_file_acl && |
4897 | !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { | 5097 | !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { |
4898 | ext4_error(sb, __func__, | 5098 | ext4_error(sb, "bad extended attribute block %llu inode #%lu", |
4899 | "bad extended attribute block %llu in inode #%lu", | ||
4900 | ei->i_file_acl, inode->i_ino); | 5099 | ei->i_file_acl, inode->i_ino); |
4901 | ret = -EIO; | 5100 | ret = -EIO; |
4902 | goto bad_inode; | 5101 | goto bad_inode; |
@@ -4942,8 +5141,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4942 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 5141 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
4943 | } else { | 5142 | } else { |
4944 | ret = -EIO; | 5143 | ret = -EIO; |
4945 | ext4_error(inode->i_sb, __func__, | 5144 | ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu", |
4946 | "bogus i_mode (%o) for inode=%lu", | ||
4947 | inode->i_mode, inode->i_ino); | 5145 | inode->i_mode, inode->i_ino); |
4948 | goto bad_inode; | 5146 | goto bad_inode; |
4949 | } | 5147 | } |
@@ -5015,7 +5213,7 @@ static int ext4_do_update_inode(handle_t *handle, | |||
5015 | 5213 | ||
5016 | /* For fields not not tracking in the in-memory inode, | 5214 | /* For fields not not tracking in the in-memory inode, |
5017 | * initialise them to zero for new inodes. */ | 5215 | * initialise them to zero for new inodes. */ |
5018 | if (ei->i_state & EXT4_STATE_NEW) | 5216 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) |
5019 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | 5217 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
5020 | 5218 | ||
5021 | ext4_get_inode_flags(ei); | 5219 | ext4_get_inode_flags(ei); |
@@ -5079,7 +5277,7 @@ static int ext4_do_update_inode(handle_t *handle, | |||
5079 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); | 5277 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); |
5080 | sb->s_dirt = 1; | 5278 | sb->s_dirt = 1; |
5081 | ext4_handle_sync(handle); | 5279 | ext4_handle_sync(handle); |
5082 | err = ext4_handle_dirty_metadata(handle, inode, | 5280 | err = ext4_handle_dirty_metadata(handle, NULL, |
5083 | EXT4_SB(sb)->s_sbh); | 5281 | EXT4_SB(sb)->s_sbh); |
5084 | } | 5282 | } |
5085 | } | 5283 | } |
@@ -5108,10 +5306,10 @@ static int ext4_do_update_inode(handle_t *handle, | |||
5108 | } | 5306 | } |
5109 | 5307 | ||
5110 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | 5308 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); |
5111 | rc = ext4_handle_dirty_metadata(handle, inode, bh); | 5309 | rc = ext4_handle_dirty_metadata(handle, NULL, bh); |
5112 | if (!err) | 5310 | if (!err) |
5113 | err = rc; | 5311 | err = rc; |
5114 | ei->i_state &= ~EXT4_STATE_NEW; | 5312 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); |
5115 | 5313 | ||
5116 | ext4_update_inode_fsync_trans(handle, inode, 0); | 5314 | ext4_update_inode_fsync_trans(handle, inode, 0); |
5117 | out_brelse: | 5315 | out_brelse: |
@@ -5155,7 +5353,7 @@ out_brelse: | |||
5155 | * `stuff()' is running, and the new i_size will be lost. Plus the inode | 5353 | * `stuff()' is running, and the new i_size will be lost. Plus the inode |
5156 | * will no longer be on the superblock's dirty inode list. | 5354 | * will no longer be on the superblock's dirty inode list. |
5157 | */ | 5355 | */ |
5158 | int ext4_write_inode(struct inode *inode, int wait) | 5356 | int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) |
5159 | { | 5357 | { |
5160 | int err; | 5358 | int err; |
5161 | 5359 | ||
@@ -5169,7 +5367,7 @@ int ext4_write_inode(struct inode *inode, int wait) | |||
5169 | return -EIO; | 5367 | return -EIO; |
5170 | } | 5368 | } |
5171 | 5369 | ||
5172 | if (!wait) | 5370 | if (wbc->sync_mode != WB_SYNC_ALL) |
5173 | return 0; | 5371 | return 0; |
5174 | 5372 | ||
5175 | err = ext4_force_commit(inode->i_sb); | 5373 | err = ext4_force_commit(inode->i_sb); |
@@ -5179,13 +5377,11 @@ int ext4_write_inode(struct inode *inode, int wait) | |||
5179 | err = ext4_get_inode_loc(inode, &iloc); | 5377 | err = ext4_get_inode_loc(inode, &iloc); |
5180 | if (err) | 5378 | if (err) |
5181 | return err; | 5379 | return err; |
5182 | if (wait) | 5380 | if (wbc->sync_mode == WB_SYNC_ALL) |
5183 | sync_dirty_buffer(iloc.bh); | 5381 | sync_dirty_buffer(iloc.bh); |
5184 | if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { | 5382 | if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { |
5185 | ext4_error(inode->i_sb, __func__, | 5383 | ext4_error(inode->i_sb, "IO error syncing inode, " |
5186 | "IO error syncing inode, " | 5384 | "inode=%lu, block=%llu", inode->i_ino, |
5187 | "inode=%lu, block=%llu", | ||
5188 | inode->i_ino, | ||
5189 | (unsigned long long)iloc.bh->b_blocknr); | 5385 | (unsigned long long)iloc.bh->b_blocknr); |
5190 | err = -EIO; | 5386 | err = -EIO; |
5191 | } | 5387 | } |
@@ -5227,6 +5423,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5227 | if (error) | 5423 | if (error) |
5228 | return error; | 5424 | return error; |
5229 | 5425 | ||
5426 | if (ia_valid & ATTR_SIZE) | ||
5427 | dquot_initialize(inode); | ||
5230 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || | 5428 | if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || |
5231 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { | 5429 | (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { |
5232 | handle_t *handle; | 5430 | handle_t *handle; |
@@ -5239,7 +5437,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5239 | error = PTR_ERR(handle); | 5437 | error = PTR_ERR(handle); |
5240 | goto err_out; | 5438 | goto err_out; |
5241 | } | 5439 | } |
5242 | error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; | 5440 | error = dquot_transfer(inode, attr); |
5243 | if (error) { | 5441 | if (error) { |
5244 | ext4_journal_stop(handle); | 5442 | ext4_journal_stop(handle); |
5245 | return error; | 5443 | return error; |
@@ -5266,7 +5464,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5266 | } | 5464 | } |
5267 | 5465 | ||
5268 | if (S_ISREG(inode->i_mode) && | 5466 | if (S_ISREG(inode->i_mode) && |
5269 | attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { | 5467 | attr->ia_valid & ATTR_SIZE && |
5468 | (attr->ia_size < inode->i_size || | ||
5469 | (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) { | ||
5270 | handle_t *handle; | 5470 | handle_t *handle; |
5271 | 5471 | ||
5272 | handle = ext4_journal_start(inode, 3); | 5472 | handle = ext4_journal_start(inode, 3); |
@@ -5297,6 +5497,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5297 | goto err_out; | 5497 | goto err_out; |
5298 | } | 5498 | } |
5299 | } | 5499 | } |
5500 | /* ext4_truncate will clear the flag */ | ||
5501 | if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) | ||
5502 | ext4_truncate(inode); | ||
5300 | } | 5503 | } |
5301 | 5504 | ||
5302 | rc = inode_setattr(inode, attr); | 5505 | rc = inode_setattr(inode, attr); |
@@ -5535,8 +5738,8 @@ static int ext4_expand_extra_isize(struct inode *inode, | |||
5535 | entry = IFIRST(header); | 5738 | entry = IFIRST(header); |
5536 | 5739 | ||
5537 | /* No extended attributes present */ | 5740 | /* No extended attributes present */ |
5538 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || | 5741 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || |
5539 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { | 5742 | header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { |
5540 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, | 5743 | memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, |
5541 | new_extra_isize); | 5744 | new_extra_isize); |
5542 | EXT4_I(inode)->i_extra_isize = new_extra_isize; | 5745 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
@@ -5580,7 +5783,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
5580 | err = ext4_reserve_inode_write(handle, inode, &iloc); | 5783 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
5581 | if (ext4_handle_valid(handle) && | 5784 | if (ext4_handle_valid(handle) && |
5582 | EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && | 5785 | EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && |
5583 | !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { | 5786 | !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { |
5584 | /* | 5787 | /* |
5585 | * We need extra buffer credits since we may write into EA block | 5788 | * We need extra buffer credits since we may write into EA block |
5586 | * with this same handle. If journal_extend fails, then it will | 5789 | * with this same handle. If journal_extend fails, then it will |
@@ -5594,10 +5797,11 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
5594 | sbi->s_want_extra_isize, | 5797 | sbi->s_want_extra_isize, |
5595 | iloc, handle); | 5798 | iloc, handle); |
5596 | if (ret) { | 5799 | if (ret) { |
5597 | EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; | 5800 | ext4_set_inode_state(inode, |
5801 | EXT4_STATE_NO_EXPAND); | ||
5598 | if (mnt_count != | 5802 | if (mnt_count != |
5599 | le16_to_cpu(sbi->s_es->s_mnt_count)) { | 5803 | le16_to_cpu(sbi->s_es->s_mnt_count)) { |
5600 | ext4_warning(inode->i_sb, __func__, | 5804 | ext4_warning(inode->i_sb, |
5601 | "Unable to expand inode %lu. Delete" | 5805 | "Unable to expand inode %lu. Delete" |
5602 | " some EAs or run e2fsck.", | 5806 | " some EAs or run e2fsck.", |
5603 | inode->i_ino); | 5807 | inode->i_ino); |
@@ -5619,7 +5823,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
5619 | * i_size has been changed by generic_commit_write() and we thus need | 5823 | * i_size has been changed by generic_commit_write() and we thus need |
5620 | * to include the updated inode in the current transaction. | 5824 | * to include the updated inode in the current transaction. |
5621 | * | 5825 | * |
5622 | * Also, vfs_dq_alloc_block() will always dirty the inode when blocks | 5826 | * Also, dquot_alloc_block() will always dirty the inode when blocks |
5623 | * are allocated to the file. | 5827 | * are allocated to the file. |
5624 | * | 5828 | * |
5625 | * If the inode is marked synchronous, we don't honour that here - doing | 5829 | * If the inode is marked synchronous, we don't honour that here - doing |
@@ -5661,7 +5865,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode) | |||
5661 | err = jbd2_journal_get_write_access(handle, iloc.bh); | 5865 | err = jbd2_journal_get_write_access(handle, iloc.bh); |
5662 | if (!err) | 5866 | if (!err) |
5663 | err = ext4_handle_dirty_metadata(handle, | 5867 | err = ext4_handle_dirty_metadata(handle, |
5664 | inode, | 5868 | NULL, |
5665 | iloc.bh); | 5869 | iloc.bh); |
5666 | brelse(iloc.bh); | 5870 | brelse(iloc.bh); |
5667 | } | 5871 | } |
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index b63d193126db..016d0249294f 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
92 | flags &= ~EXT4_EXTENTS_FL; | 92 | flags &= ~EXT4_EXTENTS_FL; |
93 | } | 93 | } |
94 | 94 | ||
95 | if (flags & EXT4_EOFBLOCKS_FL) { | ||
96 | /* we don't support adding EOFBLOCKS flag */ | ||
97 | if (!(oldflags & EXT4_EOFBLOCKS_FL)) { | ||
98 | err = -EOPNOTSUPP; | ||
99 | goto flags_out; | ||
100 | } | ||
101 | } else if (oldflags & EXT4_EOFBLOCKS_FL) | ||
102 | ext4_truncate(inode); | ||
103 | |||
95 | handle = ext4_journal_start(inode, 1); | 104 | handle = ext4_journal_start(inode, 1); |
96 | if (IS_ERR(handle)) { | 105 | if (IS_ERR(handle)) { |
97 | err = PTR_ERR(handle); | 106 | err = PTR_ERR(handle); |
@@ -249,7 +258,8 @@ setversion_out: | |||
249 | if (me.moved_len > 0) | 258 | if (me.moved_len > 0) |
250 | file_remove_suid(donor_filp); | 259 | file_remove_suid(donor_filp); |
251 | 260 | ||
252 | if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) | 261 | if (copy_to_user((struct move_extent __user *)arg, |
262 | &me, sizeof(me))) | ||
253 | err = -EFAULT; | 263 | err = -EFAULT; |
254 | mext_out: | 264 | mext_out: |
255 | fput(donor_filp); | 265 | fput(donor_filp); |
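
The EOFBLOCKS_FL handling above means user space cannot switch the flag on through the SETFLAGS ioctl (that case returns EOPNOTSUPP), but clearing an already-set flag is honoured and triggers ext4_truncate(), releasing blocks that, for example, fallocate() with FALLOC_FL_KEEP_SIZE left allocated beyond i_size. A minimal user-space sketch follows; the flag value is copied from ext4.h of this series and should be treated as an assumption here:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>                   /* FS_IOC_GETFLAGS / FS_IOC_SETFLAGS */

    #define EXT4_EOFBLOCKS_FL 0x00400000    /* "blocks allocated beyond EOF" (assumed value) */

    int main(int argc, char **argv)
    {
            int fd, flags;

            if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 && (flags & EXT4_EOFBLOCKS_FL)) {
                    flags &= ~EXT4_EOFBLOCKS_FL;        /* clearing it runs ext4_truncate() */
                    if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
                            perror("FS_IOC_SETFLAGS");
            }
            close(fd);
            return 0;
    }
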
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d34afad3e137..54df209d2eed 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -69,7 +69,7 @@ | |||
69 | * | 69 | * |
70 | * pa_lstart -> the logical start block for this prealloc space | 70 | * pa_lstart -> the logical start block for this prealloc space |
71 | * pa_pstart -> the physical start block for this prealloc space | 71 | * pa_pstart -> the physical start block for this prealloc space |
72 | * pa_len -> lenght for this prealloc space | 72 | * pa_len -> length for this prealloc space |
73 | * pa_free -> free space available in this prealloc space | 73 | * pa_free -> free space available in this prealloc space |
74 | * | 74 | * |
75 | * The inode preallocation space is used looking at the _logical_ start | 75 | * The inode preallocation space is used looking at the _logical_ start |
@@ -441,10 +441,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, | |||
441 | for (i = 0; i < count; i++) { | 441 | for (i = 0; i < count; i++) { |
442 | if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { | 442 | if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { |
443 | ext4_fsblk_t blocknr; | 443 | ext4_fsblk_t blocknr; |
444 | blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb); | 444 | |
445 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | ||
445 | blocknr += first + i; | 446 | blocknr += first + i; |
446 | blocknr += | ||
447 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
448 | ext4_grp_locked_error(sb, e4b->bd_group, | 447 | ext4_grp_locked_error(sb, e4b->bd_group, |
449 | __func__, "double-free of inode" | 448 | __func__, "double-free of inode" |
450 | " %lu's block %llu(bit %u in group %u)", | 449 | " %lu's block %llu(bit %u in group %u)", |
@@ -1255,10 +1254,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, | |||
1255 | 1254 | ||
1256 | if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { | 1255 | if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) { |
1257 | ext4_fsblk_t blocknr; | 1256 | ext4_fsblk_t blocknr; |
1258 | blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb); | 1257 | |
1258 | blocknr = ext4_group_first_block_no(sb, e4b->bd_group); | ||
1259 | blocknr += block; | 1259 | blocknr += block; |
1260 | blocknr += | ||
1261 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
1262 | ext4_grp_locked_error(sb, e4b->bd_group, | 1260 | ext4_grp_locked_error(sb, e4b->bd_group, |
1263 | __func__, "double-free of inode" | 1261 | __func__, "double-free of inode" |
1264 | " %lu's block %llu(bit %u in group %u)", | 1262 | " %lu's block %llu(bit %u in group %u)", |
@@ -1631,7 +1629,6 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, | |||
1631 | int max; | 1629 | int max; |
1632 | int err; | 1630 | int err; |
1633 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); | 1631 | struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); |
1634 | struct ext4_super_block *es = sbi->s_es; | ||
1635 | struct ext4_free_extent ex; | 1632 | struct ext4_free_extent ex; |
1636 | 1633 | ||
1637 | if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) | 1634 | if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) |
@@ -1648,8 +1645,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, | |||
1648 | if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { | 1645 | if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { |
1649 | ext4_fsblk_t start; | 1646 | ext4_fsblk_t start; |
1650 | 1647 | ||
1651 | start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) + | 1648 | start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + |
1652 | ex.fe_start + le32_to_cpu(es->s_first_data_block); | 1649 | ex.fe_start; |
1653 | /* use do_div to get remainder (would be 64-bit modulo) */ | 1650 | /* use do_div to get remainder (would be 64-bit modulo) */ |
1654 | if (do_div(start, sbi->s_stripe) == 0) { | 1651 | if (do_div(start, sbi->s_stripe) == 0) { |
1655 | ac->ac_found++; | 1652 | ac->ac_found++; |
@@ -1803,8 +1800,8 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, | |||
1803 | BUG_ON(sbi->s_stripe == 0); | 1800 | BUG_ON(sbi->s_stripe == 0); |
1804 | 1801 | ||
1805 | /* find first stripe-aligned block in group */ | 1802 | /* find first stripe-aligned block in group */ |
1806 | first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb) | 1803 | first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); |
1807 | + le32_to_cpu(sbi->s_es->s_first_data_block); | 1804 | |
1808 | a = first_group_block + sbi->s_stripe - 1; | 1805 | a = first_group_block + sbi->s_stripe - 1; |
1809 | do_div(a, sbi->s_stripe); | 1806 | do_div(a, sbi->s_stripe); |
1810 | i = (a * sbi->s_stripe) - first_group_block; | 1807 | i = (a * sbi->s_stripe) - first_group_block; |
@@ -2256,7 +2253,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, | |||
2256 | 2253 | ||
2257 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); | 2254 | INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); |
2258 | init_rwsem(&meta_group_info[i]->alloc_sem); | 2255 | init_rwsem(&meta_group_info[i]->alloc_sem); |
2259 | meta_group_info[i]->bb_free_root.rb_node = NULL; | 2256 | meta_group_info[i]->bb_free_root = RB_ROOT; |
2260 | 2257 | ||
2261 | #ifdef DOUBLE_CHECK | 2258 | #ifdef DOUBLE_CHECK |
2262 | { | 2259 | { |
@@ -2560,12 +2557,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
2560 | ext4_unlock_group(sb, entry->group); | 2557 | ext4_unlock_group(sb, entry->group); |
2561 | if (test_opt(sb, DISCARD)) { | 2558 | if (test_opt(sb, DISCARD)) { |
2562 | ext4_fsblk_t discard_block; | 2559 | ext4_fsblk_t discard_block; |
2563 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | ||
2564 | 2560 | ||
2565 | discard_block = (ext4_fsblk_t)entry->group * | 2561 | discard_block = entry->start_blk + |
2566 | EXT4_BLOCKS_PER_GROUP(sb) | 2562 | ext4_group_first_block_no(sb, entry->group); |
2567 | + entry->start_blk | ||
2568 | + le32_to_cpu(es->s_first_data_block); | ||
2569 | trace_ext4_discard_blocks(sb, | 2563 | trace_ext4_discard_blocks(sb, |
2570 | (unsigned long long)discard_block, | 2564 | (unsigned long long)discard_block, |
2571 | entry->count); | 2565 | entry->count); |
@@ -2703,14 +2697,11 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2703 | if (err) | 2697 | if (err) |
2704 | goto out_err; | 2698 | goto out_err; |
2705 | 2699 | ||
2706 | block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb) | 2700 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); |
2707 | + ac->ac_b_ex.fe_start | ||
2708 | + le32_to_cpu(es->s_first_data_block); | ||
2709 | 2701 | ||
2710 | len = ac->ac_b_ex.fe_len; | 2702 | len = ac->ac_b_ex.fe_len; |
2711 | if (!ext4_data_block_valid(sbi, block, len)) { | 2703 | if (!ext4_data_block_valid(sbi, block, len)) { |
2712 | ext4_error(sb, __func__, | 2704 | ext4_error(sb, "Allocating blocks %llu-%llu which overlap " |
2713 | "Allocating blocks %llu-%llu which overlap " | ||
2714 | "fs metadata\n", block, block+len); | 2705 | "fs metadata\n", block, block+len); |
2715 | /* File system mounted not to panic on error | 2706 | /* File system mounted not to panic on error |
2716 | * Fix the bitmap and repeat the block allocation | 2707 | * Fix the bitmap and repeat the block allocation |
@@ -3161,9 +3152,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) | |||
3161 | /* The max size of hash table is PREALLOC_TB_SIZE */ | 3152 | /* The max size of hash table is PREALLOC_TB_SIZE */ |
3162 | order = PREALLOC_TB_SIZE - 1; | 3153 | order = PREALLOC_TB_SIZE - 1; |
3163 | 3154 | ||
3164 | goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) + | 3155 | goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); |
3165 | ac->ac_g_ex.fe_start + | ||
3166 | le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block); | ||
3167 | /* | 3156 | /* |
3168 | * search for the prealloc space that is having | 3157 | * search for the prealloc space that is having |
3169 | * minimal distance from the goal block. | 3158 | * minimal distance from the goal block. |
@@ -3526,8 +3515,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, | |||
3526 | if (bit >= end) | 3515 | if (bit >= end) |
3527 | break; | 3516 | break; |
3528 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); | 3517 | next = mb_find_next_bit(bitmap_bh->b_data, end, bit); |
3529 | start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + | 3518 | start = ext4_group_first_block_no(sb, group) + bit; |
3530 | le32_to_cpu(sbi->s_es->s_first_data_block); | ||
3531 | mb_debug(1, " free preallocated %u/%u in group %u\n", | 3519 | mb_debug(1, " free preallocated %u/%u in group %u\n", |
3532 | (unsigned) start, (unsigned) next - bit, | 3520 | (unsigned) start, (unsigned) next - bit, |
3533 | (unsigned) group); | 3521 | (unsigned) group); |
@@ -3623,15 +3611,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, | |||
3623 | 3611 | ||
3624 | bitmap_bh = ext4_read_block_bitmap(sb, group); | 3612 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
3625 | if (bitmap_bh == NULL) { | 3613 | if (bitmap_bh == NULL) { |
3626 | ext4_error(sb, __func__, "Error in reading block " | 3614 | ext4_error(sb, "Error reading block bitmap for %u", group); |
3627 | "bitmap for %u", group); | ||
3628 | return 0; | 3615 | return 0; |
3629 | } | 3616 | } |
3630 | 3617 | ||
3631 | err = ext4_mb_load_buddy(sb, group, &e4b); | 3618 | err = ext4_mb_load_buddy(sb, group, &e4b); |
3632 | if (err) { | 3619 | if (err) { |
3633 | ext4_error(sb, __func__, "Error in loading buddy " | 3620 | ext4_error(sb, "Error loading buddy information for %u", group); |
3634 | "information for %u", group); | ||
3635 | put_bh(bitmap_bh); | 3621 | put_bh(bitmap_bh); |
3636 | return 0; | 3622 | return 0; |
3637 | } | 3623 | } |
@@ -3804,15 +3790,15 @@ repeat: | |||
3804 | 3790 | ||
3805 | err = ext4_mb_load_buddy(sb, group, &e4b); | 3791 | err = ext4_mb_load_buddy(sb, group, &e4b); |
3806 | if (err) { | 3792 | if (err) { |
3807 | ext4_error(sb, __func__, "Error in loading buddy " | 3793 | ext4_error(sb, "Error loading buddy information for %u", |
3808 | "information for %u", group); | 3794 | group); |
3809 | continue; | 3795 | continue; |
3810 | } | 3796 | } |
3811 | 3797 | ||
3812 | bitmap_bh = ext4_read_block_bitmap(sb, group); | 3798 | bitmap_bh = ext4_read_block_bitmap(sb, group); |
3813 | if (bitmap_bh == NULL) { | 3799 | if (bitmap_bh == NULL) { |
3814 | ext4_error(sb, __func__, "Error in reading block " | 3800 | ext4_error(sb, "Error reading block bitmap for %u", |
3815 | "bitmap for %u", group); | 3801 | group); |
3816 | ext4_mb_release_desc(&e4b); | 3802 | ext4_mb_release_desc(&e4b); |
3817 | continue; | 3803 | continue; |
3818 | } | 3804 | } |
@@ -3938,7 +3924,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |||
3938 | 3924 | ||
3939 | /* don't use group allocation for large files */ | 3925 | /* don't use group allocation for large files */ |
3940 | size = max(size, isize); | 3926 | size = max(size, isize); |
3941 | if (size >= sbi->s_mb_stream_request) { | 3927 | if (size > sbi->s_mb_stream_request) { |
3942 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; | 3928 | ac->ac_flags |= EXT4_MB_STREAM_ALLOC; |
3943 | return; | 3929 | return; |
3944 | } | 3930 | } |
@@ -4077,8 +4063,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, | |||
4077 | 4063 | ||
4078 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); | 4064 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL); |
4079 | if (ext4_mb_load_buddy(sb, group, &e4b)) { | 4065 | if (ext4_mb_load_buddy(sb, group, &e4b)) { |
4080 | ext4_error(sb, __func__, "Error in loading buddy " | 4066 | ext4_error(sb, "Error loading buddy information for %u", |
4081 | "information for %u", group); | 4067 | group); |
4082 | continue; | 4068 | continue; |
4083 | } | 4069 | } |
4084 | ext4_lock_group(sb, group); | 4070 | ext4_lock_group(sb, group); |
@@ -4254,7 +4240,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4254 | return 0; | 4240 | return 0; |
4255 | } | 4241 | } |
4256 | reserv_blks = ar->len; | 4242 | reserv_blks = ar->len; |
4257 | while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) { | 4243 | while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { |
4258 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | 4244 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; |
4259 | ar->len--; | 4245 | ar->len--; |
4260 | } | 4246 | } |
@@ -4331,7 +4317,7 @@ out2: | |||
4331 | kmem_cache_free(ext4_ac_cachep, ac); | 4317 | kmem_cache_free(ext4_ac_cachep, ac); |
4332 | out1: | 4318 | out1: |
4333 | if (inquota && ar->len < inquota) | 4319 | if (inquota && ar->len < inquota) |
4334 | vfs_dq_free_block(ar->inode, inquota - ar->len); | 4320 | dquot_free_block(ar->inode, inquota - ar->len); |
4335 | out3: | 4321 | out3: |
4336 | if (!ar->len) { | 4322 | if (!ar->len) { |
4337 | if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) | 4323 | if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) |
@@ -4476,10 +4462,10 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
4476 | 4462 | ||
4477 | sbi = EXT4_SB(sb); | 4463 | sbi = EXT4_SB(sb); |
4478 | es = EXT4_SB(sb)->s_es; | 4464 | es = EXT4_SB(sb)->s_es; |
4479 | if (!ext4_data_block_valid(sbi, block, count)) { | 4465 | if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && |
4480 | ext4_error(sb, __func__, | 4466 | !ext4_data_block_valid(sbi, block, count)) { |
4481 | "Freeing blocks not in datazone - " | 4467 | ext4_error(sb, "Freeing blocks not in datazone - " |
4482 | "block = %llu, count = %lu", block, count); | 4468 | "block = %llu, count = %lu", block, count); |
4483 | goto error_return; | 4469 | goto error_return; |
4484 | } | 4470 | } |
4485 | 4471 | ||
@@ -4547,8 +4533,7 @@ do_more: | |||
4547 | in_range(block + count - 1, ext4_inode_table(sb, gdp), | 4533 | in_range(block + count - 1, ext4_inode_table(sb, gdp), |
4548 | EXT4_SB(sb)->s_itb_per_group)) { | 4534 | EXT4_SB(sb)->s_itb_per_group)) { |
4549 | 4535 | ||
4550 | ext4_error(sb, __func__, | 4536 | ext4_error(sb, "Freeing blocks in system zone - " |
4551 | "Freeing blocks in system zone - " | ||
4552 | "Block = %llu, count = %lu", block, count); | 4537 | "Block = %llu, count = %lu", block, count); |
4553 | /* err = 0. ext4_std_error should be a no op */ | 4538 | /* err = 0. ext4_std_error should be a no op */ |
4554 | goto error_return; | 4539 | goto error_return; |
@@ -4646,7 +4631,7 @@ do_more: | |||
4646 | sb->s_dirt = 1; | 4631 | sb->s_dirt = 1; |
4647 | error_return: | 4632 | error_return: |
4648 | if (freed) | 4633 | if (freed) |
4649 | vfs_dq_free_block(inode, freed); | 4634 | dquot_free_block(inode, freed); |
4650 | brelse(bitmap_bh); | 4635 | brelse(bitmap_bh); |
4651 | ext4_std_error(sb, err); | 4636 | ext4_std_error(sb, err); |
4652 | if (ac) | 4637 | if (ac) |
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 436521cae456..b619322c76f0 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h | |||
@@ -220,16 +220,9 @@ struct ext4_buddy { | |||
220 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) | 220 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) |
221 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) | 221 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) |
222 | 222 | ||
223 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | ||
224 | |||
225 | static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, | 223 | static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, |
226 | struct ext4_free_extent *fex) | 224 | struct ext4_free_extent *fex) |
227 | { | 225 | { |
228 | ext4_fsblk_t block; | 226 | return ext4_group_first_block_no(sb, fex->fe_group) + fex->fe_start; |
229 | |||
230 | block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb) | ||
231 | + fex->fe_start | ||
232 | + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
233 | return block; | ||
234 | } | 227 | } |
235 | #endif | 228 | #endif |
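
All of the open-coded group-to-block arithmetic removed in the mballoc.c hunks above, group * EXT4_BLOCKS_PER_GROUP(sb) plus s_first_data_block plus an in-group offset, collapses into ext4_group_first_block_no() and the simplified ext4_grp_offs_to_block() just above. Numerically the helpers reduce to the following (stand-in types so the sketch is self-contained; the real helpers take a struct super_block and read s_first_data_block from it):

    typedef unsigned long long ext4_fsblk_t;
    typedef unsigned int ext4_group_t;

    /* ext4_group_first_block_no(): first data block of a block group */
    static ext4_fsblk_t group_first_block_no(ext4_fsblk_t first_data_block,
                                             unsigned long blocks_per_group,
                                             ext4_group_t group)
    {
            return first_data_block + (ext4_fsblk_t)group * blocks_per_group;
    }

    /* ext4_grp_offs_to_block(): (group, offset-in-group) as an absolute block number */
    static ext4_fsblk_t grp_offs_to_block(ext4_fsblk_t group_first_block,
                                          unsigned int offset_in_group)
    {
            return group_first_block + offset_in_group;
    }

Besides being shorter, the helpers centralize the 64-bit cast, which some of the removed open-coded sites applied inconsistently.
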
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 81415814b00b..8b87bd0eac95 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c | |||
@@ -365,12 +365,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | |||
365 | * happened after we started the migrate. We need to | 365 | * happened after we started the migrate. We need to |
366 | * fail the migrate | 366 | * fail the migrate |
367 | */ | 367 | */ |
368 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) { | 368 | if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) { |
369 | retval = -EAGAIN; | 369 | retval = -EAGAIN; |
370 | up_write(&EXT4_I(inode)->i_data_sem); | 370 | up_write(&EXT4_I(inode)->i_data_sem); |
371 | goto err_out; | 371 | goto err_out; |
372 | } else | 372 | } else |
373 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; | 373 | ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
374 | /* | 374 | /* |
375 | * We have the extent map build with the tmp inode. | 375 | * We have the extent map build with the tmp inode. |
376 | * Now copy the i_data across | 376 | * Now copy the i_data across |
@@ -503,14 +503,10 @@ int ext4_ext_migrate(struct inode *inode) | |||
503 | } | 503 | } |
504 | i_size_write(tmp_inode, i_size_read(inode)); | 504 | i_size_write(tmp_inode, i_size_read(inode)); |
505 | /* | 505 | /* |
506 | * We don't want the inode to be reclaimed | 506 | * Set the i_nlink to zero so it will be deleted later |
507 | * if we got interrupted in between. We have | 507 | * when we drop the inode reference. |
508 | * this tmp inode carrying reference to the | ||
509 | * data blocks of the original file. We set | ||
510 | * the i_nlink to zero at the last stage after | ||
511 | * switching the original file to extent format | ||
512 | */ | 508 | */ |
513 | tmp_inode->i_nlink = 1; | 509 | tmp_inode->i_nlink = 0; |
514 | 510 | ||
515 | ext4_ext_tree_init(handle, tmp_inode); | 511 | ext4_ext_tree_init(handle, tmp_inode); |
516 | ext4_orphan_add(handle, tmp_inode); | 512 | ext4_orphan_add(handle, tmp_inode); |
@@ -533,10 +529,20 @@ int ext4_ext_migrate(struct inode *inode) | |||
533 | * allocation. | 529 | * allocation. |
534 | */ | 530 | */ |
535 | down_read((&EXT4_I(inode)->i_data_sem)); | 531 | down_read((&EXT4_I(inode)->i_data_sem)); |
536 | EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE; | 532 | ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
537 | up_read((&EXT4_I(inode)->i_data_sem)); | 533 | up_read((&EXT4_I(inode)->i_data_sem)); |
538 | 534 | ||
539 | handle = ext4_journal_start(inode, 1); | 535 | handle = ext4_journal_start(inode, 1); |
536 | if (IS_ERR(handle)) { | ||
537 | /* | ||
538 | * It is impossible to update on-disk structures without | ||
539 | * a handle, so just roll back in-core changes and leave other | ||
540 | * work to orphan_list_cleanup() | ||
541 | */ | ||
542 | ext4_orphan_del(NULL, tmp_inode); | ||
543 | retval = PTR_ERR(handle); | ||
544 | goto out; | ||
545 | } | ||
540 | 546 | ||
541 | ei = EXT4_I(inode); | 547 | ei = EXT4_I(inode); |
542 | i_data = ei->i_data; | 548 | i_data = ei->i_data; |
@@ -618,15 +624,8 @@ err_out: | |||
618 | 624 | ||
619 | /* Reset the extent details */ | 625 | /* Reset the extent details */ |
620 | ext4_ext_tree_init(handle, tmp_inode); | 626 | ext4_ext_tree_init(handle, tmp_inode); |
621 | |||
622 | /* | ||
623 | * Set the i_nlink to zero so that | ||
624 | * generic_drop_inode really deletes the | ||
625 | * inode | ||
626 | */ | ||
627 | tmp_inode->i_nlink = 0; | ||
628 | |||
629 | ext4_journal_stop(handle); | 627 | ext4_journal_stop(handle); |
628 | out: | ||
630 | unlock_new_inode(tmp_inode); | 629 | unlock_new_inode(tmp_inode); |
631 | iput(tmp_inode); | 630 | iput(tmp_inode); |
632 | 631 | ||
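
The migrate changes rework how the temporary inode is cleaned up: it is now created already unlinked and put on the orphan list, and the new error path copes with a failed ext4_journal_start() by undoing only the in-core orphan state. Schematically (same names as the hunks above; the elided parts are the actual extent migration):

    tmp_inode->i_nlink = 0;                 /* unlinked from the start */
    ext4_orphan_add(handle, tmp_inode);     /* a crash here is safe: orphan cleanup reaps it */

    /* ... EXT4_STATE_EXT_MIGRATE bookkeeping elided ... */

    handle = ext4_journal_start(inode, 1);
    if (IS_ERR(handle)) {
            ext4_orphan_del(NULL, tmp_inode);  /* no handle: in-core rollback only */
            retval = PTR_ERR(handle);
            goto out;
    }

    /* ... migrate the inode to extents, using tmp_inode as scratch ... */

    iput(tmp_inode);                        /* last reference: the zero-linked temporary
                                             * inode is deleted, releasing whatever is
                                             * still attached to it */
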
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 82c415be87a4..aa5fe28d180f 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
@@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2, | |||
152 | int ret = 0; | 152 | int ret = 0; |
153 | 153 | ||
154 | if (inode1 == NULL) { | 154 | if (inode1 == NULL) { |
155 | ext4_error(inode2->i_sb, function, | 155 | __ext4_error(inode2->i_sb, function, |
156 | "Both inodes should not be NULL: " | 156 | "Both inodes should not be NULL: " |
157 | "inode1 NULL inode2 %lu", inode2->i_ino); | 157 | "inode1 NULL inode2 %lu", inode2->i_ino); |
158 | ret = -EIO; | 158 | ret = -EIO; |
159 | } else if (inode2 == NULL) { | 159 | } else if (inode2 == NULL) { |
160 | ext4_error(inode1->i_sb, function, | 160 | __ext4_error(inode1->i_sb, function, |
161 | "Both inodes should not be NULL: " | 161 | "Both inodes should not be NULL: " |
162 | "inode1 %lu inode2 NULL", inode1->i_ino); | 162 | "inode1 %lu inode2 NULL", inode1->i_ino); |
163 | ret = -EIO; | 163 | ret = -EIO; |
@@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, | |||
252 | } | 252 | } |
253 | 253 | ||
254 | o_start->ee_len = start_ext->ee_len; | 254 | o_start->ee_len = start_ext->ee_len; |
255 | eblock = le32_to_cpu(start_ext->ee_block); | ||
255 | new_flag = 1; | 256 | new_flag = 1; |
256 | 257 | ||
257 | } else if (start_ext->ee_len && new_ext->ee_len && | 258 | } else if (start_ext->ee_len && new_ext->ee_len && |
@@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, | |||
262 | * orig |------------------------------| | 263 | * orig |------------------------------| |
263 | */ | 264 | */ |
264 | o_start->ee_len = start_ext->ee_len; | 265 | o_start->ee_len = start_ext->ee_len; |
266 | eblock = le32_to_cpu(start_ext->ee_block); | ||
265 | new_flag = 1; | 267 | new_flag = 1; |
266 | 268 | ||
267 | } else if (!start_ext->ee_len && new_ext->ee_len && | 269 | } else if (!start_ext->ee_len && new_ext->ee_len && |
@@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
475 | struct ext4_extent *oext, *o_start, *o_end, *prev_ext; | 477 | struct ext4_extent *oext, *o_start, *o_end, *prev_ext; |
476 | struct ext4_extent new_ext, start_ext, end_ext; | 478 | struct ext4_extent new_ext, start_ext, end_ext; |
477 | ext4_lblk_t new_ext_end; | 479 | ext4_lblk_t new_ext_end; |
478 | ext4_fsblk_t new_phys_end; | ||
479 | int oext_alen, new_ext_alen, end_ext_alen; | 480 | int oext_alen, new_ext_alen, end_ext_alen; |
480 | int depth = ext_depth(orig_inode); | 481 | int depth = ext_depth(orig_inode); |
481 | int ret; | 482 | int ret; |
@@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
489 | new_ext.ee_len = dext->ee_len; | 490 | new_ext.ee_len = dext->ee_len; |
490 | new_ext_alen = ext4_ext_get_actual_len(&new_ext); | 491 | new_ext_alen = ext4_ext_get_actual_len(&new_ext); |
491 | new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; | 492 | new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; |
492 | new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1; | ||
493 | 493 | ||
494 | /* | 494 | /* |
495 | * Case: original extent is first | 495 | * Case: original extent is first |
@@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
502 | le32_to_cpu(oext->ee_block) + oext_alen) { | 502 | le32_to_cpu(oext->ee_block) + oext_alen) { |
503 | start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - | 503 | start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - |
504 | le32_to_cpu(oext->ee_block)); | 504 | le32_to_cpu(oext->ee_block)); |
505 | start_ext.ee_block = oext->ee_block; | ||
505 | copy_extent_status(oext, &start_ext); | 506 | copy_extent_status(oext, &start_ext); |
506 | } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { | 507 | } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { |
507 | prev_ext = oext - 1; | 508 | prev_ext = oext - 1; |
@@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
515 | start_ext.ee_len = cpu_to_le16( | 516 | start_ext.ee_len = cpu_to_le16( |
516 | ext4_ext_get_actual_len(prev_ext) + | 517 | ext4_ext_get_actual_len(prev_ext) + |
517 | new_ext_alen); | 518 | new_ext_alen); |
519 | start_ext.ee_block = oext->ee_block; | ||
518 | copy_extent_status(prev_ext, &start_ext); | 520 | copy_extent_status(prev_ext, &start_ext); |
519 | new_ext.ee_len = 0; | 521 | new_ext.ee_len = 0; |
520 | } | 522 | } |
@@ -526,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, | |||
526 | * new_ext |-------| | 528 | * new_ext |-------| |
527 | */ | 529 | */ |
528 | if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { | 530 | if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { |
529 | ext4_error(orig_inode->i_sb, __func__, | 531 | ext4_error(orig_inode->i_sb, |
530 | "new_ext_end(%u) should be less than or equal to " | 532 | "new_ext_end(%u) should be less than or equal to " |
531 | "oext->ee_block(%u) + oext_alen(%d) - 1", | 533 | "oext->ee_block(%u) + oext_alen(%d) - 1", |
532 | new_ext_end, le32_to_cpu(oext->ee_block), | 534 | new_ext_end, le32_to_cpu(oext->ee_block), |
@@ -689,12 +691,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | |||
689 | while (1) { | 691 | while (1) { |
690 | /* The extent for donor must be found. */ | 692 | /* The extent for donor must be found. */ |
691 | if (!dext) { | 693 | if (!dext) { |
692 | ext4_error(donor_inode->i_sb, __func__, | 694 | ext4_error(donor_inode->i_sb, |
693 | "The extent for donor must be found"); | 695 | "The extent for donor must be found"); |
694 | *err = -EIO; | 696 | *err = -EIO; |
695 | goto out; | 697 | goto out; |
696 | } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { | 698 | } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { |
697 | ext4_error(donor_inode->i_sb, __func__, | 699 | ext4_error(donor_inode->i_sb, |
698 | "Donor offset(%u) and the first block of donor " | 700 | "Donor offset(%u) and the first block of donor " |
699 | "extent(%u) should be equal", | 701 | "extent(%u) should be equal", |
700 | donor_off, | 702 | donor_off, |
@@ -928,7 +930,7 @@ out2: | |||
928 | } | 930 | } |
929 | 931 | ||
930 | /** | 932 | /** |
931 | * mext_check_argumants - Check whether move extent can be done | 933 | * mext_check_arguments - Check whether move extent can be done |
932 | * | 934 | * |
933 | * @orig_inode: original inode | 935 | * @orig_inode: original inode |
934 | * @donor_inode: donor inode | 936 | * @donor_inode: donor inode |
@@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode, | |||
949 | unsigned int blkbits = orig_inode->i_blkbits; | 951 | unsigned int blkbits = orig_inode->i_blkbits; |
950 | unsigned int blocksize = 1 << blkbits; | 952 | unsigned int blocksize = 1 << blkbits; |
951 | 953 | ||
952 | /* Regular file check */ | ||
953 | if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { | ||
954 | ext4_debug("ext4 move extent: The argument files should be " | ||
955 | "regular file [ino:orig %lu, donor %lu]\n", | ||
956 | orig_inode->i_ino, donor_inode->i_ino); | ||
957 | return -EINVAL; | ||
958 | } | ||
959 | |||
960 | if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { | 954 | if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { |
961 | ext4_debug("ext4 move extent: suid or sgid is set" | 955 | ext4_debug("ext4 move extent: suid or sgid is set" |
962 | " to donor file [ino:orig %lu, donor %lu]\n", | 956 | " to donor file [ino:orig %lu, donor %lu]\n", |
@@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
1204 | return -EINVAL; | 1198 | return -EINVAL; |
1205 | } | 1199 | } |
1206 | 1200 | ||
1201 | /* Regular file check */ | ||
1202 | if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { | ||
1203 | ext4_debug("ext4 move extent: The argument files should be " | ||
1204 | "regular file [ino:orig %lu, donor %lu]\n", | ||
1205 | orig_inode->i_ino, donor_inode->i_ino); | ||
1206 | return -EINVAL; | ||
1207 | } | ||
1208 | |||
1207 | /* Protect orig and donor inodes against a truncate */ | 1209 | /* Protect orig and donor inodes against a truncate */ |
1208 | ret1 = mext_inode_double_lock(orig_inode, donor_inode); | 1210 | ret1 = mext_inode_double_lock(orig_inode, donor_inode); |
1209 | if (ret1 < 0) | 1211 | if (ret1 < 0) |
@@ -1351,7 +1353,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
1351 | if (ret1 < 0) | 1353 | if (ret1 < 0) |
1352 | break; | 1354 | break; |
1353 | if (*moved_len > len) { | 1355 | if (*moved_len > len) { |
1354 | ext4_error(orig_inode->i_sb, __func__, | 1356 | ext4_error(orig_inode->i_sb, |
1355 | "We replaced blocks too much! " | 1357 | "We replaced blocks too much! " |
1356 | "sum of replaced: %llu requested: %llu", | 1358 | "sum of replaced: %llu requested: %llu", |
1357 | *moved_len, len); | 1359 | *moved_len, len); |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 17a17e10dd60..0c070fabd108 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
383 | if (root->info.hash_version != DX_HASH_TEA && | 383 | if (root->info.hash_version != DX_HASH_TEA && |
384 | root->info.hash_version != DX_HASH_HALF_MD4 && | 384 | root->info.hash_version != DX_HASH_HALF_MD4 && |
385 | root->info.hash_version != DX_HASH_LEGACY) { | 385 | root->info.hash_version != DX_HASH_LEGACY) { |
386 | ext4_warning(dir->i_sb, __func__, | 386 | ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", |
387 | "Unrecognised inode hash code %d", | ||
388 | root->info.hash_version); | 387 | root->info.hash_version); |
389 | brelse(bh); | 388 | brelse(bh); |
390 | *err = ERR_BAD_DX_DIR; | 389 | *err = ERR_BAD_DX_DIR; |
@@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
399 | hash = hinfo->hash; | 398 | hash = hinfo->hash; |
400 | 399 | ||
401 | if (root->info.unused_flags & 1) { | 400 | if (root->info.unused_flags & 1) { |
402 | ext4_warning(dir->i_sb, __func__, | 401 | ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", |
403 | "Unimplemented inode hash flags: %#06x", | ||
404 | root->info.unused_flags); | 402 | root->info.unused_flags); |
405 | brelse(bh); | 403 | brelse(bh); |
406 | *err = ERR_BAD_DX_DIR; | 404 | *err = ERR_BAD_DX_DIR; |
@@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
408 | } | 406 | } |
409 | 407 | ||
410 | if ((indirect = root->info.indirect_levels) > 1) { | 408 | if ((indirect = root->info.indirect_levels) > 1) { |
411 | ext4_warning(dir->i_sb, __func__, | 409 | ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", |
412 | "Unimplemented inode hash depth: %#06x", | ||
413 | root->info.indirect_levels); | 410 | root->info.indirect_levels); |
414 | brelse(bh); | 411 | brelse(bh); |
415 | *err = ERR_BAD_DX_DIR; | 412 | *err = ERR_BAD_DX_DIR; |
@@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
421 | 418 | ||
422 | if (dx_get_limit(entries) != dx_root_limit(dir, | 419 | if (dx_get_limit(entries) != dx_root_limit(dir, |
423 | root->info.info_length)) { | 420 | root->info.info_length)) { |
424 | ext4_warning(dir->i_sb, __func__, | 421 | ext4_warning(dir->i_sb, "dx entry: limit != root limit"); |
425 | "dx entry: limit != root limit"); | ||
426 | brelse(bh); | 422 | brelse(bh); |
427 | *err = ERR_BAD_DX_DIR; | 423 | *err = ERR_BAD_DX_DIR; |
428 | goto fail; | 424 | goto fail; |
@@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
433 | { | 429 | { |
434 | count = dx_get_count(entries); | 430 | count = dx_get_count(entries); |
435 | if (!count || count > dx_get_limit(entries)) { | 431 | if (!count || count > dx_get_limit(entries)) { |
436 | ext4_warning(dir->i_sb, __func__, | 432 | ext4_warning(dir->i_sb, |
437 | "dx entry: no count or count > limit"); | 433 | "dx entry: no count or count > limit"); |
438 | brelse(bh); | 434 | brelse(bh); |
439 | *err = ERR_BAD_DX_DIR; | 435 | *err = ERR_BAD_DX_DIR; |
@@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, | |||
478 | goto fail2; | 474 | goto fail2; |
479 | at = entries = ((struct dx_node *) bh->b_data)->entries; | 475 | at = entries = ((struct dx_node *) bh->b_data)->entries; |
480 | if (dx_get_limit(entries) != dx_node_limit (dir)) { | 476 | if (dx_get_limit(entries) != dx_node_limit (dir)) { |
481 | ext4_warning(dir->i_sb, __func__, | 477 | ext4_warning(dir->i_sb, |
482 | "dx entry: limit != node limit"); | 478 | "dx entry: limit != node limit"); |
483 | brelse(bh); | 479 | brelse(bh); |
484 | *err = ERR_BAD_DX_DIR; | 480 | *err = ERR_BAD_DX_DIR; |
@@ -494,7 +490,7 @@ fail2: | |||
494 | } | 490 | } |
495 | fail: | 491 | fail: |
496 | if (*err == ERR_BAD_DX_DIR) | 492 | if (*err == ERR_BAD_DX_DIR) |
497 | ext4_warning(dir->i_sb, __func__, | 493 | ext4_warning(dir->i_sb, |
498 | "Corrupt dir inode %ld, running e2fsck is " | 494 | "Corrupt dir inode %ld, running e2fsck is " |
499 | "recommended.", dir->i_ino); | 495 | "recommended.", dir->i_ino); |
500 | return NULL; | 496 | return NULL; |
@@ -947,9 +943,8 @@ restart: | |||
947 | wait_on_buffer(bh); | 943 | wait_on_buffer(bh); |
948 | if (!buffer_uptodate(bh)) { | 944 | if (!buffer_uptodate(bh)) { |
949 | /* read error, skip block & hope for the best */ | 945 | /* read error, skip block & hope for the best */ |
950 | ext4_error(sb, __func__, "reading directory #%lu " | 946 | ext4_error(sb, "reading directory #%lu offset %lu", |
951 | "offset %lu", dir->i_ino, | 947 | dir->i_ino, (unsigned long)block); |
952 | (unsigned long)block); | ||
953 | brelse(bh); | 948 | brelse(bh); |
954 | goto next; | 949 | goto next; |
955 | } | 950 | } |
@@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q | |||
1041 | retval = ext4_htree_next_block(dir, hash, frame, | 1036 | retval = ext4_htree_next_block(dir, hash, frame, |
1042 | frames, NULL); | 1037 | frames, NULL); |
1043 | if (retval < 0) { | 1038 | if (retval < 0) { |
1044 | ext4_warning(sb, __func__, | 1039 | ext4_warning(sb, |
1045 | "error reading index page in directory #%lu", | 1040 | "error reading index page in directory #%lu", |
1046 | dir->i_ino); | 1041 | dir->i_ino); |
1047 | *err = retval; | 1042 | *err = retval; |
@@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru | |||
1071 | __u32 ino = le32_to_cpu(de->inode); | 1066 | __u32 ino = le32_to_cpu(de->inode); |
1072 | brelse(bh); | 1067 | brelse(bh); |
1073 | if (!ext4_valid_inum(dir->i_sb, ino)) { | 1068 | if (!ext4_valid_inum(dir->i_sb, ino)) { |
1074 | ext4_error(dir->i_sb, "ext4_lookup", | 1069 | ext4_error(dir->i_sb, "bad inode number: %u", ino); |
1075 | "bad inode number: %u", ino); | ||
1076 | return ERR_PTR(-EIO); | 1070 | return ERR_PTR(-EIO); |
1077 | } | 1071 | } |
1078 | inode = ext4_iget(dir->i_sb, ino); | 1072 | inode = ext4_iget(dir->i_sb, ino); |
1079 | if (unlikely(IS_ERR(inode))) { | 1073 | if (unlikely(IS_ERR(inode))) { |
1080 | if (PTR_ERR(inode) == -ESTALE) { | 1074 | if (PTR_ERR(inode) == -ESTALE) { |
1081 | ext4_error(dir->i_sb, __func__, | 1075 | ext4_error(dir->i_sb, |
1082 | "deleted inode referenced: %u", | 1076 | "deleted inode referenced: %u", |
1083 | ino); | 1077 | ino); |
1084 | return ERR_PTR(-EIO); | 1078 | return ERR_PTR(-EIO); |
@@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child) | |||
1110 | brelse(bh); | 1104 | brelse(bh); |
1111 | 1105 | ||
1112 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { | 1106 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { |
1113 | ext4_error(child->d_inode->i_sb, "ext4_get_parent", | 1107 | ext4_error(child->d_inode->i_sb, |
1114 | "bad inode number: %u", ino); | 1108 | "bad inode number: %u", ino); |
1115 | return ERR_PTR(-EIO); | 1109 | return ERR_PTR(-EIO); |
1116 | } | 1110 | } |
@@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, | |||
1410 | de = (struct ext4_dir_entry_2 *)((char *)fde + | 1404 | de = (struct ext4_dir_entry_2 *)((char *)fde + |
1411 | ext4_rec_len_from_disk(fde->rec_len, blocksize)); | 1405 | ext4_rec_len_from_disk(fde->rec_len, blocksize)); |
1412 | if ((char *) de >= (((char *) root) + blocksize)) { | 1406 | if ((char *) de >= (((char *) root) + blocksize)) { |
1413 | ext4_error(dir->i_sb, __func__, | 1407 | ext4_error(dir->i_sb, |
1414 | "invalid rec_len for '..' in inode %lu", | 1408 | "invalid rec_len for '..' in inode %lu", |
1415 | dir->i_ino); | 1409 | dir->i_ino); |
1416 | brelse(bh); | 1410 | brelse(bh); |
@@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, | |||
1575 | 1569 | ||
1576 | if (levels && (dx_get_count(frames->entries) == | 1570 | if (levels && (dx_get_count(frames->entries) == |
1577 | dx_get_limit(frames->entries))) { | 1571 | dx_get_limit(frames->entries))) { |
1578 | ext4_warning(sb, __func__, | 1572 | ext4_warning(sb, "Directory index full!"); |
1579 | "Directory index full!"); | ||
1580 | err = -ENOSPC; | 1573 | err = -ENOSPC; |
1581 | goto cleanup; | 1574 | goto cleanup; |
1582 | } | 1575 | } |
@@ -1766,6 +1759,8 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode, | |||
1766 | struct inode *inode; | 1759 | struct inode *inode; |
1767 | int err, retries = 0; | 1760 | int err, retries = 0; |
1768 | 1761 | ||
1762 | dquot_initialize(dir); | ||
1763 | |||
1769 | retry: | 1764 | retry: |
1770 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1765 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
1771 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1766 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
@@ -1800,6 +1795,8 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry, | |||
1800 | if (!new_valid_dev(rdev)) | 1795 | if (!new_valid_dev(rdev)) |
1801 | return -EINVAL; | 1796 | return -EINVAL; |
1802 | 1797 | ||
1798 | dquot_initialize(dir); | ||
1799 | |||
1803 | retry: | 1800 | retry: |
1804 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1801 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
1805 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1802 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
@@ -1837,6 +1834,8 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
1837 | if (EXT4_DIR_LINK_MAX(dir)) | 1834 | if (EXT4_DIR_LINK_MAX(dir)) |
1838 | return -EMLINK; | 1835 | return -EMLINK; |
1839 | 1836 | ||
1837 | dquot_initialize(dir); | ||
1838 | |||
1840 | retry: | 1839 | retry: |
1841 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 1840 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
1842 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1841 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
@@ -1916,11 +1915,11 @@ static int empty_dir(struct inode *inode) | |||
1916 | if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || | 1915 | if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || |
1917 | !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { | 1916 | !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { |
1918 | if (err) | 1917 | if (err) |
1919 | ext4_error(inode->i_sb, __func__, | 1918 | ext4_error(inode->i_sb, |
1920 | "error %d reading directory #%lu offset 0", | 1919 | "error %d reading directory #%lu offset 0", |
1921 | err, inode->i_ino); | 1920 | err, inode->i_ino); |
1922 | else | 1921 | else |
1923 | ext4_warning(inode->i_sb, __func__, | 1922 | ext4_warning(inode->i_sb, |
1924 | "bad directory (dir #%lu) - no data block", | 1923 | "bad directory (dir #%lu) - no data block", |
1925 | inode->i_ino); | 1924 | inode->i_ino); |
1926 | return 1; | 1925 | return 1; |
@@ -1931,7 +1930,7 @@ static int empty_dir(struct inode *inode) | |||
1931 | !le32_to_cpu(de1->inode) || | 1930 | !le32_to_cpu(de1->inode) || |
1932 | strcmp(".", de->name) || | 1931 | strcmp(".", de->name) || |
1933 | strcmp("..", de1->name)) { | 1932 | strcmp("..", de1->name)) { |
1934 | ext4_warning(inode->i_sb, "empty_dir", | 1933 | ext4_warning(inode->i_sb, |
1935 | "bad directory (dir #%lu) - no `.' or `..'", | 1934 | "bad directory (dir #%lu) - no `.' or `..'", |
1936 | inode->i_ino); | 1935 | inode->i_ino); |
1937 | brelse(bh); | 1936 | brelse(bh); |
@@ -1949,7 +1948,7 @@ static int empty_dir(struct inode *inode) | |||
1949 | offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); | 1948 | offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); |
1950 | if (!bh) { | 1949 | if (!bh) { |
1951 | if (err) | 1950 | if (err) |
1952 | ext4_error(sb, __func__, | 1951 | ext4_error(sb, |
1953 | "error %d reading directory" | 1952 | "error %d reading directory" |
1954 | " #%lu offset %u", | 1953 | " #%lu offset %u", |
1955 | err, inode->i_ino, offset); | 1954 | err, inode->i_ino, offset); |
@@ -2020,11 +2019,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) | |||
2020 | err = ext4_reserve_inode_write(handle, inode, &iloc); | 2019 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
2021 | if (err) | 2020 | if (err) |
2022 | goto out_unlock; | 2021 | goto out_unlock; |
2022 | /* | ||
2023 | * Due to previous errors inode may be already a part of on-disk | ||
2024 | * orphan list. If so skip on-disk list modification. | ||
2025 | */ | ||
2026 | if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <= | ||
2027 | (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) | ||
2028 | goto mem_insert; | ||
2023 | 2029 | ||
2024 | /* Insert this inode at the head of the on-disk orphan list... */ | 2030 | /* Insert this inode at the head of the on-disk orphan list... */ |
2025 | NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); | 2031 | NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); |
2026 | EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); | 2032 | EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); |
2027 | err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh); | 2033 | err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); |
2028 | rc = ext4_mark_iloc_dirty(handle, inode, &iloc); | 2034 | rc = ext4_mark_iloc_dirty(handle, inode, &iloc); |
2029 | if (!err) | 2035 | if (!err) |
2030 | err = rc; | 2036 | err = rc; |
@@ -2037,6 +2043,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) | |||
2037 | * | 2043 | * |
2038 | * This is safe: on error we're going to ignore the orphan list | 2044 | * This is safe: on error we're going to ignore the orphan list |
2039 | * anyway on the next recovery. */ | 2045 | * anyway on the next recovery. */ |
2046 | mem_insert: | ||
2040 | if (!err) | 2047 | if (!err) |
2041 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); | 2048 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); |
2042 | 2049 | ||
@@ -2096,7 +2103,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) | |||
2096 | if (err) | 2103 | if (err) |
2097 | goto out_brelse; | 2104 | goto out_brelse; |
2098 | sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); | 2105 | sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); |
2099 | err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh); | 2106 | err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); |
2100 | } else { | 2107 | } else { |
2101 | struct ext4_iloc iloc2; | 2108 | struct ext4_iloc iloc2; |
2102 | struct inode *i_prev = | 2109 | struct inode *i_prev = |
@@ -2136,7 +2143,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) | |||
2136 | 2143 | ||
2137 | /* Initialize quotas before so that eventual writes go in | 2144 | /* Initialize quotas before so that eventual writes go in |
2138 | * separate transaction */ | 2145 | * separate transaction */ |
2139 | vfs_dq_init(dentry->d_inode); | 2146 | dquot_initialize(dir); |
2147 | dquot_initialize(dentry->d_inode); | ||
2148 | |||
2140 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); | 2149 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); |
2141 | if (IS_ERR(handle)) | 2150 | if (IS_ERR(handle)) |
2142 | return PTR_ERR(handle); | 2151 | return PTR_ERR(handle); |
@@ -2163,7 +2172,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) | |||
2163 | if (retval) | 2172 | if (retval) |
2164 | goto end_rmdir; | 2173 | goto end_rmdir; |
2165 | if (!EXT4_DIR_LINK_EMPTY(inode)) | 2174 | if (!EXT4_DIR_LINK_EMPTY(inode)) |
2166 | ext4_warning(inode->i_sb, "ext4_rmdir", | 2175 | ext4_warning(inode->i_sb, |
2167 | "empty directory has too many links (%d)", | 2176 | "empty directory has too many links (%d)", |
2168 | inode->i_nlink); | 2177 | inode->i_nlink); |
2169 | inode->i_version++; | 2178 | inode->i_version++; |
@@ -2195,7 +2204,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) | |||
2195 | 2204 | ||
2196 | /* Initialize quotas before so that eventual writes go | 2205 | /* Initialize quotas before so that eventual writes go |
2197 | * in separate transaction */ | 2206 | * in separate transaction */ |
2198 | vfs_dq_init(dentry->d_inode); | 2207 | dquot_initialize(dir); |
2208 | dquot_initialize(dentry->d_inode); | ||
2209 | |||
2199 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); | 2210 | handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); |
2200 | if (IS_ERR(handle)) | 2211 | if (IS_ERR(handle)) |
2201 | return PTR_ERR(handle); | 2212 | return PTR_ERR(handle); |
@@ -2215,7 +2226,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) | |||
2215 | goto end_unlink; | 2226 | goto end_unlink; |
2216 | 2227 | ||
2217 | if (!inode->i_nlink) { | 2228 | if (!inode->i_nlink) { |
2218 | ext4_warning(inode->i_sb, "ext4_unlink", | 2229 | ext4_warning(inode->i_sb, |
2219 | "Deleting nonexistent file (%lu), %d", | 2230 | "Deleting nonexistent file (%lu), %d", |
2220 | inode->i_ino, inode->i_nlink); | 2231 | inode->i_ino, inode->i_nlink); |
2221 | inode->i_nlink = 1; | 2232 | inode->i_nlink = 1; |
@@ -2250,6 +2261,8 @@ static int ext4_symlink(struct inode *dir, | |||
2250 | if (l > dir->i_sb->s_blocksize) | 2261 | if (l > dir->i_sb->s_blocksize) |
2251 | return -ENAMETOOLONG; | 2262 | return -ENAMETOOLONG; |
2252 | 2263 | ||
2264 | dquot_initialize(dir); | ||
2265 | |||
2253 | retry: | 2266 | retry: |
2254 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 2267 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + |
2255 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2268 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
@@ -2308,6 +2321,8 @@ static int ext4_link(struct dentry *old_dentry, | |||
2308 | if (inode->i_nlink >= EXT4_LINK_MAX) | 2321 | if (inode->i_nlink >= EXT4_LINK_MAX) |
2309 | return -EMLINK; | 2322 | return -EMLINK; |
2310 | 2323 | ||
2324 | dquot_initialize(dir); | ||
2325 | |||
2311 | /* | 2326 | /* |
2312 | * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing | 2327 | * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing |
2313 | * otherwise has the potential to corrupt the orphan inode list. | 2328 | * otherwise has the potential to corrupt the orphan inode list. |
@@ -2358,12 +2373,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
2358 | struct ext4_dir_entry_2 *old_de, *new_de; | 2373 | struct ext4_dir_entry_2 *old_de, *new_de; |
2359 | int retval, force_da_alloc = 0; | 2374 | int retval, force_da_alloc = 0; |
2360 | 2375 | ||
2376 | dquot_initialize(old_dir); | ||
2377 | dquot_initialize(new_dir); | ||
2378 | |||
2361 | old_bh = new_bh = dir_bh = NULL; | 2379 | old_bh = new_bh = dir_bh = NULL; |
2362 | 2380 | ||
2363 | /* Initialize quotas before so that eventual writes go | 2381 | /* Initialize quotas before so that eventual writes go |
2364 | * in separate transaction */ | 2382 | * in separate transaction */ |
2365 | if (new_dentry->d_inode) | 2383 | if (new_dentry->d_inode) |
2366 | vfs_dq_init(new_dentry->d_inode); | 2384 | dquot_initialize(new_dentry->d_inode); |
2367 | handle = ext4_journal_start(old_dir, 2 * | 2385 | handle = ext4_journal_start(old_dir, 2 * |
2368 | EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + | 2386 | EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + |
2369 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); | 2387 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); |
@@ -2462,7 +2480,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
2462 | } | 2480 | } |
2463 | } | 2481 | } |
2464 | if (retval) { | 2482 | if (retval) { |
2465 | ext4_warning(old_dir->i_sb, "ext4_rename", | 2483 | ext4_warning(old_dir->i_sb, |
2466 | "Deleting old file (%lu), %d, error=%d", | 2484 | "Deleting old file (%lu), %d, error=%d", |
2467 | old_dir->i_ino, old_dir->i_nlink, retval); | 2485 | old_dir->i_ino, old_dir->i_nlink, retval); |
2468 | } | 2486 | } |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 3b2c5541d8a6..5692c48754a0 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -48,65 +48,54 @@ static int verify_group_input(struct super_block *sb, | |||
48 | 48 | ||
49 | ext4_get_group_no_and_offset(sb, start, NULL, &offset); | 49 | ext4_get_group_no_and_offset(sb, start, NULL, &offset); |
50 | if (group != sbi->s_groups_count) | 50 | if (group != sbi->s_groups_count) |
51 | ext4_warning(sb, __func__, | 51 | ext4_warning(sb, "Cannot add at group %u (only %u groups)", |
52 | "Cannot add at group %u (only %u groups)", | ||
53 | input->group, sbi->s_groups_count); | 52 | input->group, sbi->s_groups_count); |
54 | else if (offset != 0) | 53 | else if (offset != 0) |
55 | ext4_warning(sb, __func__, "Last group not full"); | 54 | ext4_warning(sb, "Last group not full"); |
56 | else if (input->reserved_blocks > input->blocks_count / 5) | 55 | else if (input->reserved_blocks > input->blocks_count / 5) |
57 | ext4_warning(sb, __func__, "Reserved blocks too high (%u)", | 56 | ext4_warning(sb, "Reserved blocks too high (%u)", |
58 | input->reserved_blocks); | 57 | input->reserved_blocks); |
59 | else if (free_blocks_count < 0) | 58 | else if (free_blocks_count < 0) |
60 | ext4_warning(sb, __func__, "Bad blocks count %u", | 59 | ext4_warning(sb, "Bad blocks count %u", |
61 | input->blocks_count); | 60 | input->blocks_count); |
62 | else if (!(bh = sb_bread(sb, end - 1))) | 61 | else if (!(bh = sb_bread(sb, end - 1))) |
63 | ext4_warning(sb, __func__, | 62 | ext4_warning(sb, "Cannot read last block (%llu)", |
64 | "Cannot read last block (%llu)", | ||
65 | end - 1); | 63 | end - 1); |
66 | else if (outside(input->block_bitmap, start, end)) | 64 | else if (outside(input->block_bitmap, start, end)) |
67 | ext4_warning(sb, __func__, | 65 | ext4_warning(sb, "Block bitmap not in group (block %llu)", |
68 | "Block bitmap not in group (block %llu)", | ||
69 | (unsigned long long)input->block_bitmap); | 66 | (unsigned long long)input->block_bitmap); |
70 | else if (outside(input->inode_bitmap, start, end)) | 67 | else if (outside(input->inode_bitmap, start, end)) |
71 | ext4_warning(sb, __func__, | 68 | ext4_warning(sb, "Inode bitmap not in group (block %llu)", |
72 | "Inode bitmap not in group (block %llu)", | ||
73 | (unsigned long long)input->inode_bitmap); | 69 | (unsigned long long)input->inode_bitmap); |
74 | else if (outside(input->inode_table, start, end) || | 70 | else if (outside(input->inode_table, start, end) || |
75 | outside(itend - 1, start, end)) | 71 | outside(itend - 1, start, end)) |
76 | ext4_warning(sb, __func__, | 72 | ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", |
77 | "Inode table not in group (blocks %llu-%llu)", | ||
78 | (unsigned long long)input->inode_table, itend - 1); | 73 | (unsigned long long)input->inode_table, itend - 1); |
79 | else if (input->inode_bitmap == input->block_bitmap) | 74 | else if (input->inode_bitmap == input->block_bitmap) |
80 | ext4_warning(sb, __func__, | 75 | ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", |
81 | "Block bitmap same as inode bitmap (%llu)", | ||
82 | (unsigned long long)input->block_bitmap); | 76 | (unsigned long long)input->block_bitmap); |
83 | else if (inside(input->block_bitmap, input->inode_table, itend)) | 77 | else if (inside(input->block_bitmap, input->inode_table, itend)) |
84 | ext4_warning(sb, __func__, | 78 | ext4_warning(sb, "Block bitmap (%llu) in inode table " |
85 | "Block bitmap (%llu) in inode table (%llu-%llu)", | 79 | "(%llu-%llu)", |
86 | (unsigned long long)input->block_bitmap, | 80 | (unsigned long long)input->block_bitmap, |
87 | (unsigned long long)input->inode_table, itend - 1); | 81 | (unsigned long long)input->inode_table, itend - 1); |
88 | else if (inside(input->inode_bitmap, input->inode_table, itend)) | 82 | else if (inside(input->inode_bitmap, input->inode_table, itend)) |
89 | ext4_warning(sb, __func__, | 83 | ext4_warning(sb, "Inode bitmap (%llu) in inode table " |
90 | "Inode bitmap (%llu) in inode table (%llu-%llu)", | 84 | "(%llu-%llu)", |
91 | (unsigned long long)input->inode_bitmap, | 85 | (unsigned long long)input->inode_bitmap, |
92 | (unsigned long long)input->inode_table, itend - 1); | 86 | (unsigned long long)input->inode_table, itend - 1); |
93 | else if (inside(input->block_bitmap, start, metaend)) | 87 | else if (inside(input->block_bitmap, start, metaend)) |
94 | ext4_warning(sb, __func__, | 88 | ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", |
95 | "Block bitmap (%llu) in GDT table" | ||
96 | " (%llu-%llu)", | ||
97 | (unsigned long long)input->block_bitmap, | 89 | (unsigned long long)input->block_bitmap, |
98 | start, metaend - 1); | 90 | start, metaend - 1); |
99 | else if (inside(input->inode_bitmap, start, metaend)) | 91 | else if (inside(input->inode_bitmap, start, metaend)) |
100 | ext4_warning(sb, __func__, | 92 | ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", |
101 | "Inode bitmap (%llu) in GDT table" | ||
102 | " (%llu-%llu)", | ||
103 | (unsigned long long)input->inode_bitmap, | 93 | (unsigned long long)input->inode_bitmap, |
104 | start, metaend - 1); | 94 | start, metaend - 1); |
105 | else if (inside(input->inode_table, start, metaend) || | 95 | else if (inside(input->inode_table, start, metaend) || |
106 | inside(itend - 1, start, metaend)) | 96 | inside(itend - 1, start, metaend)) |
107 | ext4_warning(sb, __func__, | 97 | ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " |
108 | "Inode table (%llu-%llu) overlaps" | 98 | "(%llu-%llu)", |
109 | "GDT table (%llu-%llu)", | ||
110 | (unsigned long long)input->inode_table, | 99 | (unsigned long long)input->inode_table, |
111 | itend - 1, start, metaend - 1); | 100 | itend - 1, start, metaend - 1); |
112 | else | 101 | else |
@@ -364,8 +353,7 @@ static int verify_reserved_gdb(struct super_block *sb, | |||
364 | while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { | 353 | while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { |
365 | if (le32_to_cpu(*p++) != | 354 | if (le32_to_cpu(*p++) != |
366 | grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ | 355 | grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ |
367 | ext4_warning(sb, __func__, | 356 | ext4_warning(sb, "reserved GDT %llu" |
368 | "reserved GDT %llu" | ||
369 | " missing grp %d (%llu)", | 357 | " missing grp %d (%llu)", |
370 | blk, grp, | 358 | blk, grp, |
371 | grp * | 359 | grp * |
@@ -420,8 +408,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
420 | */ | 408 | */ |
421 | if (EXT4_SB(sb)->s_sbh->b_blocknr != | 409 | if (EXT4_SB(sb)->s_sbh->b_blocknr != |
422 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { | 410 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { |
423 | ext4_warning(sb, __func__, | 411 | ext4_warning(sb, "won't resize using backup superblock at %llu", |
424 | "won't resize using backup superblock at %llu", | ||
425 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); | 412 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); |
426 | return -EPERM; | 413 | return -EPERM; |
427 | } | 414 | } |
@@ -444,8 +431,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
444 | 431 | ||
445 | data = (__le32 *)dind->b_data; | 432 | data = (__le32 *)dind->b_data; |
446 | if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { | 433 | if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { |
447 | ext4_warning(sb, __func__, | 434 | ext4_warning(sb, "new group %u GDT block %llu not reserved", |
448 | "new group %u GDT block %llu not reserved", | ||
449 | input->group, gdblock); | 435 | input->group, gdblock); |
450 | err = -EINVAL; | 436 | err = -EINVAL; |
451 | goto exit_dind; | 437 | goto exit_dind; |
@@ -468,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
468 | GFP_NOFS); | 454 | GFP_NOFS); |
469 | if (!n_group_desc) { | 455 | if (!n_group_desc) { |
470 | err = -ENOMEM; | 456 | err = -ENOMEM; |
471 | ext4_warning(sb, __func__, | 457 | ext4_warning(sb, |
472 | "not enough memory for %lu groups", gdb_num + 1); | 458 | "not enough memory for %lu groups", gdb_num + 1); |
473 | goto exit_inode; | 459 | goto exit_inode; |
474 | } | 460 | } |
@@ -567,8 +553,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, | |||
567 | /* Get each reserved primary GDT block and verify it holds backups */ | 553 | /* Get each reserved primary GDT block and verify it holds backups */ |
568 | for (res = 0; res < reserved_gdb; res++, blk++) { | 554 | for (res = 0; res < reserved_gdb; res++, blk++) { |
569 | if (le32_to_cpu(*data) != blk) { | 555 | if (le32_to_cpu(*data) != blk) { |
570 | ext4_warning(sb, __func__, | 556 | ext4_warning(sb, "reserved block %llu" |
571 | "reserved block %llu" | ||
572 | " not at offset %ld", | 557 | " not at offset %ld", |
573 | blk, | 558 | blk, |
574 | (long)(data - (__le32 *)dind->b_data)); | 559 | (long)(data - (__le32 *)dind->b_data)); |
@@ -713,8 +698,7 @@ static void update_backups(struct super_block *sb, | |||
713 | */ | 698 | */ |
714 | exit_err: | 699 | exit_err: |
715 | if (err) { | 700 | if (err) { |
716 | ext4_warning(sb, __func__, | 701 | ext4_warning(sb, "can't update backup for group %u (err %d), " |
717 | "can't update backup for group %u (err %d), " | ||
718 | "forcing fsck on next reboot", group, err); | 702 | "forcing fsck on next reboot", group, err); |
719 | sbi->s_mount_state &= ~EXT4_VALID_FS; | 703 | sbi->s_mount_state &= ~EXT4_VALID_FS; |
720 | sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); | 704 | sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); |
@@ -753,20 +737,19 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
753 | 737 | ||
754 | if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, | 738 | if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, |
755 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { | 739 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { |
756 | ext4_warning(sb, __func__, | 740 | ext4_warning(sb, "Can't resize non-sparse filesystem further"); |
757 | "Can't resize non-sparse filesystem further"); | ||
758 | return -EPERM; | 741 | return -EPERM; |
759 | } | 742 | } |
760 | 743 | ||
761 | if (ext4_blocks_count(es) + input->blocks_count < | 744 | if (ext4_blocks_count(es) + input->blocks_count < |
762 | ext4_blocks_count(es)) { | 745 | ext4_blocks_count(es)) { |
763 | ext4_warning(sb, __func__, "blocks_count overflow"); | 746 | ext4_warning(sb, "blocks_count overflow"); |
764 | return -EINVAL; | 747 | return -EINVAL; |
765 | } | 748 | } |
766 | 749 | ||
767 | if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < | 750 | if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < |
768 | le32_to_cpu(es->s_inodes_count)) { | 751 | le32_to_cpu(es->s_inodes_count)) { |
769 | ext4_warning(sb, __func__, "inodes_count overflow"); | 752 | ext4_warning(sb, "inodes_count overflow"); |
770 | return -EINVAL; | 753 | return -EINVAL; |
771 | } | 754 | } |
772 | 755 | ||
@@ -774,14 +757,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
774 | if (!EXT4_HAS_COMPAT_FEATURE(sb, | 757 | if (!EXT4_HAS_COMPAT_FEATURE(sb, |
775 | EXT4_FEATURE_COMPAT_RESIZE_INODE) | 758 | EXT4_FEATURE_COMPAT_RESIZE_INODE) |
776 | || !le16_to_cpu(es->s_reserved_gdt_blocks)) { | 759 | || !le16_to_cpu(es->s_reserved_gdt_blocks)) { |
777 | ext4_warning(sb, __func__, | 760 | ext4_warning(sb, |
778 | "No reserved GDT blocks, can't resize"); | 761 | "No reserved GDT blocks, can't resize"); |
779 | return -EPERM; | 762 | return -EPERM; |
780 | } | 763 | } |
781 | inode = ext4_iget(sb, EXT4_RESIZE_INO); | 764 | inode = ext4_iget(sb, EXT4_RESIZE_INO); |
782 | if (IS_ERR(inode)) { | 765 | if (IS_ERR(inode)) { |
783 | ext4_warning(sb, __func__, | 766 | ext4_warning(sb, "Error opening resize inode"); |
784 | "Error opening resize inode"); | ||
785 | return PTR_ERR(inode); | 767 | return PTR_ERR(inode); |
786 | } | 768 | } |
787 | } | 769 | } |
@@ -810,8 +792,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
810 | 792 | ||
811 | mutex_lock(&sbi->s_resize_lock); | 793 | mutex_lock(&sbi->s_resize_lock); |
812 | if (input->group != sbi->s_groups_count) { | 794 | if (input->group != sbi->s_groups_count) { |
813 | ext4_warning(sb, __func__, | 795 | ext4_warning(sb, "multiple resizers run on filesystem!"); |
814 | "multiple resizers run on filesystem!"); | ||
815 | err = -EBUSY; | 796 | err = -EBUSY; |
816 | goto exit_journal; | 797 | goto exit_journal; |
817 | } | 798 | } |
@@ -997,13 +978,12 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
997 | " too large to resize to %llu blocks safely\n", | 978 | " too large to resize to %llu blocks safely\n", |
998 | sb->s_id, n_blocks_count); | 979 | sb->s_id, n_blocks_count); |
999 | if (sizeof(sector_t) < 8) | 980 | if (sizeof(sector_t) < 8) |
1000 | ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled"); | 981 | ext4_warning(sb, "CONFIG_LBDAF not enabled"); |
1001 | return -EINVAL; | 982 | return -EINVAL; |
1002 | } | 983 | } |
1003 | 984 | ||
1004 | if (n_blocks_count < o_blocks_count) { | 985 | if (n_blocks_count < o_blocks_count) { |
1005 | ext4_warning(sb, __func__, | 986 | ext4_warning(sb, "can't shrink FS - resize aborted"); |
1006 | "can't shrink FS - resize aborted"); | ||
1007 | return -EBUSY; | 987 | return -EBUSY; |
1008 | } | 988 | } |
1009 | 989 | ||
@@ -1011,15 +991,14 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
1011 | ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); | 991 | ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); |
1012 | 992 | ||
1013 | if (last == 0) { | 993 | if (last == 0) { |
1014 | ext4_warning(sb, __func__, | 994 | ext4_warning(sb, "need to use ext2online to resize further"); |
1015 | "need to use ext2online to resize further"); | ||
1016 | return -EPERM; | 995 | return -EPERM; |
1017 | } | 996 | } |
1018 | 997 | ||
1019 | add = EXT4_BLOCKS_PER_GROUP(sb) - last; | 998 | add = EXT4_BLOCKS_PER_GROUP(sb) - last; |
1020 | 999 | ||
1021 | if (o_blocks_count + add < o_blocks_count) { | 1000 | if (o_blocks_count + add < o_blocks_count) { |
1022 | ext4_warning(sb, __func__, "blocks_count overflow"); | 1001 | ext4_warning(sb, "blocks_count overflow"); |
1023 | return -EINVAL; | 1002 | return -EINVAL; |
1024 | } | 1003 | } |
1025 | 1004 | ||
@@ -1027,16 +1006,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
1027 | add = n_blocks_count - o_blocks_count; | 1006 | add = n_blocks_count - o_blocks_count; |
1028 | 1007 | ||
1029 | if (o_blocks_count + add < n_blocks_count) | 1008 | if (o_blocks_count + add < n_blocks_count) |
1030 | ext4_warning(sb, __func__, | 1009 | ext4_warning(sb, "will only finish group (%llu blocks, %u new)", |
1031 | "will only finish group (%llu" | ||
1032 | " blocks, %u new)", | ||
1033 | o_blocks_count + add, add); | 1010 | o_blocks_count + add, add); |
1034 | 1011 | ||
1035 | /* See if the device is actually as big as what was requested */ | 1012 | /* See if the device is actually as big as what was requested */ |
1036 | bh = sb_bread(sb, o_blocks_count + add - 1); | 1013 | bh = sb_bread(sb, o_blocks_count + add - 1); |
1037 | if (!bh) { | 1014 | if (!bh) { |
1038 | ext4_warning(sb, __func__, | 1015 | ext4_warning(sb, "can't read last block, resize aborted"); |
1039 | "can't read last block, resize aborted"); | ||
1040 | return -ENOSPC; | 1016 | return -ENOSPC; |
1041 | } | 1017 | } |
1042 | brelse(bh); | 1018 | brelse(bh); |
@@ -1047,14 +1023,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
1047 | handle = ext4_journal_start_sb(sb, 3); | 1023 | handle = ext4_journal_start_sb(sb, 3); |
1048 | if (IS_ERR(handle)) { | 1024 | if (IS_ERR(handle)) { |
1049 | err = PTR_ERR(handle); | 1025 | err = PTR_ERR(handle); |
1050 | ext4_warning(sb, __func__, "error %d on journal start", err); | 1026 | ext4_warning(sb, "error %d on journal start", err); |
1051 | goto exit_put; | 1027 | goto exit_put; |
1052 | } | 1028 | } |
1053 | 1029 | ||
1054 | mutex_lock(&EXT4_SB(sb)->s_resize_lock); | 1030 | mutex_lock(&EXT4_SB(sb)->s_resize_lock); |
1055 | if (o_blocks_count != ext4_blocks_count(es)) { | 1031 | if (o_blocks_count != ext4_blocks_count(es)) { |
1056 | ext4_warning(sb, __func__, | 1032 | ext4_warning(sb, "multiple resizers run on filesystem!"); |
1057 | "multiple resizers run on filesystem!"); | ||
1058 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); | 1033 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); |
1059 | ext4_journal_stop(handle); | 1034 | ext4_journal_stop(handle); |
1060 | err = -EBUSY; | 1035 | err = -EBUSY; |
@@ -1063,8 +1038,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, | |||
1063 | 1038 | ||
1064 | if ((err = ext4_journal_get_write_access(handle, | 1039 | if ((err = ext4_journal_get_write_access(handle, |
1065 | EXT4_SB(sb)->s_sbh))) { | 1040 | EXT4_SB(sb)->s_sbh))) { |
1066 | ext4_warning(sb, __func__, | 1041 | ext4_warning(sb, "error %d on journal write access", err); |
1067 | "error %d on journal write access", err); | ||
1068 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); | 1042 | mutex_unlock(&EXT4_SB(sb)->s_resize_lock); |
1069 | ext4_journal_stop(handle); | 1043 | ext4_journal_stop(handle); |
1070 | goto exit_put; | 1044 | goto exit_put; |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 735c20d5fd56..ba191dae8730 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -302,7 +302,7 @@ void ext4_journal_abort_handle(const char *caller, const char *err_fn, | |||
302 | * write out the superblock safely. | 302 | * write out the superblock safely. |
303 | * | 303 | * |
304 | * We'll just use the jbd2_journal_abort() error code to record an error in | 304 | * We'll just use the jbd2_journal_abort() error code to record an error in |
305 | * the journal instead. On recovery, the journal will compain about | 305 | * the journal instead. On recovery, the journal will complain about |
306 | * that error until we've noted it down and cleared it. | 306 | * that error until we've noted it down and cleared it. |
307 | */ | 307 | */ |
308 | 308 | ||
@@ -333,7 +333,7 @@ static void ext4_handle_error(struct super_block *sb) | |||
333 | sb->s_id); | 333 | sb->s_id); |
334 | } | 334 | } |
335 | 335 | ||
336 | void ext4_error(struct super_block *sb, const char *function, | 336 | void __ext4_error(struct super_block *sb, const char *function, |
337 | const char *fmt, ...) | 337 | const char *fmt, ...) |
338 | { | 338 | { |
339 | va_list args; | 339 | va_list args; |
@@ -347,6 +347,42 @@ void ext4_error(struct super_block *sb, const char *function, | |||
347 | ext4_handle_error(sb); | 347 | ext4_handle_error(sb); |
348 | } | 348 | } |
349 | 349 | ||
350 | void ext4_error_inode(const char *function, struct inode *inode, | ||
351 | const char *fmt, ...) | ||
352 | { | ||
353 | va_list args; | ||
354 | |||
355 | va_start(args, fmt); | ||
356 | printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ", | ||
357 | inode->i_sb->s_id, function, inode->i_ino, current->comm); | ||
358 | vprintk(fmt, args); | ||
359 | printk("\n"); | ||
360 | va_end(args); | ||
361 | |||
362 | ext4_handle_error(inode->i_sb); | ||
363 | } | ||
364 | |||
365 | void ext4_error_file(const char *function, struct file *file, | ||
366 | const char *fmt, ...) | ||
367 | { | ||
368 | va_list args; | ||
369 | struct inode *inode = file->f_dentry->d_inode; | ||
370 | char pathname[80], *path; | ||
371 | |||
372 | va_start(args, fmt); | ||
373 | path = d_path(&(file->f_path), pathname, sizeof(pathname)); | ||
374 | if (!path) | ||
375 | path = "(unknown)"; | ||
376 | printk(KERN_CRIT | ||
377 | "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ", | ||
378 | inode->i_sb->s_id, function, inode->i_ino, current->comm, path); | ||
379 | vprintk(fmt, args); | ||
380 | printk("\n"); | ||
381 | va_end(args); | ||
382 | |||
383 | ext4_handle_error(inode->i_sb); | ||
384 | } | ||
385 | |||
350 | static const char *ext4_decode_error(struct super_block *sb, int errno, | 386 | static const char *ext4_decode_error(struct super_block *sb, int errno, |
351 | char nbuf[16]) | 387 | char nbuf[16]) |
352 | { | 388 | { |
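Both helpers added in the hunk above follow the usual variadic-forwarding pattern: print a fixed device/inode prefix, then hand the caller's format string and va_list to a v*print routine. A stand-alone sketch of the same idiom is below; report_error is an invented name for illustration, not part of the ext4 API.

#include <stdarg.h>
#include <stdio.h>

/* Toy version of the pattern: build a fixed prefix, then forward the
 * caller's format string and arguments to a v*printf function. */
static void report_error(const char *function, unsigned long ino,
                         const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        fprintf(stderr, "toy-fs error: %s: inode #%lu: ", function, ino);
        vfprintf(stderr, fmt, args);
        fputc('\n', stderr);
        va_end(args);
}

int main(void)
{
        report_error(__func__, 42UL, "bad block %d at offset %ld", 7, 4096L);
        return 0;
}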
@@ -450,7 +486,7 @@ void ext4_msg (struct super_block * sb, const char *prefix, | |||
450 | va_end(args); | 486 | va_end(args); |
451 | } | 487 | } |
452 | 488 | ||
453 | void ext4_warning(struct super_block *sb, const char *function, | 489 | void __ext4_warning(struct super_block *sb, const char *function, |
454 | const char *fmt, ...) | 490 | const char *fmt, ...) |
455 | { | 491 | { |
456 | va_list args; | 492 | va_list args; |
@@ -507,7 +543,7 @@ void ext4_update_dynamic_rev(struct super_block *sb) | |||
507 | if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) | 543 | if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) |
508 | return; | 544 | return; |
509 | 545 | ||
510 | ext4_warning(sb, __func__, | 546 | ext4_warning(sb, |
511 | "updating to rev %d because of new feature flag, " | 547 | "updating to rev %d because of new feature flag, " |
512 | "running e2fsck is recommended", | 548 | "running e2fsck is recommended", |
513 | EXT4_DYNAMIC_REV); | 549 | EXT4_DYNAMIC_REV); |
@@ -708,7 +744,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
708 | #ifdef CONFIG_QUOTA | 744 | #ifdef CONFIG_QUOTA |
709 | ei->i_reserved_quota = 0; | 745 | ei->i_reserved_quota = 0; |
710 | #endif | 746 | #endif |
711 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | 747 | INIT_LIST_HEAD(&ei->i_completed_io_list); |
748 | spin_lock_init(&ei->i_completed_io_lock); | ||
712 | ei->cur_aio_dio = NULL; | 749 | ei->cur_aio_dio = NULL; |
713 | ei->i_sync_tid = 0; | 750 | ei->i_sync_tid = 0; |
714 | ei->i_datasync_tid = 0; | 751 | ei->i_datasync_tid = 0; |
@@ -761,6 +798,7 @@ static void destroy_inodecache(void) | |||
761 | 798 | ||
762 | static void ext4_clear_inode(struct inode *inode) | 799 | static void ext4_clear_inode(struct inode *inode) |
763 | { | 800 | { |
801 | dquot_drop(inode); | ||
764 | ext4_discard_preallocations(inode); | 802 | ext4_discard_preallocations(inode); |
765 | if (EXT4_JOURNAL(inode)) | 803 | if (EXT4_JOURNAL(inode)) |
766 | jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, | 804 | jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, |
@@ -796,10 +834,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq, | |||
796 | if (sbi->s_qf_names[GRPQUOTA]) | 834 | if (sbi->s_qf_names[GRPQUOTA]) |
797 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); | 835 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); |
798 | 836 | ||
799 | if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) | 837 | if (test_opt(sb, USRQUOTA)) |
800 | seq_puts(seq, ",usrquota"); | 838 | seq_puts(seq, ",usrquota"); |
801 | 839 | ||
802 | if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) | 840 | if (test_opt(sb, GRPQUOTA)) |
803 | seq_puts(seq, ",grpquota"); | 841 | seq_puts(seq, ",grpquota"); |
804 | #endif | 842 | #endif |
805 | } | 843 | } |
@@ -926,6 +964,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
926 | if (test_opt(sb, NOLOAD)) | 964 | if (test_opt(sb, NOLOAD)) |
927 | seq_puts(seq, ",norecovery"); | 965 | seq_puts(seq, ",norecovery"); |
928 | 966 | ||
967 | if (test_opt(sb, DIOREAD_NOLOCK)) | ||
968 | seq_puts(seq, ",dioread_nolock"); | ||
969 | |||
929 | ext4_show_quota_options(seq, sb); | 970 | ext4_show_quota_options(seq, sb); |
930 | 971 | ||
931 | return 0; | 972 | return 0; |
@@ -1012,19 +1053,9 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
1012 | const char *data, size_t len, loff_t off); | 1053 | const char *data, size_t len, loff_t off); |
1013 | 1054 | ||
1014 | static const struct dquot_operations ext4_quota_operations = { | 1055 | static const struct dquot_operations ext4_quota_operations = { |
1015 | .initialize = dquot_initialize, | ||
1016 | .drop = dquot_drop, | ||
1017 | .alloc_space = dquot_alloc_space, | ||
1018 | .reserve_space = dquot_reserve_space, | ||
1019 | .claim_space = dquot_claim_space, | ||
1020 | .release_rsv = dquot_release_reserved_space, | ||
1021 | #ifdef CONFIG_QUOTA | 1056 | #ifdef CONFIG_QUOTA |
1022 | .get_reserved_space = ext4_get_reserved_space, | 1057 | .get_reserved_space = ext4_get_reserved_space, |
1023 | #endif | 1058 | #endif |
1024 | .alloc_inode = dquot_alloc_inode, | ||
1025 | .free_space = dquot_free_space, | ||
1026 | .free_inode = dquot_free_inode, | ||
1027 | .transfer = dquot_transfer, | ||
1028 | .write_dquot = ext4_write_dquot, | 1059 | .write_dquot = ext4_write_dquot, |
1029 | .acquire_dquot = ext4_acquire_dquot, | 1060 | .acquire_dquot = ext4_acquire_dquot, |
1030 | .release_dquot = ext4_release_dquot, | 1061 | .release_dquot = ext4_release_dquot, |
@@ -1109,6 +1140,7 @@ enum { | |||
1109 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, | 1140 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, |
1110 | Opt_block_validity, Opt_noblock_validity, | 1141 | Opt_block_validity, Opt_noblock_validity, |
1111 | Opt_inode_readahead_blks, Opt_journal_ioprio, | 1142 | Opt_inode_readahead_blks, Opt_journal_ioprio, |
1143 | Opt_dioread_nolock, Opt_dioread_lock, | ||
1112 | Opt_discard, Opt_nodiscard, | 1144 | Opt_discard, Opt_nodiscard, |
1113 | }; | 1145 | }; |
1114 | 1146 | ||
@@ -1176,6 +1208,8 @@ static const match_table_t tokens = { | |||
1176 | {Opt_auto_da_alloc, "auto_da_alloc=%u"}, | 1208 | {Opt_auto_da_alloc, "auto_da_alloc=%u"}, |
1177 | {Opt_auto_da_alloc, "auto_da_alloc"}, | 1209 | {Opt_auto_da_alloc, "auto_da_alloc"}, |
1178 | {Opt_noauto_da_alloc, "noauto_da_alloc"}, | 1210 | {Opt_noauto_da_alloc, "noauto_da_alloc"}, |
1211 | {Opt_dioread_nolock, "dioread_nolock"}, | ||
1212 | {Opt_dioread_lock, "dioread_lock"}, | ||
1179 | {Opt_discard, "discard"}, | 1213 | {Opt_discard, "discard"}, |
1180 | {Opt_nodiscard, "nodiscard"}, | 1214 | {Opt_nodiscard, "nodiscard"}, |
1181 | {Opt_err, NULL}, | 1215 | {Opt_err, NULL}, |
@@ -1205,6 +1239,66 @@ static ext4_fsblk_t get_sb_block(void **data) | |||
1205 | } | 1239 | } |
1206 | 1240 | ||
1207 | #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) | 1241 | #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) |
1242 | static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" | ||
1243 | "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n"; | ||
1244 | |||
1245 | #ifdef CONFIG_QUOTA | ||
1246 | static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) | ||
1247 | { | ||
1248 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
1249 | char *qname; | ||
1250 | |||
1251 | if (sb_any_quota_loaded(sb) && | ||
1252 | !sbi->s_qf_names[qtype]) { | ||
1253 | ext4_msg(sb, KERN_ERR, | ||
1254 | "Cannot change journaled " | ||
1255 | "quota options when quota turned on"); | ||
1256 | return 0; | ||
1257 | } | ||
1258 | qname = match_strdup(args); | ||
1259 | if (!qname) { | ||
1260 | ext4_msg(sb, KERN_ERR, | ||
1261 | "Not enough memory for storing quotafile name"); | ||
1262 | return 0; | ||
1263 | } | ||
1264 | if (sbi->s_qf_names[qtype] && | ||
1265 | strcmp(sbi->s_qf_names[qtype], qname)) { | ||
1266 | ext4_msg(sb, KERN_ERR, | ||
1267 | "%s quota file already specified", QTYPE2NAME(qtype)); | ||
1268 | kfree(qname); | ||
1269 | return 0; | ||
1270 | } | ||
1271 | sbi->s_qf_names[qtype] = qname; | ||
1272 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
1273 | ext4_msg(sb, KERN_ERR, | ||
1274 | "quotafile must be on filesystem root"); | ||
1275 | kfree(sbi->s_qf_names[qtype]); | ||
1276 | sbi->s_qf_names[qtype] = NULL; | ||
1277 | return 0; | ||
1278 | } | ||
1279 | set_opt(sbi->s_mount_opt, QUOTA); | ||
1280 | return 1; | ||
1281 | } | ||
1282 | |||
1283 | static int clear_qf_name(struct super_block *sb, int qtype) | ||
1284 | { | ||
1285 | |||
1286 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
1287 | |||
1288 | if (sb_any_quota_loaded(sb) && | ||
1289 | sbi->s_qf_names[qtype]) { | ||
1290 | ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" | ||
1291 | " when quota turned on"); | ||
1292 | return 0; | ||
1293 | } | ||
1294 | /* | ||
1295 | * The space will be released later when all options are confirmed | ||
1296 | * to be correct | ||
1297 | */ | ||
1298 | sbi->s_qf_names[qtype] = NULL; | ||
1299 | return 1; | ||
1300 | } | ||
1301 | #endif | ||
1208 | 1302 | ||
1209 | static int parse_options(char *options, struct super_block *sb, | 1303 | static int parse_options(char *options, struct super_block *sb, |
1210 | unsigned long *journal_devnum, | 1304 | unsigned long *journal_devnum, |
@@ -1217,8 +1311,7 @@ static int parse_options(char *options, struct super_block *sb, | |||
1217 | int data_opt = 0; | 1311 | int data_opt = 0; |
1218 | int option; | 1312 | int option; |
1219 | #ifdef CONFIG_QUOTA | 1313 | #ifdef CONFIG_QUOTA |
1220 | int qtype, qfmt; | 1314 | int qfmt; |
1221 | char *qname; | ||
1222 | #endif | 1315 | #endif |
1223 | 1316 | ||
1224 | if (!options) | 1317 | if (!options) |
@@ -1229,19 +1322,31 @@ static int parse_options(char *options, struct super_block *sb, | |||
1229 | if (!*p) | 1322 | if (!*p) |
1230 | continue; | 1323 | continue; |
1231 | 1324 | ||
1325 | /* | ||
1326 | * Initialize args struct so we know whether arg was | ||
1327 | * found; some options take optional arguments. | ||
1328 | */ | ||
1329 | args[0].to = args[0].from = 0; | ||
1232 | token = match_token(p, tokens, args); | 1330 | token = match_token(p, tokens, args); |
1233 | switch (token) { | 1331 | switch (token) { |
1234 | case Opt_bsd_df: | 1332 | case Opt_bsd_df: |
1333 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
1235 | clear_opt(sbi->s_mount_opt, MINIX_DF); | 1334 | clear_opt(sbi->s_mount_opt, MINIX_DF); |
1236 | break; | 1335 | break; |
1237 | case Opt_minix_df: | 1336 | case Opt_minix_df: |
1337 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
1238 | set_opt(sbi->s_mount_opt, MINIX_DF); | 1338 | set_opt(sbi->s_mount_opt, MINIX_DF); |
1339 | |||
1239 | break; | 1340 | break; |
1240 | case Opt_grpid: | 1341 | case Opt_grpid: |
1342 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
1241 | set_opt(sbi->s_mount_opt, GRPID); | 1343 | set_opt(sbi->s_mount_opt, GRPID); |
1344 | |||
1242 | break; | 1345 | break; |
1243 | case Opt_nogrpid: | 1346 | case Opt_nogrpid: |
1347 | ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); | ||
1244 | clear_opt(sbi->s_mount_opt, GRPID); | 1348 | clear_opt(sbi->s_mount_opt, GRPID); |
1349 | |||
1245 | break; | 1350 | break; |
1246 | case Opt_resuid: | 1351 | case Opt_resuid: |
1247 | if (match_int(&args[0], &option)) | 1352 | if (match_int(&args[0], &option)) |
@@ -1378,14 +1483,13 @@ static int parse_options(char *options, struct super_block *sb, | |||
1378 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; | 1483 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; |
1379 | datacheck: | 1484 | datacheck: |
1380 | if (is_remount) { | 1485 | if (is_remount) { |
1381 | if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS) | 1486 | if (test_opt(sb, DATA_FLAGS) != data_opt) { |
1382 | != data_opt) { | ||
1383 | ext4_msg(sb, KERN_ERR, | 1487 | ext4_msg(sb, KERN_ERR, |
1384 | "Cannot change data mode on remount"); | 1488 | "Cannot change data mode on remount"); |
1385 | return 0; | 1489 | return 0; |
1386 | } | 1490 | } |
1387 | } else { | 1491 | } else { |
1388 | sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS; | 1492 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); |
1389 | sbi->s_mount_opt |= data_opt; | 1493 | sbi->s_mount_opt |= data_opt; |
1390 | } | 1494 | } |
1391 | break; | 1495 | break; |
@@ -1397,63 +1501,22 @@ static int parse_options(char *options, struct super_block *sb, | |||
1397 | break; | 1501 | break; |
1398 | #ifdef CONFIG_QUOTA | 1502 | #ifdef CONFIG_QUOTA |
1399 | case Opt_usrjquota: | 1503 | case Opt_usrjquota: |
1400 | qtype = USRQUOTA; | 1504 | if (!set_qf_name(sb, USRQUOTA, &args[0])) |
1401 | goto set_qf_name; | ||
1402 | case Opt_grpjquota: | ||
1403 | qtype = GRPQUOTA; | ||
1404 | set_qf_name: | ||
1405 | if (sb_any_quota_loaded(sb) && | ||
1406 | !sbi->s_qf_names[qtype]) { | ||
1407 | ext4_msg(sb, KERN_ERR, | ||
1408 | "Cannot change journaled " | ||
1409 | "quota options when quota turned on"); | ||
1410 | return 0; | 1505 | return 0; |
1411 | } | 1506 | break; |
1412 | qname = match_strdup(&args[0]); | 1507 | case Opt_grpjquota: |
1413 | if (!qname) { | 1508 | if (!set_qf_name(sb, GRPQUOTA, &args[0])) |
1414 | ext4_msg(sb, KERN_ERR, | ||
1415 | "Not enough memory for " | ||
1416 | "storing quotafile name"); | ||
1417 | return 0; | ||
1418 | } | ||
1419 | if (sbi->s_qf_names[qtype] && | ||
1420 | strcmp(sbi->s_qf_names[qtype], qname)) { | ||
1421 | ext4_msg(sb, KERN_ERR, | ||
1422 | "%s quota file already " | ||
1423 | "specified", QTYPE2NAME(qtype)); | ||
1424 | kfree(qname); | ||
1425 | return 0; | ||
1426 | } | ||
1427 | sbi->s_qf_names[qtype] = qname; | ||
1428 | if (strchr(sbi->s_qf_names[qtype], '/')) { | ||
1429 | ext4_msg(sb, KERN_ERR, | ||
1430 | "quotafile must be on " | ||
1431 | "filesystem root"); | ||
1432 | kfree(sbi->s_qf_names[qtype]); | ||
1433 | sbi->s_qf_names[qtype] = NULL; | ||
1434 | return 0; | 1509 | return 0; |
1435 | } | ||
1436 | set_opt(sbi->s_mount_opt, QUOTA); | ||
1437 | break; | 1510 | break; |
1438 | case Opt_offusrjquota: | 1511 | case Opt_offusrjquota: |
1439 | qtype = USRQUOTA; | 1512 | if (!clear_qf_name(sb, USRQUOTA)) |
1440 | goto clear_qf_name; | 1513 | return 0; |
1514 | break; | ||
1441 | case Opt_offgrpjquota: | 1515 | case Opt_offgrpjquota: |
1442 | qtype = GRPQUOTA; | 1516 | if (!clear_qf_name(sb, GRPQUOTA)) |
1443 | clear_qf_name: | ||
1444 | if (sb_any_quota_loaded(sb) && | ||
1445 | sbi->s_qf_names[qtype]) { | ||
1446 | ext4_msg(sb, KERN_ERR, "Cannot change " | ||
1447 | "journaled quota options when " | ||
1448 | "quota turned on"); | ||
1449 | return 0; | 1517 | return 0; |
1450 | } | ||
1451 | /* | ||
1452 | * The space will be released later when all options | ||
1453 | * are confirmed to be correct | ||
1454 | */ | ||
1455 | sbi->s_qf_names[qtype] = NULL; | ||
1456 | break; | 1518 | break; |
1519 | |||
1457 | case Opt_jqfmt_vfsold: | 1520 | case Opt_jqfmt_vfsold: |
1458 | qfmt = QFMT_VFS_OLD; | 1521 | qfmt = QFMT_VFS_OLD; |
1459 | goto set_qf_format; | 1522 | goto set_qf_format; |
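The usrjquota/grpjquota and offusrjquota/offgrpjquota cases above now delegate to set_qf_name()/clear_qf_name() helpers that return 0 on failure. Those helpers are added elsewhere in the patch and are not shown in this hunk; the sketch below reconstructs one plausible set_qf_name() from the checks the removed inline code performed (quota already loaded, allocation failure, conflicting name, name containing '/'), so the helper that actually landed may differ in detail.

    /*
     * Plausible shape for set_qf_name(), reconstructed from the removed
     * inline code above. Returns 1 on success and 0 on failure, which is
     * how the call sites test it.
     */
    static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
    {
            struct ext4_sb_info *sbi = EXT4_SB(sb);
            char *qname;

            if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
                    ext4_msg(sb, KERN_ERR, "Cannot change journaled "
                             "quota options when quota turned on");
                    return 0;
            }
            qname = match_strdup(args);
            if (!qname) {
                    ext4_msg(sb, KERN_ERR,
                             "Not enough memory for storing quotafile name");
                    return 0;
            }
            if (sbi->s_qf_names[qtype]) {
                    int same = !strcmp(sbi->s_qf_names[qtype], qname);

                    kfree(qname);   /* either a duplicate or a conflict */
                    if (!same) {
                            ext4_msg(sb, KERN_ERR, "%s quota file already "
                                     "specified", QTYPE2NAME(qtype));
                            return 0;
                    }
                    return 1;       /* same name given twice: nothing to do */
            }
            if (strchr(qname, '/')) {
                    ext4_msg(sb, KERN_ERR,
                             "quotafile must be on filesystem root");
                    kfree(qname);
                    return 0;
            }
            sbi->s_qf_names[qtype] = qname;
            set_opt(sbi->s_mount_opt, QUOTA);
            return 1;
    }

A matching clear_qf_name() would refuse to drop a journaled quota name while quota is still loaded and otherwise reset s_qf_names[qtype] to NULL, mirroring the removed Opt_offusrjquota/Opt_offgrpjquota branches.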
@@ -1518,10 +1581,11 @@ set_qf_format: | |||
1518 | clear_opt(sbi->s_mount_opt, BARRIER); | 1581 | clear_opt(sbi->s_mount_opt, BARRIER); |
1519 | break; | 1582 | break; |
1520 | case Opt_barrier: | 1583 | case Opt_barrier: |
1521 | if (match_int(&args[0], &option)) { | 1584 | if (args[0].from) { |
1522 | set_opt(sbi->s_mount_opt, BARRIER); | 1585 | if (match_int(&args[0], &option)) |
1523 | break; | 1586 | return 0; |
1524 | } | 1587 | } else |
1588 | option = 1; /* No argument, default to 1 */ | ||
1525 | if (option) | 1589 | if (option) |
1526 | set_opt(sbi->s_mount_opt, BARRIER); | 1590 | set_opt(sbi->s_mount_opt, BARRIER); |
1527 | else | 1591 | else |
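Both this barrier case and the auto_da_alloc case further down switch to the same convention: args[0] is zeroed before match_token() runs (see the initialization at the top of this loop), so a non-NULL args[0].from means an explicit value such as barrier=0 was captured. A malformed value now aborts option parsing instead of silently enabling the feature, and a bare option defaults to 1. Below is a self-contained userspace model of that pattern; the substring struct and match_int() are simplified stand-ins for the kernel's <linux/parser.h> helpers.

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's substring_t and match_int();
     * match_int() returns 0 on success, like its kernel counterpart. */
    struct substring {
            const char *from;       /* NULL when no argument was captured */
            const char *to;
    };

    static int match_int(const struct substring *s, int *result)
    {
            char *end;
            long v = strtol(s->from, &end, 10);

            if (end != s->to)       /* not a clean integer */
                    return -1;
            *result = (int)v;
            return 0;
    }

    /* Returns 1 and sets *barrier on success, 0 on a malformed argument. */
    static int parse_barrier(const struct substring *arg, int *barrier)
    {
            int option;

            if (arg->from) {                /* "barrier=N" */
                    if (match_int(arg, &option))
                            return 0;       /* reject the mount options */
            } else {
                    option = 1;             /* bare "barrier" means enabled */
            }
            *barrier = !!option;
            return 1;
    }

    int main(void)
    {
            const char zero[] = "0";
            struct substring with_arg = { zero, zero + 1 };
            struct substring no_arg = { NULL, NULL };
            int b;

            if (parse_barrier(&with_arg, &b))
                    printf("barrier=%d\n", b);      /* barrier=0 */
            if (parse_barrier(&no_arg, &b))
                    printf("barrier=%d\n", b);      /* barrier=1 */
            return 0;
    }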
@@ -1594,10 +1658,11 @@ set_qf_format: | |||
1594 | set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); | 1658 | set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); |
1595 | break; | 1659 | break; |
1596 | case Opt_auto_da_alloc: | 1660 | case Opt_auto_da_alloc: |
1597 | if (match_int(&args[0], &option)) { | 1661 | if (args[0].from) { |
1598 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); | 1662 | if (match_int(&args[0], &option)) |
1599 | break; | 1663 | return 0; |
1600 | } | 1664 | } else |
1665 | option = 1; /* No argument, default to 1 */ | ||
1601 | if (option) | 1666 | if (option) |
1602 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); | 1667 | clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); |
1603 | else | 1668 | else |
@@ -1609,6 +1674,12 @@ set_qf_format: | |||
1609 | case Opt_nodiscard: | 1674 | case Opt_nodiscard: |
1610 | clear_opt(sbi->s_mount_opt, DISCARD); | 1675 | clear_opt(sbi->s_mount_opt, DISCARD); |
1611 | break; | 1676 | break; |
1677 | case Opt_dioread_nolock: | ||
1678 | set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
1679 | break; | ||
1680 | case Opt_dioread_lock: | ||
1681 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
1682 | break; | ||
1612 | default: | 1683 | default: |
1613 | ext4_msg(sb, KERN_ERR, | 1684 | ext4_msg(sb, KERN_ERR, |
1614 | "Unrecognized mount option \"%s\" " | 1685 | "Unrecognized mount option \"%s\" " |
@@ -1618,18 +1689,13 @@ set_qf_format: | |||
1618 | } | 1689 | } |
1619 | #ifdef CONFIG_QUOTA | 1690 | #ifdef CONFIG_QUOTA |
1620 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { | 1691 | if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { |
1621 | if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) && | 1692 | if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) |
1622 | sbi->s_qf_names[USRQUOTA]) | ||
1623 | clear_opt(sbi->s_mount_opt, USRQUOTA); | 1693 | clear_opt(sbi->s_mount_opt, USRQUOTA); |
1624 | 1694 | ||
1625 | if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) && | 1695 | if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) |
1626 | sbi->s_qf_names[GRPQUOTA]) | ||
1627 | clear_opt(sbi->s_mount_opt, GRPQUOTA); | 1696 | clear_opt(sbi->s_mount_opt, GRPQUOTA); |
1628 | 1697 | ||
1629 | if ((sbi->s_qf_names[USRQUOTA] && | 1698 | if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { |
1630 | (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) || | ||
1631 | (sbi->s_qf_names[GRPQUOTA] && | ||
1632 | (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) { | ||
1633 | ext4_msg(sb, KERN_ERR, "old and new quota " | 1699 | ext4_msg(sb, KERN_ERR, "old and new quota " |
1634 | "format mixing"); | 1700 | "format mixing"); |
1635 | return 0; | 1701 | return 0; |
@@ -1939,7 +2005,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, | |||
1939 | } | 2005 | } |
1940 | 2006 | ||
1941 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); | 2007 | list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); |
1942 | vfs_dq_init(inode); | 2008 | dquot_initialize(inode); |
1943 | if (inode->i_nlink) { | 2009 | if (inode->i_nlink) { |
1944 | ext4_msg(sb, KERN_DEBUG, | 2010 | ext4_msg(sb, KERN_DEBUG, |
1945 | "%s: truncating inode %lu to %lld bytes", | 2011 | "%s: truncating inode %lu to %lld bytes", |
@@ -2292,7 +2358,7 @@ static void ext4_sb_release(struct kobject *kobj) | |||
2292 | } | 2358 | } |
2293 | 2359 | ||
2294 | 2360 | ||
2295 | static struct sysfs_ops ext4_attr_ops = { | 2361 | static const struct sysfs_ops ext4_attr_ops = { |
2296 | .show = ext4_attr_show, | 2362 | .show = ext4_attr_show, |
2297 | .store = ext4_attr_store, | 2363 | .store = ext4_attr_store, |
2298 | }; | 2364 | }; |
@@ -2432,8 +2498,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2432 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); | 2498 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); |
2433 | if (def_mount_opts & EXT4_DEFM_DEBUG) | 2499 | if (def_mount_opts & EXT4_DEFM_DEBUG) |
2434 | set_opt(sbi->s_mount_opt, DEBUG); | 2500 | set_opt(sbi->s_mount_opt, DEBUG); |
2435 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) | 2501 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { |
2502 | ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", | ||
2503 | "2.6.38"); | ||
2436 | set_opt(sbi->s_mount_opt, GRPID); | 2504 | set_opt(sbi->s_mount_opt, GRPID); |
2505 | } | ||
2437 | if (def_mount_opts & EXT4_DEFM_UID16) | 2506 | if (def_mount_opts & EXT4_DEFM_UID16) |
2438 | set_opt(sbi->s_mount_opt, NO_UID32); | 2507 | set_opt(sbi->s_mount_opt, NO_UID32); |
2439 | #ifdef CONFIG_EXT4_FS_XATTR | 2508 | #ifdef CONFIG_EXT4_FS_XATTR |
@@ -2445,11 +2514,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2445 | set_opt(sbi->s_mount_opt, POSIX_ACL); | 2514 | set_opt(sbi->s_mount_opt, POSIX_ACL); |
2446 | #endif | 2515 | #endif |
2447 | if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) | 2516 | if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) |
2448 | sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; | 2517 | set_opt(sbi->s_mount_opt, JOURNAL_DATA); |
2449 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) | 2518 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) |
2450 | sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA; | 2519 | set_opt(sbi->s_mount_opt, ORDERED_DATA); |
2451 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) | 2520 | else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) |
2452 | sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA; | 2521 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); |
2453 | 2522 | ||
2454 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) | 2523 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) |
2455 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); | 2524 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); |
@@ -2477,7 +2546,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2477 | goto failed_mount; | 2546 | goto failed_mount; |
2478 | 2547 | ||
2479 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 2548 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
2480 | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 2549 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
2481 | 2550 | ||
2482 | if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && | 2551 | if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && |
2483 | (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || | 2552 | (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || |
@@ -2766,7 +2835,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2766 | EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { | 2835 | EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { |
2767 | ext4_msg(sb, KERN_ERR, "required journal recovery " | 2836 | ext4_msg(sb, KERN_ERR, "required journal recovery " |
2768 | "suppressed and not mounted read-only"); | 2837 | "suppressed and not mounted read-only"); |
2769 | goto failed_mount4; | 2838 | goto failed_mount_wq; |
2770 | } else { | 2839 | } else { |
2771 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); | 2840 | clear_opt(sbi->s_mount_opt, DATA_FLAGS); |
2772 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); | 2841 | set_opt(sbi->s_mount_opt, WRITEBACK_DATA); |
@@ -2779,7 +2848,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2779 | !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, | 2848 | !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, |
2780 | JBD2_FEATURE_INCOMPAT_64BIT)) { | 2849 | JBD2_FEATURE_INCOMPAT_64BIT)) { |
2781 | ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); | 2850 | ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); |
2782 | goto failed_mount4; | 2851 | goto failed_mount_wq; |
2783 | } | 2852 | } |
2784 | 2853 | ||
2785 | if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { | 2854 | if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { |
@@ -2818,7 +2887,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2818 | (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { | 2887 | (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { |
2819 | ext4_msg(sb, KERN_ERR, "Journal does not support " | 2888 | ext4_msg(sb, KERN_ERR, "Journal does not support " |
2820 | "requested data journaling mode"); | 2889 | "requested data journaling mode"); |
2821 | goto failed_mount4; | 2890 | goto failed_mount_wq; |
2822 | } | 2891 | } |
2823 | default: | 2892 | default: |
2824 | break; | 2893 | break; |
@@ -2826,13 +2895,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2826 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); | 2895 | set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); |
2827 | 2896 | ||
2828 | no_journal: | 2897 | no_journal: |
2829 | |||
2830 | if (test_opt(sb, NOBH)) { | 2898 | if (test_opt(sb, NOBH)) { |
2831 | if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { | 2899 | if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { |
2832 | ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " | 2900 | ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " |
2833 | "its supported only with writeback mode"); | 2901 | "its supported only with writeback mode"); |
2834 | clear_opt(sbi->s_mount_opt, NOBH); | 2902 | clear_opt(sbi->s_mount_opt, NOBH); |
2835 | } | 2903 | } |
2904 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
2905 | ext4_msg(sb, KERN_WARNING, "dioread_nolock option is " | ||
2906 | "not supported with nobh mode"); | ||
2907 | goto failed_mount_wq; | ||
2908 | } | ||
2836 | } | 2909 | } |
2837 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); | 2910 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); |
2838 | if (!EXT4_SB(sb)->dio_unwritten_wq) { | 2911 | if (!EXT4_SB(sb)->dio_unwritten_wq) { |
@@ -2897,6 +2970,18 @@ no_journal: | |||
2897 | "requested data journaling mode"); | 2970 | "requested data journaling mode"); |
2898 | clear_opt(sbi->s_mount_opt, DELALLOC); | 2971 | clear_opt(sbi->s_mount_opt, DELALLOC); |
2899 | } | 2972 | } |
2973 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
2974 | if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { | ||
2975 | ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " | ||
2976 | "option - requested data journaling mode"); | ||
2977 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
2978 | } | ||
2979 | if (sb->s_blocksize < PAGE_SIZE) { | ||
2980 | ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " | ||
2981 | "option - block size is too small"); | ||
2982 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | ||
2983 | } | ||
2984 | } | ||
2900 | 2985 | ||
2901 | err = ext4_setup_system_zone(sb); | 2986 | err = ext4_setup_system_zone(sb); |
2902 | if (err) { | 2987 | if (err) { |
@@ -3360,10 +3445,9 @@ static void ext4_clear_journal_err(struct super_block *sb, | |||
3360 | char nbuf[16]; | 3445 | char nbuf[16]; |
3361 | 3446 | ||
3362 | errstr = ext4_decode_error(sb, j_errno, nbuf); | 3447 | errstr = ext4_decode_error(sb, j_errno, nbuf); |
3363 | ext4_warning(sb, __func__, "Filesystem error recorded " | 3448 | ext4_warning(sb, "Filesystem error recorded " |
3364 | "from previous mount: %s", errstr); | 3449 | "from previous mount: %s", errstr); |
3365 | ext4_warning(sb, __func__, "Marking fs in need of " | 3450 | ext4_warning(sb, "Marking fs in need of filesystem check."); |
3366 | "filesystem check."); | ||
3367 | 3451 | ||
3368 | EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; | 3452 | EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; |
3369 | es->s_state |= cpu_to_le16(EXT4_ERROR_FS); | 3453 | es->s_state |= cpu_to_le16(EXT4_ERROR_FS); |
@@ -3514,7 +3598,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
3514 | ext4_abort(sb, __func__, "Abort forced by user"); | 3598 | ext4_abort(sb, __func__, "Abort forced by user"); |
3515 | 3599 | ||
3516 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | | 3600 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | |
3517 | ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); | 3601 | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); |
3518 | 3602 | ||
3519 | es = sbi->s_es; | 3603 | es = sbi->s_es; |
3520 | 3604 | ||
@@ -3708,7 +3792,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
3708 | * Process 1 Process 2 | 3792 | * Process 1 Process 2 |
3709 | * ext4_create() quota_sync() | 3793 | * ext4_create() quota_sync() |
3710 | * jbd2_journal_start() write_dquot() | 3794 | * jbd2_journal_start() write_dquot() |
3711 | * vfs_dq_init() down(dqio_mutex) | 3795 | * dquot_initialize() down(dqio_mutex) |
3712 | * down(dqio_mutex) jbd2_journal_start() | 3796 | * down(dqio_mutex) jbd2_journal_start() |
3713 | * | 3797 | * |
3714 | */ | 3798 | */ |
@@ -3917,9 +4001,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
3917 | ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); | 4001 | ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); |
3918 | int err = 0; | 4002 | int err = 0; |
3919 | int offset = off & (sb->s_blocksize - 1); | 4003 | int offset = off & (sb->s_blocksize - 1); |
3920 | int tocopy; | ||
3921 | int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL; | 4004 | int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL; |
3922 | size_t towrite = len; | ||
3923 | struct buffer_head *bh; | 4005 | struct buffer_head *bh; |
3924 | handle_t *handle = journal_current_handle(); | 4006 | handle_t *handle = journal_current_handle(); |
3925 | 4007 | ||
@@ -3929,52 +4011,53 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
3929 | (unsigned long long)off, (unsigned long long)len); | 4011 | (unsigned long long)off, (unsigned long long)len); |
3930 | return -EIO; | 4012 | return -EIO; |
3931 | } | 4013 | } |
4014 | /* | ||
4015 | * Since we account only one data block in transaction credits, | ||
4016 | * then it is impossible to cross a block boundary. | ||
4017 | */ | ||
4018 | if (sb->s_blocksize - offset < len) { | ||
4019 | ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" | ||
4020 | " cancelled because not block aligned", | ||
4021 | (unsigned long long)off, (unsigned long long)len); | ||
4022 | return -EIO; | ||
4023 | } | ||
4024 | |||
3932 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); | 4025 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); |
3933 | while (towrite > 0) { | 4026 | bh = ext4_bread(handle, inode, blk, 1, &err); |
3934 | tocopy = sb->s_blocksize - offset < towrite ? | 4027 | if (!bh) |
3935 | sb->s_blocksize - offset : towrite; | 4028 | goto out; |
3936 | bh = ext4_bread(handle, inode, blk, 1, &err); | 4029 | if (journal_quota) { |
3937 | if (!bh) | 4030 | err = ext4_journal_get_write_access(handle, bh); |
4031 | if (err) { | ||
4032 | brelse(bh); | ||
3938 | goto out; | 4033 | goto out; |
3939 | if (journal_quota) { | ||
3940 | err = ext4_journal_get_write_access(handle, bh); | ||
3941 | if (err) { | ||
3942 | brelse(bh); | ||
3943 | goto out; | ||
3944 | } | ||
3945 | } | 4034 | } |
3946 | lock_buffer(bh); | ||
3947 | memcpy(bh->b_data+offset, data, tocopy); | ||
3948 | flush_dcache_page(bh->b_page); | ||
3949 | unlock_buffer(bh); | ||
3950 | if (journal_quota) | ||
3951 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | ||
3952 | else { | ||
3953 | /* Always do at least ordered writes for quotas */ | ||
3954 | err = ext4_jbd2_file_inode(handle, inode); | ||
3955 | mark_buffer_dirty(bh); | ||
3956 | } | ||
3957 | brelse(bh); | ||
3958 | if (err) | ||
3959 | goto out; | ||
3960 | offset = 0; | ||
3961 | towrite -= tocopy; | ||
3962 | data += tocopy; | ||
3963 | blk++; | ||
3964 | } | 4035 | } |
4036 | lock_buffer(bh); | ||
4037 | memcpy(bh->b_data+offset, data, len); | ||
4038 | flush_dcache_page(bh->b_page); | ||
4039 | unlock_buffer(bh); | ||
4040 | if (journal_quota) | ||
4041 | err = ext4_handle_dirty_metadata(handle, NULL, bh); | ||
4042 | else { | ||
4043 | /* Always do at least ordered writes for quotas */ | ||
4044 | err = ext4_jbd2_file_inode(handle, inode); | ||
4045 | mark_buffer_dirty(bh); | ||
4046 | } | ||
4047 | brelse(bh); | ||
3965 | out: | 4048 | out: |
3966 | if (len == towrite) { | 4049 | if (err) { |
3967 | mutex_unlock(&inode->i_mutex); | 4050 | mutex_unlock(&inode->i_mutex); |
3968 | return err; | 4051 | return err; |
3969 | } | 4052 | } |
3970 | if (inode->i_size < off+len-towrite) { | 4053 | if (inode->i_size < off + len) { |
3971 | i_size_write(inode, off+len-towrite); | 4054 | i_size_write(inode, off + len); |
3972 | EXT4_I(inode)->i_disksize = inode->i_size; | 4055 | EXT4_I(inode)->i_disksize = inode->i_size; |
3973 | } | 4056 | } |
3974 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 4057 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
3975 | ext4_mark_inode_dirty(handle, inode); | 4058 | ext4_mark_inode_dirty(handle, inode); |
3976 | mutex_unlock(&inode->i_mutex); | 4059 | mutex_unlock(&inode->i_mutex); |
3977 | return len - towrite; | 4060 | return len; |
3978 | } | 4061 | } |
3979 | 4062 | ||
3980 | #endif | 4063 | #endif |
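The rewritten ext4_quota_write() above drops the old tocopy/towrite loop: because the transaction only reserves credits for one data block, the new code first rejects any write that would cross a block boundary and then performs a single ext4_bread()/memcpy() on that block. The boundary check is plain arithmetic on the in-block offset; a small userspace model:

    #include <assert.h>
    #include <stddef.h>

    /*
     * Models the guard added to ext4_quota_write(): with only one data
     * block's worth of journal credits reserved, a write of `len` bytes
     * starting at byte `off` must fit entirely inside the block that
     * contains `off`. Assumes blocksize is a power of two.
     */
    static int fits_in_one_block(size_t blocksize, unsigned long long off,
                                 size_t len)
    {
            size_t offset = (size_t)(off & (blocksize - 1));

            return blocksize - offset >= len;
    }

    int main(void)
    {
            assert(fits_in_one_block(4096, 0, 4096));     /* exactly one block */
            assert(fits_in_one_block(4096, 4000, 96));    /* ends on the boundary */
            assert(!fits_in_one_block(4096, 4000, 97));   /* spills into the next block */
            return 0;
    }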
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index f3a2f7ed45aa..b4c5aa8489d8 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
@@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, | |||
227 | ea_bdebug(bh, "b_count=%d, refcount=%d", | 227 | ea_bdebug(bh, "b_count=%d, refcount=%d", |
228 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 228 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
229 | if (ext4_xattr_check_block(bh)) { | 229 | if (ext4_xattr_check_block(bh)) { |
230 | bad_block: ext4_error(inode->i_sb, __func__, | 230 | bad_block: |
231 | ext4_error(inode->i_sb, | ||
231 | "inode %lu: bad block %llu", inode->i_ino, | 232 | "inode %lu: bad block %llu", inode->i_ino, |
232 | EXT4_I(inode)->i_file_acl); | 233 | EXT4_I(inode)->i_file_acl); |
233 | error = -EIO; | 234 | error = -EIO; |
@@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, | |||
267 | void *end; | 268 | void *end; |
268 | int error; | 269 | int error; |
269 | 270 | ||
270 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) | 271 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) |
271 | return -ENODATA; | 272 | return -ENODATA; |
272 | error = ext4_get_inode_loc(inode, &iloc); | 273 | error = ext4_get_inode_loc(inode, &iloc); |
273 | if (error) | 274 | if (error) |
@@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) | |||
371 | ea_bdebug(bh, "b_count=%d, refcount=%d", | 372 | ea_bdebug(bh, "b_count=%d, refcount=%d", |
372 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); | 373 | atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); |
373 | if (ext4_xattr_check_block(bh)) { | 374 | if (ext4_xattr_check_block(bh)) { |
374 | ext4_error(inode->i_sb, __func__, | 375 | ext4_error(inode->i_sb, |
375 | "inode %lu: bad block %llu", inode->i_ino, | 376 | "inode %lu: bad block %llu", inode->i_ino, |
376 | EXT4_I(inode)->i_file_acl); | 377 | EXT4_I(inode)->i_file_acl); |
377 | error = -EIO; | 378 | error = -EIO; |
@@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) | |||
396 | void *end; | 397 | void *end; |
397 | int error; | 398 | int error; |
398 | 399 | ||
399 | if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) | 400 | if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) |
400 | return 0; | 401 | return 0; |
401 | error = ext4_get_inode_loc(inode, &iloc); | 402 | error = ext4_get_inode_loc(inode, &iloc); |
402 | if (error) | 403 | if (error) |
@@ -494,7 +495,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, | |||
494 | error = ext4_handle_dirty_metadata(handle, inode, bh); | 495 | error = ext4_handle_dirty_metadata(handle, inode, bh); |
495 | if (IS_SYNC(inode)) | 496 | if (IS_SYNC(inode)) |
496 | ext4_handle_sync(handle); | 497 | ext4_handle_sync(handle); |
497 | vfs_dq_free_block(inode, 1); | 498 | dquot_free_block(inode, 1); |
498 | ea_bdebug(bh, "refcount now=%d; releasing", | 499 | ea_bdebug(bh, "refcount now=%d; releasing", |
499 | le32_to_cpu(BHDR(bh)->h_refcount)); | 500 | le32_to_cpu(BHDR(bh)->h_refcount)); |
500 | if (ce) | 501 | if (ce) |
@@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, | |||
665 | atomic_read(&(bs->bh->b_count)), | 666 | atomic_read(&(bs->bh->b_count)), |
666 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); | 667 | le32_to_cpu(BHDR(bs->bh)->h_refcount)); |
667 | if (ext4_xattr_check_block(bs->bh)) { | 668 | if (ext4_xattr_check_block(bs->bh)) { |
668 | ext4_error(sb, __func__, | 669 | ext4_error(sb, "inode %lu: bad block %llu", |
669 | "inode %lu: bad block %llu", inode->i_ino, | 670 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
670 | EXT4_I(inode)->i_file_acl); | ||
671 | error = -EIO; | 671 | error = -EIO; |
672 | goto cleanup; | 672 | goto cleanup; |
673 | } | 673 | } |
@@ -787,8 +787,8 @@ inserted: | |||
787 | else { | 787 | else { |
788 | /* The old block is released after updating | 788 | /* The old block is released after updating |
789 | the inode. */ | 789 | the inode. */ |
790 | error = -EDQUOT; | 790 | error = dquot_alloc_block(inode, 1); |
791 | if (vfs_dq_alloc_block(inode, 1)) | 791 | if (error) |
792 | goto cleanup; | 792 | goto cleanup; |
793 | error = ext4_journal_get_write_access(handle, | 793 | error = ext4_journal_get_write_access(handle, |
794 | new_bh); | 794 | new_bh); |
@@ -876,13 +876,12 @@ cleanup: | |||
876 | return error; | 876 | return error; |
877 | 877 | ||
878 | cleanup_dquot: | 878 | cleanup_dquot: |
879 | vfs_dq_free_block(inode, 1); | 879 | dquot_free_block(inode, 1); |
880 | goto cleanup; | 880 | goto cleanup; |
881 | 881 | ||
882 | bad_block: | 882 | bad_block: |
883 | ext4_error(inode->i_sb, __func__, | 883 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
884 | "inode %lu: bad block %llu", inode->i_ino, | 884 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
885 | EXT4_I(inode)->i_file_acl); | ||
886 | goto cleanup; | 885 | goto cleanup; |
887 | 886 | ||
888 | #undef header | 887 | #undef header |
@@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, | |||
908 | is->s.base = is->s.first = IFIRST(header); | 907 | is->s.base = is->s.first = IFIRST(header); |
909 | is->s.here = is->s.first; | 908 | is->s.here = is->s.first; |
910 | is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; | 909 | is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; |
911 | if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { | 910 | if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { |
912 | error = ext4_xattr_check_names(IFIRST(header), is->s.end); | 911 | error = ext4_xattr_check_names(IFIRST(header), is->s.end); |
913 | if (error) | 912 | if (error) |
914 | return error; | 913 | return error; |
@@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, | |||
940 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); | 939 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
941 | if (!IS_LAST_ENTRY(s->first)) { | 940 | if (!IS_LAST_ENTRY(s->first)) { |
942 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); | 941 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
943 | EXT4_I(inode)->i_state |= EXT4_STATE_XATTR; | 942 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
944 | } else { | 943 | } else { |
945 | header->h_magic = cpu_to_le32(0); | 944 | header->h_magic = cpu_to_le32(0); |
946 | EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR; | 945 | ext4_clear_inode_state(inode, EXT4_STATE_XATTR); |
947 | } | 946 | } |
948 | return 0; | 947 | return 0; |
949 | } | 948 | } |
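Throughout xattr.c the open-coded EXT4_I(inode)->i_state bit twiddling is being replaced by ext4_test_inode_state()/ext4_set_inode_state()/ext4_clear_inode_state(). Those helpers are introduced in ext4.h, outside this file's diff; presumably they wrap per-bit operations on a flags word inside ext4_inode_info, so the field and names in the userspace model below are assumptions for illustration only.

    #include <assert.h>

    /* Model state bits; the real EXT4_STATE_* values live in ext4.h. */
    enum { MODEL_STATE_NEW, MODEL_STATE_XATTR, MODEL_STATE_NO_EXPAND };

    struct model_inode { unsigned long i_state_flags; };

    static int test_inode_state(struct model_inode *inode, int bit)
    {
            return (inode->i_state_flags >> bit) & 1UL;
    }

    static void set_inode_state(struct model_inode *inode, int bit)
    {
            inode->i_state_flags |= 1UL << bit;
    }

    static void clear_inode_state(struct model_inode *inode, int bit)
    {
            inode->i_state_flags &= ~(1UL << bit);
    }

    int main(void)
    {
            struct model_inode inode = { 0 };

            set_inode_state(&inode, MODEL_STATE_XATTR);
            assert(test_inode_state(&inode, MODEL_STATE_XATTR));
            clear_inode_state(&inode, MODEL_STATE_XATTR);
            assert(!test_inode_state(&inode, MODEL_STATE_XATTR));
            return 0;
    }

The gain over the old `EXT4_I(inode)->i_state |= EXT4_STATE_XATTR` style is that every caller goes through one accessor, so the backing representation can change (for example to atomic bitops) without touching call sites like the ones in this hunk.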
@@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
986 | if (strlen(name) > 255) | 985 | if (strlen(name) > 255) |
987 | return -ERANGE; | 986 | return -ERANGE; |
988 | down_write(&EXT4_I(inode)->xattr_sem); | 987 | down_write(&EXT4_I(inode)->xattr_sem); |
989 | no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND; | 988 | no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); |
990 | EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; | 989 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); |
991 | 990 | ||
992 | error = ext4_get_inode_loc(inode, &is.iloc); | 991 | error = ext4_get_inode_loc(inode, &is.iloc); |
993 | if (error) | 992 | if (error) |
@@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
997 | if (error) | 996 | if (error) |
998 | goto cleanup; | 997 | goto cleanup; |
999 | 998 | ||
1000 | if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { | 999 | if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { |
1001 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); | 1000 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); |
1002 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | 1001 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
1003 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW; | 1002 | ext4_clear_inode_state(inode, EXT4_STATE_NEW); |
1004 | } | 1003 | } |
1005 | 1004 | ||
1006 | error = ext4_xattr_ibody_find(inode, &i, &is); | 1005 | error = ext4_xattr_ibody_find(inode, &i, &is); |
@@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
1052 | ext4_xattr_update_super_block(handle, inode->i_sb); | 1051 | ext4_xattr_update_super_block(handle, inode->i_sb); |
1053 | inode->i_ctime = ext4_current_time(inode); | 1052 | inode->i_ctime = ext4_current_time(inode); |
1054 | if (!value) | 1053 | if (!value) |
1055 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; | 1054 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); |
1056 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); | 1055 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
1057 | /* | 1056 | /* |
1058 | * The bh is consumed by ext4_mark_iloc_dirty, even with | 1057 | * The bh is consumed by ext4_mark_iloc_dirty, even with |
@@ -1067,7 +1066,7 @@ cleanup: | |||
1067 | brelse(is.iloc.bh); | 1066 | brelse(is.iloc.bh); |
1068 | brelse(bs.bh); | 1067 | brelse(bs.bh); |
1069 | if (no_expand == 0) | 1068 | if (no_expand == 0) |
1070 | EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; | 1069 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); |
1071 | up_write(&EXT4_I(inode)->xattr_sem); | 1070 | up_write(&EXT4_I(inode)->xattr_sem); |
1072 | return error; | 1071 | return error; |
1073 | } | 1072 | } |
@@ -1195,9 +1194,8 @@ retry: | |||
1195 | if (!bh) | 1194 | if (!bh) |
1196 | goto cleanup; | 1195 | goto cleanup; |
1197 | if (ext4_xattr_check_block(bh)) { | 1196 | if (ext4_xattr_check_block(bh)) { |
1198 | ext4_error(inode->i_sb, __func__, | 1197 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
1199 | "inode %lu: bad block %llu", inode->i_ino, | 1198 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
1200 | EXT4_I(inode)->i_file_acl); | ||
1201 | error = -EIO; | 1199 | error = -EIO; |
1202 | goto cleanup; | 1200 | goto cleanup; |
1203 | } | 1201 | } |
@@ -1302,6 +1300,8 @@ retry: | |||
1302 | 1300 | ||
1303 | /* Remove the chosen entry from the inode */ | 1301 | /* Remove the chosen entry from the inode */ |
1304 | error = ext4_xattr_ibody_set(handle, inode, &i, is); | 1302 | error = ext4_xattr_ibody_set(handle, inode, &i, is); |
1303 | if (error) | ||
1304 | goto cleanup; | ||
1305 | 1305 | ||
1306 | entry = IFIRST(header); | 1306 | entry = IFIRST(header); |
1307 | if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) | 1307 | if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) |
@@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) | |||
1372 | goto cleanup; | 1372 | goto cleanup; |
1373 | bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); | 1373 | bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); |
1374 | if (!bh) { | 1374 | if (!bh) { |
1375 | ext4_error(inode->i_sb, __func__, | 1375 | ext4_error(inode->i_sb, "inode %lu: block %llu read error", |
1376 | "inode %lu: block %llu read error", inode->i_ino, | 1376 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
1377 | EXT4_I(inode)->i_file_acl); | ||
1378 | goto cleanup; | 1377 | goto cleanup; |
1379 | } | 1378 | } |
1380 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || | 1379 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || |
1381 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { | 1380 | BHDR(bh)->h_blocks != cpu_to_le32(1)) { |
1382 | ext4_error(inode->i_sb, __func__, | 1381 | ext4_error(inode->i_sb, "inode %lu: bad block %llu", |
1383 | "inode %lu: bad block %llu", inode->i_ino, | 1382 | inode->i_ino, EXT4_I(inode)->i_file_acl); |
1384 | EXT4_I(inode)->i_file_acl); | ||
1385 | goto cleanup; | 1383 | goto cleanup; |
1386 | } | 1384 | } |
1387 | ext4_xattr_release_block(handle, inode, bh); | 1385 | ext4_xattr_release_block(handle, inode, bh); |
@@ -1506,7 +1504,7 @@ again: | |||
1506 | } | 1504 | } |
1507 | bh = sb_bread(inode->i_sb, ce->e_block); | 1505 | bh = sb_bread(inode->i_sb, ce->e_block); |
1508 | if (!bh) { | 1506 | if (!bh) { |
1509 | ext4_error(inode->i_sb, __func__, | 1507 | ext4_error(inode->i_sb, |
1510 | "inode %lu: block %lu read error", | 1508 | "inode %lu: block %lu read error", |
1511 | inode->i_ino, (unsigned long) ce->e_block); | 1509 | inode->i_ino, (unsigned long) ce->e_block); |
1512 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= | 1510 | } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= |