path: root/fs
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 23:46:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 23:46:54 -0400
commit	8e099d1e8be3f598dcefd04d3cd5eb3673d4e098 (patch)
tree	b1a451984750c29a5b8741b287155960f4583b09 /fs
parent	b54ecfb7022d93e8d342ed4a2512d858d0682c0c (diff)
parent	86f0afd463215fc3e58020493482faa4ac3a4d69 (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "Bug fixes and clean ups for the 3.17 merge window"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa struct
  ext4: fix COLLAPSE RANGE test for bigalloc file systems
  ext4: check inline directory before converting
  ext4: fix incorrect locking in move_extent_per_page
  ext4: use correct depth value
  ext4: add i_data_sem sanity check
  ext4: fix wrong size computation in ext4_mb_normalize_request()
  ext4: make ext4_has_inline_data() as a inline function
  ext4: remove readpage() check in ext4_mmap_file()
  ext4: fix punch hole on files with indirect mapping
  ext4: remove metadata reservation checks
  ext4: rearrange initialization to fix EXT4FS_DEBUG
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext4/balloc.c	|   1
-rw-r--r--	fs/ext4/dir.c	|  25
-rw-r--r--	fs/ext4/ext4.h	|  14
-rw-r--r--	fs/ext4/extents.c	|  14
-rw-r--r--	fs/ext4/file.c	|   4
-rw-r--r--	fs/ext4/indirect.c	| 281
-rw-r--r--	fs/ext4/inline.c	|  18
-rw-r--r--	fs/ext4/inode.c	| 130
-rw-r--r--	fs/ext4/mballoc.c	|  41
-rw-r--r--	fs/ext4/migrate.c	|   7
-rw-r--r--	fs/ext4/move_extent.c	|   3
-rw-r--r--	fs/ext4/super.c	|  88
12 files changed, 333 insertions(+), 293 deletions(-)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index fca382037ddd..581ef40fbe90 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -639,7 +639,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	if (!(*errp) &&
 	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ef1bed66c14f..0bb3f9ea0832 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -571,6 +571,31 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
+		      int buf_size)
+{
+	struct ext4_dir_entry_2 *de;
+	int nlen, rlen;
+	unsigned int offset = 0;
+	char *top;
+
+	de = (struct ext4_dir_entry_2 *)buf;
+	top = buf + buf_size;
+	while ((char *) de < top) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+					 buf, buf_size, offset))
+			return -EIO;
+		nlen = EXT4_DIR_REC_LEN(de->name_len);
+		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+		offset += rlen;
+	}
+	if ((char *) de > top)
+		return -EIO;
+
+	return 0;
+}
+
 const struct file_operations ext4_dir_operations = {
 	.llseek		= ext4_dir_llseek,
 	.read		= generic_read_dir,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 7cc5a0e23688..5b19760b1de5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -591,7 +591,6 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE		0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER	0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER	0x0020
-#define EXT4_FREE_BLOCKS_RESERVE		0x0040
 
 /*
  * ioctl commands
@@ -2029,6 +2028,8 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype)
 
 	return ext4_filetype_table[filetype];
 }
+extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+			     void *buf, int buf_size);
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
@@ -2144,8 +2145,8 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
-extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-				 ext4_lblk_t first, ext4_lblk_t stop);
+extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+				 ext4_lblk_t start, ext4_lblk_t end);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2560,7 +2561,6 @@ extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
 
 /* inline.c */
-extern int ext4_has_inline_data(struct inode *inode);
 extern int ext4_get_max_inline_size(struct inode *inode);
 extern int ext4_find_inline_data_nolock(struct inode *inode);
 extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
@@ -2626,6 +2626,12 @@ extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 
 extern int ext4_convert_inline_data(struct inode *inode);
 
+static inline int ext4_has_inline_data(struct inode *inode)
+{
+	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+	       EXT4_I(inode)->i_inline_off;
+}
+
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 4da228a0e6d0..76c2df382b7d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -161,6 +161,8 @@ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
 		     struct inode *inode, struct ext4_ext_path *path)
 {
 	int err;
+
+	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
 	if (path->p_bh) {
 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
 		/* path points to block */
@@ -1808,8 +1810,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 
 	brelse(path[1].p_bh);
 	ext4_free_blocks(handle, inode, NULL, blk, 1,
-			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
-			 EXT4_FREE_BLOCKS_RESERVE);
+			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 }
 
 /*
@@ -3253,7 +3254,7 @@ out:
 
 fix_extent_len:
 	ex->ee_len = orig_ex.ee_len;
-	ext4_ext_dirty(handle, inode, path + depth);
+	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
 }
 
@@ -5403,16 +5404,13 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	int ret;
 
 	/* Collapse range works only on fs block size aligned offsets. */
-	if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
-	    len & (EXT4_BLOCK_SIZE(sb) - 1))
+	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
+	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
 		return -EINVAL;
 
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
-	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1)
-		return -EOPNOTSUPP;
-
 	trace_ext4_collapse_range(inode, offset, len);
 
 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8695f70af1ef..aca7b24a4432 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -200,10 +200,6 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct address_space *mapping = file->f_mapping;
-
-	if (!mapping->a_ops->readpage)
-		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &ext4_file_vm_ops;
 	return 0;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index fd69da194826..e75f840000a0 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1295,97 +1295,220 @@ do_indirects:
 	}
 }
 
-static int free_hole_blocks(handle_t *handle, struct inode *inode,
-			    struct buffer_head *parent_bh, __le32 *i_data,
-			    int level, ext4_lblk_t first,
-			    ext4_lblk_t count, int max)
-{
-	struct buffer_head *bh = NULL;
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int ret = 0;
-	int i, inc;
-	ext4_lblk_t offset;
-	__le32 blk;
-
-	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
-	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
-		if (offset >= count + first)
-			break;
-		if (*i_data == 0 || (offset + inc) <= first)
-			continue;
-		blk = *i_data;
-		if (level > 0) {
-			ext4_lblk_t first2;
-			ext4_lblk_t count2;
-
-			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
-			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
-						       "Read failure");
-				return -EIO;
-			}
-			if (first > offset) {
-				first2 = first - offset;
-				count2 = count;
-			} else {
-				first2 = 0;
-				count2 = count - (offset - first);
-			}
-			ret = free_hole_blocks(handle, inode, bh,
-					       (__le32 *)bh->b_data, level - 1,
-					       first2, count2,
-					       inode->i_sb->s_blocksize >> 2);
-			if (ret) {
-				brelse(bh);
-				goto err;
-			}
-		}
-		if (level == 0 ||
-		    (bh && all_zeroes((__le32 *)bh->b_data,
-				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh,
-				       i_data, i_data + 1);
-		}
-		brelse(bh);
-		bh = NULL;
-	}
-
-err:
-	return ret;
-}
-
-int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-			  ext4_lblk_t first, ext4_lblk_t stop)
-{
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int level, ret = 0;
-	int num = EXT4_NDIR_BLOCKS;
-	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
-	__le32 *i_data = EXT4_I(inode)->i_data;
-
-	count = stop - first;
-	for (level = 0; level < 4; level++, max *= addr_per_block) {
-		if (first < max) {
-			ret = free_hole_blocks(handle, inode, NULL, i_data,
-					       level, first, count, num);
-			if (ret)
-				goto err;
-			if (count > max - first)
-				count -= max - first;
-			else
-				break;
-			first = 0;
-		} else {
-			first -= max;
-		}
-		i_data += num;
-		if (level == 0) {
-			num = 1;
-			max = 1;
-		}
-	}
-
-err:
-	return ret;
+/**
+ *	ext4_ind_remove_space - remove space from the range
+ *	@handle: JBD handle for this transaction
+ *	@inode:	inode we are dealing with
+ *	@start:	First block to remove
+ *	@end:	One block after the last block to remove (exclusive)
+ *
+ *	Free the blocks in the defined range (end is exclusive endpoint of
+ *	range). This is used by ext4_punch_hole().
+ */
+int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+			  ext4_lblk_t start, ext4_lblk_t end)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__le32 *i_data = ei->i_data;
+	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+	ext4_lblk_t offsets[4], offsets2[4];
+	Indirect chain[4], chain2[4];
+	Indirect *partial, *partial2;
+	ext4_lblk_t max_block;
+	__le32 nr = 0, nr2 = 0;
+	int n = 0, n2 = 0;
+	unsigned blocksize = inode->i_sb->s_blocksize;
+
+	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+	if (end >= max_block)
+		end = max_block;
+	if ((start >= end) || (start > max_block))
+		return 0;
+
+	n = ext4_block_to_path(inode, start, offsets, NULL);
+	n2 = ext4_block_to_path(inode, end, offsets2, NULL);
+
+	BUG_ON(n > n2);
+
+	if ((n == 1) && (n == n2)) {
+		/* We're punching only within direct block range */
+		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+			       i_data + offsets2[0]);
+		return 0;
+	} else if (n2 > n) {
+		/*
+		 * Start and end are on a different levels so we're going to
+		 * free partial block at start, and partial block at end of
+		 * the range. If there are some levels in between then
+		 * do_indirects label will take care of that.
+		 */
+
+		if (n == 1) {
+			/*
+			 * Start is at the direct block level, free
+			 * everything to the end of the level.
+			 */
+			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+				       i_data + EXT4_NDIR_BLOCKS);
+			goto end_range;
+		}
+
+
+		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+		if (nr) {
+			if (partial == chain) {
+				/* Shared branch grows from the inode */
+				ext4_free_branches(handle, inode, NULL,
+					   &nr, &nr+1, (chain+n-1) - partial);
+				*partial->p = 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				BUFFER_TRACE(partial->bh, "get_write_access");
+				ext4_free_branches(handle, inode, partial->bh,
+					partial->p,
+					partial->p+1, (chain+n-1) - partial);
+			}
+		}
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		while (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				partial->p + 1,
+				(__le32 *)partial->bh->b_data+addr_per_block,
+				(chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+
+end_range:
+		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+		if (nr2) {
+			if (partial2 == chain2) {
+				/*
+				 * Remember, end is exclusive so here we're at
+				 * the start of the next level we're not going
+				 * to free. Everything was covered by the start
+				 * of the range.
+				 */
+				return 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				partial2--;
+			}
+		} else {
+			/*
+			 * ext4_find_shared returns Indirect structure which
+			 * points to the last element which should not be
+			 * removed by truncate. But this is end of the range
+			 * in punch_hole so we need to point to the next element
+			 */
+			partial2->p++;
+		}
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		while (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n2-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+		goto do_indirects;
+	}
+
+	/* Punch happened within the same level (n == n2) */
+	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+	/*
+	 * ext4_find_shared returns Indirect structure which
+	 * points to the last element which should not be
+	 * removed by truncate. But this is end of the range
+	 * in punch_hole so we need to point to the next element
+	 */
+	partial2->p++;
+	while ((partial > chain) || (partial2 > chain2)) {
+		/* We're at the same block, so we're almost finished */
+		if ((partial->bh && partial2->bh) &&
+		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
+			if ((partial > chain) && (partial2 > chain2)) {
+				ext4_free_branches(handle, inode, partial->bh,
+						   partial->p + 1,
+						   partial2->p,
+						   (chain+n-1) - partial);
+				BUFFER_TRACE(partial->bh, "call brelse");
+				brelse(partial->bh);
+				BUFFER_TRACE(partial2->bh, "call brelse");
+				brelse(partial2->bh);
+			}
+			return 0;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		if (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				   partial->p + 1,
+				   (__le32 *)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		if (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+	}
+
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
+	default:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_IND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+			i_data[EXT4_IND_BLOCK] = 0;
+		}
+	case EXT4_IND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_DIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+			i_data[EXT4_DIND_BLOCK] = 0;
+		}
+	case EXT4_DIND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_TIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+			i_data[EXT4_TIND_BLOCK] = 0;
+		}
+	case EXT4_TIND_BLOCK:
+		;
+	}
+	return 0;
 }
-
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 645205d8ada6..bea662bd0ca6 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -120,12 +120,6 @@ int ext4_get_max_inline_size(struct inode *inode)
 	return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
 }
 
-int ext4_has_inline_data(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
-	       EXT4_I(inode)->i_inline_off;
-}
-
 /*
  * this function does not take xattr_sem, which is OK because it is
  * currently only used in a code path coming form ext4_iget, before
@@ -1178,6 +1172,18 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
 	if (error < 0)
 		goto out;
 
+	/*
+	 * Make sure the inline directory entries pass checks before we try to
+	 * convert them, so that we avoid touching stuff that needs fsck.
+	 */
+	if (S_ISDIR(inode->i_mode)) {
+		error = ext4_check_all_de(inode, iloc->bh,
+					buf + EXT4_INLINE_DOTDOT_SIZE,
+					inline_size - EXT4_INLINE_DOTDOT_SIZE);
+		if (error)
+			goto out;
+	}
+
 	error = ext4_destroy_inline_data_nolock(handle, inode);
 	if (error)
 		goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8a064734e6eb..367a60c07cf0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -325,18 +325,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 #endif
 
 /*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
-/*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
  */
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			ei->i_reserved_meta_blocks, used,
-			ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
@@ -1222,49 +1185,6 @@ static int ext4_journalled_write_end(struct file *file,
 }
 
 /*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;	/* success */
-}
-
-/*
  * Reserve a single cluster located at lblock
  */
 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;	/* success */
@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 
@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-		 ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-		 ei->i_allocated_meta_blocks);
 	return;
 }
 
@@ -1620,13 +1510,6 @@ add_delayed:
 				retval = ret;
 				goto out_unlock;
 			}
-		} else {
-			ret = ext4_da_reserve_metadata(inode, iblock);
-			if (ret) {
-				/* not enough space to reserve */
-				retval = ret;
-				goto out_unlock;
-			}
 		}
 
 	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*
@@ -3624,7 +3506,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 		ret = ext4_ext_remove_space(inode, first_block,
 					    stop_block - 1);
 	else
-		ret = ext4_free_hole_blocks(handle, inode, first_block,
+		ret = ext4_ind_remove_space(handle, inode, first_block,
 					    stop_block);
 
 	up_write(&EXT4_I(inode)->i_data_sem);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 2dcb936be90e..956027711faf 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3075,8 +3075,9 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 					(23 - bsbits)) << 23;
 		size = 8 * 1024 * 1024;
 	} else {
-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-		size	  = ac->ac_o_ex.fe_len << bsbits;
+		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+					      ac->ac_o_ex.fe_len) << bsbits;
 	}
 	size = size >> bsbits;
 	start = start_off >> bsbits;
@@ -3216,8 +3217,27 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
 	struct ext4_prealloc_space *pa = ac->ac_pa;
+	struct ext4_buddy e4b;
+	int err;
 
-	if (pa && pa->pa_type == MB_INODE_PA)
+	if (pa == NULL) {
+		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+		if (err) {
+			/*
+			 * This should never happen since we pin the
+			 * pages in the ext4_allocation_context so
+			 * ext4_mb_load_buddy() should never fail.
+			 */
+			WARN(1, "mb_load_buddy failed (%d)", err);
+			return;
+		}
+		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+			       ac->ac_f_ex.fe_len);
+		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		return;
+	}
+	if (pa->pa_type == MB_INODE_PA)
 		pa->pa_free += ac->ac_b_ex.fe_len;
 }
 
@@ -4627,7 +4647,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
 	struct ext4_sb_info *sbi;
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_buddy e4b;
 	unsigned int count_clusters;
 	int err = 0;
@@ -4838,19 +4857,7 @@ do_more:
 				   &sbi->s_flex_groups[flex_group].free_clusters);
 	}
 
-	if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
-		percpu_counter_add(&sbi->s_dirtyclusters_counter,
-				   count_clusters);
-		spin_lock(&ei->i_block_reservation_lock);
-		if (flags & EXT4_FREE_BLOCKS_METADATA)
-			ei->i_reserved_meta_blocks += count_clusters;
-		else
-			ei->i_reserved_data_blocks += count_clusters;
-		spin_unlock(&ei->i_block_reservation_lock);
-		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
-			dquot_reclaim_block(inode,
-					EXT4_C2B(sbi, count_clusters));
-	} else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index ec092437d3e0..d3567f27bae7 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -39,6 +39,8 @@ static int finish_range(handle_t *handle, struct inode *inode,
 	newext.ee_block = cpu_to_le32(lb->first_block);
 	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
 	ext4_ext_store_pblock(&newext, lb->first_pblock);
+	/* Locking only for convinience since we are operating on temp inode */
+	down_write(&EXT4_I(inode)->i_data_sem);
 	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
 
 	if (IS_ERR(path)) {
@@ -61,7 +63,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
 	 */
 	if (needed && ext4_handle_has_enough_credits(handle,
 						EXT4_RESERVE_TRANS_BLOCKS)) {
+		up_write((&EXT4_I(inode)->i_data_sem));
 		retval = ext4_journal_restart(handle, needed);
+		down_write((&EXT4_I(inode)->i_data_sem));
 		if (retval)
 			goto err_out;
 	} else if (needed) {
@@ -70,13 +74,16 @@ static int finish_range(handle_t *handle, struct inode *inode,
 			/*
 			 * IF not able to extend the journal restart the journal
 			 */
+			up_write((&EXT4_I(inode)->i_data_sem));
 			retval = ext4_journal_restart(handle, needed);
+			down_write((&EXT4_I(inode)->i_data_sem));
 			if (retval)
 				goto err_out;
 		}
 	}
 	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
 err_out:
+	up_write((&EXT4_I(inode)->i_data_sem));
 	if (path) {
 		ext4_ext_drop_refs(path);
 		kfree(path);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2484c7ec6a72..671a74b14fd7 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -1013,10 +1013,11 @@ data_copy:
 		*err = -EBUSY;
 		goto unlock_pages;
 	}
-
+	ext4_double_down_write_data_sem(orig_inode, donor_inode);
 	replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
 					       orig_blk_offset,
 					       block_len_in_page, err);
+	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (*err) {
 		if (replaced_count) {
 			block_len_in_page = replaced_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6df7bc611dbd..32b43ad154b9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2142,10 +2142,6 @@ static int ext4_check_descriptors(struct super_block *sb,
 	}
 	if (NULL != first_not_zeroed)
 		*first_not_zeroed = grp;
-
-	ext4_free_blocks_count_set(sbi->s_es,
-			EXT4_C2B(sbi, ext4_count_free_clusters(sb)));
-	sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
 	return 1;
 }
 
@@ -3883,13 +3879,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		goto failed_mount2;
 	}
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
-		if (!ext4_fill_flex_info(sb)) {
-			ext4_msg(sb, KERN_ERR,
-			       "unable to initialize "
-			       "flex_bg meta info!");
-			goto failed_mount2;
-		}
 
 	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -3902,23 +3891,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	/* Register extent status tree shrinker */
 	ext4_es_register_shrinker(sbi);
 
-	err = percpu_counter_init(&sbi->s_freeclusters_counter,
-			ext4_count_free_clusters(sb));
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext4_count_free_inodes(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext4_count_dirs(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
-	}
-	if (err) {
+	if ((err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0)) != 0) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
 		goto failed_mount3;
 	}
@@ -4022,18 +3995,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
 
-	/*
-	 * The journal may have updated the bg summary counts, so we
-	 * need to update the global counters.
-	 */
-	percpu_counter_set(&sbi->s_freeclusters_counter,
-			   ext4_count_free_clusters(sb));
-	percpu_counter_set(&sbi->s_freeinodes_counter,
-			   ext4_count_free_inodes(sb));
-	percpu_counter_set(&sbi->s_dirs_counter,
-			   ext4_count_dirs(sb));
-	percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
-
 no_journal:
 	if (ext4_mballoc_ready) {
 		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
@@ -4141,6 +4102,33 @@ no_journal:
 		goto failed_mount5;
 	}
 
+	block = ext4_count_free_clusters(sb);
+	ext4_free_blocks_count_set(sbi->s_es,
+				   EXT4_C2B(sbi, block));
+	err = percpu_counter_init(&sbi->s_freeclusters_counter, block);
+	if (!err) {
+		unsigned long freei = ext4_count_free_inodes(sb);
+		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei);
+	}
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+					  ext4_count_dirs(sb));
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount6;
+	}
+
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
+		if (!ext4_fill_flex_info(sb)) {
+			ext4_msg(sb, KERN_ERR,
+			       "unable to initialize "
+			       "flex_bg meta info!");
+			goto failed_mount6;
+		}
+
 	err = ext4_register_li_request(sb, first_not_zeroed);
 	if (err)
 		goto failed_mount6;
@@ -4215,6 +4203,12 @@ failed_mount7:
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
+	if (sbi->s_flex_groups)
+		ext4_kvfree(sbi->s_flex_groups);
+	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4233,12 +4227,6 @@ failed_mount_wq:
 failed_mount3:
 	ext4_es_unregister_shrinker(sbi);
 	del_timer_sync(&sbi->s_err_report);
-	if (sbi->s_flex_groups)
-		ext4_kvfree(sbi->s_flex_groups);
-	percpu_counter_destroy(&sbi->s_freeclusters_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
@@ -4556,11 +4544,13 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	ext4_free_blocks_count_set(es,
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeclusters_counter)));
-	es->s_free_inodes_count =
-		cpu_to_le32(percpu_counter_sum_positive(
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+		es->s_free_inodes_count =
+			cpu_to_le32(percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb);