author		Lukas Czerner <lczerner@redhat.com>	2013-04-09 22:11:22 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2013-04-09 22:11:22 -0400
commit		27dd43854227bb0e6ab70129bd21b60d396db2e7 (patch)
tree		14e490b9d0ac63583849c4cad4b3ad0123902c3a /fs
parent		f45a5ef91bef7e02149a216ed6dc3fcdd8b38268 (diff)
ext4: introduce reserved space
Currently, in an ENOSPC condition, when writing into unwritten space or
punching a hole, we might need to split an extent and grow the extent
tree. However, since we cannot allocate any new metadata blocks, we have
to zero out the unwritten part of the extent or the punched-out part of
the extent, or in the worst case return ENOSPC even though the user does
not actually allocate any space.

Also, in the delalloc path we reserve metadata and data blocks for the
data we are going to write out, but metadata block reservation is very
tricky, especially since we expect that logical connectivity implies
physical connectivity; that might not be the case, and hence we might
end up allocating more metadata blocks than previously reserved. So in
the future, metadata reservation checks should be removed, since we
cannot assure that we do not under-reserve.

This is where reserved space comes into the picture. When mounting the
file system we slice off a little bit of the file system space (2% or
4096 clusters, whichever is smaller) which can then be used for the
cases mentioned above to prevent a costly zeroout or an unexpected
ENOSPC.

The number of reserved clusters can be set via sysfs; however, it can
never be bigger than the number of free clusters in the file system.

Note that this patch fixes the failure of xfstest 274 as expected.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
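[Editor's note] The 2%-or-4096 sizing policy is implemented in
ext4_calculate_resv_clusters() in the super.c hunk below. As a quick
sanity check of the arithmetic, here is a minimal standalone userspace
sketch of the same policy; the sample geometries are illustrative, not
taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Model of the sizing policy: 2% of all clusters, capped at 4096.
 * cluster_bits stands in for sbi->s_cluster_bits, i.e. log2 of the
 * blocks-per-cluster ratio (0 when bigalloc is off). */
static uint64_t calc_resv_clusters(uint64_t blocks, unsigned int cluster_bits)
{
	uint64_t resv = (blocks >> cluster_bits) / 50;	/* 2% */

	return resv < 4096 ? resv : 4096;
}

int main(void)
{
	/* 1 TiB of 4 KiB blocks: 2% of 268435456 clusters is ~5.3M,
	 * so the 4096-cluster cap wins. */
	printf("1 TiB:   %llu clusters\n",
	       (unsigned long long)calc_resv_clusters(1ULL << 28, 0));

	/* 512 MiB of 4 KiB blocks: 2% of 131072 clusters is 2621,
	 * below the cap, so the percentage applies. */
	printf("512 MiB: %llu clusters\n",
	       (unsigned long long)calc_resv_clusters(1ULL << 17, 0));
	return 0;
}

At runtime the value is exposed through the new reserved_clusters
attribute under /sys/fs/ext4/<disk>/; writes to it are validated by
ext4_reserve_clusters(), which rejects any count that reaches the file
system's total cluster count.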
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext4/balloc.c	18
-rw-r--r--	fs/ext4/ext4.h	13
-rw-r--r--	fs/ext4/extents.c	27
-rw-r--r--	fs/ext4/inode.c	11
-rw-r--r--	fs/ext4/super.c	84
5 files changed, 130 insertions(+), 23 deletions(-)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 9e8d8ffb063f..8dcaea69e37f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -499,20 +499,22 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
 				  s64 nclusters, unsigned int flags)
 {
-	s64 free_clusters, dirty_clusters, root_clusters;
+	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
 	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
 	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;
 
 	free_clusters  = percpu_counter_read_positive(fcc);
 	dirty_clusters = percpu_counter_read_positive(dcc);
+	resv_clusters = atomic64_read(&sbi->s_resv_clusters);
 
 	/*
 	 * r_blocks_count should always be multiple of the cluster ratio so
 	 * we are safe to do a plane bit shift only.
 	 */
-	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
+	      resv_clusters;
 
-	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
+	if (free_clusters - (nclusters + rsv + dirty_clusters) <
 					EXT4_FREECLUSTERS_WATERMARK) {
 		free_clusters  = percpu_counter_sum_positive(fcc);
 		dirty_clusters = percpu_counter_sum_positive(dcc);
@@ -520,15 +522,21 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
 	/* Check whether we have space after accounting for current
 	 * dirty clusters & root reserved clusters.
 	 */
-	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
+	if (free_clusters >= (rsv + nclusters + dirty_clusters))
 		return 1;
 
 	/* Hm, nope. Are (enough) root reserved clusters available? */
 	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
 	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
 	    capable(CAP_SYS_RESOURCE) ||
 	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
 
+		if (free_clusters >= (nclusters + dirty_clusters +
+				      resv_clusters))
+			return 1;
+	}
+	/* No free blocks. Let's see if we can dip into reserved pool */
+	if (flags & EXT4_MB_USE_RESERVED) {
 		if (free_clusters >= (nclusters + dirty_clusters))
 			return 1;
 	}
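[Editor's note] To restate the control flow of the patched
ext4_has_free_clusters(): an ordinary request must leave both the root
reservation and the new reserved pool untouched; a privileged request
(matching resuid/resgid, CAP_SYS_RESOURCE, or EXT4_MB_USE_ROOT_BLOCKS)
may dip into the root reservation but still not the pool; only
EXT4_MB_USE_RESERVED may drain the pool itself. A simplified userspace
model of that ordering follows; the flag values are hypothetical
stand-ins and the privilege test is collapsed into a single flag:

#include <stdbool.h>
#include <stdio.h>

#define USE_ROOT_BLOCKS	0x1	/* stand-in for EXT4_MB_USE_ROOT_BLOCKS */
#define USE_RESERVED	0x2	/* stand-in for EXT4_MB_USE_RESERVED */

/* nfree/dirty/root/resv are cluster counts; want is the request size. */
static bool has_free(long long nfree, long long dirty, long long root,
		     long long resv, long long want, int flags)
{
	if (nfree >= want + dirty + root + resv)
		return true;	/* tier 1: any caller */
	if ((flags & USE_ROOT_BLOCKS) && nfree >= want + dirty + resv)
		return true;	/* tier 2: root reservation, pool intact */
	if ((flags & USE_RESERVED) && nfree >= want + dirty)
		return true;	/* tier 3: dip into the reserved pool */
	return false;
}

int main(void)
{
	/* 100 free, 10 dirty, 5 root-reserved, 4 pool-reserved clusters. */
	printf("plain ask for 85:      %d\n", has_free(100, 10, 5, 4, 85, 0));
	printf("privileged ask for 85: %d\n",
	       has_free(100, 10, 5, 4, 85, USE_ROOT_BLOCKS));
	printf("NOFAIL ask for 88:     %d\n",
	       has_free(100, 10, 5, 4, 88, USE_RESERVED));
	return 0;
}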
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d91871570982..12b560435aba 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -121,6 +121,8 @@ typedef unsigned int ext4_group_t;
 #define EXT4_MB_STREAM_ALLOC		0x0800
 /* Use reserved root blocks if needed */
 #define EXT4_MB_USE_ROOT_BLOCKS	0x1000
+/* Use blocks from reserved pool */
+#define EXT4_MB_USE_RESERVED		0x2000
 
 struct ext4_allocation_request {
 	/* target inode for block we're allocating */
@@ -557,9 +559,8 @@ enum {
 #define EXT4_GET_BLOCKS_UNINIT_EXT		0x0002
 #define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT	(EXT4_GET_BLOCKS_UNINIT_EXT|\
 						 EXT4_GET_BLOCKS_CREATE)
-	/* Caller is from the delayed allocation writeout path,
-	   so set the magic i_delalloc_reserve_flag after taking the
-	   inode allocation semaphore for */
+	/* Caller is from the delayed allocation writeout path
+	 * finally doing the actual allocation of delayed blocks */
 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE	0x0004
 	/* caller is from the direct IO path, request to creation of an
 	   unitialized extents if not allocated, split the uninitialized
@@ -571,8 +572,9 @@ enum {
 	/* Convert extent to initialized after IO complete */
 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
 					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
-	/* Punch out blocks of an extent */
-#define EXT4_GET_BLOCKS_PUNCH_OUT_EXT		0x0020
+	/* Eventual metadata allocation (due to growing extent tree)
+	 * should not fail, so try to use reserved blocks for that.*/
+#define EXT4_GET_BLOCKS_METADATA_NOFAIL	0x0020
 	/* Don't normalize allocation size (used for fallocate) */
 #define EXT4_GET_BLOCKS_NO_NORMALIZE		0x0040
 	/* Request will not result in inode size update (user for fallocate) */
@@ -1188,6 +1190,7 @@ struct ext4_sb_info {
 	unsigned int s_mount_flags;
 	unsigned int s_def_mount_opt;
 	ext4_fsblk_t s_sb_block;
+	atomic64_t s_resv_clusters;
 	kuid_t s_resuid;
 	kgid_t s_resgid;
 	unsigned short s_mount_state;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ea607f907232..8b158ae2443b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1942,8 +1942,8 @@ prepend:
 	 * There is no free space in the found leaf.
 	 * We're gonna add a new leaf in the tree.
 	 */
-	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
-		flags = EXT4_MB_USE_ROOT_BLOCKS;
+	if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+		flags = EXT4_MB_USE_RESERVED;
 	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
 	if (err)
 		goto cleanup;
@@ -2729,12 +2729,14 @@ again:
 
 		/*
 		 * Split the extent in two so that 'end' is the last
-		 * block in the first new extent
+		 * block in the first new extent. Also we should not
+		 * fail removing space due to ENOSPC so try to use
+		 * reserved block if that happens.
 		 */
 		err = ext4_split_extent_at(handle, inode, path,
 				end + 1, split_flag,
 				EXT4_GET_BLOCKS_PRE_IO |
-				EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
+				EXT4_GET_BLOCKS_METADATA_NOFAIL);
 
 		if (err < 0)
 			goto out;
@@ -3209,7 +3211,8 @@ out:
 static int ext4_ext_convert_to_initialized(handle_t *handle,
 					   struct inode *inode,
 					   struct ext4_map_blocks *map,
-					   struct ext4_ext_path *path)
+					   struct ext4_ext_path *path,
+					   int flags)
 {
 	struct ext4_sb_info *sbi;
 	struct ext4_extent_header *eh;
@@ -3435,7 +3438,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	}
 
 	allocated = ext4_split_extent(handle, inode, path,
-				      &split_map, split_flag, 0);
+				      &split_map, split_flag, flags);
 	if (allocated < 0)
 		err = allocated;
 
@@ -3755,6 +3758,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		  flags, allocated);
 	ext4_ext_show_leaf(inode, path);
 
+	/*
+	 * When writing into uninitialized space, we should not fail to
+	 * allocate metadata blocks for the new extent block if needed.
+	 */
+	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
+
 	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
 						    allocated, newblock);
 
@@ -3818,7 +3827,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 	}
 
 	/* buffered write, writepage time, convert*/
-	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
+	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
 	if (ret >= 0)
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 out:
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f9b0b479ff4c..629d67b62dfb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1688,12 +1688,21 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 	 */
 	map.m_lblk = next;
 	map.m_len = max_blocks;
-	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
+	/*
+	 * We're in delalloc path and it is possible that we're going to
+	 * need more metadata blocks than previously reserved. However
+	 * we must not fail because we're in writeback and there is
+	 * nothing we can do about it so it might result in data loss.
+	 * So use reserved blocks to allocate metadata if possible.
+	 */
+	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
+			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
 	if (ext4_should_dioread_nolock(mpd->inode))
 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
 	if (mpd->b_state & (1 << BH_Delay))
 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
 
+
 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
 	if (blks < 0) {
 		struct super_block *sb = mpd->inode->i_sb;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 968ca9369175..6fea87db7daa 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -81,6 +81,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly);
 static void ext4_destroy_lazyinit_thread(void);
 static void ext4_unregister_li_request(struct super_block *sb);
 static void ext4_clear_request_list(void);
+static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t);
 
 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext2_fs_type = {
@@ -2382,6 +2383,17 @@ struct ext4_attr {
 	int offset;
 };
 
+static int parse_strtoull(const char *buf,
+		unsigned long long max, unsigned long long *value)
+{
+	int ret;
+
+	ret = kstrtoull(skip_spaces(buf), 0, value);
+	if (!ret && *value > max)
+		ret = -EINVAL;
+	return ret;
+}
+
 static int parse_strtoul(const char *buf,
 		unsigned long max, unsigned long *value)
 {
@@ -2466,6 +2478,27 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
 	return count;
 }
 
+static ssize_t reserved_clusters_show(struct ext4_attr *a,
+				  struct ext4_sb_info *sbi, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long) atomic64_read(&sbi->s_resv_clusters));
+}
+
+static ssize_t reserved_clusters_store(struct ext4_attr *a,
+				   struct ext4_sb_info *sbi,
+				   const char *buf, size_t count)
+{
+	unsigned long long val;
+	int ret;
+
+	if (parse_strtoull(buf, -1ULL, &val))
+		return -EINVAL;
+	ret = ext4_reserve_clusters(sbi, val);
+
+	return ret ? ret : count;
+}
+
 static ssize_t trigger_test_error(struct ext4_attr *a,
 				  struct ext4_sb_info *sbi,
 				  const char *buf, size_t count)
@@ -2503,6 +2536,7 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
 EXT4_RO_ATTR(delayed_allocation_blocks);
 EXT4_RO_ATTR(session_write_kbytes);
 EXT4_RO_ATTR(lifetime_write_kbytes);
+EXT4_RW_ATTR(reserved_clusters);
 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
 		 inode_readahead_blks_store, s_inode_readahead_blks);
 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
@@ -2520,6 +2554,7 @@ static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(delayed_allocation_blocks),
 	ATTR_LIST(session_write_kbytes),
 	ATTR_LIST(lifetime_write_kbytes),
+	ATTR_LIST(reserved_clusters),
 	ATTR_LIST(inode_readahead_blks),
 	ATTR_LIST(inode_goal),
 	ATTR_LIST(mb_stats),
@@ -3195,6 +3230,40 @@ int ext4_calculate_overhead(struct super_block *sb)
 	return 0;
 }
 
+
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+{
+	ext4_fsblk_t resv_clusters;
+
+	/*
+	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
+	 * This should cover the situations where we can not afford to run
+	 * out of space like for example punch hole, or converting
+	 * uninitialized extents in delalloc path. In most cases such
+	 * allocation would require 1, or 2 blocks, higher numbers are
+	 * very rare.
+	 */
+	resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+
+	do_div(resv_clusters, 50);
+	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
+
+	return resv_clusters;
+}
+
+
+static int ext4_reserve_clusters(struct ext4_sb_info *sbi, ext4_fsblk_t count)
+{
+	ext4_fsblk_t clusters = ext4_blocks_count(sbi->s_es) >>
+				sbi->s_cluster_bits;
+
+	if (count >= clusters)
+		return -EINVAL;
+
+	atomic64_set(&sbi->s_resv_clusters, count);
+	return 0;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -3918,6 +3987,13 @@ no_journal:
 			 "available");
 	}
 
+	err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
+			 "reserved pool", ext4_calculate_resv_clusters(sbi));
+		goto failed_mount4a;
+	}
+
 	err = ext4_setup_system_zone(sb);
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to initialize system "
@@ -4750,9 +4826,10 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 	struct super_block *sb = dentry->d_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
-	ext4_fsblk_t overhead = 0;
+	ext4_fsblk_t overhead = 0, resv_blocks;
 	u64 fsid;
 	s64 bfree;
+	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
 
 	if (!test_opt(sb, MINIX_DF))
 		overhead = sbi->s_overhead;
@@ -4764,8 +4841,9 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
 	/* prevent underflow in case that few free space is available */
 	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
-	buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
-	if (buf->f_bfree < ext4_r_blocks_count(es))
+	buf->f_bavail = buf->f_bfree -
+			(ext4_r_blocks_count(es) + resv_blocks);
+	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
 		buf->f_bavail = 0;
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
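[Editor's note] On the statfs() change above: the blocks reported as
available to unprivileged callers now subtract the reserved pool on top
of the classic root reservation, with the same clamp to zero. A minimal
userspace model of the new accounting, with arbitrary sample numbers:

#include <stdint.h>
#include <stdio.h>

/* Model of the patched ext4_statfs() math: available = free minus
 * (root-reserved + pool-reserved) blocks, clamped at zero. */
static uint64_t bavail(uint64_t bfree, uint64_t r_blocks, uint64_t resv_blocks)
{
	uint64_t held = r_blocks + resv_blocks;

	return bfree < held ? 0 : bfree - held;
}

int main(void)
{
	/* 10000 free blocks, 500 root-reserved, 4096 pool-reserved. */
	printf("bavail = %llu\n", (unsigned long long)bavail(10000, 500, 4096));

	/* Nearly full file system: the clamp keeps f_bavail at zero
	 * instead of underflowing. */
	printf("bavail = %llu\n", (unsigned long long)bavail(3000, 500, 4096));
	return 0;
}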