Diffstat (limited to 'fs/ext4/super.c')
 -rw-r--r--  fs/ext4/super.c | 323
 1 file changed, 259 insertions(+), 64 deletions(-)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d8759401ecae..2d51cd9af225 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -74,7 +74,6 @@ static const char *ext4_decode_error(struct super_block *sb, int errno,
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
 static int ext4_unfreeze(struct super_block *sb);
-static void ext4_write_super(struct super_block *sb);
 static int ext4_freeze(struct super_block *sb);
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
@@ -896,7 +895,7 @@ static void ext4_put_super(struct super_block *sb)
 		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
 	}
-	if (sb->s_dirt || !(sb->s_flags & MS_RDONLY))
+	if (!(sb->s_flags & MS_RDONLY))
 		ext4_commit_super(sb, 1);
 
 	if (sbi->s_proc) {
@@ -1137,12 +1136,18 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+				 int format_id);
 static int ext4_quota_off(struct super_block *sb, int type);
+static int ext4_quota_off_sysfile(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
 static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+			     unsigned int flags);
+static int ext4_enable_quotas(struct super_block *sb);
 
 static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space = ext4_get_reserved_space,
@@ -1164,6 +1169,16 @@ static const struct quotactl_ops ext4_qctl_operations = {
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
 };
+
+static const struct quotactl_ops ext4_qctl_sysfile_operations = {
+	.quota_on_meta	= ext4_quota_on_sysfile,
+	.quota_off	= ext4_quota_off_sysfile,
+	.quota_sync	= dquot_quota_sync,
+	.get_info	= dquot_get_dqinfo,
+	.set_info	= dquot_set_dqinfo,
+	.get_dqblk	= dquot_get_dqblk,
+	.set_dqblk	= dquot_set_dqblk
+};
 #endif
 
 static const struct super_operations ext4_sops = {
@@ -1194,7 +1209,6 @@ static const struct super_operations ext4_nojournal_sops = {
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
-	.write_super	= ext4_write_super,
	.put_super	= ext4_put_super,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
@@ -2661,6 +2675,16 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2661 "extents feature\n"); 2675 "extents feature\n");
2662 return 0; 2676 return 0;
2663 } 2677 }
2678
2679#ifndef CONFIG_QUOTA
2680 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
2681 !readonly) {
2682 ext4_msg(sb, KERN_ERR,
2683 "Filesystem with quota feature cannot be mounted RDWR "
2684 "without CONFIG_QUOTA");
2685 return 0;
2686 }
2687#endif /* CONFIG_QUOTA */
2664 return 1; 2688 return 1;
2665} 2689}
2666 2690
@@ -3085,6 +3109,114 @@ static int set_journal_csum_feature_set(struct super_block *sb)
 	return ret;
 }
 
+/*
+ * Note: calculating the overhead so we can be compatible with
+ * historical BSD practice is quite difficult in the face of
+ * clusters/bigalloc.  This is because multiple metadata blocks from
+ * different block groups can end up in the same allocation cluster.
+ * Calculating the exact overhead in the face of clustered allocation
+ * requires either O(all block bitmaps) in memory or O(number of block
+ * groups**2) in time.  We will still calculate the overhead for
+ * older file systems --- and if we come across a bigalloc file
+ * system with zero in s_overhead_clusters the estimate will be close to
+ * correct, especially for very large cluster sizes --- but for newer
+ * file systems, it's better to calculate this figure once at mkfs
+ * time and store it in the superblock.  If the superblock value is
+ * present (even for non-bigalloc file systems), we will use it.
+ */
+static int count_overhead(struct super_block *sb, ext4_group_t grp,
+			  char *buf)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_group_desc *gdp;
+	ext4_fsblk_t first_block, last_block, b;
+	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+	int s, j, count = 0;
+
+	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+		(grp * EXT4_BLOCKS_PER_GROUP(sb));
+	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+	for (i = 0; i < ngroups; i++) {
+		gdp = ext4_get_group_desc(sb, i, NULL);
+		b = ext4_block_bitmap(sb, gdp);
+		if (b >= first_block && b <= last_block) {
+			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+			count++;
+		}
+		b = ext4_inode_bitmap(sb, gdp);
+		if (b >= first_block && b <= last_block) {
+			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+			count++;
+		}
+		b = ext4_inode_table(sb, gdp);
+		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
+			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
+				int c = EXT4_B2C(sbi, b - first_block);
+				ext4_set_bit(c, buf);
+				count++;
+			}
+		if (i != grp)
+			continue;
+		s = 0;
+		if (ext4_bg_has_super(sb, grp)) {
+			ext4_set_bit(s++, buf);
+			count++;
+		}
+		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+			count++;
+		}
+	}
+	if (!count)
+		return 0;
+	return EXT4_CLUSTERS_PER_GROUP(sb) -
+		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
+}
+
+/*
+ * Compute the overhead and stash it in sbi->s_overhead
+ */
+int ext4_calculate_overhead(struct super_block *sb)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_super_block *es = sbi->s_es;
+	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+	ext4_fsblk_t overhead = 0;
+	char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+	memset(buf, 0, PAGE_SIZE);
+
+	/*
+	 * Compute the overhead (FS structures).  This is constant
+	 * for a given filesystem unless the number of block groups
+	 * changes so we cache the previous value until it does.
+	 */
+
+	/*
+	 * All of the blocks before first_data_block are overhead
+	 */
+	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+
+	/*
+	 * Add the overhead found in each block group
+	 */
+	for (i = 0; i < ngroups; i++) {
+		int blks;
+
+		blks = count_overhead(sb, i, buf);
+		overhead += blks;
+		if (blks)
+			memset(buf, 0, PAGE_SIZE);
+		cond_resched();
+	}
+	sbi->s_overhead = overhead;
+	smp_wmb();
+	free_page((unsigned long) buf);
+	return 0;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	char *orig_data = kstrdup(data, GFP_KERNEL);
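
A note on the hunk above: count_overhead() marks each metadata block's cluster in a scratch bitmap and then counts the bits, rather than simply summing blocks, because with bigalloc several metadata blocks can fall into the same allocation cluster. The following is a minimal userspace sketch of that idea, with a toy group geometry and invented cluster numbers; it is not kernel code.

/* Toy model of per-group overhead counting with shared clusters.
 * CLUSTERS_PER_GROUP and the cluster numbers below are invented. */
#include <stdio.h>
#include <string.h>

#define CLUSTERS_PER_GROUP 64

static void set_bit(unsigned char *map, int nr)
{
	map[nr >> 3] |= 1 << (nr & 7);
}

static int count_free(const unsigned char *map, int nbits)
{
	int i, nfree = 0;

	for (i = 0; i < nbits; i++)
		if (!(map[i >> 3] & (1 << (i & 7))))
			nfree++;
	return nfree;
}

int main(void)
{
	unsigned char map[CLUSTERS_PER_GROUP / 8];
	/* Clusters holding metadata; note cluster 3 appears twice. */
	int meta[] = { 0, 1, 2, 3, 3, 4 };
	unsigned int i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < sizeof(meta) / sizeof(meta[0]); i++)
		set_bit(map, meta[i]);

	/* Prints 5, not 6: the shared cluster is counted once. */
	printf("overhead clusters = %d\n",
	       CLUSTERS_PER_GROUP - count_free(map, CLUSTERS_PER_GROUP));
	return 0;
}

Two metadata blocks sharing cluster 3 contribute a single cluster of overhead, which is exactly what the set-bit-then-count-free logic in count_overhead() achieves.
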
@@ -3640,6 +3772,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_QUOTA
 	sb->s_qcop = &ext4_qctl_operations;
 	sb->dq_op = &ext4_quota_operations;
+
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+		/* Use qctl operations for hidden quota files. */
+		sb->s_qcop = &ext4_qctl_sysfile_operations;
+	}
 #endif
 	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
@@ -3735,6 +3872,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 no_journal:
 	/*
+	 * Get the # of file system overhead blocks from the
+	 * superblock if present.
+	 */
+	if (es->s_overhead_clusters)
+		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+	else {
+		ret = ext4_calculate_overhead(sb);
+		if (ret)
+			goto failed_mount_wq;
+	}
+
+	/*
 	 * The maximum number of concurrent works can be high and
 	 * concurrency isn't really necessary.  Limit it to 1.
 	 */
@@ -3840,6 +3989,16 @@ no_journal:
 	} else
 		descr = "out journal";
 
+#ifdef CONFIG_QUOTA
+	/* Enable quota usage during mount. */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+	    !(sb->s_flags & MS_RDONLY)) {
+		ret = ext4_enable_quotas(sb);
+		if (ret)
+			goto failed_mount7;
+	}
+#endif /* CONFIG_QUOTA */
+
 	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
 		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
 		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
@@ -4203,7 +4362,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	es->s_free_inodes_count =
 		cpu_to_le32(percpu_counter_sum_positive(
 			&EXT4_SB(sb)->s_freeinodes_counter));
-	sb->s_dirt = 0;
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb, es);
 	mark_buffer_dirty(sbh);
@@ -4310,13 +4468,6 @@ int ext4_force_commit(struct super_block *sb)
 	return ret;
 }
 
-static void ext4_write_super(struct super_block *sb)
-{
-	lock_super(sb);
-	ext4_commit_super(sb, 1);
-	unlock_super(sb);
-}
-
 static int ext4_sync_fs(struct super_block *sb, int wait)
 {
 	int ret = 0;
@@ -4567,16 +4718,26 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	if (sbi->s_journal == NULL)
 		ext4_commit_super(sb, 1);
 
+	unlock_super(sb);
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
 		if (old_opts.s_qf_names[i] &&
 		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
 			kfree(old_opts.s_qf_names[i]);
+	if (enable_quota) {
+		if (sb_any_quota_suspended(sb))
+			dquot_resume(sb, -1);
+		else if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+			err = ext4_enable_quotas(sb);
+			if (err) {
+				lock_super(sb);
+				goto restore_opts;
+			}
+		}
+	}
 #endif
-	unlock_super(sb);
-	if (enable_quota)
-		dquot_resume(sb, -1);
 
 	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
 	kfree(orig_data);
@@ -4605,67 +4766,21 @@ restore_opts:
 	return err;
 }
 
-/*
- * Note: calculating the overhead so we can be compatible with
- * historical BSD practice is quite difficult in the face of
- * clusters/bigalloc.  This is because multiple metadata blocks from
- * different block group can end up in the same allocation cluster.
- * Calculating the exact overhead in the face of clustered allocation
- * requires either O(all block bitmaps) in memory or O(number of block
- * groups**2) in time.  We will still calculate the superblock for
- * older file systems --- and if we come across with a bigalloc file
- * system with zero in s_overhead_clusters the estimate will be close to
- * correct especially for very large cluster sizes --- but for newer
- * file systems, it's better to calculate this figure once at mkfs
- * time, and store it in the superblock.  If the superblock value is
- * present (even for non-bigalloc file systems), we will use it.
- */
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
-	struct ext4_group_desc *gdp;
+	ext4_fsblk_t overhead = 0;
 	u64 fsid;
 	s64 bfree;
 
-	if (test_opt(sb, MINIX_DF)) {
-		sbi->s_overhead_last = 0;
-	} else if (es->s_overhead_clusters) {
-		sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
-	} else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
-		ext4_group_t i, ngroups = ext4_get_groups_count(sb);
-		ext4_fsblk_t overhead = 0;
-
-		/*
-		 * Compute the overhead (FS structures).  This is constant
-		 * for a given filesystem unless the number of block groups
-		 * changes so we cache the previous value until it does.
-		 */
-
-		/*
-		 * All of the blocks before first_data_block are
-		 * overhead
-		 */
-		overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
-
-		/*
-		 * Add the overhead found in each block group
-		 */
-		for (i = 0; i < ngroups; i++) {
-			gdp = ext4_get_group_desc(sb, i, NULL);
-			overhead += ext4_num_overhead_clusters(sb, i, gdp);
-			cond_resched();
-		}
-		sbi->s_overhead_last = overhead;
-		smp_wmb();
-		sbi->s_blocks_last = ext4_blocks_count(es);
-	}
+	if (!test_opt(sb, MINIX_DF))
+		overhead = sbi->s_overhead;
 
 	buf->f_type = EXT4_SUPER_MAGIC;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = (ext4_blocks_count(es) -
-			 EXT4_C2B(sbi, sbi->s_overhead_last));
+	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
 	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
 		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
 	/* prevent underflow in case that few free space is available */
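
The statfs() hunk above replaces the per-call overhead computation with the value cached in sbi->s_overhead at mount time. As a quick illustration of the resulting arithmetic, here is a standalone sketch with made-up numbers (1,000,000 blocks, 16 blocks per cluster, 3,000 overhead clusters); it is not kernel code.

/* Made-up figures: a bigalloc file system with 16 blocks per cluster,
 * 1,000,000 blocks total, and a cached overhead of 3,000 clusters. */
#include <stdio.h>

int main(void)
{
	unsigned long long blocks_count = 1000000; /* ext4_blocks_count(es) */
	unsigned long long overhead = 3000;        /* sbi->s_overhead, in clusters */
	unsigned int cluster_ratio = 16;           /* the factor EXT4_C2B() scales by */

	/* f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead) */
	printf("f_blocks = %llu\n", blocks_count - overhead * cluster_ratio);
	return 0;
}

With minix_df the local overhead stays 0, so f_blocks reports the raw block count, preserving the old minix_df behaviour.
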
@@ -4835,6 +4950,74 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 	return dquot_quota_on(sb, type, format_id, path);
 }
 
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+			     unsigned int flags)
+{
+	int err;
+	struct inode *qf_inode;
+	unsigned long qf_inums[MAXQUOTAS] = {
+		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+	};
+
+	BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA));
+
+	if (!qf_inums[type])
+		return -EPERM;
+
+	qf_inode = ext4_iget(sb, qf_inums[type]);
+	if (IS_ERR(qf_inode)) {
+		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
+		return PTR_ERR(qf_inode);
+	}
+
+	err = dquot_enable(qf_inode, type, format_id, flags);
+	iput(qf_inode);
+
+	return err;
+}
+
+/* Enable usage tracking for all quota types. */
+static int ext4_enable_quotas(struct super_block *sb)
+{
+	int type, err = 0;
+	unsigned long qf_inums[MAXQUOTAS] = {
+		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+	};
+
+	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+	for (type = 0; type < MAXQUOTAS; type++) {
+		if (qf_inums[type]) {
+			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
+						DQUOT_USAGE_ENABLED);
+			if (err) {
+				ext4_warning(sb,
+					"Failed to enable quota (type=%d) "
+					"tracking. Please run e2fsck to fix.",
+					type);
+				return err;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * quota_on function that is used when QUOTA feature is set.
+ */
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+				 int format_id)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+		return -EINVAL;
+
+	/*
+	 * USAGE was enabled at mount time.  Only need to enable LIMITS now.
+	 */
+	return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
+}
+
 static int ext4_quota_off(struct super_block *sb, int type)
 {
 	struct inode *inode = sb_dqopt(sb)->files[type];
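
The pair of functions above implements a two-stage bring-up for the new QUOTA feature: usage accounting is enabled from the hidden quota inodes during mount (ext4_enable_quotas()), and limit enforcement is added only when quotas are turned on through the sysfile quotactl path (ext4_quota_on_sysfile()). A rough userspace model of that flag progression, with invented helper names and placeholder flag values standing in for the real DQUOT_USAGE_ENABLED/DQUOT_LIMITS_ENABLED handling, might look like this:

/* Toy model of the two-stage quota enablement; not kernel code. */
#include <stdio.h>

#define USAGE_ENABLED  0x1  /* stands in for DQUOT_USAGE_ENABLED  */
#define LIMITS_ENABLED 0x2  /* stands in for DQUOT_LIMITS_ENABLED */

static unsigned int state;

static void mount_time_enable(void)  { state |= USAGE_ENABLED; }   /* like ext4_enable_quotas()     */
static void quotaon_sysfile(void)    { state |= LIMITS_ENABLED; }  /* like ext4_quota_on_sysfile()  */
static void quotaoff_sysfile(void)   { state &= ~LIMITS_ENABLED; } /* like ext4_quota_off_sysfile() */

int main(void)
{
	mount_time_enable();
	printf("after mount:     %#x (usage only)\n", state);
	quotaon_sysfile();
	printf("after quota_on:  %#x (usage + limits)\n", state);
	quotaoff_sysfile();
	printf("after quota_off: %#x (usage keeps running)\n", state);
	return 0;
}

This also explains why ext4_quota_off_sysfile() below clears only the limits flag: usage tracking on the hidden inodes keeps running for the lifetime of the mount.
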
@@ -4861,6 +5044,18 @@ out:
 	return dquot_quota_off(sb, type);
 }
 
+/*
+ * quota_off function that is used when QUOTA feature is set.
+ */
+static int ext4_quota_off_sysfile(struct super_block *sb, int type)
+{
+	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+		return -EINVAL;
+
+	/* Disable only the limits. */
+	return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+}
+
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
  * itself serializes the operations (and no one else should touch the files)