-rw-r--r--  Documentation/filesystems/ext3.txt | 15
-rw-r--r--  fs/ext2/balloc.c | 6
-rw-r--r--  fs/ext2/ialloc.c | 9
-rw-r--r--  fs/ext2/inode.c | 7
-rw-r--r--  fs/ext2/super.c | 99
-rw-r--r--  fs/ext2/xattr.c | 2
-rw-r--r--  fs/ext3/balloc.c | 6
-rw-r--r--  fs/ext3/fsync.c | 20
-rw-r--r--  fs/ext3/inode.c | 2
-rw-r--r--  fs/ext3/super.c | 77
-rw-r--r--  fs/ext4/inode.c | 2
-rw-r--r--  fs/gfs2/quota.c | 12
-rw-r--r--  fs/jbd/commit.c | 8
-rw-r--r--  fs/jbd/journal.c | 33
-rw-r--r--  fs/jfs/file.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 3
-rw-r--r--  fs/ocfs2/file.c | 21
-rw-r--r--  fs/ocfs2/quota.h | 12
-rw-r--r--  fs/ocfs2/quota_global.c | 347
-rw-r--r--  fs/ocfs2/quota_local.c | 133
-rw-r--r--  fs/ocfs2/super.c | 4
-rw-r--r--  fs/quota/dquot.c | 275
-rw-r--r--  fs/quota/quota.c | 63
-rw-r--r--  fs/quota/quota_tree.c | 50
-rw-r--r--  fs/quota/quota_tree.h | 6
-rw-r--r--  fs/quota/quota_v1.c | 4
-rw-r--r--  fs/quota/quota_v2.c | 6
-rw-r--r--  fs/reiserfs/inode.c | 3
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/ufs/inode.c | 2
-rw-r--r--  fs/ufs/namei.c | 2
-rw-r--r--  fs/ufs/symlink.c | 8
-rw-r--r--  fs/ufs/truncate.c | 10
-rw-r--r--  fs/ufs/ufs.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_quotaops.c | 8
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 10
-rw-r--r--  include/linux/dqblk_xfs.h | 9
-rw-r--r--  include/linux/ext2_fs_sb.h | 9
-rw-r--r--  include/linux/jbd.h | 3
-rw-r--r--  include/linux/quota.h | 48
-rw-r--r--  include/linux/quotaops.h | 15
41 files changed, 829 insertions, 526 deletions
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 867c5b50cb42..272f80d5f966 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,8 +59,19 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
59 Setting it to very large values will improve 59 Setting it to very large values will improve
60 performance. 60 performance.
61 61
62barrier=1 This enables/disables barriers. barrier=0 disables 62barrier=<0(*)|1> This enables/disables the use of write barriers in
63 it, barrier=1 enables it. 63barrier the jbd code. barrier=0 disables, barrier=1 enables.
64nobarrier (*) This also requires an IO stack which can support
65 barriers, and if jbd gets an error on a barrier
66 write, it will disable again with a warning.
67 Write barriers enforce proper on-disk ordering
68 of journal commits, making volatile disk write caches
69 safe to use, at some performance penalty. If
70 your disks are battery-backed in one way or another,
71 disabling barriers may safely improve performance.
72 The mount options "barrier" and "nobarrier" can
73 also be used to enable or disable barriers, for
74 consistency with other ext3 mount options.
64 75
65orlov (*) This enables the new Orlov block allocator. It is 76orlov (*) This enables the new Orlov block allocator. It is
66 enabled by default. 77 enabled by default.
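
The documentation hunk above describes three spellings of the same ext3 mount option: barrier=0 / barrier=1, plus the bare "barrier" and "nobarrier" forms added for consistency, with barriers disabled by default. As a reading aid only, here is a small standalone C model of the documented semantics (barrier_enabled() is an invented name, not kernel code); the real parser change is in the fs/ext3/super.c hunks further down.

#include <stdio.h>
#include <string.h>

/* Standalone model of the documented option semantics; illustrative only. */
static int barrier_enabled(const char *opt)
{
	unsigned int val;

	if (!strcmp(opt, "nobarrier"))
		return 0;
	if (!strcmp(opt, "barrier"))
		return 1;
	if (sscanf(opt, "barrier=%u", &val) == 1)
		return val != 0;
	return 0;	/* the "(*)" marker: ext3 defaults to barriers off */
}

int main(void)
{
	const char *opts[] = { "barrier", "nobarrier", "barrier=0", "barrier=1" };

	for (int i = 0; i < 4; i++)
		printf("%-10s -> barriers %s\n", opts[i],
		       barrier_enabled(opts[i]) ? "on" : "off");
	return 0;
}
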
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3cf038c055d7..e8766a396776 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1332,6 +1332,12 @@ retry_alloc:
1332 1332
1333 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1333 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1334 /* 1334 /*
1335 * skip this group (and avoid loading bitmap) if there
1336 * are no free blocks
1337 */
1338 if (!free_blocks)
1339 continue;
1340 /*
1335 * skip this group if the number of 1341 * skip this group if the number of
1336 * free blocks is less than half of the reservation 1342 * free blocks is less than half of the reservation
1337 * window size. 1343 * window size.
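
This hunk (and the identical fs/ext3/balloc.c change further down) adds an early continue so that block groups with no free blocks are skipped before their block bitmap is read from disk. Reduced to a standalone loop with invented types, the shape of the optimisation is:

#include <stddef.h>

/* Invented stand-in for the on-disk group descriptor. */
struct group_desc {
	unsigned short free_blocks;	/* plays the role of bg_free_blocks_count */
};

/* Return the index of the first group worth scanning, or -1. The point of
 * the patch: test the cheap free-block counter first, and only load the
 * (expensive) bitmap for groups that can actually satisfy the allocation. */
static int find_candidate_group(const struct group_desc *groups, size_t ngroups,
				unsigned int rsv_window_size)
{
	for (size_t i = 0; i < ngroups; i++) {
		unsigned int free_blocks = groups[i].free_blocks;

		/* skip this group (and avoid loading bitmap) if there
		 * are no free blocks */
		if (!free_blocks)
			continue;
		/* skip this group if the number of free blocks is less
		 * than half of the reservation window size */
		if (free_blocks <= rsv_window_size / 2)
			continue;
		return (int)i;
	}
	return -1;
}
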
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index ad7d572ee8dc..f0c5286f9342 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -106,7 +106,7 @@ void ext2_free_inode (struct inode * inode)
106 struct super_block * sb = inode->i_sb; 106 struct super_block * sb = inode->i_sb;
107 int is_directory; 107 int is_directory;
108 unsigned long ino; 108 unsigned long ino;
109 struct buffer_head *bitmap_bh = NULL; 109 struct buffer_head *bitmap_bh;
110 unsigned long block_group; 110 unsigned long block_group;
111 unsigned long bit; 111 unsigned long bit;
112 struct ext2_super_block * es; 112 struct ext2_super_block * es;
@@ -135,14 +135,13 @@ void ext2_free_inode (struct inode * inode)
135 ino > le32_to_cpu(es->s_inodes_count)) { 135 ino > le32_to_cpu(es->s_inodes_count)) {
136 ext2_error (sb, "ext2_free_inode", 136 ext2_error (sb, "ext2_free_inode",
137 "reserved or nonexistent inode %lu", ino); 137 "reserved or nonexistent inode %lu", ino);
138 goto error_return; 138 return;
139 } 139 }
140 block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb); 140 block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
141 bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb); 141 bit = (ino - 1) % EXT2_INODES_PER_GROUP(sb);
142 brelse(bitmap_bh);
143 bitmap_bh = read_inode_bitmap(sb, block_group); 142 bitmap_bh = read_inode_bitmap(sb, block_group);
144 if (!bitmap_bh) 143 if (!bitmap_bh)
145 goto error_return; 144 return;
146 145
147 /* Ok, now we can actually update the inode bitmaps.. */ 146 /* Ok, now we can actually update the inode bitmaps.. */
148 if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group), 147 if (!ext2_clear_bit_atomic(sb_bgl_lock(EXT2_SB(sb), block_group),
@@ -154,7 +153,7 @@ void ext2_free_inode (struct inode * inode)
154 mark_buffer_dirty(bitmap_bh); 153 mark_buffer_dirty(bitmap_bh);
155 if (sb->s_flags & MS_SYNCHRONOUS) 154 if (sb->s_flags & MS_SYNCHRONOUS)
156 sync_dirty_buffer(bitmap_bh); 155 sync_dirty_buffer(bitmap_bh);
157error_return: 156
158 brelse(bitmap_bh); 157 brelse(bitmap_bh);
159} 158}
160 159
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fc13cc119aad..527c46d9bc1f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -22,7 +22,6 @@
22 * Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000 22 * Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
23 */ 23 */
24 24
25#include <linux/smp_lock.h>
26#include <linux/time.h> 25#include <linux/time.h>
27#include <linux/highuid.h> 26#include <linux/highuid.h>
28#include <linux/pagemap.h> 27#include <linux/pagemap.h>
@@ -1406,11 +1405,11 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
1406 /* If this is the first large file 1405 /* If this is the first large file
1407 * created, add a flag to the superblock. 1406 * created, add a flag to the superblock.
1408 */ 1407 */
1409 lock_kernel(); 1408 spin_lock(&EXT2_SB(sb)->s_lock);
1410 ext2_update_dynamic_rev(sb); 1409 ext2_update_dynamic_rev(sb);
1411 EXT2_SET_RO_COMPAT_FEATURE(sb, 1410 EXT2_SET_RO_COMPAT_FEATURE(sb,
1412 EXT2_FEATURE_RO_COMPAT_LARGE_FILE); 1411 EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
1413 unlock_kernel(); 1412 spin_unlock(&EXT2_SB(sb)->s_lock);
1414 ext2_write_super(sb); 1413 ext2_write_super(sb);
1415 } 1414 }
1416 } 1415 }
@@ -1467,7 +1466,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1467 if (error) 1466 if (error)
1468 return error; 1467 return error;
1469 1468
1470 if (iattr->ia_valid & ATTR_SIZE) 1469 if (is_quota_modification(inode, iattr))
1471 dquot_initialize(inode); 1470 dquot_initialize(inode);
1472 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 1471 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
1473 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 1472 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
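
This hunk, and the matching ones in fs/ext3/inode.c, fs/ext4/inode.c, fs/jfs/file.c and fs/ocfs2/file.c below, replace the bare ATTR_SIZE test with is_quota_modification() before calling dquot_initialize(). The helper itself comes from the include/linux/quotaops.h change in the diffstat and is not shown in this section; judging from the call sites, its assumed shape is:

/*
 * Assumed sketch of the new helper (the real definition is in the
 * include/linux/quotaops.h hunk, which is not part of this excerpt):
 * quota bookkeeping has to be initialised whenever a setattr is about to
 * change the size, the owner or the group of the inode.
 */
static inline int is_quota_modification(struct inode *inode, struct iattr *ia)
{
	return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
	       (ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
	       (ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
}
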
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 42e4a303b675..71e9eb1fa696 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -26,7 +26,6 @@
26#include <linux/random.h> 26#include <linux/random.h>
27#include <linux/buffer_head.h> 27#include <linux/buffer_head.h>
28#include <linux/exportfs.h> 28#include <linux/exportfs.h>
29#include <linux/smp_lock.h>
30#include <linux/vfs.h> 29#include <linux/vfs.h>
31#include <linux/seq_file.h> 30#include <linux/seq_file.h>
32#include <linux/mount.h> 31#include <linux/mount.h>
@@ -39,7 +38,7 @@
39#include "xip.h" 38#include "xip.h"
40 39
41static void ext2_sync_super(struct super_block *sb, 40static void ext2_sync_super(struct super_block *sb,
42 struct ext2_super_block *es); 41 struct ext2_super_block *es, int wait);
43static int ext2_remount (struct super_block * sb, int * flags, char * data); 42static int ext2_remount (struct super_block * sb, int * flags, char * data);
44static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); 43static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
45static int ext2_sync_fs(struct super_block *sb, int wait); 44static int ext2_sync_fs(struct super_block *sb, int wait);
@@ -52,9 +51,11 @@ void ext2_error (struct super_block * sb, const char * function,
52 struct ext2_super_block *es = sbi->s_es; 51 struct ext2_super_block *es = sbi->s_es;
53 52
54 if (!(sb->s_flags & MS_RDONLY)) { 53 if (!(sb->s_flags & MS_RDONLY)) {
54 spin_lock(&sbi->s_lock);
55 sbi->s_mount_state |= EXT2_ERROR_FS; 55 sbi->s_mount_state |= EXT2_ERROR_FS;
56 es->s_state |= cpu_to_le16(EXT2_ERROR_FS); 56 es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
57 ext2_sync_super(sb, es); 57 spin_unlock(&sbi->s_lock);
58 ext2_sync_super(sb, es, 1);
58 } 59 }
59 60
60 va_start(args, fmt); 61 va_start(args, fmt);
@@ -84,6 +85,9 @@ void ext2_msg(struct super_block *sb, const char *prefix,
84 va_end(args); 85 va_end(args);
85} 86}
86 87
88/*
89 * This must be called with sbi->s_lock held.
90 */
87void ext2_update_dynamic_rev(struct super_block *sb) 91void ext2_update_dynamic_rev(struct super_block *sb)
88{ 92{
89 struct ext2_super_block *es = EXT2_SB(sb)->s_es; 93 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
@@ -115,8 +119,6 @@ static void ext2_put_super (struct super_block * sb)
115 int i; 119 int i;
116 struct ext2_sb_info *sbi = EXT2_SB(sb); 120 struct ext2_sb_info *sbi = EXT2_SB(sb);
117 121
118 lock_kernel();
119
120 if (sb->s_dirt) 122 if (sb->s_dirt)
121 ext2_write_super(sb); 123 ext2_write_super(sb);
122 124
@@ -124,8 +126,10 @@ static void ext2_put_super (struct super_block * sb)
124 if (!(sb->s_flags & MS_RDONLY)) { 126 if (!(sb->s_flags & MS_RDONLY)) {
125 struct ext2_super_block *es = sbi->s_es; 127 struct ext2_super_block *es = sbi->s_es;
126 128
129 spin_lock(&sbi->s_lock);
127 es->s_state = cpu_to_le16(sbi->s_mount_state); 130 es->s_state = cpu_to_le16(sbi->s_mount_state);
128 ext2_sync_super(sb, es); 131 spin_unlock(&sbi->s_lock);
132 ext2_sync_super(sb, es, 1);
129 } 133 }
130 db_count = sbi->s_gdb_count; 134 db_count = sbi->s_gdb_count;
131 for (i = 0; i < db_count; i++) 135 for (i = 0; i < db_count; i++)
@@ -140,8 +144,6 @@ static void ext2_put_super (struct super_block * sb)
140 sb->s_fs_info = NULL; 144 sb->s_fs_info = NULL;
141 kfree(sbi->s_blockgroup_lock); 145 kfree(sbi->s_blockgroup_lock);
142 kfree(sbi); 146 kfree(sbi);
143
144 unlock_kernel();
145} 147}
146 148
147static struct kmem_cache * ext2_inode_cachep; 149static struct kmem_cache * ext2_inode_cachep;
@@ -209,6 +211,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
209 struct ext2_super_block *es = sbi->s_es; 211 struct ext2_super_block *es = sbi->s_es;
210 unsigned long def_mount_opts; 212 unsigned long def_mount_opts;
211 213
214 spin_lock(&sbi->s_lock);
212 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 215 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
213 216
214 if (sbi->s_sb_block != 1) 217 if (sbi->s_sb_block != 1)
@@ -281,6 +284,7 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
281 if (!test_opt(sb, RESERVATION)) 284 if (!test_opt(sb, RESERVATION))
282 seq_puts(seq, ",noreservation"); 285 seq_puts(seq, ",noreservation");
283 286
287 spin_unlock(&sbi->s_lock);
284 return 0; 288 return 0;
285} 289}
286 290
@@ -606,7 +610,6 @@ static int ext2_setup_super (struct super_block * sb,
606 if (!le16_to_cpu(es->s_max_mnt_count)) 610 if (!le16_to_cpu(es->s_max_mnt_count))
607 es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT); 611 es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
608 le16_add_cpu(&es->s_mnt_count, 1); 612 le16_add_cpu(&es->s_mnt_count, 1);
609 ext2_write_super(sb);
610 if (test_opt (sb, DEBUG)) 613 if (test_opt (sb, DEBUG))
611 ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, " 614 ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
612 "bpg=%lu, ipg=%lu, mo=%04lx]", 615 "bpg=%lu, ipg=%lu, mo=%04lx]",
@@ -767,6 +770,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
767 sb->s_fs_info = sbi; 770 sb->s_fs_info = sbi;
768 sbi->s_sb_block = sb_block; 771 sbi->s_sb_block = sb_block;
769 772
773 spin_lock_init(&sbi->s_lock);
774
770 /* 775 /*
771 * See what the current blocksize for the device is, and 776 * See what the current blocksize for the device is, and
772 * use that as the blocksize. Otherwise (or if the blocksize 777 * use that as the blocksize. Otherwise (or if the blocksize
@@ -1079,7 +1084,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
1079 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) 1084 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
1080 ext2_msg(sb, KERN_WARNING, 1085 ext2_msg(sb, KERN_WARNING,
1081 "warning: mounting ext3 filesystem as ext2"); 1086 "warning: mounting ext3 filesystem as ext2");
1082 ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY); 1087 if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
1088 sb->s_flags |= MS_RDONLY;
1089 ext2_write_super(sb);
1083 return 0; 1090 return 0;
1084 1091
1085cantfind_ext2: 1092cantfind_ext2:
@@ -1120,30 +1127,26 @@ static void ext2_clear_super_error(struct super_block *sb)
1120 * be remapped. Nothing we can do but to retry the 1127 * be remapped. Nothing we can do but to retry the
1121 * write and hope for the best. 1128 * write and hope for the best.
1122 */ 1129 */
1123 printk(KERN_ERR "EXT2-fs: %s previous I/O error to " 1130 ext2_msg(sb, KERN_ERR,
1124 "superblock detected", sb->s_id); 1131 "previous I/O error to superblock detected\n");
1125 clear_buffer_write_io_error(sbh); 1132 clear_buffer_write_io_error(sbh);
1126 set_buffer_uptodate(sbh); 1133 set_buffer_uptodate(sbh);
1127 } 1134 }
1128} 1135}
1129 1136
1130static void ext2_commit_super (struct super_block * sb, 1137static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
1131 struct ext2_super_block * es) 1138 int wait)
1132{
1133 ext2_clear_super_error(sb);
1134 es->s_wtime = cpu_to_le32(get_seconds());
1135 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1136 sb->s_dirt = 0;
1137}
1138
1139static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
1140{ 1139{
1141 ext2_clear_super_error(sb); 1140 ext2_clear_super_error(sb);
1141 spin_lock(&EXT2_SB(sb)->s_lock);
1142 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb)); 1142 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1143 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb)); 1143 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1144 es->s_wtime = cpu_to_le32(get_seconds()); 1144 es->s_wtime = cpu_to_le32(get_seconds());
1145 /* unlock before we do IO */
1146 spin_unlock(&EXT2_SB(sb)->s_lock);
1145 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 1147 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1146 sync_dirty_buffer(EXT2_SB(sb)->s_sbh); 1148 if (wait)
1149 sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
1147 sb->s_dirt = 0; 1150 sb->s_dirt = 0;
1148} 1151}
1149 1152
@@ -1157,43 +1160,18 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
1157 * may have been checked while mounted and e2fsck may have 1160 * may have been checked while mounted and e2fsck may have
1158 * set s_state to EXT2_VALID_FS after some corrections. 1161 * set s_state to EXT2_VALID_FS after some corrections.
1159 */ 1162 */
1160
1161static int ext2_sync_fs(struct super_block *sb, int wait) 1163static int ext2_sync_fs(struct super_block *sb, int wait)
1162{ 1164{
1165 struct ext2_sb_info *sbi = EXT2_SB(sb);
1163 struct ext2_super_block *es = EXT2_SB(sb)->s_es; 1166 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
1164 struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1165
1166 lock_kernel();
1167 if (buffer_write_io_error(sbh)) {
1168 /*
1169 * Oh, dear. A previous attempt to write the
1170 * superblock failed. This could happen because the
1171 * USB device was yanked out. Or it could happen to
1172 * be a transient write error and maybe the block will
1173 * be remapped. Nothing we can do but to retry the
1174 * write and hope for the best.
1175 */
1176 ext2_msg(sb, KERN_ERR,
1177 "previous I/O error to superblock detected\n");
1178 clear_buffer_write_io_error(sbh);
1179 set_buffer_uptodate(sbh);
1180 }
1181 1167
1168 spin_lock(&sbi->s_lock);
1182 if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) { 1169 if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
1183 ext2_debug("setting valid to 0\n"); 1170 ext2_debug("setting valid to 0\n");
1184 es->s_state &= cpu_to_le16(~EXT2_VALID_FS); 1171 es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
1185 es->s_free_blocks_count =
1186 cpu_to_le32(ext2_count_free_blocks(sb));
1187 es->s_free_inodes_count =
1188 cpu_to_le32(ext2_count_free_inodes(sb));
1189 es->s_mtime = cpu_to_le32(get_seconds());
1190 ext2_sync_super(sb, es);
1191 } else {
1192 ext2_commit_super(sb, es);
1193 } 1172 }
1194 sb->s_dirt = 0; 1173 spin_unlock(&sbi->s_lock);
1195 unlock_kernel(); 1174 ext2_sync_super(sb, es, wait);
1196
1197 return 0; 1175 return 0;
1198} 1176}
1199 1177
@@ -1215,7 +1193,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1215 unsigned long old_sb_flags; 1193 unsigned long old_sb_flags;
1216 int err; 1194 int err;
1217 1195
1218 lock_kernel(); 1196 spin_lock(&sbi->s_lock);
1219 1197
1220 /* Store the old options */ 1198 /* Store the old options */
1221 old_sb_flags = sb->s_flags; 1199 old_sb_flags = sb->s_flags;
@@ -1254,13 +1232,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1254 sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP; 1232 sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
1255 } 1233 }
1256 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { 1234 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
1257 unlock_kernel(); 1235 spin_unlock(&sbi->s_lock);
1258 return 0; 1236 return 0;
1259 } 1237 }
1260 if (*flags & MS_RDONLY) { 1238 if (*flags & MS_RDONLY) {
1261 if (le16_to_cpu(es->s_state) & EXT2_VALID_FS || 1239 if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
1262 !(sbi->s_mount_state & EXT2_VALID_FS)) { 1240 !(sbi->s_mount_state & EXT2_VALID_FS)) {
1263 unlock_kernel(); 1241 spin_unlock(&sbi->s_lock);
1264 return 0; 1242 return 0;
1265 } 1243 }
1266 /* 1244 /*
@@ -1269,6 +1247,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1269 */ 1247 */
1270 es->s_state = cpu_to_le16(sbi->s_mount_state); 1248 es->s_state = cpu_to_le16(sbi->s_mount_state);
1271 es->s_mtime = cpu_to_le32(get_seconds()); 1249 es->s_mtime = cpu_to_le32(get_seconds());
1250 spin_unlock(&sbi->s_lock);
1251 ext2_sync_super(sb, es, 1);
1272 } else { 1252 } else {
1273 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb, 1253 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
1274 ~EXT2_FEATURE_RO_COMPAT_SUPP); 1254 ~EXT2_FEATURE_RO_COMPAT_SUPP);
@@ -1288,16 +1268,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1288 sbi->s_mount_state = le16_to_cpu(es->s_state); 1268 sbi->s_mount_state = le16_to_cpu(es->s_state);
1289 if (!ext2_setup_super (sb, es, 0)) 1269 if (!ext2_setup_super (sb, es, 0))
1290 sb->s_flags &= ~MS_RDONLY; 1270 sb->s_flags &= ~MS_RDONLY;
1271 spin_unlock(&sbi->s_lock);
1272 ext2_write_super(sb);
1291 } 1273 }
1292 ext2_sync_super(sb, es);
1293 unlock_kernel();
1294 return 0; 1274 return 0;
1295restore_opts: 1275restore_opts:
1296 sbi->s_mount_opt = old_opts.s_mount_opt; 1276 sbi->s_mount_opt = old_opts.s_mount_opt;
1297 sbi->s_resuid = old_opts.s_resuid; 1277 sbi->s_resuid = old_opts.s_resuid;
1298 sbi->s_resgid = old_opts.s_resgid; 1278 sbi->s_resgid = old_opts.s_resgid;
1299 sb->s_flags = old_sb_flags; 1279 sb->s_flags = old_sb_flags;
1300 unlock_kernel(); 1280 spin_unlock(&sbi->s_lock);
1301 return err; 1281 return err;
1302} 1282}
1303 1283
@@ -1308,6 +1288,8 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1308 struct ext2_super_block *es = sbi->s_es; 1288 struct ext2_super_block *es = sbi->s_es;
1309 u64 fsid; 1289 u64 fsid;
1310 1290
1291 spin_lock(&sbi->s_lock);
1292
1311 if (test_opt (sb, MINIX_DF)) 1293 if (test_opt (sb, MINIX_DF))
1312 sbi->s_overhead_last = 0; 1294 sbi->s_overhead_last = 0;
1313 else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { 1295 else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
@@ -1362,6 +1344,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1362 le64_to_cpup((void *)es->s_uuid + sizeof(u64)); 1344 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
1363 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; 1345 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1364 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; 1346 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1347 spin_unlock(&sbi->s_lock);
1365 return 0; 1348 return 0;
1366} 1349}
1367 1350
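
Taken together, the fs/ext2/super.c hunks replace the big kernel lock with a per-filesystem spinlock that guards the in-memory superblock copy (sbi->s_es and friends), and ext2_sync_super() absorbs ext2_commit_super() by growing a wait argument. The new field presumably comes from the include/linux/ext2_fs_sb.h entry in the diffstat; a sketch of the resulting pattern follows, with ext2_example_update() being an invented name.

/*
 * Assumed addition to struct ext2_sb_info (include/linux/ext2_fs_sb.h is in
 * the diffstat but not shown here):
 *
 *	spinlock_t s_lock;	 protects the in-memory superblock fields
 *
 * and the pattern the hunks above follow: modify the fields under the
 * spinlock, drop it, and only then do the (sleeping) buffer I/O.
 */
static void ext2_example_update(struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	spin_lock(&sbi->s_lock);
	/* ... update sbi->s_es / sbi->s_mount_state ... */
	spin_unlock(&sbi->s_lock);	/* "unlock before we do IO" */
	mark_buffer_dirty(sbi->s_sbh);
	sync_dirty_buffer(sbi->s_sbh);
}
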
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index e44dc92609be..3b96045a00ce 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -345,7 +345,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
345 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR)) 345 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
346 return; 346 return;
347 347
348 spin_lock(&EXT2_SB(sb)->s_lock);
348 EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR); 349 EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
350 spin_unlock(&EXT2_SB(sb)->s_lock);
349 sb->s_dirt = 1; 351 sb->s_dirt = 1;
350 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 352 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
351} 353}
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a177122a1b25..4a32511f4ded 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1584,6 +1584,12 @@ retry_alloc:
1584 goto io_error; 1584 goto io_error;
1585 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1585 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1586 /* 1586 /*
1587 * skip this group (and avoid loading bitmap) if there
1588 * are no free blocks
1589 */
1590 if (!free_blocks)
1591 continue;
1592 /*
1587 * skip this group if the number of 1593 * skip this group if the number of
1588 * free blocks is less than half of the reservation 1594 * free blocks is less than half of the reservation
1589 * window size. 1595 * window size.
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 8209f266e9ad..26289e8f4163 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -48,7 +48,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
48 struct inode *inode = dentry->d_inode; 48 struct inode *inode = dentry->d_inode;
49 struct ext3_inode_info *ei = EXT3_I(inode); 49 struct ext3_inode_info *ei = EXT3_I(inode);
50 journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; 50 journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
51 int ret = 0; 51 int ret, needs_barrier = 0;
52 tid_t commit_tid; 52 tid_t commit_tid;
53 53
54 if (inode->i_sb->s_flags & MS_RDONLY) 54 if (inode->i_sb->s_flags & MS_RDONLY)
@@ -70,28 +70,26 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
70 * (they were dirtied by commit). But that's OK - the blocks are 70 * (they were dirtied by commit). But that's OK - the blocks are
71 * safe in-journal, which is all fsync() needs to ensure. 71 * safe in-journal, which is all fsync() needs to ensure.
72 */ 72 */
73 if (ext3_should_journal_data(inode)) { 73 if (ext3_should_journal_data(inode))
74 ret = ext3_force_commit(inode->i_sb); 74 return ext3_force_commit(inode->i_sb);
75 goto out;
76 }
77 75
78 if (datasync) 76 if (datasync)
79 commit_tid = atomic_read(&ei->i_datasync_tid); 77 commit_tid = atomic_read(&ei->i_datasync_tid);
80 else 78 else
81 commit_tid = atomic_read(&ei->i_sync_tid); 79 commit_tid = atomic_read(&ei->i_sync_tid);
82 80
83 if (log_start_commit(journal, commit_tid)) { 81 if (test_opt(inode->i_sb, BARRIER) &&
84 log_wait_commit(journal, commit_tid); 82 !journal_trans_will_send_data_barrier(journal, commit_tid))
85 goto out; 83 needs_barrier = 1;
86 } 84 log_start_commit(journal, commit_tid);
85 ret = log_wait_commit(journal, commit_tid);
87 86
88 /* 87 /*
89 * In case we didn't commit a transaction, we have to flush 88 * In case we didn't commit a transaction, we have to flush
90 * disk caches manually so that data really is on persistent 89 * disk caches manually so that data really is on persistent
91 * storage 90 * storage
92 */ 91 */
93 if (test_opt(inode->i_sb, BARRIER)) 92 if (needs_barrier)
94 blkdev_issue_flush(inode->i_sb->s_bdev, NULL); 93 blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
95out:
96 return ret; 94 return ret;
97} 95}
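
The reworked ext3_sync_file() decides up front whether it must issue its own cache flush: only when barriers are enabled and journal_trans_will_send_data_barrier() cannot guarantee that committing this transaction will still send one. A trivial standalone restatement of that condition, with an invented function name:

#include <stdbool.h>

/* needs_barrier in the hunk above, as a pure predicate. */
static bool fsync_needs_explicit_flush(bool barrier_mount_opt,
				       bool commit_will_send_barrier)
{
	return barrier_mount_opt && !commit_will_send_barrier;
}

If the journal itself is still going to write the commit record with a barrier, the extra blkdev_issue_flush() would only flush the disk cache a second time for no benefit.
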
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ea33bdf0a300..735f0190ec2a 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3151,7 +3151,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3151 if (error) 3151 if (error)
3152 return error; 3152 return error;
3153 3153
3154 if (ia_valid & ATTR_SIZE) 3154 if (is_quota_modification(inode, attr))
3155 dquot_initialize(inode); 3155 dquot_initialize(inode);
3156 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 3156 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3157 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 3157 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 1bee604cc6cd..0fc1293d0e96 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -653,8 +653,12 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
653 seq_printf(seq, ",commit=%u", 653 seq_printf(seq, ",commit=%u",
654 (unsigned) (sbi->s_commit_interval / HZ)); 654 (unsigned) (sbi->s_commit_interval / HZ));
655 } 655 }
656 if (test_opt(sb, BARRIER)) 656
657 seq_puts(seq, ",barrier=1"); 657 /*
658 * Always display barrier state so it's clear what the status is.
659 */
660 seq_puts(seq, ",barrier=");
661 seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
658 if (test_opt(sb, NOBH)) 662 if (test_opt(sb, NOBH))
659 seq_puts(seq, ",nobh"); 663 seq_puts(seq, ",nobh");
660 664
@@ -810,8 +814,8 @@ enum {
810 Opt_data_err_abort, Opt_data_err_ignore, 814 Opt_data_err_abort, Opt_data_err_ignore,
811 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 815 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
812 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, 816 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
813 Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize, 817 Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
814 Opt_usrquota, Opt_grpquota 818 Opt_resize, Opt_usrquota, Opt_grpquota
815}; 819};
816 820
817static const match_table_t tokens = { 821static const match_table_t tokens = {
@@ -865,6 +869,8 @@ static const match_table_t tokens = {
865 {Opt_quota, "quota"}, 869 {Opt_quota, "quota"},
866 {Opt_usrquota, "usrquota"}, 870 {Opt_usrquota, "usrquota"},
867 {Opt_barrier, "barrier=%u"}, 871 {Opt_barrier, "barrier=%u"},
872 {Opt_barrier, "barrier"},
873 {Opt_nobarrier, "nobarrier"},
868 {Opt_resize, "resize"}, 874 {Opt_resize, "resize"},
869 {Opt_err, NULL}, 875 {Opt_err, NULL},
870}; 876};
@@ -967,7 +973,11 @@ static int parse_options (char *options, struct super_block *sb,
967 int token; 973 int token;
968 if (!*p) 974 if (!*p)
969 continue; 975 continue;
970 976 /*
977 * Initialize args struct so we know whether arg was
978 * found; some options take optional arguments.
979 */
980 args[0].to = args[0].from = 0;
971 token = match_token(p, tokens, args); 981 token = match_token(p, tokens, args);
972 switch (token) { 982 switch (token) {
973 case Opt_bsd_df: 983 case Opt_bsd_df:
@@ -1215,9 +1225,15 @@ set_qf_format:
1215 case Opt_abort: 1225 case Opt_abort:
1216 set_opt(sbi->s_mount_opt, ABORT); 1226 set_opt(sbi->s_mount_opt, ABORT);
1217 break; 1227 break;
1228 case Opt_nobarrier:
1229 clear_opt(sbi->s_mount_opt, BARRIER);
1230 break;
1218 case Opt_barrier: 1231 case Opt_barrier:
1219 if (match_int(&args[0], &option)) 1232 if (args[0].from) {
1220 return 0; 1233 if (match_int(&args[0], &option))
1234 return 0;
1235 } else
1236 option = 1; /* No argument, default to 1 */
1221 if (option) 1237 if (option)
1222 set_opt(sbi->s_mount_opt, BARRIER); 1238 set_opt(sbi->s_mount_opt, BARRIER);
1223 else 1239 else
@@ -1890,21 +1906,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1890 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 1906 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1891 spin_lock_init(&sbi->s_next_gen_lock); 1907 spin_lock_init(&sbi->s_next_gen_lock);
1892 1908
1893 err = percpu_counter_init(&sbi->s_freeblocks_counter,
1894 ext3_count_free_blocks(sb));
1895 if (!err) {
1896 err = percpu_counter_init(&sbi->s_freeinodes_counter,
1897 ext3_count_free_inodes(sb));
1898 }
1899 if (!err) {
1900 err = percpu_counter_init(&sbi->s_dirs_counter,
1901 ext3_count_dirs(sb));
1902 }
1903 if (err) {
1904 ext3_msg(sb, KERN_ERR, "error: insufficient memory");
1905 goto failed_mount3;
1906 }
1907
1908 /* per fileystem reservation list head & lock */ 1909 /* per fileystem reservation list head & lock */
1909 spin_lock_init(&sbi->s_rsv_window_lock); 1910 spin_lock_init(&sbi->s_rsv_window_lock);
1910 sbi->s_rsv_window_root = RB_ROOT; 1911 sbi->s_rsv_window_root = RB_ROOT;
@@ -1945,15 +1946,29 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1945 if (!test_opt(sb, NOLOAD) && 1946 if (!test_opt(sb, NOLOAD) &&
1946 EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) { 1947 EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
1947 if (ext3_load_journal(sb, es, journal_devnum)) 1948 if (ext3_load_journal(sb, es, journal_devnum))
1948 goto failed_mount3; 1949 goto failed_mount2;
1949 } else if (journal_inum) { 1950 } else if (journal_inum) {
1950 if (ext3_create_journal(sb, es, journal_inum)) 1951 if (ext3_create_journal(sb, es, journal_inum))
1951 goto failed_mount3; 1952 goto failed_mount2;
1952 } else { 1953 } else {
1953 if (!silent) 1954 if (!silent)
1954 ext3_msg(sb, KERN_ERR, 1955 ext3_msg(sb, KERN_ERR,
1955 "error: no journal found. " 1956 "error: no journal found. "
1956 "mounting ext3 over ext2?"); 1957 "mounting ext3 over ext2?");
1958 goto failed_mount2;
1959 }
1960 err = percpu_counter_init(&sbi->s_freeblocks_counter,
1961 ext3_count_free_blocks(sb));
1962 if (!err) {
1963 err = percpu_counter_init(&sbi->s_freeinodes_counter,
1964 ext3_count_free_inodes(sb));
1965 }
1966 if (!err) {
1967 err = percpu_counter_init(&sbi->s_dirs_counter,
1968 ext3_count_dirs(sb));
1969 }
1970 if (err) {
1971 ext3_msg(sb, KERN_ERR, "error: insufficient memory");
1957 goto failed_mount3; 1972 goto failed_mount3;
1958 } 1973 }
1959 1974
@@ -1978,7 +1993,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1978 ext3_msg(sb, KERN_ERR, 1993 ext3_msg(sb, KERN_ERR,
1979 "error: journal does not support " 1994 "error: journal does not support "
1980 "requested data journaling mode"); 1995 "requested data journaling mode");
1981 goto failed_mount4; 1996 goto failed_mount3;
1982 } 1997 }
1983 default: 1998 default:
1984 break; 1999 break;
@@ -2001,19 +2016,19 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
2001 if (IS_ERR(root)) { 2016 if (IS_ERR(root)) {
2002 ext3_msg(sb, KERN_ERR, "error: get root inode failed"); 2017 ext3_msg(sb, KERN_ERR, "error: get root inode failed");
2003 ret = PTR_ERR(root); 2018 ret = PTR_ERR(root);
2004 goto failed_mount4; 2019 goto failed_mount3;
2005 } 2020 }
2006 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 2021 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
2007 iput(root); 2022 iput(root);
2008 ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); 2023 ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
2009 goto failed_mount4; 2024 goto failed_mount3;
2010 } 2025 }
2011 sb->s_root = d_alloc_root(root); 2026 sb->s_root = d_alloc_root(root);
2012 if (!sb->s_root) { 2027 if (!sb->s_root) {
2013 ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); 2028 ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
2014 iput(root); 2029 iput(root);
2015 ret = -ENOMEM; 2030 ret = -ENOMEM;
2016 goto failed_mount4; 2031 goto failed_mount3;
2017 } 2032 }
2018 2033
2019 ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); 2034 ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2039,12 +2054,11 @@ cantfind_ext3:
2039 sb->s_id); 2054 sb->s_id);
2040 goto failed_mount; 2055 goto failed_mount;
2041 2056
2042failed_mount4:
2043 journal_destroy(sbi->s_journal);
2044failed_mount3: 2057failed_mount3:
2045 percpu_counter_destroy(&sbi->s_freeblocks_counter); 2058 percpu_counter_destroy(&sbi->s_freeblocks_counter);
2046 percpu_counter_destroy(&sbi->s_freeinodes_counter); 2059 percpu_counter_destroy(&sbi->s_freeinodes_counter);
2047 percpu_counter_destroy(&sbi->s_dirs_counter); 2060 percpu_counter_destroy(&sbi->s_dirs_counter);
2061 journal_destroy(sbi->s_journal);
2048failed_mount2: 2062failed_mount2:
2049 for (i = 0; i < db_count; i++) 2063 for (i = 0; i < db_count; i++)
2050 brelse(sbi->s_group_desc[i]); 2064 brelse(sbi->s_group_desc[i]);
@@ -2278,6 +2292,9 @@ static int ext3_load_journal(struct super_block *sb,
2278 return -EINVAL; 2292 return -EINVAL;
2279 } 2293 }
2280 2294
2295 if (!(journal->j_flags & JFS_BARRIER))
2296 printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
2297
2281 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { 2298 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
2282 err = journal_update_format(journal); 2299 err = journal_update_format(journal);
2283 if (err) { 2300 if (err) {
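
Besides the barrier/nobarrier parsing, the fs/ext3/super.c hunks move the percpu counter initialisation to after the journal has been set up, so that the failed_mount labels once again unwind resources strictly in reverse order of acquisition (and failed_mount2 no longer needs to know about the journal). A standalone model of that goto-unwind convention, with invented resource names:

#include <stdio.h>

/* Invented resources standing in for the group descriptors, the journal and
 * the percpu counters in ext3_fill_super(). */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int fill_super_model(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto fail;
	err = acquire_b();
	if (err)
		goto fail_a;
	err = acquire_c();
	if (err)
		goto fail_b;
	return 0;

fail_b:				/* c failed: undo b, then a */
	release_b();
fail_a:
	release_a();
fail:
	return err;
}

int main(void)
{
	return fill_super_model();
}
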
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 81d605412844..3e0f6af9d08d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5425,7 +5425,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5425 if (error) 5425 if (error)
5426 return error; 5426 return error;
5427 5427
5428 if (ia_valid & ATTR_SIZE) 5428 if (is_quota_modification(inode, attr))
5429 dquot_initialize(inode); 5429 dquot_initialize(inode);
5430 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 5430 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5431 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 5431 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index d5f4661287f9..49667d68769e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1476,8 +1476,8 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
1476 return 0; 1476 return 0;
1477} 1477}
1478 1478
1479static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id, 1479static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1480 struct fs_disk_quota *fdq) 1480 struct fs_disk_quota *fdq)
1481{ 1481{
1482 struct gfs2_sbd *sdp = sb->s_fs_info; 1482 struct gfs2_sbd *sdp = sb->s_fs_info;
1483 struct gfs2_quota_lvb *qlvb; 1483 struct gfs2_quota_lvb *qlvb;
@@ -1521,8 +1521,8 @@ out:
1521/* GFS2 only supports a subset of the XFS fields */ 1521/* GFS2 only supports a subset of the XFS fields */
1522#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD) 1522#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
1523 1523
1524static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id, 1524static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1525 struct fs_disk_quota *fdq) 1525 struct fs_disk_quota *fdq)
1526{ 1526{
1527 struct gfs2_sbd *sdp = sb->s_fs_info; 1527 struct gfs2_sbd *sdp = sb->s_fs_info;
1528 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 1528 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1629,7 +1629,7 @@ out_put:
1629const struct quotactl_ops gfs2_quotactl_ops = { 1629const struct quotactl_ops gfs2_quotactl_ops = {
1630 .quota_sync = gfs2_quota_sync, 1630 .quota_sync = gfs2_quota_sync,
1631 .get_xstate = gfs2_quota_get_xstate, 1631 .get_xstate = gfs2_quota_get_xstate,
1632 .get_xquota = gfs2_xquota_get, 1632 .get_dqblk = gfs2_get_dqblk,
1633 .set_xquota = gfs2_xquota_set, 1633 .set_dqblk = gfs2_set_dqblk,
1634}; 1634};
1635 1635
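
The gfs2 hunk renames its XFS-style callbacks to the generic get_dqblk/set_dqblk names, matching the quotactl interface consolidation implied by the fs/quota/ and include/linux/quota.h entries in the diffstat. Assuming the unified callbacks keep the struct fs_disk_quota argument shown above, the relevant part of quotactl_ops would look roughly like this (a sketch, since the header change is not in this excerpt):

/* Assumed signatures after the unification (include/linux/quota.h). */
struct quotactl_ops {
	/* ... */
	int (*get_dqblk)(struct super_block *sb, int type, qid_t id,
			 struct fs_disk_quota *fdq);
	int (*set_dqblk)(struct super_block *sb, int type, qid_t id,
			 struct fs_disk_quota *fdq);
	/* ... */
};
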
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index ecb44c94ba8d..28a9ddaa0c49 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -786,6 +786,12 @@ wait_for_iobuf:
786 786
787 jbd_debug(3, "JBD: commit phase 6\n"); 787 jbd_debug(3, "JBD: commit phase 6\n");
788 788
789 /* All metadata is written, now write commit record and do cleanup */
790 spin_lock(&journal->j_state_lock);
791 J_ASSERT(commit_transaction->t_state == T_COMMIT);
792 commit_transaction->t_state = T_COMMIT_RECORD;
793 spin_unlock(&journal->j_state_lock);
794
789 if (journal_write_commit_record(journal, commit_transaction)) 795 if (journal_write_commit_record(journal, commit_transaction))
790 err = -EIO; 796 err = -EIO;
791 797
@@ -923,7 +929,7 @@ restart_loop:
923 929
924 jbd_debug(3, "JBD: commit phase 8\n"); 930 jbd_debug(3, "JBD: commit phase 8\n");
925 931
926 J_ASSERT(commit_transaction->t_state == T_COMMIT); 932 J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
927 933
928 commit_transaction->t_state = T_FINISHED; 934 commit_transaction->t_state = T_FINISHED;
929 J_ASSERT(commit_transaction == journal->j_committing_transaction); 935 J_ASSERT(commit_transaction == journal->j_committing_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index bd224eec9b07..93d1e47647bd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -565,6 +565,38 @@ int log_wait_commit(journal_t *journal, tid_t tid)
565} 565}
566 566
567/* 567/*
568 * Return 1 if a given transaction has not yet sent barrier request
569 * connected with a transaction commit. If 0 is returned, transaction
570 * may or may not have sent the barrier. Used to avoid sending barrier
571 * twice in common cases.
572 */
573int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
574{
575 int ret = 0;
576 transaction_t *commit_trans;
577
578 if (!(journal->j_flags & JFS_BARRIER))
579 return 0;
580 spin_lock(&journal->j_state_lock);
581 /* Transaction already committed? */
582 if (tid_geq(journal->j_commit_sequence, tid))
583 goto out;
584 /*
585 * Transaction is being committed and we already proceeded to
586 * writing commit record?
587 */
588 commit_trans = journal->j_committing_transaction;
589 if (commit_trans && commit_trans->t_tid == tid &&
590 commit_trans->t_state >= T_COMMIT_RECORD)
591 goto out;
592 ret = 1;
593out:
594 spin_unlock(&journal->j_state_lock);
595 return ret;
596}
597EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
598
599/*
568 * Log buffer allocation routines: 600 * Log buffer allocation routines:
569 */ 601 */
570 602
@@ -1157,6 +1189,7 @@ int journal_destroy(journal_t *journal)
1157{ 1189{
1158 int err = 0; 1190 int err = 0;
1159 1191
1192
1160 /* Wait for the commit thread to wake up and die. */ 1193 /* Wait for the commit thread to wake up and die. */
1161 journal_kill_thread(journal); 1194 journal_kill_thread(journal);
1162 1195
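
The jbd pieces work as a pair: fs/jbd/commit.c introduces a T_COMMIT_RECORD state that is entered once all metadata is on disk but before the commit record (and thus the barrier) is written, and journal_trans_will_send_data_barrier() reports whether a given transaction has not yet reached that point. The include/linux/jbd.h hunk is not shown here; the state ordering is assumed to become roughly:

/*
 * Assumed ordering of the jbd transaction states after this series (only the
 * relative order matters for the "t_state >= T_COMMIT_RECORD" test above).
 */
enum {
	T_RUNNING,
	T_LOCKED,
	/* ... */
	T_FLUSH,
	T_COMMIT,
	T_COMMIT_RECORD,	/* new: metadata written, commit record still pending */
	T_FINISHED
};

A return value of 1 therefore means "this commit has not started writing its commit record yet, so waiting for it also gets us its barrier"; callers such as ext3_sync_file() fall back to blkdev_issue_flush() only when that promise cannot be made.
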
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 14ba982b3f24..85d9ec659225 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -98,7 +98,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
98 if (rc) 98 if (rc)
99 return rc; 99 return rc;
100 100
101 if (iattr->ia_valid & ATTR_SIZE) 101 if (is_quota_modification(inode, iattr))
102 dquot_initialize(inode); 102 dquot_initialize(inode);
103 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 103 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
104 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 104 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 50c4ee805da4..39eb16ac5f98 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3897,7 +3897,8 @@ static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3897 oinfo->dqi_gi.dqi_free_entry = 3897 oinfo->dqi_gi.dqi_free_entry =
3898 be32_to_cpu(lvb->lvb_free_entry); 3898 be32_to_cpu(lvb->lvb_free_entry);
3899 } else { 3899 } else {
3900 status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh); 3900 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3901 oinfo->dqi_giblk, &bh);
3901 if (status) { 3902 if (status) {
3902 mlog_errno(status); 3903 mlog_errno(status);
3903 goto bail; 3904 goto bail;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f74f1400eccd..97e54b9e654b 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -933,9 +933,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
933 struct ocfs2_super *osb = OCFS2_SB(sb); 933 struct ocfs2_super *osb = OCFS2_SB(sb);
934 struct buffer_head *bh = NULL; 934 struct buffer_head *bh = NULL;
935 handle_t *handle = NULL; 935 handle_t *handle = NULL;
936 int qtype;
937 struct dquot *transfer_from[MAXQUOTAS] = { };
938 struct dquot *transfer_to[MAXQUOTAS] = { }; 936 struct dquot *transfer_to[MAXQUOTAS] = { };
937 int qtype;
939 938
940 mlog_entry("(0x%p, '%.*s')\n", dentry, 939 mlog_entry("(0x%p, '%.*s')\n", dentry,
941 dentry->d_name.len, dentry->d_name.name); 940 dentry->d_name.len, dentry->d_name.name);
@@ -966,10 +965,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
966 if (status) 965 if (status)
967 return status; 966 return status;
968 967
968 if (is_quota_modification(inode, attr))
969 dquot_initialize(inode);
969 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; 970 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
970 if (size_change) { 971 if (size_change) {
971 dquot_initialize(inode);
972
973 status = ocfs2_rw_lock(inode, 1); 972 status = ocfs2_rw_lock(inode, 1);
974 if (status < 0) { 973 if (status < 0) {
975 mlog_errno(status); 974 mlog_errno(status);
@@ -1019,9 +1018,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1019 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { 1018 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1020 transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid, 1019 transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
1021 USRQUOTA); 1020 USRQUOTA);
1022 transfer_from[USRQUOTA] = dqget(sb, inode->i_uid, 1021 if (!transfer_to[USRQUOTA]) {
1023 USRQUOTA);
1024 if (!transfer_to[USRQUOTA] || !transfer_from[USRQUOTA]) {
1025 status = -ESRCH; 1022 status = -ESRCH;
1026 goto bail_unlock; 1023 goto bail_unlock;
1027 } 1024 }
@@ -1031,9 +1028,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1031 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { 1028 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1032 transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid, 1029 transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
1033 GRPQUOTA); 1030 GRPQUOTA);
1034 transfer_from[GRPQUOTA] = dqget(sb, inode->i_gid, 1031 if (!transfer_to[GRPQUOTA]) {
1035 GRPQUOTA);
1036 if (!transfer_to[GRPQUOTA] || !transfer_from[GRPQUOTA]) {
1037 status = -ESRCH; 1032 status = -ESRCH;
1038 goto bail_unlock; 1033 goto bail_unlock;
1039 } 1034 }
@@ -1045,7 +1040,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1045 mlog_errno(status); 1040 mlog_errno(status);
1046 goto bail_unlock; 1041 goto bail_unlock;
1047 } 1042 }
1048 status = dquot_transfer(inode, attr); 1043 status = __dquot_transfer(inode, transfer_to);
1049 if (status < 0) 1044 if (status < 0)
1050 goto bail_commit; 1045 goto bail_commit;
1051 } else { 1046 } else {
@@ -1085,10 +1080,8 @@ bail:
1085 brelse(bh); 1080 brelse(bh);
1086 1081
1087 /* Release quota pointers in case we acquired them */ 1082 /* Release quota pointers in case we acquired them */
1088 for (qtype = 0; qtype < MAXQUOTAS; qtype++) { 1083 for (qtype = 0; qtype < MAXQUOTAS; qtype++)
1089 dqput(transfer_to[qtype]); 1084 dqput(transfer_to[qtype]);
1090 dqput(transfer_from[qtype]);
1091 }
1092 1085
1093 if (!status && attr->ia_valid & ATTR_MODE) { 1086 if (!status && attr->ia_valid & ATTR_MODE) {
1094 status = ocfs2_acl_chmod(inode); 1087 status = ocfs2_acl_chmod(inode);
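
ocfs2_setattr() now acquires only the destination dquots itself and passes them to __dquot_transfer(); the source dquots are the ones already attached to the inode, so the transfer_from array and its extra dqget()/dqput() pair disappear. The split between dquot_transfer() and __dquot_transfer() belongs to the fs/quota/dquot.c and quotaops.h changes in the diffstat; their assumed relationship, sketched with an invented wrapper name:

/*
 * Assumed division of labour (the real code is in the fs/quota/dquot.c hunk,
 * not shown in this excerpt): the generic wrapper looks up the new dquots
 * from the iattr, while __dquot_transfer() works on caller-provided
 * references - which is what OCFS2 needs, because its dqget() may have to
 * take cluster locks in a specific order first.
 */
static int dquot_transfer_sketch(struct inode *inode, struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	int ret, type;

	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
		transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
	if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
		transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);

	ret = __dquot_transfer(inode, transfer_to);
	/* the caller keeps responsibility for the references it obtained,
	 * just like the dqput() loop in the ocfs2 bail: path above */
	for (type = 0; type < MAXQUOTAS; type++)
		dqput(transfer_to[type]);
	return ret;
}
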
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 123bc520a2c0..196fcb52d95d 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -23,6 +23,7 @@
23struct ocfs2_dquot { 23struct ocfs2_dquot {
24 struct dquot dq_dquot; /* Generic VFS dquot */ 24 struct dquot dq_dquot; /* Generic VFS dquot */
25 loff_t dq_local_off; /* Offset in the local quota file */ 25 loff_t dq_local_off; /* Offset in the local quota file */
26 u64 dq_local_phys_blk; /* Physical block carrying quota structure */
26 struct ocfs2_quota_chunk *dq_chunk; /* Chunk dquot is in */ 27 struct ocfs2_quota_chunk *dq_chunk; /* Chunk dquot is in */
27 unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */ 28 unsigned int dq_use_count; /* Number of nodes having reference to this entry in global quota file */
28 s64 dq_origspace; /* Last globally synced space usage */ 29 s64 dq_origspace; /* Last globally synced space usage */
@@ -51,8 +52,9 @@ struct ocfs2_mem_dqinfo {
51 struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ 52 struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */
52 struct buffer_head *dqi_gqi_bh; /* Buffer head with global quota file inode - set only if inode lock is obtained */ 53 struct buffer_head *dqi_gqi_bh; /* Buffer head with global quota file inode - set only if inode lock is obtained */
53 int dqi_gqi_count; /* Number of holders of dqi_gqi_bh */ 54 int dqi_gqi_count; /* Number of holders of dqi_gqi_bh */
55 u64 dqi_giblk; /* Number of block with global information header */
54 struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */ 56 struct buffer_head *dqi_lqi_bh; /* Buffer head with local quota file inode */
55 struct buffer_head *dqi_ibh; /* Buffer with information header */ 57 struct buffer_head *dqi_libh; /* Buffer with local information header */
56 struct qtree_mem_dqinfo dqi_gi; /* Info about global file */ 58 struct qtree_mem_dqinfo dqi_gi; /* Info about global file */
57 struct delayed_work dqi_sync_work; /* Work for syncing dquots */ 59 struct delayed_work dqi_sync_work; /* Work for syncing dquots */
58 struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery 60 struct ocfs2_quota_recovery *dqi_rec; /* Pointer to recovery
@@ -102,8 +104,12 @@ static inline int ocfs2_global_release_dquot(struct dquot *dquot)
102 104
103int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); 105int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
104void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); 106void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
105int ocfs2_read_quota_block(struct inode *inode, u64 v_block, 107int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh);
106 struct buffer_head **bh); 108int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
109 struct buffer_head **bh);
110int ocfs2_create_local_dquot(struct dquot *dquot);
111int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot);
112int ocfs2_local_write_dquot(struct dquot *dquot);
107 113
108extern const struct dquot_operations ocfs2_quota_operations; 114extern const struct dquot_operations ocfs2_quota_operations;
109extern struct quota_format_type ocfs2_quota_format; 115extern struct quota_format_type ocfs2_quota_format;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 04ae76d8c6ab..2bb35fe00511 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -25,8 +25,44 @@
25#include "dlmglue.h" 25#include "dlmglue.h"
26#include "uptodate.h" 26#include "uptodate.h"
27#include "super.h" 27#include "super.h"
28#include "buffer_head_io.h"
28#include "quota.h" 29#include "quota.h"
29 30
31/*
32 * Locking of quotas with OCFS2 is rather complex. Here are rules that
33 * should be obeyed by all the functions:
34 * - any write of quota structure (either to local or global file) is protected
35 * by dqio_mutex or dquot->dq_lock.
36 * - any modification of global quota file holds inode cluster lock, i_mutex,
37 * and ip_alloc_sem of the global quota file (achieved by
38 * ocfs2_lock_global_qf). It also has to hold qinfo_lock.
39 * - an allocation of new blocks for local quota file is protected by
40 * its ip_alloc_sem
41 *
42 * A rough sketch of locking dependencies (lf = local file, gf = global file):
43 * Normal filesystem operation:
44 * start_trans -> dqio_mutex -> write to lf
45 * Syncing of local and global file:
46 * ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
47 * write to gf
48 * -> write to lf
49 * Acquire dquot for the first time:
50 * dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
51 * -> alloc space for gf
52 * -> start_trans -> qinfo_lock -> write to gf
53 * -> ip_alloc_sem of lf -> alloc space for lf
54 * -> write to lf
55 * Release last reference to dquot:
56 * dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
57 * -> write to lf
58 * Note that all the above operations also hold the inode cluster lock of lf.
59 * Recovery:
60 * inode cluster lock of recovered lf
61 * -> read bitmaps -> ip_alloc_sem of lf
62 * -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
63 * write to gf
64 */
65
30static struct workqueue_struct *ocfs2_quota_wq = NULL; 66static struct workqueue_struct *ocfs2_quota_wq = NULL;
31 67
32static void qsync_work_fn(struct work_struct *work); 68static void qsync_work_fn(struct work_struct *work);
@@ -91,8 +127,7 @@ struct qtree_fmt_operations ocfs2_global_ops = {
91 .is_id = ocfs2_global_is_id, 127 .is_id = ocfs2_global_is_id,
92}; 128};
93 129
94static int ocfs2_validate_quota_block(struct super_block *sb, 130int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
95 struct buffer_head *bh)
96{ 131{
97 struct ocfs2_disk_dqtrailer *dqt = 132 struct ocfs2_disk_dqtrailer *dqt =
98 ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); 133 ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
@@ -110,54 +145,19 @@ static int ocfs2_validate_quota_block(struct super_block *sb,
110 return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check); 145 return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
111} 146}
112 147
113int ocfs2_read_quota_block(struct inode *inode, u64 v_block, 148int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
114 struct buffer_head **bh) 149 struct buffer_head **bhp)
115{ 150{
116 int rc = 0; 151 int rc;
117 struct buffer_head *tmp = *bh; 152
118 153 *bhp = NULL;
119 if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) { 154 rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
120 ocfs2_error(inode->i_sb, 155 ocfs2_validate_quota_block);
121 "Quota file %llu is probably corrupted! Requested "
122 "to read block %Lu but file has size only %Lu\n",
123 (unsigned long long)OCFS2_I(inode)->ip_blkno,
124 (unsigned long long)v_block,
125 (unsigned long long)i_size_read(inode));
126 return -EIO;
127 }
128 rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
129 ocfs2_validate_quota_block);
130 if (rc) 156 if (rc)
131 mlog_errno(rc); 157 mlog_errno(rc);
132
133 /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
134 if (!rc && !*bh)
135 *bh = tmp;
136
137 return rc; 158 return rc;
138} 159}
139 160
140static int ocfs2_get_quota_block(struct inode *inode, int block,
141 struct buffer_head **bh)
142{
143 u64 pblock, pcount;
144 int err;
145
146 down_read(&OCFS2_I(inode)->ip_alloc_sem);
147 err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
148 up_read(&OCFS2_I(inode)->ip_alloc_sem);
149 if (err) {
150 mlog_errno(err);
151 return err;
152 }
153 *bh = sb_getblk(inode->i_sb, pblock);
154 if (!*bh) {
155 err = -EIO;
156 mlog_errno(err);
157 }
158 return err;
159}
160
161/* Read data from global quotafile - avoid pagecache and such because we cannot 161/* Read data from global quotafile - avoid pagecache and such because we cannot
162 * afford acquiring the locks... We use quota cluster lock to serialize 162 * afford acquiring the locks... We use quota cluster lock to serialize
163 * operations. Caller is responsible for acquiring it. */ 163 * operations. Caller is responsible for acquiring it. */
@@ -172,6 +172,7 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
172 int err = 0; 172 int err = 0;
173 struct buffer_head *bh; 173 struct buffer_head *bh;
174 size_t toread, tocopy; 174 size_t toread, tocopy;
175 u64 pblock = 0, pcount = 0;
175 176
176 if (off > i_size) 177 if (off > i_size)
177 return 0; 178 return 0;
@@ -180,8 +181,19 @@ ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
180 toread = len; 181 toread = len;
181 while (toread > 0) { 182 while (toread > 0) {
182 tocopy = min_t(size_t, (sb->s_blocksize - offset), toread); 183 tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
184 if (!pcount) {
185 err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
186 &pcount, NULL);
187 if (err) {
188 mlog_errno(err);
189 return err;
190 }
191 } else {
192 pcount--;
193 pblock++;
194 }
183 bh = NULL; 195 bh = NULL;
184 err = ocfs2_read_quota_block(gqinode, blk, &bh); 196 err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
185 if (err) { 197 if (err) {
186 mlog_errno(err); 198 mlog_errno(err);
187 return err; 199 return err;
@@ -209,6 +221,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
209 int err = 0, new = 0, ja_type; 221 int err = 0, new = 0, ja_type;
210 struct buffer_head *bh = NULL; 222 struct buffer_head *bh = NULL;
211 handle_t *handle = journal_current_handle(); 223 handle_t *handle = journal_current_handle();
224 u64 pblock, pcount;
212 225
213 if (!handle) { 226 if (!handle) {
214 mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled " 227 mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
@@ -221,12 +234,11 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
221 len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset; 234 len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
222 } 235 }
223 236
224 mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
225 if (gqinode->i_size < off + len) { 237 if (gqinode->i_size < off + len) {
226 loff_t rounded_end = 238 loff_t rounded_end =
227 ocfs2_align_bytes_to_blocks(sb, off + len); 239 ocfs2_align_bytes_to_blocks(sb, off + len);
228 240
229 /* Space is already allocated in ocfs2_global_read_dquot() */ 241 /* Space is already allocated in ocfs2_acquire_dquot() */
230 err = ocfs2_simple_size_update(gqinode, 242 err = ocfs2_simple_size_update(gqinode,
231 oinfo->dqi_gqi_bh, 243 oinfo->dqi_gqi_bh,
232 rounded_end); 244 rounded_end);
@@ -234,13 +246,20 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
234 goto out; 246 goto out;
235 new = 1; 247 new = 1;
236 } 248 }
249 err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
250 if (err) {
251 mlog_errno(err);
252 goto out;
253 }
237 /* Not rewriting whole block? */ 254 /* Not rewriting whole block? */
238 if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) && 255 if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
239 !new) { 256 !new) {
240 err = ocfs2_read_quota_block(gqinode, blk, &bh); 257 err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
241 ja_type = OCFS2_JOURNAL_ACCESS_WRITE; 258 ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
242 } else { 259 } else {
243 err = ocfs2_get_quota_block(gqinode, blk, &bh); 260 bh = sb_getblk(sb, pblock);
261 if (!bh)
262 err = -ENOMEM;
244 ja_type = OCFS2_JOURNAL_ACCESS_CREATE; 263 ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
245 } 264 }
246 if (err) { 265 if (err) {
@@ -265,13 +284,11 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
265 brelse(bh); 284 brelse(bh);
266out: 285out:
267 if (err) { 286 if (err) {
268 mutex_unlock(&gqinode->i_mutex);
269 mlog_errno(err); 287 mlog_errno(err);
270 return err; 288 return err;
271 } 289 }
272 gqinode->i_version++; 290 gqinode->i_version++;
273 ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh); 291 ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
274 mutex_unlock(&gqinode->i_mutex);
275 return len; 292 return len;
276} 293}
277 294
@@ -289,11 +306,23 @@ int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
289 else 306 else
290 WARN_ON(bh != oinfo->dqi_gqi_bh); 307 WARN_ON(bh != oinfo->dqi_gqi_bh);
291 spin_unlock(&dq_data_lock); 308 spin_unlock(&dq_data_lock);
309 if (ex) {
310 mutex_lock(&oinfo->dqi_gqinode->i_mutex);
311 down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
312 } else {
313 down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
314 }
292 return 0; 315 return 0;
293} 316}
294 317
295void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex) 318void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
296{ 319{
320 if (ex) {
321 up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
322 mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
323 } else {
324 up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
325 }
297 ocfs2_inode_unlock(oinfo->dqi_gqinode, ex); 326 ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
298 brelse(oinfo->dqi_gqi_bh); 327 brelse(oinfo->dqi_gqi_bh);
299 spin_lock(&dq_data_lock); 328 spin_lock(&dq_data_lock);
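The two hunks above fold i_mutex and ip_alloc_sem of the global quota inode into the lock/unlock helpers. An illustrative caller of the exclusive variant (a sketch; the function name is made up):

static int example_update_global_qf(struct ocfs2_mem_dqinfo *oinfo)
{
	int status;

	/* takes the global quota cluster lock, then i_mutex, then ip_alloc_sem */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		return status;
	/* ... modify the global quota file under all three locks ... */
	ocfs2_unlock_global_qf(oinfo, 1);	/* dropped in reverse order */
	return 0;
}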
@@ -311,6 +340,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
311 struct ocfs2_global_disk_dqinfo dinfo; 340 struct ocfs2_global_disk_dqinfo dinfo;
312 struct mem_dqinfo *info = sb_dqinfo(sb, type); 341 struct mem_dqinfo *info = sb_dqinfo(sb, type);
313 struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv; 342 struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
343 u64 pcount;
314 int status; 344 int status;
315 345
316 mlog_entry_void(); 346 mlog_entry_void();
@@ -337,9 +367,19 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
337 mlog_errno(status); 367 mlog_errno(status);
338 goto out_err; 368 goto out_err;
339 } 369 }
370
371 status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
372 &pcount, NULL);
373 if (status < 0)
374 goto out_unlock;
375
376 status = ocfs2_qinfo_lock(oinfo, 0);
377 if (status < 0)
378 goto out_unlock;
340 status = sb->s_op->quota_read(sb, type, (char *)&dinfo, 379 status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
341 sizeof(struct ocfs2_global_disk_dqinfo), 380 sizeof(struct ocfs2_global_disk_dqinfo),
342 OCFS2_GLOBAL_INFO_OFF); 381 OCFS2_GLOBAL_INFO_OFF);
382 ocfs2_qinfo_unlock(oinfo, 0);
343 ocfs2_unlock_global_qf(oinfo, 0); 383 ocfs2_unlock_global_qf(oinfo, 0);
344 if (status != sizeof(struct ocfs2_global_disk_dqinfo)) { 384 if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
345 mlog(ML_ERROR, "Cannot read global quota info (%d).\n", 385 mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
@@ -366,6 +406,10 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
366out_err: 406out_err:
367 mlog_exit(status); 407 mlog_exit(status);
368 return status; 408 return status;
409out_unlock:
410 ocfs2_unlock_global_qf(oinfo, 0);
411 mlog_errno(status);
412 goto out_err;
369} 413}
370 414
 371/* Write information to global quota file. Expects exclusive lock on quota 415
@@ -424,78 +468,10 @@ static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
424 468
425static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type) 469static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
426{ 470{
427 /* We modify all the allocated blocks, tree root, and info block */ 471 /* We modify all the allocated blocks, tree root, info block and
472 * the inode */
428 return (ocfs2_global_qinit_alloc(sb, type) + 2) * 473 return (ocfs2_global_qinit_alloc(sb, type) + 2) *
429 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS; 474 OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
430}
431
432/* Read in information from global quota file and acquire a reference to it.
433 * dquot_acquire() has already started the transaction and locked quota file */
434int ocfs2_global_read_dquot(struct dquot *dquot)
435{
436 int err, err2, ex = 0;
437 struct super_block *sb = dquot->dq_sb;
438 int type = dquot->dq_type;
439 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
440 struct ocfs2_super *osb = OCFS2_SB(sb);
441 struct inode *gqinode = info->dqi_gqinode;
442 int need_alloc = ocfs2_global_qinit_alloc(sb, type);
443 handle_t *handle = NULL;
444
445 err = ocfs2_qinfo_lock(info, 0);
446 if (err < 0)
447 goto out;
448 err = qtree_read_dquot(&info->dqi_gi, dquot);
449 if (err < 0)
450 goto out_qlock;
451 OCFS2_DQUOT(dquot)->dq_use_count++;
452 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
453 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
454 ocfs2_qinfo_unlock(info, 0);
455
456 if (!dquot->dq_off) { /* No real quota entry? */
457 ex = 1;
458 /*
459 * Add blocks to quota file before we start a transaction since
460 * locking allocators ranks above a transaction start
461 */
462 WARN_ON(journal_current_handle());
463 down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
464 err = ocfs2_extend_no_holes(gqinode,
465 gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
466 gqinode->i_size);
467 up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
468 if (err < 0)
469 goto out;
470 }
471
472 handle = ocfs2_start_trans(osb,
473 ocfs2_calc_global_qinit_credits(sb, type));
474 if (IS_ERR(handle)) {
475 err = PTR_ERR(handle);
476 goto out;
477 }
478 err = ocfs2_qinfo_lock(info, ex);
479 if (err < 0)
480 goto out_trans;
481 err = qtree_write_dquot(&info->dqi_gi, dquot);
482 if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
483 err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
484 if (!err)
485 err = err2;
486 }
487out_qlock:
488 if (ex)
489 ocfs2_qinfo_unlock(info, 1);
490 else
491 ocfs2_qinfo_unlock(info, 0);
492out_trans:
493 if (handle)
494 ocfs2_commit_trans(osb, handle);
495out:
496 if (err < 0)
497 mlog_errno(err);
498 return err;
499} 475}
500 476
501/* Sync local information about quota modifications with global quota file. 477/* Sync local information about quota modifications with global quota file.
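The credit change above can be read with made-up numbers (illustrative only):

/*
 * If ocfs2_global_qinit_alloc() reports 3 blocks and
 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS is 1, the old formula reserved
 * (3 + 2) * 1 = 5 credits for the allocated blocks, tree root and info
 * block; the new formula reserves (3 + 2) * 1 + 1 = 6, the extra credit
 * covering the global quota inode that the transaction now also updates.
 */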
@@ -636,14 +612,13 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
636 } 612 }
637 mutex_lock(&sb_dqopt(sb)->dqio_mutex); 613 mutex_lock(&sb_dqopt(sb)->dqio_mutex);
638 status = ocfs2_sync_dquot(dquot); 614 status = ocfs2_sync_dquot(dquot);
639 mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
640 if (status < 0) 615 if (status < 0)
641 mlog_errno(status); 616 mlog_errno(status);
642 /* We have to write local structure as well... */ 617 /* We have to write local structure as well... */
643 dquot_mark_dquot_dirty(dquot); 618 status = ocfs2_local_write_dquot(dquot);
644 status = dquot_commit(dquot);
645 if (status < 0) 619 if (status < 0)
646 mlog_errno(status); 620 mlog_errno(status);
621 mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
647 ocfs2_commit_trans(osb, handle); 622 ocfs2_commit_trans(osb, handle);
648out_ilock: 623out_ilock:
649 ocfs2_unlock_global_qf(oinfo, 1); 624 ocfs2_unlock_global_qf(oinfo, 1);
@@ -682,7 +657,9 @@ static int ocfs2_write_dquot(struct dquot *dquot)
682 mlog_errno(status); 657 mlog_errno(status);
683 goto out; 658 goto out;
684 } 659 }
685 status = dquot_commit(dquot); 660 mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
661 status = ocfs2_local_write_dquot(dquot);
662 mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
686 ocfs2_commit_trans(osb, handle); 663 ocfs2_commit_trans(osb, handle);
687out: 664out:
688 mlog_exit(status); 665 mlog_exit(status);
@@ -713,6 +690,10 @@ static int ocfs2_release_dquot(struct dquot *dquot)
713 690
714 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 691 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
715 692
693 mutex_lock(&dquot->dq_lock);
 694	/* Check that we are not racing with some other dqget() */
695 if (atomic_read(&dquot->dq_count) > 1)
696 goto out;
716 status = ocfs2_lock_global_qf(oinfo, 1); 697 status = ocfs2_lock_global_qf(oinfo, 1);
717 if (status < 0) 698 if (status < 0)
718 goto out; 699 goto out;
@@ -723,30 +704,113 @@ static int ocfs2_release_dquot(struct dquot *dquot)
723 mlog_errno(status); 704 mlog_errno(status);
724 goto out_ilock; 705 goto out_ilock;
725 } 706 }
726 status = dquot_release(dquot); 707
708 status = ocfs2_global_release_dquot(dquot);
709 if (status < 0) {
710 mlog_errno(status);
711 goto out_trans;
712 }
713 status = ocfs2_local_release_dquot(handle, dquot);
714 /*
715 * If we fail here, we cannot do much as global structure is
716 * already released. So just complain...
717 */
718 if (status < 0)
719 mlog_errno(status);
720 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
721out_trans:
727 ocfs2_commit_trans(osb, handle); 722 ocfs2_commit_trans(osb, handle);
728out_ilock: 723out_ilock:
729 ocfs2_unlock_global_qf(oinfo, 1); 724 ocfs2_unlock_global_qf(oinfo, 1);
730out: 725out:
726 mutex_unlock(&dquot->dq_lock);
731 mlog_exit(status); 727 mlog_exit(status);
732 return status; 728 return status;
733} 729}
734 730
731/*
732 * Read global dquot structure from disk or create it if it does
733 * not exist. Also update use count of the global structure and
734 * create structure in node-local quota file.
735 */
735static int ocfs2_acquire_dquot(struct dquot *dquot) 736static int ocfs2_acquire_dquot(struct dquot *dquot)
736{ 737{
737 struct ocfs2_mem_dqinfo *oinfo = 738 int status = 0, err;
738 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; 739 int ex = 0;
739 int status = 0; 740 struct super_block *sb = dquot->dq_sb;
741 struct ocfs2_super *osb = OCFS2_SB(sb);
742 int type = dquot->dq_type;
743 struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
744 struct inode *gqinode = info->dqi_gqinode;
745 int need_alloc = ocfs2_global_qinit_alloc(sb, type);
746 handle_t *handle;
740 747
741 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 748 mlog_entry("id=%u, type=%d", dquot->dq_id, type);
742 /* We need an exclusive lock, because we're going to update use count 749 mutex_lock(&dquot->dq_lock);
743 * and instantiate possibly new dquot structure */ 750 /*
744 status = ocfs2_lock_global_qf(oinfo, 1); 751 * We need an exclusive lock, because we're going to update use count
752 * and instantiate possibly new dquot structure
753 */
754 status = ocfs2_lock_global_qf(info, 1);
745 if (status < 0) 755 if (status < 0)
746 goto out; 756 goto out;
747 status = dquot_acquire(dquot); 757 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
748 ocfs2_unlock_global_qf(oinfo, 1); 758 status = ocfs2_qinfo_lock(info, 0);
759 if (status < 0)
760 goto out_dq;
761 status = qtree_read_dquot(&info->dqi_gi, dquot);
762 ocfs2_qinfo_unlock(info, 0);
763 if (status < 0)
764 goto out_dq;
765 }
766 set_bit(DQ_READ_B, &dquot->dq_flags);
767
768 OCFS2_DQUOT(dquot)->dq_use_count++;
769 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
770 OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
771 if (!dquot->dq_off) { /* No real quota entry? */
772 ex = 1;
773 /*
774 * Add blocks to quota file before we start a transaction since
775 * locking allocators ranks above a transaction start
776 */
777 WARN_ON(journal_current_handle());
778 status = ocfs2_extend_no_holes(gqinode,
779 gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
780 gqinode->i_size);
781 if (status < 0)
782 goto out_dq;
783 }
784
785 handle = ocfs2_start_trans(osb,
786 ocfs2_calc_global_qinit_credits(sb, type));
787 if (IS_ERR(handle)) {
788 status = PTR_ERR(handle);
789 goto out_dq;
790 }
791 status = ocfs2_qinfo_lock(info, ex);
792 if (status < 0)
793 goto out_trans;
794 status = qtree_write_dquot(&info->dqi_gi, dquot);
795 if (ex && info_dirty(sb_dqinfo(sb, type))) {
796 err = __ocfs2_global_write_info(sb, type);
797 if (!status)
798 status = err;
799 }
800 ocfs2_qinfo_unlock(info, ex);
801out_trans:
802 ocfs2_commit_trans(osb, handle);
803out_dq:
804 ocfs2_unlock_global_qf(info, 1);
805 if (status < 0)
806 goto out;
807
808 status = ocfs2_create_local_dquot(dquot);
809 if (status < 0)
810 goto out;
811 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
749out: 812out:
813 mutex_unlock(&dquot->dq_lock);
750 mlog_exit(status); 814 mlog_exit(status);
751 return status; 815 return status;
752} 816}
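For orientation, the lock and transaction ordering established by the rewritten ocfs2_acquire_dquot() above, condensed into an illustrative outline:

/*
 * mutex_lock(&dquot->dq_lock)
 *   ocfs2_lock_global_qf(info, 1)        cluster lock, i_mutex, ip_alloc_sem
 *     qtree_read_dquot()                 only if DQ_READ_B was not yet set
 *     ocfs2_extend_no_holes()            only when no on-disk entry exists
 *     ocfs2_start_trans()
 *       ocfs2_qinfo_lock(); qtree_write_dquot(); ocfs2_qinfo_unlock()
 *     ocfs2_commit_trans()
 *   ocfs2_unlock_global_qf(info, 1)
 *   ocfs2_create_local_dquot()           takes ip_alloc_sem of the local file
 *   set_bit(DQ_ACTIVE_B, &dquot->dq_flags)
 * mutex_unlock(&dquot->dq_lock)
 */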
@@ -768,7 +832,6 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
768 struct ocfs2_super *osb = OCFS2_SB(sb); 832 struct ocfs2_super *osb = OCFS2_SB(sb);
769 833
770 mlog_entry("id=%u, type=%d", dquot->dq_id, type); 834 mlog_entry("id=%u, type=%d", dquot->dq_id, type);
771 dquot_mark_dquot_dirty(dquot);
772 835
773 /* In case user set some limits, sync dquot immediately to global 836 /* In case user set some limits, sync dquot immediately to global
774 * quota file so that information propagates quicker */ 837 * quota file so that information propagates quicker */
@@ -791,14 +854,16 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
791 mlog_errno(status); 854 mlog_errno(status);
792 goto out_ilock; 855 goto out_ilock;
793 } 856 }
857 mutex_lock(&sb_dqopt(sb)->dqio_mutex);
794 status = ocfs2_sync_dquot(dquot); 858 status = ocfs2_sync_dquot(dquot);
795 if (status < 0) { 859 if (status < 0) {
796 mlog_errno(status); 860 mlog_errno(status);
797 goto out_trans; 861 goto out_dlock;
798 } 862 }
799 /* Now write updated local dquot structure */ 863 /* Now write updated local dquot structure */
800 status = dquot_commit(dquot); 864 status = ocfs2_local_write_dquot(dquot);
801out_trans: 865out_dlock:
866 mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
802 ocfs2_commit_trans(osb, handle); 867 ocfs2_commit_trans(osb, handle);
803out_ilock: 868out_ilock:
804 ocfs2_unlock_global_qf(oinfo, 1); 869 ocfs2_unlock_global_qf(oinfo, 1);
@@ -850,7 +915,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
850} 915}
851 916
852const struct dquot_operations ocfs2_quota_operations = { 917const struct dquot_operations ocfs2_quota_operations = {
853 .write_dquot = ocfs2_write_dquot, 918 /* We never make dquot dirty so .write_dquot is never called */
854 .acquire_dquot = ocfs2_acquire_dquot, 919 .acquire_dquot = ocfs2_acquire_dquot,
855 .release_dquot = ocfs2_release_dquot, 920 .release_dquot = ocfs2_release_dquot,
856 .mark_dirty = ocfs2_mark_dquot_dirty, 921 .mark_dirty = ocfs2_mark_dquot_dirty,
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 884b641f199e..8bd70d4d184d 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -22,6 +22,7 @@
22#include "dlmglue.h" 22#include "dlmglue.h"
23#include "quota.h" 23#include "quota.h"
24#include "uptodate.h" 24#include "uptodate.h"
25#include "super.h"
25 26
26/* Number of local quota structures per block */ 27/* Number of local quota structures per block */
27static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) 28static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -129,6 +130,39 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
129 return 0; 130 return 0;
130} 131}
131 132
133/*
134 * Read quota block from a given logical offset.
135 *
136 * This function acquires ip_alloc_sem and thus it must not be called with a
137 * transaction started.
138 */
139static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
140 struct buffer_head **bh)
141{
142 int rc = 0;
143 struct buffer_head *tmp = *bh;
144
145 if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
146 ocfs2_error(inode->i_sb,
147 "Quota file %llu is probably corrupted! Requested "
148 "to read block %Lu but file has size only %Lu\n",
149 (unsigned long long)OCFS2_I(inode)->ip_blkno,
150 (unsigned long long)v_block,
151 (unsigned long long)i_size_read(inode));
152 return -EIO;
153 }
154 rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
155 ocfs2_validate_quota_block);
156 if (rc)
157 mlog_errno(rc);
158
159 /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
160 if (!rc && !*bh)
161 *bh = tmp;
162
163 return rc;
164}
165
132/* Check whether we understand format of quota files */ 166/* Check whether we understand format of quota files */
133static int ocfs2_local_check_quota_file(struct super_block *sb, int type) 167static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
134{ 168{
@@ -671,7 +705,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
671 INIT_LIST_HEAD(&oinfo->dqi_chunk); 705 INIT_LIST_HEAD(&oinfo->dqi_chunk);
672 oinfo->dqi_rec = NULL; 706 oinfo->dqi_rec = NULL;
673 oinfo->dqi_lqi_bh = NULL; 707 oinfo->dqi_lqi_bh = NULL;
674 oinfo->dqi_ibh = NULL; 708 oinfo->dqi_libh = NULL;
675 709
676 status = ocfs2_global_read_info(sb, type); 710 status = ocfs2_global_read_info(sb, type);
677 if (status < 0) 711 if (status < 0)
@@ -697,7 +731,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
697 info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags); 731 info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
698 oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks); 732 oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
699 oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks); 733 oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
700 oinfo->dqi_ibh = bh; 734 oinfo->dqi_libh = bh;
701 735
702 /* We crashed when using local quota file? */ 736 /* We crashed when using local quota file? */
703 if (!(info->dqi_flags & OLQF_CLEAN)) { 737 if (!(info->dqi_flags & OLQF_CLEAN)) {
@@ -759,7 +793,7 @@ static int ocfs2_local_write_info(struct super_block *sb, int type)
759{ 793{
760 struct mem_dqinfo *info = sb_dqinfo(sb, type); 794 struct mem_dqinfo *info = sb_dqinfo(sb, type);
761 struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv) 795 struct buffer_head *bh = ((struct ocfs2_mem_dqinfo *)info->dqi_priv)
762 ->dqi_ibh; 796 ->dqi_libh;
763 int status; 797 int status;
764 798
765 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info, 799 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
@@ -782,10 +816,6 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
782 int mark_clean = 1, len; 816 int mark_clean = 1, len;
783 int status; 817 int status;
784 818
785 /* At this point we know there are no more dquots and thus
786 * even if there's some sync in the pdflush queue, it won't
787 * find any dquots and return without doing anything */
788 cancel_delayed_work_sync(&oinfo->dqi_sync_work);
789 iput(oinfo->dqi_gqinode); 819 iput(oinfo->dqi_gqinode);
790 ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock); 820 ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock);
791 ocfs2_lock_res_free(&oinfo->dqi_gqlock); 821 ocfs2_lock_res_free(&oinfo->dqi_gqlock);
@@ -820,7 +850,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
820 /* Mark local file as clean */ 850 /* Mark local file as clean */
821 info->dqi_flags |= OLQF_CLEAN; 851 info->dqi_flags |= OLQF_CLEAN;
822 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], 852 status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
823 oinfo->dqi_ibh, 853 oinfo->dqi_libh,
824 olq_update_info, 854 olq_update_info,
825 info); 855 info);
826 if (status < 0) { 856 if (status < 0) {
@@ -830,7 +860,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
830 860
831out: 861out:
832 ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1); 862 ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
833 brelse(oinfo->dqi_ibh); 863 brelse(oinfo->dqi_libh);
834 brelse(oinfo->dqi_lqi_bh); 864 brelse(oinfo->dqi_lqi_bh);
835 kfree(oinfo); 865 kfree(oinfo);
836 return 0; 866 return 0;
@@ -858,22 +888,21 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
858} 888}
859 889
860/* Write dquot to local quota file */ 890/* Write dquot to local quota file */
861static int ocfs2_local_write_dquot(struct dquot *dquot) 891int ocfs2_local_write_dquot(struct dquot *dquot)
862{ 892{
863 struct super_block *sb = dquot->dq_sb; 893 struct super_block *sb = dquot->dq_sb;
864 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 894 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
865 struct buffer_head *bh = NULL; 895 struct buffer_head *bh;
896 struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
866 int status; 897 int status;
867 898
868 status = ocfs2_read_quota_block(sb_dqopt(sb)->files[dquot->dq_type], 899 status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
869 ol_dqblk_file_block(sb, od->dq_local_off), 900 &bh);
870 &bh);
871 if (status) { 901 if (status) {
872 mlog_errno(status); 902 mlog_errno(status);
873 goto out; 903 goto out;
874 } 904 }
875 status = ocfs2_modify_bh(sb_dqopt(sb)->files[dquot->dq_type], bh, 905 status = ocfs2_modify_bh(lqinode, bh, olq_set_dquot, od);
876 olq_set_dquot, od);
877 if (status < 0) { 906 if (status < 0) {
878 mlog_errno(status); 907 mlog_errno(status);
879 goto out; 908 goto out;
@@ -973,10 +1002,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
973 } 1002 }
974 1003
975 /* Initialize chunk header */ 1004 /* Initialize chunk header */
976 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
977 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, 1005 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
978 &p_blkno, NULL, NULL); 1006 &p_blkno, NULL, NULL);
979 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
980 if (status < 0) { 1007 if (status < 0) {
981 mlog_errno(status); 1008 mlog_errno(status);
982 goto out_trans; 1009 goto out_trans;
@@ -1004,10 +1031,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
1004 ocfs2_journal_dirty(handle, bh); 1031 ocfs2_journal_dirty(handle, bh);
1005 1032
1006 /* Initialize new block with structures */ 1033 /* Initialize new block with structures */
1007 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1008 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1, 1034 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1,
1009 &p_blkno, NULL, NULL); 1035 &p_blkno, NULL, NULL);
1010 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1011 if (status < 0) { 1036 if (status < 0) {
1012 mlog_errno(status); 1037 mlog_errno(status);
1013 goto out_trans; 1038 goto out_trans;
@@ -1104,10 +1129,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
1104 } 1129 }
1105 1130
1106 /* Get buffer from the just added block */ 1131 /* Get buffer from the just added block */
1107 down_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1108 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, 1132 status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks,
1109 &p_blkno, NULL, NULL); 1133 &p_blkno, NULL, NULL);
1110 up_read(&OCFS2_I(lqinode)->ip_alloc_sem);
1111 if (status < 0) { 1134 if (status < 0) {
1112 mlog_errno(status); 1135 mlog_errno(status);
1113 goto out; 1136 goto out;
@@ -1188,7 +1211,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
1188} 1211}
1189 1212
1190/* Create dquot in the local file for given id */ 1213/* Create dquot in the local file for given id */
1191static int ocfs2_create_local_dquot(struct dquot *dquot) 1214int ocfs2_create_local_dquot(struct dquot *dquot)
1192{ 1215{
1193 struct super_block *sb = dquot->dq_sb; 1216 struct super_block *sb = dquot->dq_sb;
1194 int type = dquot->dq_type; 1217 int type = dquot->dq_type;
@@ -1197,17 +1220,27 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
1197 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot); 1220 struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
1198 int offset; 1221 int offset;
1199 int status; 1222 int status;
1223 u64 pcount;
1200 1224
1225 down_write(&OCFS2_I(lqinode)->ip_alloc_sem);
1201 chunk = ocfs2_find_free_entry(sb, type, &offset); 1226 chunk = ocfs2_find_free_entry(sb, type, &offset);
1202 if (!chunk) { 1227 if (!chunk) {
1203 chunk = ocfs2_extend_local_quota_file(sb, type, &offset); 1228 chunk = ocfs2_extend_local_quota_file(sb, type, &offset);
1204 if (IS_ERR(chunk)) 1229 if (IS_ERR(chunk)) {
1205 return PTR_ERR(chunk); 1230 status = PTR_ERR(chunk);
1231 goto out;
1232 }
1206 } else if (IS_ERR(chunk)) { 1233 } else if (IS_ERR(chunk)) {
1207 return PTR_ERR(chunk); 1234 status = PTR_ERR(chunk);
1235 goto out;
1208 } 1236 }
1209 od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset); 1237 od->dq_local_off = ol_dqblk_off(sb, chunk->qc_num, offset);
1210 od->dq_chunk = chunk; 1238 od->dq_chunk = chunk;
1239 status = ocfs2_extent_map_get_blocks(lqinode,
1240 ol_dqblk_block(sb, chunk->qc_num, offset),
1241 &od->dq_local_phys_blk,
1242 &pcount,
1243 NULL);
1211 1244
1212 /* Initialize dquot structure on disk */ 1245 /* Initialize dquot structure on disk */
1213 status = ocfs2_local_write_dquot(dquot); 1246 status = ocfs2_local_write_dquot(dquot);
@@ -1224,39 +1257,15 @@ static int ocfs2_create_local_dquot(struct dquot *dquot)
1224 goto out; 1257 goto out;
1225 } 1258 }
1226out: 1259out:
1260 up_write(&OCFS2_I(lqinode)->ip_alloc_sem);
1227 return status; 1261 return status;
1228} 1262}
1229 1263
1230/* Create entry in local file for dquot, load data from the global file */ 1264/*
1231static int ocfs2_local_read_dquot(struct dquot *dquot) 1265 * Release dquot structure from local quota file. ocfs2_release_dquot() has
1232{ 1266 * already started a transaction and written all changes to global quota file
1233 int status; 1267 */
1234 1268int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
1235 mlog_entry("id=%u, type=%d\n", dquot->dq_id, dquot->dq_type);
1236
1237 status = ocfs2_global_read_dquot(dquot);
1238 if (status < 0) {
1239 mlog_errno(status);
1240 goto out_err;
1241 }
1242
1243 /* Now create entry in the local quota file */
1244 status = ocfs2_create_local_dquot(dquot);
1245 if (status < 0) {
1246 mlog_errno(status);
1247 goto out_err;
1248 }
1249 mlog_exit(0);
1250 return 0;
1251out_err:
1252 mlog_exit(status);
1253 return status;
1254}
1255
1256/* Release dquot structure from local quota file. ocfs2_release_dquot() has
1257 * already started a transaction and obtained exclusive lock for global
1258 * quota file. */
1259static int ocfs2_local_release_dquot(struct dquot *dquot)
1260{ 1269{
1261 int status; 1270 int status;
1262 int type = dquot->dq_type; 1271 int type = dquot->dq_type;
@@ -1264,15 +1273,6 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
1264 struct super_block *sb = dquot->dq_sb; 1273 struct super_block *sb = dquot->dq_sb;
1265 struct ocfs2_local_disk_chunk *dchunk; 1274 struct ocfs2_local_disk_chunk *dchunk;
1266 int offset; 1275 int offset;
1267 handle_t *handle = journal_current_handle();
1268
1269 BUG_ON(!handle);
1270 /* First write all local changes to global file */
1271 status = ocfs2_global_release_dquot(dquot);
1272 if (status < 0) {
1273 mlog_errno(status);
1274 goto out;
1275 }
1276 1276
1277 status = ocfs2_journal_access_dq(handle, 1277 status = ocfs2_journal_access_dq(handle,
1278 INODE_CACHE(sb_dqopt(sb)->files[type]), 1278 INODE_CACHE(sb_dqopt(sb)->files[type]),
@@ -1305,9 +1305,6 @@ static const struct quota_format_ops ocfs2_format_ops = {
1305 .read_file_info = ocfs2_local_read_info, 1305 .read_file_info = ocfs2_local_read_info,
1306 .write_file_info = ocfs2_global_write_info, 1306 .write_file_info = ocfs2_global_write_info,
1307 .free_file_info = ocfs2_local_free_info, 1307 .free_file_info = ocfs2_local_free_info,
1308 .read_dqblk = ocfs2_local_read_dquot,
1309 .commit_dqblk = ocfs2_local_write_dquot,
1310 .release_dqblk = ocfs2_local_release_dquot,
1311}; 1308};
1312 1309
1313struct quota_format_type ocfs2_quota_format = { 1310struct quota_format_type ocfs2_quota_format = {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 1c2c39f6f0b6..2c26ce251cb3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -938,12 +938,16 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
938 int type; 938 int type;
939 struct inode *inode; 939 struct inode *inode;
940 struct super_block *sb = osb->sb; 940 struct super_block *sb = osb->sb;
941 struct ocfs2_mem_dqinfo *oinfo;
941 942
942 /* We mostly ignore errors in this function because there's not much 943 /* We mostly ignore errors in this function because there's not much
943 * we can do when we see them */ 944 * we can do when we see them */
944 for (type = 0; type < MAXQUOTAS; type++) { 945 for (type = 0; type < MAXQUOTAS; type++) {
945 if (!sb_has_quota_loaded(sb, type)) 946 if (!sb_has_quota_loaded(sb, type))
946 continue; 947 continue;
948 /* Cancel periodic syncing before we grab dqonoff_mutex */
949 oinfo = sb_dqinfo(sb, type)->dqi_priv;
950 cancel_delayed_work_sync(&oinfo->dqi_sync_work);
947 inode = igrab(sb->s_dquot.files[type]); 951 inode = igrab(sb->s_dquot.files[type]);
948 /* Turn off quotas. This will remove all dquot structures from 952 /* Turn off quotas. This will remove all dquot structures from
949 * memory and so they will be automatically synced to global 953 * memory and so they will be automatically synced to global
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 788b5802a7ce..655a4c52b8c3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,7 +82,7 @@
82 82
83/* 83/*
84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas 84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
85 * and quota formats, dqstats structure containing statistics about the lists 85 * and quota formats.
86 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and 86 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
87 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. 87 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
88 * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly 88 * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
@@ -132,7 +132,9 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
132__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); 132__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
133EXPORT_SYMBOL(dq_data_lock); 133EXPORT_SYMBOL(dq_data_lock);
134 134
135#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
135static char *quotatypes[] = INITQFNAMES; 136static char *quotatypes[] = INITQFNAMES;
137#endif
136static struct quota_format_type *quota_formats; /* List of registered formats */ 138static struct quota_format_type *quota_formats; /* List of registered formats */
137static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; 139static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
138 140
@@ -226,6 +228,10 @@ static struct hlist_head *dquot_hash;
226 228
227struct dqstats dqstats; 229struct dqstats dqstats;
228EXPORT_SYMBOL(dqstats); 230EXPORT_SYMBOL(dqstats);
231#ifdef CONFIG_SMP
232struct dqstats *dqstats_pcpu;
233EXPORT_SYMBOL(dqstats_pcpu);
234#endif
229 235
230static qsize_t inode_get_rsv_space(struct inode *inode); 236static qsize_t inode_get_rsv_space(struct inode *inode);
231static void __dquot_initialize(struct inode *inode, int type); 237static void __dquot_initialize(struct inode *inode, int type);
@@ -273,7 +279,7 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
273static inline void put_dquot_last(struct dquot *dquot) 279static inline void put_dquot_last(struct dquot *dquot)
274{ 280{
275 list_add_tail(&dquot->dq_free, &free_dquots); 281 list_add_tail(&dquot->dq_free, &free_dquots);
276 dqstats.free_dquots++; 282 dqstats_inc(DQST_FREE_DQUOTS);
277} 283}
278 284
279static inline void remove_free_dquot(struct dquot *dquot) 285static inline void remove_free_dquot(struct dquot *dquot)
@@ -281,7 +287,7 @@ static inline void remove_free_dquot(struct dquot *dquot)
281 if (list_empty(&dquot->dq_free)) 287 if (list_empty(&dquot->dq_free))
282 return; 288 return;
283 list_del_init(&dquot->dq_free); 289 list_del_init(&dquot->dq_free);
284 dqstats.free_dquots--; 290 dqstats_dec(DQST_FREE_DQUOTS);
285} 291}
286 292
287static inline void put_inuse(struct dquot *dquot) 293static inline void put_inuse(struct dquot *dquot)
@@ -289,12 +295,12 @@ static inline void put_inuse(struct dquot *dquot)
289 /* We add to the back of inuse list so we don't have to restart 295 /* We add to the back of inuse list so we don't have to restart
290 * when traversing this list and we block */ 296 * when traversing this list and we block */
291 list_add_tail(&dquot->dq_inuse, &inuse_list); 297 list_add_tail(&dquot->dq_inuse, &inuse_list);
292 dqstats.allocated_dquots++; 298 dqstats_inc(DQST_ALLOC_DQUOTS);
293} 299}
294 300
295static inline void remove_inuse(struct dquot *dquot) 301static inline void remove_inuse(struct dquot *dquot)
296{ 302{
297 dqstats.allocated_dquots--; 303 dqstats_dec(DQST_ALLOC_DQUOTS);
298 list_del(&dquot->dq_inuse); 304 list_del(&dquot->dq_inuse);
299} 305}
300/* 306/*
@@ -317,14 +323,23 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
317 return dquot->dq_sb->dq_op->mark_dirty(dquot); 323 return dquot->dq_sb->dq_op->mark_dirty(dquot);
318} 324}
319 325
 326/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
320int dquot_mark_dquot_dirty(struct dquot *dquot) 327int dquot_mark_dquot_dirty(struct dquot *dquot)
321{ 328{
329 int ret = 1;
330
331 /* If quota is dirty already, we don't have to acquire dq_list_lock */
332 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
333 return 1;
334
322 spin_lock(&dq_list_lock); 335 spin_lock(&dq_list_lock);
323 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) 336 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
324 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> 337 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
325 info[dquot->dq_type].dqi_dirty_list); 338 info[dquot->dq_type].dqi_dirty_list);
339 ret = 0;
340 }
326 spin_unlock(&dq_list_lock); 341 spin_unlock(&dq_list_lock);
327 return 0; 342 return ret;
328} 343}
329EXPORT_SYMBOL(dquot_mark_dquot_dirty); 344EXPORT_SYMBOL(dquot_mark_dquot_dirty);
330 345
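dquot_mark_dquot_dirty() above now reports whether the dquot was already dirty. A hedged sketch of how a caller could use the return value; example_queue_dquot_write() is hypothetical:

static int example_mark_dirty(struct dquot *dquot)
{
	/* returns 1 if DQ_MOD_B was already set, 0 if this call set it */
	if (dquot_mark_dquot_dirty(dquot))
		return 0;	/* already queued for writeback, nothing more to do */
	return example_queue_dquot_write(dquot);	/* hypothetical writer */
}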
@@ -550,8 +565,8 @@ int dquot_scan_active(struct super_block *sb,
550 continue; 565 continue;
551 /* Now we have active dquot so we can just increase use count */ 566 /* Now we have active dquot so we can just increase use count */
552 atomic_inc(&dquot->dq_count); 567 atomic_inc(&dquot->dq_count);
553 dqstats.lookups++;
554 spin_unlock(&dq_list_lock); 568 spin_unlock(&dq_list_lock);
569 dqstats_inc(DQST_LOOKUPS);
555 dqput(old_dquot); 570 dqput(old_dquot);
556 old_dquot = dquot; 571 old_dquot = dquot;
557 ret = fn(dquot, priv); 572 ret = fn(dquot, priv);
@@ -596,8 +611,8 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
596 * holding reference so we can safely just increase 611 * holding reference so we can safely just increase
597 * use count */ 612 * use count */
598 atomic_inc(&dquot->dq_count); 613 atomic_inc(&dquot->dq_count);
599 dqstats.lookups++;
600 spin_unlock(&dq_list_lock); 614 spin_unlock(&dq_list_lock);
615 dqstats_inc(DQST_LOOKUPS);
601 sb->dq_op->write_dquot(dquot); 616 sb->dq_op->write_dquot(dquot);
602 dqput(dquot); 617 dqput(dquot);
603 spin_lock(&dq_list_lock); 618 spin_lock(&dq_list_lock);
@@ -609,9 +624,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
609 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) 624 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
610 && info_dirty(&dqopt->info[cnt])) 625 && info_dirty(&dqopt->info[cnt]))
611 sb->dq_op->write_info(sb, cnt); 626 sb->dq_op->write_info(sb, cnt);
612 spin_lock(&dq_list_lock); 627 dqstats_inc(DQST_SYNCS);
613 dqstats.syncs++;
614 spin_unlock(&dq_list_lock);
615 mutex_unlock(&dqopt->dqonoff_mutex); 628 mutex_unlock(&dqopt->dqonoff_mutex);
616 629
617 if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)) 630 if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
@@ -663,6 +676,22 @@ static void prune_dqcache(int count)
663 } 676 }
664} 677}
665 678
679static int dqstats_read(unsigned int type)
680{
681 int count = 0;
682#ifdef CONFIG_SMP
683 int cpu;
684 for_each_possible_cpu(cpu)
685 count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
686 /* Statistics reading is racy, but absolute accuracy isn't required */
687 if (count < 0)
688 count = 0;
689#else
690 count = dqstats.stat[type];
691#endif
692 return count;
693}
694
666/* 695/*
667 * This is called from kswapd when we think we need some 696 * This is called from kswapd when we think we need some
668 * more memory 697 * more memory
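The dqstats_inc()/dqstats_dec() helpers used in the surrounding hunks are defined outside this file; one plausible shape, assuming the per-CPU dqstats_pcpu array declared above (a sketch, not the actual definition):

#ifdef CONFIG_SMP
static inline void dqstats_inc(unsigned int type)
{
	/* bump the local CPU's counter; dqstats_read() sums all CPUs */
	preempt_disable();
	per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
	preempt_enable();
}
#else
static inline void dqstats_inc(unsigned int type)
{
	dqstats.stat[type]++;
}
#endif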
@@ -675,7 +704,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
675 prune_dqcache(nr); 704 prune_dqcache(nr);
676 spin_unlock(&dq_list_lock); 705 spin_unlock(&dq_list_lock);
677 } 706 }
678 return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; 707 return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
679} 708}
680 709
681static struct shrinker dqcache_shrinker = { 710static struct shrinker dqcache_shrinker = {
@@ -703,10 +732,7 @@ void dqput(struct dquot *dquot)
703 BUG(); 732 BUG();
704 } 733 }
705#endif 734#endif
706 735 dqstats_inc(DQST_DROPS);
707 spin_lock(&dq_list_lock);
708 dqstats.drops++;
709 spin_unlock(&dq_list_lock);
710we_slept: 736we_slept:
711 spin_lock(&dq_list_lock); 737 spin_lock(&dq_list_lock);
712 if (atomic_read(&dquot->dq_count) > 1) { 738 if (atomic_read(&dquot->dq_count) > 1) {
@@ -823,15 +849,15 @@ we_slept:
823 put_inuse(dquot); 849 put_inuse(dquot);
824 /* hash it first so it can be found */ 850 /* hash it first so it can be found */
825 insert_dquot_hash(dquot); 851 insert_dquot_hash(dquot);
826 dqstats.lookups++;
827 spin_unlock(&dq_list_lock); 852 spin_unlock(&dq_list_lock);
853 dqstats_inc(DQST_LOOKUPS);
828 } else { 854 } else {
829 if (!atomic_read(&dquot->dq_count)) 855 if (!atomic_read(&dquot->dq_count))
830 remove_free_dquot(dquot); 856 remove_free_dquot(dquot);
831 atomic_inc(&dquot->dq_count); 857 atomic_inc(&dquot->dq_count);
832 dqstats.cache_hits++;
833 dqstats.lookups++;
834 spin_unlock(&dq_list_lock); 858 spin_unlock(&dq_list_lock);
859 dqstats_inc(DQST_CACHE_HITS);
860 dqstats_inc(DQST_LOOKUPS);
835 } 861 }
836 /* Wait for dq_lock - after this we know that either dquot_release() is 862 /* Wait for dq_lock - after this we know that either dquot_release() is
837 * already finished or it will be canceled due to dq_count > 1 test */ 863 * already finished or it will be canceled due to dq_count > 1 test */
@@ -1677,16 +1703,19 @@ EXPORT_SYMBOL(dquot_free_inode);
1677 1703
1678/* 1704/*
 1679 * Transfer the number of inodes and blocks from one diskquota to another. 1705
1706 * On success, dquot references in transfer_to are consumed and references
1707 * to original dquots that need to be released are placed there. On failure,
1708 * references are kept untouched.
1680 * 1709 *
1681 * This operation can block, but only after everything is updated 1710 * This operation can block, but only after everything is updated
1682 * A transaction must be started when entering this function. 1711 * A transaction must be started when entering this function.
1712 *
1683 */ 1713 */
1684static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask) 1714int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1685{ 1715{
1686 qsize_t space, cur_space; 1716 qsize_t space, cur_space;
1687 qsize_t rsv_space = 0; 1717 qsize_t rsv_space = 0;
1688 struct dquot *transfer_from[MAXQUOTAS]; 1718 struct dquot *transfer_from[MAXQUOTAS] = {};
1689 struct dquot *transfer_to[MAXQUOTAS];
1690 int cnt, ret = 0; 1719 int cnt, ret = 0;
1691 char warntype_to[MAXQUOTAS]; 1720 char warntype_to[MAXQUOTAS];
1692 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; 1721 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
@@ -1696,19 +1725,12 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
1696 if (IS_NOQUOTA(inode)) 1725 if (IS_NOQUOTA(inode))
1697 return 0; 1726 return 0;
1698 /* Initialize the arrays */ 1727 /* Initialize the arrays */
1699 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1728 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1700 transfer_from[cnt] = NULL;
1701 transfer_to[cnt] = NULL;
1702 warntype_to[cnt] = QUOTA_NL_NOWARN; 1729 warntype_to[cnt] = QUOTA_NL_NOWARN;
1703 }
1704 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1705 if (mask & (1 << cnt))
1706 transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
1707 }
1708 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1730 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1709 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 1731 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1710 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1732 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1711 goto put_all; 1733 return 0;
1712 } 1734 }
1713 spin_lock(&dq_data_lock); 1735 spin_lock(&dq_data_lock);
1714 cur_space = inode_get_bytes(inode); 1736 cur_space = inode_get_bytes(inode);
@@ -1760,47 +1782,41 @@ static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask
1760 1782
1761 mark_all_dquot_dirty(transfer_from); 1783 mark_all_dquot_dirty(transfer_from);
1762 mark_all_dquot_dirty(transfer_to); 1784 mark_all_dquot_dirty(transfer_to);
1763 /* The reference we got is transferred to the inode */ 1785 /* Pass back references to put */
1764 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1786 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1765 transfer_to[cnt] = NULL; 1787 transfer_to[cnt] = transfer_from[cnt];
1766warn_put_all: 1788warn:
1767 flush_warnings(transfer_to, warntype_to); 1789 flush_warnings(transfer_to, warntype_to);
1768 flush_warnings(transfer_from, warntype_from_inodes); 1790 flush_warnings(transfer_from, warntype_from_inodes);
1769 flush_warnings(transfer_from, warntype_from_space); 1791 flush_warnings(transfer_from, warntype_from_space);
1770put_all:
1771 dqput_all(transfer_from);
1772 dqput_all(transfer_to);
1773 return ret; 1792 return ret;
1774over_quota: 1793over_quota:
1775 spin_unlock(&dq_data_lock); 1794 spin_unlock(&dq_data_lock);
1776 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1795 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1777 /* Clear dquot pointers we don't want to dqput() */ 1796 goto warn;
1778 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1779 transfer_from[cnt] = NULL;
1780 goto warn_put_all;
1781} 1797}
1798EXPORT_SYMBOL(__dquot_transfer);
1782 1799
1783/* Wrapper for transferring ownership of an inode for uid/gid only 1800/* Wrapper for transferring ownership of an inode for uid/gid only
1784 * Called from FSXXX_setattr() 1801 * Called from FSXXX_setattr()
1785 */ 1802 */
1786int dquot_transfer(struct inode *inode, struct iattr *iattr) 1803int dquot_transfer(struct inode *inode, struct iattr *iattr)
1787{ 1804{
1788 qid_t chid[MAXQUOTAS]; 1805 struct dquot *transfer_to[MAXQUOTAS] = {};
1789 unsigned long mask = 0; 1806 struct super_block *sb = inode->i_sb;
1807 int ret;
1790 1808
1791 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) { 1809 if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
1792 mask |= 1 << USRQUOTA; 1810 return 0;
1793 chid[USRQUOTA] = iattr->ia_uid; 1811
1794 } 1812 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
1795 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) { 1813 transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
1796 mask |= 1 << GRPQUOTA; 1814 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
1797 chid[GRPQUOTA] = iattr->ia_gid; 1815 transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_uid, GRPQUOTA);
1798 } 1816
1799 if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { 1817 ret = __dquot_transfer(inode, transfer_to);
1800 dquot_initialize(inode); 1818 dqput_all(transfer_to);
1801 return __dquot_transfer(inode, chid, mask); 1819 return ret;
1802 }
1803 return 0;
1804} 1820}
1805EXPORT_SYMBOL(dquot_transfer); 1821EXPORT_SYMBOL(dquot_transfer);
1806 1822
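A sketch of the new __dquot_transfer() calling convention for a filesystem that changes only the owning uid (it mirrors dquot_transfer() above; the function name is illustrative):

static int example_chown_quota(struct inode *inode, qid_t new_uid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	int ret;

	transfer_to[USRQUOTA] = dqget(inode->i_sb, new_uid, USRQUOTA);
	ret = __dquot_transfer(inode, transfer_to);
	/*
	 * On success the array now holds the old references, on failure the
	 * unused new ones; either way a single dqput_all() releases them.
	 */
	dqput_all(transfer_to);
	return ret;
}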
@@ -2275,25 +2291,30 @@ static inline qsize_t stoqb(qsize_t space)
2275} 2291}
2276 2292
2277/* Generic routine for getting common part of quota structure */ 2293/* Generic routine for getting common part of quota structure */
2278static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) 2294static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2279{ 2295{
2280 struct mem_dqblk *dm = &dquot->dq_dqb; 2296 struct mem_dqblk *dm = &dquot->dq_dqb;
2281 2297
2298 memset(di, 0, sizeof(*di));
2299 di->d_version = FS_DQUOT_VERSION;
2300 di->d_flags = dquot->dq_type == USRQUOTA ?
2301 XFS_USER_QUOTA : XFS_GROUP_QUOTA;
2302 di->d_id = dquot->dq_id;
2303
2282 spin_lock(&dq_data_lock); 2304 spin_lock(&dq_data_lock);
2283 di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); 2305 di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
2284 di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); 2306 di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
2285 di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; 2307 di->d_ino_hardlimit = dm->dqb_ihardlimit;
2286 di->dqb_ihardlimit = dm->dqb_ihardlimit; 2308 di->d_ino_softlimit = dm->dqb_isoftlimit;
2287 di->dqb_isoftlimit = dm->dqb_isoftlimit; 2309 di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
2288 di->dqb_curinodes = dm->dqb_curinodes; 2310 di->d_icount = dm->dqb_curinodes;
2289 di->dqb_btime = dm->dqb_btime; 2311 di->d_btimer = dm->dqb_btime;
2290 di->dqb_itime = dm->dqb_itime; 2312 di->d_itimer = dm->dqb_itime;
2291 di->dqb_valid = QIF_ALL;
2292 spin_unlock(&dq_data_lock); 2313 spin_unlock(&dq_data_lock);
2293} 2314}
2294 2315
2295int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, 2316int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2296 struct if_dqblk *di) 2317 struct fs_disk_quota *di)
2297{ 2318{
2298 struct dquot *dquot; 2319 struct dquot *dquot;
2299 2320
@@ -2307,51 +2328,70 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2307} 2328}
2308EXPORT_SYMBOL(vfs_get_dqblk); 2329EXPORT_SYMBOL(vfs_get_dqblk);
2309 2330
2331#define VFS_FS_DQ_MASK \
2332 (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
2333 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
2334 FS_DQ_BTIMER | FS_DQ_ITIMER)
2335
2310/* Generic routine for setting common part of quota structure */ 2336/* Generic routine for setting common part of quota structure */
2311static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) 2337static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2312{ 2338{
2313 struct mem_dqblk *dm = &dquot->dq_dqb; 2339 struct mem_dqblk *dm = &dquot->dq_dqb;
2314 int check_blim = 0, check_ilim = 0; 2340 int check_blim = 0, check_ilim = 0;
2315 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 2341 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
2316 2342
2317 if ((di->dqb_valid & QIF_BLIMITS && 2343 if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
2318 (di->dqb_bhardlimit > dqi->dqi_maxblimit || 2344 return -EINVAL;
2319 di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || 2345
2320 (di->dqb_valid & QIF_ILIMITS && 2346 if (((di->d_fieldmask & FS_DQ_BSOFT) &&
2321 (di->dqb_ihardlimit > dqi->dqi_maxilimit || 2347 (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
2322 di->dqb_isoftlimit > dqi->dqi_maxilimit))) 2348 ((di->d_fieldmask & FS_DQ_BHARD) &&
2349 (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
2350 ((di->d_fieldmask & FS_DQ_ISOFT) &&
2351 (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
2352 ((di->d_fieldmask & FS_DQ_IHARD) &&
2353 (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
2323 return -ERANGE; 2354 return -ERANGE;
2324 2355
2325 spin_lock(&dq_data_lock); 2356 spin_lock(&dq_data_lock);
2326 if (di->dqb_valid & QIF_SPACE) { 2357 if (di->d_fieldmask & FS_DQ_BCOUNT) {
2327 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; 2358 dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
2328 check_blim = 1; 2359 check_blim = 1;
2329 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2360 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2330 } 2361 }
2331 if (di->dqb_valid & QIF_BLIMITS) { 2362
2332 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); 2363 if (di->d_fieldmask & FS_DQ_BSOFT)
2333 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); 2364 dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
2365 if (di->d_fieldmask & FS_DQ_BHARD)
2366 dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
2367 if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
2334 check_blim = 1; 2368 check_blim = 1;
2335 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); 2369 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2336 } 2370 }
2337 if (di->dqb_valid & QIF_INODES) { 2371
2338 dm->dqb_curinodes = di->dqb_curinodes; 2372 if (di->d_fieldmask & FS_DQ_ICOUNT) {
2373 dm->dqb_curinodes = di->d_icount;
2339 check_ilim = 1; 2374 check_ilim = 1;
2340 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); 2375 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2341 } 2376 }
2342 if (di->dqb_valid & QIF_ILIMITS) { 2377
2343 dm->dqb_isoftlimit = di->dqb_isoftlimit; 2378 if (di->d_fieldmask & FS_DQ_ISOFT)
2344 dm->dqb_ihardlimit = di->dqb_ihardlimit; 2379 dm->dqb_isoftlimit = di->d_ino_softlimit;
2380 if (di->d_fieldmask & FS_DQ_IHARD)
2381 dm->dqb_ihardlimit = di->d_ino_hardlimit;
2382 if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
2345 check_ilim = 1; 2383 check_ilim = 1;
2346 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); 2384 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2347 } 2385 }
2348 if (di->dqb_valid & QIF_BTIME) { 2386
2349 dm->dqb_btime = di->dqb_btime; 2387 if (di->d_fieldmask & FS_DQ_BTIMER) {
2388 dm->dqb_btime = di->d_btimer;
2350 check_blim = 1; 2389 check_blim = 1;
2351 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2390 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2352 } 2391 }
2353 if (di->dqb_valid & QIF_ITIME) { 2392
2354 dm->dqb_itime = di->dqb_itime; 2393 if (di->d_fieldmask & FS_DQ_ITIMER) {
2394 dm->dqb_itime = di->d_itimer;
2355 check_ilim = 1; 2395 check_ilim = 1;
2356 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2396 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2357 } 2397 }
@@ -2361,7 +2401,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2361 dm->dqb_curspace < dm->dqb_bsoftlimit) { 2401 dm->dqb_curspace < dm->dqb_bsoftlimit) {
2362 dm->dqb_btime = 0; 2402 dm->dqb_btime = 0;
2363 clear_bit(DQ_BLKS_B, &dquot->dq_flags); 2403 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2364 } else if (!(di->dqb_valid & QIF_BTIME)) 2404 } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
2365 /* Set grace only if user hasn't provided his own... */ 2405 /* Set grace only if user hasn't provided his own... */
2366 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; 2406 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2367 } 2407 }
@@ -2370,7 +2410,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2370 dm->dqb_curinodes < dm->dqb_isoftlimit) { 2410 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2371 dm->dqb_itime = 0; 2411 dm->dqb_itime = 0;
2372 clear_bit(DQ_INODES_B, &dquot->dq_flags); 2412 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2373 } else if (!(di->dqb_valid & QIF_ITIME)) 2413 } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
2374 /* Set grace only if user hasn't provided his own... */ 2414 /* Set grace only if user hasn't provided his own... */
2375 dm->dqb_itime = get_seconds() + dqi->dqi_igrace; 2415 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2376 } 2416 }
@@ -2386,7 +2426,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2386} 2426}
2387 2427
2388int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, 2428int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2389 struct if_dqblk *di) 2429 struct fs_disk_quota *di)
2390{ 2430{
2391 struct dquot *dquot; 2431 struct dquot *dquot;
2392 int rc; 2432 int rc;
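With the fs_disk_quota interface above, a caller that wants to change only the block soft limit sets just that field and the matching bit in d_fieldmask; a hedged sketch (the limit is in the same quota-block units that qbtos()/stoqb() convert):

static int example_set_block_softlimit(struct super_block *sb, int type,
				       qid_t id, u64 blk_softlimit)
{
	struct fs_disk_quota fdq = {
		.d_version = FS_DQUOT_VERSION,
		.d_id = id,
		.d_blk_softlimit = blk_softlimit,
		.d_fieldmask = FS_DQ_BSOFT,	/* only this field is applied */
	};

	return vfs_set_dqblk(sb, type, id, &fdq);
}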
@@ -2465,62 +2505,74 @@ const struct quotactl_ops vfs_quotactl_ops = {
2465 .set_dqblk = vfs_set_dqblk 2505 .set_dqblk = vfs_set_dqblk
2466}; 2506};
2467 2507
2508
2509static int do_proc_dqstats(struct ctl_table *table, int write,
2510 void __user *buffer, size_t *lenp, loff_t *ppos)
2511{
2512#ifdef CONFIG_SMP
2513 /* Update global table */
2514 unsigned int type = (int *)table->data - dqstats.stat;
2515 dqstats.stat[type] = dqstats_read(type);
2516#endif
2517 return proc_dointvec(table, write, buffer, lenp, ppos);
2518}
2519
2468static ctl_table fs_dqstats_table[] = { 2520static ctl_table fs_dqstats_table[] = {
2469 { 2521 {
2470 .procname = "lookups", 2522 .procname = "lookups",
2471 .data = &dqstats.lookups, 2523 .data = &dqstats.stat[DQST_LOOKUPS],
2472 .maxlen = sizeof(int), 2524 .maxlen = sizeof(int),
2473 .mode = 0444, 2525 .mode = 0444,
2474 .proc_handler = proc_dointvec, 2526 .proc_handler = do_proc_dqstats,
2475 }, 2527 },
2476 { 2528 {
2477 .procname = "drops", 2529 .procname = "drops",
2478 .data = &dqstats.drops, 2530 .data = &dqstats.stat[DQST_DROPS],
2479 .maxlen = sizeof(int), 2531 .maxlen = sizeof(int),
2480 .mode = 0444, 2532 .mode = 0444,
2481 .proc_handler = proc_dointvec, 2533 .proc_handler = do_proc_dqstats,
2482 }, 2534 },
2483 { 2535 {
2484 .procname = "reads", 2536 .procname = "reads",
2485 .data = &dqstats.reads, 2537 .data = &dqstats.stat[DQST_READS],
2486 .maxlen = sizeof(int), 2538 .maxlen = sizeof(int),
2487 .mode = 0444, 2539 .mode = 0444,
2488 .proc_handler = proc_dointvec, 2540 .proc_handler = do_proc_dqstats,
2489 }, 2541 },
2490 { 2542 {
2491 .procname = "writes", 2543 .procname = "writes",
2492 .data = &dqstats.writes, 2544 .data = &dqstats.stat[DQST_WRITES],
2493 .maxlen = sizeof(int), 2545 .maxlen = sizeof(int),
2494 .mode = 0444, 2546 .mode = 0444,
2495 .proc_handler = proc_dointvec, 2547 .proc_handler = do_proc_dqstats,
2496 }, 2548 },
2497 { 2549 {
2498 .procname = "cache_hits", 2550 .procname = "cache_hits",
2499 .data = &dqstats.cache_hits, 2551 .data = &dqstats.stat[DQST_CACHE_HITS],
2500 .maxlen = sizeof(int), 2552 .maxlen = sizeof(int),
2501 .mode = 0444, 2553 .mode = 0444,
2502 .proc_handler = proc_dointvec, 2554 .proc_handler = do_proc_dqstats,
2503 }, 2555 },
2504 { 2556 {
2505 .procname = "allocated_dquots", 2557 .procname = "allocated_dquots",
2506 .data = &dqstats.allocated_dquots, 2558 .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
2507 .maxlen = sizeof(int), 2559 .maxlen = sizeof(int),
2508 .mode = 0444, 2560 .mode = 0444,
2509 .proc_handler = proc_dointvec, 2561 .proc_handler = do_proc_dqstats,
2510 }, 2562 },
2511 { 2563 {
2512 .procname = "free_dquots", 2564 .procname = "free_dquots",
2513 .data = &dqstats.free_dquots, 2565 .data = &dqstats.stat[DQST_FREE_DQUOTS],
2514 .maxlen = sizeof(int), 2566 .maxlen = sizeof(int),
2515 .mode = 0444, 2567 .mode = 0444,
2516 .proc_handler = proc_dointvec, 2568 .proc_handler = do_proc_dqstats,
2517 }, 2569 },
2518 { 2570 {
2519 .procname = "syncs", 2571 .procname = "syncs",
2520 .data = &dqstats.syncs, 2572 .data = &dqstats.stat[DQST_SYNCS],
2521 .maxlen = sizeof(int), 2573 .maxlen = sizeof(int),
2522 .mode = 0444, 2574 .mode = 0444,
2523 .proc_handler = proc_dointvec, 2575 .proc_handler = do_proc_dqstats,
2524 }, 2576 },
2525#ifdef CONFIG_PRINT_QUOTA_WARNING 2577#ifdef CONFIG_PRINT_QUOTA_WARNING
2526 { 2578 {
@@ -2572,6 +2624,13 @@ static int __init dquot_init(void)
2572 if (!dquot_hash) 2624 if (!dquot_hash)
2573 panic("Cannot create dquot hash table"); 2625 panic("Cannot create dquot hash table");
2574 2626
2627#ifdef CONFIG_SMP
2628 dqstats_pcpu = alloc_percpu(struct dqstats);
2629 if (!dqstats_pcpu)
2630 panic("Cannot create dquot stats table");
2631#endif
2632 memset(&dqstats, 0, sizeof(struct dqstats));
2633
2575 /* Find power-of-two hlist_heads which can fit into allocation */ 2634 /* Find power-of-two hlist_heads which can fit into allocation */
2576 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); 2635 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2577 dq_hash_bits = 0; 2636 dq_hash_bits = 0;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95388f9b7356..cfc78826da90 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -113,8 +113,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
113 struct if_dqinfo info; 113 struct if_dqinfo info;
114 int ret; 114 int ret;
115 115
116 if (!sb_has_quota_active(sb, type))
117 return -ESRCH;
118 if (!sb->s_qcop->get_info) 116 if (!sb->s_qcop->get_info)
119 return -ENOSYS; 117 return -ENOSYS;
120 ret = sb->s_qcop->get_info(sb, type, &info); 118 ret = sb->s_qcop->get_info(sb, type, &info);
@@ -129,43 +127,80 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
129 127
130 if (copy_from_user(&info, addr, sizeof(info))) 128 if (copy_from_user(&info, addr, sizeof(info)))
131 return -EFAULT; 129 return -EFAULT;
132 if (!sb_has_quota_active(sb, type))
133 return -ESRCH;
134 if (!sb->s_qcop->set_info) 130 if (!sb->s_qcop->set_info)
135 return -ENOSYS; 131 return -ENOSYS;
136 return sb->s_qcop->set_info(sb, type, &info); 132 return sb->s_qcop->set_info(sb, type, &info);
137} 133}
138 134
135static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
136{
137 dst->dqb_bhardlimit = src->d_blk_hardlimit;
138 dst->dqb_bsoftlimit = src->d_blk_softlimit;
139 dst->dqb_curspace = src->d_bcount;
140 dst->dqb_ihardlimit = src->d_ino_hardlimit;
141 dst->dqb_isoftlimit = src->d_ino_softlimit;
142 dst->dqb_curinodes = src->d_icount;
143 dst->dqb_btime = src->d_btimer;
144 dst->dqb_itime = src->d_itimer;
145 dst->dqb_valid = QIF_ALL;
146}
147
139static int quota_getquota(struct super_block *sb, int type, qid_t id, 148static int quota_getquota(struct super_block *sb, int type, qid_t id,
140 void __user *addr) 149 void __user *addr)
141{ 150{
151 struct fs_disk_quota fdq;
142 struct if_dqblk idq; 152 struct if_dqblk idq;
143 int ret; 153 int ret;
144 154
145 if (!sb_has_quota_active(sb, type))
146 return -ESRCH;
147 if (!sb->s_qcop->get_dqblk) 155 if (!sb->s_qcop->get_dqblk)
148 return -ENOSYS; 156 return -ENOSYS;
149 ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); 157 ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
150 if (ret) 158 if (ret)
151 return ret; 159 return ret;
160 copy_to_if_dqblk(&idq, &fdq);
152 if (copy_to_user(addr, &idq, sizeof(idq))) 161 if (copy_to_user(addr, &idq, sizeof(idq)))
153 return -EFAULT; 162 return -EFAULT;
154 return 0; 163 return 0;
155} 164}
156 165
166static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
167{
168 dst->d_blk_hardlimit = src->dqb_bhardlimit;
169 dst->d_blk_softlimit = src->dqb_bsoftlimit;
170 dst->d_bcount = src->dqb_curspace;
171 dst->d_ino_hardlimit = src->dqb_ihardlimit;
172 dst->d_ino_softlimit = src->dqb_isoftlimit;
173 dst->d_icount = src->dqb_curinodes;
174 dst->d_btimer = src->dqb_btime;
175 dst->d_itimer = src->dqb_itime;
176
177 dst->d_fieldmask = 0;
178 if (src->dqb_valid & QIF_BLIMITS)
179 dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
180 if (src->dqb_valid & QIF_SPACE)
181 dst->d_fieldmask |= FS_DQ_BCOUNT;
182 if (src->dqb_valid & QIF_ILIMITS)
183 dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
184 if (src->dqb_valid & QIF_INODES)
185 dst->d_fieldmask |= FS_DQ_ICOUNT;
186 if (src->dqb_valid & QIF_BTIME)
187 dst->d_fieldmask |= FS_DQ_BTIMER;
188 if (src->dqb_valid & QIF_ITIME)
189 dst->d_fieldmask |= FS_DQ_ITIMER;
190}
191
157static int quota_setquota(struct super_block *sb, int type, qid_t id, 192static int quota_setquota(struct super_block *sb, int type, qid_t id,
158 void __user *addr) 193 void __user *addr)
159{ 194{
195 struct fs_disk_quota fdq;
160 struct if_dqblk idq; 196 struct if_dqblk idq;
161 197
162 if (copy_from_user(&idq, addr, sizeof(idq))) 198 if (copy_from_user(&idq, addr, sizeof(idq)))
163 return -EFAULT; 199 return -EFAULT;
164 if (!sb_has_quota_active(sb, type))
165 return -ESRCH;
166 if (!sb->s_qcop->set_dqblk) 200 if (!sb->s_qcop->set_dqblk)
167 return -ENOSYS; 201 return -ENOSYS;
168 return sb->s_qcop->set_dqblk(sb, type, id, &idq); 202 copy_from_if_dqblk(&fdq, &idq);
203 return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
169} 204}
170 205
171static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr) 206static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -199,9 +234,9 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
199 234
200 if (copy_from_user(&fdq, addr, sizeof(fdq))) 235 if (copy_from_user(&fdq, addr, sizeof(fdq)))
201 return -EFAULT; 236 return -EFAULT;
202 if (!sb->s_qcop->set_xquota) 237 if (!sb->s_qcop->set_dqblk)
203 return -ENOSYS; 238 return -ENOSYS;
204 return sb->s_qcop->set_xquota(sb, type, id, &fdq); 239 return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
205} 240}
206 241
207static int quota_getxquota(struct super_block *sb, int type, qid_t id, 242static int quota_getxquota(struct super_block *sb, int type, qid_t id,
@@ -210,9 +245,9 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
210 struct fs_disk_quota fdq; 245 struct fs_disk_quota fdq;
211 int ret; 246 int ret;
212 247
213 if (!sb->s_qcop->get_xquota) 248 if (!sb->s_qcop->get_dqblk)
214 return -ENOSYS; 249 return -ENOSYS;
215 ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); 250 ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
216 if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) 251 if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
217 return -EFAULT; 252 return -EFAULT;
218 return ret; 253 return ret;
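
The user-visible quotactl format does not change with the hunks above: Q_GETQUOTA and Q_SETQUOTA still speak struct if_dqblk, and the translation to struct fs_disk_quota happens entirely in the kernel before the filesystem's get_dqblk/set_dqblk hook is called. For reference, a minimal userspace read is unchanged (struct dqblk is glibc's copy of the if_dqblk layout; error handling elided):

#include <stdio.h>
#include <sys/quota.h>

/* Print the space and inode usage charged to `uid` on the filesystem
 * backing `dev`. */
static void print_usage(const char *dev, int uid)
{
        struct dqblk dq;

        if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), dev, uid, (caddr_t)&dq) == 0)
                printf("uid %d: %llu bytes, %llu inodes\n", uid,
                       (unsigned long long)dq.dqb_curspace,
                       (unsigned long long)dq.dqb_curinodes);
}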
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index f81f4bcfb178..24f03407eeb5 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -60,9 +60,17 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
60static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) 60static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
61{ 61{
62 struct super_block *sb = info->dqi_sb; 62 struct super_block *sb = info->dqi_sb;
63 ssize_t ret;
63 64
64 return sb->s_op->quota_write(sb, info->dqi_type, buf, 65 ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
65 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); 66 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
67 if (ret != info->dqi_usable_bs) {
68 q_warn(KERN_WARNING "VFS: dquota write failed on "
69 "dev %s\n", sb->s_id);
70 if (ret >= 0)
71 ret = -EIO;
72 }
73 return ret;
66} 74}
67 75
68/* Remove empty block from list and return it */ 76/* Remove empty block from list and return it */
@@ -152,7 +160,7 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
152 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); 160 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
153 /* No matter whether write succeeds block is out of list */ 161 /* No matter whether write succeeds block is out of list */
154 if (write_blk(info, blk, buf) < 0) 162 if (write_blk(info, blk, buf) < 0)
155 printk(KERN_ERR 163 q_warn(KERN_ERR
156 "VFS: Can't write block (%u) with free entries.\n", 164 "VFS: Can't write block (%u) with free entries.\n",
157 blk); 165 blk);
158 return 0; 166 return 0;
@@ -244,7 +252,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
244 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { 252 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
245 *err = remove_free_dqentry(info, buf, blk); 253 *err = remove_free_dqentry(info, buf, blk);
246 if (*err < 0) { 254 if (*err < 0) {
247 printk(KERN_ERR "VFS: find_free_dqentry(): Can't " 255 q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
248 "remove block (%u) from entry free list.\n", 256 "remove block (%u) from entry free list.\n",
249 blk); 257 blk);
250 goto out_buf; 258 goto out_buf;
@@ -268,7 +276,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
268#endif 276#endif
269 *err = write_blk(info, blk, buf); 277 *err = write_blk(info, blk, buf);
270 if (*err < 0) { 278 if (*err < 0) {
271 printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " 279 q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
272 "data block %u.\n", blk); 280 "data block %u.\n", blk);
273 goto out_buf; 281 goto out_buf;
274 } 282 }
@@ -303,7 +311,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
303 } else { 311 } else {
304 ret = read_blk(info, *treeblk, buf); 312 ret = read_blk(info, *treeblk, buf);
305 if (ret < 0) { 313 if (ret < 0) {
306 printk(KERN_ERR "VFS: Can't read tree quota block " 314 q_warn(KERN_ERR "VFS: Can't read tree quota block "
307 "%u.\n", *treeblk); 315 "%u.\n", *treeblk);
308 goto out_buf; 316 goto out_buf;
309 } 317 }
@@ -365,7 +373,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
365 if (!dquot->dq_off) { 373 if (!dquot->dq_off) {
366 ret = dq_insert_tree(info, dquot); 374 ret = dq_insert_tree(info, dquot);
367 if (ret < 0) { 375 if (ret < 0) {
368 printk(KERN_ERR "VFS: Error %zd occurred while " 376 q_warn(KERN_ERR "VFS: Error %zd occurred while "
369 "creating quota.\n", ret); 377 "creating quota.\n", ret);
370 kfree(ddquot); 378 kfree(ddquot);
371 return ret; 379 return ret;
@@ -377,14 +385,14 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
377 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, 385 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
378 dquot->dq_off); 386 dquot->dq_off);
379 if (ret != info->dqi_entry_size) { 387 if (ret != info->dqi_entry_size) {
380 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", 388 q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
381 sb->s_id); 389 sb->s_id);
382 if (ret >= 0) 390 if (ret >= 0)
383 ret = -ENOSPC; 391 ret = -ENOSPC;
384 } else { 392 } else {
385 ret = 0; 393 ret = 0;
386 } 394 }
387 dqstats.writes++; 395 dqstats_inc(DQST_WRITES);
388 kfree(ddquot); 396 kfree(ddquot);
389 397
390 return ret; 398 return ret;
@@ -402,14 +410,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
402 if (!buf) 410 if (!buf)
403 return -ENOMEM; 411 return -ENOMEM;
404 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { 412 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
405 printk(KERN_ERR "VFS: Quota structure has offset to other " 413 q_warn(KERN_ERR "VFS: Quota structure has offset to other "
406 "block (%u) than it should (%u).\n", blk, 414 "block (%u) than it should (%u).\n", blk,
407 (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); 415 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
408 goto out_buf; 416 goto out_buf;
409 } 417 }
410 ret = read_blk(info, blk, buf); 418 ret = read_blk(info, blk, buf);
411 if (ret < 0) { 419 if (ret < 0) {
412 printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); 420 q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
413 goto out_buf; 421 goto out_buf;
414 } 422 }
415 dh = (struct qt_disk_dqdbheader *)buf; 423 dh = (struct qt_disk_dqdbheader *)buf;
@@ -419,7 +427,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
419 if (ret >= 0) 427 if (ret >= 0)
420 ret = put_free_dqblk(info, buf, blk); 428 ret = put_free_dqblk(info, buf, blk);
421 if (ret < 0) { 429 if (ret < 0) {
422 printk(KERN_ERR "VFS: Can't move quota data block (%u) " 430 q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
423 "to free list.\n", blk); 431 "to free list.\n", blk);
424 goto out_buf; 432 goto out_buf;
425 } 433 }
@@ -432,14 +440,14 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
432 /* Insert will write block itself */ 440 /* Insert will write block itself */
433 ret = insert_free_dqentry(info, buf, blk); 441 ret = insert_free_dqentry(info, buf, blk);
434 if (ret < 0) { 442 if (ret < 0) {
435 printk(KERN_ERR "VFS: Can't insert quota data " 443 q_warn(KERN_ERR "VFS: Can't insert quota data "
436 "block (%u) to free entry list.\n", blk); 444 "block (%u) to free entry list.\n", blk);
437 goto out_buf; 445 goto out_buf;
438 } 446 }
439 } else { 447 } else {
440 ret = write_blk(info, blk, buf); 448 ret = write_blk(info, blk, buf);
441 if (ret < 0) { 449 if (ret < 0) {
442 printk(KERN_ERR "VFS: Can't write quota data " 450 q_warn(KERN_ERR "VFS: Can't write quota data "
443 "block %u\n", blk); 451 "block %u\n", blk);
444 goto out_buf; 452 goto out_buf;
445 } 453 }
@@ -464,7 +472,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
464 return -ENOMEM; 472 return -ENOMEM;
465 ret = read_blk(info, *blk, buf); 473 ret = read_blk(info, *blk, buf);
466 if (ret < 0) { 474 if (ret < 0) {
467 printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); 475 q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
468 goto out_buf; 476 goto out_buf;
469 } 477 }
470 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); 478 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -488,7 +496,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
488 } else { 496 } else {
489 ret = write_blk(info, *blk, buf); 497 ret = write_blk(info, *blk, buf);
490 if (ret < 0) 498 if (ret < 0)
491 printk(KERN_ERR "VFS: Can't write quota tree " 499 q_warn(KERN_ERR "VFS: Can't write quota tree "
492 "block %u.\n", *blk); 500 "block %u.\n", *blk);
493 } 501 }
494 } 502 }
@@ -521,7 +529,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
521 return -ENOMEM; 529 return -ENOMEM;
522 ret = read_blk(info, blk, buf); 530 ret = read_blk(info, blk, buf);
523 if (ret < 0) { 531 if (ret < 0) {
524 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); 532 q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
525 goto out_buf; 533 goto out_buf;
526 } 534 }
527 ddquot = buf + sizeof(struct qt_disk_dqdbheader); 535 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
@@ -531,7 +539,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
531 ddquot += info->dqi_entry_size; 539 ddquot += info->dqi_entry_size;
532 } 540 }
533 if (i == qtree_dqstr_in_blk(info)) { 541 if (i == qtree_dqstr_in_blk(info)) {
534 printk(KERN_ERR "VFS: Quota for id %u referenced " 542 q_warn(KERN_ERR "VFS: Quota for id %u referenced "
535 "but not present.\n", dquot->dq_id); 543 "but not present.\n", dquot->dq_id);
536 ret = -EIO; 544 ret = -EIO;
537 goto out_buf; 545 goto out_buf;
@@ -556,7 +564,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
556 return -ENOMEM; 564 return -ENOMEM;
557 ret = read_blk(info, blk, buf); 565 ret = read_blk(info, blk, buf);
558 if (ret < 0) { 566 if (ret < 0) {
559 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); 567 q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
560 goto out_buf; 568 goto out_buf;
561 } 569 }
562 ret = 0; 570 ret = 0;
@@ -599,7 +607,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
599 offset = find_dqentry(info, dquot); 607 offset = find_dqentry(info, dquot);
600 if (offset <= 0) { /* Entry not present? */ 608 if (offset <= 0) { /* Entry not present? */
601 if (offset < 0) 609 if (offset < 0)
602 printk(KERN_ERR "VFS: Can't read quota " 610 q_warn(KERN_ERR "VFS: Can't read quota "
603 "structure for id %u.\n", dquot->dq_id); 611 "structure for id %u.\n", dquot->dq_id);
604 dquot->dq_off = 0; 612 dquot->dq_off = 0;
605 set_bit(DQ_FAKE_B, &dquot->dq_flags); 613 set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -617,7 +625,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
617 if (ret != info->dqi_entry_size) { 625 if (ret != info->dqi_entry_size) {
618 if (ret >= 0) 626 if (ret >= 0)
619 ret = -EIO; 627 ret = -EIO;
620 printk(KERN_ERR "VFS: Error while reading quota " 628 q_warn(KERN_ERR "VFS: Error while reading quota "
621 "structure for id %u.\n", dquot->dq_id); 629 "structure for id %u.\n", dquot->dq_id);
622 set_bit(DQ_FAKE_B, &dquot->dq_flags); 630 set_bit(DQ_FAKE_B, &dquot->dq_flags);
623 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); 631 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -634,7 +642,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
634 spin_unlock(&dq_data_lock); 642 spin_unlock(&dq_data_lock);
635 kfree(ddquot); 643 kfree(ddquot);
636out: 644out:
637 dqstats.reads++; 645 dqstats_inc(DQST_READS);
638 return ret; 646 return ret;
639} 647}
640EXPORT_SYMBOL(qtree_read_dquot); 648EXPORT_SYMBOL(qtree_read_dquot);
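
With the short-write check folded into write_blk() above, its callers only need the sign of the return value: a truncated quota_write() is already reported (rate-limited) and mapped to -EIO. Note that qtree_write_dquot() keeps mapping a short write of the dquot entry itself to -ENOSPC, as the hunk above shows. The calling convention, in sketch form:

        ret = write_blk(info, blk, buf);
        if (ret < 0) {
                q_warn(KERN_ERR "VFS: Can't write quota data block %u\n", blk);
                goto out_buf;
        }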
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db81a51..ccc3e71fb1d8 100644
--- a/fs/quota/quota_tree.h
+++ b/fs/quota/quota_tree.h
@@ -22,4 +22,10 @@ struct qt_disk_dqdbheader {
22 22
23#define QT_TREEOFF 1 /* Offset of tree in file in blocks */ 23#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
24 24
25#define q_warn(fmt, args...) \
26do { \
27 if (printk_ratelimit()) \
28 printk(fmt, ## args); \
29} while(0)
30
25#endif /* _LINUX_QUOTAIO_TREE_H */ 31#endif /* _LINUX_QUOTAIO_TREE_H */
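
q_warn() simply gates printk() on printk_ratelimit(), so a corrupted or unwritable quota file can no longer flood the log; the ratelimit state is the global printk one, shared with every other rate-limited message. Expanded, a call such as the ones in quota_tree.c becomes:

        /* q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk)
         * after preprocessing -- the message is dropped while the global
         * printk ratelimit is being exceeded: */
        do {
                if (printk_ratelimit())
                        printk(KERN_ERR
                               "VFS: Can't read quota tree block %u.\n", blk);
        } while (0);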
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 2ae757e9c008..4af344c5852a 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -71,7 +71,7 @@ static int v1_read_dqblk(struct dquot *dquot)
71 dquot->dq_dqb.dqb_ihardlimit == 0 && 71 dquot->dq_dqb.dqb_ihardlimit == 0 &&
72 dquot->dq_dqb.dqb_isoftlimit == 0) 72 dquot->dq_dqb.dqb_isoftlimit == 0)
73 set_bit(DQ_FAKE_B, &dquot->dq_flags); 73 set_bit(DQ_FAKE_B, &dquot->dq_flags);
74 dqstats.reads++; 74 dqstats_inc(DQST_READS);
75 75
76 return 0; 76 return 0;
77} 77}
@@ -104,7 +104,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
104 ret = 0; 104 ret = 0;
105 105
106out: 106out:
107 dqstats.writes++; 107 dqstats_inc(DQST_WRITES);
108 108
109 return ret; 109 return ret;
110} 110}
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index e3da02f4986f..135206af1458 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -63,7 +63,7 @@ static int v2_read_header(struct super_block *sb, int type,
63 size = sb->s_op->quota_read(sb, type, (char *)dqhead, 63 size = sb->s_op->quota_read(sb, type, (char *)dqhead,
64 sizeof(struct v2_disk_dqheader), 0); 64 sizeof(struct v2_disk_dqheader), 0);
65 if (size != sizeof(struct v2_disk_dqheader)) { 65 if (size != sizeof(struct v2_disk_dqheader)) {
66 printk(KERN_WARNING "quota_v2: Failed header read:" 66 q_warn(KERN_WARNING "quota_v2: Failed header read:"
67 " expected=%zd got=%zd\n", 67 " expected=%zd got=%zd\n",
68 sizeof(struct v2_disk_dqheader), size); 68 sizeof(struct v2_disk_dqheader), size);
69 return 0; 69 return 0;
@@ -106,7 +106,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
106 size = sb->s_op->quota_read(sb, type, (char *)&dinfo, 106 size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
107 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); 107 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
108 if (size != sizeof(struct v2_disk_dqinfo)) { 108 if (size != sizeof(struct v2_disk_dqinfo)) {
109 printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n", 109 q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
110 sb->s_id); 110 sb->s_id);
111 return -1; 111 return -1;
112 } 112 }
@@ -167,7 +167,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
167 size = sb->s_op->quota_write(sb, type, (char *)&dinfo, 167 size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
168 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); 168 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
169 if (size != sizeof(struct v2_disk_dqinfo)) { 169 if (size != sizeof(struct v2_disk_dqinfo)) {
170 printk(KERN_WARNING "Can't write info structure on device %s.\n", 170 q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
171 sb->s_id); 171 sb->s_id);
172 return -1; 172 return -1;
173 } 173 }
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index dc2c65e04853..0f22fdaf54ac 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3076,9 +3076,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3076 ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); 3076 ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
3077 3077
3078 depth = reiserfs_write_lock_once(inode->i_sb); 3078 depth = reiserfs_write_lock_once(inode->i_sb);
3079 if (attr->ia_valid & ATTR_SIZE) { 3079 if (is_quota_modification(inode, attr))
3080 dquot_initialize(inode); 3080 dquot_initialize(inode);
3081 3081
3082 if (attr->ia_valid & ATTR_SIZE) {
3082 /* version 2 items will be caught by the s_maxbytes check 3083 /* version 2 items will be caught by the s_maxbytes check
3083 ** done for us in vmtruncate 3084 ** done for us in vmtruncate
3084 */ 3085 */
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 4b6a46ccbf46..6ebc043f3a2a 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -227,7 +227,7 @@ int udf_setattr(struct dentry *dentry, struct iattr *iattr)
227 if (error) 227 if (error)
228 return error; 228 return error;
229 229
230 if (iattr->ia_valid & ATTR_SIZE) 230 if (is_quota_modification(inode, iattr))
231 dquot_initialize(inode); 231 dquot_initialize(inode);
232 232
233 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 233 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 80b68c3702d1..cffa756f1047 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -603,7 +603,7 @@ static void ufs_set_inode_ops(struct inode *inode)
603 if (!inode->i_blocks) 603 if (!inode->i_blocks)
604 inode->i_op = &ufs_fast_symlink_inode_operations; 604 inode->i_op = &ufs_fast_symlink_inode_operations;
605 else { 605 else {
606 inode->i_op = &page_symlink_inode_operations; 606 inode->i_op = &ufs_symlink_inode_operations;
607 inode->i_mapping->a_ops = &ufs_aops; 607 inode->i_mapping->a_ops = &ufs_aops;
608 } 608 }
609 } else 609 } else
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 118556243e7a..eabc02eb1294 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -148,7 +148,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
148 148
149 if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { 149 if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
150 /* slow symlink */ 150 /* slow symlink */
151 inode->i_op = &page_symlink_inode_operations; 151 inode->i_op = &ufs_symlink_inode_operations;
152 inode->i_mapping->a_ops = &ufs_aops; 152 inode->i_mapping->a_ops = &ufs_aops;
153 err = page_symlink(inode, symname, l); 153 err = page_symlink(inode, symname, l);
154 if (err) 154 if (err)
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c
index c0156eda44bc..d283628b4778 100644
--- a/fs/ufs/symlink.c
+++ b/fs/ufs/symlink.c
@@ -42,4 +42,12 @@ static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
42const struct inode_operations ufs_fast_symlink_inode_operations = { 42const struct inode_operations ufs_fast_symlink_inode_operations = {
43 .readlink = generic_readlink, 43 .readlink = generic_readlink,
44 .follow_link = ufs_follow_link, 44 .follow_link = ufs_follow_link,
45 .setattr = ufs_setattr,
46};
47
48const struct inode_operations ufs_symlink_inode_operations = {
49 .readlink = generic_readlink,
50 .follow_link = page_follow_link_light,
51 .put_link = page_put_link,
52 .setattr = ufs_setattr,
45}; 53};
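
Taken together with the fs/ufs/inode.c and fs/ufs/namei.c hunks above, slow (page-cache) symlinks now get a ufs-specific operations table so that .setattr = ufs_setattr applies to them as well, instead of the generic page_symlink_inode_operations. The selection logic, condensed from those hunks:

        if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
                /* slow symlink: target lives in the page cache */
                inode->i_op = &ufs_symlink_inode_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else {
                /* fast symlink: target stored in the inode itself */
                inode->i_op = &ufs_fast_symlink_inode_operations;
        }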
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index d3b6270cb377..f294c44577dc 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -508,7 +508,7 @@ out:
508 * - there is no way to know old size 508 * - there is no way to know old size
509 * - there is no way inform user about error, if it happens in `truncate' 509 * - there is no way inform user about error, if it happens in `truncate'
510 */ 510 */
511static int ufs_setattr(struct dentry *dentry, struct iattr *attr) 511int ufs_setattr(struct dentry *dentry, struct iattr *attr)
512{ 512{
513 struct inode *inode = dentry->d_inode; 513 struct inode *inode = dentry->d_inode;
514 unsigned int ia_valid = attr->ia_valid; 514 unsigned int ia_valid = attr->ia_valid;
@@ -518,18 +518,18 @@ static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
518 if (error) 518 if (error)
519 return error; 519 return error;
520 520
521 if (is_quota_modification(inode, attr))
522 dquot_initialize(inode);
523
521 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 524 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
522 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 525 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
523 error = dquot_transfer(inode, attr); 526 error = dquot_transfer(inode, attr);
524 if (error) 527 if (error)
525 return error; 528 return error;
526 } 529 }
527 if (ia_valid & ATTR_SIZE && 530 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
528 attr->ia_size != i_size_read(inode)) {
529 loff_t old_i_size = inode->i_size; 531 loff_t old_i_size = inode->i_size;
530 532
531 dquot_initialize(inode);
532
533 error = vmtruncate(inode, attr->ia_size); 533 error = vmtruncate(inode, attr->ia_size);
534 if (error) 534 if (error)
535 return error; 535 return error;
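
Across reiserfs, udf and ufs the series converges on the same ->setattr shape: call dquot_initialize() only when the attribute change can actually move quota usage (is_quota_modification() below in quotaops.h), and let dquot_transfer() handle uid/gid moves. A minimal sketch of that pattern, not any particular filesystem; inode_change_ok() and inode_setattr() stand in for the usual boilerplate of this kernel generation:

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        /* Only pull in dquots when size/uid/gid actually change. */
        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);

        if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
            (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
                error = dquot_transfer(inode, attr);
                if (error)
                        return error;
        }
        return inode_setattr(inode, attr);
}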
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 43f9f5d5670e..179ae6b3180a 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -122,9 +122,11 @@ extern void ufs_panic (struct super_block *, const char *, const char *, ...) __
122 122
123/* symlink.c */ 123/* symlink.c */
124extern const struct inode_operations ufs_fast_symlink_inode_operations; 124extern const struct inode_operations ufs_fast_symlink_inode_operations;
125extern const struct inode_operations ufs_symlink_inode_operations;
125 126
126/* truncate.c */ 127/* truncate.c */
127extern int ufs_truncate (struct inode *, loff_t); 128extern int ufs_truncate (struct inode *, loff_t);
129extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
128 130
129static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) 131static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
130{ 132{
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 1947514ce1ad..e31bf21fe5d3 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -97,7 +97,7 @@ xfs_fs_set_xstate(
97} 97}
98 98
99STATIC int 99STATIC int
100xfs_fs_get_xquota( 100xfs_fs_get_dqblk(
101 struct super_block *sb, 101 struct super_block *sb,
102 int type, 102 int type,
103 qid_t id, 103 qid_t id,
@@ -114,7 +114,7 @@ xfs_fs_get_xquota(
114} 114}
115 115
116STATIC int 116STATIC int
117xfs_fs_set_xquota( 117xfs_fs_set_dqblk(
118 struct super_block *sb, 118 struct super_block *sb,
119 int type, 119 int type,
120 qid_t id, 120 qid_t id,
@@ -135,6 +135,6 @@ xfs_fs_set_xquota(
135const struct quotactl_ops xfs_quotactl_operations = { 135const struct quotactl_ops xfs_quotactl_operations = {
136 .get_xstate = xfs_fs_get_xstate, 136 .get_xstate = xfs_fs_get_xstate,
137 .set_xstate = xfs_fs_set_xstate, 137 .set_xstate = xfs_fs_set_xstate,
138 .get_xquota = xfs_fs_get_xquota, 138 .get_dqblk = xfs_fs_get_dqblk,
139 .set_xquota = xfs_fs_set_xquota, 139 .set_dqblk = xfs_fs_set_dqblk,
140}; 140};
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 26fa43140f2e..92b002f1805f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -448,6 +448,9 @@ xfs_qm_scall_getqstat(
448 return 0; 448 return 0;
449} 449}
450 450
451#define XFS_DQ_MASK \
452 (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
453
451/* 454/*
452 * Adjust quota limits, and start/stop timers accordingly. 455 * Adjust quota limits, and start/stop timers accordingly.
453 */ 456 */
@@ -465,9 +468,10 @@ xfs_qm_scall_setqlim(
465 int error; 468 int error;
466 xfs_qcnt_t hard, soft; 469 xfs_qcnt_t hard, soft;
467 470
468 if ((newlim->d_fieldmask & 471 if (newlim->d_fieldmask & ~XFS_DQ_MASK)
469 (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0) 472 return EINVAL;
470 return (0); 473 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
474 return 0;
471 475
472 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); 476 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
473 if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, 477 if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h
index 527504c11c5e..4389ae72024e 100644
--- a/include/linux/dqblk_xfs.h
+++ b/include/linux/dqblk_xfs.h
@@ -110,6 +110,15 @@ typedef struct fs_disk_quota {
110#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS) 110#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
111 111
112/* 112/*
 113 * Accounting values. These can only be set for filesystems with
114 * non-transactional quotas that require quotacheck(8) in userspace.
115 */
116#define FS_DQ_BCOUNT (1<<12)
117#define FS_DQ_ICOUNT (1<<13)
118#define FS_DQ_RTBCOUNT (1<<14)
119#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
120
121/*
113 * Various flags related to quotactl(2). Only relevant to XFS filesystems. 122 * Various flags related to quotactl(2). Only relevant to XFS filesystems.
114 */ 123 */
115#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ 124#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
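
These accounting bits are what copy_from_if_dqblk() in fs/quota/quota.c sets for QIF_SPACE and QIF_INODES, so a dquot-based set_dqblk implementation can now accept usage updates, while XFS, whose quotas are transactional, rejects them via the ~XFS_DQ_MASK check above. A hedged sketch of the consuming side (locking and dirtying abbreviated):

        /* Sketch only: apply the new accounting fields to an in-memory
         * dquot when the caller asked for them.  Real code holds
         * dq_data_lock and marks the dquot dirty afterwards. */
        if (fdq->d_fieldmask & FS_DQ_BCOUNT)
                dquot->dq_dqb.dqb_curspace = fdq->d_bcount;
        if (fdq->d_fieldmask & FS_DQ_ICOUNT)
                dquot->dq_dqb.dqb_curinodes = fdq->d_icount;
        if (fdq->d_fieldmask & FS_DQ_ACCT_MASK)
                set_bit(DQ_MOD_B, &dquot->dq_flags);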
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index 1cdb66367c98..db4d9f586bb6 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -106,6 +106,15 @@ struct ext2_sb_info {
106 spinlock_t s_rsv_window_lock; 106 spinlock_t s_rsv_window_lock;
107 struct rb_root s_rsv_window_root; 107 struct rb_root s_rsv_window_root;
108 struct ext2_reserve_window_node s_rsv_window_head; 108 struct ext2_reserve_window_node s_rsv_window_head;
109 /*
110 * s_lock protects against concurrent modifications of s_mount_state,
111 * s_blocks_last, s_overhead_last and the content of superblock's
112 * buffer pointed to by sbi->s_es.
113 *
114 * Note: It is used in ext2_show_options() to provide a consistent view
115 * of the mount options.
116 */
117 spinlock_t s_lock;
109}; 118};
110 119
111static inline spinlock_t * 120static inline spinlock_t *
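
A sketch of the locking rule this comment establishes, at a hypothetical ext2 call site that updates the mount state and the on-disk superblock buffer (names are the usual ext2 ones, not lines from this patch):

        struct ext2_sb_info *sbi = EXT2_SB(sb);

        /* Serialize against remount and ext2_show_options() readers. */
        spin_lock(&sbi->s_lock);
        sbi->s_mount_state &= ~EXT2_VALID_FS;
        sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);
        spin_unlock(&sbi->s_lock);
        mark_buffer_dirty(sbi->s_sbh);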
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 516a2a27e87a..e06965081ba5 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -427,9 +427,9 @@ struct transaction_s
427 enum { 427 enum {
428 T_RUNNING, 428 T_RUNNING,
429 T_LOCKED, 429 T_LOCKED,
430 T_RUNDOWN,
431 T_FLUSH, 430 T_FLUSH,
432 T_COMMIT, 431 T_COMMIT,
432 T_COMMIT_RECORD,
433 T_FINISHED 433 T_FINISHED
434 } t_state; 434 } t_state;
435 435
@@ -991,6 +991,7 @@ int journal_start_commit(journal_t *journal, tid_t *tid);
991int journal_force_commit_nested(journal_t *journal); 991int journal_force_commit_nested(journal_t *journal);
992int log_wait_commit(journal_t *journal, tid_t tid); 992int log_wait_commit(journal_t *journal, tid_t tid);
993int log_do_checkpoint(journal_t *journal); 993int log_do_checkpoint(journal_t *journal);
994int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
994 995
995void __log_wait_for_space(journal_t *journal); 996void __log_wait_for_space(journal_t *journal);
996extern void __journal_drop_transaction(journal_t *, transaction_t *); 997extern void __journal_drop_transaction(journal_t *, transaction_t *);
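
The new journal_trans_will_send_data_barrier() lets a caller such as ext3's fsync path skip issuing its own cache flush when the commit it is about to wait for will already send a barrier. A hedged sketch of the intended use; issue_device_flush() is a placeholder, since the concrete flush primitive and its signature depend on the kernel version:

        /* Only flush the disk cache ourselves if committing `commit_tid`
         * will not already do it with a barrier write. */
        int needs_flush = 1;

        if (journal_trans_will_send_data_barrier(journal, commit_tid))
                needs_flush = 0;
        ret = log_wait_commit(journal, commit_tid);
        if (needs_flush)
                issue_device_flush(inode->i_sb->s_bdev);   /* placeholder */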
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b462916b2a0a..7126a15467f1 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -174,6 +174,8 @@ enum {
174#include <linux/rwsem.h> 174#include <linux/rwsem.h>
175#include <linux/spinlock.h> 175#include <linux/spinlock.h>
176#include <linux/wait.h> 176#include <linux/wait.h>
177#include <linux/percpu.h>
178#include <linux/smp.h>
177 179
178#include <linux/dqblk_xfs.h> 180#include <linux/dqblk_xfs.h>
179#include <linux/dqblk_v1.h> 181#include <linux/dqblk_v1.h>
@@ -238,19 +240,43 @@ static inline int info_dirty(struct mem_dqinfo *info)
238 return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); 240 return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
239} 241}
240 242
243enum {
244 DQST_LOOKUPS,
245 DQST_DROPS,
246 DQST_READS,
247 DQST_WRITES,
248 DQST_CACHE_HITS,
249 DQST_ALLOC_DQUOTS,
250 DQST_FREE_DQUOTS,
251 DQST_SYNCS,
252 _DQST_DQSTAT_LAST
253};
254
241struct dqstats { 255struct dqstats {
242 int lookups; 256 int stat[_DQST_DQSTAT_LAST];
243 int drops;
244 int reads;
245 int writes;
246 int cache_hits;
247 int allocated_dquots;
248 int free_dquots;
249 int syncs;
250}; 257};
251 258
259extern struct dqstats *dqstats_pcpu;
252extern struct dqstats dqstats; 260extern struct dqstats dqstats;
253 261
262static inline void dqstats_inc(unsigned int type)
263{
264#ifdef CONFIG_SMP
265 per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
266#else
267 dqstats.stat[type]++;
268#endif
269}
270
271static inline void dqstats_dec(unsigned int type)
272{
273#ifdef CONFIG_SMP
274 per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
275#else
276 dqstats.stat[type]--;
277#endif
278}
279
254#define DQ_MOD_B 0 /* dquot modified since read */ 280#define DQ_MOD_B 0 /* dquot modified since read */
255#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */ 281#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */
256#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */ 282#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */
@@ -311,12 +337,10 @@ struct quotactl_ops {
311 int (*quota_sync)(struct super_block *, int, int); 337 int (*quota_sync)(struct super_block *, int, int);
312 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 338 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
313 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 339 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
314 int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); 340 int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
315 int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); 341 int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
316 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 342 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
317 int (*set_xstate)(struct super_block *, unsigned int, int); 343 int (*set_xstate)(struct super_block *, unsigned int, int);
318 int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
319 int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
320}; 344};
321 345
322struct quota_format_type { 346struct quota_format_type {
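
With get_xquota/set_xquota removed, a single pair of hooks serves both the generic and the XFS-style quotactl paths. A filesystem using the generic dquot code would now wire up its table roughly as follows (sketch; quota_on/quota_off and the xstate hooks omitted):

static const struct quotactl_ops example_quotactl_ops = {
        .quota_sync = vfs_quota_sync,
        .get_info   = vfs_get_dqinfo,
        .set_info   = vfs_set_dqinfo,
        .get_dqblk  = vfs_get_dqblk,   /* now takes struct fs_disk_quota */
        .set_dqblk  = vfs_set_dqblk,
};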
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index e6fa7acce290..370abb1e99cb 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -14,6 +14,14 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
14 return &sb->s_dquot; 14 return &sb->s_dquot;
15} 15}
16 16
 17/* i_mutex must be held */
18static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
19{
20 return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
21 (ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
22 (ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid);
23}
24
17#if defined(CONFIG_QUOTA) 25#if defined(CONFIG_QUOTA)
18 26
19/* 27/*
@@ -63,9 +71,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags);
63int vfs_quota_sync(struct super_block *sb, int type, int wait); 71int vfs_quota_sync(struct super_block *sb, int type, int wait);
64int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 72int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
65int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 73int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
66int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); 74int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
67int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); 75 struct fs_disk_quota *di);
76int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
77 struct fs_disk_quota *di);
68 78
79int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
69int dquot_transfer(struct inode *inode, struct iattr *iattr); 80int dquot_transfer(struct inode *inode, struct iattr *iattr);
70int vfs_dq_quota_on_remount(struct super_block *sb); 81int vfs_dq_quota_on_remount(struct super_block *sb);
71 82
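
The __dquot_transfer() export complements dquot_transfer(): a filesystem that must wrap the ownership change in its own transaction grabs the target dquots itself and passes them down (ocfs2 is the in-tree user of this split). A hedged sketch of that calling pattern:

        struct dquot *transfer_to[MAXQUOTAS] = {};
        int error;

        if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid)
                transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid, USRQUOTA);
        if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)
                transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid, GRPQUOTA);

        /* ... start the filesystem's own transaction here ... */
        error = __dquot_transfer(inode, transfer_to);
        /* ... stop the transaction, then dqput() each transfer_to entry ... */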