Diffstat (limited to 'fs/quota/dquot.c')
-rw-r--r--  fs/quota/dquot.c  415
1 file changed, 224 insertions, 191 deletions
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index dea86abdf2e7..e0b870f4749f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -100,9 +100,13 @@
100 * 100 *
101 * Any operation working on dquots via inode pointers must hold dqptr_sem. If 101 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
102 * operation is just reading pointers from inode (or not using them at all) the 102 * operation is just reading pointers from inode (or not using them at all) the
103 * read lock is enough. If pointers are altered function must hold write lock 103 * read lock is enough. If pointers are altered function must hold write lock.
104 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that 104 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
105 * for altering the flag i_mutex is also needed). 105 * inode is a quota file). Functions adding pointers from inode to dquots have
106 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
107 * have to do all pointer modifications before dropping dqptr_sem. This makes
108 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
109 * then drops all pointers to dquots from an inode.
106 * 110 *
107 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced 111 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
108 * from inodes (dquot_alloc_space() and such don't check the dq_lock). 112 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
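The rule spelled out in the new comment can be summarised in code. Below is a minimal sketch (not taken from the patch; example_attach_dquots is an invented name) of how a function installing dquot pointers is expected to behave: check S_NOQUOTA while holding dqptr_sem for writing and finish every pointer update before releasing it, so it cannot race with quotaon() setting the flag and then dropping the pointers. The real logic lives in __dquot_initialize() later in this diff.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Illustration only: attach already-acquired dquots to an inode while
 * honouring the S_NOQUOTA rule described in the comment above. */
static void example_attach_dquots(struct inode *inode, struct dquot **got)
{
	int cnt;

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))	/* quota file itself, never attach */
		goto out;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (got[cnt] && !inode->i_dquot[cnt]) {
			inode->i_dquot[cnt] = got[cnt];
			got[cnt] = NULL;
		}
	}
out:
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
}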
@@ -225,6 +229,9 @@ static struct hlist_head *dquot_hash;
225struct dqstats dqstats; 229struct dqstats dqstats;
226EXPORT_SYMBOL(dqstats); 230EXPORT_SYMBOL(dqstats);
227 231
232static qsize_t inode_get_rsv_space(struct inode *inode);
233static void __dquot_initialize(struct inode *inode, int type);
234
228static inline unsigned int 235static inline unsigned int
229hashfn(const struct super_block *sb, unsigned int id, int type) 236hashfn(const struct super_block *sb, unsigned int id, int type)
230{ 237{
@@ -564,7 +571,7 @@ out:
564} 571}
565EXPORT_SYMBOL(dquot_scan_active); 572EXPORT_SYMBOL(dquot_scan_active);
566 573
567int vfs_quota_sync(struct super_block *sb, int type) 574int vfs_quota_sync(struct super_block *sb, int type, int wait)
568{ 575{
569 struct list_head *dirty; 576 struct list_head *dirty;
570 struct dquot *dquot; 577 struct dquot *dquot;
@@ -609,6 +616,33 @@ int vfs_quota_sync(struct super_block *sb, int type)
609 spin_unlock(&dq_list_lock); 616 spin_unlock(&dq_list_lock);
610 mutex_unlock(&dqopt->dqonoff_mutex); 617 mutex_unlock(&dqopt->dqonoff_mutex);
611 618
619 if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
620 return 0;
621
622 /* This is not very clever (and fast) but currently I don't know about
623 * any other simple way of getting quota data to disk and we must get
624 * them there for userspace to be visible... */
625 if (sb->s_op->sync_fs)
626 sb->s_op->sync_fs(sb, 1);
627 sync_blockdev(sb->s_bdev);
628
629 /*
630 * Now when everything is written we can discard the pagecache so
631 * that userspace sees the changes.
632 */
633 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
634 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
635 if (type != -1 && cnt != type)
636 continue;
637 if (!sb_has_quota_active(sb, cnt))
638 continue;
639 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
640 I_MUTEX_QUOTA);
641 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
642 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
643 }
644 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
645
612 return 0; 646 return 0;
613} 647}
614EXPORT_SYMBOL(vfs_quota_sync); 648EXPORT_SYMBOL(vfs_quota_sync);
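For callers, the new third argument selects between a cheap sync of dirty dquots and a full flush that also makes the data visible to userspace, which is what the new tail of the function implements. A hedged usage sketch (example_flush_quotas is an invented caller; in-tree, the Q_SYNC quotactl reaches this through ->quota_sync):

#include <linux/quotaops.h>

/* Illustration only: push quota data for all types to disk and make it
 * visible to userspace, e.g. before a quota-aware backup. */
static int example_flush_quotas(struct super_block *sb)
{
	/* type == -1: all quota types; wait == 1: write out and invalidate */
	return vfs_quota_sync(sb, -1, 1);
}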
@@ -840,11 +874,14 @@ static int dqinit_needed(struct inode *inode, int type)
840static void add_dquot_ref(struct super_block *sb, int type) 874static void add_dquot_ref(struct super_block *sb, int type)
841{ 875{
842 struct inode *inode, *old_inode = NULL; 876 struct inode *inode, *old_inode = NULL;
877 int reserved = 0;
843 878
844 spin_lock(&inode_lock); 879 spin_lock(&inode_lock);
845 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 880 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
846 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 881 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
847 continue; 882 continue;
883 if (unlikely(inode_get_rsv_space(inode) > 0))
884 reserved = 1;
848 if (!atomic_read(&inode->i_writecount)) 885 if (!atomic_read(&inode->i_writecount))
849 continue; 886 continue;
850 if (!dqinit_needed(inode, type)) 887 if (!dqinit_needed(inode, type))
@@ -854,7 +891,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
854 spin_unlock(&inode_lock); 891 spin_unlock(&inode_lock);
855 892
856 iput(old_inode); 893 iput(old_inode);
857 sb->dq_op->initialize(inode, type); 894 __dquot_initialize(inode, type);
858 /* We hold a reference to 'inode' so it couldn't have been 895 /* We hold a reference to 'inode' so it couldn't have been
859 * removed from s_inodes list while we dropped the inode_lock. 896 * removed from s_inodes list while we dropped the inode_lock.
860 * We cannot iput the inode now as we can be holding the last 897 * We cannot iput the inode now as we can be holding the last
@@ -865,6 +902,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
865 } 902 }
866 spin_unlock(&inode_lock); 903 spin_unlock(&inode_lock);
867 iput(old_inode); 904 iput(old_inode);
905
906 if (reserved) {
907 printk(KERN_WARNING "VFS (%s): Writes happened before quota"
908 " was turned on thus quota information is probably "
909 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
910 }
868} 911}
869 912
870/* 913/*
@@ -978,10 +1021,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
978/* 1021/*
979 * Claim reserved quota space 1022 * Claim reserved quota space
980 */ 1023 */
981static void dquot_claim_reserved_space(struct dquot *dquot, 1024static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
982 qsize_t number)
983{ 1025{
984 WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); 1026 if (dquot->dq_dqb.dqb_rsvspace < number) {
1027 WARN_ON_ONCE(1);
1028 number = dquot->dq_dqb.dqb_rsvspace;
1029 }
985 dquot->dq_dqb.dqb_curspace += number; 1030 dquot->dq_dqb.dqb_curspace += number;
986 dquot->dq_dqb.dqb_rsvspace -= number; 1031 dquot->dq_dqb.dqb_rsvspace -= number;
987} 1032}
@@ -989,7 +1034,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
989static inline 1034static inline
990void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) 1035void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
991{ 1036{
992 dquot->dq_dqb.dqb_rsvspace -= number; 1037 if (dquot->dq_dqb.dqb_rsvspace >= number)
1038 dquot->dq_dqb.dqb_rsvspace -= number;
1039 else {
1040 WARN_ON_ONCE(1);
1041 dquot->dq_dqb.dqb_rsvspace = 0;
1042 }
993} 1043}
994 1044
995static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) 1045static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
@@ -1131,13 +1181,13 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1131 *warntype = QUOTA_NL_NOWARN; 1181 *warntype = QUOTA_NL_NOWARN;
1132 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1182 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
1133 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1183 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1134 return QUOTA_OK; 1184 return 0;
1135 1185
1136 if (dquot->dq_dqb.dqb_ihardlimit && 1186 if (dquot->dq_dqb.dqb_ihardlimit &&
1137 newinodes > dquot->dq_dqb.dqb_ihardlimit && 1187 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1138 !ignore_hardlimit(dquot)) { 1188 !ignore_hardlimit(dquot)) {
1139 *warntype = QUOTA_NL_IHARDWARN; 1189 *warntype = QUOTA_NL_IHARDWARN;
1140 return NO_QUOTA; 1190 return -EDQUOT;
1141 } 1191 }
1142 1192
1143 if (dquot->dq_dqb.dqb_isoftlimit && 1193 if (dquot->dq_dqb.dqb_isoftlimit &&
@@ -1146,7 +1196,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1146 get_seconds() >= dquot->dq_dqb.dqb_itime && 1196 get_seconds() >= dquot->dq_dqb.dqb_itime &&
1147 !ignore_hardlimit(dquot)) { 1197 !ignore_hardlimit(dquot)) {
1148 *warntype = QUOTA_NL_ISOFTLONGWARN; 1198 *warntype = QUOTA_NL_ISOFTLONGWARN;
1149 return NO_QUOTA; 1199 return -EDQUOT;
1150 } 1200 }
1151 1201
1152 if (dquot->dq_dqb.dqb_isoftlimit && 1202 if (dquot->dq_dqb.dqb_isoftlimit &&
@@ -1157,7 +1207,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1157 sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1207 sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1158 } 1208 }
1159 1209
1160 return QUOTA_OK; 1210 return 0;
1161} 1211}
1162 1212
1163/* needs dq_data_lock */ 1213/* needs dq_data_lock */
@@ -1169,7 +1219,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1169 *warntype = QUOTA_NL_NOWARN; 1219 *warntype = QUOTA_NL_NOWARN;
1170 if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || 1220 if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
1171 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1221 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1172 return QUOTA_OK; 1222 return 0;
1173 1223
1174 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace 1224 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1175 + space; 1225 + space;
@@ -1179,7 +1229,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1179 !ignore_hardlimit(dquot)) { 1229 !ignore_hardlimit(dquot)) {
1180 if (!prealloc) 1230 if (!prealloc)
1181 *warntype = QUOTA_NL_BHARDWARN; 1231 *warntype = QUOTA_NL_BHARDWARN;
1182 return NO_QUOTA; 1232 return -EDQUOT;
1183 } 1233 }
1184 1234
1185 if (dquot->dq_dqb.dqb_bsoftlimit && 1235 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1189,7 +1239,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1189 !ignore_hardlimit(dquot)) { 1239 !ignore_hardlimit(dquot)) {
1190 if (!prealloc) 1240 if (!prealloc)
1191 *warntype = QUOTA_NL_BSOFTLONGWARN; 1241 *warntype = QUOTA_NL_BSOFTLONGWARN;
1192 return NO_QUOTA; 1242 return -EDQUOT;
1193 } 1243 }
1194 1244
1195 if (dquot->dq_dqb.dqb_bsoftlimit && 1245 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1205,10 +1255,10 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1205 * We don't allow preallocation to exceed softlimit so exceeding will 1255 * We don't allow preallocation to exceed softlimit so exceeding will
1206 * be always printed 1256 * be always printed
1207 */ 1257 */
1208 return NO_QUOTA; 1258 return -EDQUOT;
1209 } 1259 }
1210 1260
1211 return QUOTA_OK; 1261 return 0;
1212} 1262}
1213 1263
1214static int info_idq_free(struct dquot *dquot, qsize_t inodes) 1264static int info_idq_free(struct dquot *dquot, qsize_t inodes)
@@ -1242,25 +1292,32 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
1242 return QUOTA_NL_BHARDBELOW; 1292 return QUOTA_NL_BHARDBELOW;
1243 return QUOTA_NL_NOWARN; 1293 return QUOTA_NL_NOWARN;
1244} 1294}
1295
1245/* 1296/*
1246 * Initialize quota pointers in inode 1297 * Initialize quota pointers in inode
1247 * We do things in a bit complicated way but by that we avoid calling 1298 *
1248 * dqget() and thus filesystem callbacks under dqptr_sem. 1299 * We do things in a bit complicated way but by that we avoid calling
1300 * dqget() and thus filesystem callbacks under dqptr_sem.
1301 *
1302 * It is better to call this function outside of any transaction as it
1303 * might need a lot of space in journal for dquot structure allocation.
1249 */ 1304 */
1250int dquot_initialize(struct inode *inode, int type) 1305static void __dquot_initialize(struct inode *inode, int type)
1251{ 1306{
1252 unsigned int id = 0; 1307 unsigned int id = 0;
1253 int cnt, ret = 0; 1308 int cnt;
1254 struct dquot *got[MAXQUOTAS] = { NULL, NULL }; 1309 struct dquot *got[MAXQUOTAS];
1255 struct super_block *sb = inode->i_sb; 1310 struct super_block *sb = inode->i_sb;
1311 qsize_t rsv;
1256 1312
1257 /* First test before acquiring mutex - solves deadlocks when we 1313 /* First test before acquiring mutex - solves deadlocks when we
1258 * re-enter the quota code and are already holding the mutex */ 1314 * re-enter the quota code and are already holding the mutex */
1259 if (IS_NOQUOTA(inode)) 1315 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
1260 return 0; 1316 return;
1261 1317
1262 /* First get references to structures we might need. */ 1318 /* First get references to structures we might need. */
1263 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1319 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1320 got[cnt] = NULL;
1264 if (type != -1 && cnt != type) 1321 if (type != -1 && cnt != type)
1265 continue; 1322 continue;
1266 switch (cnt) { 1323 switch (cnt) {
@@ -1275,7 +1332,6 @@ int dquot_initialize(struct inode *inode, int type)
1275 } 1332 }
1276 1333
1277 down_write(&sb_dqopt(sb)->dqptr_sem); 1334 down_write(&sb_dqopt(sb)->dqptr_sem);
1278 /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
1279 if (IS_NOQUOTA(inode)) 1335 if (IS_NOQUOTA(inode))
1280 goto out_err; 1336 goto out_err;
1281 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1337 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1287,20 +1343,31 @@ int dquot_initialize(struct inode *inode, int type)
1287 if (!inode->i_dquot[cnt]) { 1343 if (!inode->i_dquot[cnt]) {
1288 inode->i_dquot[cnt] = got[cnt]; 1344 inode->i_dquot[cnt] = got[cnt];
1289 got[cnt] = NULL; 1345 got[cnt] = NULL;
1346 /*
1347 * Make quota reservation system happy if someone
1348 * did a write before quota was turned on
1349 */
1350 rsv = inode_get_rsv_space(inode);
1351 if (unlikely(rsv))
1352 dquot_resv_space(inode->i_dquot[cnt], rsv);
1290 } 1353 }
1291 } 1354 }
1292out_err: 1355out_err:
1293 up_write(&sb_dqopt(sb)->dqptr_sem); 1356 up_write(&sb_dqopt(sb)->dqptr_sem);
1294 /* Drop unused references */ 1357 /* Drop unused references */
1295 dqput_all(got); 1358 dqput_all(got);
1296 return ret; 1359}
1360
1361void dquot_initialize(struct inode *inode)
1362{
1363 __dquot_initialize(inode, -1);
1297} 1364}
1298EXPORT_SYMBOL(dquot_initialize); 1365EXPORT_SYMBOL(dquot_initialize);
1299 1366
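dquot_initialize() is now a void helper that always initializes all quota types, and per the updated comment it is best called before a transaction is started. A hypothetical call site (example_prepare_unlink is an invented name) mirroring how converted filesystems use it:

#include <linux/quotaops.h>

/* Illustration only: attach dquots before any quota-accounted work,
 * outside the journal transaction as the comment above recommends. */
static void example_prepare_unlink(struct inode *dir, struct inode *inode)
{
	dquot_initialize(dir);		/* directory loses an entry */
	dquot_initialize(inode);	/* victim's inode/block counts change */
}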
1300/* 1367/*
1301 * Release all quotas referenced by inode 1368 * Release all quotas referenced by inode
1302 */ 1369 */
1303int dquot_drop(struct inode *inode) 1370static void __dquot_drop(struct inode *inode)
1304{ 1371{
1305 int cnt; 1372 int cnt;
1306 struct dquot *put[MAXQUOTAS]; 1373 struct dquot *put[MAXQUOTAS];
@@ -1312,32 +1379,31 @@ int dquot_drop(struct inode *inode)
1312 } 1379 }
1313 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1380 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1314 dqput_all(put); 1381 dqput_all(put);
1315 return 0;
1316} 1382}
1317EXPORT_SYMBOL(dquot_drop);
1318 1383
1319/* Wrapper to remove references to quota structures from inode */ 1384void dquot_drop(struct inode *inode)
1320void vfs_dq_drop(struct inode *inode) 1385{
1321{ 1386 int cnt;
1322 /* Here we can get arbitrary inode from clear_inode() so we have 1387
1323 * to be careful. OTOH we don't need locking as quota operations 1388 if (IS_NOQUOTA(inode))
1324 * are allowed to change only at mount time */ 1389 return;
1325 if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op 1390
1326 && inode->i_sb->dq_op->drop) { 1391 /*
1327 int cnt; 1392 * Test before calling to rule out calls from proc and such
1328 /* Test before calling to rule out calls from proc and such 1393 * where we are not allowed to block. Note that this is
1329 * where we are not allowed to block. Note that this is 1394 * actually reliable test even without the lock - the caller
1330 * actually reliable test even without the lock - the caller 1395 * must assure that nobody can come after the DQUOT_DROP and
1331 * must assure that nobody can come after the DQUOT_DROP and 1396 * add quota pointers back anyway.
1332 * add quota pointers back anyway */ 1397 */
1333 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1398 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1334 if (inode->i_dquot[cnt]) 1399 if (inode->i_dquot[cnt])
1335 break; 1400 break;
1336 if (cnt < MAXQUOTAS) 1401 }
1337 inode->i_sb->dq_op->drop(inode); 1402
1338 } 1403 if (cnt < MAXQUOTAS)
1339} 1404 __dquot_drop(inode);
1340EXPORT_SYMBOL(vfs_dq_drop); 1405}
1406EXPORT_SYMBOL(dquot_drop);
1341 1407
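dquot_drop() likewise becomes void and performs its own IS_NOQUOTA and empty-pointer checks, so eviction paths can call it unconditionally. A hypothetical sketch (example_clear_inode is an invented name), in the spirit of the ->clear_inode conversions that accompany this change:

#include <linux/quotaops.h>

/* Illustration only: release dquot references when an inode is evicted. */
static void example_clear_inode(struct inode *inode)
{
	dquot_drop(inode);
}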
1342/* 1408/*
1343 * inode_reserved_space is managed internally by quota, and protected by 1409 * inode_reserved_space is managed internally by quota, and protected by
@@ -1351,32 +1417,37 @@ static qsize_t *inode_reserved_space(struct inode * inode)
1351 return inode->i_sb->dq_op->get_reserved_space(inode); 1417 return inode->i_sb->dq_op->get_reserved_space(inode);
1352} 1418}
1353 1419
1354static void inode_add_rsv_space(struct inode *inode, qsize_t number) 1420void inode_add_rsv_space(struct inode *inode, qsize_t number)
1355{ 1421{
1356 spin_lock(&inode->i_lock); 1422 spin_lock(&inode->i_lock);
1357 *inode_reserved_space(inode) += number; 1423 *inode_reserved_space(inode) += number;
1358 spin_unlock(&inode->i_lock); 1424 spin_unlock(&inode->i_lock);
1359} 1425}
1426EXPORT_SYMBOL(inode_add_rsv_space);
1360 1427
1361 1428void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1362static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
1363{ 1429{
1364 spin_lock(&inode->i_lock); 1430 spin_lock(&inode->i_lock);
1365 *inode_reserved_space(inode) -= number; 1431 *inode_reserved_space(inode) -= number;
1366 __inode_add_bytes(inode, number); 1432 __inode_add_bytes(inode, number);
1367 spin_unlock(&inode->i_lock); 1433 spin_unlock(&inode->i_lock);
1368} 1434}
1435EXPORT_SYMBOL(inode_claim_rsv_space);
1369 1436
1370static void inode_sub_rsv_space(struct inode *inode, qsize_t number) 1437void inode_sub_rsv_space(struct inode *inode, qsize_t number)
1371{ 1438{
1372 spin_lock(&inode->i_lock); 1439 spin_lock(&inode->i_lock);
1373 *inode_reserved_space(inode) -= number; 1440 *inode_reserved_space(inode) -= number;
1374 spin_unlock(&inode->i_lock); 1441 spin_unlock(&inode->i_lock);
1375} 1442}
1443EXPORT_SYMBOL(inode_sub_rsv_space);
1376 1444
1377static qsize_t inode_get_rsv_space(struct inode *inode) 1445static qsize_t inode_get_rsv_space(struct inode *inode)
1378{ 1446{
1379 qsize_t ret; 1447 qsize_t ret;
1448
1449 if (!inode->i_sb->dq_op->get_reserved_space)
1450 return 0;
1380 spin_lock(&inode->i_lock); 1451 spin_lock(&inode->i_lock);
1381 ret = *inode_reserved_space(inode); 1452 ret = *inode_reserved_space(inode);
1382 spin_unlock(&inode->i_lock); 1453 spin_unlock(&inode->i_lock);
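inode_get_rsv_space() now copes with filesystems that provide no ->get_reserved_space hook at all. For filesystems that do, the hook only has to hand the quota core a pointer to a per-inode reservation counter. A hedged sketch of such a hook; struct example_inode_info, i_reserved_quota and EXAMPLE_I() are invented names, modelled on what a delayed-allocation filesystem keeps in its in-memory inode:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/quota.h>

/* Invented in-memory inode with a reserved-space counter in bytes. */
struct example_inode_info {
	qsize_t		i_reserved_quota;
	struct inode	vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

/* Illustration only: point the quota core at this filesystem's counter
 * (wired up through dquot_operations.get_reserved_space). */
static qsize_t *example_get_reserved_space(struct inode *inode)
{
	return &EXAMPLE_I(inode)->i_reserved_quota;
}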
@@ -1401,38 +1472,34 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1401} 1472}
1402 1473
1403/* 1474/*
1404 * Following four functions update i_blocks+i_bytes fields and 1475 * This function updates i_blocks+i_bytes fields and quota information
1405 * quota information (together with appropriate checks) 1476 * (together with appropriate checks).
1406 * NOTE: We absolutely rely on the fact that caller dirties 1477 *
1407 * the inode (usually macros in quotaops.h care about this) and 1478 * NOTE: We absolutely rely on the fact that caller dirties the inode
1408 * holds a handle for the current transaction so that dquot write and 1479 * (usually helpers in quotaops.h care about this) and holds a handle for
1409 * inode write go into the same transaction. 1480 * the current transaction so that dquot write and inode write go into the
1481 * same transaction.
1410 */ 1482 */
1411 1483
1412/* 1484/*
1413 * This operation can block, but only after everything is updated 1485 * This operation can block, but only after everything is updated
1414 */ 1486 */
1415int __dquot_alloc_space(struct inode *inode, qsize_t number, 1487int __dquot_alloc_space(struct inode *inode, qsize_t number,
1416 int warn, int reserve) 1488 int warn, int reserve)
1417{ 1489{
1418 int cnt, ret = QUOTA_OK; 1490 int cnt, ret = 0;
1419 char warntype[MAXQUOTAS]; 1491 char warntype[MAXQUOTAS];
1420 1492
1421 /* 1493 /*
1422 * First test before acquiring mutex - solves deadlocks when we 1494 * First test before acquiring mutex - solves deadlocks when we
1423 * re-enter the quota code and are already holding the mutex 1495 * re-enter the quota code and are already holding the mutex
1424 */ 1496 */
1425 if (IS_NOQUOTA(inode)) { 1497 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1426 inode_incr_space(inode, number, reserve); 1498 inode_incr_space(inode, number, reserve);
1427 goto out; 1499 goto out;
1428 } 1500 }
1429 1501
1430 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1502 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1431 if (IS_NOQUOTA(inode)) {
1432 inode_incr_space(inode, number, reserve);
1433 goto out_unlock;
1434 }
1435
1436 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1503 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1437 warntype[cnt] = QUOTA_NL_NOWARN; 1504 warntype[cnt] = QUOTA_NL_NOWARN;
1438 1505
@@ -1440,9 +1507,9 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
1440 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1507 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1441 if (!inode->i_dquot[cnt]) 1508 if (!inode->i_dquot[cnt])
1442 continue; 1509 continue;
1443 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) 1510 ret = check_bdq(inode->i_dquot[cnt], number, !warn,
1444 == NO_QUOTA) { 1511 warntype+cnt);
1445 ret = NO_QUOTA; 1512 if (ret) {
1446 spin_unlock(&dq_data_lock); 1513 spin_unlock(&dq_data_lock);
1447 goto out_flush_warn; 1514 goto out_flush_warn;
1448 } 1515 }
@@ -1463,61 +1530,45 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
1463 mark_all_dquot_dirty(inode->i_dquot); 1530 mark_all_dquot_dirty(inode->i_dquot);
1464out_flush_warn: 1531out_flush_warn:
1465 flush_warnings(inode->i_dquot, warntype); 1532 flush_warnings(inode->i_dquot, warntype);
1466out_unlock:
1467 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1533 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1468out: 1534out:
1469 return ret; 1535 return ret;
1470} 1536}
1471 1537EXPORT_SYMBOL(__dquot_alloc_space);
1472int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1473{
1474 return __dquot_alloc_space(inode, number, warn, 0);
1475}
1476EXPORT_SYMBOL(dquot_alloc_space);
1477
1478int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
1479{
1480 return __dquot_alloc_space(inode, number, warn, 1);
1481}
1482EXPORT_SYMBOL(dquot_reserve_space);
1483 1538
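With dquot_alloc_space() and dquot_reserve_space() removed from dquot.c, filesystems charge space through the exported __dquot_alloc_space(), which now returns 0 or a negative errno instead of QUOTA_OK/NO_QUOTA. A hypothetical allocation fragment (example_alloc_bytes is an invented name); as the rewritten comment above stresses, the caller still dirties the inode itself:

#include <linux/quotaops.h>

/* Illustration only: charge @bytes against the owner's quota before
 * allocating them on disk, and back out on failure. */
static int example_alloc_bytes(struct inode *inode, qsize_t bytes)
{
	int ret;

	ret = __dquot_alloc_space(inode, bytes, 1 /* warn */, 0 /* !reserve */);
	if (ret)
		return ret;	/* typically -EDQUOT */
	/*
	 * ... allocate blocks on disk here; if that fails, undo the charge
	 * with __dquot_free_space(inode, bytes, 0) ...
	 */
	mark_inode_dirty(inode);
	return 0;
}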
1484/* 1539/*
1485 * This operation can block, but only after everything is updated 1540 * This operation can block, but only after everything is updated
1486 */ 1541 */
1487int dquot_alloc_inode(const struct inode *inode, qsize_t number) 1542int dquot_alloc_inode(const struct inode *inode)
1488{ 1543{
1489 int cnt, ret = NO_QUOTA; 1544 int cnt, ret = 0;
1490 char warntype[MAXQUOTAS]; 1545 char warntype[MAXQUOTAS];
1491 1546
1492 /* First test before acquiring mutex - solves deadlocks when we 1547 /* First test before acquiring mutex - solves deadlocks when we
1493 * re-enter the quota code and are already holding the mutex */ 1548 * re-enter the quota code and are already holding the mutex */
1494 if (IS_NOQUOTA(inode)) 1549 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
1495 return QUOTA_OK; 1550 return 0;
1496 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1551 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1497 warntype[cnt] = QUOTA_NL_NOWARN; 1552 warntype[cnt] = QUOTA_NL_NOWARN;
1498 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1553 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1499 if (IS_NOQUOTA(inode)) {
1500 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1501 return QUOTA_OK;
1502 }
1503 spin_lock(&dq_data_lock); 1554 spin_lock(&dq_data_lock);
1504 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1555 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1505 if (!inode->i_dquot[cnt]) 1556 if (!inode->i_dquot[cnt])
1506 continue; 1557 continue;
1507 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) 1558 ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
1508 == NO_QUOTA) 1559 if (ret)
1509 goto warn_put_all; 1560 goto warn_put_all;
1510 } 1561 }
1511 1562
1512 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1563 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1513 if (!inode->i_dquot[cnt]) 1564 if (!inode->i_dquot[cnt])
1514 continue; 1565 continue;
1515 dquot_incr_inodes(inode->i_dquot[cnt], number); 1566 dquot_incr_inodes(inode->i_dquot[cnt], 1);
1516 } 1567 }
1517 ret = QUOTA_OK; 1568
1518warn_put_all: 1569warn_put_all:
1519 spin_unlock(&dq_data_lock); 1570 spin_unlock(&dq_data_lock);
1520 if (ret == QUOTA_OK) 1571 if (ret == 0)
1521 mark_all_dquot_dirty(inode->i_dquot); 1572 mark_all_dquot_dirty(inode->i_dquot);
1522 flush_warnings(inode->i_dquot, warntype); 1573 flush_warnings(inode->i_dquot, warntype);
1523 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1574 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1525,23 +1576,19 @@ warn_put_all:
1525} 1576}
1526EXPORT_SYMBOL(dquot_alloc_inode); 1577EXPORT_SYMBOL(dquot_alloc_inode);
1527 1578
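dquot_alloc_inode() loses its count argument (it always charged exactly one inode) and follows the same 0/-EDQUOT convention. A hypothetical inode-creation fragment (example_charge_new_inode is an invented name):

#include <linux/quotaops.h>

/* Illustration only: charge a freshly created inode to its owner. */
static int example_charge_new_inode(struct inode *inode)
{
	int ret;

	dquot_initialize(inode);	/* attach dquots for the new owner */
	ret = dquot_alloc_inode(inode);
	if (ret)
		return ret;		/* over quota: caller undoes the create */
	mark_inode_dirty(inode);
	return 0;
}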
1528int dquot_claim_space(struct inode *inode, qsize_t number) 1579/*
1580 * Convert in-memory reserved quotas to real consumed quotas
1581 */
1582int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1529{ 1583{
1530 int cnt; 1584 int cnt;
1531 int ret = QUOTA_OK;
1532 1585
1533 if (IS_NOQUOTA(inode)) { 1586 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1534 inode_claim_rsv_space(inode, number); 1587 inode_claim_rsv_space(inode, number);
1535 goto out; 1588 return 0;
1536 } 1589 }
1537 1590
1538 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1591 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1539 if (IS_NOQUOTA(inode)) {
1540 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1541 inode_claim_rsv_space(inode, number);
1542 goto out;
1543 }
1544
1545 spin_lock(&dq_data_lock); 1592 spin_lock(&dq_data_lock);
1546 /* Claim reserved quotas to allocated quotas */ 1593 /* Claim reserved quotas to allocated quotas */
1547 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1594 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1554,33 +1601,26 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
1554 spin_unlock(&dq_data_lock); 1601 spin_unlock(&dq_data_lock);
1555 mark_all_dquot_dirty(inode->i_dquot); 1602 mark_all_dquot_dirty(inode->i_dquot);
1556 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1603 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1557out: 1604 return 0;
1558 return ret;
1559} 1605}
1560EXPORT_SYMBOL(dquot_claim_space); 1606EXPORT_SYMBOL(dquot_claim_space_nodirty);
1561 1607
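The _nodirty suffix makes explicit that the caller is responsible for marking the inode dirty after the reservation has been converted. A hypothetical delayed-allocation sequence (example_delalloc_write is an invented name): reserve at write time, then claim once the blocks are actually mapped:

#include <linux/quotaops.h>

/* Illustration only: reserve first, claim later. */
static int example_delalloc_write(struct inode *inode, qsize_t bytes)
{
	int ret;

	ret = __dquot_alloc_space(inode, bytes, 1 /* warn */, 1 /* reserve */);
	if (ret)
		return ret;
	/* ... later, when the delayed blocks are really allocated ... */
	dquot_claim_space_nodirty(inode, bytes);
	mark_inode_dirty(inode);
	return 0;
}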
1562/* 1608/*
1563 * This operation can block, but only after everything is updated 1609 * This operation can block, but only after everything is updated
1564 */ 1610 */
1565int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) 1611void __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
1566{ 1612{
1567 unsigned int cnt; 1613 unsigned int cnt;
1568 char warntype[MAXQUOTAS]; 1614 char warntype[MAXQUOTAS];
1569 1615
1570 /* First test before acquiring mutex - solves deadlocks when we 1616 /* First test before acquiring mutex - solves deadlocks when we
1571 * re-enter the quota code and are already holding the mutex */ 1617 * re-enter the quota code and are already holding the mutex */
1572 if (IS_NOQUOTA(inode)) { 1618 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1573out_sub:
1574 inode_decr_space(inode, number, reserve); 1619 inode_decr_space(inode, number, reserve);
1575 return QUOTA_OK; 1620 return;
1576 } 1621 }
1577 1622
1578 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1623 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1579 /* Now recheck reliably when holding dqptr_sem */
1580 if (IS_NOQUOTA(inode)) {
1581 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1582 goto out_sub;
1583 }
1584 spin_lock(&dq_data_lock); 1624 spin_lock(&dq_data_lock);
1585 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1625 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1586 if (!inode->i_dquot[cnt]) 1626 if (!inode->i_dquot[cnt])
@@ -1600,56 +1640,34 @@ out_sub:
1600out_unlock: 1640out_unlock:
1601 flush_warnings(inode->i_dquot, warntype); 1641 flush_warnings(inode->i_dquot, warntype);
1602 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1642 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1603 return QUOTA_OK;
1604}
1605
1606int dquot_free_space(struct inode *inode, qsize_t number)
1607{
1608 return __dquot_free_space(inode, number, 0);
1609}
1610EXPORT_SYMBOL(dquot_free_space);
1611
1612/*
1613 * Release reserved quota space
1614 */
1615void dquot_release_reserved_space(struct inode *inode, qsize_t number)
1616{
1617 __dquot_free_space(inode, number, 1);
1618
1619} 1643}
1620EXPORT_SYMBOL(dquot_release_reserved_space); 1644EXPORT_SYMBOL(__dquot_free_space);
1621 1645
1622/* 1646/*
1623 * This operation can block, but only after everything is updated 1647 * This operation can block, but only after everything is updated
1624 */ 1648 */
1625int dquot_free_inode(const struct inode *inode, qsize_t number) 1649void dquot_free_inode(const struct inode *inode)
1626{ 1650{
1627 unsigned int cnt; 1651 unsigned int cnt;
1628 char warntype[MAXQUOTAS]; 1652 char warntype[MAXQUOTAS];
1629 1653
1630 /* First test before acquiring mutex - solves deadlocks when we 1654 /* First test before acquiring mutex - solves deadlocks when we
1631 * re-enter the quota code and are already holding the mutex */ 1655 * re-enter the quota code and are already holding the mutex */
1632 if (IS_NOQUOTA(inode)) 1656 if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
1633 return QUOTA_OK; 1657 return;
1634 1658
1635 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1659 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1636 /* Now recheck reliably when holding dqptr_sem */
1637 if (IS_NOQUOTA(inode)) {
1638 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1639 return QUOTA_OK;
1640 }
1641 spin_lock(&dq_data_lock); 1660 spin_lock(&dq_data_lock);
1642 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1661 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1643 if (!inode->i_dquot[cnt]) 1662 if (!inode->i_dquot[cnt])
1644 continue; 1663 continue;
1645 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); 1664 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
1646 dquot_decr_inodes(inode->i_dquot[cnt], number); 1665 dquot_decr_inodes(inode->i_dquot[cnt], 1);
1647 } 1666 }
1648 spin_unlock(&dq_data_lock); 1667 spin_unlock(&dq_data_lock);
1649 mark_all_dquot_dirty(inode->i_dquot); 1668 mark_all_dquot_dirty(inode->i_dquot);
1650 flush_warnings(inode->i_dquot, warntype); 1669 flush_warnings(inode->i_dquot, warntype);
1651 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1670 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1652 return QUOTA_OK;
1653} 1671}
1654EXPORT_SYMBOL(dquot_free_inode); 1672EXPORT_SYMBOL(dquot_free_inode);
1655 1673
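Both __dquot_free_space() and dquot_free_inode() are void now, so teardown paths have no return value to check, and dquot_free_inode() always uncharges exactly one inode. A hypothetical deletion fragment (example_uncharge_deleted_inode is an invented name):

#include <linux/quotaops.h>

/* Illustration only: give back @bytes of data and the inode itself. */
static void example_uncharge_deleted_inode(struct inode *inode, qsize_t bytes)
{
	__dquot_free_space(inode, bytes, 0 /* !reserve */);
	dquot_free_inode(inode);
}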
@@ -1659,37 +1677,31 @@ EXPORT_SYMBOL(dquot_free_inode);
1659 * This operation can block, but only after everything is updated 1677 * This operation can block, but only after everything is updated
1660 * A transaction must be started when entering this function. 1678 * A transaction must be started when entering this function.
1661 */ 1679 */
1662int dquot_transfer(struct inode *inode, struct iattr *iattr) 1680static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask)
1663{ 1681{
1664 qsize_t space, cur_space; 1682 qsize_t space, cur_space;
1665 qsize_t rsv_space = 0; 1683 qsize_t rsv_space = 0;
1666 struct dquot *transfer_from[MAXQUOTAS]; 1684 struct dquot *transfer_from[MAXQUOTAS];
1667 struct dquot *transfer_to[MAXQUOTAS]; 1685 struct dquot *transfer_to[MAXQUOTAS];
1668 int cnt, ret = QUOTA_OK; 1686 int cnt, ret = 0;
1669 int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
1670 chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
1671 char warntype_to[MAXQUOTAS]; 1687 char warntype_to[MAXQUOTAS];
1672 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; 1688 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1673 1689
1674 /* First test before acquiring mutex - solves deadlocks when we 1690 /* First test before acquiring mutex - solves deadlocks when we
1675 * re-enter the quota code and are already holding the mutex */ 1691 * re-enter the quota code and are already holding the mutex */
1676 if (IS_NOQUOTA(inode)) 1692 if (IS_NOQUOTA(inode))
1677 return QUOTA_OK; 1693 return 0;
1678 /* Initialize the arrays */ 1694 /* Initialize the arrays */
1679 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1695 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1680 transfer_from[cnt] = NULL; 1696 transfer_from[cnt] = NULL;
1681 transfer_to[cnt] = NULL; 1697 transfer_to[cnt] = NULL;
1682 warntype_to[cnt] = QUOTA_NL_NOWARN; 1698 warntype_to[cnt] = QUOTA_NL_NOWARN;
1683 } 1699 }
1684 if (chuid) 1700 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1685 transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, 1701 if (mask & (1 << cnt))
1686 USRQUOTA); 1702 transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
1687 if (chgid) 1703 }
1688 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
1689 GRPQUOTA);
1690
1691 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1704 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1692 /* Now recheck reliably when holding dqptr_sem */
1693 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 1705 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1694 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1706 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1695 goto put_all; 1707 goto put_all;
@@ -1703,9 +1715,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1703 if (!transfer_to[cnt]) 1715 if (!transfer_to[cnt])
1704 continue; 1716 continue;
1705 transfer_from[cnt] = inode->i_dquot[cnt]; 1717 transfer_from[cnt] = inode->i_dquot[cnt];
1706 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == 1718 ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
1707 NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, 1719 if (ret)
1708 warntype_to + cnt) == NO_QUOTA) 1720 goto over_quota;
1721 ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
1722 if (ret)
1709 goto over_quota; 1723 goto over_quota;
1710 } 1724 }
1711 1725
@@ -1759,22 +1773,32 @@ over_quota:
1759 /* Clear dquot pointers we don't want to dqput() */ 1773 /* Clear dquot pointers we don't want to dqput() */
1760 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1774 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1761 transfer_from[cnt] = NULL; 1775 transfer_from[cnt] = NULL;
1762 ret = NO_QUOTA;
1763 goto warn_put_all; 1776 goto warn_put_all;
1764} 1777}
1765EXPORT_SYMBOL(dquot_transfer);
1766 1778
1767/* Wrapper for transferring ownership of an inode */ 1779/* Wrapper for transferring ownership of an inode for uid/gid only
1768int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) 1780 * Called from FSXXX_setattr()
1781 */
1782int dquot_transfer(struct inode *inode, struct iattr *iattr)
1769{ 1783{
1784 qid_t chid[MAXQUOTAS];
1785 unsigned long mask = 0;
1786
1787 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) {
1788 mask |= 1 << USRQUOTA;
1789 chid[USRQUOTA] = iattr->ia_uid;
1790 }
1791 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) {
1792 mask |= 1 << GRPQUOTA;
1793 chid[GRPQUOTA] = iattr->ia_gid;
1794 }
1770 if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { 1795 if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
1771 vfs_dq_init(inode); 1796 dquot_initialize(inode);
1772 if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) 1797 return __dquot_transfer(inode, chid, mask);
1773 return 1;
1774 } 1798 }
1775 return 0; 1799 return 0;
1776} 1800}
1777EXPORT_SYMBOL(vfs_dq_transfer); 1801EXPORT_SYMBOL(dquot_transfer);
1778 1802
1779/* 1803/*
1780 * Write info of quota file to disk 1804 * Write info of quota file to disk
@@ -1795,13 +1819,6 @@ EXPORT_SYMBOL(dquot_commit_info);
1795 * Definitions of diskquota operations. 1819 * Definitions of diskquota operations.
1796 */ 1820 */
1797const struct dquot_operations dquot_operations = { 1821const struct dquot_operations dquot_operations = {
1798 .initialize = dquot_initialize,
1799 .drop = dquot_drop,
1800 .alloc_space = dquot_alloc_space,
1801 .alloc_inode = dquot_alloc_inode,
1802 .free_space = dquot_free_space,
1803 .free_inode = dquot_free_inode,
1804 .transfer = dquot_transfer,
1805 .write_dquot = dquot_commit, 1822 .write_dquot = dquot_commit,
1806 .acquire_dquot = dquot_acquire, 1823 .acquire_dquot = dquot_acquire,
1807 .release_dquot = dquot_release, 1824 .release_dquot = dquot_release,
@@ -1812,6 +1829,20 @@ const struct dquot_operations dquot_operations = {
1812}; 1829};
1813 1830
1814/* 1831/*
1832 * Generic helper for ->open on filesystems supporting disk quotas.
1833 */
1834int dquot_file_open(struct inode *inode, struct file *file)
1835{
1836 int error;
1837
1838 error = generic_file_open(inode, file);
1839 if (!error && (file->f_mode & FMODE_WRITE))
1840 dquot_initialize(inode);
1841 return error;
1842}
1843EXPORT_SYMBOL(dquot_file_open);
1844
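dquot_file_open() gives quota-aware filesystems a drop-in ->open: it behaves like generic_file_open() and additionally attaches dquots when the file is opened for writing. A hypothetical file_operations wired up the way the in-tree conversions in the same series (ext2, for instance) do it; example_file_operations is an invented name:

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Illustration only: use the generic quota-aware open. */
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= dquot_file_open,
	.fsync		= simple_fsync,
};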
1845/*
1815 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) 1846 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1816 */ 1847 */
1817int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) 1848int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
@@ -1990,11 +2021,13 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1990 } 2021 }
1991 2022
1992 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2023 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
1993 /* As we bypass the pagecache we must now flush the inode so 2024 /* As we bypass the pagecache we must now flush all the
1994 * that we see all the changes from userspace... */ 2025 * dirty data and invalidate caches so that kernel sees
1995 write_inode_now(inode, 1); 2026 * changes from userspace. It is not enough to just flush
1996 /* And now flush the block cache so that kernel sees the 2027 * the quota file since if blocksize < pagesize, invalidation
1997 * changes */ 2028 * of the cache could fail because of other unrelated dirty
2029 * data */
2030 sync_filesystem(sb);
1998 invalidate_bdev(sb->s_bdev); 2031 invalidate_bdev(sb->s_bdev);
1999 } 2032 }
2000 mutex_lock(&dqopt->dqonoff_mutex); 2033 mutex_lock(&dqopt->dqonoff_mutex);
@@ -2007,14 +2040,16 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2007 /* We don't want quota and atime on quota files (deadlocks 2040 /* We don't want quota and atime on quota files (deadlocks
2008 * possible) Also nobody should write to the file - we use 2041 * possible) Also nobody should write to the file - we use
2009 * special IO operations which ignore the immutable bit. */ 2042 * special IO operations which ignore the immutable bit. */
2010 down_write(&dqopt->dqptr_sem);
2011 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2043 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2012 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | 2044 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2013 S_NOQUOTA); 2045 S_NOQUOTA);
2014 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2046 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
2015 mutex_unlock(&inode->i_mutex); 2047 mutex_unlock(&inode->i_mutex);
2016 up_write(&dqopt->dqptr_sem); 2048 /*
2017 sb->dq_op->drop(inode); 2049 * When S_NOQUOTA is set, remove dquot references as no more
2050 * references can be added
2051 */
2052 __dquot_drop(inode);
2018 } 2053 }
2019 2054
2020 error = -EIO; 2055 error = -EIO;
@@ -2050,14 +2085,12 @@ out_file_init:
2050 iput(inode); 2085 iput(inode);
2051out_lock: 2086out_lock:
2052 if (oldflags != -1) { 2087 if (oldflags != -1) {
2053 down_write(&dqopt->dqptr_sem);
2054 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2088 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2055 /* Set the flags back (in the case of accidental quotaon() 2089 /* Set the flags back (in the case of accidental quotaon()
2056 * on a wrong file we don't want to mess up the flags) */ 2090 * on a wrong file we don't want to mess up the flags) */
2057 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); 2091 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
2058 inode->i_flags |= oldflags; 2092 inode->i_flags |= oldflags;
2059 mutex_unlock(&inode->i_mutex); 2093 mutex_unlock(&inode->i_mutex);
2060 up_write(&dqopt->dqptr_sem);
2061 } 2094 }
2062 mutex_unlock(&dqopt->dqonoff_mutex); 2095 mutex_unlock(&dqopt->dqonoff_mutex);
2063out_fmt: 2096out_fmt: