author     Jan Kara <jack@suse.cz>    2009-01-12 11:23:05 -0500
committer  Jan Kara <jack@suse.cz>    2009-01-16 12:02:10 -0500
commit     cc33412fb1f11613e20f9dfc2919a77ecd63fbc4 (patch)
tree       657ccb0860127852179efe8c13fc24b5ba624017 /fs
parent     7cb36b6ccdca03bd87e8faca7fd920643dd1aec7 (diff)
quota: Improve locking
We implement dqget() and dqput() that need neither dqonoff_mutex nor dqptr_sem.
Then move dqget() and dqput() calls so that they are not called from under
dqptr_sem. This is important because filesystem callbacks are then no longer
invoked under dqptr_sem, which used to cause *lots* of problems with lock
ranking (and with OCFS2 they became close to unsolvable).
The patch also removes two functions which were introduced solely because OCFS2
needed them to cope with the old locking scheme. As time showed, they were not
enough for OCFS2 anyway and it would be unnecessary work to adapt them to the
new locking scheme in which they aren't needed. As a result OCFS2 needs the
following patch to compile properly with quotas. Apologies in advance to any
bisecters who hit this.
Signed-off-by: Jan Kara <jack@suse.cz>
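The core of the new scheme is easiest to see in dqget() in the diff below: the
quota state is re-checked under dq_list_lock and dq_state_lock, and a reference
is taken before dq_list_lock is dropped, so a racing quotaoff() cannot free the
dquot out from under the caller. The following stand-alone user-space model of
that pattern is only an illustration (plain C with pthreads; cache_lookup,
cache_disable and struct obj are made-up names, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct obj {
        int id;
        int refcount;           /* protected by list_lock */
        struct obj *next;       /* hash chain, protected by list_lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int cache_active = 1;    /* protected by state_lock */
static struct obj *hash_head;   /* protected by list_lock */

/* Mirrors the new dqget(): bail out if the cache was disabled, otherwise pin
 * the object with a reference before list_lock is released. */
static struct obj *cache_lookup(int id)
{
        struct obj *o;

        pthread_mutex_lock(&list_lock);
        pthread_mutex_lock(&state_lock);
        if (!cache_active) {            /* raced with cache_disable() */
                pthread_mutex_unlock(&state_lock);
                pthread_mutex_unlock(&list_lock);
                return NULL;
        }
        pthread_mutex_unlock(&state_lock);

        for (o = hash_head; o; o = o->next)
                if (o->id == id)
                        break;
        if (o)
                o->refcount++;          /* reference taken under list_lock */
        pthread_mutex_unlock(&list_lock);
        return o;
}

/* Mirrors quotaoff: flip the state flag first; lookups started afterwards
 * fail, and already-referenced objects are only torn down once their
 * refcount drops (the wait loop is omitted here for brevity). */
static void cache_disable(void)
{
        pthread_mutex_lock(&state_lock);
        cache_active = 0;
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        struct obj one = { .id = 1, .refcount = 0, .next = NULL };

        hash_head = &one;
        printf("before disable: %p\n", (void *)cache_lookup(1));
        cache_disable();
        printf("after disable:  %p\n", (void *)cache_lookup(1));
        return 0;
}

Note that the model keeps the same lock ordering the patch documents for the
kernel (dq_list_lock is taken before dq_state_lock).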
Diffstat (limited to 'fs')
-rw-r--r--  fs/dquot.c | 218
1 file changed, 122 insertions(+), 96 deletions(-)
diff --git a/fs/dquot.c b/fs/dquot.c
index 48c0571f831d..bca3cac4bee7 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -87,14 +87,17 @@
 #define __DQUOT_PARANOIA
 
 /*
- * There are two quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats and also dqstats structure containing statistics about the
- * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
- * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * There are three quota SMP locks. dq_list_lock protects all lists with quotas
+ * and quota formats, dqstats structure containing statistics about the lists
+ * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
+ * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
  * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes().
+ * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
+ * modifications of quota state (on quotaon and quotaoff) and readers who care
+ * about latest values take it as well.
  *
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * dq_list_lock > dq_state_lock
  *
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
@@ -103,12 +106,7 @@
  * operation is just reading pointers from inode (or not using them at all) the
  * read lock is enough. If pointers are altered function must hold write lock
  * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_mutex is also needed). If operation is holding
- * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_mutex.
- * This locking assures that:
- * a) update/access to dquot pointers in inode is serialized
- * b) everyone is guarded against invalidate_dquots()
+ * for altering the flag i_mutex is also needed).
  *
  * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
@@ -122,10 +120,17 @@
  * Lock ordering (including related VFS locks) is the following:
  * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
  * dqio_mutex
+ * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
+ * dqptr_sem. But filesystem has to count with the fact that functions such as
+ * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
+ * from inside a transaction to keep filesystem consistency after a crash. Also
+ * filesystems usually want to do some IO on dquot from ->mark_dirty which is
+ * called with dqptr_sem held.
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static DEFINE_SPINLOCK(dq_list_lock);
+static DEFINE_SPINLOCK(dq_state_lock);
 DEFINE_SPINLOCK(dq_data_lock);
 
 static char *quotatypes[] = INITQFNAMES;
@@ -428,7 +433,7 @@ static inline void do_destroy_dquot(struct dquot *dquot)
  * quota is disabled and pointers from inodes removed so there cannot be new
  * quota users. There can still be some users of quotas due to inodes being
  * just deleted or pruned by prune_icache() (those are not attached to any
- * list). We have to wait for such users.
+ * list) or parallel quotactl call. We have to wait for such users.
  */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
@@ -600,7 +605,6 @@ static struct shrinker dqcache_shrinker = {
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_mutex held
  */
 void dqput(struct dquot *dquot)
 {
@@ -697,36 +701,30 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 }
 
 /*
- * Check whether dquot is in memory.
- * MUST be called with either dqptr_sem or dqonoff_mutex held
- */
-int dquot_is_cached(struct super_block *sb, unsigned int id, int type)
-{
-        unsigned int hashent = hashfn(sb, id, type);
-        int ret = 0;
-
-        if (!sb_has_quota_active(sb, type))
-                return 0;
-        spin_lock(&dq_list_lock);
-        if (find_dquot(hashent, sb, id, type) != NODQUOT)
-                ret = 1;
-        spin_unlock(&dq_list_lock);
-        return ret;
-}
-
-/*
  * Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_mutex held
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ *   a) checking for quota flags under dq_list_lock and
+ *   b) getting a reference to dquot before we release dq_list_lock
  */
 struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 {
         unsigned int hashent = hashfn(sb, id, type);
-        struct dquot *dquot, *empty = NODQUOT;
+        struct dquot *dquot = NODQUOT, *empty = NODQUOT;
 
         if (!sb_has_quota_active(sb, type))
                 return NODQUOT;
 we_slept:
         spin_lock(&dq_list_lock);
+        spin_lock(&dq_state_lock);
+        if (!sb_has_quota_active(sb, type)) {
+                spin_unlock(&dq_state_lock);
+                spin_unlock(&dq_list_lock);
+                goto out;
+        }
+        spin_unlock(&dq_state_lock);
+
         if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
                 if (empty == NODQUOT) {
                         spin_unlock(&dq_list_lock);
@@ -735,6 +733,7 @@ we_slept:
                         goto we_slept;
                 }
                 dquot = empty;
+                empty = NODQUOT;
                 dquot->dq_id = id;
                 /* all dquots go on the inuse_list */
                 put_inuse(dquot);
@@ -749,8 +748,6 @@ we_slept:
                 dqstats.cache_hits++;
                 dqstats.lookups++;
                 spin_unlock(&dq_list_lock);
-                if (empty)
-                        do_destroy_dquot(empty);
         }
         /* Wait for dq_lock - after this we know that either dquot_release() is already
          * finished or it will be canceled due to dq_count > 1 test */
@@ -758,11 +755,15 @@ we_slept:
         /* Read the dquot and instantiate it (everything done only if needed) */
         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
                 dqput(dquot);
-                return NODQUOT;
+                dquot = NODQUOT;
+                goto out;
         }
 #ifdef __DQUOT_PARANOIA
         BUG_ON(!dquot->dq_sb);  /* Has somebody invalidated entry under us? */
 #endif
+out:
+        if (empty)
+                do_destroy_dquot(empty);
 
         return dquot;
 }
@@ -1198,63 +1199,76 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
 }
 /*
  * Initialize quota pointers in inode
- * Transaction must be started at entry
+ * We do things in a bit complicated way but by that we avoid calling
+ * dqget() and thus filesystem callbacks under dqptr_sem.
  */
 int dquot_initialize(struct inode *inode, int type)
 {
         unsigned int id = 0;
         int cnt, ret = 0;
+        struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+        struct super_block *sb = inode->i_sb;
 
         /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
         if (IS_NOQUOTA(inode))
                 return 0;
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        /* First get references to structures we might need. */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+                if (type != -1 && cnt != type)
+                        continue;
+                switch (cnt) {
+                case USRQUOTA:
+                        id = inode->i_uid;
+                        break;
+                case GRPQUOTA:
+                        id = inode->i_gid;
+                        break;
+                }
+                got[cnt] = dqget(sb, id, cnt);
+        }
+
+        down_write(&sb_dqopt(sb)->dqptr_sem);
         /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
         if (IS_NOQUOTA(inode))
                 goto out_err;
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (type != -1 && cnt != type)
                         continue;
+                /* Avoid races with quotaoff() */
+                if (!sb_has_quota_active(sb, cnt))
+                        continue;
                 if (inode->i_dquot[cnt] == NODQUOT) {
-                        switch (cnt) {
-                        case USRQUOTA:
-                                id = inode->i_uid;
-                                break;
-                        case GRPQUOTA:
-                                id = inode->i_gid;
-                                break;
-                        }
-                        inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
+                        inode->i_dquot[cnt] = got[cnt];
+                        got[cnt] = NODQUOT;
                 }
         }
 out_err:
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        up_write(&sb_dqopt(sb)->dqptr_sem);
+        /* Drop unused references */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                dqput(got[cnt]);
         return ret;
 }
 
 /*
  * Release all quotas referenced by inode
- * Transaction must be started at an entry
  */
-int dquot_drop_locked(struct inode *inode)
+int dquot_drop(struct inode *inode)
 {
         int cnt;
+        struct dquot *put[MAXQUOTAS];
 
+        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (inode->i_dquot[cnt] != NODQUOT) {
-                        dqput(inode->i_dquot[cnt]);
-                        inode->i_dquot[cnt] = NODQUOT;
-                }
+                put[cnt] = inode->i_dquot[cnt];
+                inode->i_dquot[cnt] = NODQUOT;
         }
-        return 0;
-}
-
-int dquot_drop(struct inode *inode)
-{
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-        dquot_drop_locked(inode);
         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                dqput(put[cnt]);
         return 0;
 }
 
@@ -1470,8 +1484,9 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
         qsize_t space;
         struct dquot *transfer_from[MAXQUOTAS];
         struct dquot *transfer_to[MAXQUOTAS];
-        int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
-            chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
+        int cnt, ret = QUOTA_OK;
+        int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
+            chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
         char warntype_to[MAXQUOTAS];
         char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
@@ -1479,21 +1494,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
          * re-enter the quota code and are already holding the mutex */
         if (IS_NOQUOTA(inode))
                 return QUOTA_OK;
-        /* Clear the arrays */
+        /* Initialize the arrays */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
+                transfer_from[cnt] = NODQUOT;
+                transfer_to[cnt] = NODQUOT;
                 warntype_to[cnt] = QUOTA_NL_NOWARN;
-        }
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-        /* Now recheck reliably when holding dqptr_sem */
-        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-                return QUOTA_OK;
-        }
-        /* First build the transfer_to list - here we can block on
-         * reading/instantiating of dquots. We know that the transaction for
-         * us was already started so we don't violate lock ranking here */
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 switch (cnt) {
                 case USRQUOTA:
                         if (!chuid)
@@ -1507,6 +1512,13 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
                         break;
                 }
         }
+
+        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        /* Now recheck reliably when holding dqptr_sem */
+        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
+                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+                goto put_all;
+        }
         spin_lock(&dq_data_lock);
         space = inode_get_bytes(inode);
         /* Build the transfer_from list and check the limits */
@@ -1517,7 +1529,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
                 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
                     NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
                     warntype_to + cnt) == NO_QUOTA)
-                        goto warn_put_all;
+                        goto over_quota;
         }
 
         /*
@@ -1545,28 +1557,37 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 
                 inode->i_dquot[cnt] = transfer_to[cnt];
         }
-        ret = QUOTA_OK;
-warn_put_all:
         spin_unlock(&dq_data_lock);
+        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
         /* Dirtify all the dquots - this can block when journalling */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (transfer_from[cnt])
                         mark_dquot_dirty(transfer_from[cnt]);
-                if (transfer_to[cnt])
+                if (transfer_to[cnt]) {
                         mark_dquot_dirty(transfer_to[cnt]);
+                        /* The reference we got is transferred to the inode */
+                        transfer_to[cnt] = NODQUOT;
+                }
         }
+warn_put_all:
         flush_warnings(transfer_to, warntype_to);
         flush_warnings(transfer_from, warntype_from_inodes);
         flush_warnings(transfer_from, warntype_from_space);
-
+put_all:
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
-                        dqput(transfer_from[cnt]);
-                if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
-                        dqput(transfer_to[cnt]);
+                dqput(transfer_from[cnt]);
+                dqput(transfer_to[cnt]);
         }
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
         return ret;
+over_quota:
+        spin_unlock(&dq_data_lock);
+        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        /* Clear dquot pointers we don't want to dqput() */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                transfer_from[cnt] = NODQUOT;
+        ret = NO_QUOTA;
+        goto warn_put_all;
 }
 
 /* Wrapper for transferring ownership of an inode */
@@ -1651,19 +1672,24 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
                         continue;
 
                 if (flags & DQUOT_SUSPENDED) {
+                        spin_lock(&dq_state_lock);
                         dqopt->flags |=
                                 dquot_state_flag(DQUOT_SUSPENDED, cnt);
+                        spin_unlock(&dq_state_lock);
                 } else {
+                        spin_lock(&dq_state_lock);
                         dqopt->flags &= ~dquot_state_flag(flags, cnt);
                         /* Turning off suspended quotas? */
                         if (!sb_has_quota_loaded(sb, cnt) &&
                             sb_has_quota_suspended(sb, cnt)) {
                                 dqopt->flags &= ~dquot_state_flag(
                                                         DQUOT_SUSPENDED, cnt);
+                                spin_unlock(&dq_state_lock);
                                 iput(dqopt->files[cnt]);
                                 dqopt->files[cnt] = NULL;
                                 continue;
                         }
+                        spin_unlock(&dq_state_lock);
                 }
 
                 /* We still have to keep quota loaded? */
@@ -1830,7 +1856,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
         }
         mutex_unlock(&dqopt->dqio_mutex);
         mutex_unlock(&inode->i_mutex);
+        spin_lock(&dq_state_lock);
         dqopt->flags |= dquot_state_flag(flags, type);
+        spin_unlock(&dq_state_lock);
 
         add_dquot_ref(sb, type);
         mutex_unlock(&dqopt->dqonoff_mutex);
@@ -1872,9 +1900,11 @@ static int vfs_quota_on_remount(struct super_block *sb, int type)
         }
         inode = dqopt->files[type];
         dqopt->files[type] = NULL;
+        spin_lock(&dq_state_lock);
         flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
                                                 DQUOT_LIMITS_ENABLED, type);
         dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
+        spin_unlock(&dq_state_lock);
         mutex_unlock(&dqopt->dqonoff_mutex);
 
         flags = dquot_generic_flag(flags, type);
@@ -1952,7 +1982,9 @@ int vfs_quota_enable(struct inode *inode, int type, int format_id,
                         ret = -EBUSY;
                         goto out_lock;
                 }
+                spin_lock(&dq_state_lock);
                 sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
+                spin_unlock(&dq_state_lock);
 out_lock:
                 mutex_unlock(&dqopt->dqonoff_mutex);
                 return ret;
@@ -2039,14 +2071,12 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
 {
         struct dquot *dquot;
 
-        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-        if (!(dquot = dqget(sb, id, type))) {
-                mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+        dquot = dqget(sb, id, type);
+        if (dquot == NODQUOT)
                 return -ESRCH;
-        }
         do_get_dqblk(dquot, di);
         dqput(dquot);
-        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+
         return 0;
 }
 
@@ -2130,7 +2160,6 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
         struct dquot *dquot;
         int rc;
 
-        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
         dquot = dqget(sb, id, type);
         if (!dquot) {
                 rc = -ESRCH;
@@ -2139,7 +2168,6 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
         rc = do_set_dqblk(dquot, di);
         dqput(dquot);
 out:
-        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
         return rc;
 }
 
@@ -2370,11 +2398,9 @@ EXPORT_SYMBOL(dquot_release);
 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 EXPORT_SYMBOL(dquot_initialize);
 EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(dquot_drop_locked);
 EXPORT_SYMBOL(vfs_dq_drop);
 EXPORT_SYMBOL(dqget);
 EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_is_cached);
 EXPORT_SYMBOL(dquot_alloc_space);
 EXPORT_SYMBOL(dquot_alloc_inode);
 EXPORT_SYMBOL(dquot_free_space);