-rw-r--r--  fs/dquot.c                 | 218
-rw-r--r--  fs/ocfs2/quota_global.c    | 169
-rw-r--r--  include/linux/quotaops.h   |   2
3 files changed, 124 insertions, 265 deletions
diff --git a/fs/dquot.c b/fs/dquot.c
index 48c0571f831d..bca3cac4bee7 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -87,14 +87,17 @@
 #define __DQUOT_PARANOIA
 
 /*
- * There are two quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats and also dqstats structure containing statistics about the
- * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
- * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * There are three quota SMP locks. dq_list_lock protects all lists with quotas
+ * and quota formats, dqstats structure containing statistics about the lists
+ * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
+ * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
  * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes().
+ * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
+ * modifications of quota state (on quotaon and quotaoff) and readers who care
+ * about latest values take it as well.
  *
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ *   dq_list_lock > dq_state_lock
  *
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
@@ -103,12 +106,7 @@
  * operation is just reading pointers from inode (or not using them at all) the
  * read lock is enough. If pointers are altered function must hold write lock
  * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_mutex is also needed). If operation is holding
- * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_mutex.
- * This locking assures that:
- * a) update/access to dquot pointers in inode is serialized
- * b) everyone is guarded against invalidate_dquots()
+ * for altering the flag i_mutex is also needed).
  *
  * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
@@ -122,10 +120,17 @@
  * Lock ordering (including related VFS locks) is the following:
  * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
  * dqio_mutex
+ * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
+ * dqptr_sem. But filesystem has to count with the fact that functions such as
+ * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
+ * from inside a transaction to keep filesystem consistency after a crash. Also
+ * filesystems usually want to do some IO on dquot from ->mark_dirty which is
+ * called with dqptr_sem held.
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static DEFINE_SPINLOCK(dq_list_lock);
+static DEFINE_SPINLOCK(dq_state_lock);
 DEFINE_SPINLOCK(dq_data_lock);
 
 static char *quotatypes[] = INITQFNAMES;
@@ -428,7 +433,7 @@ static inline void do_destroy_dquot(struct dquot *dquot)
  * quota is disabled and pointers from inodes removed so there cannot be new
  * quota users. There can still be some users of quotas due to inodes being
  * just deleted or pruned by prune_icache() (those are not attached to any
- * list). We have to wait for such users.
+ * list) or parallel quotactl call. We have to wait for such users.
  */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
@@ -600,7 +605,6 @@ static struct shrinker dqcache_shrinker = {
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_mutex held
  */
 void dqput(struct dquot *dquot)
 {
@@ -697,36 +701,30 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 }
 
 /*
- * Check whether dquot is in memory.
- * MUST be called with either dqptr_sem or dqonoff_mutex held
- */
-int dquot_is_cached(struct super_block *sb, unsigned int id, int type)
-{
-        unsigned int hashent = hashfn(sb, id, type);
-        int ret = 0;
-
-        if (!sb_has_quota_active(sb, type))
-                return 0;
-        spin_lock(&dq_list_lock);
-        if (find_dquot(hashent, sb, id, type) != NODQUOT)
-                ret = 1;
-        spin_unlock(&dq_list_lock);
-        return ret;
-}
-
-/*
  * Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_mutex held
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ * a) checking for quota flags under dq_list_lock and
+ * b) getting a reference to dquot before we release dq_list_lock
  */
 struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 {
         unsigned int hashent = hashfn(sb, id, type);
-        struct dquot *dquot, *empty = NODQUOT;
+        struct dquot *dquot = NODQUOT, *empty = NODQUOT;
 
         if (!sb_has_quota_active(sb, type))
                 return NODQUOT;
 we_slept:
         spin_lock(&dq_list_lock);
+        spin_lock(&dq_state_lock);
+        if (!sb_has_quota_active(sb, type)) {
+                spin_unlock(&dq_state_lock);
+                spin_unlock(&dq_list_lock);
+                goto out;
+        }
+        spin_unlock(&dq_state_lock);
+
         if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
                 if (empty == NODQUOT) {
                         spin_unlock(&dq_list_lock);
@@ -735,6 +733,7 @@ we_slept:
                         goto we_slept;
                 }
                 dquot = empty;
+                empty = NODQUOT;
                 dquot->dq_id = id;
                 /* all dquots go on the inuse_list */
                 put_inuse(dquot);
@@ -749,8 +748,6 @@ we_slept:
                 dqstats.cache_hits++;
                 dqstats.lookups++;
                 spin_unlock(&dq_list_lock);
-                if (empty)
-                        do_destroy_dquot(empty);
         }
         /* Wait for dq_lock - after this we know that either dquot_release() is already
          * finished or it will be canceled due to dq_count > 1 test */
@@ -758,11 +755,15 @@ we_slept:
         /* Read the dquot and instantiate it (everything done only if needed) */
         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
                 dqput(dquot);
-                return NODQUOT;
+                dquot = NODQUOT;
+                goto out;
         }
 #ifdef __DQUOT_PARANOIA
         BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
 #endif
+out:
+        if (empty)
+                do_destroy_dquot(empty);
 
         return dquot;
 }
@@ -1198,63 +1199,76 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
 }
 /*
  * Initialize quota pointers in inode
- * Transaction must be started at entry
+ * We do things in a bit complicated way but by that we avoid calling
+ * dqget() and thus filesystem callbacks under dqptr_sem.
  */
 int dquot_initialize(struct inode *inode, int type)
 {
         unsigned int id = 0;
         int cnt, ret = 0;
+        struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+        struct super_block *sb = inode->i_sb;
 
         /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
         if (IS_NOQUOTA(inode))
                 return 0;
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        /* First get references to structures we might need. */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+                if (type != -1 && cnt != type)
+                        continue;
+                switch (cnt) {
+                case USRQUOTA:
+                        id = inode->i_uid;
+                        break;
+                case GRPQUOTA:
+                        id = inode->i_gid;
+                        break;
+                }
+                got[cnt] = dqget(sb, id, cnt);
+        }
+
+        down_write(&sb_dqopt(sb)->dqptr_sem);
         /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
         if (IS_NOQUOTA(inode))
                 goto out_err;
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (type != -1 && cnt != type)
                         continue;
+                /* Avoid races with quotaoff() */
+                if (!sb_has_quota_active(sb, cnt))
+                        continue;
                 if (inode->i_dquot[cnt] == NODQUOT) {
-                        switch (cnt) {
-                        case USRQUOTA:
-                                id = inode->i_uid;
-                                break;
-                        case GRPQUOTA:
-                                id = inode->i_gid;
-                                break;
-                        }
-                        inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
+                        inode->i_dquot[cnt] = got[cnt];
+                        got[cnt] = NODQUOT;
                 }
         }
 out_err:
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        up_write(&sb_dqopt(sb)->dqptr_sem);
+        /* Drop unused references */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                dqput(got[cnt]);
         return ret;
 }
 
 /*
  * Release all quotas referenced by inode
- * Transaction must be started at an entry
  */
-int dquot_drop_locked(struct inode *inode)
+int dquot_drop(struct inode *inode)
 {
         int cnt;
+        struct dquot *put[MAXQUOTAS];
 
+        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (inode->i_dquot[cnt] != NODQUOT) {
-                        dqput(inode->i_dquot[cnt]);
-                        inode->i_dquot[cnt] = NODQUOT;
-                }
+                put[cnt] = inode->i_dquot[cnt];
+                inode->i_dquot[cnt] = NODQUOT;
         }
-        return 0;
-}
-
-int dquot_drop(struct inode *inode)
-{
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-        dquot_drop_locked(inode);
         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                dqput(put[cnt]);
         return 0;
 }
 
@@ -1470,8 +1484,9 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
         qsize_t space;
         struct dquot *transfer_from[MAXQUOTAS];
         struct dquot *transfer_to[MAXQUOTAS];
-        int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
-            chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
+        int cnt, ret = QUOTA_OK;
+        int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
+            chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
         char warntype_to[MAXQUOTAS];
         char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
@@ -1479,21 +1494,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
          * re-enter the quota code and are already holding the mutex */
         if (IS_NOQUOTA(inode))
                 return QUOTA_OK;
-        /* Clear the arrays */
+        /* Initialize the arrays */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
+                transfer_from[cnt] = NODQUOT;
+                transfer_to[cnt] = NODQUOT;
                 warntype_to[cnt] = QUOTA_NL_NOWARN;
-        }
-        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-        /* Now recheck reliably when holding dqptr_sem */
-        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
-                return QUOTA_OK;
-        }
-        /* First build the transfer_to list - here we can block on
-         * reading/instantiating of dquots. We know that the transaction for
-         * us was already started so we don't violate lock ranking here */
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 switch (cnt) {
                 case USRQUOTA:
                         if (!chuid)
@@ -1507,6 +1512,13 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
                         break;
                 }
         }
+
+        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        /* Now recheck reliably when holding dqptr_sem */
+        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
+                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+                goto put_all;
+        }
         spin_lock(&dq_data_lock);
         space = inode_get_bytes(inode);
         /* Build the transfer_from list and check the limits */
@@ -1517,7 +1529,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
                 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
                     NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
                     warntype_to + cnt) == NO_QUOTA)
-                        goto warn_put_all;
+                        goto over_quota;
         }
 
         /*
@@ -1545,28 +1557,37 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 
                 inode->i_dquot[cnt] = transfer_to[cnt];
         }
-        ret = QUOTA_OK;
-warn_put_all:
         spin_unlock(&dq_data_lock);
+        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
         /* Dirtify all the dquots - this can block when journalling */
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                 if (transfer_from[cnt])
                         mark_dquot_dirty(transfer_from[cnt]);
-                if (transfer_to[cnt])
+                if (transfer_to[cnt]) {
                         mark_dquot_dirty(transfer_to[cnt]);
+                        /* The reference we got is transferred to the inode */
+                        transfer_to[cnt] = NODQUOT;
+                }
         }
+warn_put_all:
         flush_warnings(transfer_to, warntype_to);
         flush_warnings(transfer_from, warntype_from_inodes);
         flush_warnings(transfer_from, warntype_from_space);
-
+put_all:
         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
-                        dqput(transfer_from[cnt]);
-                if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
-                        dqput(transfer_to[cnt]);
+                dqput(transfer_from[cnt]);
+                dqput(transfer_to[cnt]);
         }
-        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
         return ret;
+over_quota:
+        spin_unlock(&dq_data_lock);
+        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+        /* Clear dquot pointers we don't want to dqput() */
+        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+                transfer_from[cnt] = NODQUOT;
+        ret = NO_QUOTA;
+        goto warn_put_all;
 }
 
 /* Wrapper for transferring ownership of an inode */
@@ -1651,19 +1672,24 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
                         continue;
 
                 if (flags & DQUOT_SUSPENDED) {
+                        spin_lock(&dq_state_lock);
                         dqopt->flags |=
                                 dquot_state_flag(DQUOT_SUSPENDED, cnt);
+                        spin_unlock(&dq_state_lock);
                 } else {
+                        spin_lock(&dq_state_lock);
                         dqopt->flags &= ~dquot_state_flag(flags, cnt);
                         /* Turning off suspended quotas? */
                         if (!sb_has_quota_loaded(sb, cnt) &&
                             sb_has_quota_suspended(sb, cnt)) {
                                 dqopt->flags &= ~dquot_state_flag(
                                                         DQUOT_SUSPENDED, cnt);
+                                spin_unlock(&dq_state_lock);
                                 iput(dqopt->files[cnt]);
                                 dqopt->files[cnt] = NULL;
                                 continue;
                         }
+                        spin_unlock(&dq_state_lock);
                 }
 
                 /* We still have to keep quota loaded? */
@@ -1830,7 +1856,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
         }
         mutex_unlock(&dqopt->dqio_mutex);
         mutex_unlock(&inode->i_mutex);
+        spin_lock(&dq_state_lock);
         dqopt->flags |= dquot_state_flag(flags, type);
+        spin_unlock(&dq_state_lock);
 
         add_dquot_ref(sb, type);
         mutex_unlock(&dqopt->dqonoff_mutex);
@@ -1872,9 +1900,11 @@ static int vfs_quota_on_remount(struct super_block *sb, int type)
         }
         inode = dqopt->files[type];
         dqopt->files[type] = NULL;
+        spin_lock(&dq_state_lock);
         flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
                                                 DQUOT_LIMITS_ENABLED, type);
         dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
+        spin_unlock(&dq_state_lock);
         mutex_unlock(&dqopt->dqonoff_mutex);
 
         flags = dquot_generic_flag(flags, type);
@@ -1952,7 +1982,9 @@ int vfs_quota_enable(struct inode *inode, int type, int format_id,
                 ret = -EBUSY;
                 goto out_lock;
         }
+        spin_lock(&dq_state_lock);
         sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
+        spin_unlock(&dq_state_lock);
 out_lock:
         mutex_unlock(&dqopt->dqonoff_mutex);
         return ret;
@@ -2039,14 +2071,12 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
 {
         struct dquot *dquot;
 
-        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-        if (!(dquot = dqget(sb, id, type))) {
-                mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+        dquot = dqget(sb, id, type);
+        if (dquot == NODQUOT)
                 return -ESRCH;
-        }
         do_get_dqblk(dquot, di);
         dqput(dquot);
-        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+
         return 0;
 }
 
@@ -2130,7 +2160,6 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
         struct dquot *dquot;
         int rc;
 
-        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
         dquot = dqget(sb, id, type);
         if (!dquot) {
                 rc = -ESRCH;
@@ -2139,7 +2168,6 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
         rc = do_set_dqblk(dquot, di);
         dqput(dquot);
 out:
-        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
         return rc;
 }
 
@@ -2370,11 +2398,9 @@ EXPORT_SYMBOL(dquot_release);
 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 EXPORT_SYMBOL(dquot_initialize);
 EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(dquot_drop_locked);
 EXPORT_SYMBOL(vfs_dq_drop);
 EXPORT_SYMBOL(dqget);
 EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_is_cached);
 EXPORT_SYMBOL(dquot_alloc_space);
 EXPORT_SYMBOL(dquot_alloc_inode);
 EXPORT_SYMBOL(dquot_free_space);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 6aff8f2d3e49..f4efa89baee5 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -810,171 +810,6 @@ out:
         return status;
 }
 
-/* This is difficult. We have to lock quota inode and start transaction
- * in this function but we don't want to take the penalty of exlusive
- * quota file lock when we are just going to use cached structures. So
- * we just take read lock check whether we have dquot cached and if so,
- * we don't have to take the write lock... */
-static int ocfs2_dquot_initialize(struct inode *inode, int type)
-{
-        handle_t *handle = NULL;
-        int status = 0;
-        struct super_block *sb = inode->i_sb;
-        struct ocfs2_mem_dqinfo *oinfo;
-        int exclusive = 0;
-        int cnt;
-        qid_t id;
-
-        mlog_entry_void();
-
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (type != -1 && cnt != type)
-                        continue;
-                if (!sb_has_quota_active(sb, cnt))
-                        continue;
-                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
-                status = ocfs2_lock_global_qf(oinfo, 0);
-                if (status < 0)
-                        goto out;
-                /* This is just a performance optimization not a reliable test.
-                 * Since we hold an inode lock, noone can actually release
-                 * the structure until we are finished with initialization. */
-                if (inode->i_dquot[cnt] != NODQUOT) {
-                        ocfs2_unlock_global_qf(oinfo, 0);
-                        continue;
-                }
-                /* When we have inode lock, we know that no dquot_release() can
-                 * run and thus we can safely check whether we need to
-                 * read+modify global file to get quota information or whether
-                 * our node already has it. */
-                if (cnt == USRQUOTA)
-                        id = inode->i_uid;
-                else if (cnt == GRPQUOTA)
-                        id = inode->i_gid;
-                else
-                        BUG();
-                /* Obtain exclusion from quota off... */
-                down_write(&sb_dqopt(sb)->dqptr_sem);
-                exclusive = !dquot_is_cached(sb, id, cnt);
-                up_write(&sb_dqopt(sb)->dqptr_sem);
-                if (exclusive) {
-                        status = ocfs2_lock_global_qf(oinfo, 1);
-                        if (status < 0) {
-                                exclusive = 0;
-                                mlog_errno(status);
-                                goto out_ilock;
-                        }
-                        handle = ocfs2_start_trans(OCFS2_SB(sb),
-                                        ocfs2_calc_qinit_credits(sb, cnt));
-                        if (IS_ERR(handle)) {
-                                status = PTR_ERR(handle);
-                                mlog_errno(status);
-                                goto out_ilock;
-                        }
-                }
-                dquot_initialize(inode, cnt);
-                if (exclusive) {
-                        ocfs2_commit_trans(OCFS2_SB(sb), handle);
-                        ocfs2_unlock_global_qf(oinfo, 1);
-                }
-                ocfs2_unlock_global_qf(oinfo, 0);
-        }
-        mlog_exit(0);
-        return 0;
-out_ilock:
-        if (exclusive)
-                ocfs2_unlock_global_qf(oinfo, 1);
-        ocfs2_unlock_global_qf(oinfo, 0);
-out:
-        mlog_exit(status);
-        return status;
-}
-
-static int ocfs2_dquot_drop_slow(struct inode *inode)
-{
-        int status = 0;
-        int cnt;
-        int got_lock[MAXQUOTAS] = {0, 0};
-        handle_t *handle;
-        struct super_block *sb = inode->i_sb;
-        struct ocfs2_mem_dqinfo *oinfo;
-
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (!sb_has_quota_active(sb, cnt))
-                        continue;
-                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
-                status = ocfs2_lock_global_qf(oinfo, 1);
-                if (status < 0)
-                        goto out;
-                got_lock[cnt] = 1;
-        }
-        handle = ocfs2_start_trans(OCFS2_SB(sb),
-                        ocfs2_calc_qinit_credits(sb, USRQUOTA) +
-                        ocfs2_calc_qinit_credits(sb, GRPQUOTA));
-        if (IS_ERR(handle)) {
-                status = PTR_ERR(handle);
-                mlog_errno(status);
-                goto out;
-        }
-        dquot_drop(inode);
-        ocfs2_commit_trans(OCFS2_SB(sb), handle);
-out:
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-                if (got_lock[cnt]) {
-                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
-                        ocfs2_unlock_global_qf(oinfo, 1);
-                }
-        return status;
-}
-
-/* See the comment before ocfs2_dquot_initialize. */
-static int ocfs2_dquot_drop(struct inode *inode)
-{
-        int status = 0;
-        struct super_block *sb = inode->i_sb;
-        struct ocfs2_mem_dqinfo *oinfo;
-        int exclusive = 0;
-        int cnt;
-        int got_lock[MAXQUOTAS] = {0, 0};
-
-        mlog_entry_void();
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (!sb_has_quota_active(sb, cnt))
-                        continue;
-                oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
-                status = ocfs2_lock_global_qf(oinfo, 0);
-                if (status < 0)
-                        goto out;
-                got_lock[cnt] = 1;
-        }
-        /* Lock against anyone releasing references so that when when we check
-         * we know we are not going to be last ones to release dquot */
-        down_write(&sb_dqopt(sb)->dqptr_sem);
-        /* Urgh, this is a terrible hack :( */
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-                if (inode->i_dquot[cnt] != NODQUOT &&
-                    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
-                        exclusive = 1;
-                        break;
-                }
-        }
-        if (!exclusive)
-                dquot_drop_locked(inode);
-        up_write(&sb_dqopt(sb)->dqptr_sem);
-out:
-        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-                if (got_lock[cnt]) {
-                        oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
-                        ocfs2_unlock_global_qf(oinfo, 0);
-                }
-        /* In case we bailed out because we had to do expensive locking
-         * do it now... */
-        if (exclusive)
-                status = ocfs2_dquot_drop_slow(inode);
-        mlog_exit(status);
-        return status;
-}
-
 static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
 {
         struct ocfs2_dquot *dquot =
@@ -991,8 +826,8 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
 }
 
 struct dquot_operations ocfs2_quota_operations = {
-        .initialize = ocfs2_dquot_initialize,
-        .drop = ocfs2_dquot_drop,
+        .initialize = dquot_initialize,
+        .drop = dquot_drop,
         .alloc_space = dquot_alloc_space,
         .alloc_inode = dquot_alloc_inode,
         .free_space = dquot_free_space,
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 21b781a3350f..0b35b3a1be05 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -24,10 +24,8 @@ void sync_dquots(struct super_block *sb, int type);
 
 int dquot_initialize(struct inode *inode, int type);
 int dquot_drop(struct inode *inode);
-int dquot_drop_locked(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
 void dqput(struct dquot *dquot);
-int dquot_is_cached(struct super_block *sb, unsigned int id, int type);
 int dquot_scan_active(struct super_block *sb,
                       int (*fn)(struct dquot *dquot, unsigned long priv),
                       unsigned long priv);
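
The new comment above dqget() describes the pattern this patch relies on instead of dqonoff_mutex: a reader checks the quota-active flag under dq_list_lock/dq_state_lock and takes its reference before releasing dq_list_lock, so a concurrent quotaoff() either makes the reader bail out or has to wait for the reference in invalidate_dquots(). Below is a minimal userspace sketch of that ordering, not part of the patch; all names (cache_get, cache_put, cache_disable, struct entry) are hypothetical stand-ins, and it models only the flag/reference interlock, not the real dquot hash or wait-queue logic.

/* Toy model of the dqget()/quotaoff() interlock; assumptions noted above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ dq_list_lock */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ dq_state_lock */
static int quota_active = 1;                                    /* ~ sb_has_quota_active() */

struct entry {
        int refcount;                   /* ~ dquot->dq_count */
        int id;
};

static struct entry cache[16];          /* stand-in for the dquot hash */

/* ~ dqget(): check the flag and take the reference before dropping list_lock */
static struct entry *cache_get(int id)
{
        struct entry *e;

        pthread_mutex_lock(&list_lock);
        pthread_mutex_lock(&state_lock);
        if (!quota_active) {            /* raced with disable -> bail out */
                pthread_mutex_unlock(&state_lock);
                pthread_mutex_unlock(&list_lock);
                return NULL;
        }
        pthread_mutex_unlock(&state_lock);

        e = &cache[id % 16];
        e->id = id;
        e->refcount++;                  /* reference taken while list_lock is still held */
        pthread_mutex_unlock(&list_lock);
        return e;
}

/* ~ dqput() */
static void cache_put(struct entry *e)
{
        pthread_mutex_lock(&list_lock);
        e->refcount--;
        pthread_mutex_unlock(&list_lock);
}

/* ~ quotaoff: clear the flag, then wait until every reference is gone */
static void cache_disable(void)
{
        int busy;

        pthread_mutex_lock(&state_lock);
        quota_active = 0;               /* new cache_get() callers now bail out */
        pthread_mutex_unlock(&state_lock);

        do {                            /* ~ invalidate_dquots() waiting for users */
                busy = 0;
                pthread_mutex_lock(&list_lock);
                for (int i = 0; i < 16; i++)
                        busy |= cache[i].refcount != 0;
                pthread_mutex_unlock(&list_lock);
        } while (busy);
}

int main(void)
{
        struct entry *e = cache_get(42);

        if (e) {
                printf("got id %d, refcount %d\n", e->id, e->refcount);
                cache_put(e);
        }
        cache_disable();
        printf("disabled, no references remain\n");
        return 0;
}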