aboutsummaryrefslogtreecommitdiffstats
path: root/fs/quota
diff options
context:
space:
mode:
authorNiu Yawei <yawei.niu@gmail.com>2014-06-04 00:23:19 -0400
committerJan Kara <jack@suse.cz>2014-07-15 16:40:22 -0400
commitb9ba6f94b2382ef832f97122976b73004f714714 (patch)
tree29bab149a28538c9a7f97aae52a78aec0c2ee3b5 /fs/quota
parent9eb6463f31cf720deaf0e810cacc403d7720b10c (diff)
quota: remove dqptr_sem
Remove dqptr_sem to make quota code scalable: Remove the dqptr_sem, accessing inode->i_dquot now protected by dquot_srcu, and changing inode->i_dquot is now serialized by dq_data_lock. Signed-off-by: Lai Siyao <lai.siyao@intel.com> Signed-off-by: Niu Yawei <yawei.niu@intel.com> Signed-off-by: Jan Kara <jack@suse.cz>
Diffstat (limited to 'fs/quota')
-rw-r--r--fs/quota/dquot.c114
1 file changed, 49 insertions, 65 deletions
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index fb2d2e2a89e7..f2d0eee9d1f1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -96,13 +96,16 @@
96 * Note that some things (eg. sb pointer, type, id) doesn't change during 96 * Note that some things (eg. sb pointer, type, id) doesn't change during
97 * the life of the dquot structure and so needn't to be protected by a lock 97 * the life of the dquot structure and so needn't to be protected by a lock
98 * 98 *
99 * Any operation working on dquots via inode pointers must hold dqptr_sem. If 99 * Operation accessing dquots via inode pointers are protected by dquot_srcu.
100 * operation is just reading pointers from inode (or not using them at all) the 100 * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
101 * read lock is enough. If pointers are altered function must hold write lock. 101 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
102 * inode and before dropping dquot references to avoid use of dquots after
103 * they are freed. dq_data_lock is used to serialize the pointer setting and
104 * clearing operations.
102 * Special care needs to be taken about S_NOQUOTA inode flag (marking that 105 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
103 * inode is a quota file). Functions adding pointers from inode to dquots have 106 * inode is a quota file). Functions adding pointers from inode to dquots have
104 * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they 107 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
105 * have to do all pointer modifications before dropping dqptr_sem. This makes 108 * have to do all pointer modifications before dropping dq_data_lock. This makes
106 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and 109 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
107 * then drops all pointers to dquots from an inode. 110 * then drops all pointers to dquots from an inode.
108 * 111 *
@@ -116,21 +119,15 @@
116 * spinlock to internal buffers before writing. 119 * spinlock to internal buffers before writing.
117 * 120 *
118 * Lock ordering (including related VFS locks) is the following: 121 * Lock ordering (including related VFS locks) is the following:
119 * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock > 122 * dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
120 * dqio_mutex
121 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc. 123 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
122 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
123 * dqptr_sem. But filesystem has to count with the fact that functions such as
124 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
125 * from inside a transaction to keep filesystem consistency after a crash. Also
126 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
127 * called with dqptr_sem held.
128 */ 124 */
129 125
130static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); 126static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
131static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); 127static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
132__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); 128__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
133EXPORT_SYMBOL(dq_data_lock); 129EXPORT_SYMBOL(dq_data_lock);
130DEFINE_STATIC_SRCU(dquot_srcu);
134 131
135void __quota_error(struct super_block *sb, const char *func, 132void __quota_error(struct super_block *sb, const char *func,
136 const char *fmt, ...) 133 const char *fmt, ...)
@@ -964,7 +961,6 @@ static void add_dquot_ref(struct super_block *sb, int type)
964/* 961/*
965 * Remove references to dquots from inode and add dquot to list for freeing 962 * Remove references to dquots from inode and add dquot to list for freeing
966 * if we have the last reference to dquot 963 * if we have the last reference to dquot
967 * We can't race with anybody because we hold dqptr_sem for writing...
968 */ 964 */
969static void remove_inode_dquot_ref(struct inode *inode, int type, 965static void remove_inode_dquot_ref(struct inode *inode, int type,
970 struct list_head *tofree_head) 966 struct list_head *tofree_head)
@@ -1024,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type,
1024 * We have to scan also I_NEW inodes because they can already 1020 * We have to scan also I_NEW inodes because they can already
1025 * have quota pointer initialized. Luckily, we need to touch 1021 * have quota pointer initialized. Luckily, we need to touch
1026 * only quota pointers and these have separate locking 1022 * only quota pointers and these have separate locking
1027 * (dqptr_sem). 1023 * (dq_data_lock).
1028 */ 1024 */
1025 spin_lock(&dq_data_lock);
1029 if (!IS_NOQUOTA(inode)) { 1026 if (!IS_NOQUOTA(inode)) {
1030 if (unlikely(inode_get_rsv_space(inode) > 0)) 1027 if (unlikely(inode_get_rsv_space(inode) > 0))
1031 reserved = 1; 1028 reserved = 1;
1032 remove_inode_dquot_ref(inode, type, tofree_head); 1029 remove_inode_dquot_ref(inode, type, tofree_head);
1033 } 1030 }
1031 spin_unlock(&dq_data_lock);
1034 } 1032 }
1035 spin_unlock(&inode_sb_list_lock); 1033 spin_unlock(&inode_sb_list_lock);
1036#ifdef CONFIG_QUOTA_DEBUG 1034#ifdef CONFIG_QUOTA_DEBUG
@@ -1048,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type)
1048 LIST_HEAD(tofree_head); 1046 LIST_HEAD(tofree_head);
1049 1047
1050 if (sb->dq_op) { 1048 if (sb->dq_op) {
1051 down_write(&sb_dqopt(sb)->dqptr_sem);
1052 remove_dquot_ref(sb, type, &tofree_head); 1049 remove_dquot_ref(sb, type, &tofree_head);
1053 up_write(&sb_dqopt(sb)->dqptr_sem); 1050 synchronize_srcu(&dquot_srcu);
1054 put_dquot_list(&tofree_head); 1051 put_dquot_list(&tofree_head);
1055 } 1052 }
1056} 1053}
@@ -1381,9 +1378,6 @@ static int dquot_active(const struct inode *inode)
1381/* 1378/*
1382 * Initialize quota pointers in inode 1379 * Initialize quota pointers in inode
1383 * 1380 *
1384 * We do things in a bit complicated way but by that we avoid calling
1385 * dqget() and thus filesystem callbacks under dqptr_sem.
1386 *
1387 * It is better to call this function outside of any transaction as it 1381 * It is better to call this function outside of any transaction as it
1388 * might need a lot of space in journal for dquot structure allocation. 1382 * might need a lot of space in journal for dquot structure allocation.
1389 */ 1383 */
@@ -1394,8 +1388,6 @@ static void __dquot_initialize(struct inode *inode, int type)
1394 struct super_block *sb = inode->i_sb; 1388 struct super_block *sb = inode->i_sb;
1395 qsize_t rsv; 1389 qsize_t rsv;
1396 1390
1397 /* First test before acquiring mutex - solves deadlocks when we
1398 * re-enter the quota code and are already holding the mutex */
1399 if (!dquot_active(inode)) 1391 if (!dquot_active(inode))
1400 return; 1392 return;
1401 1393
@@ -1429,7 +1421,7 @@ static void __dquot_initialize(struct inode *inode, int type)
1429 if (!init_needed) 1421 if (!init_needed)
1430 return; 1422 return;
1431 1423
1432 down_write(&sb_dqopt(sb)->dqptr_sem); 1424 spin_lock(&dq_data_lock);
1433 if (IS_NOQUOTA(inode)) 1425 if (IS_NOQUOTA(inode))
1434 goto out_err; 1426 goto out_err;
1435 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1427 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type)
1449 * did a write before quota was turned on 1441 * did a write before quota was turned on
1450 */ 1442 */
1451 rsv = inode_get_rsv_space(inode); 1443 rsv = inode_get_rsv_space(inode);
1452 if (unlikely(rsv)) { 1444 if (unlikely(rsv))
1453 spin_lock(&dq_data_lock);
1454 dquot_resv_space(inode->i_dquot[cnt], rsv); 1445 dquot_resv_space(inode->i_dquot[cnt], rsv);
1455 spin_unlock(&dq_data_lock);
1456 }
1457 } 1446 }
1458 } 1447 }
1459out_err: 1448out_err:
1460 up_write(&sb_dqopt(sb)->dqptr_sem); 1449 spin_unlock(&dq_data_lock);
1461 /* Drop unused references */ 1450 /* Drop unused references */
1462 dqput_all(got); 1451 dqput_all(got);
1463} 1452}
@@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode)
1469EXPORT_SYMBOL(dquot_initialize); 1458EXPORT_SYMBOL(dquot_initialize);
1470 1459
1471/* 1460/*
1472 * Release all quotas referenced by inode 1461 * Release all quotas referenced by inode.
1462 *
1463 * This function only be called on inode free or converting
1464 * a file to quota file, no other users for the i_dquot in
1465 * both cases, so we needn't call synchronize_srcu() after
1466 * clearing i_dquot.
1473 */ 1467 */
1474static void __dquot_drop(struct inode *inode) 1468static void __dquot_drop(struct inode *inode)
1475{ 1469{
1476 int cnt; 1470 int cnt;
1477 struct dquot *put[MAXQUOTAS]; 1471 struct dquot *put[MAXQUOTAS];
1478 1472
1479 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1473 spin_lock(&dq_data_lock);
1480 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1474 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1481 put[cnt] = inode->i_dquot[cnt]; 1475 put[cnt] = inode->i_dquot[cnt];
1482 inode->i_dquot[cnt] = NULL; 1476 inode->i_dquot[cnt] = NULL;
1483 } 1477 }
1484 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1478 spin_unlock(&dq_data_lock);
1485 dqput_all(put); 1479 dqput_all(put);
1486} 1480}
1487 1481
@@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1599 */ 1593 */
1600int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) 1594int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1601{ 1595{
1602 int cnt, ret = 0; 1596 int cnt, ret = 0, index;
1603 struct dquot_warn warn[MAXQUOTAS]; 1597 struct dquot_warn warn[MAXQUOTAS];
1604 struct dquot **dquots = inode->i_dquot; 1598 struct dquot **dquots = inode->i_dquot;
1605 int reserve = flags & DQUOT_SPACE_RESERVE; 1599 int reserve = flags & DQUOT_SPACE_RESERVE;
1606 1600
1607 /*
1608 * First test before acquiring mutex - solves deadlocks when we
1609 * re-enter the quota code and are already holding the mutex
1610 */
1611 if (!dquot_active(inode)) { 1601 if (!dquot_active(inode)) {
1612 inode_incr_space(inode, number, reserve); 1602 inode_incr_space(inode, number, reserve);
1613 goto out; 1603 goto out;
@@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1616 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1606 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1617 warn[cnt].w_type = QUOTA_NL_NOWARN; 1607 warn[cnt].w_type = QUOTA_NL_NOWARN;
1618 1608
1619 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1609 index = srcu_read_lock(&dquot_srcu);
1620 spin_lock(&dq_data_lock); 1610 spin_lock(&dq_data_lock);
1621 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1611 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1622 if (!dquots[cnt]) 1612 if (!dquots[cnt])
@@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1643 goto out_flush_warn; 1633 goto out_flush_warn;
1644 mark_all_dquot_dirty(dquots); 1634 mark_all_dquot_dirty(dquots);
1645out_flush_warn: 1635out_flush_warn:
1646 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1636 srcu_read_unlock(&dquot_srcu, index);
1647 flush_warnings(warn); 1637 flush_warnings(warn);
1648out: 1638out:
1649 return ret; 1639 return ret;
@@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space);
1655 */ 1645 */
1656int dquot_alloc_inode(const struct inode *inode) 1646int dquot_alloc_inode(const struct inode *inode)
1657{ 1647{
1658 int cnt, ret = 0; 1648 int cnt, ret = 0, index;
1659 struct dquot_warn warn[MAXQUOTAS]; 1649 struct dquot_warn warn[MAXQUOTAS];
1660 struct dquot * const *dquots = inode->i_dquot; 1650 struct dquot * const *dquots = inode->i_dquot;
1661 1651
1662 /* First test before acquiring mutex - solves deadlocks when we
1663 * re-enter the quota code and are already holding the mutex */
1664 if (!dquot_active(inode)) 1652 if (!dquot_active(inode))
1665 return 0; 1653 return 0;
1666 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1654 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1667 warn[cnt].w_type = QUOTA_NL_NOWARN; 1655 warn[cnt].w_type = QUOTA_NL_NOWARN;
1668 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1656
1657 index = srcu_read_lock(&dquot_srcu);
1669 spin_lock(&dq_data_lock); 1658 spin_lock(&dq_data_lock);
1670 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1659 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1671 if (!dquots[cnt]) 1660 if (!dquots[cnt])
@@ -1685,7 +1674,7 @@ warn_put_all:
1685 spin_unlock(&dq_data_lock); 1674 spin_unlock(&dq_data_lock);
1686 if (ret == 0) 1675 if (ret == 0)
1687 mark_all_dquot_dirty(dquots); 1676 mark_all_dquot_dirty(dquots);
1688 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1677 srcu_read_unlock(&dquot_srcu, index);
1689 flush_warnings(warn); 1678 flush_warnings(warn);
1690 return ret; 1679 return ret;
1691} 1680}
@@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode);
1696 */ 1685 */
1697int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) 1686int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1698{ 1687{
1699 int cnt; 1688 int cnt, index;
1700 1689
1701 if (!dquot_active(inode)) { 1690 if (!dquot_active(inode)) {
1702 inode_claim_rsv_space(inode, number); 1691 inode_claim_rsv_space(inode, number);
1703 return 0; 1692 return 0;
1704 } 1693 }
1705 1694
1706 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1695 index = srcu_read_lock(&dquot_srcu);
1707 spin_lock(&dq_data_lock); 1696 spin_lock(&dq_data_lock);
1708 /* Claim reserved quotas to allocated quotas */ 1697 /* Claim reserved quotas to allocated quotas */
1709 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1698 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1715 inode_claim_rsv_space(inode, number); 1704 inode_claim_rsv_space(inode, number);
1716 spin_unlock(&dq_data_lock); 1705 spin_unlock(&dq_data_lock);
1717 mark_all_dquot_dirty(inode->i_dquot); 1706 mark_all_dquot_dirty(inode->i_dquot);
1718 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1707 srcu_read_unlock(&dquot_srcu, index);
1719 return 0; 1708 return 0;
1720} 1709}
1721EXPORT_SYMBOL(dquot_claim_space_nodirty); 1710EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
1725 */ 1714 */
1726void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) 1715void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1727{ 1716{
1728 int cnt; 1717 int cnt, index;
1729 1718
1730 if (!dquot_active(inode)) { 1719 if (!dquot_active(inode)) {
1731 inode_reclaim_rsv_space(inode, number); 1720 inode_reclaim_rsv_space(inode, number);
1732 return; 1721 return;
1733 } 1722 }
1734 1723
1735 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1724 index = srcu_read_lock(&dquot_srcu);
1736 spin_lock(&dq_data_lock); 1725 spin_lock(&dq_data_lock);
1737 /* Claim reserved quotas to allocated quotas */ 1726 /* Claim reserved quotas to allocated quotas */
1738 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1727 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1744 inode_reclaim_rsv_space(inode, number); 1733 inode_reclaim_rsv_space(inode, number);
1745 spin_unlock(&dq_data_lock); 1734 spin_unlock(&dq_data_lock);
1746 mark_all_dquot_dirty(inode->i_dquot); 1735 mark_all_dquot_dirty(inode->i_dquot);
1747 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1736 srcu_read_unlock(&dquot_srcu, index);
1748 return; 1737 return;
1749} 1738}
1750EXPORT_SYMBOL(dquot_reclaim_space_nodirty); 1739EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1757 unsigned int cnt; 1746 unsigned int cnt;
1758 struct dquot_warn warn[MAXQUOTAS]; 1747 struct dquot_warn warn[MAXQUOTAS];
1759 struct dquot **dquots = inode->i_dquot; 1748 struct dquot **dquots = inode->i_dquot;
1760 int reserve = flags & DQUOT_SPACE_RESERVE; 1749 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1761 1750
1762 /* First test before acquiring mutex - solves deadlocks when we
1763 * re-enter the quota code and are already holding the mutex */
1764 if (!dquot_active(inode)) { 1751 if (!dquot_active(inode)) {
1765 inode_decr_space(inode, number, reserve); 1752 inode_decr_space(inode, number, reserve);
1766 return; 1753 return;
1767 } 1754 }
1768 1755
1769 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1756 index = srcu_read_lock(&dquot_srcu);
1770 spin_lock(&dq_data_lock); 1757 spin_lock(&dq_data_lock);
1771 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1758 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1772 int wtype; 1759 int wtype;
@@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1789 goto out_unlock; 1776 goto out_unlock;
1790 mark_all_dquot_dirty(dquots); 1777 mark_all_dquot_dirty(dquots);
1791out_unlock: 1778out_unlock:
1792 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1779 srcu_read_unlock(&dquot_srcu, index);
1793 flush_warnings(warn); 1780 flush_warnings(warn);
1794} 1781}
1795EXPORT_SYMBOL(__dquot_free_space); 1782EXPORT_SYMBOL(__dquot_free_space);
@@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode)
1802 unsigned int cnt; 1789 unsigned int cnt;
1803 struct dquot_warn warn[MAXQUOTAS]; 1790 struct dquot_warn warn[MAXQUOTAS];
1804 struct dquot * const *dquots = inode->i_dquot; 1791 struct dquot * const *dquots = inode->i_dquot;
1792 int index;
1805 1793
1806 /* First test before acquiring mutex - solves deadlocks when we
1807 * re-enter the quota code and are already holding the mutex */
1808 if (!dquot_active(inode)) 1794 if (!dquot_active(inode))
1809 return; 1795 return;
1810 1796
1811 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1797 index = srcu_read_lock(&dquot_srcu);
1812 spin_lock(&dq_data_lock); 1798 spin_lock(&dq_data_lock);
1813 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1799 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1814 int wtype; 1800 int wtype;
@@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode)
1823 } 1809 }
1824 spin_unlock(&dq_data_lock); 1810 spin_unlock(&dq_data_lock);
1825 mark_all_dquot_dirty(dquots); 1811 mark_all_dquot_dirty(dquots);
1826 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1812 srcu_read_unlock(&dquot_srcu, index);
1827 flush_warnings(warn); 1813 flush_warnings(warn);
1828} 1814}
1829EXPORT_SYMBOL(dquot_free_inode); 1815EXPORT_SYMBOL(dquot_free_inode);
@@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode);
1837 * This operation can block, but only after everything is updated 1823 * This operation can block, but only after everything is updated
1838 * A transaction must be started when entering this function. 1824 * A transaction must be started when entering this function.
1839 * 1825 *
1826 * We are holding reference on transfer_from & transfer_to, no need to
1827 * protect them by srcu_read_lock().
1840 */ 1828 */
1841int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) 1829int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1842{ 1830{
@@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1849 struct dquot_warn warn_from_inodes[MAXQUOTAS]; 1837 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1850 struct dquot_warn warn_from_space[MAXQUOTAS]; 1838 struct dquot_warn warn_from_space[MAXQUOTAS];
1851 1839
1852 /* First test before acquiring mutex - solves deadlocks when we
1853 * re-enter the quota code and are already holding the mutex */
1854 if (IS_NOQUOTA(inode)) 1840 if (IS_NOQUOTA(inode))
1855 return 0; 1841 return 0;
1856 /* Initialize the arrays */ 1842 /* Initialize the arrays */
@@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1859 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; 1845 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1860 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; 1846 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1861 } 1847 }
1862 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1848
1849 spin_lock(&dq_data_lock);
1863 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ 1850 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1864 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1851 spin_unlock(&dq_data_lock);
1865 return 0; 1852 return 0;
1866 } 1853 }
1867 spin_lock(&dq_data_lock);
1868 cur_space = inode_get_bytes(inode); 1854 cur_space = inode_get_bytes(inode);
1869 rsv_space = inode_get_rsv_space(inode); 1855 rsv_space = inode_get_rsv_space(inode);
1870 space = cur_space + rsv_space; 1856 space = cur_space + rsv_space;
@@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1918 inode->i_dquot[cnt] = transfer_to[cnt]; 1904 inode->i_dquot[cnt] = transfer_to[cnt];
1919 } 1905 }
1920 spin_unlock(&dq_data_lock); 1906 spin_unlock(&dq_data_lock);
1921 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1922 1907
1923 mark_all_dquot_dirty(transfer_from); 1908 mark_all_dquot_dirty(transfer_from);
1924 mark_all_dquot_dirty(transfer_to); 1909 mark_all_dquot_dirty(transfer_to);
@@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1932 return 0; 1917 return 0;
1933over_quota: 1918over_quota:
1934 spin_unlock(&dq_data_lock); 1919 spin_unlock(&dq_data_lock);
1935 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1936 flush_warnings(warn_to); 1920 flush_warnings(warn_to);
1937 return ret; 1921 return ret;
1938} 1922}