author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-13 19:45:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-13 19:45:40 -0400
commit		cec997093bbff881c3da49084dfba4f76361e96a (patch)
tree		7c84f8c30ceef7209a18d7cd216a3c16536008c5
parent		8d2d441ac4af223eae466c3c31ff737cc31a1411 (diff)
parent		01777836c87081e4f68c4a43c9abe6114805f91e (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull quota, reiserfs, UDF updates from Jan Kara:
 "Scalability improvements for quota, a few reiserfs fixes, and couple
  of misc cleanups (udf, ext2)"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  reiserfs: Fix use after free in journal teardown
  reiserfs: fix corruption introduced by balance_leaf refactor
  udf: avoid redundant memcpy when writing data in ICB
  fs/udf: re-use hex_asc_upper_{hi,lo} macros
  fs/quota: kernel-doc warning fixes
  udf: use linux/uaccess.h
  fs/ext2/super.c: Drop memory allocation cast
  quota: remove dqptr_sem
  quota: simplify remove_inode_dquot_ref()
  quota: avoid unnecessary dqget()/dqput() calls
  quota: protect Q_GETFMT by dqonoff_mutex
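The common thread in the quota commits is the replacement of the global
dqptr_sem rwsem by SRCU plus dq_data_lock. A minimal sketch of that
protection scheme follows; the SRCU calls are the real kernel API, while
use() and free_obj() are hypothetical stand-ins, so this illustrates the
pattern rather than reproducing the dquot.c code itself:

/* Sketch: sleepable readers vs. a teardown path that frees the object. */
DEFINE_STATIC_SRCU(my_srcu);

struct obj *shared_ptr;			/* read by many, cleared at teardown */

void reader(void)
{
	int idx = srcu_read_lock(&my_srcu);	/* readers may sleep inside */
	struct obj *p = shared_ptr;		/* p stays valid until unlock */

	if (p)
		use(p);				/* hypothetical consumer */
	srcu_read_unlock(&my_srcu, idx);
}

void teardown(void)
{
	shared_ptr = NULL;		/* dquot.c does this under dq_data_lock */
	synchronize_srcu(&my_srcu);	/* wait out readers that saw old ptr */
	free_obj();			/* hypothetical; safe to free now */
}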
-rw-r--r--	fs/ext2/super.c        |   2
-rw-r--r--	fs/quota/dquot.c       | 180
-rw-r--r--	fs/quota/kqid.c        |   2
-rw-r--r--	fs/quota/netlink.c     |   3
-rw-r--r--	fs/quota/quota.c       |   6
-rw-r--r--	fs/reiserfs/do_balan.c | 111
-rw-r--r--	fs/reiserfs/journal.c  |  22
-rw-r--r--	fs/reiserfs/lbalance.c |   5
-rw-r--r--	fs/reiserfs/reiserfs.h |   9
-rw-r--r--	fs/reiserfs/super.c    |   6
-rw-r--r--	fs/super.c             |   1
-rw-r--r--	fs/udf/file.c          |  22
-rw-r--r--	fs/udf/lowlevel.c      |   2
-rw-r--r--	fs/udf/super.c         |   2
-rw-r--r--	fs/udf/symlink.c       |   2
-rw-r--r--	fs/udf/unicode.c       |   9
-rw-r--r--	include/linux/quota.h  |   1
17 files changed, 189 insertions(+), 196 deletions(-)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3750031cfa2f..b88edc05c230 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -161,7 +161,7 @@ static struct kmem_cache * ext2_inode_cachep;
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
 	struct ext2_inode_info *ei;
-	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->i_block_alloc_info = NULL;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 7f30bdc57d13..f2d0eee9d1f1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -96,13 +96,16 @@
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem. If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Operation accessing dquots via inode pointers are protected by dquot_srcu.
+ * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
+ * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
+ * inode and before dropping dquot references to avoid use of dquots after
+ * they are freed. dq_data_lock is used to serialize the pointer setting and
+ * clearing operations.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping dq_data_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -116,21 +119,15 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
+ *   dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
  * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
+DEFINE_STATIC_SRCU(dquot_srcu);
 
 void __quota_error(struct super_block *sb, const char *func,
 		   const char *fmt, ...)
@@ -733,7 +730,6 @@ static struct shrinker dqcache_shrinker = {
 
 /*
  * Put reference to dquot
- * NOTE: If you change this function please check whether dqput_blocks() works right...
  */
 void dqput(struct dquot *dquot)
 {
@@ -963,46 +959,33 @@ static void add_dquot_ref(struct super_block *sb, int type)
 }
 
 /*
- * Return 0 if dqput() won't block.
- * (note that 1 doesn't necessarily mean blocking)
- */
-static inline int dqput_blocks(struct dquot *dquot)
-{
-	if (atomic_read(&dquot->dq_count) <= 1)
-		return 1;
-	return 0;
-}
-
-/*
  * Remove references to dquots from inode and add dquot to list for freeing
  * if we have the last reference to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
  */
-static int remove_inode_dquot_ref(struct inode *inode, int type,
-				  struct list_head *tofree_head)
+static void remove_inode_dquot_ref(struct inode *inode, int type,
+				   struct list_head *tofree_head)
 {
 	struct dquot *dquot = inode->i_dquot[type];
 
 	inode->i_dquot[type] = NULL;
-	if (dquot) {
-		if (dqput_blocks(dquot)) {
-#ifdef CONFIG_QUOTA_DEBUG
-			if (atomic_read(&dquot->dq_count) != 1)
-				quota_error(inode->i_sb, "Adding dquot with "
-					    "dq_count %d to dispose list",
-					    atomic_read(&dquot->dq_count));
-#endif
-			spin_lock(&dq_list_lock);
-			/* As dquot must have currently users it can't be on
-			 * the free list... */
-			list_add(&dquot->dq_free, tofree_head);
-			spin_unlock(&dq_list_lock);
-			return 1;
-		}
-		else
-			dqput(dquot);	/* We have guaranteed we won't block */
-	}
-	return 0;
+	if (!dquot)
+		return;
+
+	if (list_empty(&dquot->dq_free)) {
+		/*
+		 * The inode still has reference to dquot so it can't be in the
+		 * free list
+		 */
+		spin_lock(&dq_list_lock);
+		list_add(&dquot->dq_free, tofree_head);
+		spin_unlock(&dq_list_lock);
+	} else {
+		/*
+		 * Dquot is already in a list to put so we won't drop the last
+		 * reference here.
+		 */
+		dqput(dquot);
+	}
 }
 
 /*
@@ -1037,13 +1020,15 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 		 * We have to scan also I_NEW inodes because they can already
 		 * have quota pointer initialized. Luckily, we need to touch
 		 * only quota pointers and these have separate locking
-		 * (dqptr_sem).
+		 * (dq_data_lock).
 		 */
+		spin_lock(&dq_data_lock);
 		if (!IS_NOQUOTA(inode)) {
 			if (unlikely(inode_get_rsv_space(inode) > 0))
 				reserved = 1;
 			remove_inode_dquot_ref(inode, type, tofree_head);
 		}
+		spin_unlock(&dq_data_lock);
 	}
 	spin_unlock(&inode_sb_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1061,9 +1046,8 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 	LIST_HEAD(tofree_head);
 
 	if (sb->dq_op) {
-		down_write(&sb_dqopt(sb)->dqptr_sem);
 		remove_dquot_ref(sb, type, &tofree_head);
-		up_write(&sb_dqopt(sb)->dqptr_sem);
+		synchronize_srcu(&dquot_srcu);
 		put_dquot_list(&tofree_head);
 	}
 }
@@ -1394,21 +1378,16 @@ static int dquot_active(const struct inode *inode)
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
 static void __dquot_initialize(struct inode *inode, int type)
 {
-	int cnt;
+	int cnt, init_needed = 0;
 	struct dquot *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;
 
-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return;
 
@@ -1418,6 +1397,15 @@ static void __dquot_initialize(struct inode *inode, int type)
 		got[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
+		/*
+		 * The i_dquot should have been initialized in most cases,
+		 * we check it without locking here to avoid unnecessary
+		 * dqget()/dqput() calls.
+		 */
+		if (inode->i_dquot[cnt])
+			continue;
+		init_needed = 1;
+
 		switch (cnt) {
 		case USRQUOTA:
 			qid = make_kqid_uid(inode->i_uid);
@@ -1429,7 +1417,11 @@ static void __dquot_initialize(struct inode *inode, int type)
 		got[cnt] = dqget(sb, qid);
 	}
 
-	down_write(&sb_dqopt(sb)->dqptr_sem);
+	/* All required i_dquot has been initialized */
+	if (!init_needed)
+		return;
+
+	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode))
 		goto out_err;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1449,15 +1441,12 @@ static void __dquot_initialize(struct inode *inode, int type)
 			 * did a write before quota was turned on
 			 */
 			rsv = inode_get_rsv_space(inode);
-			if (unlikely(rsv)) {
-				spin_lock(&dq_data_lock);
+			if (unlikely(rsv))
 				dquot_resv_space(inode->i_dquot[cnt], rsv);
-				spin_unlock(&dq_data_lock);
-			}
 		}
 	}
 out_err:
-	up_write(&sb_dqopt(sb)->dqptr_sem);
+	spin_unlock(&dq_data_lock);
 	/* Drop unused references */
 	dqput_all(got);
 }
@@ -1469,19 +1458,24 @@ void dquot_initialize(struct inode *inode)
 EXPORT_SYMBOL(dquot_initialize);
 
 /*
- * Release all quotas referenced by inode
+ * Release all quotas referenced by inode.
+ *
+ * This function only be called on inode free or converting
+ * a file to quota file, no other users for the i_dquot in
+ * both cases, so we needn't call synchronize_srcu() after
+ * clearing i_dquot.
  */
 static void __dquot_drop(struct inode *inode)
 {
 	int cnt;
 	struct dquot *put[MAXQUOTAS];
 
-	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		put[cnt] = inode->i_dquot[cnt];
 		inode->i_dquot[cnt] = NULL;
 	}
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	spin_unlock(&dq_data_lock);
 	dqput_all(put);
 }
 
@@ -1599,15 +1593,11 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
  */
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 {
-	int cnt, ret = 0;
+	int cnt, ret = 0, index;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 
-	/*
-	 * First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex
-	 */
 	if (!dquot_active(inode)) {
 		inode_incr_space(inode, number, reserve);
 		goto out;
@@ -1616,7 +1606,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warn[cnt].w_type = QUOTA_NL_NOWARN;
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!dquots[cnt])
@@ -1643,7 +1633,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 		goto out_flush_warn;
 	mark_all_dquot_dirty(dquots);
 out_flush_warn:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 out:
 	return ret;
@@ -1655,17 +1645,16 @@ EXPORT_SYMBOL(__dquot_alloc_space);
  */
 int dquot_alloc_inode(const struct inode *inode)
 {
-	int cnt, ret = 0;
+	int cnt, ret = 0, index;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;
 
-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return 0;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warn[cnt].w_type = QUOTA_NL_NOWARN;
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!dquots[cnt])
@@ -1685,7 +1674,7 @@ warn_put_all:
 	spin_unlock(&dq_data_lock);
 	if (ret == 0)
 		mark_all_dquot_dirty(dquots);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 	return ret;
 }
@@ -1696,14 +1685,14 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
-	int cnt;
+	int cnt, index;
 
 	if (!dquot_active(inode)) {
 		inode_claim_rsv_space(inode, number);
 		return 0;
 	}
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	/* Claim reserved quotas to allocated quotas */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1715,7 +1704,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 	inode_claim_rsv_space(inode, number);
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(inode->i_dquot);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1725,14 +1714,14 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
  */
 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
 {
-	int cnt;
+	int cnt, index;
 
 	if (!dquot_active(inode)) {
 		inode_reclaim_rsv_space(inode, number);
 		return;
 	}
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	/* Claim reserved quotas to allocated quotas */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1744,7 +1733,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
 	inode_reclaim_rsv_space(inode, number);
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(inode->i_dquot);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	return;
 }
 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -1757,16 +1746,14 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 	unsigned int cnt;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot **dquots = inode->i_dquot;
-	int reserve = flags & DQUOT_SPACE_RESERVE;
+	int reserve = flags & DQUOT_SPACE_RESERVE, index;
 
-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode)) {
 		inode_decr_space(inode, number, reserve);
 		return;
 	}
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		int wtype;
@@ -1789,7 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 		goto out_unlock;
 	mark_all_dquot_dirty(dquots);
 out_unlock:
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 }
 EXPORT_SYMBOL(__dquot_free_space);
@@ -1802,13 +1789,12 @@ void dquot_free_inode(const struct inode *inode)
 	unsigned int cnt;
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;
+	int index;
 
-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
 		return;
 
-	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	index = srcu_read_lock(&dquot_srcu);
 	spin_lock(&dq_data_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		int wtype;
@@ -1823,7 +1809,7 @@ void dquot_free_inode(const struct inode *inode)
 	}
 	spin_unlock(&dq_data_lock);
 	mark_all_dquot_dirty(dquots);
-	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+	srcu_read_unlock(&dquot_srcu, index);
 	flush_warnings(warn);
 }
 EXPORT_SYMBOL(dquot_free_inode);
@@ -1837,6 +1823,8 @@ EXPORT_SYMBOL(dquot_free_inode);
  * This operation can block, but only after everything is updated
  * A transaction must be started when entering this function.
  *
+ * We are holding reference on transfer_from & transfer_to, no need to
+ * protect them by srcu_read_lock().
  */
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 {
@@ -1849,8 +1837,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	struct dquot_warn warn_from_inodes[MAXQUOTAS];
 	struct dquot_warn warn_from_space[MAXQUOTAS];
 
-	/* First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex */
 	if (IS_NOQUOTA(inode))
 		return 0;
 	/* Initialize the arrays */
@@ -1859,12 +1845,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
 		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
 	}
-	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
-		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+		spin_unlock(&dq_data_lock);
 		return 0;
 	}
-	spin_lock(&dq_data_lock);
 	cur_space = inode_get_bytes(inode);
 	rsv_space = inode_get_rsv_space(inode);
 	space = cur_space + rsv_space;
@@ -1918,7 +1904,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 		inode->i_dquot[cnt] = transfer_to[cnt];
 	}
 	spin_unlock(&dq_data_lock);
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 
 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
@@ -1932,7 +1917,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	return 0;
 over_quota:
 	spin_unlock(&dq_data_lock);
-	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
 	flush_warnings(warn_to);
 	return ret;
 }
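All of the reader-side hunks above converge on one shape, condensed below
from __dquot_alloc_space() as changed in this file (a sketch, not a
compilable unit). The SRCU section may sleep, which is the point:
mark_all_dquot_dirty() can do filesystem IO, so plain RCU or a spinlock
alone would not work here:

	int cnt, ret = 0, index;

	index = srcu_read_lock(&dquot_srcu);	/* i_dquot[] can't be freed */
	spin_lock(&dq_data_lock);		/* still serializes counters */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		/* ... charge or release space against dquots[cnt] ... */
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);		/* may block / do IO */
	srcu_read_unlock(&dquot_srcu, index);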
diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c
index 2f97b0e2c501..ebc5e6285800 100644
--- a/fs/quota/kqid.c
+++ b/fs/quota/kqid.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(qid_lt);
 /**
  * from_kqid - Create a qid from a kqid user-namespace pair.
  * @targ: The user namespace we want a qid in.
- * @kuid: The kernel internal quota identifier to start with.
+ * @kqid: The kernel internal quota identifier to start with.
  *
  * Map @kqid into the user-namespace specified by @targ and
  * return the resulting qid.
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index 72d29177998e..bb2869f5dfd8 100644
--- a/fs/quota/netlink.c
+++ b/fs/quota/netlink.c
@@ -32,8 +32,7 @@ static struct genl_family quota_genl_family = {
 
 /**
  * quota_send_warning - Send warning to userspace about exceeded quota
- * @type: The quota type: USRQQUOTA, GRPQUOTA,...
- * @id: The user or group id of the quota that was exceeded
+ * @qid: The kernel internal quota identifier.
  * @dev: The device on which the fs is mounted (sb->s_dev)
  * @warntype: The type of the warning: QUOTA_NL_...
  *
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index ff3f0b3cfdb3..75621649dbd7 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
 {
 	__u32 fmt;
 
-	down_read(&sb_dqopt(sb)->dqptr_sem);
+	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	if (!sb_has_quota_active(sb, type)) {
-		up_read(&sb_dqopt(sb)->dqptr_sem);
+		mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 		return -ESRCH;
 	}
 	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-	up_read(&sb_dqopt(sb)->dqptr_sem);
+	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	if (copy_to_user(addr, &fmt, sizeof(fmt)))
 		return -EFAULT;
 	return 0;
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 5739cb99de7b..9c02d96d3a42 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 	return 0;
 }
 
-static void balance_leaf_insert_left(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
+					     struct item_head * const ih,
+					     const char * const body)
 {
 	int ret;
 	struct buffer_info bi;
 	int n = B_NR_ITEMS(tb->L[0]);
+	unsigned body_shift_bytes = 0;
 
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 		/* part of new item falls into L[0] */
@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 
 		put_ih_item_len(ih, new_item_len);
 		if (tb->lbytes > tb->zeroes_num) {
-			body += (tb->lbytes - tb->zeroes_num);
+			body_shift_bytes = tb->lbytes - tb->zeroes_num;
 			tb->zeroes_num = 0;
 		} else
 			tb->zeroes_num -= tb->lbytes;
@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 		tb->insert_size[0] = 0;
 		tb->zeroes_num = 0;
 	}
+	return body_shift_bytes;
 }
 
 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
-						 struct item_head *ih,
-						 const char *body)
+						 struct item_head * const ih,
+						 const char * const body)
 {
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
 	tb->pos_in_item -= tb->lbytes;
 }
 
-static void balance_leaf_paste_left_shift(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
+						  struct item_head * const ih,
+						  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
+	int body_shift_bytes = 0;
 
 	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
 		balance_leaf_paste_left_shift_dirent(tb, ih, body);
-		return;
+		return 0;
 	}
 
 	RFALSE(tb->lbytes <= 0,
@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 	 * insert_size[0]
 	 */
 	if (l_n > tb->zeroes_num) {
-		body += (l_n - tb->zeroes_num);
+		body_shift_bytes = l_n - tb->zeroes_num;
 		tb->zeroes_num = 0;
 	} else
 		tb->zeroes_num -= l_n;
@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 	 */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 	}
+	return body_shift_bytes;
 }
 
 
 /* appended item will be in L[0] in whole */
 static void balance_leaf_paste_left_whole(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+					  struct item_head * const ih,
+					  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
 	tb->zeroes_num = 0;
 }
 
-static void balance_leaf_paste_left(struct tree_balance *tb,
-				    struct item_head *ih, const char *body)
+static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	/* we must shift the part of the appended item */
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
-		balance_leaf_paste_left_shift(tb, ih, body);
+		return balance_leaf_paste_left_shift(tb, ih, body);
 	else
 		balance_leaf_paste_left_whole(tb, ih, body);
+	return 0;
 }
 
 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
-static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
-			      const char *body, int flag)
+static unsigned int balance_leaf_left(struct tree_balance *tb,
+				      struct item_head * const ih,
+				      const char * const body, int flag)
 {
 	if (tb->lnum[0] <= 0)
-		return;
+		return 0;
 
 	/* new item or it part falls to L[0], shift it too */
 	if (tb->item_pos < tb->lnum[0]) {
 		BUG_ON(flag != M_INSERT && flag != M_PASTE);
 
 		if (flag == M_INSERT)
-			balance_leaf_insert_left(tb, ih, body);
+			return balance_leaf_insert_left(tb, ih, body);
 		else /* M_PASTE */
-			balance_leaf_paste_left(tb, ih, body);
+			return balance_leaf_paste_left(tb, ih, body);
 	} else
 		/* new item doesn't fall into L[0] */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+	return 0;
 }
 
 
 static void balance_leaf_insert_right(struct tree_balance *tb,
-				      struct item_head *ih, const char *body)
+				      struct item_head * const ih,
+				      const char * const body)
 {
 
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
 
 
 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
-		struct item_head *ih, const char *body)
+		struct item_head * const ih,
+		const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right_shift(struct tree_balance *tb,
-		struct item_head *ih, const char *body)
+		struct item_head * const ih,
+		const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n_shift, n_rem, r_zeroes_number, version;
@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right_whole(struct tree_balance *tb,
-		struct item_head *ih, const char *body)
+		struct item_head * const ih,
+		const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right(struct tree_balance *tb,
-		struct item_head *ih, const char *body)
+		struct item_head * const ih,
+		const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb,
 }
 
 /* shift rnum[0] items from S[0] to the right neighbor R[0] */
-static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
-			       const char *body, int flag)
+static void balance_leaf_right(struct tree_balance *tb,
+			       struct item_head * const ih,
+			       const char * const body, int flag)
 {
 	if (tb->rnum[0] <= 0)
 		return;
@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
 }
 
 static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body,
+					  struct item_head * const ih,
+					  const char * const body,
 					  struct item_head *insert_key,
 					  struct buffer_head **insert_ptr,
 					  int i)
@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
 
 /* we append to directory item */
 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
-						struct item_head *ih,
-						const char *body,
+						struct item_head * const ih,
+						const char * const body,
 						struct item_head *insert_key,
 						struct buffer_head **insert_ptr,
 						int i)
@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
-					       struct item_head *ih,
-					       const char *body,
+					       struct item_head * const ih,
+					       const char * const body,
 					       struct item_head *insert_key,
 					       struct buffer_head **insert_ptr,
 					       int i)
@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
 }
 
 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
-					       struct item_head *ih,
-					       const char *body,
+					       struct item_head * const ih,
+					       const char * const body,
 					       struct item_head *insert_key,
 					       struct buffer_head **insert_ptr,
 					       int i)
@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
 
 }
 static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
-					 struct item_head *ih,
-					 const char *body,
+					 struct item_head * const ih,
+					 const char * const body,
 					 struct item_head *insert_key,
 					 struct buffer_head **insert_ptr,
 					 int i)
@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
 
 /* Fill new nodes that appear in place of S[0] */
 static void balance_leaf_new_nodes(struct tree_balance *tb,
-				   struct item_head *ih,
-				   const char *body,
+				   struct item_head * const ih,
+				   const char * const body,
 				   struct item_head *insert_key,
 				   struct buffer_head **insert_ptr,
 				   int flag)
@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_insert(struct tree_balance *tb,
-					    struct item_head *ih,
-					    const char *body)
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
-						  struct item_head *ih,
-						  const char *body)
+						  struct item_head * const ih,
+						  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct item_head *pasted = item_head(tbS0, tb->item_pos);
@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_paste(struct tree_balance *tb,
-					   struct item_head *ih,
-					   const char *body)
+					   struct item_head * const ih,
+					   const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
  * of the affected item which remains in S
  */
 static void balance_leaf_finish_node(struct tree_balance *tb,
-				     struct item_head *ih,
-				     const char *body, int flag)
+				     struct item_head * const ih,
+				     const char * const body, int flag)
 {
 	/* if we must insert or append into buffer S[0] */
 	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
 	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
 		tb->pos_in_item *= UNFM_P_SIZE;
 
-	balance_leaf_left(tb, ih, body, flag);
+	body += balance_leaf_left(tb, ih, body, flag);
 
 	/* tb->lnum[0] > 0 */
 	/* Calculate new item position */
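The do_balan.c hunks above fix corruption introduced when balance_leaf()
was split into helpers: the old monolithic code advanced `body` in place,
but a helper that receives `const char *body` gets its own copy of the
pointer, so `body += ...` inside it was lost and later stages inserted
from a stale position. A toy illustration of the bug and of the fixed
shape, with hypothetical names:

/* Broken: C passes the pointer itself by value, so this adjustment is
 * invisible to the caller -- exactly what happened after the refactor. */
static void advance_broken(const char *body, unsigned int shift)
{
	body += shift;		/* modifies only the local copy */
}

/* Fixed shape, mirroring the patch: declare the parameter "* const" so
 * the compiler rejects in-place adjustment, and return the shift. */
static unsigned int advance_fixed(const char * const body, unsigned int shift)
{
	(void)body;		/* advancing is left to the caller */
	return shift;		/* caller: body += advance_fixed(body, n); */
}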
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e8870de4627e..a88b1b3e7db3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 		}
 	}
 
-	/* wait for all commits to finish */
-	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
 
 	/*
 	 * We must release the write lock here because
@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 	 */
 	reiserfs_write_unlock(sb);
 
+	/*
+	 * Cancel flushing of old commits. Note that neither of these works
+	 * will be requeued because superblock is being shutdown and doesn't
+	 * have MS_ACTIVE set.
+	 */
 	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
-	flush_workqueue(REISERFS_SB(sb)->commit_wq);
+	/* wait for all commits to finish */
+	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
 
 	free_journal_ram(sb);
 
@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
 	if (flush) {
 		flush_commit_list(sb, jl, 1);
 		flush_journal_list(sb, jl, 1);
-	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
-		queue_delayed_work(REISERFS_SB(sb)->commit_wq,
-				   &journal->j_work, HZ / 10);
+	} else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
+		/*
+		 * Avoid queueing work when sb is being shut down. Transaction
+		 * will be flushed on journal shutdown.
+		 */
+		if (sb->s_flags & MS_ACTIVE)
+			queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+					   &journal->j_work, HZ / 10);
+	}
 
 	/*
 	 * if the next transaction has any chance of wrapping, flush
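Together with the reiserfs/super.c hunk below, this closes a use-after-free
window at unmount: j_work could be requeued, or still be executing, while
free_journal_ram() released the memory it touches. The resulting pattern,
sketched with hypothetical function names (the real guards are the
MS_ACTIVE check added above and the switch to cancel_delayed_work_sync()):

static void maybe_queue_commit(struct super_block *sb,
			       struct workqueue_struct *wq,
			       struct delayed_work *work)
{
	/* MS_ACTIVE is cleared early in umount, before the journal dies,
	 * so nothing can requeue work once teardown has started. */
	if (sb->s_flags & MS_ACTIVE)
		queue_delayed_work(wq, work, HZ / 10);
}

static void journal_teardown(struct delayed_work *work)
{
	/* cancel_delayed_work() only removes a *pending* item; a handler
	 * already running keeps touching journal memory. The _sync
	 * variant additionally waits for a running handler to finish. */
	cancel_delayed_work_sync(work);
	/* ... now it is safe to free the structures the work used ... */
}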
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 814dda3ec998..249594a821e0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
 
 /* insert item into the leaf node in position before */
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number)
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body,
+			  int zeros_number)
 {
 	struct buffer_head *bh = bi->bi_bh;
 	int nr, free_space;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index bf53888c7f59..735c2c2b4536 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
 		       int del_num, int del_bytes);
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number);
-void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-			  int pos_in_item, int paste_size, const char *body,
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body,
 			  int zeros_number);
+void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
+			  int pos_in_item, int paste_size,
+			  const char * const body, int zeros_number);
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
 			  int pos_in_item, int cut_size);
 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 709ea92d716f..d46e88a33b02 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 	unsigned long delay;
 
-	if (s->s_flags & MS_RDONLY)
+	/*
+	 * Avoid scheduling flush when sb is being shut down. It can race
+	 * with journal shutdown and free still queued delayed work.
+	 */
+	if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
 		return;
 
 	spin_lock(&sbi->old_work_lock);
diff --git a/fs/super.c b/fs/super.c
index a371ce6aa919..b9a214d2fe98 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -217,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
 	mutex_init(&s->s_dquot.dqio_mutex);
 	mutex_init(&s->s_dquot.dqonoff_mutex);
-	init_rwsem(&s->s_dquot.dqptr_sem);
 	s->s_maxbytes = MAX_NON_LFS;
 	s->s_op = &default_op;
 	s->s_time_gran = 1000000000;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d80738fdf424..86c6743ec1fe 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -27,7 +27,7 @@
 
 #include "udfdecl.h"
 #include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/string.h>	/* memset */
 #include <linux/capability.h>
@@ -100,24 +100,6 @@ static int udf_adinicb_write_begin(struct file *file,
 	return 0;
 }
 
-static int udf_adinicb_write_end(struct file *file,
-			struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned copied,
-			struct page *page, void *fsdata)
-{
-	struct inode *inode = mapping->host;
-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-	char *kaddr;
-	struct udf_inode_info *iinfo = UDF_I(inode);
-
-	kaddr = kmap_atomic(page);
-	memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
-		kaddr + offset, copied);
-	kunmap_atomic(kaddr);
-
-	return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
-}
-
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
 				     struct iov_iter *iter,
 				     loff_t offset)
@@ -130,7 +112,7 @@ const struct address_space_operations udf_adinicb_aops = {
 	.readpage	= udf_adinicb_readpage,
 	.writepage	= udf_adinicb_writepage,
 	.write_begin = udf_adinicb_write_begin,
-	.write_end = udf_adinicb_write_end,
+	.write_end = simple_write_end,
 	.direct_IO	= udf_adinicb_direct_IO,
 };
 
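For in-ICB (adinicb) files the page contents are copied into the inode's
i_data by ->writepage at writeback time, so copying again in ->write_end
duplicated every write. The call flow after this patch, reconstructed from
the surrounding code as a comment-only sketch:

/*
 * write(2) -> udf_adinicb_write_begin()   prepare + map the page-cache page
 *          -> data copied into that page
 *          -> simple_write_end()          mark page dirty, update i_size
 *                                         (no copy into the ICB here anymore)
 * writeback -> udf_adinicb_writepage()    single memcpy of the page into
 *                                         iinfo->i_ext.i_data (the ICB)
 */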
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 6583fe9b0645..6ad5a453af97 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -21,7 +21,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "udf_sb.h"
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3286db047a40..813da94d447b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -63,7 +63,7 @@
 #include "udf_i.h"
 
 #include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define VDS_POS_PRIMARY_VOL_DESC	0
 #define VDS_POS_UNALLOC_SPACE_DESC	1
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index d7c6dbe4194b..6fb7945c1e6e 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -20,7 +20,7 @@
  */
 
 #include "udfdecl.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/time.h>
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 44b815e57f94..afd470e588ff 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -412,7 +412,6 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
 	int extIndex = 0, newExtIndex = 0, hasExt = 0;
 	unsigned short valueCRC;
 	uint8_t curr;
-	const uint8_t hexChar[] = "0123456789ABCDEF";
 
 	if (udfName[0] == '.' &&
 	    (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
@@ -477,10 +476,10 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
 			newIndex = 250;
 		newName[newIndex++] = CRC_MARK;
 		valueCRC = crc_itu_t(0, fidName, fidNameLen);
-		newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
-		newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8];
-		newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4];
-		newName[newIndex++] = hexChar[(valueCRC & 0x000f)];
+		newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
+		newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
+		newName[newIndex++] = hex_asc_upper_hi(valueCRC);
+		newName[newIndex++] = hex_asc_upper_lo(valueCRC);
 
 		if (hasExt) {
 			newName[newIndex++] = EXT_MARK;
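This hunk replaces an open-coded uppercase hex table with the shared
kernel helpers. For reference, a mirror of what hex_asc_upper_hi() and
hex_asc_upper_lo() expand to (the real definitions live in
include/linux/kernel.h; mirrored here only to show the nibble split and
why the four new lines are equivalent to the old ones):

static const char hex_asc_upper[] = "0123456789ABCDEF";
#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

/* For the 16-bit CRC:
 *   hex_asc_upper_hi(valueCRC >> 8) == hexChar[(valueCRC & 0xf000) >> 12]
 *   hex_asc_upper_lo(valueCRC >> 8) == hexChar[(valueCRC & 0x0f00) >>  8]
 *   hex_asc_upper_hi(valueCRC)      == hexChar[(valueCRC & 0x00f0) >>  4]
 *   hex_asc_upper_lo(valueCRC)      == hexChar[(valueCRC & 0x000f)]
 */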
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 0f3c5d38da1f..80d345a3524c 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -390,7 +390,6 @@ struct quota_info {
 	unsigned int flags;			/* Flags for diskquotas on this device */
 	struct mutex dqio_mutex;		/* lock device while I/O in progress */
 	struct mutex dqonoff_mutex;		/* Serialize quotaon & quotaoff */
-	struct rw_semaphore dqptr_sem;		/* serialize ops using quota_info struct, pointers from inode to dquots */
 	struct inode *files[MAXQUOTAS];		/* inodes of quotafiles */
 	struct mem_dqinfo info[MAXQUOTAS];	/* Information for each quota type */
 	const struct quota_format_ops *ops[MAXQUOTAS];	/* Operations for each type */