-rw-r--r--  Documentation/filesystems/ext3.txt  |   7
-rw-r--r--  fs/ext3/super.c                     |  43
-rw-r--r--  fs/isofs/inode.c                    |  16
-rw-r--r--  fs/jbd/commit.c                     |   2
-rw-r--r--  fs/jbd/journal.c                    |  18
-rw-r--r--  fs/reiserfs/bitmap.c                |  22
-rw-r--r--  fs/reiserfs/dir.c                   |   7
-rw-r--r--  fs/reiserfs/fix_node.c              |  26
-rw-r--r--  fs/reiserfs/inode.c                 | 114
-rw-r--r--  fs/reiserfs/ioctl.c                 |   7
-rw-r--r--  fs/reiserfs/journal.c               | 104
-rw-r--r--  fs/reiserfs/lock.c                  |  43
-rw-r--r--  fs/reiserfs/namei.c                 |  24
-rw-r--r--  fs/reiserfs/prints.c                |   5
-rw-r--r--  fs/reiserfs/reiserfs.h              |  36
-rw-r--r--  fs/reiserfs/resize.c                |  10
-rw-r--r--  fs/reiserfs/stree.c                 |  74
-rw-r--r--  fs/reiserfs/super.c                 |  75
-rw-r--r--  fs/reiserfs/xattr.c                 |  46
-rw-r--r--  fs/reiserfs/xattr_acl.c             |  16
-rw-r--r--  fs/udf/super.c                      | 342
-rw-r--r--  include/linux/jbd.h                 |  17
22 files changed, 609 insertions, 445 deletions
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 293855e95000..7ed0d17d6721 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -26,11 +26,12 @@ journal=inum When a journal already exists, this option is ignored.
26 Otherwise, it specifies the number of the inode which 26 Otherwise, it specifies the number of the inode which
27 will represent the ext3 file system's journal file. 27 will represent the ext3 file system's journal file.
28 28
29journal_path=path
29journal_dev=devnum When the external journal device's major/minor numbers 30journal_dev=devnum When the external journal device's major/minor numbers
30 have changed, this option allows the user to specify 31 have changed, these options allow the user to specify
31 the new journal location. The journal device is 32 the new journal location. The journal device is
32 identified through its new major/minor numbers encoded 33 identified through either its new major/minor numbers
33 in devnum. 34 encoded in devnum, or via a path to the device.
34 35
35norecovery Don't load the journal on mounting. Note that this forces 36norecovery Don't load the journal on mounting. Note that this forces
36noload mount of inconsistent filesystem, which can lead to 37noload mount of inconsistent filesystem, which can lead to
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c47f14750722..c50c76190373 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -27,6 +27,7 @@
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/cleancache.h> 29#include <linux/cleancache.h>
30#include <linux/namei.h>
30 31
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32 33
@@ -819,6 +820,7 @@ enum {
819 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, 820 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
820 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh, 821 Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
821 Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, 822 Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
823 Opt_journal_path,
822 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 824 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
823 Opt_data_err_abort, Opt_data_err_ignore, 825 Opt_data_err_abort, Opt_data_err_ignore,
824 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 826 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -860,6 +862,7 @@ static const match_table_t tokens = {
860 {Opt_journal_update, "journal=update"}, 862 {Opt_journal_update, "journal=update"},
861 {Opt_journal_inum, "journal=%u"}, 863 {Opt_journal_inum, "journal=%u"},
862 {Opt_journal_dev, "journal_dev=%u"}, 864 {Opt_journal_dev, "journal_dev=%u"},
865 {Opt_journal_path, "journal_path=%s"},
863 {Opt_abort, "abort"}, 866 {Opt_abort, "abort"},
864 {Opt_data_journal, "data=journal"}, 867 {Opt_data_journal, "data=journal"},
865 {Opt_data_ordered, "data=ordered"}, 868 {Opt_data_ordered, "data=ordered"},
@@ -975,6 +978,11 @@ static int parse_options (char *options, struct super_block *sb,
975 int option; 978 int option;
976 kuid_t uid; 979 kuid_t uid;
977 kgid_t gid; 980 kgid_t gid;
981 char *journal_path;
982 struct inode *journal_inode;
983 struct path path;
984 int error;
985
978#ifdef CONFIG_QUOTA 986#ifdef CONFIG_QUOTA
979 int qfmt; 987 int qfmt;
980#endif 988#endif
@@ -1129,6 +1137,41 @@ static int parse_options (char *options, struct super_block *sb,
1129 return 0; 1137 return 0;
1130 *journal_devnum = option; 1138 *journal_devnum = option;
1131 break; 1139 break;
1140 case Opt_journal_path:
1141 if (is_remount) {
1142 ext3_msg(sb, KERN_ERR, "error: cannot specify "
1143 "journal on remount");
1144 return 0;
1145 }
1146
1147 journal_path = match_strdup(&args[0]);
1148 if (!journal_path) {
1149 ext3_msg(sb, KERN_ERR, "error: could not dup "
1150 "journal device string");
1151 return 0;
1152 }
1153
1154 error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
1155 if (error) {
1156 ext3_msg(sb, KERN_ERR, "error: could not find "
1157 "journal device path: error %d", error);
1158 kfree(journal_path);
1159 return 0;
1160 }
1161
1162 journal_inode = path.dentry->d_inode;
1163 if (!S_ISBLK(journal_inode->i_mode)) {
1164 ext3_msg(sb, KERN_ERR, "error: journal path %s "
1165 "is not a block device", journal_path);
1166 path_put(&path);
1167 kfree(journal_path);
1168 return 0;
1169 }
1170
1171 *journal_devnum = new_encode_dev(journal_inode->i_rdev);
1172 path_put(&path);
1173 kfree(journal_path);
1174 break;
1132 case Opt_noload: 1175 case Opt_noload:
1133 set_opt (sbi->s_mount_opt, NOLOAD); 1176 set_opt (sbi->s_mount_opt, NOLOAD);
1134 break; 1177 break;
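
Note: the journal_path= handling added above reduces to one operation: resolve a user-supplied path to a block device and hand back its encoded device number, exactly as journal_dev= would receive it. A minimal standalone sketch of that resolution, for reference only (the helper name is illustrative and not part of the patch; it assumes <linux/namei.h> and the usual kdev helpers are available):

/* Sketch: turn a device path into the devnum that journal_dev= expects. */
static int path_to_journal_devnum(const char *journal_path,
				  unsigned int *devnum)
{
	struct path path;
	struct inode *journal_inode;
	int error;

	error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
	if (error)
		return error;			/* path does not resolve */

	journal_inode = path.dentry->d_inode;
	if (!S_ISBLK(journal_inode->i_mode)) {
		path_put(&path);
		return -ENOTBLK;		/* not a block device */
	}

	*devnum = new_encode_dev(journal_inode->i_rdev);
	path_put(&path);
	return 0;
}
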
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c348d6d88624..e5d408a7ea4a 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
117 117
118static int isofs_remount(struct super_block *sb, int *flags, char *data) 118static int isofs_remount(struct super_block *sb, int *flags, char *data)
119{ 119{
120 /* we probably want a lot more here */ 120 if (!(*flags & MS_RDONLY))
121 *flags |= MS_RDONLY; 121 return -EROFS;
122 return 0; 122 return 0;
123} 123}
124 124
@@ -763,15 +763,6 @@ root_found:
763 */ 763 */
764 s->s_maxbytes = 0x80000000000LL; 764 s->s_maxbytes = 0x80000000000LL;
765 765
766 /*
767 * The CDROM is read-only, has no nodes (devices) on it, and since
768 * all of the files appear to be owned by root, we really do not want
769 * to allow suid. (suid or devices will not show up unless we have
770 * Rock Ridge extensions)
771 */
772
773 s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
774
775 /* Set this for reference. Its not currently used except on write 766 /* Set this for reference. Its not currently used except on write
776 which we don't have .. */ 767 which we don't have .. */
777 768
@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
1530static struct dentry *isofs_mount(struct file_system_type *fs_type, 1521static struct dentry *isofs_mount(struct file_system_type *fs_type,
1531 int flags, const char *dev_name, void *data) 1522 int flags, const char *dev_name, void *data)
1532{ 1523{
1524 /* We don't support read-write mounts */
1525 if (!(flags & MS_RDONLY))
1526 return ERR_PTR(-EACCES);
1533 return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); 1527 return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
1534} 1528}
1535 1529
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 11bb11f48b3a..bb217dcb41af 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -340,13 +340,13 @@ void journal_commit_transaction(journal_t *journal)
340 J_ASSERT(journal->j_committing_transaction == NULL); 340 J_ASSERT(journal->j_committing_transaction == NULL);
341 341
342 commit_transaction = journal->j_running_transaction; 342 commit_transaction = journal->j_running_transaction;
343 J_ASSERT(commit_transaction->t_state == T_RUNNING);
344 343
345 trace_jbd_start_commit(journal, commit_transaction); 344 trace_jbd_start_commit(journal, commit_transaction);
346 jbd_debug(1, "JBD: starting commit of transaction %d\n", 345 jbd_debug(1, "JBD: starting commit of transaction %d\n",
347 commit_transaction->t_tid); 346 commit_transaction->t_tid);
348 347
349 spin_lock(&journal->j_state_lock); 348 spin_lock(&journal->j_state_lock);
349 J_ASSERT(commit_transaction->t_state == T_RUNNING);
350 commit_transaction->t_state = T_LOCKED; 350 commit_transaction->t_state = T_LOCKED;
351 351
352 trace_jbd_commit_locking(journal, commit_transaction); 352 trace_jbd_commit_locking(journal, commit_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 6510d6355729..2d04f9afafd7 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -90,6 +90,24 @@ static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
90static void __journal_abort_soft (journal_t *journal, int errno); 90static void __journal_abort_soft (journal_t *journal, int errno);
91static const char *journal_dev_name(journal_t *journal, char *buffer); 91static const char *journal_dev_name(journal_t *journal, char *buffer);
92 92
93#ifdef CONFIG_JBD_DEBUG
94void __jbd_debug(int level, const char *file, const char *func,
95 unsigned int line, const char *fmt, ...)
96{
97 struct va_format vaf;
98 va_list args;
99
100 if (level > journal_enable_debug)
101 return;
102 va_start(args, fmt);
103 vaf.fmt = fmt;
104 vaf.va = &args;
105 printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
106 va_end(args);
107}
108EXPORT_SYMBOL(__jbd_debug);
109#endif
110
93/* 111/*
94 * Helper function used to manage commit timeouts 112 * Helper function used to manage commit timeouts
95 */ 113 */
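
Note: the matching include/linux/jbd.h change (listed in the diffstat but not shown in this excerpt) presumably turns jbd_debug() into a macro that forwards the call site to the new __jbd_debug() helper. A plausible shape, stated here as an assumption rather than the actual hunk:

/* Assumed wrapper: pass file/function/line through to __jbd_debug(). */
#define jbd_debug(n, fmt, a...) \
	__jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)

Moving the formatting into one out-of-line function keeps every jbd_debug() call site small while still reporting where the message came from.
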
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index a98b7740a0fc..dc9a6829f7c6 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -423,8 +423,11 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
423 set_sb_free_blocks(rs, sb_free_blocks(rs) + 1); 423 set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
424 424
425 journal_mark_dirty(th, s, sbh); 425 journal_mark_dirty(th, s, sbh);
426 if (for_unformatted) 426 if (for_unformatted) {
427 int depth = reiserfs_write_unlock_nested(s);
427 dquot_free_block_nodirty(inode, 1); 428 dquot_free_block_nodirty(inode, 1);
429 reiserfs_write_lock_nested(s, depth);
430 }
428} 431}
429 432
430void reiserfs_free_block(struct reiserfs_transaction_handle *th, 433void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1128,6 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1128 b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1; 1131 b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
1129 int passno = 0; 1132 int passno = 0;
1130 int nr_allocated = 0; 1133 int nr_allocated = 0;
1134 int depth;
1131 1135
1132 determine_prealloc_size(hint); 1136 determine_prealloc_size(hint);
1133 if (!hint->formatted_node) { 1137 if (!hint->formatted_node) {
@@ -1137,10 +1141,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1137 "reiserquota: allocating %d blocks id=%u", 1141 "reiserquota: allocating %d blocks id=%u",
1138 amount_needed, hint->inode->i_uid); 1142 amount_needed, hint->inode->i_uid);
1139#endif 1143#endif
1144 depth = reiserfs_write_unlock_nested(s);
1140 quota_ret = 1145 quota_ret =
1141 dquot_alloc_block_nodirty(hint->inode, amount_needed); 1146 dquot_alloc_block_nodirty(hint->inode, amount_needed);
1142 if (quota_ret) /* Quota exceeded? */ 1147 if (quota_ret) { /* Quota exceeded? */
1148 reiserfs_write_lock_nested(s, depth);
1143 return QUOTA_EXCEEDED; 1149 return QUOTA_EXCEEDED;
1150 }
1144 if (hint->preallocate && hint->prealloc_size) { 1151 if (hint->preallocate && hint->prealloc_size) {
1145#ifdef REISERQUOTA_DEBUG 1152#ifdef REISERQUOTA_DEBUG
1146 reiserfs_debug(s, REISERFS_DEBUG_CODE, 1153 reiserfs_debug(s, REISERFS_DEBUG_CODE,
@@ -1153,6 +1160,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1153 hint->preallocate = hint->prealloc_size = 0; 1160 hint->preallocate = hint->prealloc_size = 0;
1154 } 1161 }
1155 /* for unformatted nodes, force large allocations */ 1162 /* for unformatted nodes, force large allocations */
1163 reiserfs_write_lock_nested(s, depth);
1156 } 1164 }
1157 1165
1158 do { 1166 do {
@@ -1181,9 +1189,11 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1181 hint->inode->i_uid); 1189 hint->inode->i_uid);
1182#endif 1190#endif
1183 /* Free not allocated blocks */ 1191 /* Free not allocated blocks */
1192 depth = reiserfs_write_unlock_nested(s);
1184 dquot_free_block_nodirty(hint->inode, 1193 dquot_free_block_nodirty(hint->inode,
1185 amount_needed + hint->prealloc_size - 1194 amount_needed + hint->prealloc_size -
1186 nr_allocated); 1195 nr_allocated);
1196 reiserfs_write_lock_nested(s, depth);
1187 } 1197 }
1188 while (nr_allocated--) 1198 while (nr_allocated--)
1189 reiserfs_free_block(hint->th, hint->inode, 1199 reiserfs_free_block(hint->th, hint->inode,
@@ -1214,10 +1224,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1214 REISERFS_I(hint->inode)->i_prealloc_count, 1224 REISERFS_I(hint->inode)->i_prealloc_count,
1215 hint->inode->i_uid); 1225 hint->inode->i_uid);
1216#endif 1226#endif
1227
1228 depth = reiserfs_write_unlock_nested(s);
1217 dquot_free_block_nodirty(hint->inode, amount_needed + 1229 dquot_free_block_nodirty(hint->inode, amount_needed +
1218 hint->prealloc_size - nr_allocated - 1230 hint->prealloc_size - nr_allocated -
1219 REISERFS_I(hint->inode)-> 1231 REISERFS_I(hint->inode)->
1220 i_prealloc_count); 1232 i_prealloc_count);
1233 reiserfs_write_lock_nested(s, depth);
1221 } 1234 }
1222 1235
1223 return CARRY_ON; 1236 return CARRY_ON;
@@ -1340,10 +1353,11 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1340 "reading failed", __func__, block); 1353 "reading failed", __func__, block);
1341 else { 1354 else {
1342 if (buffer_locked(bh)) { 1355 if (buffer_locked(bh)) {
1356 int depth;
1343 PROC_INFO_INC(sb, scan_bitmap.wait); 1357 PROC_INFO_INC(sb, scan_bitmap.wait);
1344 reiserfs_write_unlock(sb); 1358 depth = reiserfs_write_unlock_nested(sb);
1345 __wait_on_buffer(bh); 1359 __wait_on_buffer(bh);
1346 reiserfs_write_lock(sb); 1360 reiserfs_write_lock_nested(sb, depth);
1347 } 1361 }
1348 BUG_ON(!buffer_uptodate(bh)); 1362 BUG_ON(!buffer_uptodate(bh));
1349 BUG_ON(atomic_read(&bh->b_count) == 0); 1363 BUG_ON(atomic_read(&bh->b_count) == 0);
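
Note: every bitmap.c hunk above, and most of the reiserfs changes that follow, apply the same idiom: record the current recursion depth of the per-superblock write lock, release the lock completely around an operation that can sleep or take other locks (quota calls, buffer waits), then retake it at the saved depth. A minimal illustration of the pattern (the function is invented for the example; the two lock helpers are the ones the patch uses):

/* Example only: wait for a buffer without holding the write lock. */
static void example_wait_on_buffer(struct super_block *s,
				   struct buffer_head *bh)
{
	int depth;

	depth = reiserfs_write_unlock_nested(s);	/* drop all nesting levels */
	__wait_on_buffer(bh);				/* may sleep */
	reiserfs_write_lock_nested(s, depth);		/* restore previous depth */
}
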
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 03e4ca5624d6..1fd2051109a3 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -71,6 +71,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
71 char small_buf[32]; /* avoid kmalloc if we can */ 71 char small_buf[32]; /* avoid kmalloc if we can */
72 struct reiserfs_dir_entry de; 72 struct reiserfs_dir_entry de;
73 int ret = 0; 73 int ret = 0;
74 int depth;
74 75
75 reiserfs_write_lock(inode->i_sb); 76 reiserfs_write_lock(inode->i_sb);
76 77
@@ -181,17 +182,17 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
181 * Since filldir might sleep, we can release 182 * Since filldir might sleep, we can release
182 * the write lock here for other waiters 183 * the write lock here for other waiters
183 */ 184 */
184 reiserfs_write_unlock(inode->i_sb); 185 depth = reiserfs_write_unlock_nested(inode->i_sb);
185 if (!dir_emit 186 if (!dir_emit
186 (ctx, local_buf, d_reclen, d_ino, 187 (ctx, local_buf, d_reclen, d_ino,
187 DT_UNKNOWN)) { 188 DT_UNKNOWN)) {
188 reiserfs_write_lock(inode->i_sb); 189 reiserfs_write_lock_nested(inode->i_sb, depth);
189 if (local_buf != small_buf) { 190 if (local_buf != small_buf) {
190 kfree(local_buf); 191 kfree(local_buf);
191 } 192 }
192 goto end; 193 goto end;
193 } 194 }
194 reiserfs_write_lock(inode->i_sb); 195 reiserfs_write_lock_nested(inode->i_sb, depth);
195 if (local_buf != small_buf) { 196 if (local_buf != small_buf) {
196 kfree(local_buf); 197 kfree(local_buf);
197 } 198 }
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 430e0658704c..dc4d41530316 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -1022,9 +1022,9 @@ static int get_far_parent(struct tree_balance *tb,
1022 if (buffer_locked(*pcom_father)) { 1022 if (buffer_locked(*pcom_father)) {
1023 1023
1024 /* Release the write lock while the buffer is busy */ 1024 /* Release the write lock while the buffer is busy */
1025 reiserfs_write_unlock(tb->tb_sb); 1025 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
1026 __wait_on_buffer(*pcom_father); 1026 __wait_on_buffer(*pcom_father);
1027 reiserfs_write_lock(tb->tb_sb); 1027 reiserfs_write_lock_nested(tb->tb_sb, depth);
1028 if (FILESYSTEM_CHANGED_TB(tb)) { 1028 if (FILESYSTEM_CHANGED_TB(tb)) {
1029 brelse(*pcom_father); 1029 brelse(*pcom_father);
1030 return REPEAT_SEARCH; 1030 return REPEAT_SEARCH;
@@ -1929,9 +1929,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
1929 return REPEAT_SEARCH; 1929 return REPEAT_SEARCH;
1930 1930
1931 if (buffer_locked(bh)) { 1931 if (buffer_locked(bh)) {
1932 reiserfs_write_unlock(tb->tb_sb); 1932 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
1933 __wait_on_buffer(bh); 1933 __wait_on_buffer(bh);
1934 reiserfs_write_lock(tb->tb_sb); 1934 reiserfs_write_lock_nested(tb->tb_sb, depth);
1935 if (FILESYSTEM_CHANGED_TB(tb)) 1935 if (FILESYSTEM_CHANGED_TB(tb))
1936 return REPEAT_SEARCH; 1936 return REPEAT_SEARCH;
1937 } 1937 }
@@ -1952,6 +1952,7 @@ static int get_neighbors(struct tree_balance *tb, int h)
1952 unsigned long son_number; 1952 unsigned long son_number;
1953 struct super_block *sb = tb->tb_sb; 1953 struct super_block *sb = tb->tb_sb;
1954 struct buffer_head *bh; 1954 struct buffer_head *bh;
1955 int depth;
1955 1956
1956 PROC_INFO_INC(sb, get_neighbors[h]); 1957 PROC_INFO_INC(sb, get_neighbors[h]);
1957 1958
@@ -1969,9 +1970,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
1969 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> 1970 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
1970 FL[h]); 1971 FL[h]);
1971 son_number = B_N_CHILD_NUM(tb->FL[h], child_position); 1972 son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
1972 reiserfs_write_unlock(sb); 1973 depth = reiserfs_write_unlock_nested(tb->tb_sb);
1973 bh = sb_bread(sb, son_number); 1974 bh = sb_bread(sb, son_number);
1974 reiserfs_write_lock(sb); 1975 reiserfs_write_lock_nested(tb->tb_sb, depth);
1975 if (!bh) 1976 if (!bh)
1976 return IO_ERROR; 1977 return IO_ERROR;
1977 if (FILESYSTEM_CHANGED_TB(tb)) { 1978 if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2009,9 +2010,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
2009 child_position = 2010 child_position =
2010 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0; 2011 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
2011 son_number = B_N_CHILD_NUM(tb->FR[h], child_position); 2012 son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
2012 reiserfs_write_unlock(sb); 2013 depth = reiserfs_write_unlock_nested(tb->tb_sb);
2013 bh = sb_bread(sb, son_number); 2014 bh = sb_bread(sb, son_number);
2014 reiserfs_write_lock(sb); 2015 reiserfs_write_lock_nested(tb->tb_sb, depth);
2015 if (!bh) 2016 if (!bh)
2016 return IO_ERROR; 2017 return IO_ERROR;
2017 if (FILESYSTEM_CHANGED_TB(tb)) { 2018 if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2272,6 +2273,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
2272 } 2273 }
2273 2274
2274 if (locked) { 2275 if (locked) {
2276 int depth;
2275#ifdef CONFIG_REISERFS_CHECK 2277#ifdef CONFIG_REISERFS_CHECK
2276 repeat_counter++; 2278 repeat_counter++;
2277 if ((repeat_counter % 10000) == 0) { 2279 if ((repeat_counter % 10000) == 0) {
@@ -2286,9 +2288,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
2286 REPEAT_SEARCH : CARRY_ON; 2288 REPEAT_SEARCH : CARRY_ON;
2287 } 2289 }
2288#endif 2290#endif
2289 reiserfs_write_unlock(tb->tb_sb); 2291 depth = reiserfs_write_unlock_nested(tb->tb_sb);
2290 __wait_on_buffer(locked); 2292 __wait_on_buffer(locked);
2291 reiserfs_write_lock(tb->tb_sb); 2293 reiserfs_write_lock_nested(tb->tb_sb, depth);
2292 if (FILESYSTEM_CHANGED_TB(tb)) 2294 if (FILESYSTEM_CHANGED_TB(tb))
2293 return REPEAT_SEARCH; 2295 return REPEAT_SEARCH;
2294 } 2296 }
@@ -2359,9 +2361,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
2359 2361
2360 /* if it possible in indirect_to_direct conversion */ 2362 /* if it possible in indirect_to_direct conversion */
2361 if (buffer_locked(tbS0)) { 2363 if (buffer_locked(tbS0)) {
2362 reiserfs_write_unlock(tb->tb_sb); 2364 int depth = reiserfs_write_unlock_nested(tb->tb_sb);
2363 __wait_on_buffer(tbS0); 2365 __wait_on_buffer(tbS0);
2364 reiserfs_write_lock(tb->tb_sb); 2366 reiserfs_write_lock_nested(tb->tb_sb, depth);
2365 if (FILESYSTEM_CHANGED_TB(tb)) 2367 if (FILESYSTEM_CHANGED_TB(tb))
2366 return REPEAT_SEARCH; 2368 return REPEAT_SEARCH;
2367 } 2369 }
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0048cc16a6a8..ad62bdbb451e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -30,7 +30,6 @@ void reiserfs_evict_inode(struct inode *inode)
30 JOURNAL_PER_BALANCE_CNT * 2 + 30 JOURNAL_PER_BALANCE_CNT * 2 +
31 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb); 31 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
32 struct reiserfs_transaction_handle th; 32 struct reiserfs_transaction_handle th;
33 int depth;
34 int err; 33 int err;
35 34
36 if (!inode->i_nlink && !is_bad_inode(inode)) 35 if (!inode->i_nlink && !is_bad_inode(inode))
@@ -40,12 +39,13 @@ void reiserfs_evict_inode(struct inode *inode)
40 if (inode->i_nlink) 39 if (inode->i_nlink)
41 goto no_delete; 40 goto no_delete;
42 41
43 depth = reiserfs_write_lock_once(inode->i_sb);
44
45 /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ 42 /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
46 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ 43 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
44
47 reiserfs_delete_xattrs(inode); 45 reiserfs_delete_xattrs(inode);
48 46
47 reiserfs_write_lock(inode->i_sb);
48
49 if (journal_begin(&th, inode->i_sb, jbegin_count)) 49 if (journal_begin(&th, inode->i_sb, jbegin_count))
50 goto out; 50 goto out;
51 reiserfs_update_inode_transaction(inode); 51 reiserfs_update_inode_transaction(inode);
@@ -57,8 +57,11 @@ void reiserfs_evict_inode(struct inode *inode)
57 /* Do quota update inside a transaction for journaled quotas. We must do that 57 /* Do quota update inside a transaction for journaled quotas. We must do that
58 * after delete_object so that quota updates go into the same transaction as 58 * after delete_object so that quota updates go into the same transaction as
59 * stat data deletion */ 59 * stat data deletion */
60 if (!err) 60 if (!err) {
61 int depth = reiserfs_write_unlock_nested(inode->i_sb);
61 dquot_free_inode(inode); 62 dquot_free_inode(inode);
63 reiserfs_write_lock_nested(inode->i_sb, depth);
64 }
62 65
63 if (journal_end(&th, inode->i_sb, jbegin_count)) 66 if (journal_end(&th, inode->i_sb, jbegin_count))
64 goto out; 67 goto out;
@@ -72,12 +75,12 @@ void reiserfs_evict_inode(struct inode *inode)
72 /* all items of file are deleted, so we can remove "save" link */ 75 /* all items of file are deleted, so we can remove "save" link */
73 remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything 76 remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
74 * about an error here */ 77 * about an error here */
78out:
79 reiserfs_write_unlock(inode->i_sb);
75 } else { 80 } else {
76 /* no object items are in the tree */ 81 /* no object items are in the tree */
77 ; 82 ;
78 } 83 }
79 out:
80 reiserfs_write_unlock_once(inode->i_sb, depth);
81 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */ 84 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
82 dquot_drop(inode); 85 dquot_drop(inode);
83 inode->i_blocks = 0; 86 inode->i_blocks = 0;
@@ -610,7 +613,6 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
610 __le32 *item; 613 __le32 *item;
611 int done; 614 int done;
612 int fs_gen; 615 int fs_gen;
613 int lock_depth;
614 struct reiserfs_transaction_handle *th = NULL; 616 struct reiserfs_transaction_handle *th = NULL;
615 /* space reserved in transaction batch: 617 /* space reserved in transaction batch:
616 . 3 balancings in direct->indirect conversion 618 . 3 balancings in direct->indirect conversion
@@ -626,11 +628,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
626 loff_t new_offset = 628 loff_t new_offset =
627 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1; 629 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
628 630
629 lock_depth = reiserfs_write_lock_once(inode->i_sb); 631 reiserfs_write_lock(inode->i_sb);
630 version = get_inode_item_key_version(inode); 632 version = get_inode_item_key_version(inode);
631 633
632 if (!file_capable(inode, block)) { 634 if (!file_capable(inode, block)) {
633 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 635 reiserfs_write_unlock(inode->i_sb);
634 return -EFBIG; 636 return -EFBIG;
635 } 637 }
636 638
@@ -642,7 +644,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
642 /* find number of block-th logical block of the file */ 644 /* find number of block-th logical block of the file */
643 ret = _get_block_create_0(inode, block, bh_result, 645 ret = _get_block_create_0(inode, block, bh_result,
644 create | GET_BLOCK_READ_DIRECT); 646 create | GET_BLOCK_READ_DIRECT);
645 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 647 reiserfs_write_unlock(inode->i_sb);
646 return ret; 648 return ret;
647 } 649 }
648 /* 650 /*
@@ -760,7 +762,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
760 if (!dangle && th) 762 if (!dangle && th)
761 retval = reiserfs_end_persistent_transaction(th); 763 retval = reiserfs_end_persistent_transaction(th);
762 764
763 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 765 reiserfs_write_unlock(inode->i_sb);
764 766
765 /* the item was found, so new blocks were not added to the file 767 /* the item was found, so new blocks were not added to the file
766 ** there is no need to make sure the inode is updated with this 768 ** there is no need to make sure the inode is updated with this
@@ -1011,11 +1013,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
1011 * long time. reschedule if needed and also release the write 1013 * long time. reschedule if needed and also release the write
1012 * lock for others. 1014 * lock for others.
1013 */ 1015 */
1014 if (need_resched()) { 1016 reiserfs_cond_resched(inode->i_sb);
1015 reiserfs_write_unlock_once(inode->i_sb, lock_depth);
1016 schedule();
1017 lock_depth = reiserfs_write_lock_once(inode->i_sb);
1018 }
1019 1017
1020 retval = search_for_position_by_key(inode->i_sb, &key, &path); 1018 retval = search_for_position_by_key(inode->i_sb, &key, &path);
1021 if (retval == IO_ERROR) { 1019 if (retval == IO_ERROR) {
@@ -1050,7 +1048,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
1050 retval = err; 1048 retval = err;
1051 } 1049 }
1052 1050
1053 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 1051 reiserfs_write_unlock(inode->i_sb);
1054 reiserfs_check_path(&path); 1052 reiserfs_check_path(&path);
1055 return retval; 1053 return retval;
1056} 1054}
@@ -1509,14 +1507,15 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1509{ 1507{
1510 struct inode *inode; 1508 struct inode *inode;
1511 struct reiserfs_iget_args args; 1509 struct reiserfs_iget_args args;
1510 int depth;
1512 1511
1513 args.objectid = key->on_disk_key.k_objectid; 1512 args.objectid = key->on_disk_key.k_objectid;
1514 args.dirid = key->on_disk_key.k_dir_id; 1513 args.dirid = key->on_disk_key.k_dir_id;
1515 reiserfs_write_unlock(s); 1514 depth = reiserfs_write_unlock_nested(s);
1516 inode = iget5_locked(s, key->on_disk_key.k_objectid, 1515 inode = iget5_locked(s, key->on_disk_key.k_objectid,
1517 reiserfs_find_actor, reiserfs_init_locked_inode, 1516 reiserfs_find_actor, reiserfs_init_locked_inode,
1518 (void *)(&args)); 1517 (void *)(&args));
1519 reiserfs_write_lock(s); 1518 reiserfs_write_lock_nested(s, depth);
1520 if (!inode) 1519 if (!inode)
1521 return ERR_PTR(-ENOMEM); 1520 return ERR_PTR(-ENOMEM);
1522 1521
@@ -1772,7 +1771,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1772 struct inode *inode, 1771 struct inode *inode,
1773 struct reiserfs_security_handle *security) 1772 struct reiserfs_security_handle *security)
1774{ 1773{
1775 struct super_block *sb; 1774 struct super_block *sb = dir->i_sb;
1776 struct reiserfs_iget_args args; 1775 struct reiserfs_iget_args args;
1777 INITIALIZE_PATH(path_to_key); 1776 INITIALIZE_PATH(path_to_key);
1778 struct cpu_key key; 1777 struct cpu_key key;
@@ -1780,12 +1779,13 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1780 struct stat_data sd; 1779 struct stat_data sd;
1781 int retval; 1780 int retval;
1782 int err; 1781 int err;
1782 int depth;
1783 1783
1784 BUG_ON(!th->t_trans_id); 1784 BUG_ON(!th->t_trans_id);
1785 1785
1786 reiserfs_write_unlock(inode->i_sb); 1786 depth = reiserfs_write_unlock_nested(sb);
1787 err = dquot_alloc_inode(inode); 1787 err = dquot_alloc_inode(inode);
1788 reiserfs_write_lock(inode->i_sb); 1788 reiserfs_write_lock_nested(sb, depth);
1789 if (err) 1789 if (err)
1790 goto out_end_trans; 1790 goto out_end_trans;
1791 if (!dir->i_nlink) { 1791 if (!dir->i_nlink) {
@@ -1793,8 +1793,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1793 goto out_bad_inode; 1793 goto out_bad_inode;
1794 } 1794 }
1795 1795
1796 sb = dir->i_sb;
1797
1798 /* item head of new item */ 1796 /* item head of new item */
1799 ih.ih_key.k_dir_id = reiserfs_choose_packing(dir); 1797 ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
1800 ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th)); 1798 ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
@@ -1812,10 +1810,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1812 memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); 1810 memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
1813 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); 1811 args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1814 1812
1815 reiserfs_write_unlock(inode->i_sb); 1813 depth = reiserfs_write_unlock_nested(inode->i_sb);
1816 err = insert_inode_locked4(inode, args.objectid, 1814 err = insert_inode_locked4(inode, args.objectid,
1817 reiserfs_find_actor, &args); 1815 reiserfs_find_actor, &args);
1818 reiserfs_write_lock(inode->i_sb); 1816 reiserfs_write_lock_nested(inode->i_sb, depth);
1819 if (err) { 1817 if (err) {
1820 err = -EINVAL; 1818 err = -EINVAL;
1821 goto out_bad_inode; 1819 goto out_bad_inode;
@@ -1941,7 +1939,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1941 } 1939 }
1942 1940
1943 if (reiserfs_posixacl(inode->i_sb)) { 1941 if (reiserfs_posixacl(inode->i_sb)) {
1942 reiserfs_write_unlock(inode->i_sb);
1944 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode); 1943 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
1944 reiserfs_write_lock(inode->i_sb);
1945 if (retval) { 1945 if (retval) {
1946 err = retval; 1946 err = retval;
1947 reiserfs_check_path(&path_to_key); 1947 reiserfs_check_path(&path_to_key);
@@ -1956,7 +1956,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1956 inode->i_flags |= S_PRIVATE; 1956 inode->i_flags |= S_PRIVATE;
1957 1957
1958 if (security->name) { 1958 if (security->name) {
1959 reiserfs_write_unlock(inode->i_sb);
1959 retval = reiserfs_security_write(th, inode, security); 1960 retval = reiserfs_security_write(th, inode, security);
1961 reiserfs_write_lock(inode->i_sb);
1960 if (retval) { 1962 if (retval) {
1961 err = retval; 1963 err = retval;
1962 reiserfs_check_path(&path_to_key); 1964 reiserfs_check_path(&path_to_key);
@@ -1982,14 +1984,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1982 INODE_PKEY(inode)->k_objectid = 0; 1984 INODE_PKEY(inode)->k_objectid = 0;
1983 1985
1984 /* Quota change must be inside a transaction for journaling */ 1986 /* Quota change must be inside a transaction for journaling */
1987 depth = reiserfs_write_unlock_nested(inode->i_sb);
1985 dquot_free_inode(inode); 1988 dquot_free_inode(inode);
1989 reiserfs_write_lock_nested(inode->i_sb, depth);
1986 1990
1987 out_end_trans: 1991 out_end_trans:
1988 journal_end(th, th->t_super, th->t_blocks_allocated); 1992 journal_end(th, th->t_super, th->t_blocks_allocated);
1989 reiserfs_write_unlock(inode->i_sb);
1990 /* Drop can be outside and it needs more credits so it's better to have it outside */ 1993 /* Drop can be outside and it needs more credits so it's better to have it outside */
1994 depth = reiserfs_write_unlock_nested(inode->i_sb);
1991 dquot_drop(inode); 1995 dquot_drop(inode);
1992 reiserfs_write_lock(inode->i_sb); 1996 reiserfs_write_lock_nested(inode->i_sb, depth);
1993 inode->i_flags |= S_NOQUOTA; 1997 inode->i_flags |= S_NOQUOTA;
1994 make_bad_inode(inode); 1998 make_bad_inode(inode);
1995 1999
@@ -2103,9 +2107,8 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2103 int error; 2107 int error;
2104 struct buffer_head *bh = NULL; 2108 struct buffer_head *bh = NULL;
2105 int err2; 2109 int err2;
2106 int lock_depth;
2107 2110
2108 lock_depth = reiserfs_write_lock_once(inode->i_sb); 2111 reiserfs_write_lock(inode->i_sb);
2109 2112
2110 if (inode->i_size > 0) { 2113 if (inode->i_size > 0) {
2111 error = grab_tail_page(inode, &page, &bh); 2114 error = grab_tail_page(inode, &page, &bh);
@@ -2174,7 +2177,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2174 page_cache_release(page); 2177 page_cache_release(page);
2175 } 2178 }
2176 2179
2177 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 2180 reiserfs_write_unlock(inode->i_sb);
2178 2181
2179 return 0; 2182 return 0;
2180 out: 2183 out:
@@ -2183,7 +2186,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2183 page_cache_release(page); 2186 page_cache_release(page);
2184 } 2187 }
2185 2188
2186 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 2189 reiserfs_write_unlock(inode->i_sb);
2187 2190
2188 return error; 2191 return error;
2189} 2192}
@@ -2648,10 +2651,11 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
2648 struct inode *inode = page->mapping->host; 2651 struct inode *inode = page->mapping->host;
2649 int ret; 2652 int ret;
2650 int old_ref = 0; 2653 int old_ref = 0;
2654 int depth;
2651 2655
2652 reiserfs_write_unlock(inode->i_sb); 2656 depth = reiserfs_write_unlock_nested(inode->i_sb);
2653 reiserfs_wait_on_write_block(inode->i_sb); 2657 reiserfs_wait_on_write_block(inode->i_sb);
2654 reiserfs_write_lock(inode->i_sb); 2658 reiserfs_write_lock_nested(inode->i_sb, depth);
2655 2659
2656 fix_tail_page_for_writing(page); 2660 fix_tail_page_for_writing(page);
2657 if (reiserfs_transaction_running(inode->i_sb)) { 2661 if (reiserfs_transaction_running(inode->i_sb)) {
@@ -2708,7 +2712,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2708 int update_sd = 0; 2712 int update_sd = 0;
2709 struct reiserfs_transaction_handle *th; 2713 struct reiserfs_transaction_handle *th;
2710 unsigned start; 2714 unsigned start;
2711 int lock_depth = 0;
2712 bool locked = false; 2715 bool locked = false;
2713 2716
2714 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND) 2717 if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
@@ -2737,7 +2740,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2737 */ 2740 */
2738 if (pos + copied > inode->i_size) { 2741 if (pos + copied > inode->i_size) {
2739 struct reiserfs_transaction_handle myth; 2742 struct reiserfs_transaction_handle myth;
2740 lock_depth = reiserfs_write_lock_once(inode->i_sb); 2743 reiserfs_write_lock(inode->i_sb);
2741 locked = true; 2744 locked = true;
2742 /* If the file have grown beyond the border where it 2745 /* If the file have grown beyond the border where it
2743 can have a tail, unmark it as needing a tail 2746 can have a tail, unmark it as needing a tail
@@ -2768,7 +2771,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2768 } 2771 }
2769 if (th) { 2772 if (th) {
2770 if (!locked) { 2773 if (!locked) {
2771 lock_depth = reiserfs_write_lock_once(inode->i_sb); 2774 reiserfs_write_lock(inode->i_sb);
2772 locked = true; 2775 locked = true;
2773 } 2776 }
2774 if (!update_sd) 2777 if (!update_sd)
@@ -2780,7 +2783,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2780 2783
2781 out: 2784 out:
2782 if (locked) 2785 if (locked)
2783 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 2786 reiserfs_write_unlock(inode->i_sb);
2784 unlock_page(page); 2787 unlock_page(page);
2785 page_cache_release(page); 2788 page_cache_release(page);
2786 2789
@@ -2790,7 +2793,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2790 return ret == 0 ? copied : ret; 2793 return ret == 0 ? copied : ret;
2791 2794
2792 journal_error: 2795 journal_error:
2793 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 2796 reiserfs_write_unlock(inode->i_sb);
2794 locked = false; 2797 locked = false;
2795 if (th) { 2798 if (th) {
2796 if (!update_sd) 2799 if (!update_sd)
@@ -2808,10 +2811,11 @@ int reiserfs_commit_write(struct file *f, struct page *page,
2808 int ret = 0; 2811 int ret = 0;
2809 int update_sd = 0; 2812 int update_sd = 0;
2810 struct reiserfs_transaction_handle *th = NULL; 2813 struct reiserfs_transaction_handle *th = NULL;
2814 int depth;
2811 2815
2812 reiserfs_write_unlock(inode->i_sb); 2816 depth = reiserfs_write_unlock_nested(inode->i_sb);
2813 reiserfs_wait_on_write_block(inode->i_sb); 2817 reiserfs_wait_on_write_block(inode->i_sb);
2814 reiserfs_write_lock(inode->i_sb); 2818 reiserfs_write_lock_nested(inode->i_sb, depth);
2815 2819
2816 if (reiserfs_transaction_running(inode->i_sb)) { 2820 if (reiserfs_transaction_running(inode->i_sb)) {
2817 th = current->journal_info; 2821 th = current->journal_info;
@@ -3110,7 +3114,6 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3110{ 3114{
3111 struct inode *inode = dentry->d_inode; 3115 struct inode *inode = dentry->d_inode;
3112 unsigned int ia_valid; 3116 unsigned int ia_valid;
3113 int depth;
3114 int error; 3117 int error;
3115 3118
3116 error = inode_change_ok(inode, attr); 3119 error = inode_change_ok(inode, attr);
@@ -3122,13 +3125,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3122 3125
3123 if (is_quota_modification(inode, attr)) 3126 if (is_quota_modification(inode, attr))
3124 dquot_initialize(inode); 3127 dquot_initialize(inode);
3125 depth = reiserfs_write_lock_once(inode->i_sb); 3128 reiserfs_write_lock(inode->i_sb);
3126 if (attr->ia_valid & ATTR_SIZE) { 3129 if (attr->ia_valid & ATTR_SIZE) {
3127 /* version 2 items will be caught by the s_maxbytes check 3130 /* version 2 items will be caught by the s_maxbytes check
3128 ** done for us in vmtruncate 3131 ** done for us in vmtruncate
3129 */ 3132 */
3130 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 && 3133 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3131 attr->ia_size > MAX_NON_LFS) { 3134 attr->ia_size > MAX_NON_LFS) {
3135 reiserfs_write_unlock(inode->i_sb);
3132 error = -EFBIG; 3136 error = -EFBIG;
3133 goto out; 3137 goto out;
3134 } 3138 }
@@ -3150,8 +3154,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3150 if (err) 3154 if (err)
3151 error = err; 3155 error = err;
3152 } 3156 }
3153 if (error) 3157 if (error) {
3158 reiserfs_write_unlock(inode->i_sb);
3154 goto out; 3159 goto out;
3160 }
3155 /* 3161 /*
3156 * file size is changed, ctime and mtime are 3162 * file size is changed, ctime and mtime are
3157 * to be updated 3163 * to be updated
@@ -3159,6 +3165,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3159 attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME); 3165 attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
3160 } 3166 }
3161 } 3167 }
3168 reiserfs_write_unlock(inode->i_sb);
3162 3169
3163 if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) || 3170 if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
3164 ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) && 3171 ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
@@ -3183,14 +3190,16 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3183 return error; 3190 return error;
3184 3191
3185 /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */ 3192 /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
3193 reiserfs_write_lock(inode->i_sb);
3186 error = journal_begin(&th, inode->i_sb, jbegin_count); 3194 error = journal_begin(&th, inode->i_sb, jbegin_count);
3195 reiserfs_write_unlock(inode->i_sb);
3187 if (error) 3196 if (error)
3188 goto out; 3197 goto out;
3189 reiserfs_write_unlock_once(inode->i_sb, depth);
3190 error = dquot_transfer(inode, attr); 3198 error = dquot_transfer(inode, attr);
3191 depth = reiserfs_write_lock_once(inode->i_sb); 3199 reiserfs_write_lock(inode->i_sb);
3192 if (error) { 3200 if (error) {
3193 journal_end(&th, inode->i_sb, jbegin_count); 3201 journal_end(&th, inode->i_sb, jbegin_count);
3202 reiserfs_write_unlock(inode->i_sb);
3194 goto out; 3203 goto out;
3195 } 3204 }
3196 3205
@@ -3202,17 +3211,11 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3202 inode->i_gid = attr->ia_gid; 3211 inode->i_gid = attr->ia_gid;
3203 mark_inode_dirty(inode); 3212 mark_inode_dirty(inode);
3204 error = journal_end(&th, inode->i_sb, jbegin_count); 3213 error = journal_end(&th, inode->i_sb, jbegin_count);
3214 reiserfs_write_unlock(inode->i_sb);
3205 if (error) 3215 if (error)
3206 goto out; 3216 goto out;
3207 } 3217 }
3208 3218
3209 /*
3210 * Relax the lock here, as it might truncate the
3211 * inode pages and wait for inode pages locks.
3212 * To release such page lock, the owner needs the
3213 * reiserfs lock
3214 */
3215 reiserfs_write_unlock_once(inode->i_sb, depth);
3216 if ((attr->ia_valid & ATTR_SIZE) && 3219 if ((attr->ia_valid & ATTR_SIZE) &&
3217 attr->ia_size != i_size_read(inode)) { 3220 attr->ia_size != i_size_read(inode)) {
3218 error = inode_newsize_ok(inode, attr->ia_size); 3221 error = inode_newsize_ok(inode, attr->ia_size);
@@ -3226,16 +3229,13 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3226 setattr_copy(inode, attr); 3229 setattr_copy(inode, attr);
3227 mark_inode_dirty(inode); 3230 mark_inode_dirty(inode);
3228 } 3231 }
3229 depth = reiserfs_write_lock_once(inode->i_sb);
3230 3232
3231 if (!error && reiserfs_posixacl(inode->i_sb)) { 3233 if (!error && reiserfs_posixacl(inode->i_sb)) {
3232 if (attr->ia_valid & ATTR_MODE) 3234 if (attr->ia_valid & ATTR_MODE)
3233 error = reiserfs_acl_chmod(inode); 3235 error = reiserfs_acl_chmod(inode);
3234 } 3236 }
3235 3237
3236 out: 3238out:
3237 reiserfs_write_unlock_once(inode->i_sb, depth);
3238
3239 return error; 3239 return error;
3240} 3240}
3241 3241
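
Note: reiserfs_get_block() above now calls reiserfs_cond_resched() instead of open-coding the unlock/schedule/relock dance. That helper is added in fs/reiserfs/reiserfs.h, which is outside this excerpt; its assumed shape, consistent with the code it replaces:

/* Assumed helper: yield the CPU, dropping the write lock while we do. */
static inline void reiserfs_cond_resched(struct super_block *sb)
{
	if (need_resched()) {
		int depth;

		depth = reiserfs_write_unlock_nested(sb);
		schedule();
		reiserfs_write_lock_nested(sb, depth);
	}
}
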
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 15cb5fe6b425..946ccbf5b5a1 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -167,7 +167,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
167int reiserfs_unpack(struct inode *inode, struct file *filp) 167int reiserfs_unpack(struct inode *inode, struct file *filp)
168{ 168{
169 int retval = 0; 169 int retval = 0;
170 int depth;
171 int index; 170 int index;
172 struct page *page; 171 struct page *page;
173 struct address_space *mapping; 172 struct address_space *mapping;
@@ -183,11 +182,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
183 return 0; 182 return 0;
184 } 183 }
185 184
186 depth = reiserfs_write_lock_once(inode->i_sb);
187
188 /* we need to make sure nobody is changing the file size beneath us */ 185 /* we need to make sure nobody is changing the file size beneath us */
189 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); 186 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
190 187
188 reiserfs_write_lock(inode->i_sb);
189
191 write_from = inode->i_size & (blocksize - 1); 190 write_from = inode->i_size & (blocksize - 1);
192 /* if we are on a block boundary, we are already unpacked. */ 191 /* if we are on a block boundary, we are already unpacked. */
193 if (write_from == 0) { 192 if (write_from == 0) {
@@ -221,6 +220,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
221 220
222 out: 221 out:
223 mutex_unlock(&inode->i_mutex); 222 mutex_unlock(&inode->i_mutex);
224 reiserfs_write_unlock_once(inode->i_sb, depth); 223 reiserfs_write_unlock(inode->i_sb);
225 return retval; 224 return retval;
226} 225}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 742fdd4c209a..73feacc49b2e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -947,9 +947,11 @@ static int reiserfs_async_progress_wait(struct super_block *s)
947 struct reiserfs_journal *j = SB_JOURNAL(s); 947 struct reiserfs_journal *j = SB_JOURNAL(s);
948 948
949 if (atomic_read(&j->j_async_throttle)) { 949 if (atomic_read(&j->j_async_throttle)) {
950 reiserfs_write_unlock(s); 950 int depth;
951
952 depth = reiserfs_write_unlock_nested(s);
951 congestion_wait(BLK_RW_ASYNC, HZ / 10); 953 congestion_wait(BLK_RW_ASYNC, HZ / 10);
952 reiserfs_write_lock(s); 954 reiserfs_write_lock_nested(s, depth);
953 } 955 }
954 956
955 return 0; 957 return 0;
@@ -972,6 +974,7 @@ static int flush_commit_list(struct super_block *s,
972 struct reiserfs_journal *journal = SB_JOURNAL(s); 974 struct reiserfs_journal *journal = SB_JOURNAL(s);
973 int retval = 0; 975 int retval = 0;
974 int write_len; 976 int write_len;
977 int depth;
975 978
976 reiserfs_check_lock_depth(s, "flush_commit_list"); 979 reiserfs_check_lock_depth(s, "flush_commit_list");
977 980
@@ -1018,12 +1021,12 @@ static int flush_commit_list(struct super_block *s,
1018 * We might sleep in numerous places inside 1021 * We might sleep in numerous places inside
1019 * write_ordered_buffers. Relax the write lock. 1022 * write_ordered_buffers. Relax the write lock.
1020 */ 1023 */
1021 reiserfs_write_unlock(s); 1024 depth = reiserfs_write_unlock_nested(s);
1022 ret = write_ordered_buffers(&journal->j_dirty_buffers_lock, 1025 ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
1023 journal, jl, &jl->j_bh_list); 1026 journal, jl, &jl->j_bh_list);
1024 if (ret < 0 && retval == 0) 1027 if (ret < 0 && retval == 0)
1025 retval = ret; 1028 retval = ret;
1026 reiserfs_write_lock(s); 1029 reiserfs_write_lock_nested(s, depth);
1027 } 1030 }
1028 BUG_ON(!list_empty(&jl->j_bh_list)); 1031 BUG_ON(!list_empty(&jl->j_bh_list));
1029 /* 1032 /*
@@ -1043,9 +1046,9 @@ static int flush_commit_list(struct super_block *s,
1043 tbh = journal_find_get_block(s, bn); 1046 tbh = journal_find_get_block(s, bn);
1044 if (tbh) { 1047 if (tbh) {
1045 if (buffer_dirty(tbh)) { 1048 if (buffer_dirty(tbh)) {
1046 reiserfs_write_unlock(s); 1049 depth = reiserfs_write_unlock_nested(s);
1047 ll_rw_block(WRITE, 1, &tbh); 1050 ll_rw_block(WRITE, 1, &tbh);
1048 reiserfs_write_lock(s); 1051 reiserfs_write_lock_nested(s, depth);
1049 } 1052 }
1050 put_bh(tbh) ; 1053 put_bh(tbh) ;
1051 } 1054 }
@@ -1057,17 +1060,17 @@ static int flush_commit_list(struct super_block *s,
1057 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s); 1060 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1058 tbh = journal_find_get_block(s, bn); 1061 tbh = journal_find_get_block(s, bn);
1059 1062
1060 reiserfs_write_unlock(s); 1063 depth = reiserfs_write_unlock_nested(s);
1061 wait_on_buffer(tbh); 1064 __wait_on_buffer(tbh);
1062 reiserfs_write_lock(s); 1065 reiserfs_write_lock_nested(s, depth);
1063 // since we're using ll_rw_blk above, it might have skipped over 1066 // since we're using ll_rw_blk above, it might have skipped over
1064 // a locked buffer. Double check here 1067 // a locked buffer. Double check here
1065 // 1068 //
1066 /* redundant, sync_dirty_buffer() checks */ 1069 /* redundant, sync_dirty_buffer() checks */
1067 if (buffer_dirty(tbh)) { 1070 if (buffer_dirty(tbh)) {
1068 reiserfs_write_unlock(s); 1071 depth = reiserfs_write_unlock_nested(s);
1069 sync_dirty_buffer(tbh); 1072 sync_dirty_buffer(tbh);
1070 reiserfs_write_lock(s); 1073 reiserfs_write_lock_nested(s, depth);
1071 } 1074 }
1072 if (unlikely(!buffer_uptodate(tbh))) { 1075 if (unlikely(!buffer_uptodate(tbh))) {
1073#ifdef CONFIG_REISERFS_CHECK 1076#ifdef CONFIG_REISERFS_CHECK
@@ -1091,12 +1094,12 @@ static int flush_commit_list(struct super_block *s,
1091 if (buffer_dirty(jl->j_commit_bh)) 1094 if (buffer_dirty(jl->j_commit_bh))
1092 BUG(); 1095 BUG();
1093 mark_buffer_dirty(jl->j_commit_bh) ; 1096 mark_buffer_dirty(jl->j_commit_bh) ;
1094 reiserfs_write_unlock(s); 1097 depth = reiserfs_write_unlock_nested(s);
1095 if (reiserfs_barrier_flush(s)) 1098 if (reiserfs_barrier_flush(s))
1096 __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA); 1099 __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
1097 else 1100 else
1098 sync_dirty_buffer(jl->j_commit_bh); 1101 sync_dirty_buffer(jl->j_commit_bh);
1099 reiserfs_write_lock(s); 1102 reiserfs_write_lock_nested(s, depth);
1100 } 1103 }
1101 1104
1102 /* If there was a write error in the journal - we can't commit this 1105 /* If there was a write error in the journal - we can't commit this
@@ -1228,15 +1231,16 @@ static int _update_journal_header_block(struct super_block *sb,
1228{ 1231{
1229 struct reiserfs_journal_header *jh; 1232 struct reiserfs_journal_header *jh;
1230 struct reiserfs_journal *journal = SB_JOURNAL(sb); 1233 struct reiserfs_journal *journal = SB_JOURNAL(sb);
1234 int depth;
1231 1235
1232 if (reiserfs_is_journal_aborted(journal)) 1236 if (reiserfs_is_journal_aborted(journal))
1233 return -EIO; 1237 return -EIO;
1234 1238
1235 if (trans_id >= journal->j_last_flush_trans_id) { 1239 if (trans_id >= journal->j_last_flush_trans_id) {
1236 if (buffer_locked((journal->j_header_bh))) { 1240 if (buffer_locked((journal->j_header_bh))) {
1237 reiserfs_write_unlock(sb); 1241 depth = reiserfs_write_unlock_nested(sb);
1238 wait_on_buffer((journal->j_header_bh)); 1242 __wait_on_buffer(journal->j_header_bh);
1239 reiserfs_write_lock(sb); 1243 reiserfs_write_lock_nested(sb, depth);
1240 if (unlikely(!buffer_uptodate(journal->j_header_bh))) { 1244 if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1241#ifdef CONFIG_REISERFS_CHECK 1245#ifdef CONFIG_REISERFS_CHECK
1242 reiserfs_warning(sb, "journal-699", 1246 reiserfs_warning(sb, "journal-699",
@@ -1254,14 +1258,14 @@ static int _update_journal_header_block(struct super_block *sb,
1254 jh->j_mount_id = cpu_to_le32(journal->j_mount_id); 1258 jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1255 1259
1256 set_buffer_dirty(journal->j_header_bh); 1260 set_buffer_dirty(journal->j_header_bh);
1257 reiserfs_write_unlock(sb); 1261 depth = reiserfs_write_unlock_nested(sb);
1258 1262
1259 if (reiserfs_barrier_flush(sb)) 1263 if (reiserfs_barrier_flush(sb))
1260 __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA); 1264 __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
1261 else 1265 else
1262 sync_dirty_buffer(journal->j_header_bh); 1266 sync_dirty_buffer(journal->j_header_bh);
1263 1267
1264 reiserfs_write_lock(sb); 1268 reiserfs_write_lock_nested(sb, depth);
1265 if (!buffer_uptodate(journal->j_header_bh)) { 1269 if (!buffer_uptodate(journal->j_header_bh)) {
1266 reiserfs_warning(sb, "journal-837", 1270 reiserfs_warning(sb, "journal-837",
1267 "IO error during journal replay"); 1271 "IO error during journal replay");
@@ -1341,6 +1345,7 @@ static int flush_journal_list(struct super_block *s,
1341 unsigned long j_len_saved = jl->j_len; 1345 unsigned long j_len_saved = jl->j_len;
1342 struct reiserfs_journal *journal = SB_JOURNAL(s); 1346 struct reiserfs_journal *journal = SB_JOURNAL(s);
1343 int err = 0; 1347 int err = 0;
1348 int depth;
1344 1349
1345 BUG_ON(j_len_saved <= 0); 1350 BUG_ON(j_len_saved <= 0);
1346 1351
@@ -1495,9 +1500,9 @@ static int flush_journal_list(struct super_block *s,
1495 "cn->bh is NULL"); 1500 "cn->bh is NULL");
1496 } 1501 }
1497 1502
1498 reiserfs_write_unlock(s); 1503 depth = reiserfs_write_unlock_nested(s);
1499 wait_on_buffer(cn->bh); 1504 __wait_on_buffer(cn->bh);
1500 reiserfs_write_lock(s); 1505 reiserfs_write_lock_nested(s, depth);
1501 1506
1502 if (!cn->bh) { 1507 if (!cn->bh) {
1503 reiserfs_panic(s, "journal-1012", 1508 reiserfs_panic(s, "journal-1012",
@@ -1974,6 +1979,7 @@ static int journal_compare_desc_commit(struct super_block *sb,
1974/* returns 0 if it did not find a description block 1979/* returns 0 if it did not find a description block
1975** returns -1 if it found a corrupt commit block 1980** returns -1 if it found a corrupt commit block
1976** returns 1 if both desc and commit were valid 1981** returns 1 if both desc and commit were valid
1982** NOTE: only called during fs mount
1977*/ 1983*/
1978static int journal_transaction_is_valid(struct super_block *sb, 1984static int journal_transaction_is_valid(struct super_block *sb,
1979 struct buffer_head *d_bh, 1985 struct buffer_head *d_bh,
@@ -2073,8 +2079,9 @@ static void brelse_array(struct buffer_head **heads, int num)
2073 2079
2074/* 2080/*
2075** given the start, and values for the oldest acceptable transactions, 2081** given the start, and values for the oldest acceptable transactions,
2076** this either reads in a replays a transaction, or returns because the transaction 2082** this either reads in a replays a transaction, or returns because the
2077** is invalid, or too old. 2083** transaction is invalid, or too old.
2084** NOTE: only called during fs mount
2078*/ 2085*/
2079static int journal_read_transaction(struct super_block *sb, 2086static int journal_read_transaction(struct super_block *sb,
2080 unsigned long cur_dblock, 2087 unsigned long cur_dblock,
@@ -2208,10 +2215,7 @@ static int journal_read_transaction(struct super_block *sb,
2208 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks); 2215 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2209 for (i = 0; i < get_desc_trans_len(desc); i++) { 2216 for (i = 0; i < get_desc_trans_len(desc); i++) {
2210 2217
2211 reiserfs_write_unlock(sb);
2212 wait_on_buffer(log_blocks[i]); 2218 wait_on_buffer(log_blocks[i]);
2213 reiserfs_write_lock(sb);
2214
2215 if (!buffer_uptodate(log_blocks[i])) { 2219 if (!buffer_uptodate(log_blocks[i])) {
2216 reiserfs_warning(sb, "journal-1212", 2220 reiserfs_warning(sb, "journal-1212",
2217 "REPLAY FAILURE fsck required! " 2221 "REPLAY FAILURE fsck required! "
@@ -2318,12 +2322,13 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
2318 2322
2319/* 2323/*
2320** read and replay the log 2324** read and replay the log
2321** on a clean unmount, the journal header's next unflushed pointer will be to an invalid 2325** on a clean unmount, the journal header's next unflushed pointer will
2322** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast. 2326** be to an invalid transaction. This tests that before finding all the
2323** 2327** transactions in the log, which makes normal mount times fast.
2324** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid. 2328** After a crash, this starts with the next unflushed transaction, and
2325** 2329** replays until it finds one too old, or invalid.
2326** On exit, it sets things up so the first transaction will work correctly. 2330** On exit, it sets things up so the first transaction will work correctly.
2331** NOTE: only called during fs mount
2327*/ 2332*/
2328static int journal_read(struct super_block *sb) 2333static int journal_read(struct super_block *sb)
2329{ 2334{
@@ -2501,14 +2506,18 @@ static int journal_read(struct super_block *sb)
2501 "replayed %d transactions in %lu seconds\n", 2506 "replayed %d transactions in %lu seconds\n",
2502 replay_count, get_seconds() - start); 2507 replay_count, get_seconds() - start);
2503 } 2508 }
2509 /* needed to satisfy the locking in _update_journal_header_block */
2510 reiserfs_write_lock(sb);
2504 if (!bdev_read_only(sb->s_bdev) && 2511 if (!bdev_read_only(sb->s_bdev) &&
2505 _update_journal_header_block(sb, journal->j_start, 2512 _update_journal_header_block(sb, journal->j_start,
2506 journal->j_last_flush_trans_id)) { 2513 journal->j_last_flush_trans_id)) {
2514 reiserfs_write_unlock(sb);
2507 /* replay failed, caller must call free_journal_ram and abort 2515 /* replay failed, caller must call free_journal_ram and abort
2508 ** the mount 2516 ** the mount
2509 */ 2517 */
2510 return -1; 2518 return -1;
2511 } 2519 }
2520 reiserfs_write_unlock(sb);
2512 return 0; 2521 return 0;
2513} 2522}
2514 2523
@@ -2828,13 +2837,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
2828 goto free_and_return; 2837 goto free_and_return;
2829 } 2838 }
2830 2839
2831 /*
2832 * Journal_read needs to be inspected in order to push down
2833 * the lock further inside (or even remove it).
2834 */
2835 reiserfs_write_lock(sb);
2836 ret = journal_read(sb); 2840 ret = journal_read(sb);
2837 reiserfs_write_unlock(sb);
2838 if (ret < 0) { 2841 if (ret < 0) {
2839 reiserfs_warning(sb, "reiserfs-2006", 2842 reiserfs_warning(sb, "reiserfs-2006",
2840 "Replay Failure, unable to mount"); 2843 "Replay Failure, unable to mount");
@@ -2923,9 +2926,9 @@ static void queue_log_writer(struct super_block *s)
2923 add_wait_queue(&journal->j_join_wait, &wait); 2926 add_wait_queue(&journal->j_join_wait, &wait);
2924 set_current_state(TASK_UNINTERRUPTIBLE); 2927 set_current_state(TASK_UNINTERRUPTIBLE);
2925 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) { 2928 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
2926 reiserfs_write_unlock(s); 2929 int depth = reiserfs_write_unlock_nested(s);
2927 schedule(); 2930 schedule();
2928 reiserfs_write_lock(s); 2931 reiserfs_write_lock_nested(s, depth);
2929 } 2932 }
2930 __set_current_state(TASK_RUNNING); 2933 __set_current_state(TASK_RUNNING);
2931 remove_wait_queue(&journal->j_join_wait, &wait); 2934 remove_wait_queue(&journal->j_join_wait, &wait);
@@ -2943,9 +2946,12 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
2943 struct reiserfs_journal *journal = SB_JOURNAL(sb); 2946 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2944 unsigned long bcount = journal->j_bcount; 2947 unsigned long bcount = journal->j_bcount;
2945 while (1) { 2948 while (1) {
2946 reiserfs_write_unlock(sb); 2949 int depth;
2950
2951 depth = reiserfs_write_unlock_nested(sb);
2947 schedule_timeout_uninterruptible(1); 2952 schedule_timeout_uninterruptible(1);
2948 reiserfs_write_lock(sb); 2953 reiserfs_write_lock_nested(sb, depth);
2954
2949 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; 2955 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2950 while ((atomic_read(&journal->j_wcount) > 0 || 2956 while ((atomic_read(&journal->j_wcount) > 0 ||
2951 atomic_read(&journal->j_jlock)) && 2957 atomic_read(&journal->j_jlock)) &&
@@ -2976,6 +2982,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2976 struct reiserfs_transaction_handle myth; 2982 struct reiserfs_transaction_handle myth;
2977 int sched_count = 0; 2983 int sched_count = 0;
2978 int retval; 2984 int retval;
2985 int depth;
2979 2986
2980 reiserfs_check_lock_depth(sb, "journal_begin"); 2987 reiserfs_check_lock_depth(sb, "journal_begin");
2981 BUG_ON(nblocks > journal->j_trans_max); 2988 BUG_ON(nblocks > journal->j_trans_max);
@@ -2996,9 +3003,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2996 3003
2997 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) { 3004 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2998 unlock_journal(sb); 3005 unlock_journal(sb);
2999 reiserfs_write_unlock(sb); 3006 depth = reiserfs_write_unlock_nested(sb);
3000 reiserfs_wait_on_write_block(sb); 3007 reiserfs_wait_on_write_block(sb);
3001 reiserfs_write_lock(sb); 3008 reiserfs_write_lock_nested(sb, depth);
3002 PROC_INFO_INC(sb, journal.journal_relock_writers); 3009 PROC_INFO_INC(sb, journal.journal_relock_writers);
3003 goto relock; 3010 goto relock;
3004 } 3011 }
@@ -3821,6 +3828,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
3821 if (test_clear_buffer_journal_restore_dirty(bh) && 3828 if (test_clear_buffer_journal_restore_dirty(bh) &&
3822 buffer_journal_dirty(bh)) { 3829 buffer_journal_dirty(bh)) {
3823 struct reiserfs_journal_cnode *cn; 3830 struct reiserfs_journal_cnode *cn;
3831 reiserfs_write_lock(sb);
3824 cn = get_journal_hash_dev(sb, 3832 cn = get_journal_hash_dev(sb,
3825 journal->j_list_hash_table, 3833 journal->j_list_hash_table,
3826 bh->b_blocknr); 3834 bh->b_blocknr);
@@ -3828,6 +3836,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
3828 set_buffer_journal_test(bh); 3836 set_buffer_journal_test(bh);
3829 mark_buffer_dirty(bh); 3837 mark_buffer_dirty(bh);
3830 } 3838 }
3839 reiserfs_write_unlock(sb);
3831 } 3840 }
3832 clear_buffer_journal_prepared(bh); 3841 clear_buffer_journal_prepared(bh);
3833} 3842}
@@ -3911,6 +3920,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
3911 unsigned long jindex; 3920 unsigned long jindex;
3912 unsigned int commit_trans_id; 3921 unsigned int commit_trans_id;
3913 int trans_half; 3922 int trans_half;
3923 int depth;
3914 3924
3915 BUG_ON(th->t_refcount > 1); 3925 BUG_ON(th->t_refcount > 1);
3916 BUG_ON(!th->t_trans_id); 3926 BUG_ON(!th->t_trans_id);
@@ -4116,9 +4126,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4116 next = cn->next; 4126 next = cn->next;
4117 free_cnode(sb, cn); 4127 free_cnode(sb, cn);
4118 cn = next; 4128 cn = next;
4119 reiserfs_write_unlock(sb); 4129 reiserfs_cond_resched(sb);
4120 cond_resched();
4121 reiserfs_write_lock(sb);
4122 } 4130 }
4123 4131
4124 /* we are done with both the c_bh and d_bh, but 4132 /* we are done with both the c_bh and d_bh, but
@@ -4165,10 +4173,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4165 * is lost. 4173 * is lost.
4166 */ 4174 */
4167 if (!list_empty(&jl->j_tail_bh_list)) { 4175 if (!list_empty(&jl->j_tail_bh_list)) {
4168 reiserfs_write_unlock(sb); 4176 depth = reiserfs_write_unlock_nested(sb);
4169 write_ordered_buffers(&journal->j_dirty_buffers_lock, 4177 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4170 journal, jl, &jl->j_tail_bh_list); 4178 journal, jl, &jl->j_tail_bh_list);
4171 reiserfs_write_lock(sb); 4179 reiserfs_write_lock_nested(sb, depth);
4172 } 4180 }
4173 BUG_ON(!list_empty(&jl->j_tail_bh_list)); 4181 BUG_ON(!list_empty(&jl->j_tail_bh_list));
4174 mutex_unlock(&jl->j_commit_mutex); 4182 mutex_unlock(&jl->j_commit_mutex);
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index d735bc8470e3..045b83ef9fd9 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -48,30 +48,35 @@ void reiserfs_write_unlock(struct super_block *s)
48 } 48 }
49} 49}
50 50
51/* 51int __must_check reiserfs_write_unlock_nested(struct super_block *s)
52 * If we already own the lock, just exit and don't increase the depth.
53 * Useful when we don't want to lock more than once.
54 *
55 * We always return the lock_depth we had before calling
56 * this function.
57 */
58int reiserfs_write_lock_once(struct super_block *s)
59{ 52{
60 struct reiserfs_sb_info *sb_i = REISERFS_SB(s); 53 struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
54 int depth;
61 55
62 if (sb_i->lock_owner != current) { 56 /* this can happen when the lock isn't always held */
63 mutex_lock(&sb_i->lock); 57 if (sb_i->lock_owner != current)
64 sb_i->lock_owner = current; 58 return -1;
65 return sb_i->lock_depth++; 59
66 } 60 depth = sb_i->lock_depth;
61
62 sb_i->lock_depth = -1;
63 sb_i->lock_owner = NULL;
64 mutex_unlock(&sb_i->lock);
67 65
68 return sb_i->lock_depth; 66 return depth;
69} 67}
70 68
71void reiserfs_write_unlock_once(struct super_block *s, int lock_depth) 69void reiserfs_write_lock_nested(struct super_block *s, int depth)
72{ 70{
73 if (lock_depth == -1) 71 struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
74 reiserfs_write_unlock(s); 72
73 /* this can happen when the lock isn't always held */
74 if (depth == -1)
75 return;
76
77 mutex_lock(&sb_i->lock);
78 sb_i->lock_owner = current;
79 sb_i->lock_depth = depth;
75} 80}
76 81
77/* 82/*
@@ -82,9 +87,7 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
82{ 87{
83 struct reiserfs_sb_info *sb_i = REISERFS_SB(sb); 88 struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
84 89
85 if (sb_i->lock_depth < 0) 90 WARN_ON(sb_i->lock_depth < 0);
86 reiserfs_panic(sb, "%s called without kernel lock held %d",
87 caller);
88} 91}
89 92
90#ifdef CONFIG_REISERFS_CHECK 93#ifdef CONFIG_REISERFS_CHECK
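Note: the lock.c hunk above defines the contract the rest of this patch relies on. reiserfs_write_unlock_nested() fully releases the per-superblock write lock when the calling task owns it and returns the recursion depth to restore later (or -1 when the lock was not held); reiserfs_write_lock_nested() re-takes the lock and restores that depth, doing nothing for -1. The journal.c hunks above and the stree.c/super.c hunks below all follow the same shape: save the depth, drop the lock completely, do the blocking work, relock at the saved depth. Below is a minimal userspace model of that contract -- an illustrative sketch with pthreads standing in for the kernel mutex and pthread_self() for "current"; it is not the kernel code.

/*
 * Userspace model of the nested write-lock pair added above.
 * Illustrative only: pthreads replace the kernel mutex and
 * pthread_self() stands in for "current".
 */
#include <pthread.h>
#include <stdio.h>

struct sb_model {
	pthread_mutex_t lock;
	pthread_t owner;
	int owned;		/* models lock_owner != NULL */
	int depth;		/* recursion depth, -1 when unowned */
};

/* Drop the lock completely if we hold it; return the depth to restore. */
static int write_unlock_nested(struct sb_model *s)
{
	int depth;

	if (!s->owned || !pthread_equal(s->owner, pthread_self()))
		return -1;		/* we don't hold it: nothing to drop */

	depth = s->depth;
	s->depth = -1;
	s->owned = 0;
	pthread_mutex_unlock(&s->lock);
	return depth;
}

/* Re-take the lock and restore the remembered depth; -1 means "wasn't held". */
static void write_lock_nested(struct sb_model *s, int depth)
{
	if (depth == -1)
		return;

	pthread_mutex_lock(&s->lock);
	s->owner = pthread_self();
	s->owned = 1;
	s->depth = depth;
}

int main(void)
{
	struct sb_model s = { .lock = PTHREAD_MUTEX_INITIALIZER, .owned = 0, .depth = -1 };
	int depth;

	/* Acquire the lock as reiserfs_write_lock() would (depth 0). */
	pthread_mutex_lock(&s.lock);
	s.owner = pthread_self();
	s.owned = 1;
	s.depth = 0;

	/* About to block: drop the lock entirely, remembering the depth ... */
	depth = write_unlock_nested(&s);
	/* ... schedule(), wait_on_buffer(), dquot_*() would run here ... */
	write_lock_nested(&s, depth);	/* ... then restore it unchanged. */

	printf("lock restored at depth %d\n", s.depth);
	pthread_mutex_unlock(&s.lock);
	return 0;
}

The point of returning the depth from the unlock side is that callers such as search_by_key_reada() or queue_log_writer() no longer need to know whether they were called with the lock held: a -1 round-trips harmlessly through both helpers.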
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 8567fb847601..dc5236f6de1b 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -325,7 +325,6 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
325 unsigned int flags) 325 unsigned int flags)
326{ 326{
327 int retval; 327 int retval;
328 int lock_depth;
329 struct inode *inode = NULL; 328 struct inode *inode = NULL;
330 struct reiserfs_dir_entry de; 329 struct reiserfs_dir_entry de;
331 INITIALIZE_PATH(path_to_entry); 330 INITIALIZE_PATH(path_to_entry);
@@ -333,12 +332,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
333 if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len) 332 if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
334 return ERR_PTR(-ENAMETOOLONG); 333 return ERR_PTR(-ENAMETOOLONG);
335 334
336 /* 335 reiserfs_write_lock(dir->i_sb);
337 * Might be called with or without the write lock, must be careful
338 * to not recursively hold it in case we want to release the lock
339 * before rescheduling.
340 */
341 lock_depth = reiserfs_write_lock_once(dir->i_sb);
342 336
343 de.de_gen_number_bit_string = NULL; 337 de.de_gen_number_bit_string = NULL;
344 retval = 338 retval =
@@ -349,7 +343,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
349 inode = reiserfs_iget(dir->i_sb, 343 inode = reiserfs_iget(dir->i_sb,
350 (struct cpu_key *)&(de.de_dir_id)); 344 (struct cpu_key *)&(de.de_dir_id));
351 if (!inode || IS_ERR(inode)) { 345 if (!inode || IS_ERR(inode)) {
352 reiserfs_write_unlock_once(dir->i_sb, lock_depth); 346 reiserfs_write_unlock(dir->i_sb);
353 return ERR_PTR(-EACCES); 347 return ERR_PTR(-EACCES);
354 } 348 }
355 349
@@ -358,7 +352,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
358 if (IS_PRIVATE(dir)) 352 if (IS_PRIVATE(dir))
359 inode->i_flags |= S_PRIVATE; 353 inode->i_flags |= S_PRIVATE;
360 } 354 }
361 reiserfs_write_unlock_once(dir->i_sb, lock_depth); 355 reiserfs_write_unlock(dir->i_sb);
362 if (retval == IO_ERROR) { 356 if (retval == IO_ERROR) {
363 return ERR_PTR(-EIO); 357 return ERR_PTR(-EIO);
364 } 358 }
@@ -727,7 +721,6 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
727 struct inode *inode; 721 struct inode *inode;
728 struct reiserfs_transaction_handle th; 722 struct reiserfs_transaction_handle th;
729 struct reiserfs_security_handle security; 723 struct reiserfs_security_handle security;
730 int lock_depth;
731 /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ 724 /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
732 int jbegin_count = 725 int jbegin_count =
733 JOURNAL_PER_BALANCE_CNT * 3 + 726 JOURNAL_PER_BALANCE_CNT * 3 +
@@ -753,7 +746,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
753 return retval; 746 return retval;
754 } 747 }
755 jbegin_count += retval; 748 jbegin_count += retval;
756 lock_depth = reiserfs_write_lock_once(dir->i_sb); 749 reiserfs_write_lock(dir->i_sb);
757 750
758 retval = journal_begin(&th, dir->i_sb, jbegin_count); 751 retval = journal_begin(&th, dir->i_sb, jbegin_count);
759 if (retval) { 752 if (retval) {
@@ -804,7 +797,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
804 d_instantiate(dentry, inode); 797 d_instantiate(dentry, inode);
805 retval = journal_end(&th, dir->i_sb, jbegin_count); 798 retval = journal_end(&th, dir->i_sb, jbegin_count);
806out_failed: 799out_failed:
807 reiserfs_write_unlock_once(dir->i_sb, lock_depth); 800 reiserfs_write_unlock(dir->i_sb);
808 return retval; 801 return retval;
809} 802}
810 803
@@ -920,7 +913,6 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
920 struct reiserfs_transaction_handle th; 913 struct reiserfs_transaction_handle th;
921 int jbegin_count; 914 int jbegin_count;
922 unsigned long savelink; 915 unsigned long savelink;
923 int depth;
924 916
925 dquot_initialize(dir); 917 dquot_initialize(dir);
926 918
@@ -934,7 +926,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
934 JOURNAL_PER_BALANCE_CNT * 2 + 2 + 926 JOURNAL_PER_BALANCE_CNT * 2 + 2 +
935 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); 927 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
936 928
937 depth = reiserfs_write_lock_once(dir->i_sb); 929 reiserfs_write_lock(dir->i_sb);
938 retval = journal_begin(&th, dir->i_sb, jbegin_count); 930 retval = journal_begin(&th, dir->i_sb, jbegin_count);
939 if (retval) 931 if (retval)
940 goto out_unlink; 932 goto out_unlink;
@@ -995,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
995 987
996 retval = journal_end(&th, dir->i_sb, jbegin_count); 988 retval = journal_end(&th, dir->i_sb, jbegin_count);
997 reiserfs_check_path(&path); 989 reiserfs_check_path(&path);
998 reiserfs_write_unlock_once(dir->i_sb, depth); 990 reiserfs_write_unlock(dir->i_sb);
999 return retval; 991 return retval;
1000 992
1001 end_unlink: 993 end_unlink:
@@ -1005,7 +997,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
1005 if (err) 997 if (err)
1006 retval = err; 998 retval = err;
1007 out_unlink: 999 out_unlink:
1008 reiserfs_write_unlock_once(dir->i_sb, depth); 1000 reiserfs_write_unlock(dir->i_sb);
1009 return retval; 1001 return retval;
1010} 1002}
1011 1003
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index c0b1112ab7e3..54944d5a4a6e 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -358,12 +358,13 @@ void __reiserfs_panic(struct super_block *sb, const char *id,
358 dump_stack(); 358 dump_stack();
359#endif 359#endif
360 if (sb) 360 if (sb)
361 panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n", 361 printk(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
362 sb->s_id, id ? id : "", id ? " " : "", 362 sb->s_id, id ? id : "", id ? " " : "",
363 function, error_buf); 363 function, error_buf);
364 else 364 else
365 panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n", 365 printk(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
366 id ? id : "", id ? " " : "", function, error_buf); 366 id ? id : "", id ? " " : "", function, error_buf);
367 BUG();
367} 368}
368 369
369void __reiserfs_error(struct super_block *sb, const char *id, 370void __reiserfs_error(struct super_block *sb, const char *id,
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 3df5ce6c724d..f8adaee537c2 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -630,8 +630,8 @@ static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
630 */ 630 */
631void reiserfs_write_lock(struct super_block *s); 631void reiserfs_write_lock(struct super_block *s);
632void reiserfs_write_unlock(struct super_block *s); 632void reiserfs_write_unlock(struct super_block *s);
633int reiserfs_write_lock_once(struct super_block *s); 633int __must_check reiserfs_write_unlock_nested(struct super_block *s);
634void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); 634void reiserfs_write_lock_nested(struct super_block *s, int depth);
635 635
636#ifdef CONFIG_REISERFS_CHECK 636#ifdef CONFIG_REISERFS_CHECK
637void reiserfs_lock_check_recursive(struct super_block *s); 637void reiserfs_lock_check_recursive(struct super_block *s);
@@ -667,31 +667,33 @@ static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
667 * - The inode mutex 667 * - The inode mutex
668 */ 668 */
669static inline void reiserfs_mutex_lock_safe(struct mutex *m, 669static inline void reiserfs_mutex_lock_safe(struct mutex *m,
670 struct super_block *s) 670 struct super_block *s)
671{ 671{
672 reiserfs_lock_check_recursive(s); 672 int depth;
673 reiserfs_write_unlock(s); 673
674 depth = reiserfs_write_unlock_nested(s);
674 mutex_lock(m); 675 mutex_lock(m);
675 reiserfs_write_lock(s); 676 reiserfs_write_lock_nested(s, depth);
676} 677}
677 678
678static inline void 679static inline void
679reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass, 680reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
680 struct super_block *s) 681 struct super_block *s)
681{ 682{
682 reiserfs_lock_check_recursive(s); 683 int depth;
683 reiserfs_write_unlock(s); 684
685 depth = reiserfs_write_unlock_nested(s);
684 mutex_lock_nested(m, subclass); 686 mutex_lock_nested(m, subclass);
685 reiserfs_write_lock(s); 687 reiserfs_write_lock_nested(s, depth);
686} 688}
687 689
688static inline void 690static inline void
689reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s) 691reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
690{ 692{
691 reiserfs_lock_check_recursive(s); 693 int depth;
692 reiserfs_write_unlock(s); 694 depth = reiserfs_write_unlock_nested(s);
693 down_read(sem); 695 down_read(sem);
694 reiserfs_write_lock(s); 696 reiserfs_write_lock_nested(s, depth);
695} 697}
696 698
697/* 699/*
@@ -701,9 +703,11 @@ reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
701static inline void reiserfs_cond_resched(struct super_block *s) 703static inline void reiserfs_cond_resched(struct super_block *s)
702{ 704{
703 if (need_resched()) { 705 if (need_resched()) {
704 reiserfs_write_unlock(s); 706 int depth;
707
708 depth = reiserfs_write_unlock_nested(s);
705 schedule(); 709 schedule();
706 reiserfs_write_lock(s); 710 reiserfs_write_lock_nested(s, depth);
707 } 711 }
708} 712}
709 713
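The reiserfs.h hunk above rebuilds the "safe" helpers (reiserfs_mutex_lock_safe, reiserfs_down_read_safe, reiserfs_cond_resched) on that same pair, so each one now behaves correctly whether or not the caller holds the write lock. A compact sketch of the composition follows; the three stubs are local stand-ins (assumed names, not the kernel API) so the example builds and runs on its own.

/*
 * Sketch of how reiserfs_cond_resched() composes on the nested pair.
 * All externals are stubbed so this compiles standalone.
 */
#include <stdio.h>

static int lock_depth = 0;		/* pretend we hold the lock once */

static int unlock_nested_stub(void)	/* stands in for reiserfs_write_unlock_nested() */
{
	int d = lock_depth;
	lock_depth = -1;
	return d;
}

static void lock_nested_stub(int depth)	/* stands in for reiserfs_write_lock_nested() */
{
	if (depth != -1)
		lock_depth = depth;
}

static int need_resched_stub(void) { return 1; }	/* stands in for need_resched() */
static void schedule_stub(void) { }			/* stands in for schedule() */

/* Same shape as the new reiserfs_cond_resched(): safe with or without the lock. */
static void cond_resched_model(void)
{
	if (need_resched_stub()) {
		int depth = unlock_nested_stub();	/* -1 if the lock wasn't held */
		schedule_stub();
		lock_nested_stub(depth);		/* no-op for -1 */
	}
}

int main(void)
{
	cond_resched_model();
	printf("depth after resched: %d\n", lock_depth);	/* still 0 */
	return 0;
}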
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 3ce02cff5e90..a4ef5cd606eb 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -34,6 +34,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
34 unsigned long int block_count, free_blocks; 34 unsigned long int block_count, free_blocks;
35 int i; 35 int i;
36 int copy_size; 36 int copy_size;
37 int depth;
37 38
38 sb = SB_DISK_SUPER_BLOCK(s); 39 sb = SB_DISK_SUPER_BLOCK(s);
39 40
@@ -43,7 +44,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
43 } 44 }
44 45
45 /* check the device size */ 46 /* check the device size */
47 depth = reiserfs_write_unlock_nested(s);
46 bh = sb_bread(s, block_count_new - 1); 48 bh = sb_bread(s, block_count_new - 1);
49 reiserfs_write_lock_nested(s, depth);
47 if (!bh) { 50 if (!bh) {
48 printk("reiserfs_resize: can\'t read last block\n"); 51 printk("reiserfs_resize: can\'t read last block\n");
49 return -EINVAL; 52 return -EINVAL;
@@ -125,9 +128,12 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
125 * transaction begins, and the new bitmaps don't matter if the 128 * transaction begins, and the new bitmaps don't matter if the
126 * transaction fails. */ 129 * transaction fails. */
127 for (i = bmap_nr; i < bmap_nr_new; i++) { 130 for (i = bmap_nr; i < bmap_nr_new; i++) {
131 int depth;
128 /* don't use read_bitmap_block since it will cache 132 /* don't use read_bitmap_block since it will cache
129 * the uninitialized bitmap */ 133 * the uninitialized bitmap */
134 depth = reiserfs_write_unlock_nested(s);
130 bh = sb_bread(s, i * s->s_blocksize * 8); 135 bh = sb_bread(s, i * s->s_blocksize * 8);
136 reiserfs_write_lock_nested(s, depth);
131 if (!bh) { 137 if (!bh) {
132 vfree(bitmap); 138 vfree(bitmap);
133 return -EIO; 139 return -EIO;
@@ -138,9 +144,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
138 144
139 set_buffer_uptodate(bh); 145 set_buffer_uptodate(bh);
140 mark_buffer_dirty(bh); 146 mark_buffer_dirty(bh);
141 reiserfs_write_unlock(s); 147 depth = reiserfs_write_unlock_nested(s);
142 sync_dirty_buffer(bh); 148 sync_dirty_buffer(bh);
143 reiserfs_write_lock(s); 149 reiserfs_write_lock_nested(s, depth);
144 // update bitmap_info stuff 150 // update bitmap_info stuff
145 bitmap[i].free_count = sb_blocksize(sb) * 8 - 1; 151 bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
146 brelse(bh); 152 brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2f40a4c70a4d..b14706a05d52 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -524,14 +524,14 @@ static int is_tree_node(struct buffer_head *bh, int level)
524 * the caller (search_by_key) will perform other schedule-unsafe 524 * the caller (search_by_key) will perform other schedule-unsafe
525 * operations just after calling this function. 525 * operations just after calling this function.
526 * 526 *
527 * @return true if we have unlocked 527 * @return depth of lock to be restored after read completes
528 */ 528 */
529static bool search_by_key_reada(struct super_block *s, 529static int search_by_key_reada(struct super_block *s,
530 struct buffer_head **bh, 530 struct buffer_head **bh,
531 b_blocknr_t *b, int num) 531 b_blocknr_t *b, int num)
532{ 532{
533 int i, j; 533 int i, j;
534 bool unlocked = false; 534 int depth = -1;
535 535
536 for (i = 0; i < num; i++) { 536 for (i = 0; i < num; i++) {
537 bh[i] = sb_getblk(s, b[i]); 537 bh[i] = sb_getblk(s, b[i]);
@@ -549,15 +549,13 @@ static bool search_by_key_reada(struct super_block *s,
549 * you have to make sure the prepared bit isn't set on this buffer 549 * you have to make sure the prepared bit isn't set on this buffer
550 */ 550 */
551 if (!buffer_uptodate(bh[j])) { 551 if (!buffer_uptodate(bh[j])) {
552 if (!unlocked) { 552 if (depth == -1)
553 reiserfs_write_unlock(s); 553 depth = reiserfs_write_unlock_nested(s);
554 unlocked = true;
555 }
556 ll_rw_block(READA, 1, bh + j); 554 ll_rw_block(READA, 1, bh + j);
557 } 555 }
558 brelse(bh[j]); 556 brelse(bh[j]);
559 } 557 }
560 return unlocked; 558 return depth;
561} 559}
562 560
563/************************************************************************** 561/**************************************************************************
@@ -645,26 +643,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
645 have a pointer to it. */ 643 have a pointer to it. */
646 if ((bh = last_element->pe_buffer = 644 if ((bh = last_element->pe_buffer =
647 sb_getblk(sb, block_number))) { 645 sb_getblk(sb, block_number))) {
648 bool unlocked = false;
649 646
650 if (!buffer_uptodate(bh) && reada_count > 1)
651 /* may unlock the write lock */
652 unlocked = search_by_key_reada(sb, reada_bh,
653 reada_blocks, reada_count);
654 /* 647 /*
655 * If we haven't already unlocked the write lock, 648 * We'll need to drop the lock if we encounter any
656 * then we need to do that here before reading 649 * buffers that need to be read. If all of them are
657 * the current block 650 * already up to date, we don't need to drop the lock.
658 */ 651 */
659 if (!buffer_uptodate(bh) && !unlocked) { 652 int depth = -1;
660 reiserfs_write_unlock(sb); 653
661 unlocked = true; 654 if (!buffer_uptodate(bh) && reada_count > 1)
662 } 655 depth = search_by_key_reada(sb, reada_bh,
656 reada_blocks, reada_count);
657
658 if (!buffer_uptodate(bh) && depth == -1)
659 depth = reiserfs_write_unlock_nested(sb);
660
663 ll_rw_block(READ, 1, &bh); 661 ll_rw_block(READ, 1, &bh);
664 wait_on_buffer(bh); 662 wait_on_buffer(bh);
665 663
666 if (unlocked) 664 if (depth != -1)
667 reiserfs_write_lock(sb); 665 reiserfs_write_lock_nested(sb, depth);
668 if (!buffer_uptodate(bh)) 666 if (!buffer_uptodate(bh))
669 goto io_error; 667 goto io_error;
670 } else { 668 } else {
@@ -1059,9 +1057,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
1059 reiserfs_free_block(th, inode, block, 1); 1057 reiserfs_free_block(th, inode, block, 1);
1060 } 1058 }
1061 1059
1062 reiserfs_write_unlock(sb); 1060 reiserfs_cond_resched(sb);
1063 cond_resched();
1064 reiserfs_write_lock(sb);
1065 1061
1066 if (item_moved (&s_ih, path)) { 1062 if (item_moved (&s_ih, path)) {
1067 need_re_search = 1; 1063 need_re_search = 1;
@@ -1190,6 +1186,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1190 struct item_head *q_ih; 1186 struct item_head *q_ih;
1191 int quota_cut_bytes; 1187 int quota_cut_bytes;
1192 int ret_value, del_size, removed; 1188 int ret_value, del_size, removed;
1189 int depth;
1193 1190
1194#ifdef CONFIG_REISERFS_CHECK 1191#ifdef CONFIG_REISERFS_CHECK
1195 char mode; 1192 char mode;
@@ -1299,7 +1296,9 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1299 "reiserquota delete_item(): freeing %u, id=%u type=%c", 1296 "reiserquota delete_item(): freeing %u, id=%u type=%c",
1300 quota_cut_bytes, inode->i_uid, head2type(&s_ih)); 1297 quota_cut_bytes, inode->i_uid, head2type(&s_ih));
1301#endif 1298#endif
1299 depth = reiserfs_write_unlock_nested(inode->i_sb);
1302 dquot_free_space_nodirty(inode, quota_cut_bytes); 1300 dquot_free_space_nodirty(inode, quota_cut_bytes);
1301 reiserfs_write_lock_nested(inode->i_sb, depth);
1303 1302
1304 /* Return deleted body length */ 1303 /* Return deleted body length */
1305 return ret_value; 1304 return ret_value;
@@ -1325,6 +1324,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1325void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, 1324void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
1326 struct inode *inode, struct reiserfs_key *key) 1325 struct inode *inode, struct reiserfs_key *key)
1327{ 1326{
1327 struct super_block *sb = th->t_super;
1328 struct tree_balance tb; 1328 struct tree_balance tb;
1329 INITIALIZE_PATH(path); 1329 INITIALIZE_PATH(path);
1330 int item_len = 0; 1330 int item_len = 0;
@@ -1377,14 +1377,17 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
1377 if (retval == CARRY_ON) { 1377 if (retval == CARRY_ON) {
1378 do_balance(&tb, NULL, NULL, M_DELETE); 1378 do_balance(&tb, NULL, NULL, M_DELETE);
1379 if (inode) { /* Should we count quota for item? (we don't count quotas for save-links) */ 1379 if (inode) { /* Should we count quota for item? (we don't count quotas for save-links) */
1380 int depth;
1380#ifdef REISERQUOTA_DEBUG 1381#ifdef REISERQUOTA_DEBUG
1381 reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE, 1382 reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
1382 "reiserquota delete_solid_item(): freeing %u id=%u type=%c", 1383 "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
1383 quota_cut_bytes, inode->i_uid, 1384 quota_cut_bytes, inode->i_uid,
1384 key2type(key)); 1385 key2type(key));
1385#endif 1386#endif
1387 depth = reiserfs_write_unlock_nested(sb);
1386 dquot_free_space_nodirty(inode, 1388 dquot_free_space_nodirty(inode,
1387 quota_cut_bytes); 1389 quota_cut_bytes);
1390 reiserfs_write_lock_nested(sb, depth);
1388 } 1391 }
1389 break; 1392 break;
1390 } 1393 }
@@ -1561,6 +1564,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
1561 int retval2 = -1; 1564 int retval2 = -1;
1562 int quota_cut_bytes; 1565 int quota_cut_bytes;
1563 loff_t tail_pos = 0; 1566 loff_t tail_pos = 0;
1567 int depth;
1564 1568
1565 BUG_ON(!th->t_trans_id); 1569 BUG_ON(!th->t_trans_id);
1566 1570
@@ -1733,7 +1737,9 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
1733 "reiserquota cut_from_item(): freeing %u id=%u type=%c", 1737 "reiserquota cut_from_item(): freeing %u id=%u type=%c",
1734 quota_cut_bytes, inode->i_uid, '?'); 1738 quota_cut_bytes, inode->i_uid, '?');
1735#endif 1739#endif
1740 depth = reiserfs_write_unlock_nested(sb);
1736 dquot_free_space_nodirty(inode, quota_cut_bytes); 1741 dquot_free_space_nodirty(inode, quota_cut_bytes);
1742 reiserfs_write_lock_nested(sb, depth);
1737 return ret_value; 1743 return ret_value;
1738} 1744}
1739 1745
@@ -1953,9 +1959,11 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
1953 const char *body, /* Pointer to the bytes to paste. */ 1959 const char *body, /* Pointer to the bytes to paste. */
1954 int pasted_size) 1960 int pasted_size)
1955{ /* Size of pasted bytes. */ 1961{ /* Size of pasted bytes. */
1962 struct super_block *sb = inode->i_sb;
1956 struct tree_balance s_paste_balance; 1963 struct tree_balance s_paste_balance;
1957 int retval; 1964 int retval;
1958 int fs_gen; 1965 int fs_gen;
1966 int depth;
1959 1967
1960 BUG_ON(!th->t_trans_id); 1968 BUG_ON(!th->t_trans_id);
1961 1969
@@ -1968,9 +1976,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
1968 key2type(&(key->on_disk_key))); 1976 key2type(&(key->on_disk_key)));
1969#endif 1977#endif
1970 1978
1971 reiserfs_write_unlock(inode->i_sb); 1979 depth = reiserfs_write_unlock_nested(sb);
1972 retval = dquot_alloc_space_nodirty(inode, pasted_size); 1980 retval = dquot_alloc_space_nodirty(inode, pasted_size);
1973 reiserfs_write_lock(inode->i_sb); 1981 reiserfs_write_lock_nested(sb, depth);
1974 if (retval) { 1982 if (retval) {
1975 pathrelse(search_path); 1983 pathrelse(search_path);
1976 return retval; 1984 return retval;
@@ -2027,7 +2035,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
2027 pasted_size, inode->i_uid, 2035 pasted_size, inode->i_uid,
2028 key2type(&(key->on_disk_key))); 2036 key2type(&(key->on_disk_key)));
2029#endif 2037#endif
2038 depth = reiserfs_write_unlock_nested(sb);
2030 dquot_free_space_nodirty(inode, pasted_size); 2039 dquot_free_space_nodirty(inode, pasted_size);
2040 reiserfs_write_lock_nested(sb, depth);
2031 return retval; 2041 return retval;
2032} 2042}
2033 2043
@@ -2050,6 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
2050 BUG_ON(!th->t_trans_id); 2060 BUG_ON(!th->t_trans_id);
2051 2061
2052 if (inode) { /* Do we count quotas for item? */ 2062 if (inode) { /* Do we count quotas for item? */
2063 int depth;
2053 fs_gen = get_generation(inode->i_sb); 2064 fs_gen = get_generation(inode->i_sb);
2054 quota_bytes = ih_item_len(ih); 2065 quota_bytes = ih_item_len(ih);
2055 2066
@@ -2063,11 +2074,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
2063 "reiserquota insert_item(): allocating %u id=%u type=%c", 2074 "reiserquota insert_item(): allocating %u id=%u type=%c",
2064 quota_bytes, inode->i_uid, head2type(ih)); 2075 quota_bytes, inode->i_uid, head2type(ih));
2065#endif 2076#endif
2066 reiserfs_write_unlock(inode->i_sb);
2067 /* We can't dirty inode here. It would be immediately written but 2077 /* We can't dirty inode here. It would be immediately written but
2068 * appropriate stat item isn't inserted yet... */ 2078 * appropriate stat item isn't inserted yet... */
2079 depth = reiserfs_write_unlock_nested(inode->i_sb);
2069 retval = dquot_alloc_space_nodirty(inode, quota_bytes); 2080 retval = dquot_alloc_space_nodirty(inode, quota_bytes);
2070 reiserfs_write_lock(inode->i_sb); 2081 reiserfs_write_lock_nested(inode->i_sb, depth);
2071 if (retval) { 2082 if (retval) {
2072 pathrelse(path); 2083 pathrelse(path);
2073 return retval; 2084 return retval;
@@ -2118,7 +2129,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
2118 "reiserquota insert_item(): freeing %u id=%u type=%c", 2129 "reiserquota insert_item(): freeing %u id=%u type=%c",
2119 quota_bytes, inode->i_uid, head2type(ih)); 2130 quota_bytes, inode->i_uid, head2type(ih));
2120#endif 2131#endif
2121 if (inode) 2132 if (inode) {
2133 int depth = reiserfs_write_unlock_nested(inode->i_sb);
2122 dquot_free_space_nodirty(inode, quota_bytes); 2134 dquot_free_space_nodirty(inode, quota_bytes);
2135 reiserfs_write_lock_nested(inode->i_sb, depth);
2136 }
2123 return retval; 2137 return retval;
2124} 2138}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index e2e202a07b31..3ead145dadc4 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -243,6 +243,7 @@ static int finish_unfinished(struct super_block *s)
243 done = 0; 243 done = 0;
244 REISERFS_SB(s)->s_is_unlinked_ok = 1; 244 REISERFS_SB(s)->s_is_unlinked_ok = 1;
245 while (!retval) { 245 while (!retval) {
246 int depth;
246 retval = search_item(s, &max_cpu_key, &path); 247 retval = search_item(s, &max_cpu_key, &path);
247 if (retval != ITEM_NOT_FOUND) { 248 if (retval != ITEM_NOT_FOUND) {
248 reiserfs_error(s, "vs-2140", 249 reiserfs_error(s, "vs-2140",
@@ -298,9 +299,9 @@ static int finish_unfinished(struct super_block *s)
298 retval = remove_save_link_only(s, &save_link_key, 0); 299 retval = remove_save_link_only(s, &save_link_key, 0);
299 continue; 300 continue;
300 } 301 }
301 reiserfs_write_unlock(s); 302 depth = reiserfs_write_unlock_nested(inode->i_sb);
302 dquot_initialize(inode); 303 dquot_initialize(inode);
303 reiserfs_write_lock(s); 304 reiserfs_write_lock_nested(inode->i_sb, depth);
304 305
305 if (truncate && S_ISDIR(inode->i_mode)) { 306 if (truncate && S_ISDIR(inode->i_mode)) {
306 /* We got a truncate request for a dir which is impossible. 307 /* We got a truncate request for a dir which is impossible.
@@ -356,10 +357,12 @@ static int finish_unfinished(struct super_block *s)
356 357
357#ifdef CONFIG_QUOTA 358#ifdef CONFIG_QUOTA
358 /* Turn quotas off */ 359 /* Turn quotas off */
360 reiserfs_write_unlock(s);
359 for (i = 0; i < MAXQUOTAS; i++) { 361 for (i = 0; i < MAXQUOTAS; i++) {
360 if (sb_dqopt(s)->files[i] && quota_enabled[i]) 362 if (sb_dqopt(s)->files[i] && quota_enabled[i])
361 dquot_quota_off(s, i); 363 dquot_quota_off(s, i);
362 } 364 }
365 reiserfs_write_lock(s);
363 if (ms_active_set) 366 if (ms_active_set)
364 /* Restore the flag back */ 367 /* Restore the flag back */
365 s->s_flags &= ~MS_ACTIVE; 368 s->s_flags &= ~MS_ACTIVE;
@@ -623,7 +626,6 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
623 struct reiserfs_transaction_handle th; 626 struct reiserfs_transaction_handle th;
624 627
625 int err = 0; 628 int err = 0;
626 int lock_depth;
627 629
628 if (inode->i_sb->s_flags & MS_RDONLY) { 630 if (inode->i_sb->s_flags & MS_RDONLY) {
629 reiserfs_warning(inode->i_sb, "clm-6006", 631 reiserfs_warning(inode->i_sb, "clm-6006",
@@ -631,7 +633,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
631 inode->i_ino); 633 inode->i_ino);
632 return; 634 return;
633 } 635 }
634 lock_depth = reiserfs_write_lock_once(inode->i_sb); 636 reiserfs_write_lock(inode->i_sb);
635 637
636 /* this is really only used for atime updates, so they don't have 638 /* this is really only used for atime updates, so they don't have
637 ** to be included in O_SYNC or fsync 639 ** to be included in O_SYNC or fsync
@@ -644,7 +646,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
644 journal_end(&th, inode->i_sb, 1); 646 journal_end(&th, inode->i_sb, 1);
645 647
646out: 648out:
647 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 649 reiserfs_write_unlock(inode->i_sb);
648} 650}
649 651
650static int reiserfs_show_options(struct seq_file *seq, struct dentry *root) 652static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
@@ -1334,7 +1336,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1334 kfree(qf_names[i]); 1336 kfree(qf_names[i]);
1335#endif 1337#endif
1336 err = -EINVAL; 1338 err = -EINVAL;
1337 goto out_unlock; 1339 goto out_err_unlock;
1338 } 1340 }
1339#ifdef CONFIG_QUOTA 1341#ifdef CONFIG_QUOTA
1340 handle_quota_files(s, qf_names, &qfmt); 1342 handle_quota_files(s, qf_names, &qfmt);
@@ -1378,35 +1380,32 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1378 if (blocks) { 1380 if (blocks) {
1379 err = reiserfs_resize(s, blocks); 1381 err = reiserfs_resize(s, blocks);
1380 if (err != 0) 1382 if (err != 0)
1381 goto out_unlock; 1383 goto out_err_unlock;
1382 } 1384 }
1383 1385
1384 if (*mount_flags & MS_RDONLY) { 1386 if (*mount_flags & MS_RDONLY) {
1387 reiserfs_write_unlock(s);
1385 reiserfs_xattr_init(s, *mount_flags); 1388 reiserfs_xattr_init(s, *mount_flags);
1386 /* remount read-only */ 1389 /* remount read-only */
1387 if (s->s_flags & MS_RDONLY) 1390 if (s->s_flags & MS_RDONLY)
1388 /* it is read-only already */ 1391 /* it is read-only already */
1389 goto out_ok; 1392 goto out_ok_unlocked;
1390 1393
1391 /*
1392 * Drop write lock. Quota will retake it when needed and lock
1393 * ordering requires calling dquot_suspend() without it.
1394 */
1395 reiserfs_write_unlock(s);
1396 err = dquot_suspend(s, -1); 1394 err = dquot_suspend(s, -1);
1397 if (err < 0) 1395 if (err < 0)
1398 goto out_err; 1396 goto out_err;
1399 reiserfs_write_lock(s);
1400 1397
1401 /* try to remount file system with read-only permissions */ 1398 /* try to remount file system with read-only permissions */
1402 if (sb_umount_state(rs) == REISERFS_VALID_FS 1399 if (sb_umount_state(rs) == REISERFS_VALID_FS
1403 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { 1400 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
1404 goto out_ok; 1401 goto out_ok_unlocked;
1405 } 1402 }
1406 1403
1404 reiserfs_write_lock(s);
1405
1407 err = journal_begin(&th, s, 10); 1406 err = journal_begin(&th, s, 10);
1408 if (err) 1407 if (err)
1409 goto out_unlock; 1408 goto out_err_unlock;
1410 1409
1411 /* Mounting a rw partition read-only. */ 1410 /* Mounting a rw partition read-only. */
1412 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); 1411 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1415,13 +1414,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1415 } else { 1414 } else {
1416 /* remount read-write */ 1415 /* remount read-write */
1417 if (!(s->s_flags & MS_RDONLY)) { 1416 if (!(s->s_flags & MS_RDONLY)) {
1417 reiserfs_write_unlock(s);
1418 reiserfs_xattr_init(s, *mount_flags); 1418 reiserfs_xattr_init(s, *mount_flags);
1419 goto out_ok; /* We are read-write already */ 1419 goto out_ok_unlocked; /* We are read-write already */
1420 } 1420 }
1421 1421
1422 if (reiserfs_is_journal_aborted(journal)) { 1422 if (reiserfs_is_journal_aborted(journal)) {
1423 err = journal->j_errno; 1423 err = journal->j_errno;
1424 goto out_unlock; 1424 goto out_err_unlock;
1425 } 1425 }
1426 1426
1427 handle_data_mode(s, mount_options); 1427 handle_data_mode(s, mount_options);
@@ -1430,7 +1430,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1430 s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ 1430 s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
1431 err = journal_begin(&th, s, 10); 1431 err = journal_begin(&th, s, 10);
1432 if (err) 1432 if (err)
1433 goto out_unlock; 1433 goto out_err_unlock;
1434 1434
1435 /* Mount a partition which is read-only, read-write */ 1435 /* Mount a partition which is read-only, read-write */
1436 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); 1436 reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1447,26 +1447,22 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1447 SB_JOURNAL(s)->j_must_wait = 1; 1447 SB_JOURNAL(s)->j_must_wait = 1;
1448 err = journal_end(&th, s, 10); 1448 err = journal_end(&th, s, 10);
1449 if (err) 1449 if (err)
1450 goto out_unlock; 1450 goto out_err_unlock;
1451 1451
1452 reiserfs_write_unlock(s);
1452 if (!(*mount_flags & MS_RDONLY)) { 1453 if (!(*mount_flags & MS_RDONLY)) {
1453 /*
1454 * Drop write lock. Quota will retake it when needed and lock
1455 * ordering requires calling dquot_resume() without it.
1456 */
1457 reiserfs_write_unlock(s);
1458 dquot_resume(s, -1); 1454 dquot_resume(s, -1);
1459 reiserfs_write_lock(s); 1455 reiserfs_write_lock(s);
1460 finish_unfinished(s); 1456 finish_unfinished(s);
1457 reiserfs_write_unlock(s);
1461 reiserfs_xattr_init(s, *mount_flags); 1458 reiserfs_xattr_init(s, *mount_flags);
1462 } 1459 }
1463 1460
1464out_ok: 1461out_ok_unlocked:
1465 replace_mount_options(s, new_opts); 1462 replace_mount_options(s, new_opts);
1466 reiserfs_write_unlock(s);
1467 return 0; 1463 return 0;
1468 1464
1469out_unlock: 1465out_err_unlock:
1470 reiserfs_write_unlock(s); 1466 reiserfs_write_unlock(s);
1471out_err: 1467out_err:
1472 kfree(new_opts); 1468 kfree(new_opts);
@@ -2013,12 +2009,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
2013 goto error; 2009 goto error;
2014 } 2010 }
2015 2011
2012 reiserfs_write_unlock(s);
2016 if ((errval = reiserfs_lookup_privroot(s)) || 2013 if ((errval = reiserfs_lookup_privroot(s)) ||
2017 (errval = reiserfs_xattr_init(s, s->s_flags))) { 2014 (errval = reiserfs_xattr_init(s, s->s_flags))) {
2018 dput(s->s_root); 2015 dput(s->s_root);
2019 s->s_root = NULL; 2016 s->s_root = NULL;
2020 goto error; 2017 goto error_unlocked;
2021 } 2018 }
2019 reiserfs_write_lock(s);
2022 2020
2023 /* look for files which were to be removed in previous session */ 2021 /* look for files which were to be removed in previous session */
2024 finish_unfinished(s); 2022 finish_unfinished(s);
@@ -2027,12 +2025,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
2027 reiserfs_info(s, "using 3.5.x disk format\n"); 2025 reiserfs_info(s, "using 3.5.x disk format\n");
2028 } 2026 }
2029 2027
2028 reiserfs_write_unlock(s);
2030 if ((errval = reiserfs_lookup_privroot(s)) || 2029 if ((errval = reiserfs_lookup_privroot(s)) ||
2031 (errval = reiserfs_xattr_init(s, s->s_flags))) { 2030 (errval = reiserfs_xattr_init(s, s->s_flags))) {
2032 dput(s->s_root); 2031 dput(s->s_root);
2033 s->s_root = NULL; 2032 s->s_root = NULL;
2034 goto error; 2033 goto error_unlocked;
2035 } 2034 }
2035 reiserfs_write_lock(s);
2036 } 2036 }
2037 // mark hash in super block: it could be unset. overwrite should be ok 2037 // mark hash in super block: it could be unset. overwrite should be ok
2038 set_sb_hash_function_code(rs, function2code(sbi->s_hash_function)); 2038 set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
@@ -2100,6 +2100,7 @@ static int reiserfs_write_dquot(struct dquot *dquot)
2100{ 2100{
2101 struct reiserfs_transaction_handle th; 2101 struct reiserfs_transaction_handle th;
2102 int ret, err; 2102 int ret, err;
2103 int depth;
2103 2104
2104 reiserfs_write_lock(dquot->dq_sb); 2105 reiserfs_write_lock(dquot->dq_sb);
2105 ret = 2106 ret =
@@ -2107,9 +2108,9 @@ static int reiserfs_write_dquot(struct dquot *dquot)
2107 REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 2108 REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
2108 if (ret) 2109 if (ret)
2109 goto out; 2110 goto out;
2110 reiserfs_write_unlock(dquot->dq_sb); 2111 depth = reiserfs_write_unlock_nested(dquot->dq_sb);
2111 ret = dquot_commit(dquot); 2112 ret = dquot_commit(dquot);
2112 reiserfs_write_lock(dquot->dq_sb); 2113 reiserfs_write_lock_nested(dquot->dq_sb, depth);
2113 err = 2114 err =
2114 journal_end(&th, dquot->dq_sb, 2115 journal_end(&th, dquot->dq_sb,
2115 REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 2116 REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
@@ -2124,6 +2125,7 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
2124{ 2125{
2125 struct reiserfs_transaction_handle th; 2126 struct reiserfs_transaction_handle th;
2126 int ret, err; 2127 int ret, err;
2128 int depth;
2127 2129
2128 reiserfs_write_lock(dquot->dq_sb); 2130 reiserfs_write_lock(dquot->dq_sb);
2129 ret = 2131 ret =
@@ -2131,9 +2133,9 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
2131 REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 2133 REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
2132 if (ret) 2134 if (ret)
2133 goto out; 2135 goto out;
2134 reiserfs_write_unlock(dquot->dq_sb); 2136 depth = reiserfs_write_unlock_nested(dquot->dq_sb);
2135 ret = dquot_acquire(dquot); 2137 ret = dquot_acquire(dquot);
2136 reiserfs_write_lock(dquot->dq_sb); 2138 reiserfs_write_lock_nested(dquot->dq_sb, depth);
2137 err = 2139 err =
2138 journal_end(&th, dquot->dq_sb, 2140 journal_end(&th, dquot->dq_sb,
2139 REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 2141 REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
@@ -2186,15 +2188,16 @@ static int reiserfs_write_info(struct super_block *sb, int type)
2186{ 2188{
2187 struct reiserfs_transaction_handle th; 2189 struct reiserfs_transaction_handle th;
2188 int ret, err; 2190 int ret, err;
2191 int depth;
2189 2192
2190 /* Data block + inode block */ 2193 /* Data block + inode block */
2191 reiserfs_write_lock(sb); 2194 reiserfs_write_lock(sb);
2192 ret = journal_begin(&th, sb, 2); 2195 ret = journal_begin(&th, sb, 2);
2193 if (ret) 2196 if (ret)
2194 goto out; 2197 goto out;
2195 reiserfs_write_unlock(sb); 2198 depth = reiserfs_write_unlock_nested(sb);
2196 ret = dquot_commit_info(sb, type); 2199 ret = dquot_commit_info(sb, type);
2197 reiserfs_write_lock(sb); 2200 reiserfs_write_lock_nested(sb, depth);
2198 err = journal_end(&th, sb, 2); 2201 err = journal_end(&th, sb, 2);
2199 if (!ret && err) 2202 if (!ret && err)
2200 ret = err; 2203 ret = err;
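The quota hunks in super.c above all apply one lock-ordering rule: journal_begin()/journal_end() run under the write lock, while the dquot_* call in between runs with the lock dropped at its remembered depth, because the quota code takes its own locks and must not nest inside the reiserfs write lock. The sketch below models that call shape; every helper is a printing stub, not the kernel function, and the error-path details are assumed rather than copied from the patch.

/*
 * Shape of reiserfs_write_dquot()/reiserfs_acquire_dquot() after this
 * patch, with every external call stubbed out so the example compiles
 * and runs standalone.
 */
#include <stdio.h>

static void write_lock_stub(void)        { printf("write lock\n"); }
static void write_unlock_stub(void)      { printf("write unlock\n"); }
static int  unlock_nested_stub(void)     { printf("drop lock (save depth)\n"); return 0; }
static void lock_nested_stub(int depth)  { printf("retake lock (depth %d)\n", depth); }
static int  journal_begin_stub(void)     { printf("journal_begin\n"); return 0; }
static int  journal_end_stub(void)       { printf("journal_end\n"); return 0; }
static int  dquot_commit_stub(void)      { printf("dquot_commit (own locking)\n"); return 0; }

static int write_dquot_model(void)
{
	int ret, err, depth;

	write_lock_stub();
	ret = journal_begin_stub();		/* needs the write lock */
	if (ret)
		goto out;

	depth = unlock_nested_stub();		/* quota code must not nest inside it */
	ret = dquot_commit_stub();
	lock_nested_stub(depth);

	err = journal_end_stub();		/* back under the write lock */
	if (!ret && err)
		ret = err;
out:
	write_unlock_stub();
	return ret;
}

int main(void)
{
	return write_dquot_model();
}

The xattr.c and xattr_acl.c hunks that follow apply the same rule to journal_begin()/journal_end() around xattr and ACL work: the transaction markers stay under the write lock, the VFS-level work in between does not.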
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c69cdd749f09..8a9e2dcfe004 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -81,8 +81,7 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
81 int error; 81 int error;
82 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 82 BUG_ON(!mutex_is_locked(&dir->i_mutex));
83 83
84 reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 84 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
85 I_MUTEX_CHILD, dir->i_sb);
86 error = dir->i_op->unlink(dir, dentry); 85 error = dir->i_op->unlink(dir, dentry);
87 mutex_unlock(&dentry->d_inode->i_mutex); 86 mutex_unlock(&dentry->d_inode->i_mutex);
88 87
@@ -96,8 +95,7 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
96 int error; 95 int error;
97 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 96 BUG_ON(!mutex_is_locked(&dir->i_mutex));
98 97
99 reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 98 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
100 I_MUTEX_CHILD, dir->i_sb);
101 error = dir->i_op->rmdir(dir, dentry); 99 error = dir->i_op->rmdir(dir, dentry);
102 if (!error) 100 if (!error)
103 dentry->d_inode->i_flags |= S_DEAD; 101 dentry->d_inode->i_flags |= S_DEAD;
@@ -232,22 +230,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
232 if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1) 230 if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
233 return 0; 231 return 0;
234 232
235 reiserfs_write_unlock(inode->i_sb);
236 dir = open_xa_dir(inode, XATTR_REPLACE); 233 dir = open_xa_dir(inode, XATTR_REPLACE);
237 if (IS_ERR(dir)) { 234 if (IS_ERR(dir)) {
238 err = PTR_ERR(dir); 235 err = PTR_ERR(dir);
239 reiserfs_write_lock(inode->i_sb);
240 goto out; 236 goto out;
241 } else if (!dir->d_inode) { 237 } else if (!dir->d_inode) {
242 err = 0; 238 err = 0;
243 reiserfs_write_lock(inode->i_sb);
244 goto out_dir; 239 goto out_dir;
245 } 240 }
246 241
247 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); 242 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
248 243
249 reiserfs_write_lock(inode->i_sb);
250
251 buf.xadir = dir; 244 buf.xadir = dir;
252 while (1) { 245 while (1) {
253 err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx); 246 err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
@@ -281,14 +274,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
281 int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 + 274 int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
282 4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb); 275 4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
283 struct reiserfs_transaction_handle th; 276 struct reiserfs_transaction_handle th;
277 reiserfs_write_lock(inode->i_sb);
284 err = journal_begin(&th, inode->i_sb, blocks); 278 err = journal_begin(&th, inode->i_sb, blocks);
279 reiserfs_write_unlock(inode->i_sb);
285 if (!err) { 280 if (!err) {
286 int jerror; 281 int jerror;
287 reiserfs_mutex_lock_nested_safe( 282 mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
288 &dir->d_parent->d_inode->i_mutex, 283 I_MUTEX_XATTR);
289 I_MUTEX_XATTR, inode->i_sb);
290 err = action(dir, data); 284 err = action(dir, data);
285 reiserfs_write_lock(inode->i_sb);
291 jerror = journal_end(&th, inode->i_sb, blocks); 286 jerror = journal_end(&th, inode->i_sb, blocks);
287 reiserfs_write_unlock(inode->i_sb);
292 mutex_unlock(&dir->d_parent->d_inode->i_mutex); 288 mutex_unlock(&dir->d_parent->d_inode->i_mutex);
293 err = jerror ?: err; 289 err = jerror ?: err;
294 } 290 }
@@ -455,9 +451,7 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
455 } 451 }
456 452
457 if (dentry->d_inode) { 453 if (dentry->d_inode) {
458 reiserfs_write_lock(inode->i_sb);
459 err = xattr_unlink(xadir->d_inode, dentry); 454 err = xattr_unlink(xadir->d_inode, dentry);
460 reiserfs_write_unlock(inode->i_sb);
461 update_ctime(inode); 455 update_ctime(inode);
462 } 456 }
463 457
@@ -491,24 +485,17 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
491 if (get_inode_sd_version(inode) == STAT_DATA_V1) 485 if (get_inode_sd_version(inode) == STAT_DATA_V1)
492 return -EOPNOTSUPP; 486 return -EOPNOTSUPP;
493 487
494 reiserfs_write_unlock(inode->i_sb);
495
496 if (!buffer) { 488 if (!buffer) {
497 err = lookup_and_delete_xattr(inode, name); 489 err = lookup_and_delete_xattr(inode, name);
498 reiserfs_write_lock(inode->i_sb);
499 return err; 490 return err;
500 } 491 }
501 492
502 dentry = xattr_lookup(inode, name, flags); 493 dentry = xattr_lookup(inode, name, flags);
503 if (IS_ERR(dentry)) { 494 if (IS_ERR(dentry))
504 reiserfs_write_lock(inode->i_sb);
505 return PTR_ERR(dentry); 495 return PTR_ERR(dentry);
506 }
507 496
508 down_write(&REISERFS_I(inode)->i_xattr_sem); 497 down_write(&REISERFS_I(inode)->i_xattr_sem);
509 498
510 reiserfs_write_lock(inode->i_sb);
511
512 xahash = xattr_hash(buffer, buffer_size); 499 xahash = xattr_hash(buffer, buffer_size);
513 while (buffer_pos < buffer_size || buffer_pos == 0) { 500 while (buffer_pos < buffer_size || buffer_pos == 0) {
514 size_t chunk; 501 size_t chunk;
@@ -538,6 +525,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
538 rxh->h_hash = cpu_to_le32(xahash); 525 rxh->h_hash = cpu_to_le32(xahash);
539 } 526 }
540 527
528 reiserfs_write_lock(inode->i_sb);
541 err = __reiserfs_write_begin(page, page_offset, chunk + skip); 529 err = __reiserfs_write_begin(page, page_offset, chunk + skip);
542 if (!err) { 530 if (!err) {
543 if (buffer) 531 if (buffer)
@@ -546,6 +534,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
546 page_offset + chunk + 534 page_offset + chunk +
547 skip); 535 skip);
548 } 536 }
537 reiserfs_write_unlock(inode->i_sb);
549 unlock_page(page); 538 unlock_page(page);
550 reiserfs_put_page(page); 539 reiserfs_put_page(page);
551 buffer_pos += chunk; 540 buffer_pos += chunk;
@@ -563,10 +552,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
563 .ia_valid = ATTR_SIZE | ATTR_CTIME, 552 .ia_valid = ATTR_SIZE | ATTR_CTIME,
564 }; 553 };
565 554
566 reiserfs_write_unlock(inode->i_sb);
567 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR); 555 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
568 inode_dio_wait(dentry->d_inode); 556 inode_dio_wait(dentry->d_inode);
569 reiserfs_write_lock(inode->i_sb);
570 557
571 err = reiserfs_setattr(dentry, &newattrs); 558 err = reiserfs_setattr(dentry, &newattrs);
572 mutex_unlock(&dentry->d_inode->i_mutex); 559 mutex_unlock(&dentry->d_inode->i_mutex);
@@ -592,18 +579,19 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
592 579
593 reiserfs_write_lock(inode->i_sb); 580 reiserfs_write_lock(inode->i_sb);
594 error = journal_begin(&th, inode->i_sb, jbegin_count); 581 error = journal_begin(&th, inode->i_sb, jbegin_count);
582 reiserfs_write_unlock(inode->i_sb);
595 if (error) { 583 if (error) {
596 reiserfs_write_unlock(inode->i_sb);
597 return error; 584 return error;
598 } 585 }
599 586
600 error = reiserfs_xattr_set_handle(&th, inode, name, 587 error = reiserfs_xattr_set_handle(&th, inode, name,
601 buffer, buffer_size, flags); 588 buffer, buffer_size, flags);
602 589
590 reiserfs_write_lock(inode->i_sb);
603 error2 = journal_end(&th, inode->i_sb, jbegin_count); 591 error2 = journal_end(&th, inode->i_sb, jbegin_count);
592 reiserfs_write_unlock(inode->i_sb);
604 if (error == 0) 593 if (error == 0)
605 error = error2; 594 error = error2;
606 reiserfs_write_unlock(inode->i_sb);
607 595
608 return error; 596 return error;
609} 597}
@@ -968,7 +956,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
968 int err = 0; 956 int err = 0;
969 957
970 /* If we don't have the privroot located yet - go find it */ 958 /* If we don't have the privroot located yet - go find it */
971 reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s); 959 mutex_lock(&s->s_root->d_inode->i_mutex);
972 dentry = lookup_one_len(PRIVROOT_NAME, s->s_root, 960 dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
973 strlen(PRIVROOT_NAME)); 961 strlen(PRIVROOT_NAME));
974 if (!IS_ERR(dentry)) { 962 if (!IS_ERR(dentry)) {
@@ -996,14 +984,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
996 goto error; 984 goto error;
997 985
998 if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) { 986 if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
999 reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s); 987 mutex_lock(&s->s_root->d_inode->i_mutex);
1000 err = create_privroot(REISERFS_SB(s)->priv_root); 988 err = create_privroot(REISERFS_SB(s)->priv_root);
1001 mutex_unlock(&s->s_root->d_inode->i_mutex); 989 mutex_unlock(&s->s_root->d_inode->i_mutex);
1002 } 990 }
1003 991
1004 if (privroot->d_inode) { 992 if (privroot->d_inode) {
1005 s->s_xattr = reiserfs_xattr_handlers; 993 s->s_xattr = reiserfs_xattr_handlers;
1006 reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s); 994 mutex_lock(&privroot->d_inode->i_mutex);
1007 if (!REISERFS_SB(s)->xattr_root) { 995 if (!REISERFS_SB(s)->xattr_root) {
1008 struct dentry *dentry; 996 struct dentry *dentry;
1009 dentry = lookup_one_len(XAROOT_NAME, privroot, 997 dentry = lookup_one_len(XAROOT_NAME, privroot,
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 6c8767fdfc6a..06c04f73da65 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -49,13 +49,15 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
49 49
50 reiserfs_write_lock(inode->i_sb); 50 reiserfs_write_lock(inode->i_sb);
51 error = journal_begin(&th, inode->i_sb, jcreate_blocks); 51 error = journal_begin(&th, inode->i_sb, jcreate_blocks);
52 reiserfs_write_unlock(inode->i_sb);
52 if (error == 0) { 53 if (error == 0) {
53 error = reiserfs_set_acl(&th, inode, type, acl); 54 error = reiserfs_set_acl(&th, inode, type, acl);
55 reiserfs_write_lock(inode->i_sb);
54 error2 = journal_end(&th, inode->i_sb, jcreate_blocks); 56 error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
57 reiserfs_write_unlock(inode->i_sb);
55 if (error2) 58 if (error2)
56 error = error2; 59 error = error2;
57 } 60 }
58 reiserfs_write_unlock(inode->i_sb);
59 61
60 release_and_out: 62 release_and_out:
61 posix_acl_release(acl); 63 posix_acl_release(acl);
@@ -435,12 +437,14 @@ int reiserfs_cache_default_acl(struct inode *inode)
435 return nblocks; 437 return nblocks;
436} 438}
437 439
440/*
441 * Called under i_mutex
442 */
438int reiserfs_acl_chmod(struct inode *inode) 443int reiserfs_acl_chmod(struct inode *inode)
439{ 444{
440 struct reiserfs_transaction_handle th; 445 struct reiserfs_transaction_handle th;
441 struct posix_acl *acl; 446 struct posix_acl *acl;
442 size_t size; 447 size_t size;
443 int depth;
444 int error; 448 int error;
445 449
446 if (IS_PRIVATE(inode)) 450 if (IS_PRIVATE(inode))
@@ -454,9 +458,7 @@ int reiserfs_acl_chmod(struct inode *inode)
454 return 0; 458 return 0;
455 } 459 }
456 460
457 reiserfs_write_unlock(inode->i_sb);
458 acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS); 461 acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
459 reiserfs_write_lock(inode->i_sb);
460 if (!acl) 462 if (!acl)
461 return 0; 463 return 0;
462 if (IS_ERR(acl)) 464 if (IS_ERR(acl))
@@ -466,16 +468,18 @@ int reiserfs_acl_chmod(struct inode *inode)
466 return error; 468 return error;
467 469
468 size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(acl->a_count)); 470 size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(acl->a_count));
469 depth = reiserfs_write_lock_once(inode->i_sb); 471 reiserfs_write_lock(inode->i_sb);
470 error = journal_begin(&th, inode->i_sb, size * 2); 472 error = journal_begin(&th, inode->i_sb, size * 2);
473 reiserfs_write_unlock(inode->i_sb);
471 if (!error) { 474 if (!error) {
472 int error2; 475 int error2;
473 error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS, acl); 476 error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS, acl);
477 reiserfs_write_lock(inode->i_sb);
474 error2 = journal_end(&th, inode->i_sb, size * 2); 478 error2 = journal_end(&th, inode->i_sb, size * 2);
479 reiserfs_write_unlock(inode->i_sb);
475 if (error2) 480 if (error2)
476 error = error2; 481 error = error2;
477 } 482 }
478 reiserfs_write_unlock_once(inode->i_sb, depth);
479 posix_acl_release(acl); 483 posix_acl_release(acl);
480 return error; 484 return error;
481} 485}
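The fs/reiserfs/xattr_acl.c hunks above all converge on one locking shape: the reiserfs write lock is held only across journal_begin() and journal_end(), and is dropped while the ACL update itself runs, replacing the old reiserfs_write_lock_once()/reiserfs_write_unlock_once() depth tracking. A minimal sketch of that shape follows, assuming a hypothetical journalled_update() helper and do_update() callback (those names are illustrative and not part of the patch; journal_begin(), journal_end() and the write-lock calls are the real functions used in the hunks):

/*
 * Sketch of the locking pattern the xattr_acl.c hunks converge on:
 * the per-superblock write lock brackets only the journal calls,
 * and the actual update runs without it.  Hypothetical helper.
 */
static int journalled_update(struct inode *inode, int blocks,
			     int (*do_update)(struct reiserfs_transaction_handle *,
					      struct inode *))
{
	struct reiserfs_transaction_handle th;
	int err, err2;

	reiserfs_write_lock(inode->i_sb);
	err = journal_begin(&th, inode->i_sb, blocks);
	reiserfs_write_unlock(inode->i_sb);
	if (err)
		return err;

	err = do_update(&th, inode);	/* runs without the write lock */

	reiserfs_write_lock(inode->i_sb);
	err2 = journal_end(&th, inode->i_sb, blocks);
	reiserfs_write_unlock(inode->i_sb);

	return err ? err : err2;
}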
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9ac4057a86c9..839a2bad7f45 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -630,6 +630,12 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
630 struct udf_sb_info *sbi = UDF_SB(sb); 630 struct udf_sb_info *sbi = UDF_SB(sb);
631 int error = 0; 631 int error = 0;
632 632
633 if (sbi->s_lvid_bh) {
634 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
635 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
636 return -EACCES;
637 }
638
633 uopt.flags = sbi->s_flags; 639 uopt.flags = sbi->s_flags;
634 uopt.uid = sbi->s_uid; 640 uopt.uid = sbi->s_uid;
635 uopt.gid = sbi->s_gid; 641 uopt.gid = sbi->s_gid;
@@ -649,12 +655,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
649 sbi->s_dmode = uopt.dmode; 655 sbi->s_dmode = uopt.dmode;
650 write_unlock(&sbi->s_cred_lock); 656 write_unlock(&sbi->s_cred_lock);
651 657
652 if (sbi->s_lvid_bh) {
653 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
654 if (write_rev > UDF_MAX_WRITE_VERSION)
655 *flags |= MS_RDONLY;
656 }
657
658 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) 658 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
659 goto out_unlock; 659 goto out_unlock;
660 660
@@ -843,27 +843,38 @@ static int udf_find_fileset(struct super_block *sb,
843 return 1; 843 return 1;
844} 844}
845 845
846/*
847 * Load primary Volume Descriptor Sequence
848 *
849 * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
850 * should be tried.
851 */
846static int udf_load_pvoldesc(struct super_block *sb, sector_t block) 852static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
847{ 853{
848 struct primaryVolDesc *pvoldesc; 854 struct primaryVolDesc *pvoldesc;
849 struct ustr *instr, *outstr; 855 struct ustr *instr, *outstr;
850 struct buffer_head *bh; 856 struct buffer_head *bh;
851 uint16_t ident; 857 uint16_t ident;
852 int ret = 1; 858 int ret = -ENOMEM;
853 859
854 instr = kmalloc(sizeof(struct ustr), GFP_NOFS); 860 instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
855 if (!instr) 861 if (!instr)
856 return 1; 862 return -ENOMEM;
857 863
858 outstr = kmalloc(sizeof(struct ustr), GFP_NOFS); 864 outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
859 if (!outstr) 865 if (!outstr)
860 goto out1; 866 goto out1;
861 867
862 bh = udf_read_tagged(sb, block, block, &ident); 868 bh = udf_read_tagged(sb, block, block, &ident);
863 if (!bh) 869 if (!bh) {
870 ret = -EAGAIN;
864 goto out2; 871 goto out2;
872 }
865 873
866 BUG_ON(ident != TAG_IDENT_PVD); 874 if (ident != TAG_IDENT_PVD) {
875 ret = -EIO;
876 goto out_bh;
877 }
867 878
868 pvoldesc = (struct primaryVolDesc *)bh->b_data; 879 pvoldesc = (struct primaryVolDesc *)bh->b_data;
869 880
@@ -889,8 +900,9 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
889 if (udf_CS0toUTF8(outstr, instr)) 900 if (udf_CS0toUTF8(outstr, instr))
890 udf_debug("volSetIdent[] = '%s'\n", outstr->u_name); 901 udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
891 902
892 brelse(bh);
893 ret = 0; 903 ret = 0;
904out_bh:
905 brelse(bh);
894out2: 906out2:
895 kfree(outstr); 907 kfree(outstr);
896out1: 908out1:
@@ -947,7 +959,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
947 959
948 if (mdata->s_mirror_fe == NULL) { 960 if (mdata->s_mirror_fe == NULL) {
949 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); 961 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
950 goto error_exit; 962 return -EIO;
951 } 963 }
952 } 964 }
953 965
@@ -964,23 +976,18 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
964 addr.logicalBlockNum, addr.partitionReferenceNum); 976 addr.logicalBlockNum, addr.partitionReferenceNum);
965 977
966 mdata->s_bitmap_fe = udf_iget(sb, &addr); 978 mdata->s_bitmap_fe = udf_iget(sb, &addr);
967
968 if (mdata->s_bitmap_fe == NULL) { 979 if (mdata->s_bitmap_fe == NULL) {
969 if (sb->s_flags & MS_RDONLY) 980 if (sb->s_flags & MS_RDONLY)
970 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); 981 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
971 else { 982 else {
972 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); 983 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
973 goto error_exit; 984 return -EIO;
974 } 985 }
975 } 986 }
976 } 987 }
977 988
978 udf_debug("udf_load_metadata_files Ok\n"); 989 udf_debug("udf_load_metadata_files Ok\n");
979
980 return 0; 990 return 0;
981
982error_exit:
983 return 1;
984} 991}
985 992
986static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, 993static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1069,7 +1076,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
1069 if (!map->s_uspace.s_table) { 1076 if (!map->s_uspace.s_table) {
1070 udf_debug("cannot load unallocSpaceTable (part %d)\n", 1077 udf_debug("cannot load unallocSpaceTable (part %d)\n",
1071 p_index); 1078 p_index);
1072 return 1; 1079 return -EIO;
1073 } 1080 }
1074 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; 1081 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1075 udf_debug("unallocSpaceTable (part %d) @ %ld\n", 1082 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
@@ -1079,7 +1086,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
1079 if (phd->unallocSpaceBitmap.extLength) { 1086 if (phd->unallocSpaceBitmap.extLength) {
1080 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); 1087 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1081 if (!bitmap) 1088 if (!bitmap)
1082 return 1; 1089 return -ENOMEM;
1083 map->s_uspace.s_bitmap = bitmap; 1090 map->s_uspace.s_bitmap = bitmap;
1084 bitmap->s_extPosition = le32_to_cpu( 1091 bitmap->s_extPosition = le32_to_cpu(
1085 phd->unallocSpaceBitmap.extPosition); 1092 phd->unallocSpaceBitmap.extPosition);
@@ -1102,7 +1109,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
1102 if (!map->s_fspace.s_table) { 1109 if (!map->s_fspace.s_table) {
1103 udf_debug("cannot load freedSpaceTable (part %d)\n", 1110 udf_debug("cannot load freedSpaceTable (part %d)\n",
1104 p_index); 1111 p_index);
1105 return 1; 1112 return -EIO;
1106 } 1113 }
1107 1114
1108 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE; 1115 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
@@ -1113,7 +1120,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
1113 if (phd->freedSpaceBitmap.extLength) { 1120 if (phd->freedSpaceBitmap.extLength) {
1114 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); 1121 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1115 if (!bitmap) 1122 if (!bitmap)
1116 return 1; 1123 return -ENOMEM;
1117 map->s_fspace.s_bitmap = bitmap; 1124 map->s_fspace.s_bitmap = bitmap;
1118 bitmap->s_extPosition = le32_to_cpu( 1125 bitmap->s_extPosition = le32_to_cpu(
1119 phd->freedSpaceBitmap.extPosition); 1126 phd->freedSpaceBitmap.extPosition);
@@ -1165,7 +1172,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1165 udf_find_vat_block(sb, p_index, type1_index, blocks - 1); 1172 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1166 } 1173 }
1167 if (!sbi->s_vat_inode) 1174 if (!sbi->s_vat_inode)
1168 return 1; 1175 return -EIO;
1169 1176
1170 if (map->s_partition_type == UDF_VIRTUAL_MAP15) { 1177 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1171 map->s_type_specific.s_virtual.s_start_offset = 0; 1178 map->s_type_specific.s_virtual.s_start_offset = 0;
@@ -1177,7 +1184,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1177 pos = udf_block_map(sbi->s_vat_inode, 0); 1184 pos = udf_block_map(sbi->s_vat_inode, 0);
1178 bh = sb_bread(sb, pos); 1185 bh = sb_bread(sb, pos);
1179 if (!bh) 1186 if (!bh)
1180 return 1; 1187 return -EIO;
1181 vat20 = (struct virtualAllocationTable20 *)bh->b_data; 1188 vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1182 } else { 1189 } else {
1183 vat20 = (struct virtualAllocationTable20 *) 1190 vat20 = (struct virtualAllocationTable20 *)
@@ -1195,6 +1202,12 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1195 return 0; 1202 return 0;
1196} 1203}
1197 1204
1205/*
1206 * Load partition descriptor block
1207 *
1208 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1209 * sequence.
1210 */
1198static int udf_load_partdesc(struct super_block *sb, sector_t block) 1211static int udf_load_partdesc(struct super_block *sb, sector_t block)
1199{ 1212{
1200 struct buffer_head *bh; 1213 struct buffer_head *bh;
@@ -1204,13 +1217,15 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1204 int i, type1_idx; 1217 int i, type1_idx;
1205 uint16_t partitionNumber; 1218 uint16_t partitionNumber;
1206 uint16_t ident; 1219 uint16_t ident;
1207 int ret = 0; 1220 int ret;
1208 1221
1209 bh = udf_read_tagged(sb, block, block, &ident); 1222 bh = udf_read_tagged(sb, block, block, &ident);
1210 if (!bh) 1223 if (!bh)
1211 return 1; 1224 return -EAGAIN;
1212 if (ident != TAG_IDENT_PD) 1225 if (ident != TAG_IDENT_PD) {
1226 ret = 0;
1213 goto out_bh; 1227 goto out_bh;
1228 }
1214 1229
1215 p = (struct partitionDesc *)bh->b_data; 1230 p = (struct partitionDesc *)bh->b_data;
1216 partitionNumber = le16_to_cpu(p->partitionNumber); 1231 partitionNumber = le16_to_cpu(p->partitionNumber);
@@ -1229,10 +1244,13 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1229 if (i >= sbi->s_partitions) { 1244 if (i >= sbi->s_partitions) {
1230 udf_debug("Partition (%d) not found in partition map\n", 1245 udf_debug("Partition (%d) not found in partition map\n",
1231 partitionNumber); 1246 partitionNumber);
1247 ret = 0;
1232 goto out_bh; 1248 goto out_bh;
1233 } 1249 }
1234 1250
1235 ret = udf_fill_partdesc_info(sb, p, i); 1251 ret = udf_fill_partdesc_info(sb, p, i);
1252 if (ret < 0)
1253 goto out_bh;
1236 1254
1237 /* 1255 /*
1238 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and 1256 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
@@ -1249,32 +1267,37 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1249 break; 1267 break;
1250 } 1268 }
1251 1269
1252 if (i >= sbi->s_partitions) 1270 if (i >= sbi->s_partitions) {
1271 ret = 0;
1253 goto out_bh; 1272 goto out_bh;
1273 }
1254 1274
1255 ret = udf_fill_partdesc_info(sb, p, i); 1275 ret = udf_fill_partdesc_info(sb, p, i);
1256 if (ret) 1276 if (ret < 0)
1257 goto out_bh; 1277 goto out_bh;
1258 1278
1259 if (map->s_partition_type == UDF_METADATA_MAP25) { 1279 if (map->s_partition_type == UDF_METADATA_MAP25) {
1260 ret = udf_load_metadata_files(sb, i); 1280 ret = udf_load_metadata_files(sb, i);
1261 if (ret) { 1281 if (ret < 0) {
1262 udf_err(sb, "error loading MetaData partition map %d\n", 1282 udf_err(sb, "error loading MetaData partition map %d\n",
1263 i); 1283 i);
1264 goto out_bh; 1284 goto out_bh;
1265 } 1285 }
1266 } else { 1286 } else {
1267 ret = udf_load_vat(sb, i, type1_idx);
1268 if (ret)
1269 goto out_bh;
1270 /* 1287 /*
1271 * Mark filesystem read-only if we have a partition with 1288 * If we have a partition with virtual map, we don't handle
1272 * virtual map since we don't handle writing to it (we 1289 * writing to it (we overwrite blocks instead of relocating
1273 * overwrite blocks instead of relocating them). 1290 * them).
1274 */ 1291 */
1275 sb->s_flags |= MS_RDONLY; 1292 if (!(sb->s_flags & MS_RDONLY)) {
1276 pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n"); 1293 ret = -EACCES;
1294 goto out_bh;
1295 }
1296 ret = udf_load_vat(sb, i, type1_idx);
1297 if (ret < 0)
1298 goto out_bh;
1277 } 1299 }
1300 ret = 0;
1278out_bh: 1301out_bh:
1279 /* In case loading failed, we handle cleanup in udf_fill_super */ 1302 /* In case loading failed, we handle cleanup in udf_fill_super */
1280 brelse(bh); 1303 brelse(bh);
@@ -1340,11 +1363,11 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1340 uint16_t ident; 1363 uint16_t ident;
1341 struct buffer_head *bh; 1364 struct buffer_head *bh;
1342 unsigned int table_len; 1365 unsigned int table_len;
1343 int ret = 0; 1366 int ret;
1344 1367
1345 bh = udf_read_tagged(sb, block, block, &ident); 1368 bh = udf_read_tagged(sb, block, block, &ident);
1346 if (!bh) 1369 if (!bh)
1347 return 1; 1370 return -EAGAIN;
1348 BUG_ON(ident != TAG_IDENT_LVD); 1371 BUG_ON(ident != TAG_IDENT_LVD);
1349 lvd = (struct logicalVolDesc *)bh->b_data; 1372 lvd = (struct logicalVolDesc *)bh->b_data;
1350 table_len = le32_to_cpu(lvd->mapTableLength); 1373 table_len = le32_to_cpu(lvd->mapTableLength);
@@ -1352,7 +1375,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1352 udf_err(sb, "error loading logical volume descriptor: " 1375 udf_err(sb, "error loading logical volume descriptor: "
1353 "Partition table too long (%u > %lu)\n", table_len, 1376 "Partition table too long (%u > %lu)\n", table_len,
1354 sb->s_blocksize - sizeof(*lvd)); 1377 sb->s_blocksize - sizeof(*lvd));
1355 ret = 1; 1378 ret = -EIO;
1356 goto out_bh; 1379 goto out_bh;
1357 } 1380 }
1358 1381
@@ -1396,11 +1419,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1396 } else if (!strncmp(upm2->partIdent.ident, 1419 } else if (!strncmp(upm2->partIdent.ident,
1397 UDF_ID_SPARABLE, 1420 UDF_ID_SPARABLE,
1398 strlen(UDF_ID_SPARABLE))) { 1421 strlen(UDF_ID_SPARABLE))) {
1399 if (udf_load_sparable_map(sb, map, 1422 ret = udf_load_sparable_map(sb, map,
1400 (struct sparablePartitionMap *)gpm) < 0) { 1423 (struct sparablePartitionMap *)gpm);
1401 ret = 1; 1424 if (ret < 0)
1402 goto out_bh; 1425 goto out_bh;
1403 }
1404 } else if (!strncmp(upm2->partIdent.ident, 1426 } else if (!strncmp(upm2->partIdent.ident,
1405 UDF_ID_METADATA, 1427 UDF_ID_METADATA,
1406 strlen(UDF_ID_METADATA))) { 1428 strlen(UDF_ID_METADATA))) {
@@ -1465,7 +1487,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1465 } 1487 }
1466 if (lvd->integritySeqExt.extLength) 1488 if (lvd->integritySeqExt.extLength)
1467 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); 1489 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1468 1490 ret = 0;
1469out_bh: 1491out_bh:
1470 brelse(bh); 1492 brelse(bh);
1471 return ret; 1493 return ret;
@@ -1503,22 +1525,18 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
1503} 1525}
1504 1526
1505/* 1527/*
1506 * udf_process_sequence 1528 * Process a main/reserve volume descriptor sequence.
1507 * 1529 * @block First block of first extent of the sequence.
1508 * PURPOSE 1530 * @lastblock Lastblock of first extent of the sequence.
1509 * Process a main/reserve volume descriptor sequence. 1531 * @fileset There we store extent containing root fileset
1510 *
1511 * PRE-CONDITIONS
1512 * sb Pointer to _locked_ superblock.
1513 * block First block of first extent of the sequence.
1514 * lastblock Lastblock of first extent of the sequence.
1515 * 1532 *
1516 * HISTORY 1533 * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1517 * July 1, 1997 - Andrew E. Mileski 1534 * sequence
1518 * Written, tested, and released.
1519 */ 1535 */
1520static noinline int udf_process_sequence(struct super_block *sb, long block, 1536static noinline int udf_process_sequence(
1521 long lastblock, struct kernel_lb_addr *fileset) 1537 struct super_block *sb,
1538 sector_t block, sector_t lastblock,
1539 struct kernel_lb_addr *fileset)
1522{ 1540{
1523 struct buffer_head *bh = NULL; 1541 struct buffer_head *bh = NULL;
1524 struct udf_vds_record vds[VDS_POS_LENGTH]; 1542 struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1529,6 +1547,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
1529 uint32_t vdsn; 1547 uint32_t vdsn;
1530 uint16_t ident; 1548 uint16_t ident;
1531 long next_s = 0, next_e = 0; 1549 long next_s = 0, next_e = 0;
1550 int ret;
1532 1551
1533 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1552 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1534 1553
@@ -1543,7 +1562,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
1543 udf_err(sb, 1562 udf_err(sb,
1544 "Block %llu of volume descriptor sequence is corrupted or we could not read it\n", 1563 "Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
1545 (unsigned long long)block); 1564 (unsigned long long)block);
1546 return 1; 1565 return -EAGAIN;
1547 } 1566 }
1548 1567
1549 /* Process each descriptor (ISO 13346 3/8.3-8.4) */ 1568 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
@@ -1616,14 +1635,19 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
1616 */ 1635 */
1617 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) { 1636 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1618 udf_err(sb, "Primary Volume Descriptor not found!\n"); 1637 udf_err(sb, "Primary Volume Descriptor not found!\n");
1619 return 1; 1638 return -EAGAIN;
1639 }
1640 ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
1641 if (ret < 0)
1642 return ret;
1643
1644 if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1645 ret = udf_load_logicalvol(sb,
1646 vds[VDS_POS_LOGICAL_VOL_DESC].block,
1647 fileset);
1648 if (ret < 0)
1649 return ret;
1620 } 1650 }
1621 if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
1622 return 1;
1623
1624 if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
1625 vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
1626 return 1;
1627 1651
1628 if (vds[VDS_POS_PARTITION_DESC].block) { 1652 if (vds[VDS_POS_PARTITION_DESC].block) {
1629 /* 1653 /*
@@ -1632,19 +1656,27 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
1632 */ 1656 */
1633 for (block = vds[VDS_POS_PARTITION_DESC].block; 1657 for (block = vds[VDS_POS_PARTITION_DESC].block;
1634 block < vds[VDS_POS_TERMINATING_DESC].block; 1658 block < vds[VDS_POS_TERMINATING_DESC].block;
1635 block++) 1659 block++) {
1636 if (udf_load_partdesc(sb, block)) 1660 ret = udf_load_partdesc(sb, block);
1637 return 1; 1661 if (ret < 0)
1662 return ret;
1663 }
1638 } 1664 }
1639 1665
1640 return 0; 1666 return 0;
1641} 1667}
1642 1668
1669/*
1670 * Load Volume Descriptor Sequence described by anchor in bh
1671 *
1672 * Returns <0 on error, 0 on success
1673 */
1643static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh, 1674static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1644 struct kernel_lb_addr *fileset) 1675 struct kernel_lb_addr *fileset)
1645{ 1676{
1646 struct anchorVolDescPtr *anchor; 1677 struct anchorVolDescPtr *anchor;
1647 long main_s, main_e, reserve_s, reserve_e; 1678 sector_t main_s, main_e, reserve_s, reserve_e;
1679 int ret;
1648 1680
1649 anchor = (struct anchorVolDescPtr *)bh->b_data; 1681 anchor = (struct anchorVolDescPtr *)bh->b_data;
1650 1682
@@ -1662,18 +1694,26 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1662 1694
1663 /* Process the main & reserve sequences */ 1695 /* Process the main & reserve sequences */
1664 /* responsible for finding the PartitionDesc(s) */ 1696 /* responsible for finding the PartitionDesc(s) */
1665 if (!udf_process_sequence(sb, main_s, main_e, fileset)) 1697 ret = udf_process_sequence(sb, main_s, main_e, fileset);
1666 return 1; 1698 if (ret != -EAGAIN)
1667 udf_sb_free_partitions(sb); 1699 return ret;
1668 if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
1669 return 1;
1670 udf_sb_free_partitions(sb); 1700 udf_sb_free_partitions(sb);
1671 return 0; 1701 ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1702 if (ret < 0) {
1703 udf_sb_free_partitions(sb);
1704 /* No sequence was OK, return -EIO */
1705 if (ret == -EAGAIN)
1706 ret = -EIO;
1707 }
1708 return ret;
1672} 1709}
1673 1710
1674/* 1711/*
1675 * Check whether there is an anchor block in the given block and 1712 * Check whether there is an anchor block in the given block and
1676 * load Volume Descriptor Sequence if so. 1713 * load Volume Descriptor Sequence if so.
1714 *
1715 * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1716 * block
1677 */ 1717 */
1678static int udf_check_anchor_block(struct super_block *sb, sector_t block, 1718static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1679 struct kernel_lb_addr *fileset) 1719 struct kernel_lb_addr *fileset)
@@ -1685,33 +1725,40 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1685 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) && 1725 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1686 udf_fixed_to_variable(block) >= 1726 udf_fixed_to_variable(block) >=
1687 sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits) 1727 sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
1688 return 0; 1728 return -EAGAIN;
1689 1729
1690 bh = udf_read_tagged(sb, block, block, &ident); 1730 bh = udf_read_tagged(sb, block, block, &ident);
1691 if (!bh) 1731 if (!bh)
1692 return 0; 1732 return -EAGAIN;
1693 if (ident != TAG_IDENT_AVDP) { 1733 if (ident != TAG_IDENT_AVDP) {
1694 brelse(bh); 1734 brelse(bh);
1695 return 0; 1735 return -EAGAIN;
1696 } 1736 }
1697 ret = udf_load_sequence(sb, bh, fileset); 1737 ret = udf_load_sequence(sb, bh, fileset);
1698 brelse(bh); 1738 brelse(bh);
1699 return ret; 1739 return ret;
1700} 1740}
1701 1741
1702/* Search for an anchor volume descriptor pointer */ 1742/*
1703static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock, 1743 * Search for an anchor volume descriptor pointer.
1704 struct kernel_lb_addr *fileset) 1744 *
1745 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1746 * of anchors.
1747 */
1748static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1749 struct kernel_lb_addr *fileset)
1705{ 1750{
1706 sector_t last[6]; 1751 sector_t last[6];
1707 int i; 1752 int i;
1708 struct udf_sb_info *sbi = UDF_SB(sb); 1753 struct udf_sb_info *sbi = UDF_SB(sb);
1709 int last_count = 0; 1754 int last_count = 0;
1755 int ret;
1710 1756
1711 /* First try user provided anchor */ 1757 /* First try user provided anchor */
1712 if (sbi->s_anchor) { 1758 if (sbi->s_anchor) {
1713 if (udf_check_anchor_block(sb, sbi->s_anchor, fileset)) 1759 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1714 return lastblock; 1760 if (ret != -EAGAIN)
1761 return ret;
1715 } 1762 }
1716 /* 1763 /*
1717 * according to spec, anchor is in either: 1764 * according to spec, anchor is in either:
@@ -1720,39 +1767,46 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
1720 * lastblock 1767 * lastblock
1721 * however, if the disc isn't closed, it could be 512. 1768 * however, if the disc isn't closed, it could be 512.
1722 */ 1769 */
1723 if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset)) 1770 ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1724 return lastblock; 1771 if (ret != -EAGAIN)
1772 return ret;
1725 /* 1773 /*
1726 * The trouble is which block is the last one. Drives often misreport 1774 * The trouble is which block is the last one. Drives often misreport
1727 * this so we try various possibilities. 1775 * this so we try various possibilities.
1728 */ 1776 */
1729 last[last_count++] = lastblock; 1777 last[last_count++] = *lastblock;
1730 if (lastblock >= 1) 1778 if (*lastblock >= 1)
1731 last[last_count++] = lastblock - 1; 1779 last[last_count++] = *lastblock - 1;
1732 last[last_count++] = lastblock + 1; 1780 last[last_count++] = *lastblock + 1;
1733 if (lastblock >= 2) 1781 if (*lastblock >= 2)
1734 last[last_count++] = lastblock - 2; 1782 last[last_count++] = *lastblock - 2;
1735 if (lastblock >= 150) 1783 if (*lastblock >= 150)
1736 last[last_count++] = lastblock - 150; 1784 last[last_count++] = *lastblock - 150;
1737 if (lastblock >= 152) 1785 if (*lastblock >= 152)
1738 last[last_count++] = lastblock - 152; 1786 last[last_count++] = *lastblock - 152;
1739 1787
1740 for (i = 0; i < last_count; i++) { 1788 for (i = 0; i < last_count; i++) {
1741 if (last[i] >= sb->s_bdev->bd_inode->i_size >> 1789 if (last[i] >= sb->s_bdev->bd_inode->i_size >>
1742 sb->s_blocksize_bits) 1790 sb->s_blocksize_bits)
1743 continue; 1791 continue;
1744 if (udf_check_anchor_block(sb, last[i], fileset)) 1792 ret = udf_check_anchor_block(sb, last[i], fileset);
1745 return last[i]; 1793 if (ret != -EAGAIN) {
1794 if (!ret)
1795 *lastblock = last[i];
1796 return ret;
1797 }
1746 if (last[i] < 256) 1798 if (last[i] < 256)
1747 continue; 1799 continue;
1748 if (udf_check_anchor_block(sb, last[i] - 256, fileset)) 1800 ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1749 return last[i]; 1801 if (ret != -EAGAIN) {
1802 if (!ret)
1803 *lastblock = last[i];
1804 return ret;
1805 }
1750 } 1806 }
1751 1807
1752 /* Finally try block 512 in case media is open */ 1808 /* Finally try block 512 in case media is open */
1753 if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset)) 1809 return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1754 return last[0];
1755 return 0;
1756} 1810}
1757 1811
1758/* 1812/*
@@ -1760,54 +1814,59 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
1760 * area specified by it. The function expects sbi->s_lastblock to be the last 1814 * area specified by it. The function expects sbi->s_lastblock to be the last
1761 * block on the media. 1815 * block on the media.
1762 * 1816 *
1763 * Return 1 if ok, 0 if not found. 1817 * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1764 * 1818 * was not found.
1765 */ 1819 */
1766static int udf_find_anchor(struct super_block *sb, 1820static int udf_find_anchor(struct super_block *sb,
1767 struct kernel_lb_addr *fileset) 1821 struct kernel_lb_addr *fileset)
1768{ 1822{
1769 sector_t lastblock;
1770 struct udf_sb_info *sbi = UDF_SB(sb); 1823 struct udf_sb_info *sbi = UDF_SB(sb);
1824 sector_t lastblock = sbi->s_last_block;
1825 int ret;
1771 1826
1772 lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset); 1827 ret = udf_scan_anchors(sb, &lastblock, fileset);
1773 if (lastblock) 1828 if (ret != -EAGAIN)
1774 goto out; 1829 goto out;
1775 1830
1776 /* No anchor found? Try VARCONV conversion of block numbers */ 1831 /* No anchor found? Try VARCONV conversion of block numbers */
1777 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 1832 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1833 lastblock = udf_variable_to_fixed(sbi->s_last_block);
1778 /* Firstly, we try to not convert number of the last block */ 1834 /* Firstly, we try to not convert number of the last block */
1779 lastblock = udf_scan_anchors(sb, 1835 ret = udf_scan_anchors(sb, &lastblock, fileset);
1780 udf_variable_to_fixed(sbi->s_last_block), 1836 if (ret != -EAGAIN)
1781 fileset);
1782 if (lastblock)
1783 goto out; 1837 goto out;
1784 1838
1839 lastblock = sbi->s_last_block;
1785 /* Secondly, we try with converted number of the last block */ 1840 /* Secondly, we try with converted number of the last block */
1786 lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset); 1841 ret = udf_scan_anchors(sb, &lastblock, fileset);
1787 if (!lastblock) { 1842 if (ret < 0) {
1788 /* VARCONV didn't help. Clear it. */ 1843 /* VARCONV didn't help. Clear it. */
1789 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV); 1844 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1790 return 0;
1791 } 1845 }
1792out: 1846out:
1793 sbi->s_last_block = lastblock; 1847 if (ret == 0)
1794 return 1; 1848 sbi->s_last_block = lastblock;
1849 return ret;
1795} 1850}
1796 1851
1797/* 1852/*
1798 * Check Volume Structure Descriptor, find Anchor block and load Volume 1853 * Check Volume Structure Descriptor, find Anchor block and load Volume
1799 * Descriptor Sequence 1854 * Descriptor Sequence.
1855 *
1856 * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1857 * block was not found.
1800 */ 1858 */
1801static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, 1859static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1802 int silent, struct kernel_lb_addr *fileset) 1860 int silent, struct kernel_lb_addr *fileset)
1803{ 1861{
1804 struct udf_sb_info *sbi = UDF_SB(sb); 1862 struct udf_sb_info *sbi = UDF_SB(sb);
1805 loff_t nsr_off; 1863 loff_t nsr_off;
1864 int ret;
1806 1865
1807 if (!sb_set_blocksize(sb, uopt->blocksize)) { 1866 if (!sb_set_blocksize(sb, uopt->blocksize)) {
1808 if (!silent) 1867 if (!silent)
1809 udf_warn(sb, "Bad block size\n"); 1868 udf_warn(sb, "Bad block size\n");
1810 return 0; 1869 return -EINVAL;
1811 } 1870 }
1812 sbi->s_last_block = uopt->lastblock; 1871 sbi->s_last_block = uopt->lastblock;
1813 if (!uopt->novrs) { 1872 if (!uopt->novrs) {
@@ -1828,12 +1887,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1828 1887
1829 /* Look for anchor block and load Volume Descriptor Sequence */ 1888 /* Look for anchor block and load Volume Descriptor Sequence */
1830 sbi->s_anchor = uopt->anchor; 1889 sbi->s_anchor = uopt->anchor;
1831 if (!udf_find_anchor(sb, fileset)) { 1890 ret = udf_find_anchor(sb, fileset);
1832 if (!silent) 1891 if (ret < 0) {
1892 if (!silent && ret == -EAGAIN)
1833 udf_warn(sb, "No anchor found\n"); 1893 udf_warn(sb, "No anchor found\n");
1834 return 0; 1894 return ret;
1835 } 1895 }
1836 return 1; 1896 return 0;
1837} 1897}
1838 1898
1839static void udf_open_lvid(struct super_block *sb) 1899static void udf_open_lvid(struct super_block *sb)
@@ -1939,7 +1999,7 @@ u64 lvid_get_unique_id(struct super_block *sb)
1939 1999
1940static int udf_fill_super(struct super_block *sb, void *options, int silent) 2000static int udf_fill_super(struct super_block *sb, void *options, int silent)
1941{ 2001{
1942 int ret; 2002 int ret = -EINVAL;
1943 struct inode *inode = NULL; 2003 struct inode *inode = NULL;
1944 struct udf_options uopt; 2004 struct udf_options uopt;
1945 struct kernel_lb_addr rootdir, fileset; 2005 struct kernel_lb_addr rootdir, fileset;
@@ -2011,7 +2071,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2011 } else { 2071 } else {
2012 uopt.blocksize = bdev_logical_block_size(sb->s_bdev); 2072 uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2013 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2073 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2014 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { 2074 if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
2015 if (!silent) 2075 if (!silent)
2016 pr_notice("Rescanning with blocksize %d\n", 2076 pr_notice("Rescanning with blocksize %d\n",
2017 UDF_DEFAULT_BLOCKSIZE); 2077 UDF_DEFAULT_BLOCKSIZE);
@@ -2021,8 +2081,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2021 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2081 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2022 } 2082 }
2023 } 2083 }
2024 if (!ret) { 2084 if (ret < 0) {
2025 udf_warn(sb, "No partition found (1)\n"); 2085 if (ret == -EAGAIN) {
2086 udf_warn(sb, "No partition found (1)\n");
2087 ret = -EINVAL;
2088 }
2026 goto error_out; 2089 goto error_out;
2027 } 2090 }
2028 2091
@@ -2040,9 +2103,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2040 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2103 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2041 le16_to_cpu(lvidiu->minUDFReadRev), 2104 le16_to_cpu(lvidiu->minUDFReadRev),
2042 UDF_MAX_READ_VERSION); 2105 UDF_MAX_READ_VERSION);
2106 ret = -EINVAL;
2107 goto error_out;
2108 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
2109 !(sb->s_flags & MS_RDONLY)) {
2110 ret = -EACCES;
2043 goto error_out; 2111 goto error_out;
2044 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) 2112 }
2045 sb->s_flags |= MS_RDONLY;
2046 2113
2047 sbi->s_udfrev = minUDFWriteRev; 2114 sbi->s_udfrev = minUDFWriteRev;
2048 2115
@@ -2054,17 +2121,20 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2054 2121
2055 if (!sbi->s_partitions) { 2122 if (!sbi->s_partitions) {
2056 udf_warn(sb, "No partition found (2)\n"); 2123 udf_warn(sb, "No partition found (2)\n");
2124 ret = -EINVAL;
2057 goto error_out; 2125 goto error_out;
2058 } 2126 }
2059 2127
2060 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & 2128 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2061 UDF_PART_FLAG_READ_ONLY) { 2129 UDF_PART_FLAG_READ_ONLY &&
2062 pr_notice("Partition marked readonly; forcing readonly mount\n"); 2130 !(sb->s_flags & MS_RDONLY)) {
2063 sb->s_flags |= MS_RDONLY; 2131 ret = -EACCES;
2132 goto error_out;
2064 } 2133 }
2065 2134
2066 if (udf_find_fileset(sb, &fileset, &rootdir)) { 2135 if (udf_find_fileset(sb, &fileset, &rootdir)) {
2067 udf_warn(sb, "No fileset found\n"); 2136 udf_warn(sb, "No fileset found\n");
2137 ret = -EINVAL;
2068 goto error_out; 2138 goto error_out;
2069 } 2139 }
2070 2140
@@ -2086,6 +2156,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2086 if (!inode) { 2156 if (!inode) {
2087 udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n", 2157 udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
2088 rootdir.logicalBlockNum, rootdir.partitionReferenceNum); 2158 rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2159 ret = -EIO;
2089 goto error_out; 2160 goto error_out;
2090 } 2161 }
2091 2162
@@ -2093,6 +2164,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2093 sb->s_root = d_make_root(inode); 2164 sb->s_root = d_make_root(inode);
2094 if (!sb->s_root) { 2165 if (!sb->s_root) {
2095 udf_err(sb, "Couldn't allocate root dentry\n"); 2166 udf_err(sb, "Couldn't allocate root dentry\n");
2167 ret = -ENOMEM;
2096 goto error_out; 2168 goto error_out;
2097 } 2169 }
2098 sb->s_maxbytes = MAX_LFS_FILESIZE; 2170 sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -2113,7 +2185,7 @@ error_out:
2113 kfree(sbi); 2185 kfree(sbi);
2114 sb->s_fs_info = NULL; 2186 sb->s_fs_info = NULL;
2115 2187
2116 return -EINVAL; 2188 return ret;
2117} 2189}
2118 2190
2119void _udf_err(struct super_block *sb, const char *function, 2191void _udf_err(struct super_block *sb, const char *function,
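Throughout the fs/udf/super.c hunks above, the descriptor-loading helpers switch from returning 1/0 to the usual 0-or-negative-errno convention, with -EAGAIN reserved for "this candidate was not usable, try the next one" (next anchor block, next descriptor sequence) and mapped to -EIO or -EINVAL only once every candidate has been exhausted. A minimal sketch of a caller loop under that convention is below; scan_candidates() and cand[] are hypothetical names, while udf_check_anchor_block() is the real helper from the patch:

/*
 * Sketch of the -EAGAIN convention used by the udf hunks:
 * 0 = success, -EAGAIN = candidate not usable (keep scanning),
 * any other negative errno = hard failure that aborts the scan.
 */
static int scan_candidates(struct super_block *sb,
			   const sector_t *cand, int ncand,
			   struct kernel_lb_addr *fileset)
{
	int i, ret;

	for (i = 0; i < ncand; i++) {
		ret = udf_check_anchor_block(sb, cand[i], fileset);
		if (ret != -EAGAIN)	/* success or hard error: stop */
			return ret;
	}
	return -EAGAIN;			/* nothing found; caller may map this to -EIO */
}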
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 8685d1be12c7..31229e0be90b 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -57,16 +57,13 @@
57#define JBD_EXPENSIVE_CHECKING 57#define JBD_EXPENSIVE_CHECKING
58extern u8 journal_enable_debug; 58extern u8 journal_enable_debug;
59 59
60#define jbd_debug(n, f, a...) \ 60void __jbd_debug(int level, const char *file, const char *func,
61 do { \ 61 unsigned int line, const char *fmt, ...);
62 if ((n) <= journal_enable_debug) { \ 62
63 printk (KERN_DEBUG "(%s, %d): %s: ", \ 63#define jbd_debug(n, fmt, a...) \
64 __FILE__, __LINE__, __func__); \ 64 __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
65 printk (f, ## a); \
66 } \
67 } while (0)
68#else 65#else
69#define jbd_debug(f, a...) /**/ 66#define jbd_debug(n, fmt, a...) /**/
70#endif 67#endif
71 68
72static inline void *jbd_alloc(size_t size, gfp_t flags) 69static inline void *jbd_alloc(size_t size, gfp_t flags)
@@ -77,7 +74,7 @@ static inline void *jbd_alloc(size_t size, gfp_t flags)
77static inline void jbd_free(void *ptr, size_t size) 74static inline void jbd_free(void *ptr, size_t size)
78{ 75{
79 free_pages((unsigned long)ptr, get_order(size)); 76 free_pages((unsigned long)ptr, get_order(size));
80}; 77}
81 78
82#define JFS_MIN_JOURNAL_BLOCKS 1024 79#define JFS_MIN_JOURNAL_BLOCKS 1024
83 80
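The include/linux/jbd.h hunk above turns jbd_debug() into a thin macro over an out-of-line __jbd_debug() helper, so the level check and the file/function/line decoration move out of every call site. A plausible body for that helper is sketched below; the real definition lives in fs/jbd/journal.c (listed in the diffstat but not shown in this excerpt), so the exact format string and the use of struct va_format/%pV are assumptions:

/*
 * Sketch only: one possible out-of-line body matching the
 * __jbd_debug() declaration added in the hunk above.
 */
void __jbd_debug(int level, const char *file, const char *func,
		 unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (level > journal_enable_debug)
		return;
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* the caller's fmt is expected to carry its own newline */
	printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf);
	va_end(args);
}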