author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-13 19:45:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-13 19:45:40 -0400
commit		cec997093bbff881c3da49084dfba4f76361e96a (patch)
tree		7c84f8c30ceef7209a18d7cd216a3c16536008c5 /fs/reiserfs
parent		8d2d441ac4af223eae466c3c31ff737cc31a1411 (diff)
parent		01777836c87081e4f68c4a43c9abe6114805f91e (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull quota, reiserfs, UDF updates from Jan Kara:
 "Scalability improvements for quota, a few reiserfs fixes, and couple
  of misc cleanups (udf, ext2)"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  reiserfs: Fix use after free in journal teardown
  reiserfs: fix corruption introduced by balance_leaf refactor
  udf: avoid redundant memcpy when writing data in ICB
  fs/udf: re-use hex_asc_upper_{hi,lo} macros
  fs/quota: kernel-doc warning fixes
  udf: use linux/uaccess.h
  fs/ext2/super.c: Drop memory allocation cast
  quota: remove dqptr_sem
  quota: simplify remove_inode_dquot_ref()
  quota: avoid unnecessary dqget()/dqput() calls
  quota: protect Q_GETFMT by dqonoff_mutex
Diffstat (limited to 'fs/reiserfs')
-rw-r--r--	fs/reiserfs/do_balan.c	111
-rw-r--r--	fs/reiserfs/journal.c	22
-rw-r--r--	fs/reiserfs/lbalance.c	5
-rw-r--r--	fs/reiserfs/reiserfs.h	9
-rw-r--r--	fs/reiserfs/super.c	6
5 files changed, 92 insertions, 61 deletions
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 5739cb99de7b..9c02d96d3a42 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 	return 0;
 }
 
-static void balance_leaf_insert_left(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
+					     struct item_head *const ih,
+					     const char * const body)
 {
 	int ret;
 	struct buffer_info bi;
 	int n = B_NR_ITEMS(tb->L[0]);
+	unsigned body_shift_bytes = 0;
 
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
 		/* part of new item falls into L[0] */
@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 
 		put_ih_item_len(ih, new_item_len);
 		if (tb->lbytes > tb->zeroes_num) {
-			body += (tb->lbytes - tb->zeroes_num);
+			body_shift_bytes = tb->lbytes - tb->zeroes_num;
 			tb->zeroes_num = 0;
 		} else
 			tb->zeroes_num -= tb->lbytes;
@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
 		tb->insert_size[0] = 0;
 		tb->zeroes_num = 0;
 	}
+	return body_shift_bytes;
 }
 
 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
-						 struct item_head *ih,
-						 const char *body)
+						 struct item_head * const ih,
+						 const char * const body)
 {
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
 	tb->pos_in_item -= tb->lbytes;
 }
 
-static void balance_leaf_paste_left_shift(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
+						  struct item_head * const ih,
+						  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
 	struct buffer_info bi;
+	int body_shift_bytes = 0;
 
 	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
 		balance_leaf_paste_left_shift_dirent(tb, ih, body);
-		return;
+		return 0;
 	}
 
 	RFALSE(tb->lbytes <= 0,
@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 		 * insert_size[0]
 		 */
 		if (l_n > tb->zeroes_num) {
-			body += (l_n - tb->zeroes_num);
+			body_shift_bytes = l_n - tb->zeroes_num;
 			tb->zeroes_num = 0;
 		} else
 			tb->zeroes_num -= l_n;
@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
 		 */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
 	}
+	return body_shift_bytes;
 }
 
 
 /* appended item will be in L[0] in whole */
 static void balance_leaf_paste_left_whole(struct tree_balance *tb,
-					  struct item_head *ih,
-					  const char *body)
+					  struct item_head * const ih,
+					  const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tb->L[0]);
@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
 	tb->zeroes_num = 0;
 }
 
-static void balance_leaf_paste_left(struct tree_balance *tb,
-				    struct item_head *ih, const char *body)
+static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	/* we must shift the part of the appended item */
 	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
-		balance_leaf_paste_left_shift(tb, ih, body);
+		return balance_leaf_paste_left_shift(tb, ih, body);
 	else
 		balance_leaf_paste_left_whole(tb, ih, body);
+	return 0;
 }
 
 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
-static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
-			      const char *body, int flag)
+static unsigned int balance_leaf_left(struct tree_balance *tb,
+				      struct item_head * const ih,
+				      const char * const body, int flag)
 {
 	if (tb->lnum[0] <= 0)
-		return;
+		return 0;
 
 	/* new item or it part falls to L[0], shift it too */
 	if (tb->item_pos < tb->lnum[0]) {
 		BUG_ON(flag != M_INSERT && flag != M_PASTE);
 
 		if (flag == M_INSERT)
-			balance_leaf_insert_left(tb, ih, body);
+			return balance_leaf_insert_left(tb, ih, body);
 		else /* M_PASTE */
-			balance_leaf_paste_left(tb, ih, body);
+			return balance_leaf_paste_left(tb, ih, body);
 	} else
 		/* new item doesn't fall into L[0] */
 		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+	return 0;
 }
 
 
 static void balance_leaf_insert_right(struct tree_balance *tb,
-				      struct item_head *ih, const char *body)
+				      struct item_head * const ih,
+				      const char * const body)
 {
 
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
 
 
 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right_shift(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n_shift, n_rem, r_zeroes_number, version;
@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right_whole(struct tree_balance *tb,
-				     struct item_head *ih, const char *body)
+				     struct item_head * const ih,
+				     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
 }
 
 static void balance_leaf_paste_right(struct tree_balance *tb,
-			     struct item_head *ih, const char *body)
+			     struct item_head * const ih,
+			     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	int n = B_NR_ITEMS(tbS0);
@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb,
 }
 
 /* shift rnum[0] items from S[0] to the right neighbor R[0] */
-static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
-			       const char *body, int flag)
+static void balance_leaf_right(struct tree_balance *tb,
+			       struct item_head * const ih,
+			       const char * const body, int flag)
 {
 	if (tb->rnum[0] <= 0)
 		return;
@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
 }
 
 static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
-					   struct item_head *ih,
-					   const char *body,
+					   struct item_head * const ih,
+					   const char * const body,
 					   struct item_head *insert_key,
 					   struct buffer_head **insert_ptr,
 					   int i)
@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
 
 /* we append to directory item */
 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
-						 struct item_head *ih,
-						 const char *body,
+						 struct item_head * const ih,
+						 const char * const body,
 						 struct item_head *insert_key,
 						 struct buffer_head **insert_ptr,
 						 int i)
@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
-						struct item_head *ih,
-						const char *body,
+						struct item_head * const ih,
+						const char * const body,
 						struct item_head *insert_key,
 						struct buffer_head **insert_ptr,
 						int i)
@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
 }
 
 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
-						struct item_head *ih,
-						const char *body,
+						struct item_head * const ih,
+						const char * const body,
 						struct item_head *insert_key,
 						struct buffer_head **insert_ptr,
 						int i)
@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
 
 }
 static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
-					 struct item_head *ih,
-					 const char *body,
+					 struct item_head * const ih,
+					 const char * const body,
 					 struct item_head *insert_key,
 					 struct buffer_head **insert_ptr,
 					 int i)
@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
 
 /* Fill new nodes that appear in place of S[0] */
 static void balance_leaf_new_nodes(struct tree_balance *tb,
-				   struct item_head *ih,
-				   const char *body,
+				   struct item_head * const ih,
+				   const char * const body,
 				   struct item_head *insert_key,
 				   struct buffer_head **insert_ptr,
 				   int flag)
@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_insert(struct tree_balance *tb,
-					     struct item_head *ih,
-					     const char *body)
+					     struct item_head * const ih,
+					     const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
-						   struct item_head *ih,
-						   const char *body)
+						   struct item_head * const ih,
+						   const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct item_head *pasted = item_head(tbS0, tb->item_pos);
@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
 }
 
 static void balance_leaf_finish_node_paste(struct tree_balance *tb,
-					    struct item_head *ih,
-					    const char *body)
+					    struct item_head * const ih,
+					    const char * const body)
 {
 	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
 	struct buffer_info bi;
@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
  * of the affected item which remains in S
  */
 static void balance_leaf_finish_node(struct tree_balance *tb,
-				     struct item_head *ih,
-				     const char *body, int flag)
+				     struct item_head * const ih,
+				     const char * const body, int flag)
 {
 	/* if we must insert or append into buffer S[0] */
 	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
 	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
 		tb->pos_in_item *= UNFM_P_SIZE;
 
-	balance_leaf_left(tb, ih, body, flag);
+	body += balance_leaf_left(tb, ih, body, flag);
 
 	/* tb->lnum[0] > 0 */
 	/* Calculate new item position */
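The corruption fix above comes down to a plain C pitfall: `body` is passed by value, so a helper that did `body += n` only moved its own copy of the pointer, and balance_leaf() kept using the unshifted `body`. The refactored left-shift helpers now report how many bytes they consumed and the caller advances `body` once. A minimal standalone sketch of that pattern follows; the names consume_prefix and process are hypothetical and not part of reiserfs.

#include <stddef.h>
#include <stdio.h>

/* Returns the number of bytes of 'body' it consumed. */
static size_t consume_prefix(const char *const body, size_t want, size_t have)
{
	size_t n = want < have ? want : have;

	/* A real helper would copy n bytes of body somewhere useful. */
	printf("consumed %zu bytes: %.*s\n", n, (int)n, body);
	return n;
}

static void process(const char *body, size_t len)
{
	/*
	 * The caller advances the pointer by the reported amount, just as
	 * balance_leaf() now does with balance_leaf_left()'s return value.
	 * Writing "body += n" inside consume_prefix() would only move that
	 * function's private copy of the pointer.
	 */
	body += consume_prefix(body, 4, len);
	printf("remaining: %s\n", body);
}

int main(void)
{
	process("abcdefgh", 8);
	return 0;
}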
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e8870de4627e..a88b1b3e7db3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 		}
 	}
 
-	/* wait for all commits to finish */
-	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
 
 	/*
 	 * We must release the write lock here because
@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 	 */
 	reiserfs_write_unlock(sb);
 
+	/*
+	 * Cancel flushing of old commits. Note that neither of these works
+	 * will be requeued because superblock is being shutdown and doesn't
+	 * have MS_ACTIVE set.
+	 */
 	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
-	flush_workqueue(REISERFS_SB(sb)->commit_wq);
+	/* wait for all commits to finish */
+	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
 
 	free_journal_ram(sb);
 
@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
 	if (flush) {
 		flush_commit_list(sb, jl, 1);
 		flush_journal_list(sb, jl, 1);
-	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
-		queue_delayed_work(REISERFS_SB(sb)->commit_wq,
-				   &journal->j_work, HZ / 10);
+	} else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
+		/*
+		 * Avoid queueing work when sb is being shut down. Transaction
+		 * will be flushed on journal shutdown.
+		 */
+		if (sb->s_flags & MS_ACTIVE)
+			queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+					   &journal->j_work, HZ / 10);
+	}
 
 	/*
 	 * if the next transaction has any chance of wrapping, flush
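The journal.c and super.c changes in this merge follow one delayed-work teardown pattern: first make sure no new work can be queued (here the MS_ACTIVE check on the superblock), then cancel_delayed_work_sync() so a running instance finishes and nothing re-arms it before the backing memory is freed. A minimal sketch of that pattern as a standalone module, where the shutting_down flag stands in for the MS_ACTIVE test; none of these names are reiserfs code, it is only an illustration of the ordering.

#include <linux/module.h>
#include <linux/workqueue.h>

static bool shutting_down;

static void demo_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_work_fn);

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * Re-arm only while the object is still live; once shutting_down
	 * is set, the cancel_delayed_work_sync() below is final.
	 */
	if (!READ_ONCE(shutting_down))
		schedule_delayed_work(&demo_work, HZ / 10);
}

static int __init demo_init(void)
{
	schedule_delayed_work(&demo_work, HZ / 10);
	return 0;
}

static void __exit demo_exit(void)
{
	WRITE_ONCE(shutting_down, true);
	/*
	 * As in do_journal_release(): wait for a running instance and
	 * guarantee the work is neither pending nor executing afterwards.
	 */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");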
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 814dda3ec998..249594a821e0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
 
 /* insert item into the leaf node in position before */
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number)
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body,
+			  int zeros_number)
 {
 	struct buffer_head *bh = bi->bi_bh;
 	int nr, free_space;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index bf53888c7f59..735c2c2b4536 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
 		       int del_num, int del_bytes);
 void leaf_insert_into_buf(struct buffer_info *bi, int before,
-			  struct item_head *inserted_item_ih,
-			  const char *inserted_item_body, int zeros_number);
-void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-			  int pos_in_item, int paste_size, const char *body,
+			  struct item_head * const inserted_item_ih,
+			  const char * const inserted_item_body,
 			  int zeros_number);
+void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
+			  int pos_in_item, int paste_size,
+			  const char * const body, int zeros_number);
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
 			  int pos_in_item, int cut_size);
 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 709ea92d716f..d46e88a33b02 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 	unsigned long delay;
 
-	if (s->s_flags & MS_RDONLY)
+	/*
+	 * Avoid scheduling flush when sb is being shut down. It can race
+	 * with journal shutdown and free still queued delayed work.
+	 */
+	if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
 		return;
 
 	spin_lock(&sbi->old_work_lock);