Diffstat (limited to 'fs/ocfs2/journal.c')
-rw-r--r--	fs/ocfs2/journal.c	176
1 file changed, 59 insertions(+), 117 deletions(-)
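Note: the patch below converts the mlog() debug output in fs/ocfs2/journal.c into trace_ocfs2_*() tracepoint calls declared in the newly included ocfs2_trace.h. For orientation only, the following is a minimal, hypothetical sketch of the standard TRACE_EVENT() pattern that a call such as trace_ocfs2_commit_cache_begin(flushed) is typically generated from; it is an assumption for illustration, not the actual contents of fs/ocfs2/ocfs2_trace.h.

/*
 * Hypothetical sketch (NOT the real ocfs2_trace.h): a TRACE_EVENT()
 * definition like this expands into the trace_ocfs2_commit_cache_begin()
 * static inline used by the patch below.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_commit_cache_begin,
	/* single argument: number of pending transactions being flushed */
	TP_PROTO(unsigned int num_trans),
	TP_ARGS(num_trans),
	TP_STRUCT__entry(
		__field(unsigned int, num_trans)
	),
	TP_fast_assign(
		__entry->num_trans = num_trans;
	),
	TP_printk("num_trans %u", __entry->num_trans)
);

#endif /* _TRACE_OCFS2_H */

/* Real trace headers also set TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE here. */
#include <trace/define_trace.h>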
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 9b57c0350ff9..295d56454e8b 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -31,7 +31,6 @@
 #include <linux/time.h>
 #include <linux/random.h>
 
-#define MLOG_MASK_PREFIX ML_JOURNAL
 #include <cluster/masklog.h>
 
 #include "ocfs2.h"
@@ -52,6 +51,7 @@
 #include "quota.h"
 
 #include "buffer_head_io.h"
+#include "ocfs2_trace.h"
 
 DEFINE_SPINLOCK(trans_inc_lock);
 
@@ -301,19 +301,17 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
 {
	int status = 0;
	unsigned int flushed;
-	unsigned long old_id;
	struct ocfs2_journal *journal = NULL;
 
-	mlog_entry_void();
-
	journal = osb->journal;
 
	/* Flush all pending commits and checkpoint the journal. */
	down_write(&journal->j_trans_barrier);
 
-	if (atomic_read(&journal->j_num_trans) == 0) {
+	flushed = atomic_read(&journal->j_num_trans);
+	trace_ocfs2_commit_cache_begin(flushed);
+	if (flushed == 0) {
		up_write(&journal->j_trans_barrier);
-		mlog(0, "No transactions for me to flush!\n");
		goto finally;
	}
 
@@ -326,25 +324,20 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
		goto finally;
	}
 
-	old_id = ocfs2_inc_trans_id(journal);
+	ocfs2_inc_trans_id(journal);
 
	flushed = atomic_read(&journal->j_num_trans);
	atomic_set(&journal->j_num_trans, 0);
	up_write(&journal->j_trans_barrier);
 
-	mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
-	     journal->j_trans_id, flushed);
+	trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
 
	ocfs2_wake_downconvert_thread(osb);
	wake_up(&journal->j_checkpointed);
 finally:
-	mlog_exit(status);
	return status;
 }
 
-/* pass it NULL and it will allocate a new handle object for you. If
- * you pass it a handle however, it may still return error, in which
- * case it has free'd the passed handle for you. */
 handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
 {
	journal_t *journal = osb->journal->j_journal;
@@ -429,9 +422,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
		return 0;
 
	old_nblocks = handle->h_buffer_credits;
-	mlog_entry_void();
 
-	mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
+	trace_ocfs2_extend_trans(old_nblocks, nblocks);
 
 #ifdef CONFIG_OCFS2_DEBUG_FS
	status = 1;
@@ -444,9 +436,7 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
 #endif
 
	if (status > 0) {
-		mlog(0,
-		     "jbd2_journal_extend failed, trying "
-		     "jbd2_journal_restart\n");
+		trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
		status = jbd2_journal_restart(handle,
					      old_nblocks + nblocks);
		if (status < 0) {
@@ -457,8 +447,6 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
 
	status = 0;
 bail:
-
-	mlog_exit(status);
	return status;
 }
 
@@ -626,12 +614,9 @@ static int __ocfs2_journal_access(handle_t *handle,
	BUG_ON(!handle);
	BUG_ON(!bh);
 
-	mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
-		   (unsigned long long)bh->b_blocknr, type,
-		   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
-		   "OCFS2_JOURNAL_ACCESS_CREATE" :
-		   "OCFS2_JOURNAL_ACCESS_WRITE",
-		   bh->b_size);
+	trace_ocfs2_journal_access(
+		(unsigned long long)ocfs2_metadata_cache_owner(ci),
+		(unsigned long long)bh->b_blocknr, type, bh->b_size);
 
	/* we can safely remove this assertion after testing. */
	if (!buffer_uptodate(bh)) {
@@ -672,7 +657,6 @@ static int __ocfs2_journal_access(handle_t *handle,
		mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
		     status, type);
 
-	mlog_exit(status);
	return status;
 }
 
@@ -741,13 +725,10 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
 {
	int status;
 
-	mlog_entry("(bh->b_blocknr=%llu)\n",
-		   (unsigned long long)bh->b_blocknr);
+	trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
 
	status = jbd2_journal_dirty_metadata(handle, bh);
	BUG_ON(status);
-
-	mlog_exit_void();
 }
 
 #define OCFS2_DEFAULT_COMMIT_INTERVAL	(HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
@@ -779,8 +760,6 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
	struct ocfs2_super *osb;
	int inode_lock = 0;
 
-	mlog_entry_void();
-
	BUG_ON(!journal);
 
	osb = journal->j_osb;
@@ -824,10 +803,9 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
		goto done;
	}
 
-	mlog(0, "inode->i_size = %lld\n", inode->i_size);
-	mlog(0, "inode->i_blocks = %llu\n",
-	     (unsigned long long)inode->i_blocks);
-	mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
+	trace_ocfs2_journal_init(inode->i_size,
+				 (unsigned long long)inode->i_blocks,
+				 OCFS2_I(inode)->ip_clusters);
 
	/* call the kernels journal init function now */
	j_journal = jbd2_journal_init_inode(inode);
@@ -837,8 +815,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
		goto done;
	}
 
-	mlog(0, "Returned from jbd2_journal_init_inode\n");
-	mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
+	trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
 
	*dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
		  OCFS2_JOURNAL_DIRTY_FL);
@@ -863,7 +840,6 @@ done:
		}
	}
 
-	mlog_exit(status);
	return status;
 }
 
@@ -886,8 +862,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
	struct buffer_head *bh = journal->j_bh;
	struct ocfs2_dinode *fe;
 
-	mlog_entry_void();
-
	fe = (struct ocfs2_dinode *)bh->b_data;
 
	/* The journal bh on the osb always comes from ocfs2_journal_init()
@@ -910,7 +884,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
	if (status < 0)
		mlog_errno(status);
 
-	mlog_exit(status);
	return status;
 }
 
@@ -925,8 +898,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
	struct inode *inode = NULL;
	int num_running_trans = 0;
 
-	mlog_entry_void();
-
	BUG_ON(!osb);
 
	journal = osb->journal;
@@ -943,10 +914,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
		BUG();
 
	num_running_trans = atomic_read(&(osb->journal->j_num_trans));
-	if (num_running_trans > 0)
-		mlog(0, "Shutting down journal: must wait on %d "
-		     "running transactions!\n",
-		     num_running_trans);
+	trace_ocfs2_journal_shutdown(num_running_trans);
 
	/* Do a commit_cache here. It will flush our journal, *and*
	 * release any locks that are still held.
@@ -959,7 +927,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
	 * completely destroy the journal. */
	if (osb->commit_task) {
		/* Wait for the commit thread */
-		mlog(0, "Waiting for ocfs2commit to exit....\n");
+		trace_ocfs2_journal_shutdown_wait(osb->commit_task);
		kthread_stop(osb->commit_task);
		osb->commit_task = NULL;
	}
@@ -1002,7 +970,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
 done:
	if (inode)
		iput(inode);
-	mlog_exit_void();
 }
 
 static void ocfs2_clear_journal_error(struct super_block *sb,
@@ -1028,8 +995,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
	int status = 0;
	struct ocfs2_super *osb;
 
-	mlog_entry_void();
-
	BUG_ON(!journal);
 
	osb = journal->j_osb;
@@ -1063,7 +1028,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
		osb->commit_task = NULL;
 
 done:
-	mlog_exit(status);
	return status;
 }
 
@@ -1074,8 +1038,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
 {
	int status;
 
-	mlog_entry_void();
-
	BUG_ON(!journal);
 
	status = jbd2_journal_wipe(journal->j_journal, full);
@@ -1089,7 +1051,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
		mlog_errno(status);
 
 bail:
-	mlog_exit(status);
	return status;
 }
 
@@ -1128,8 +1089,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
 #define CONCURRENT_JOURNAL_FILL 32ULL
	struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
 
-	mlog_entry_void();
-
	memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
 
	num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
@@ -1165,7 +1124,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
 bail:
	for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
		brelse(bhs[i]);
-	mlog_exit(status);
	return status;
 }
 
@@ -1189,7 +1147,7 @@ struct ocfs2_la_recovery_item {
  */
 void ocfs2_complete_recovery(struct work_struct *work)
 {
-	int ret;
+	int ret = 0;
	struct ocfs2_journal *journal =
		container_of(work, struct ocfs2_journal, j_recovery_work);
	struct ocfs2_super *osb = journal->j_osb;
@@ -1198,9 +1156,8 @@ void ocfs2_complete_recovery(struct work_struct *work)
	struct ocfs2_quota_recovery *qrec;
	LIST_HEAD(tmp_la_list);
 
-	mlog_entry_void();
-
-	mlog(0, "completing recovery from keventd\n");
+	trace_ocfs2_complete_recovery(
+		(unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
 
	spin_lock(&journal->j_lock);
	list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
@@ -1209,15 +1166,18 @@ void ocfs2_complete_recovery(struct work_struct *work)
	list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
		list_del_init(&item->lri_list);
 
-		mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
-
		ocfs2_wait_on_quotas(osb);
 
		la_dinode = item->lri_la_dinode;
-		if (la_dinode) {
-			mlog(0, "Clean up local alloc %llu\n",
-			     (unsigned long long)le64_to_cpu(la_dinode->i_blkno));
+		tl_dinode = item->lri_tl_dinode;
+		qrec = item->lri_qrec;
 
+		trace_ocfs2_complete_recovery_slot(item->lri_slot,
+			la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
+			tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
+			qrec);
+
+		if (la_dinode) {
			ret = ocfs2_complete_local_alloc_recovery(osb,
								  la_dinode);
			if (ret < 0)
@@ -1226,11 +1186,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
			kfree(la_dinode);
		}
 
-		tl_dinode = item->lri_tl_dinode;
		if (tl_dinode) {
-			mlog(0, "Clean up truncate log %llu\n",
-			     (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
-
			ret = ocfs2_complete_truncate_log_recovery(osb,
								   tl_dinode);
			if (ret < 0)
@@ -1243,9 +1199,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
			if (ret < 0)
				mlog_errno(ret);
 
-		qrec = item->lri_qrec;
		if (qrec) {
-			mlog(0, "Recovering quota files");
			ret = ocfs2_finish_quota_recovery(osb, qrec,
							  item->lri_slot);
			if (ret < 0)
@@ -1256,8 +1210,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
		kfree(item);
	}
 
-	mlog(0, "Recovery completion\n");
-	mlog_exit_void();
+	trace_ocfs2_complete_recovery_end(ret);
 }
 
 /* NOTE: This function always eats your references to la_dinode and
@@ -1307,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
 {
	struct ocfs2_journal *journal = osb->journal;
 
+	if (ocfs2_is_hard_readonly(osb))
+		return;
+
	/* No need to queue up our truncate_log as regular cleanup will catch
	 * that */
	ocfs2_queue_recovery_completion(journal, osb->slot_num,
@@ -1343,8 +1299,6 @@ static int __ocfs2_recovery_thread(void *arg)
	int rm_quota_used = 0, i;
	struct ocfs2_quota_recovery *qrec;
 
-	mlog_entry_void();
-
	status = ocfs2_wait_on_mount(osb);
	if (status < 0) {
		goto bail;
@@ -1376,15 +1330,12 @@ restart:
	 * clear it until ocfs2_recover_node() has succeeded. */
	node_num = rm->rm_entries[0];
	spin_unlock(&osb->osb_lock);
-	mlog(0, "checking node %d\n", node_num);
	slot_num = ocfs2_node_num_to_slot(osb, node_num);
+	trace_ocfs2_recovery_thread_node(node_num, slot_num);
	if (slot_num == -ENOENT) {
		status = 0;
-		mlog(0, "no slot for this node, so no recovery"
-		     "required.\n");
		goto skip_recovery;
	}
-	mlog(0, "node %d was using slot %d\n", node_num, slot_num);
 
	/* It is a bit subtle with quota recovery. We cannot do it
	 * immediately because we have to obtain cluster locks from
@@ -1411,7 +1362,7 @@ skip_recovery:
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
-	mlog(0, "All nodes recovered\n");
+	trace_ocfs2_recovery_thread_end(status);
 
	/* Refresh all journal recovery generations from disk */
	status = ocfs2_check_journals_nolocks(osb);
@@ -1420,7 +1371,7 @@ skip_recovery:
		mlog_errno(status);
 
	/* Now it is right time to recover quotas... We have to do this under
-	 * superblock lock so that noone can start using the slot (and crash)
+	 * superblock lock so that no one can start using the slot (and crash)
	 * before we recover it */
	for (i = 0; i < rm_quota_used; i++) {
		qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
@@ -1455,7 +1406,6 @@ bail:
	if (rm_quota)
		kfree(rm_quota);
 
-	mlog_exit(status);
	/* no one is callint kthread_stop() for us so the kthread() api
	 * requires that we call do_exit(). And it isn't exported, but
	 * complete_and_exit() seems to be a minimal wrapper around it. */
@@ -1465,19 +1415,15 @@ bail:
 
 void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
 {
-	mlog_entry("(node_num=%d, osb->node_num = %d)\n",
-		   node_num, osb->node_num);
-
	mutex_lock(&osb->recovery_lock);
-	if (osb->disable_recovery)
-		goto out;
 
-	/* People waiting on recovery will wait on
-	 * the recovery map to empty. */
-	if (ocfs2_recovery_map_set(osb, node_num))
-		mlog(0, "node %d already in recovery map.\n", node_num);
+	trace_ocfs2_recovery_thread(node_num, osb->node_num,
+		osb->disable_recovery, osb->recovery_thread_task,
+		osb->disable_recovery ?
+		-1 : ocfs2_recovery_map_set(osb, node_num));
 
-	mlog(0, "starting recovery thread...\n");
+	if (osb->disable_recovery)
+		goto out;
 
	if (osb->recovery_thread_task)
		goto out;
@@ -1492,8 +1438,6 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
 out:
	mutex_unlock(&osb->recovery_lock);
	wake_up(&osb->recovery_event);
-
-	mlog_exit_void();
 }
 
 static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
@@ -1567,7 +1511,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
	 * If not, it needs recovery.
	 */
	if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
-		mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
+		trace_ocfs2_replay_journal_recovered(slot_num,
		     osb->slot_recovery_generations[slot_num], slot_reco_gen);
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		status = -EBUSY;
@@ -1578,7 +1522,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
 
	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
-		mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
+		trace_ocfs2_replay_journal_lock_err(status);
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not lock journal!\n");
		goto done;
@@ -1591,7 +1535,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
	slot_reco_gen = ocfs2_get_recovery_generation(fe);
 
	if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
-		mlog(0, "No recovery required for node %d\n", node_num);
+		trace_ocfs2_replay_journal_skip(node_num);
		/* Refresh recovery generation for the slot */
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		goto done;
@@ -1612,7 +1556,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
		goto done;
	}
 
-	mlog(0, "calling journal_init_inode\n");
	journal = jbd2_journal_init_inode(inode);
	if (journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
@@ -1632,7 +1575,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
	ocfs2_clear_journal_error(osb->sb, journal, slot_num);
 
	/* wipe the journal */
-	mlog(0, "flushing the journal.\n");
	jbd2_journal_lock_updates(journal);
	status = jbd2_journal_flush(journal);
	jbd2_journal_unlock_updates(journal);
@@ -1669,7 +1611,6 @@ done:
 
	brelse(bh);
 
-	mlog_exit(status);
	return status;
 }
 
@@ -1692,8 +1633,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;
 
-	mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
-		   node_num, slot_num, osb->node_num);
+	trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
 
	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
@@ -1702,9 +1642,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		if (status == -EBUSY) {
-			mlog(0, "Skipping recovery for slot %u (node %u) "
-			     "as another node has recovered it\n", slot_num,
-			     node_num);
+			trace_ocfs2_recover_node_skip(slot_num, node_num);
			status = 0;
			goto done;
		}
@@ -1739,7 +1677,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
	status = 0;
 done:
 
-	mlog_exit(status);
	return status;
 }
 
@@ -1812,8 +1749,8 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;
 
-		mlog(0, "Slot %u recovery generation is %u\n", i,
-		     osb->slot_recovery_generations[i]);
+		trace_ocfs2_mark_dead_nodes(i,
+					    osb->slot_recovery_generations[i]);
 
		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
@@ -1849,7 +1786,6 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
 
	status = 0;
 bail:
-	mlog_exit(status);
	return status;
 }
 
@@ -1891,6 +1827,9 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto out;
 
+	trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
+					    atomic_read(&os->os_state));
+
	status = ocfs2_orphan_scan_lock(osb, &seqno);
	if (status < 0) {
		if (status != -EAGAIN)
@@ -1920,6 +1859,8 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
 unlock:
	ocfs2_orphan_scan_unlock(osb, seqno);
 out:
+	trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
+					  atomic_read(&os->os_state));
	return;
 }
 
@@ -2003,8 +1944,7 @@ static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
	if (IS_ERR(iter))
		return 0;
 
-	mlog(0, "queue orphan %llu\n",
-	     (unsigned long long)OCFS2_I(iter)->ip_blkno);
+	trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
@@ -2120,7 +2060,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
	struct inode *iter;
	struct ocfs2_inode_info *oi;
 
-	mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);
+	trace_ocfs2_recover_orphans(slot);
 
	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode);
@@ -2133,7 +2073,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
 
	while (inode) {
		oi = OCFS2_I(inode);
-		mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);
+		trace_ocfs2_recover_orphans_iput(
+					(unsigned long long)oi->ip_blkno);
 
		iter = oi->ip_next_orphan;
 
@@ -2171,6 +2112,7 @@ static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
+		trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}