Diffstat (limited to 'fs/xfs/xfs_log_recover.c')
-rw-r--r--  fs/xfs/xfs_log_recover.c  919
1 file changed, 390 insertions(+), 529 deletions(-)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6f3f5fa37acf..04142caedb2b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@ STATIC void xlog_recover_check_summary(xlog_t *);
53#endif 53#endif
54 54
55/* 55/*
56 * This structure is used during recovery to record the buf log items which
57 * have been canceled and should not be replayed.
58 */
59struct xfs_buf_cancel {
60 xfs_daddr_t bc_blkno;
61 uint bc_len;
62 int bc_refcount;
63 struct list_head bc_list;
64};
65
66/*
56 * Sector aligned buffer routines for buffer create/read/write/access 67 * Sector aligned buffer routines for buffer create/read/write/access
57 */ 68 */
58 69
@@ -81,7 +92,7 @@ xlog_get_bp(
81 int nbblks) 92 int nbblks)
82{ 93{
83 if (!xlog_buf_bbcount_valid(log, nbblks)) { 94 if (!xlog_buf_bbcount_valid(log, nbblks)) {
84 xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 95 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
85 nbblks); 96 nbblks);
86 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 97 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
87 return NULL; 98 return NULL;
@@ -90,7 +101,7 @@ xlog_get_bp(
90 /* 101 /*
91 * We do log I/O in units of log sectors (a power-of-2 102 * We do log I/O in units of log sectors (a power-of-2
92 * multiple of the basic block size), so we round up the 103 * multiple of the basic block size), so we round up the
93 * requested size to acommodate the basic blocks required 104 * requested size to accommodate the basic blocks required
94 * for complete log sectors. 105 * for complete log sectors.
95 * 106 *
96 * In addition, the buffer may be used for a non-sector- 107 * In addition, the buffer may be used for a non-sector-
@@ -101,13 +112,14 @@ xlog_get_bp(
101 * an issue. Nor will this be a problem if the log I/O is 112 * an issue. Nor will this be a problem if the log I/O is
102 * done in basic blocks (sector size 1). But otherwise we 113 * done in basic blocks (sector size 1). But otherwise we
103 * extend the buffer by one extra log sector to ensure 114 * extend the buffer by one extra log sector to ensure
104 * there's space to accomodate this possiblility. 115 * there's space to accommodate this possibility.
105 */ 116 */
106 if (nbblks > 1 && log->l_sectBBsize > 1) 117 if (nbblks > 1 && log->l_sectBBsize > 1)
107 nbblks += log->l_sectBBsize; 118 nbblks += log->l_sectBBsize;
108 nbblks = round_up(nbblks, log->l_sectBBsize); 119 nbblks = round_up(nbblks, log->l_sectBBsize);
109 120
110 return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); 121 return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
122 BBTOB(nbblks), 0);
111} 123}
112 124
113STATIC void 125STATIC void
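[Editor's note: for illustration, a standalone sketch of the rounding described in the comment above. The values are arbitrary and round_up here is a plain integer helper, not the kernel macro.]

#include <stdio.h>

#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	int nbblks = 5;		/* requested basic blocks */
	int sectBBsize = 8;	/* basic blocks per log sector (4k sectors) */

	/* pad so a non-sector-aligned read still fits */
	if (nbblks > 1 && sectBBsize > 1)
		nbblks += sectBBsize;	/* room for a misaligned start */
	nbblks = round_up(nbblks, sectBBsize);

	printf("allocate %d basic blocks\n", nbblks);	/* prints 16 */
	return 0;
}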
@@ -148,7 +160,7 @@ xlog_bread_noalign(
148 int error; 160 int error;
149 161
150 if (!xlog_buf_bbcount_valid(log, nbblks)) { 162 if (!xlog_buf_bbcount_valid(log, nbblks)) {
151 xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 163 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
152 nbblks); 164 nbblks);
153 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 165 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
154 return EFSCORRUPTED; 166 return EFSCORRUPTED;
@@ -167,7 +179,7 @@ xlog_bread_noalign(
167 XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); 179 XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
168 180
169 xfsbdstrat(log->l_mp, bp); 181 xfsbdstrat(log->l_mp, bp);
170 error = xfs_iowait(bp); 182 error = xfs_buf_iowait(bp);
171 if (error) 183 if (error)
172 xfs_ioerror_alert("xlog_bread", log->l_mp, 184 xfs_ioerror_alert("xlog_bread", log->l_mp,
173 bp, XFS_BUF_ADDR(bp)); 185 bp, XFS_BUF_ADDR(bp));
@@ -193,6 +205,35 @@ xlog_bread(
193} 205}
194 206
195/* 207/*
208 * Read at an offset into the buffer. Returns with the buffer in it's original
209 * state regardless of the result of the read.
210 */
211STATIC int
212xlog_bread_offset(
213 xlog_t *log,
214 xfs_daddr_t blk_no, /* block to read from */
215 int nbblks, /* blocks to read */
216 xfs_buf_t *bp,
217 xfs_caddr_t offset)
218{
219 xfs_caddr_t orig_offset = XFS_BUF_PTR(bp);
220 int orig_len = bp->b_buffer_length;
221 int error, error2;
222
223 error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
224 if (error)
225 return error;
226
227 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
228
229 /* must reset buffer pointer even on error */
230 error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
231 if (error)
232 return error;
233 return error2;
234}
235
236/*
196 * Write out the buffer at the given block for the given number of blocks. 237 * Write out the buffer at the given block for the given number of blocks.
197 * The buffer is kept locked across the write and is returned locked. 238 * The buffer is kept locked across the write and is returned locked.
198 * This can only be used for synchronous log writes. 239 * This can only be used for synchronous log writes.
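[Editor's note: a userspace sketch of the pattern the new xlog_bread_offset() helper wraps — temporarily repoint the buffer at an interior offset, do the read, then restore the original pointer and length even when the read fails. The struct and function names below are made up for illustration.]

#include <stdio.h>
#include <string.h>

struct fake_buf {		/* stand-in for xfs_buf_t */
	char	*ptr;
	int	len;
};

static int fake_read(struct fake_buf *bp, int blkno, int nbytes)
{
	memset(bp->ptr, 'A' + blkno, nbytes);	/* pretend block I/O */
	return 0;
}

static int read_at_offset(struct fake_buf *bp, int blkno, int nbytes,
			  char *offset)
{
	char	*orig_ptr = bp->ptr;
	int	orig_len = bp->len;
	int	error;

	bp->ptr = offset;
	bp->len = nbytes;
	error = fake_read(bp, blkno, nbytes);

	/* must reset the buffer even on error */
	bp->ptr = orig_ptr;
	bp->len = orig_len;
	return error;
}

int main(void)
{
	char		data[64] = "";
	struct fake_buf	bp = { data, sizeof(data) };

	read_at_offset(&bp, 3, 16, data + 32);
	printf("%.16s\n", data + 32);	/* prints DDDDDDDDDDDDDDDD */
	return 0;
}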
@@ -207,7 +248,7 @@ xlog_bwrite(
207 int error; 248 int error;
208 249
209 if (!xlog_buf_bbcount_valid(log, nbblks)) { 250 if (!xlog_buf_bbcount_valid(log, nbblks)) {
210 xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 251 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
211 nbblks); 252 nbblks);
212 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 253 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
213 return EFSCORRUPTED; 254 return EFSCORRUPTED;
@@ -242,9 +283,9 @@ xlog_header_check_dump(
242 xfs_mount_t *mp, 283 xfs_mount_t *mp,
243 xlog_rec_header_t *head) 284 xlog_rec_header_t *head)
244{ 285{
245 cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n", 286 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
246 __func__, &mp->m_sb.sb_uuid, XLOG_FMT); 287 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
247 cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n", 288 xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
248 &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); 289 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
249} 290}
250#else 291#else
@@ -267,15 +308,15 @@ xlog_header_check_recover(
267 * a dirty log created in IRIX. 308 * a dirty log created in IRIX.
268 */ 309 */
269 if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { 310 if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
270 xlog_warn( 311 xfs_warn(mp,
271 "XFS: dirty log written in incompatible format - can't recover"); 312 "dirty log written in incompatible format - can't recover");
272 xlog_header_check_dump(mp, head); 313 xlog_header_check_dump(mp, head);
273 XFS_ERROR_REPORT("xlog_header_check_recover(1)", 314 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
274 XFS_ERRLEVEL_HIGH, mp); 315 XFS_ERRLEVEL_HIGH, mp);
275 return XFS_ERROR(EFSCORRUPTED); 316 return XFS_ERROR(EFSCORRUPTED);
276 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 317 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
277 xlog_warn( 318 xfs_warn(mp,
278 "XFS: dirty log entry has mismatched uuid - can't recover"); 319 "dirty log entry has mismatched uuid - can't recover");
279 xlog_header_check_dump(mp, head); 320 xlog_header_check_dump(mp, head);
280 XFS_ERROR_REPORT("xlog_header_check_recover(2)", 321 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
281 XFS_ERRLEVEL_HIGH, mp); 322 XFS_ERRLEVEL_HIGH, mp);
@@ -300,9 +341,9 @@ xlog_header_check_mount(
300 * h_fs_uuid is nil, we assume this log was last mounted 341 * h_fs_uuid is nil, we assume this log was last mounted
301 * by IRIX and continue. 342 * by IRIX and continue.
302 */ 343 */
303 xlog_warn("XFS: nil uuid in log - IRIX style log"); 344 xfs_warn(mp, "nil uuid in log - IRIX style log");
304 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 345 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
305 xlog_warn("XFS: log has mismatched uuid - can't recover"); 346 xfs_warn(mp, "log has mismatched uuid - can't recover");
306 xlog_header_check_dump(mp, head); 347 xlog_header_check_dump(mp, head);
307 XFS_ERROR_REPORT("xlog_header_check_mount", 348 XFS_ERROR_REPORT("xlog_header_check_mount",
308 XFS_ERRLEVEL_HIGH, mp); 349 XFS_ERRLEVEL_HIGH, mp);
@@ -321,12 +362,13 @@ xlog_recover_iodone(
321 * this during recovery. One strike! 362 * this during recovery. One strike!
322 */ 363 */
323 xfs_ioerror_alert("xlog_recover_iodone", 364 xfs_ioerror_alert("xlog_recover_iodone",
324 bp->b_mount, bp, XFS_BUF_ADDR(bp)); 365 bp->b_target->bt_mount, bp,
325 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); 366 XFS_BUF_ADDR(bp));
367 xfs_force_shutdown(bp->b_target->bt_mount,
368 SHUTDOWN_META_IO_ERROR);
326 } 369 }
327 bp->b_mount = NULL;
328 XFS_BUF_CLR_IODONE_FUNC(bp); 370 XFS_BUF_CLR_IODONE_FUNC(bp);
329 xfs_biodone(bp); 371 xfs_buf_ioend(bp, 0);
330} 372}
331 373
332/* 374/*
@@ -477,8 +519,8 @@ xlog_find_verify_log_record(
477 for (i = (*last_blk) - 1; i >= 0; i--) { 519 for (i = (*last_blk) - 1; i >= 0; i--) {
478 if (i < start_blk) { 520 if (i < start_blk) {
479 /* valid log record not found */ 521 /* valid log record not found */
480 xlog_warn( 522 xfs_warn(log->l_mp,
481 "XFS: Log inconsistent (didn't find previous header)"); 523 "Log inconsistent (didn't find previous header)");
482 ASSERT(0); 524 ASSERT(0);
483 error = XFS_ERROR(EIO); 525 error = XFS_ERROR(EIO);
484 goto out; 526 goto out;
@@ -578,12 +620,12 @@ xlog_find_head(
578 * mkfs etc write a dummy unmount record to a fresh 620 * mkfs etc write a dummy unmount record to a fresh
579 * log so we can store the uuid in there 621 * log so we can store the uuid in there
580 */ 622 */
581 xlog_warn("XFS: totally zeroed log"); 623 xfs_warn(log->l_mp, "totally zeroed log");
582 } 624 }
583 625
584 return 0; 626 return 0;
585 } else if (error) { 627 } else if (error) {
586 xlog_warn("XFS: empty log check failed"); 628 xfs_warn(log->l_mp, "empty log check failed");
587 return error; 629 return error;
588 } 630 }
589 631
@@ -806,7 +848,7 @@ validate_head:
806 xlog_put_bp(bp); 848 xlog_put_bp(bp);
807 849
808 if (error) 850 if (error)
809 xlog_warn("XFS: failed to find log head"); 851 xfs_warn(log->l_mp, "failed to find log head");
810 return error; 852 return error;
811} 853}
812 854
@@ -899,7 +941,7 @@ xlog_find_tail(
899 } 941 }
900 } 942 }
901 if (!found) { 943 if (!found) {
902 xlog_warn("XFS: xlog_find_tail: couldn't find sync record"); 944 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
903 ASSERT(0); 945 ASSERT(0);
904 return XFS_ERROR(EIO); 946 return XFS_ERROR(EIO);
905 } 947 }
@@ -923,12 +965,12 @@ xlog_find_tail(
923 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); 965 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
924 if (found == 2) 966 if (found == 2)
925 log->l_curr_cycle++; 967 log->l_curr_cycle++;
926 log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); 968 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
927 log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); 969 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
928 log->l_grant_reserve_cycle = log->l_curr_cycle; 970 xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
929 log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); 971 BBTOB(log->l_curr_block));
930 log->l_grant_write_cycle = log->l_curr_cycle; 972 xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
931 log->l_grant_write_bytes = BBTOB(log->l_curr_block); 973 BBTOB(log->l_curr_block));
932 974
933 /* 975 /*
934 * Look for unmount record. If we find it, then we know there 976 * Look for unmount record. If we find it, then we know there
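[Editor's note: the tail and last-sync LSNs, and via xlog_assign_grant_head() the grant heads, become single 64-bit values here so they can be read and updated atomically. A sketch of the packing this relies on, assuming cycle in the upper 32 bits and block/byte count in the lower 32; the real helpers live outside this file.]

#include <stdint.h>
#include <stdio.h>

/* Sketch only: not the kernel's xlog_assign_lsn()/xlog_crack_*() helpers. */
static uint64_t pack_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

static void crack_lsn(uint64_t lsn, uint32_t *cycle, uint32_t *block)
{
	*cycle = (uint32_t)(lsn >> 32);
	*block = (uint32_t)lsn;
}

int main(void)
{
	uint32_t cycle, block;

	crack_lsn(pack_lsn(7, 4096), &cycle, &block);
	printf("cycle %u block %u\n", cycle, block);	/* cycle 7 block 4096 */
	return 0;
}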
@@ -958,7 +1000,7 @@ xlog_find_tail(
958 } 1000 }
959 after_umount_blk = (i + hblks + (int) 1001 after_umount_blk = (i + hblks + (int)
960 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; 1002 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
961 tail_lsn = log->l_tail_lsn; 1003 tail_lsn = atomic64_read(&log->l_tail_lsn);
962 if (*head_blk == after_umount_blk && 1004 if (*head_blk == after_umount_blk &&
963 be32_to_cpu(rhead->h_num_logops) == 1) { 1005 be32_to_cpu(rhead->h_num_logops) == 1) {
964 umount_data_blk = (i + hblks) % log->l_logBBsize; 1006 umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -973,12 +1015,10 @@ xlog_find_tail(
973 * log records will point recovery to after the 1015 * log records will point recovery to after the
974 * current unmount record. 1016 * current unmount record.
975 */ 1017 */
976 log->l_tail_lsn = 1018 xlog_assign_atomic_lsn(&log->l_tail_lsn,
977 xlog_assign_lsn(log->l_curr_cycle, 1019 log->l_curr_cycle, after_umount_blk);
978 after_umount_blk); 1020 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
979 log->l_last_sync_lsn = 1021 log->l_curr_cycle, after_umount_blk);
980 xlog_assign_lsn(log->l_curr_cycle,
981 after_umount_blk);
982 *tail_blk = after_umount_blk; 1022 *tail_blk = after_umount_blk;
983 1023
984 /* 1024 /*
@@ -1017,7 +1057,7 @@ done:
1017 xlog_put_bp(bp); 1057 xlog_put_bp(bp);
1018 1058
1019 if (error) 1059 if (error)
1020 xlog_warn("XFS: failed to locate log tail"); 1060 xfs_warn(log->l_mp, "failed to locate log tail");
1021 return error; 1061 return error;
1022} 1062}
1023 1063
@@ -1081,7 +1121,8 @@ xlog_find_zeroed(
1081 * the first block must be 1. If it's not, maybe we're 1121 * the first block must be 1. If it's not, maybe we're
1082 * not looking at a log... Bail out. 1122 * not looking at a log... Bail out.
1083 */ 1123 */
1084 xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)"); 1124 xfs_warn(log->l_mp,
1125 "Log inconsistent or not a log (last==0, first!=1)");
1085 return XFS_ERROR(EINVAL); 1126 return XFS_ERROR(EINVAL);
1086 } 1127 }
1087 1128
@@ -1217,20 +1258,12 @@ xlog_write_log_records(
1217 */ 1258 */
1218 ealign = round_down(end_block, sectbb); 1259 ealign = round_down(end_block, sectbb);
1219 if (j == 0 && (start_block + endcount > ealign)) { 1260 if (j == 0 && (start_block + endcount > ealign)) {
1220 offset = XFS_BUF_PTR(bp); 1261 offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block);
1221 balign = BBTOB(ealign - start_block); 1262 error = xlog_bread_offset(log, ealign, sectbb,
1222 error = XFS_BUF_SET_PTR(bp, offset + balign, 1263 bp, offset);
1223 BBTOB(sectbb));
1224 if (error)
1225 break;
1226
1227 error = xlog_bread_noalign(log, ealign, sectbb, bp);
1228 if (error) 1264 if (error)
1229 break; 1265 break;
1230 1266
1231 error = XFS_BUF_SET_PTR(bp, offset, bufblks);
1232 if (error)
1233 break;
1234 } 1267 }
1235 1268
1236 offset = xlog_align(log, start_block, endcount, bp); 1269 offset = xlog_align(log, start_block, endcount, bp);
@@ -1495,8 +1528,8 @@ xlog_recover_add_to_trans(
1495 if (list_empty(&trans->r_itemq)) { 1528 if (list_empty(&trans->r_itemq)) {
1496 /* we need to catch log corruptions here */ 1529 /* we need to catch log corruptions here */
1497 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { 1530 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1498 xlog_warn("XFS: xlog_recover_add_to_trans: " 1531 xfs_warn(log->l_mp, "%s: bad header magic number",
1499 "bad header magic number"); 1532 __func__);
1500 ASSERT(0); 1533 ASSERT(0);
1501 return XFS_ERROR(EIO); 1534 return XFS_ERROR(EIO);
1502 } 1535 }
@@ -1523,8 +1556,8 @@ xlog_recover_add_to_trans(
1523 if (item->ri_total == 0) { /* first region to be added */ 1556 if (item->ri_total == 0) { /* first region to be added */
1524 if (in_f->ilf_size == 0 || 1557 if (in_f->ilf_size == 0 ||
1525 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { 1558 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1526 xlog_warn( 1559 xfs_warn(log->l_mp,
1527 "XFS: bad number of regions (%d) in inode log format", 1560 "bad number of regions (%d) in inode log format",
1528 in_f->ilf_size); 1561 in_f->ilf_size);
1529 ASSERT(0); 1562 ASSERT(0);
1530 return XFS_ERROR(EIO); 1563 return XFS_ERROR(EIO);
@@ -1581,8 +1614,9 @@ xlog_recover_reorder_trans(
1581 list_move_tail(&item->ri_list, &trans->r_itemq); 1614 list_move_tail(&item->ri_list, &trans->r_itemq);
1582 break; 1615 break;
1583 default: 1616 default:
1584 xlog_warn( 1617 xfs_warn(log->l_mp,
1585 "XFS: xlog_recover_reorder_trans: unrecognized type of log operation"); 1618 "%s: unrecognized type of log operation",
1619 __func__);
1586 ASSERT(0); 1620 ASSERT(0);
1587 return XFS_ERROR(EIO); 1621 return XFS_ERROR(EIO);
1588 } 1622 }
@@ -1603,82 +1637,45 @@ xlog_recover_reorder_trans(
1603 * record in the table to tell us how many times we expect to see this 1637 * record in the table to tell us how many times we expect to see this
1604 * record during the second pass. 1638 * record during the second pass.
1605 */ 1639 */
1606STATIC void 1640STATIC int
1607xlog_recover_do_buffer_pass1( 1641xlog_recover_buffer_pass1(
1608 xlog_t *log, 1642 struct log *log,
1609 xfs_buf_log_format_t *buf_f) 1643 xlog_recover_item_t *item)
1610{ 1644{
1611 xfs_buf_cancel_t *bcp; 1645 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1612 xfs_buf_cancel_t *nextp; 1646 struct list_head *bucket;
1613 xfs_buf_cancel_t *prevp; 1647 struct xfs_buf_cancel *bcp;
1614 xfs_buf_cancel_t **bucket;
1615 xfs_daddr_t blkno = 0;
1616 uint len = 0;
1617 ushort flags = 0;
1618
1619 switch (buf_f->blf_type) {
1620 case XFS_LI_BUF:
1621 blkno = buf_f->blf_blkno;
1622 len = buf_f->blf_len;
1623 flags = buf_f->blf_flags;
1624 break;
1625 }
1626 1648
1627 /* 1649 /*
1628 * If this isn't a cancel buffer item, then just return. 1650 * If this isn't a cancel buffer item, then just return.
1629 */ 1651 */
1630 if (!(flags & XFS_BLF_CANCEL)) { 1652 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1631 trace_xfs_log_recover_buf_not_cancel(log, buf_f); 1653 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1632 return; 1654 return 0;
1633 }
1634
1635 /*
1636 * Insert an xfs_buf_cancel record into the hash table of
1637 * them. If there is already an identical record, bump
1638 * its reference count.
1639 */
1640 bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1641 XLOG_BC_TABLE_SIZE];
1642 /*
1643 * If the hash bucket is empty then just insert a new record into
1644 * the bucket.
1645 */
1646 if (*bucket == NULL) {
1647 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1648 KM_SLEEP);
1649 bcp->bc_blkno = blkno;
1650 bcp->bc_len = len;
1651 bcp->bc_refcount = 1;
1652 bcp->bc_next = NULL;
1653 *bucket = bcp;
1654 return;
1655 } 1655 }
1656 1656
1657 /* 1657 /*
1658 * The hash bucket is not empty, so search for duplicates of our 1658 * Insert an xfs_buf_cancel record into the hash table of them.
1659 * record. If we find one them just bump its refcount. If not 1659 * If there is already an identical record, bump its reference count.
1660 * then add us at the end of the list.
1661 */ 1660 */
1662 prevp = NULL; 1661 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1663 nextp = *bucket; 1662 list_for_each_entry(bcp, bucket, bc_list) {
1664 while (nextp != NULL) { 1663 if (bcp->bc_blkno == buf_f->blf_blkno &&
1665 if (nextp->bc_blkno == blkno && nextp->bc_len == len) { 1664 bcp->bc_len == buf_f->blf_len) {
1666 nextp->bc_refcount++; 1665 bcp->bc_refcount++;
1667 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); 1666 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1668 return; 1667 return 0;
1669 } 1668 }
1670 prevp = nextp; 1669 }
1671 nextp = nextp->bc_next; 1670
1672 } 1671 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1673 ASSERT(prevp != NULL); 1672 bcp->bc_blkno = buf_f->blf_blkno;
1674 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t), 1673 bcp->bc_len = buf_f->blf_len;
1675 KM_SLEEP);
1676 bcp->bc_blkno = blkno;
1677 bcp->bc_len = len;
1678 bcp->bc_refcount = 1; 1674 bcp->bc_refcount = 1;
1679 bcp->bc_next = NULL; 1675 list_add_tail(&bcp->bc_list, bucket);
1680 prevp->bc_next = bcp; 1676
1681 trace_xfs_log_recover_buf_cancel_add(log, buf_f); 1677 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1678 return 0;
1682} 1679}
1683 1680
1684/* 1681/*
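[Editor's note: the reworked pass 1 code indexes the cancel table through XLOG_BUF_CANCEL_BUCKET(), which is not part of this file's diff. A plausible sketch follows, assuming it keeps the same XLOG_BC_TABLE_SIZE hash as the open-coded version it replaces, with the table now an array of list heads initialised before pass 1 runs. This is a sketch, not the kernel's definition.]

/* Sketch only; the real definition lives in the log private header. */
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + \
	 ((__uint64_t)(blkno) % XLOG_BC_TABLE_SIZE))

	/* ...and the table setup before pass 1, also a sketch: */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
					      sizeof(struct list_head),
					      KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);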
@@ -1696,14 +1693,13 @@ xlog_recover_do_buffer_pass1(
1696 */ 1693 */
1697STATIC int 1694STATIC int
1698xlog_check_buffer_cancelled( 1695xlog_check_buffer_cancelled(
1699 xlog_t *log, 1696 struct log *log,
1700 xfs_daddr_t blkno, 1697 xfs_daddr_t blkno,
1701 uint len, 1698 uint len,
1702 ushort flags) 1699 ushort flags)
1703{ 1700{
1704 xfs_buf_cancel_t *bcp; 1701 struct list_head *bucket;
1705 xfs_buf_cancel_t *prevp; 1702 struct xfs_buf_cancel *bcp;
1706 xfs_buf_cancel_t **bucket;
1707 1703
1708 if (log->l_buf_cancel_table == NULL) { 1704 if (log->l_buf_cancel_table == NULL) {
1709 /* 1705 /*
@@ -1714,128 +1710,70 @@ xlog_check_buffer_cancelled(
1714 return 0; 1710 return 0;
1715 } 1711 }
1716 1712
1717 bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1718 XLOG_BC_TABLE_SIZE];
1719 bcp = *bucket;
1720 if (bcp == NULL) {
1721 /*
1722 * There is no corresponding entry in the table built
1723 * in pass one, so this buffer has not been cancelled.
1724 */
1725 ASSERT(!(flags & XFS_BLF_CANCEL));
1726 return 0;
1727 }
1728
1729 /* 1713 /*
1730 * Search for an entry in the buffer cancel table that 1714 * Search for an entry in the cancel table that matches our buffer.
1731 * matches our buffer.
1732 */ 1715 */
1733 prevp = NULL; 1716 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1734 while (bcp != NULL) { 1717 list_for_each_entry(bcp, bucket, bc_list) {
1735 if (bcp->bc_blkno == blkno && bcp->bc_len == len) { 1718 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1736 /* 1719 goto found;
1737 * We've go a match, so return 1 so that the
1738 * recovery of this buffer is cancelled.
1739 * If this buffer is actually a buffer cancel
1740 * log item, then decrement the refcount on the
1741 * one in the table and remove it if this is the
1742 * last reference.
1743 */
1744 if (flags & XFS_BLF_CANCEL) {
1745 bcp->bc_refcount--;
1746 if (bcp->bc_refcount == 0) {
1747 if (prevp == NULL) {
1748 *bucket = bcp->bc_next;
1749 } else {
1750 prevp->bc_next = bcp->bc_next;
1751 }
1752 kmem_free(bcp);
1753 }
1754 }
1755 return 1;
1756 }
1757 prevp = bcp;
1758 bcp = bcp->bc_next;
1759 } 1720 }
1721
1760 /* 1722 /*
1761 * We didn't find a corresponding entry in the table, so 1723 * We didn't find a corresponding entry in the table, so return 0 so
1762 * return 0 so that the buffer is NOT cancelled. 1724 * that the buffer is NOT cancelled.
1763 */ 1725 */
1764 ASSERT(!(flags & XFS_BLF_CANCEL)); 1726 ASSERT(!(flags & XFS_BLF_CANCEL));
1765 return 0; 1727 return 0;
1766}
1767 1728
1768STATIC int 1729found:
1769xlog_recover_do_buffer_pass2( 1730 /*
1770 xlog_t *log, 1731 * We've go a match, so return 1 so that the recovery of this buffer
1771 xfs_buf_log_format_t *buf_f) 1732 * is cancelled. If this buffer is actually a buffer cancel log
1772{ 1733 * item, then decrement the refcount on the one in the table and
1773 xfs_daddr_t blkno = 0; 1734 * remove it if this is the last reference.
1774 ushort flags = 0; 1735 */
1775 uint len = 0; 1736 if (flags & XFS_BLF_CANCEL) {
1776 1737 if (--bcp->bc_refcount == 0) {
1777 switch (buf_f->blf_type) { 1738 list_del(&bcp->bc_list);
1778 case XFS_LI_BUF: 1739 kmem_free(bcp);
1779 blkno = buf_f->blf_blkno; 1740 }
1780 flags = buf_f->blf_flags;
1781 len = buf_f->blf_len;
1782 break;
1783 } 1741 }
1784 1742 return 1;
1785 return xlog_check_buffer_cancelled(log, blkno, len, flags);
1786} 1743}
1787 1744
1788/* 1745/*
1789 * Perform recovery for a buffer full of inodes. In these buffers, 1746 * Perform recovery for a buffer full of inodes. In these buffers, the only
1790 * the only data which should be recovered is that which corresponds 1747 * data which should be recovered is that which corresponds to the
1791 * to the di_next_unlinked pointers in the on disk inode structures. 1748 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1792 * The rest of the data for the inodes is always logged through the 1749 * data for the inodes is always logged through the inodes themselves rather
1793 * inodes themselves rather than the inode buffer and is recovered 1750 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1794 * in xlog_recover_do_inode_trans().
1795 * 1751 *
1796 * The only time when buffers full of inodes are fully recovered is 1752 * The only time when buffers full of inodes are fully recovered is when the
1797 * when the buffer is full of newly allocated inodes. In this case 1753 * buffer is full of newly allocated inodes. In this case the buffer will
1798 * the buffer will not be marked as an inode buffer and so will be 1754 * not be marked as an inode buffer and so will be sent to
1799 * sent to xlog_recover_do_reg_buffer() below during recovery. 1755 * xlog_recover_do_reg_buffer() below during recovery.
1800 */ 1756 */
1801STATIC int 1757STATIC int
1802xlog_recover_do_inode_buffer( 1758xlog_recover_do_inode_buffer(
1803 xfs_mount_t *mp, 1759 struct xfs_mount *mp,
1804 xlog_recover_item_t *item, 1760 xlog_recover_item_t *item,
1805 xfs_buf_t *bp, 1761 struct xfs_buf *bp,
1806 xfs_buf_log_format_t *buf_f) 1762 xfs_buf_log_format_t *buf_f)
1807{ 1763{
1808 int i; 1764 int i;
1809 int item_index; 1765 int item_index = 0;
1810 int bit; 1766 int bit = 0;
1811 int nbits; 1767 int nbits = 0;
1812 int reg_buf_offset; 1768 int reg_buf_offset = 0;
1813 int reg_buf_bytes; 1769 int reg_buf_bytes = 0;
1814 int next_unlinked_offset; 1770 int next_unlinked_offset;
1815 int inodes_per_buf; 1771 int inodes_per_buf;
1816 xfs_agino_t *logged_nextp; 1772 xfs_agino_t *logged_nextp;
1817 xfs_agino_t *buffer_nextp; 1773 xfs_agino_t *buffer_nextp;
1818 unsigned int *data_map = NULL;
1819 unsigned int map_size = 0;
1820 1774
1821 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 1775 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1822 1776
1823 switch (buf_f->blf_type) {
1824 case XFS_LI_BUF:
1825 data_map = buf_f->blf_data_map;
1826 map_size = buf_f->blf_map_size;
1827 break;
1828 }
1829 /*
1830 * Set the variables corresponding to the current region to
1831 * 0 so that we'll initialize them on the first pass through
1832 * the loop.
1833 */
1834 reg_buf_offset = 0;
1835 reg_buf_bytes = 0;
1836 bit = 0;
1837 nbits = 0;
1838 item_index = 0;
1839 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; 1777 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1840 for (i = 0; i < inodes_per_buf; i++) { 1778 for (i = 0; i < inodes_per_buf; i++) {
1841 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + 1779 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1850,18 +1788,18 @@ xlog_recover_do_inode_buffer(
1850 * the current di_next_unlinked field. 1788 * the current di_next_unlinked field.
1851 */ 1789 */
1852 bit += nbits; 1790 bit += nbits;
1853 bit = xfs_next_bit(data_map, map_size, bit); 1791 bit = xfs_next_bit(buf_f->blf_data_map,
1792 buf_f->blf_map_size, bit);
1854 1793
1855 /* 1794 /*
1856 * If there are no more logged regions in the 1795 * If there are no more logged regions in the
1857 * buffer, then we're done. 1796 * buffer, then we're done.
1858 */ 1797 */
1859 if (bit == -1) { 1798 if (bit == -1)
1860 return 0; 1799 return 0;
1861 }
1862 1800
1863 nbits = xfs_contig_bits(data_map, map_size, 1801 nbits = xfs_contig_bits(buf_f->blf_data_map,
1864 bit); 1802 buf_f->blf_map_size, bit);
1865 ASSERT(nbits > 0); 1803 ASSERT(nbits > 0);
1866 reg_buf_offset = bit << XFS_BLF_SHIFT; 1804 reg_buf_offset = bit << XFS_BLF_SHIFT;
1867 reg_buf_bytes = nbits << XFS_BLF_SHIFT; 1805 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1873,9 +1811,8 @@ xlog_recover_do_inode_buffer(
1873 * di_next_unlinked field, then move on to the next 1811 * di_next_unlinked field, then move on to the next
1874 * di_next_unlinked field. 1812 * di_next_unlinked field.
1875 */ 1813 */
1876 if (next_unlinked_offset < reg_buf_offset) { 1814 if (next_unlinked_offset < reg_buf_offset)
1877 continue; 1815 continue;
1878 }
1879 1816
1880 ASSERT(item->ri_buf[item_index].i_addr != NULL); 1817 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1881 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); 1818 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1889,8 +1826,9 @@ xlog_recover_do_inode_buffer(
1889 logged_nextp = item->ri_buf[item_index].i_addr + 1826 logged_nextp = item->ri_buf[item_index].i_addr +
1890 next_unlinked_offset - reg_buf_offset; 1827 next_unlinked_offset - reg_buf_offset;
1891 if (unlikely(*logged_nextp == 0)) { 1828 if (unlikely(*logged_nextp == 0)) {
1892 xfs_fs_cmn_err(CE_ALERT, mp, 1829 xfs_alert(mp,
1893 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", 1830 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1831 "Trying to replay bad (0) inode di_next_unlinked field.",
1894 item, bp); 1832 item, bp);
1895 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1833 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1896 XFS_ERRLEVEL_LOW, mp); 1834 XFS_ERRLEVEL_LOW, mp);
@@ -1911,36 +1849,29 @@ xlog_recover_do_inode_buffer(
1911 * given buffer. The bitmap in the buf log format structure indicates 1849 * given buffer. The bitmap in the buf log format structure indicates
1912 * where to place the logged data. 1850 * where to place the logged data.
1913 */ 1851 */
1914/*ARGSUSED*/
1915STATIC void 1852STATIC void
1916xlog_recover_do_reg_buffer( 1853xlog_recover_do_reg_buffer(
1917 struct xfs_mount *mp, 1854 struct xfs_mount *mp,
1918 xlog_recover_item_t *item, 1855 xlog_recover_item_t *item,
1919 xfs_buf_t *bp, 1856 struct xfs_buf *bp,
1920 xfs_buf_log_format_t *buf_f) 1857 xfs_buf_log_format_t *buf_f)
1921{ 1858{
1922 int i; 1859 int i;
1923 int bit; 1860 int bit;
1924 int nbits; 1861 int nbits;
1925 unsigned int *data_map = NULL;
1926 unsigned int map_size = 0;
1927 int error; 1862 int error;
1928 1863
1929 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); 1864 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1930 1865
1931 switch (buf_f->blf_type) {
1932 case XFS_LI_BUF:
1933 data_map = buf_f->blf_data_map;
1934 map_size = buf_f->blf_map_size;
1935 break;
1936 }
1937 bit = 0; 1866 bit = 0;
1938 i = 1; /* 0 is the buf format structure */ 1867 i = 1; /* 0 is the buf format structure */
1939 while (1) { 1868 while (1) {
1940 bit = xfs_next_bit(data_map, map_size, bit); 1869 bit = xfs_next_bit(buf_f->blf_data_map,
1870 buf_f->blf_map_size, bit);
1941 if (bit == -1) 1871 if (bit == -1)
1942 break; 1872 break;
1943 nbits = xfs_contig_bits(data_map, map_size, bit); 1873 nbits = xfs_contig_bits(buf_f->blf_data_map,
1874 buf_f->blf_map_size, bit);
1944 ASSERT(nbits > 0); 1875 ASSERT(nbits > 0);
1945 ASSERT(item->ri_buf[i].i_addr != NULL); 1876 ASSERT(item->ri_buf[i].i_addr != NULL);
1946 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); 1877 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
@@ -1956,17 +1887,17 @@ xlog_recover_do_reg_buffer(
1956 if (buf_f->blf_flags & 1887 if (buf_f->blf_flags &
1957 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 1888 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1958 if (item->ri_buf[i].i_addr == NULL) { 1889 if (item->ri_buf[i].i_addr == NULL) {
1959 cmn_err(CE_ALERT, 1890 xfs_alert(mp,
1960 "XFS: NULL dquot in %s.", __func__); 1891 "XFS: NULL dquot in %s.", __func__);
1961 goto next; 1892 goto next;
1962 } 1893 }
1963 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 1894 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1964 cmn_err(CE_ALERT, 1895 xfs_alert(mp,
1965 "XFS: dquot too small (%d) in %s.", 1896 "XFS: dquot too small (%d) in %s.",
1966 item->ri_buf[i].i_len, __func__); 1897 item->ri_buf[i].i_len, __func__);
1967 goto next; 1898 goto next;
1968 } 1899 }
1969 error = xfs_qm_dqcheck(item->ri_buf[i].i_addr, 1900 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1970 -1, 0, XFS_QMOPT_DOWARN, 1901 -1, 0, XFS_QMOPT_DOWARN,
1971 "dquot_buf_recover"); 1902 "dquot_buf_recover");
1972 if (error) 1903 if (error)
@@ -1991,6 +1922,7 @@ xlog_recover_do_reg_buffer(
1991 */ 1922 */
1992int 1923int
1993xfs_qm_dqcheck( 1924xfs_qm_dqcheck(
1925 struct xfs_mount *mp,
1994 xfs_disk_dquot_t *ddq, 1926 xfs_disk_dquot_t *ddq,
1995 xfs_dqid_t id, 1927 xfs_dqid_t id,
1996 uint type, /* used only when IO_dorepair is true */ 1928 uint type, /* used only when IO_dorepair is true */
@@ -2017,14 +1949,14 @@ xfs_qm_dqcheck(
2017 */ 1949 */
2018 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { 1950 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2019 if (flags & XFS_QMOPT_DOWARN) 1951 if (flags & XFS_QMOPT_DOWARN)
2020 cmn_err(CE_ALERT, 1952 xfs_alert(mp,
2021 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", 1953 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2022 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC); 1954 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2023 errs++; 1955 errs++;
2024 } 1956 }
2025 if (ddq->d_version != XFS_DQUOT_VERSION) { 1957 if (ddq->d_version != XFS_DQUOT_VERSION) {
2026 if (flags & XFS_QMOPT_DOWARN) 1958 if (flags & XFS_QMOPT_DOWARN)
2027 cmn_err(CE_ALERT, 1959 xfs_alert(mp,
2028 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", 1960 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2029 str, id, ddq->d_version, XFS_DQUOT_VERSION); 1961 str, id, ddq->d_version, XFS_DQUOT_VERSION);
2030 errs++; 1962 errs++;
@@ -2034,7 +1966,7 @@ xfs_qm_dqcheck(
2034 ddq->d_flags != XFS_DQ_PROJ && 1966 ddq->d_flags != XFS_DQ_PROJ &&
2035 ddq->d_flags != XFS_DQ_GROUP) { 1967 ddq->d_flags != XFS_DQ_GROUP) {
2036 if (flags & XFS_QMOPT_DOWARN) 1968 if (flags & XFS_QMOPT_DOWARN)
2037 cmn_err(CE_ALERT, 1969 xfs_alert(mp,
2038 "%s : XFS dquot ID 0x%x, unknown flags 0x%x", 1970 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2039 str, id, ddq->d_flags); 1971 str, id, ddq->d_flags);
2040 errs++; 1972 errs++;
@@ -2042,7 +1974,7 @@ xfs_qm_dqcheck(
2042 1974
2043 if (id != -1 && id != be32_to_cpu(ddq->d_id)) { 1975 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2044 if (flags & XFS_QMOPT_DOWARN) 1976 if (flags & XFS_QMOPT_DOWARN)
2045 cmn_err(CE_ALERT, 1977 xfs_alert(mp,
2046 "%s : ondisk-dquot 0x%p, ID mismatch: " 1978 "%s : ondisk-dquot 0x%p, ID mismatch: "
2047 "0x%x expected, found id 0x%x", 1979 "0x%x expected, found id 0x%x",
2048 str, ddq, id, be32_to_cpu(ddq->d_id)); 1980 str, ddq, id, be32_to_cpu(ddq->d_id));
@@ -2055,9 +1987,8 @@ xfs_qm_dqcheck(
2055 be64_to_cpu(ddq->d_blk_softlimit)) { 1987 be64_to_cpu(ddq->d_blk_softlimit)) {
2056 if (!ddq->d_btimer) { 1988 if (!ddq->d_btimer) {
2057 if (flags & XFS_QMOPT_DOWARN) 1989 if (flags & XFS_QMOPT_DOWARN)
2058 cmn_err(CE_ALERT, 1990 xfs_alert(mp,
2059 "%s : Dquot ID 0x%x (0x%p) " 1991 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2060 "BLK TIMER NOT STARTED",
2061 str, (int)be32_to_cpu(ddq->d_id), ddq); 1992 str, (int)be32_to_cpu(ddq->d_id), ddq);
2062 errs++; 1993 errs++;
2063 } 1994 }
@@ -2067,9 +1998,8 @@ xfs_qm_dqcheck(
2067 be64_to_cpu(ddq->d_ino_softlimit)) { 1998 be64_to_cpu(ddq->d_ino_softlimit)) {
2068 if (!ddq->d_itimer) { 1999 if (!ddq->d_itimer) {
2069 if (flags & XFS_QMOPT_DOWARN) 2000 if (flags & XFS_QMOPT_DOWARN)
2070 cmn_err(CE_ALERT, 2001 xfs_alert(mp,
2071 "%s : Dquot ID 0x%x (0x%p) " 2002 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2072 "INODE TIMER NOT STARTED",
2073 str, (int)be32_to_cpu(ddq->d_id), ddq); 2003 str, (int)be32_to_cpu(ddq->d_id), ddq);
2074 errs++; 2004 errs++;
2075 } 2005 }
@@ -2079,9 +2009,8 @@ xfs_qm_dqcheck(
2079 be64_to_cpu(ddq->d_rtb_softlimit)) { 2009 be64_to_cpu(ddq->d_rtb_softlimit)) {
2080 if (!ddq->d_rtbtimer) { 2010 if (!ddq->d_rtbtimer) {
2081 if (flags & XFS_QMOPT_DOWARN) 2011 if (flags & XFS_QMOPT_DOWARN)
2082 cmn_err(CE_ALERT, 2012 xfs_alert(mp,
2083 "%s : Dquot ID 0x%x (0x%p) " 2013 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2084 "RTBLK TIMER NOT STARTED",
2085 str, (int)be32_to_cpu(ddq->d_id), ddq); 2014 str, (int)be32_to_cpu(ddq->d_id), ddq);
2086 errs++; 2015 errs++;
2087 } 2016 }
@@ -2092,7 +2021,7 @@ xfs_qm_dqcheck(
2092 return errs; 2021 return errs;
2093 2022
2094 if (flags & XFS_QMOPT_DOWARN) 2023 if (flags & XFS_QMOPT_DOWARN)
2095 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id); 2024 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2096 2025
2097 /* 2026 /*
2098 * Typically, a repair is only requested by quotacheck. 2027 * Typically, a repair is only requested by quotacheck.
@@ -2174,77 +2103,46 @@ xlog_recover_do_dquot_buffer(
2174 * for more details on the implementation of the table of cancel records. 2103 * for more details on the implementation of the table of cancel records.
2175 */ 2104 */
2176STATIC int 2105STATIC int
2177xlog_recover_do_buffer_trans( 2106xlog_recover_buffer_pass2(
2178 xlog_t *log, 2107 xlog_t *log,
2179 xlog_recover_item_t *item, 2108 xlog_recover_item_t *item)
2180 int pass)
2181{ 2109{
2182 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2110 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2183 xfs_mount_t *mp; 2111 xfs_mount_t *mp = log->l_mp;
2184 xfs_buf_t *bp; 2112 xfs_buf_t *bp;
2185 int error; 2113 int error;
2186 int cancel;
2187 xfs_daddr_t blkno;
2188 int len;
2189 ushort flags;
2190 uint buf_flags; 2114 uint buf_flags;
2191 2115
2192 if (pass == XLOG_RECOVER_PASS1) { 2116 /*
2193 /* 2117 * In this pass we only want to recover all the buffers which have
2194 * In this pass we're only looking for buf items 2118 * not been cancelled and are not cancellation buffers themselves.
2195 * with the XFS_BLF_CANCEL bit set. 2119 */
2196 */ 2120 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2197 xlog_recover_do_buffer_pass1(log, buf_f); 2121 buf_f->blf_len, buf_f->blf_flags)) {
2122 trace_xfs_log_recover_buf_cancel(log, buf_f);
2198 return 0; 2123 return 0;
2199 } else {
2200 /*
2201 * In this pass we want to recover all the buffers
2202 * which have not been cancelled and are not
2203 * cancellation buffers themselves. The routine
2204 * we call here will tell us whether or not to
2205 * continue with the replay of this buffer.
2206 */
2207 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2208 if (cancel) {
2209 trace_xfs_log_recover_buf_cancel(log, buf_f);
2210 return 0;
2211 }
2212 } 2124 }
2125
2213 trace_xfs_log_recover_buf_recover(log, buf_f); 2126 trace_xfs_log_recover_buf_recover(log, buf_f);
2214 switch (buf_f->blf_type) {
2215 case XFS_LI_BUF:
2216 blkno = buf_f->blf_blkno;
2217 len = buf_f->blf_len;
2218 flags = buf_f->blf_flags;
2219 break;
2220 default:
2221 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2222 "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2223 buf_f->blf_type, log->l_mp->m_logname ?
2224 log->l_mp->m_logname : "internal");
2225 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2226 XFS_ERRLEVEL_LOW, log->l_mp);
2227 return XFS_ERROR(EFSCORRUPTED);
2228 }
2229 2127
2230 mp = log->l_mp;
2231 buf_flags = XBF_LOCK; 2128 buf_flags = XBF_LOCK;
2232 if (!(flags & XFS_BLF_INODE_BUF)) 2129 if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
2233 buf_flags |= XBF_MAPPED; 2130 buf_flags |= XBF_MAPPED;
2234 2131
2235 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags); 2132 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2133 buf_flags);
2236 if (XFS_BUF_ISERROR(bp)) { 2134 if (XFS_BUF_ISERROR(bp)) {
2237 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, 2135 xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
2238 bp, blkno); 2136 bp, buf_f->blf_blkno);
2239 error = XFS_BUF_GETERROR(bp); 2137 error = XFS_BUF_GETERROR(bp);
2240 xfs_buf_relse(bp); 2138 xfs_buf_relse(bp);
2241 return error; 2139 return error;
2242 } 2140 }
2243 2141
2244 error = 0; 2142 error = 0;
2245 if (flags & XFS_BLF_INODE_BUF) { 2143 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2246 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2144 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2247 } else if (flags & 2145 } else if (buf_f->blf_flags &
2248 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2146 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2249 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2147 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2250 } else { 2148 } else {
@@ -2275,8 +2173,7 @@ xlog_recover_do_buffer_trans(
2275 XFS_BUF_STALE(bp); 2173 XFS_BUF_STALE(bp);
2276 error = xfs_bwrite(mp, bp); 2174 error = xfs_bwrite(mp, bp);
2277 } else { 2175 } else {
2278 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2176 ASSERT(bp->b_target->bt_mount == mp);
2279 bp->b_mount = mp;
2280 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2177 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2281 xfs_bdwrite(mp, bp); 2178 xfs_bdwrite(mp, bp);
2282 } 2179 }
@@ -2285,16 +2182,14 @@ xlog_recover_do_buffer_trans(
2285} 2182}
2286 2183
2287STATIC int 2184STATIC int
2288xlog_recover_do_inode_trans( 2185xlog_recover_inode_pass2(
2289 xlog_t *log, 2186 xlog_t *log,
2290 xlog_recover_item_t *item, 2187 xlog_recover_item_t *item)
2291 int pass)
2292{ 2188{
2293 xfs_inode_log_format_t *in_f; 2189 xfs_inode_log_format_t *in_f;
2294 xfs_mount_t *mp; 2190 xfs_mount_t *mp = log->l_mp;
2295 xfs_buf_t *bp; 2191 xfs_buf_t *bp;
2296 xfs_dinode_t *dip; 2192 xfs_dinode_t *dip;
2297 xfs_ino_t ino;
2298 int len; 2193 int len;
2299 xfs_caddr_t src; 2194 xfs_caddr_t src;
2300 xfs_caddr_t dest; 2195 xfs_caddr_t dest;
@@ -2304,10 +2199,6 @@ xlog_recover_do_inode_trans(
2304 xfs_icdinode_t *dicp; 2199 xfs_icdinode_t *dicp;
2305 int need_free = 0; 2200 int need_free = 0;
2306 2201
2307 if (pass == XLOG_RECOVER_PASS1) {
2308 return 0;
2309 }
2310
2311 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2202 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2312 in_f = item->ri_buf[0].i_addr; 2203 in_f = item->ri_buf[0].i_addr;
2313 } else { 2204 } else {
@@ -2317,8 +2208,6 @@ xlog_recover_do_inode_trans(
2317 if (error) 2208 if (error)
2318 goto error; 2209 goto error;
2319 } 2210 }
2320 ino = in_f->ilf_ino;
2321 mp = log->l_mp;
2322 2211
2323 /* 2212 /*
2324 * Inode buffers can be freed, look out for it, 2213 * Inode buffers can be freed, look out for it,
@@ -2351,10 +2240,10 @@ xlog_recover_do_inode_trans(
2351 */ 2240 */
2352 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { 2241 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2353 xfs_buf_relse(bp); 2242 xfs_buf_relse(bp);
2354 xfs_fs_cmn_err(CE_ALERT, mp, 2243 xfs_alert(mp,
2355 "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", 2244 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2356 dip, bp, ino); 2245 __func__, dip, bp, in_f->ilf_ino);
2357 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)", 2246 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2358 XFS_ERRLEVEL_LOW, mp); 2247 XFS_ERRLEVEL_LOW, mp);
2359 error = EFSCORRUPTED; 2248 error = EFSCORRUPTED;
2360 goto error; 2249 goto error;
@@ -2362,10 +2251,10 @@ xlog_recover_do_inode_trans(
2362 dicp = item->ri_buf[1].i_addr; 2251 dicp = item->ri_buf[1].i_addr;
2363 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2252 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2364 xfs_buf_relse(bp); 2253 xfs_buf_relse(bp);
2365 xfs_fs_cmn_err(CE_ALERT, mp, 2254 xfs_alert(mp,
2366 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", 2255 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2367 item, ino); 2256 __func__, item, in_f->ilf_ino);
2368 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)", 2257 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2369 XFS_ERRLEVEL_LOW, mp); 2258 XFS_ERRLEVEL_LOW, mp);
2370 error = EFSCORRUPTED; 2259 error = EFSCORRUPTED;
2371 goto error; 2260 goto error;
@@ -2393,12 +2282,13 @@ xlog_recover_do_inode_trans(
2393 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) { 2282 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2394 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2283 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2395 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2284 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2396 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)", 2285 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2397 XFS_ERRLEVEL_LOW, mp, dicp); 2286 XFS_ERRLEVEL_LOW, mp, dicp);
2398 xfs_buf_relse(bp); 2287 xfs_buf_relse(bp);
2399 xfs_fs_cmn_err(CE_ALERT, mp, 2288 xfs_alert(mp,
2400 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2289 "%s: Bad regular inode log record, rec ptr 0x%p, "
2401 item, dip, bp, ino); 2290 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2291 __func__, item, dip, bp, in_f->ilf_ino);
2402 error = EFSCORRUPTED; 2292 error = EFSCORRUPTED;
2403 goto error; 2293 goto error;
2404 } 2294 }
@@ -2406,45 +2296,48 @@ xlog_recover_do_inode_trans(
2406 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2296 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2407 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2297 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2408 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2298 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2409 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)", 2299 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2410 XFS_ERRLEVEL_LOW, mp, dicp); 2300 XFS_ERRLEVEL_LOW, mp, dicp);
2411 xfs_buf_relse(bp); 2301 xfs_buf_relse(bp);
2412 xfs_fs_cmn_err(CE_ALERT, mp, 2302 xfs_alert(mp,
2413 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2303 "%s: Bad dir inode log record, rec ptr 0x%p, "
2414 item, dip, bp, ino); 2304 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2305 __func__, item, dip, bp, in_f->ilf_ino);
2415 error = EFSCORRUPTED; 2306 error = EFSCORRUPTED;
2416 goto error; 2307 goto error;
2417 } 2308 }
2418 } 2309 }
2419 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ 2310 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2420 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)", 2311 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2421 XFS_ERRLEVEL_LOW, mp, dicp); 2312 XFS_ERRLEVEL_LOW, mp, dicp);
2422 xfs_buf_relse(bp); 2313 xfs_buf_relse(bp);
2423 xfs_fs_cmn_err(CE_ALERT, mp, 2314 xfs_alert(mp,
2424 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2315 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2425 item, dip, bp, ino, 2316 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2317 __func__, item, dip, bp, in_f->ilf_ino,
2426 dicp->di_nextents + dicp->di_anextents, 2318 dicp->di_nextents + dicp->di_anextents,
2427 dicp->di_nblocks); 2319 dicp->di_nblocks);
2428 error = EFSCORRUPTED; 2320 error = EFSCORRUPTED;
2429 goto error; 2321 goto error;
2430 } 2322 }
2431 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { 2323 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2432 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)", 2324 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2433 XFS_ERRLEVEL_LOW, mp, dicp); 2325 XFS_ERRLEVEL_LOW, mp, dicp);
2434 xfs_buf_relse(bp); 2326 xfs_buf_relse(bp);
2435 xfs_fs_cmn_err(CE_ALERT, mp, 2327 xfs_alert(mp,
2436 "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", 2328 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2437 item, dip, bp, ino, dicp->di_forkoff); 2329 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2330 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2438 error = EFSCORRUPTED; 2331 error = EFSCORRUPTED;
2439 goto error; 2332 goto error;
2440 } 2333 }
2441 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) { 2334 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2442 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", 2335 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2443 XFS_ERRLEVEL_LOW, mp, dicp); 2336 XFS_ERRLEVEL_LOW, mp, dicp);
2444 xfs_buf_relse(bp); 2337 xfs_buf_relse(bp);
2445 xfs_fs_cmn_err(CE_ALERT, mp, 2338 xfs_alert(mp,
2446 "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p", 2339 "%s: Bad inode log record length %d, rec ptr 0x%p",
2447 item->ri_buf[1].i_len, item); 2340 __func__, item->ri_buf[1].i_len, item);
2448 error = EFSCORRUPTED; 2341 error = EFSCORRUPTED;
2449 goto error; 2342 goto error;
2450 } 2343 }
@@ -2531,7 +2424,7 @@ xlog_recover_do_inode_trans(
2531 break; 2424 break;
2532 2425
2533 default: 2426 default:
2534 xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag"); 2427 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2535 ASSERT(0); 2428 ASSERT(0);
2536 xfs_buf_relse(bp); 2429 xfs_buf_relse(bp);
2537 error = EIO; 2430 error = EIO;
@@ -2540,8 +2433,7 @@ xlog_recover_do_inode_trans(
2540 } 2433 }
2541 2434
2542write_inode_buffer: 2435write_inode_buffer:
2543 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2436 ASSERT(bp->b_target->bt_mount == mp);
2544 bp->b_mount = mp;
2545 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2437 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2546 xfs_bdwrite(mp, bp); 2438 xfs_bdwrite(mp, bp);
2547error: 2439error:
@@ -2556,18 +2448,11 @@ error:
2556 * of that type. 2448 * of that type.
2557 */ 2449 */
2558STATIC int 2450STATIC int
2559xlog_recover_do_quotaoff_trans( 2451xlog_recover_quotaoff_pass1(
2560 xlog_t *log, 2452 xlog_t *log,
2561 xlog_recover_item_t *item, 2453 xlog_recover_item_t *item)
2562 int pass)
2563{ 2454{
2564 xfs_qoff_logformat_t *qoff_f; 2455 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2565
2566 if (pass == XLOG_RECOVER_PASS2) {
2567 return (0);
2568 }
2569
2570 qoff_f = item->ri_buf[0].i_addr;
2571 ASSERT(qoff_f); 2456 ASSERT(qoff_f);
2572 2457
2573 /* 2458 /*
@@ -2588,22 +2473,17 @@ xlog_recover_do_quotaoff_trans(
2588 * Recover a dquot record 2473 * Recover a dquot record
2589 */ 2474 */
2590STATIC int 2475STATIC int
2591xlog_recover_do_dquot_trans( 2476xlog_recover_dquot_pass2(
2592 xlog_t *log, 2477 xlog_t *log,
2593 xlog_recover_item_t *item, 2478 xlog_recover_item_t *item)
2594 int pass)
2595{ 2479{
2596 xfs_mount_t *mp; 2480 xfs_mount_t *mp = log->l_mp;
2597 xfs_buf_t *bp; 2481 xfs_buf_t *bp;
2598 struct xfs_disk_dquot *ddq, *recddq; 2482 struct xfs_disk_dquot *ddq, *recddq;
2599 int error; 2483 int error;
2600 xfs_dq_logformat_t *dq_f; 2484 xfs_dq_logformat_t *dq_f;
2601 uint type; 2485 uint type;
2602 2486
2603 if (pass == XLOG_RECOVER_PASS1) {
2604 return 0;
2605 }
2606 mp = log->l_mp;
2607 2487
2608 /* 2488 /*
2609 * Filesystems are required to send in quota flags at mount time. 2489 * Filesystems are required to send in quota flags at mount time.
@@ -2613,13 +2493,11 @@ xlog_recover_do_dquot_trans(
2613 2493
2614 recddq = item->ri_buf[1].i_addr; 2494 recddq = item->ri_buf[1].i_addr;
2615 if (recddq == NULL) { 2495 if (recddq == NULL) {
2616 cmn_err(CE_ALERT, 2496 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2617 "XFS: NULL dquot in %s.", __func__);
2618 return XFS_ERROR(EIO); 2497 return XFS_ERROR(EIO);
2619 } 2498 }
2620 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 2499 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2621 cmn_err(CE_ALERT, 2500 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2622 "XFS: dquot too small (%d) in %s.",
2623 item->ri_buf[1].i_len, __func__); 2501 item->ri_buf[1].i_len, __func__);
2624 return XFS_ERROR(EIO); 2502 return XFS_ERROR(EIO);
2625 } 2503 }
@@ -2644,12 +2522,10 @@ xlog_recover_do_dquot_trans(
2644 */ 2522 */
2645 dq_f = item->ri_buf[0].i_addr; 2523 dq_f = item->ri_buf[0].i_addr;
2646 ASSERT(dq_f); 2524 ASSERT(dq_f);
2647 if ((error = xfs_qm_dqcheck(recddq, 2525 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2648 dq_f->qlf_id, 2526 "xlog_recover_dquot_pass2 (log copy)");
2649 0, XFS_QMOPT_DOWARN, 2527 if (error)
2650 "xlog_recover_do_dquot_trans (log copy)"))) {
2651 return XFS_ERROR(EIO); 2528 return XFS_ERROR(EIO);
2652 }
2653 ASSERT(dq_f->qlf_len == 1); 2529 ASSERT(dq_f->qlf_len == 1);
2654 2530
2655 error = xfs_read_buf(mp, mp->m_ddev_targp, 2531 error = xfs_read_buf(mp, mp->m_ddev_targp,
@@ -2669,8 +2545,9 @@ xlog_recover_do_dquot_trans(
2669 * was among a chunk of dquots created earlier, and we did some 2545 * was among a chunk of dquots created earlier, and we did some
2670 * minimal initialization then. 2546 * minimal initialization then.
2671 */ 2547 */
2672 if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2548 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2673 "xlog_recover_do_dquot_trans")) { 2549 "xlog_recover_dquot_pass2");
2550 if (error) {
2674 xfs_buf_relse(bp); 2551 xfs_buf_relse(bp);
2675 return XFS_ERROR(EIO); 2552 return XFS_ERROR(EIO);
2676 } 2553 }
@@ -2678,8 +2555,7 @@ xlog_recover_do_dquot_trans(
2678 memcpy(ddq, recddq, item->ri_buf[1].i_len); 2555 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2679 2556
2680 ASSERT(dq_f->qlf_size == 2); 2557 ASSERT(dq_f->qlf_size == 2);
2681 ASSERT(bp->b_mount == NULL || bp->b_mount == mp); 2558 ASSERT(bp->b_target->bt_mount == mp);
2682 bp->b_mount = mp;
2683 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2559 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2684 xfs_bdwrite(mp, bp); 2560 xfs_bdwrite(mp, bp);
2685 2561
@@ -2694,38 +2570,31 @@ xlog_recover_do_dquot_trans(
2694 * LSN. 2570 * LSN.
2695 */ 2571 */
2696STATIC int 2572STATIC int
2697xlog_recover_do_efi_trans( 2573xlog_recover_efi_pass2(
2698 xlog_t *log, 2574 xlog_t *log,
2699 xlog_recover_item_t *item, 2575 xlog_recover_item_t *item,
2700 xfs_lsn_t lsn, 2576 xfs_lsn_t lsn)
2701 int pass)
2702{ 2577{
2703 int error; 2578 int error;
2704 xfs_mount_t *mp; 2579 xfs_mount_t *mp = log->l_mp;
2705 xfs_efi_log_item_t *efip; 2580 xfs_efi_log_item_t *efip;
2706 xfs_efi_log_format_t *efi_formatp; 2581 xfs_efi_log_format_t *efi_formatp;
2707 2582
2708 if (pass == XLOG_RECOVER_PASS1) {
2709 return 0;
2710 }
2711
2712 efi_formatp = item->ri_buf[0].i_addr; 2583 efi_formatp = item->ri_buf[0].i_addr;
2713 2584
2714 mp = log->l_mp;
2715 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 2585 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2716 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), 2586 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2717 &(efip->efi_format)))) { 2587 &(efip->efi_format)))) {
2718 xfs_efi_item_free(efip); 2588 xfs_efi_item_free(efip);
2719 return error; 2589 return error;
2720 } 2590 }
2721 efip->efi_next_extent = efi_formatp->efi_nextents; 2591 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2722 efip->efi_flags |= XFS_EFI_COMMITTED;
2723 2592
2724 spin_lock(&log->l_ailp->xa_lock); 2593 spin_lock(&log->l_ailp->xa_lock);
2725 /* 2594 /*
2726 * xfs_trans_ail_update() drops the AIL lock. 2595 * xfs_trans_ail_update() drops the AIL lock.
2727 */ 2596 */
2728 xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn); 2597 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2729 return 0; 2598 return 0;
2730} 2599}
2731 2600
@@ -2738,11 +2607,10 @@ xlog_recover_do_efi_trans(
2738 * efd format structure. If we find it, we remove the efi from the 2607 * efd format structure. If we find it, we remove the efi from the
2739 * AIL and free it. 2608 * AIL and free it.
2740 */ 2609 */
2741STATIC void 2610STATIC int
2742xlog_recover_do_efd_trans( 2611xlog_recover_efd_pass2(
2743 xlog_t *log, 2612 xlog_t *log,
2744 xlog_recover_item_t *item, 2613 xlog_recover_item_t *item)
2745 int pass)
2746{ 2614{
2747 xfs_efd_log_format_t *efd_formatp; 2615 xfs_efd_log_format_t *efd_formatp;
2748 xfs_efi_log_item_t *efip = NULL; 2616 xfs_efi_log_item_t *efip = NULL;
@@ -2751,10 +2619,6 @@ xlog_recover_do_efd_trans(
2751 struct xfs_ail_cursor cur; 2619 struct xfs_ail_cursor cur;
2752 struct xfs_ail *ailp = log->l_ailp; 2620 struct xfs_ail *ailp = log->l_ailp;
2753 2621
2754 if (pass == XLOG_RECOVER_PASS1) {
2755 return;
2756 }
2757
2758 efd_formatp = item->ri_buf[0].i_addr; 2622 efd_formatp = item->ri_buf[0].i_addr;
2759 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 2623 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2760 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 2624 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2786,62 +2650,6 @@ xlog_recover_do_efd_trans(
2786 } 2650 }
2787 xfs_trans_ail_cursor_done(ailp, &cur); 2651 xfs_trans_ail_cursor_done(ailp, &cur);
2788 spin_unlock(&ailp->xa_lock); 2652 spin_unlock(&ailp->xa_lock);
2789}
2790
2791/*
2792 * Perform the transaction
2793 *
2794 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2795 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2796 */
2797STATIC int
2798xlog_recover_do_trans(
2799 xlog_t *log,
2800 xlog_recover_t *trans,
2801 int pass)
2802{
2803 int error = 0;
2804 xlog_recover_item_t *item;
2805
2806 error = xlog_recover_reorder_trans(log, trans, pass);
2807 if (error)
2808 return error;
2809
2810 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2811 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2812 switch (ITEM_TYPE(item)) {
2813 case XFS_LI_BUF:
2814 error = xlog_recover_do_buffer_trans(log, item, pass);
2815 break;
2816 case XFS_LI_INODE:
2817 error = xlog_recover_do_inode_trans(log, item, pass);
2818 break;
2819 case XFS_LI_EFI:
2820 error = xlog_recover_do_efi_trans(log, item,
2821 trans->r_lsn, pass);
2822 break;
2823 case XFS_LI_EFD:
2824 xlog_recover_do_efd_trans(log, item, pass);
2825 error = 0;
2826 break;
2827 case XFS_LI_DQUOT:
2828 error = xlog_recover_do_dquot_trans(log, item, pass);
2829 break;
2830 case XFS_LI_QUOTAOFF:
2831 error = xlog_recover_do_quotaoff_trans(log, item,
2832 pass);
2833 break;
2834 default:
2835 xlog_warn(
2836 "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2837 ASSERT(0);
2838 error = XFS_ERROR(EIO);
2839 break;
2840 }
2841
2842 if (error)
2843 return error;
2844 }
2845 2653
2846 return 0; 2654 return 0;
2847} 2655}
@@ -2853,7 +2661,7 @@ xlog_recover_do_trans(
2853 */ 2661 */
2854STATIC void 2662STATIC void
2855xlog_recover_free_trans( 2663xlog_recover_free_trans(
2856 xlog_recover_t *trans) 2664 struct xlog_recover *trans)
2857{ 2665{
2858 xlog_recover_item_t *item, *n; 2666 xlog_recover_item_t *item, *n;
2859 int i; 2667 int i;
@@ -2872,26 +2680,103 @@ xlog_recover_free_trans(
2872} 2680}
2873 2681
2874STATIC int 2682STATIC int
2683xlog_recover_commit_pass1(
2684 struct log *log,
2685 struct xlog_recover *trans,
2686 xlog_recover_item_t *item)
2687{
2688 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2689
2690 switch (ITEM_TYPE(item)) {
2691 case XFS_LI_BUF:
2692 return xlog_recover_buffer_pass1(log, item);
2693 case XFS_LI_QUOTAOFF:
2694 return xlog_recover_quotaoff_pass1(log, item);
2695 case XFS_LI_INODE:
2696 case XFS_LI_EFI:
2697 case XFS_LI_EFD:
2698 case XFS_LI_DQUOT:
2699 /* nothing to do in pass 1 */
2700 return 0;
2701 default:
2702 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2703 __func__, ITEM_TYPE(item));
2704 ASSERT(0);
2705 return XFS_ERROR(EIO);
2706 }
2707}
2708
2709STATIC int
2710xlog_recover_commit_pass2(
2711 struct log *log,
2712 struct xlog_recover *trans,
2713 xlog_recover_item_t *item)
2714{
2715 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2716
2717 switch (ITEM_TYPE(item)) {
2718 case XFS_LI_BUF:
2719 return xlog_recover_buffer_pass2(log, item);
2720 case XFS_LI_INODE:
2721 return xlog_recover_inode_pass2(log, item);
2722 case XFS_LI_EFI:
2723 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2724 case XFS_LI_EFD:
2725 return xlog_recover_efd_pass2(log, item);
2726 case XFS_LI_DQUOT:
2727 return xlog_recover_dquot_pass2(log, item);
2728 case XFS_LI_QUOTAOFF:
2729 /* nothing to do in pass2 */
2730 return 0;
2731 default:
2732 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
2733 __func__, ITEM_TYPE(item));
2734 ASSERT(0);
2735 return XFS_ERROR(EIO);
2736 }
2737}
2738
2739/*
2740 * Perform the transaction.
2741 *
2742 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2743 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2744 */
2745STATIC int
2875xlog_recover_commit_trans( 2746xlog_recover_commit_trans(
2876 xlog_t *log, 2747 struct log *log,
2877 xlog_recover_t *trans, 2748 struct xlog_recover *trans,
2878 int pass) 2749 int pass)
2879{ 2750{
2880 int error; 2751 int error = 0;
2752 xlog_recover_item_t *item;
2881 2753
2882 hlist_del(&trans->r_list); 2754 hlist_del(&trans->r_list);
2883 if ((error = xlog_recover_do_trans(log, trans, pass))) 2755
2756 error = xlog_recover_reorder_trans(log, trans, pass);
2757 if (error)
2884 return error; 2758 return error;
2885 xlog_recover_free_trans(trans); /* no error */ 2759
2760 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2761 if (pass == XLOG_RECOVER_PASS1)
2762 error = xlog_recover_commit_pass1(log, trans, item);
2763 else
2764 error = xlog_recover_commit_pass2(log, trans, item);
2765 if (error)
2766 return error;
2767 }
2768
2769 xlog_recover_free_trans(trans);
2886 return 0; 2770 return 0;
2887} 2771}
2888 2772
2889STATIC int 2773STATIC int
2890xlog_recover_unmount_trans( 2774xlog_recover_unmount_trans(
2775 struct log *log,
2891 xlog_recover_t *trans) 2776 xlog_recover_t *trans)
2892{ 2777{
2893 /* Do nothing now */ 2778 /* Do nothing now */
2894 xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR"); 2779 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2895 return 0; 2780 return 0;
2896} 2781}
2897 2782
@@ -2934,8 +2819,8 @@ xlog_recover_process_data(
2934 dp += sizeof(xlog_op_header_t); 2819 dp += sizeof(xlog_op_header_t);
2935 if (ohead->oh_clientid != XFS_TRANSACTION && 2820 if (ohead->oh_clientid != XFS_TRANSACTION &&
2936 ohead->oh_clientid != XFS_LOG) { 2821 ohead->oh_clientid != XFS_LOG) {
2937 xlog_warn( 2822 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2938 "XFS: xlog_recover_process_data: bad clientid"); 2823 __func__, ohead->oh_clientid);
2939 ASSERT(0); 2824 ASSERT(0);
2940 return (XFS_ERROR(EIO)); 2825 return (XFS_ERROR(EIO));
2941 } 2826 }
@@ -2948,8 +2833,8 @@ xlog_recover_process_data(
2948 be64_to_cpu(rhead->h_lsn)); 2833 be64_to_cpu(rhead->h_lsn));
2949 } else { 2834 } else {
2950 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 2835 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2951 xlog_warn( 2836 xfs_warn(log->l_mp, "%s: bad length 0x%x",
2952 "XFS: xlog_recover_process_data: bad length"); 2837 __func__, be32_to_cpu(ohead->oh_len));
2953 WARN_ON(1); 2838 WARN_ON(1);
2954 return (XFS_ERROR(EIO)); 2839 return (XFS_ERROR(EIO));
2955 } 2840 }
@@ -2962,7 +2847,7 @@ xlog_recover_process_data(
2962 trans, pass); 2847 trans, pass);
2963 break; 2848 break;
2964 case XLOG_UNMOUNT_TRANS: 2849 case XLOG_UNMOUNT_TRANS:
2965 error = xlog_recover_unmount_trans(trans); 2850 error = xlog_recover_unmount_trans(log, trans);
2966 break; 2851 break;
2967 case XLOG_WAS_CONT_TRANS: 2852 case XLOG_WAS_CONT_TRANS:
2968 error = xlog_recover_add_to_cont_trans(log, 2853 error = xlog_recover_add_to_cont_trans(log,
@@ -2970,8 +2855,8 @@ xlog_recover_process_data(
2970 be32_to_cpu(ohead->oh_len)); 2855 be32_to_cpu(ohead->oh_len));
2971 break; 2856 break;
2972 case XLOG_START_TRANS: 2857 case XLOG_START_TRANS:
2973 xlog_warn( 2858 xfs_warn(log->l_mp, "%s: bad transaction",
2974 "XFS: xlog_recover_process_data: bad transaction"); 2859 __func__);
2975 ASSERT(0); 2860 ASSERT(0);
2976 error = XFS_ERROR(EIO); 2861 error = XFS_ERROR(EIO);
2977 break; 2862 break;
@@ -2981,8 +2866,8 @@ xlog_recover_process_data(
2981 dp, be32_to_cpu(ohead->oh_len)); 2866 dp, be32_to_cpu(ohead->oh_len));
2982 break; 2867 break;
2983 default: 2868 default:
2984 xlog_warn( 2869 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
2985 "XFS: xlog_recover_process_data: bad flag"); 2870 __func__, flags);
2986 ASSERT(0); 2871 ASSERT(0);
2987 error = XFS_ERROR(EIO); 2872 error = XFS_ERROR(EIO);
2988 break; 2873 break;
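The xlog_warn()/cmn_err() calls converted throughout these hunks move to the xfs_warn()/xfs_notice()/xfs_alert()/xfs_info() helpers, whose definitions are not part of this file. A minimal sketch of their assumed shape: printf-style functions that take the mount point and prefix each message with the filesystem's device name, which is why the explicit "filesystem: %s" / m_fsname arguments disappear from the surrounding call sites.

/*
 * Assumed prototypes only -- the real helpers live outside
 * fs/xfs/xfs_log_recover.c and are not shown in this diff.
 * Each behaves like printk() but prepends the device name taken
 * from the passed-in mount (a NULL mount is presumably allowed
 * for messages not tied to one filesystem).
 */
void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...);
void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...);
void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...);
void xfs_info(const struct xfs_mount *mp, const char *fmt, ...);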
@@ -3012,7 +2897,7 @@ xlog_recover_process_efi(
3012 xfs_extent_t *extp; 2897 xfs_extent_t *extp;
3013 xfs_fsblock_t startblock_fsb; 2898 xfs_fsblock_t startblock_fsb;
3014 2899
3015 ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED)); 2900 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3016 2901
3017 /* 2902 /*
3018 * First check the validity of the extents described by the 2903 * First check the validity of the extents described by the
@@ -3051,7 +2936,7 @@ xlog_recover_process_efi(
3051 extp->ext_len); 2936 extp->ext_len);
3052 } 2937 }
3053 2938
3054 efip->efi_flags |= XFS_EFI_RECOVERED; 2939 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3055 error = xfs_trans_commit(tp, 0); 2940 error = xfs_trans_commit(tp, 0);
3056 return error; 2941 return error;
3057 2942
@@ -3108,7 +2993,7 @@ xlog_recover_process_efis(
3108 * Skip EFIs that we've already processed. 2993 * Skip EFIs that we've already processed.
3109 */ 2994 */
3110 efip = (xfs_efi_log_item_t *)lip; 2995 efip = (xfs_efi_log_item_t *)lip;
3111 if (efip->efi_flags & XFS_EFI_RECOVERED) { 2996 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3112 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2997 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3113 continue; 2998 continue;
3114 } 2999 }
@@ -3167,8 +3052,7 @@ xlog_recover_clear_agi_bucket(
3167out_abort: 3052out_abort:
3168 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3053 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3169out_error: 3054out_error:
3170 xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: " 3055 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3171 "failed to clear agi %d. Continuing.", agno);
3172 return; 3056 return;
3173} 3057}
3174 3058
@@ -3419,7 +3303,7 @@ xlog_valid_rec_header(
3419 if (unlikely( 3303 if (unlikely(
3420 (!rhead->h_version || 3304 (!rhead->h_version ||
3421 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3305 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3422 xlog_warn("XFS: %s: unrecognised log version (%d).", 3306 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3423 __func__, be32_to_cpu(rhead->h_version)); 3307 __func__, be32_to_cpu(rhead->h_version));
3424 return XFS_ERROR(EIO); 3308 return XFS_ERROR(EIO);
3425 } 3309 }
@@ -3585,19 +3469,9 @@ xlog_do_recovery_pass(
3585 * - order is important. 3469 * - order is important.
3586 */ 3470 */
3587 wrapped_hblks = hblks - split_hblks; 3471 wrapped_hblks = hblks - split_hblks;
3588 error = XFS_BUF_SET_PTR(hbp, 3472 error = xlog_bread_offset(log, 0,
3589 offset + BBTOB(split_hblks), 3473 wrapped_hblks, hbp,
3590 BBTOB(hblks - split_hblks)); 3474 offset + BBTOB(split_hblks));
3591 if (error)
3592 goto bread_err2;
3593
3594 error = xlog_bread_noalign(log, 0,
3595 wrapped_hblks, hbp);
3596 if (error)
3597 goto bread_err2;
3598
3599 error = XFS_BUF_SET_PTR(hbp, offset,
3600 BBTOB(hblks));
3601 if (error) 3475 if (error)
3602 goto bread_err2; 3476 goto bread_err2;
3603 } 3477 }
@@ -3648,19 +3522,9 @@ xlog_do_recovery_pass(
3648 * _first_, then the log start (LR header end) 3522 * _first_, then the log start (LR header end)
3649 * - order is important. 3523 * - order is important.
3650 */ 3524 */
3651 error = XFS_BUF_SET_PTR(dbp, 3525 error = xlog_bread_offset(log, 0,
3652 offset + BBTOB(split_bblks), 3526 bblks - split_bblks, dbp,
3653 BBTOB(bblks - split_bblks)); 3527 offset + BBTOB(split_bblks));
3654 if (error)
3655 goto bread_err2;
3656
3657 error = xlog_bread_noalign(log, wrapped_hblks,
3658 bblks - split_bblks,
3659 dbp);
3660 if (error)
3661 goto bread_err2;
3662
3663 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3664 if (error) 3528 if (error)
3665 goto bread_err2; 3529 goto bread_err2;
3666 } 3530 }
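Both wrapped-read paths above now go through xlog_bread_offset(), whose body is introduced elsewhere in this patch and is not shown here. A plausible sketch, assuming it simply wraps the old set-pointer / read / restore-pointer sequence and puts the buffer pointer back even when the read fails:

STATIC int
xlog_bread_offset(
	xlog_t		*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	xfs_buf_t	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = XFS_BUF_PTR(bp);
	int		orig_len = XFS_BUF_SIZE(bp);
	int		error, error2;

	/* point the buffer at the caller's offset for this read */
	error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset the buffer pointer even if the read failed */
	error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}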
@@ -3725,7 +3589,7 @@ xlog_do_log_recovery(
3725 xfs_daddr_t head_blk, 3589 xfs_daddr_t head_blk,
3726 xfs_daddr_t tail_blk) 3590 xfs_daddr_t tail_blk)
3727{ 3591{
3728 int error; 3592 int error, i;
3729 3593
3730 ASSERT(head_blk != tail_blk); 3594 ASSERT(head_blk != tail_blk);
3731 3595
@@ -3733,10 +3597,12 @@ xlog_do_log_recovery(
3733 * First do a pass to find all of the cancelled buf log items. 3597 * First do a pass to find all of the cancelled buf log items.
3734 * Store them in the buf_cancel_table for use in the second pass. 3598 * Store them in the buf_cancel_table for use in the second pass.
3735 */ 3599 */
3736 log->l_buf_cancel_table = 3600 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3737 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE * 3601 sizeof(struct list_head),
3738 sizeof(xfs_buf_cancel_t*),
3739 KM_SLEEP); 3602 KM_SLEEP);
3603 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3604 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3605
3740 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3606 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3741 XLOG_RECOVER_PASS1); 3607 XLOG_RECOVER_PASS1);
3742 if (error != 0) { 3608 if (error != 0) {
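The cancel table is now an array of XLOG_BC_TABLE_SIZE list heads rather than pointer chains, each bucket holding cancel records hashed by block number. A minimal sketch of how pass 1 plausibly records a cancelled buffer in a bucket, assuming each entry carries the buffer's block number, length and a reference count; the helper and the XLOG_BUF_CANCEL_BUCKET() macro are hypothetical names, not shown in this hunk:

/* hypothetical bucket hash over the table initialised above */
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((__uint64_t)(blkno) % XLOG_BC_TABLE_SIZE))

STATIC void
xlog_put_buffer_cancelled(	/* hypothetical helper name */
	struct log		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
			/* duplicate cancel record: just take a reference */
			bcp->bc_refcount++;
			return;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);
}

With a list head in every bucket, the DEBUG check below reduces to a per-bucket list_empty() test.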
@@ -3755,7 +3621,7 @@ xlog_do_log_recovery(
3755 int i; 3621 int i;
3756 3622
3757 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3623 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3758 ASSERT(log->l_buf_cancel_table[i] == NULL); 3624 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3759 } 3625 }
3760#endif /* DEBUG */ 3626#endif /* DEBUG */
3761 3627
@@ -3817,7 +3683,7 @@ xlog_do_recover(
3817 XFS_BUF_READ(bp); 3683 XFS_BUF_READ(bp);
3818 XFS_BUF_UNASYNC(bp); 3684 XFS_BUF_UNASYNC(bp);
3819 xfsbdstrat(log->l_mp, bp); 3685 xfsbdstrat(log->l_mp, bp);
3820 error = xfs_iowait(bp); 3686 error = xfs_buf_iowait(bp);
3821 if (error) { 3687 if (error) {
3822 xfs_ioerror_alert("xlog_do_recover", 3688 xfs_ioerror_alert("xlog_do_recover",
3823 log->l_mp, bp, XFS_BUF_ADDR(bp)); 3689 log->l_mp, bp, XFS_BUF_ADDR(bp));
@@ -3875,10 +3741,9 @@ xlog_recover(
3875 return error; 3741 return error;
3876 } 3742 }
3877 3743
3878 cmn_err(CE_NOTE, 3744 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3879 "Starting XFS recovery on filesystem: %s (logdev: %s)", 3745 log->l_mp->m_logname ? log->l_mp->m_logname
3880 log->l_mp->m_fsname, log->l_mp->m_logname ? 3746 : "internal");
3881 log->l_mp->m_logname : "internal");
3882 3747
3883 error = xlog_do_recover(log, head_blk, tail_blk); 3748 error = xlog_do_recover(log, head_blk, tail_blk);
3884 log->l_flags |= XLOG_RECOVERY_NEEDED; 3749 log->l_flags |= XLOG_RECOVERY_NEEDED;
@@ -3911,9 +3776,7 @@ xlog_recover_finish(
3911 int error; 3776 int error;
3912 error = xlog_recover_process_efis(log); 3777 error = xlog_recover_process_efis(log);
3913 if (error) { 3778 if (error) {
3914 cmn_err(CE_ALERT, 3779 xfs_alert(log->l_mp, "Failed to recover EFIs");
3915 "Failed to recover EFIs on filesystem: %s",
3916 log->l_mp->m_fsname);
3917 return error; 3780 return error;
3918 } 3781 }
3919 /* 3782 /*
@@ -3928,15 +3791,12 @@ xlog_recover_finish(
3928 3791
3929 xlog_recover_check_summary(log); 3792 xlog_recover_check_summary(log);
3930 3793
3931 cmn_err(CE_NOTE, 3794 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3932 "Ending XFS recovery on filesystem: %s (logdev: %s)", 3795 log->l_mp->m_logname ? log->l_mp->m_logname
3933 log->l_mp->m_fsname, log->l_mp->m_logname ? 3796 : "internal");
3934 log->l_mp->m_logname : "internal");
3935 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3797 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3936 } else { 3798 } else {
3937 cmn_err(CE_DEBUG, 3799 xfs_info(log->l_mp, "Ending clean mount");
3938 "!Ending clean XFS mount for filesystem: %s\n",
3939 log->l_mp->m_fsname);
3940 } 3800 }
3941 return 0; 3801 return 0;
3942} 3802}
@@ -3969,10 +3829,8 @@ xlog_recover_check_summary(
3969 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3829 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3970 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 3830 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3971 if (error) { 3831 if (error) {
3972 xfs_fs_cmn_err(CE_ALERT, mp, 3832 xfs_alert(mp, "%s agf read failed agno %d error %d",
3973 "xlog_recover_check_summary(agf)" 3833 __func__, agno, error);
3974 "agf read failed agno %d error %d",
3975 agno, error);
3976 } else { 3834 } else {
3977 agfp = XFS_BUF_TO_AGF(agfbp); 3835 agfp = XFS_BUF_TO_AGF(agfbp);
3978 freeblks += be32_to_cpu(agfp->agf_freeblks) + 3836 freeblks += be32_to_cpu(agfp->agf_freeblks) +
@@ -3981,7 +3839,10 @@ xlog_recover_check_summary(
3981 } 3839 }
3982 3840
3983 error = xfs_read_agi(mp, NULL, agno, &agibp); 3841 error = xfs_read_agi(mp, NULL, agno, &agibp);
3984 if (!error) { 3842 if (error) {
3843 xfs_alert(mp, "%s agi read failed agno %d error %d",
3844 __func__, agno, error);
3845 } else {
3985 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 3846 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
3986 3847
3987 itotal += be32_to_cpu(agi->agi_count); 3848 itotal += be32_to_cpu(agi->agi_count);