author     Dave Chinner <david@fromorbit.com>    2014-09-28 20:00:24 -0400
committer  Dave Chinner <david@fromorbit.com>    2014-09-28 20:00:24 -0400
commit     2f43bbd96e43d0b85803f5092be94bbb92d8eac9 (patch)
tree       1b21ab8c82fc176abf80fc3863f2b623fcce414c /fs/xfs
parent     33044dc408e6e6bb7f270c0a2e12598ef5592987 (diff)
parent     b818cca1976d1a01754033ac08724e05d07cce8f (diff)
Merge branch 'xfs-trans-recover-cleanup' into for-next
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_log_recover.c  564
1 file changed, 308 insertions, 256 deletions
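
For orientation before reading the hunks: this merge moves the transaction-recovery helpers later in xfs_log_recover.c and splits the old monolithic switch in xlog_recover_process_data() into xlog_recover_process_ophdr(), xlog_recover_ophdr_to_trans() and xlog_recovery_process_trans(). The stand-alone C sketch below only models that dispatch flow; its types, flag handling and helpers are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Toy model of the refactored recovery dispatch: resolve the transaction an
 * op header belongs to, then switch on the masked ophdr flags, as the new
 * xlog_recover_process_ophdr()/xlog_recovery_process_trans() pair does.
 */
#include <stdio.h>
#include <stdint.h>

#define XLOG_START_TRANS	0x01
#define XLOG_COMMIT_TRANS	0x02
#define XLOG_CONTINUE_TRANS	0x04
#define XLOG_WAS_CONT_TRANS	0x08
#define XLOG_END_TRANS		0x10
#define XLOG_UNMOUNT_TRANS	0x20

struct op_header { uint32_t tid; uint8_t flags; };
struct recover_trans { uint32_t tid; int active; };

/* Resolve the transaction an ophdr belongs to; NULL means "skip this ophdr". */
static struct recover_trans *ophdr_to_trans(struct recover_trans *t,
					    const struct op_header *oh)
{
	if (t->active && t->tid == oh->tid)
		return t;
	if (!(oh->flags & XLOG_START_TRANS))
		return NULL;		/* slack space before the next trans */
	t->tid = oh->tid;		/* new transaction: set up, add nothing yet */
	t->active = 1;
	return NULL;
}

static int process_trans(struct recover_trans *t, uint8_t flags)
{
	flags &= ~XLOG_END_TRANS;	/* mask off ophdr container flags */
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	switch (flags) {
	case 0:
	case XLOG_CONTINUE_TRANS:
		printf("tid %u: add region\n", (unsigned)t->tid);
		return 0;
	case XLOG_WAS_CONT_TRANS:
		printf("tid %u: append to continued region\n", (unsigned)t->tid);
		return 0;
	case XLOG_COMMIT_TRANS:
		printf("tid %u: commit, then free\n", (unsigned)t->tid);
		t->active = 0;
		return 0;
	case XLOG_UNMOUNT_TRANS:
		printf("tid %u: unmount record, just free\n", (unsigned)t->tid);
		t->active = 0;
		return 0;
	default:
		fprintf(stderr, "bad flags 0x%x\n", flags);
		return -5;		/* stand-in for -EIO */
	}
}

int main(void)
{
	struct recover_trans t = { 0, 0 };
	const struct op_header ops[] = {
		{ 1, XLOG_START_TRANS },	/* creates the transaction */
		{ 1, 0 },			/* first region */
		{ 1, XLOG_WAS_CONT_TRANS },	/* continuation of that region */
		{ 1, XLOG_COMMIT_TRANS },	/* commit */
	};

	for (unsigned i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		struct recover_trans *trans = ophdr_to_trans(&t, &ops[i]);

		if (!trans)
			continue;		/* nothing to do for this ophdr */
		if (process_trans(trans, ops[i].flags))
			return 1;
	}
	return 0;
}
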
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 29e101fc32c5..79cfe7e6ec7a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1445,160 +1445,6 @@ xlog_clear_stale_blocks(
1445 | ****************************************************************************** | 1445 | ****************************************************************************** |
1446 | */ | 1446 | */ |
1447 | 1447 | ||
1448 | STATIC xlog_recover_t * | ||
1449 | xlog_recover_find_tid( | ||
1450 | struct hlist_head *head, | ||
1451 | xlog_tid_t tid) | ||
1452 | { | ||
1453 | xlog_recover_t *trans; | ||
1454 | |||
1455 | hlist_for_each_entry(trans, head, r_list) { | ||
1456 | if (trans->r_log_tid == tid) | ||
1457 | return trans; | ||
1458 | } | ||
1459 | return NULL; | ||
1460 | } | ||
1461 | |||
1462 | STATIC void | ||
1463 | xlog_recover_new_tid( | ||
1464 | struct hlist_head *head, | ||
1465 | xlog_tid_t tid, | ||
1466 | xfs_lsn_t lsn) | ||
1467 | { | ||
1468 | xlog_recover_t *trans; | ||
1469 | |||
1470 | trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP); | ||
1471 | trans->r_log_tid = tid; | ||
1472 | trans->r_lsn = lsn; | ||
1473 | INIT_LIST_HEAD(&trans->r_itemq); | ||
1474 | |||
1475 | INIT_HLIST_NODE(&trans->r_list); | ||
1476 | hlist_add_head(&trans->r_list, head); | ||
1477 | } | ||
1478 | |||
1479 | STATIC void | ||
1480 | xlog_recover_add_item( | ||
1481 | struct list_head *head) | ||
1482 | { | ||
1483 | xlog_recover_item_t *item; | ||
1484 | |||
1485 | item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP); | ||
1486 | INIT_LIST_HEAD(&item->ri_list); | ||
1487 | list_add_tail(&item->ri_list, head); | ||
1488 | } | ||
1489 | |||
1490 | STATIC int | ||
1491 | xlog_recover_add_to_cont_trans( | ||
1492 | struct xlog *log, | ||
1493 | struct xlog_recover *trans, | ||
1494 | xfs_caddr_t dp, | ||
1495 | int len) | ||
1496 | { | ||
1497 | xlog_recover_item_t *item; | ||
1498 | xfs_caddr_t ptr, old_ptr; | ||
1499 | int old_len; | ||
1500 | |||
1501 | if (list_empty(&trans->r_itemq)) { | ||
1502 | /* finish copying rest of trans header */ | ||
1503 | xlog_recover_add_item(&trans->r_itemq); | ||
1504 | ptr = (xfs_caddr_t) &trans->r_theader + | ||
1505 | sizeof(xfs_trans_header_t) - len; | ||
1506 | memcpy(ptr, dp, len); /* d, s, l */ | ||
1507 | return 0; | ||
1508 | } | ||
1509 | /* take the tail entry */ | ||
1510 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); | ||
1511 | |||
1512 | old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; | ||
1513 | old_len = item->ri_buf[item->ri_cnt-1].i_len; | ||
1514 | |||
1515 | ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP); | ||
1516 | memcpy(&ptr[old_len], dp, len); /* d, s, l */ | ||
1517 | item->ri_buf[item->ri_cnt-1].i_len += len; | ||
1518 | item->ri_buf[item->ri_cnt-1].i_addr = ptr; | ||
1519 | trace_xfs_log_recover_item_add_cont(log, trans, item, 0); | ||
1520 | return 0; | ||
1521 | } | ||
1522 | |||
1523 | /* | ||
1524 | * The next region to add is the start of a new region. It could be | ||
1525 | * a whole region or it could be the first part of a new region. Because | ||
1526 | * of this, the assumption here is that the type and size fields of all | ||
1527 | * format structures fit into the first 32 bits of the structure. | ||
1528 | * | ||
1529 | * This works because all regions must be 32 bit aligned. Therefore, we | ||
1530 | * either have both fields or we have neither field. In the case we have | ||
1531 | * neither field, the data part of the region is zero length. We only have | ||
1532 | * a log_op_header and can throw away the header since a new one will appear | ||
1533 | * later. If we have at least 4 bytes, then we can determine how many regions | ||
1534 | * will appear in the current log item. | ||
1535 | */ | ||
1536 | STATIC int | ||
1537 | xlog_recover_add_to_trans( | ||
1538 | struct xlog *log, | ||
1539 | struct xlog_recover *trans, | ||
1540 | xfs_caddr_t dp, | ||
1541 | int len) | ||
1542 | { | ||
1543 | xfs_inode_log_format_t *in_f; /* any will do */ | ||
1544 | xlog_recover_item_t *item; | ||
1545 | xfs_caddr_t ptr; | ||
1546 | |||
1547 | if (!len) | ||
1548 | return 0; | ||
1549 | if (list_empty(&trans->r_itemq)) { | ||
1550 | /* we need to catch log corruptions here */ | ||
1551 | if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { | ||
1552 | xfs_warn(log->l_mp, "%s: bad header magic number", | ||
1553 | __func__); | ||
1554 | ASSERT(0); | ||
1555 | return -EIO; | ||
1556 | } | ||
1557 | if (len == sizeof(xfs_trans_header_t)) | ||
1558 | xlog_recover_add_item(&trans->r_itemq); | ||
1559 | memcpy(&trans->r_theader, dp, len); /* d, s, l */ | ||
1560 | return 0; | ||
1561 | } | ||
1562 | |||
1563 | ptr = kmem_alloc(len, KM_SLEEP); | ||
1564 | memcpy(ptr, dp, len); | ||
1565 | in_f = (xfs_inode_log_format_t *)ptr; | ||
1566 | |||
1567 | /* take the tail entry */ | ||
1568 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); | ||
1569 | if (item->ri_total != 0 && | ||
1570 | item->ri_total == item->ri_cnt) { | ||
1571 | /* tail item is in use, get a new one */ | ||
1572 | xlog_recover_add_item(&trans->r_itemq); | ||
1573 | item = list_entry(trans->r_itemq.prev, | ||
1574 | xlog_recover_item_t, ri_list); | ||
1575 | } | ||
1576 | |||
1577 | if (item->ri_total == 0) { /* first region to be added */ | ||
1578 | if (in_f->ilf_size == 0 || | ||
1579 | in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { | ||
1580 | xfs_warn(log->l_mp, | ||
1581 | "bad number of regions (%d) in inode log format", | ||
1582 | in_f->ilf_size); | ||
1583 | ASSERT(0); | ||
1584 | kmem_free(ptr); | ||
1585 | return -EIO; | ||
1586 | } | ||
1587 | |||
1588 | item->ri_total = in_f->ilf_size; | ||
1589 | item->ri_buf = | ||
1590 | kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), | ||
1591 | KM_SLEEP); | ||
1592 | } | ||
1593 | ASSERT(item->ri_total > item->ri_cnt); | ||
1594 | /* Description region is ri_buf[0] */ | ||
1595 | item->ri_buf[item->ri_cnt].i_addr = ptr; | ||
1596 | item->ri_buf[item->ri_cnt].i_len = len; | ||
1597 | item->ri_cnt++; | ||
1598 | trace_xfs_log_recover_item_add(log, trans, item, 0); | ||
1599 | return 0; | ||
1600 | } | ||
1601 | |||
1602 | /* | 1448 | /* |
1603 | * Sort the log items in the transaction. | 1449 | * Sort the log items in the transaction. |
1604 | * | 1450 | * |
@@ -3254,31 +3100,6 @@ xlog_recover_do_icreate_pass2(
3254 | return 0; | 3100 | return 0; |
3255 | } | 3101 | } |
3256 | 3102 | ||
3257 | /* | ||
3258 | * Free up any resources allocated by the transaction | ||
3259 | * | ||
3260 | * Remember that EFIs, EFDs, and IUNLINKs are handled later. | ||
3261 | */ | ||
3262 | STATIC void | ||
3263 | xlog_recover_free_trans( | ||
3264 | struct xlog_recover *trans) | ||
3265 | { | ||
3266 | xlog_recover_item_t *item, *n; | ||
3267 | int i; | ||
3268 | |||
3269 | list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { | ||
3270 | /* Free the regions in the item. */ | ||
3271 | list_del(&item->ri_list); | ||
3272 | for (i = 0; i < item->ri_cnt; i++) | ||
3273 | kmem_free(item->ri_buf[i].i_addr); | ||
3274 | /* Free the item itself */ | ||
3275 | kmem_free(item->ri_buf); | ||
3276 | kmem_free(item); | ||
3277 | } | ||
3278 | /* Free the transaction recover structure */ | ||
3279 | kmem_free(trans); | ||
3280 | } | ||
3281 | |||
3282 | STATIC void | 3103 | STATIC void |
3283 | xlog_recover_buffer_ra_pass2( | 3104 | xlog_recover_buffer_ra_pass2( |
3284 | struct xlog *log, | 3105 | struct xlog *log, |
@@ -3528,22 +3349,309 @@ out:
3528 | if (!list_empty(&done_list)) | 3349 | if (!list_empty(&done_list)) |
3529 | list_splice_init(&done_list, &trans->r_itemq); | 3350 | list_splice_init(&done_list, &trans->r_itemq); |
3530 | 3351 | ||
3531 | xlog_recover_free_trans(trans); | ||
3532 | |||
3533 | error2 = xfs_buf_delwri_submit(&buffer_list); | 3352 | error2 = xfs_buf_delwri_submit(&buffer_list); |
3534 | return error ? error : error2; | 3353 | return error ? error : error2; |
3535 | } | 3354 | } |
3536 | 3355 | ||
3356 | STATIC void | ||
3357 | xlog_recover_add_item( | ||
3358 | struct list_head *head) | ||
3359 | { | ||
3360 | xlog_recover_item_t *item; | ||
3361 | |||
3362 | item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP); | ||
3363 | INIT_LIST_HEAD(&item->ri_list); | ||
3364 | list_add_tail(&item->ri_list, head); | ||
3365 | } | ||
3366 | |||
3537 | STATIC int | 3367 | STATIC int |
3538 | xlog_recover_unmount_trans( | 3368 | xlog_recover_add_to_cont_trans( |
3539 | struct xlog *log) | 3369 | struct xlog *log, |
3370 | struct xlog_recover *trans, | ||
3371 | xfs_caddr_t dp, | ||
3372 | int len) | ||
3373 | { | ||
3374 | xlog_recover_item_t *item; | ||
3375 | xfs_caddr_t ptr, old_ptr; | ||
3376 | int old_len; | ||
3377 | |||
3378 | if (list_empty(&trans->r_itemq)) { | ||
3379 | /* finish copying rest of trans header */ | ||
3380 | xlog_recover_add_item(&trans->r_itemq); | ||
3381 | ptr = (xfs_caddr_t) &trans->r_theader + | ||
3382 | sizeof(xfs_trans_header_t) - len; | ||
3383 | memcpy(ptr, dp, len); | ||
3384 | return 0; | ||
3385 | } | ||
3386 | /* take the tail entry */ | ||
3387 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); | ||
3388 | |||
3389 | old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; | ||
3390 | old_len = item->ri_buf[item->ri_cnt-1].i_len; | ||
3391 | |||
3392 | ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP); | ||
3393 | memcpy(&ptr[old_len], dp, len); | ||
3394 | item->ri_buf[item->ri_cnt-1].i_len += len; | ||
3395 | item->ri_buf[item->ri_cnt-1].i_addr = ptr; | ||
3396 | trace_xfs_log_recover_item_add_cont(log, trans, item, 0); | ||
3397 | return 0; | ||
3398 | } | ||
3399 | |||
3400 | /* | ||
3401 | * The next region to add is the start of a new region. It could be | ||
3402 | * a whole region or it could be the first part of a new region. Because | ||
3403 | * of this, the assumption here is that the type and size fields of all | ||
3404 | * format structures fit into the first 32 bits of the structure. | ||
3405 | * | ||
3406 | * This works because all regions must be 32 bit aligned. Therefore, we | ||
3407 | * either have both fields or we have neither field. In the case we have | ||
3408 | * neither field, the data part of the region is zero length. We only have | ||
3409 | * a log_op_header and can throw away the header since a new one will appear | ||
3410 | * later. If we have at least 4 bytes, then we can determine how many regions | ||
3411 | * will appear in the current log item. | ||
3412 | */ | ||
3413 | STATIC int | ||
3414 | xlog_recover_add_to_trans( | ||
3415 | struct xlog *log, | ||
3416 | struct xlog_recover *trans, | ||
3417 | xfs_caddr_t dp, | ||
3418 | int len) | ||
3540 | { | 3419 | { |
3541 | /* Do nothing now */ | 3420 | xfs_inode_log_format_t *in_f; /* any will do */ |
3542 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); | 3421 | xlog_recover_item_t *item; |
3422 | xfs_caddr_t ptr; | ||
3423 | |||
3424 | if (!len) | ||
3425 | return 0; | ||
3426 | if (list_empty(&trans->r_itemq)) { | ||
3427 | /* we need to catch log corruptions here */ | ||
3428 | if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { | ||
3429 | xfs_warn(log->l_mp, "%s: bad header magic number", | ||
3430 | __func__); | ||
3431 | ASSERT(0); | ||
3432 | return -EIO; | ||
3433 | } | ||
3434 | if (len == sizeof(xfs_trans_header_t)) | ||
3435 | xlog_recover_add_item(&trans->r_itemq); | ||
3436 | memcpy(&trans->r_theader, dp, len); | ||
3437 | return 0; | ||
3438 | } | ||
3439 | |||
3440 | ptr = kmem_alloc(len, KM_SLEEP); | ||
3441 | memcpy(ptr, dp, len); | ||
3442 | in_f = (xfs_inode_log_format_t *)ptr; | ||
3443 | |||
3444 | /* take the tail entry */ | ||
3445 | item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); | ||
3446 | if (item->ri_total != 0 && | ||
3447 | item->ri_total == item->ri_cnt) { | ||
3448 | /* tail item is in use, get a new one */ | ||
3449 | xlog_recover_add_item(&trans->r_itemq); | ||
3450 | item = list_entry(trans->r_itemq.prev, | ||
3451 | xlog_recover_item_t, ri_list); | ||
3452 | } | ||
3453 | |||
3454 | if (item->ri_total == 0) { /* first region to be added */ | ||
3455 | if (in_f->ilf_size == 0 || | ||
3456 | in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { | ||
3457 | xfs_warn(log->l_mp, | ||
3458 | "bad number of regions (%d) in inode log format", | ||
3459 | in_f->ilf_size); | ||
3460 | ASSERT(0); | ||
3461 | kmem_free(ptr); | ||
3462 | return -EIO; | ||
3463 | } | ||
3464 | |||
3465 | item->ri_total = in_f->ilf_size; | ||
3466 | item->ri_buf = | ||
3467 | kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), | ||
3468 | KM_SLEEP); | ||
3469 | } | ||
3470 | ASSERT(item->ri_total > item->ri_cnt); | ||
3471 | /* Description region is ri_buf[0] */ | ||
3472 | item->ri_buf[item->ri_cnt].i_addr = ptr; | ||
3473 | item->ri_buf[item->ri_cnt].i_len = len; | ||
3474 | item->ri_cnt++; | ||
3475 | trace_xfs_log_recover_item_add(log, trans, item, 0); | ||
3543 | return 0; | 3476 | return 0; |
3544 | } | 3477 | } |
3545 | 3478 | ||
3546 | /* | 3479 | /* |
3480 | * Free up any resources allocated by the transaction | ||
3481 | * | ||
3482 | * Remember that EFIs, EFDs, and IUNLINKs are handled later. | ||
3483 | */ | ||
3484 | STATIC void | ||
3485 | xlog_recover_free_trans( | ||
3486 | struct xlog_recover *trans) | ||
3487 | { | ||
3488 | xlog_recover_item_t *item, *n; | ||
3489 | int i; | ||
3490 | |||
3491 | list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { | ||
3492 | /* Free the regions in the item. */ | ||
3493 | list_del(&item->ri_list); | ||
3494 | for (i = 0; i < item->ri_cnt; i++) | ||
3495 | kmem_free(item->ri_buf[i].i_addr); | ||
3496 | /* Free the item itself */ | ||
3497 | kmem_free(item->ri_buf); | ||
3498 | kmem_free(item); | ||
3499 | } | ||
3500 | /* Free the transaction recover structure */ | ||
3501 | kmem_free(trans); | ||
3502 | } | ||
3503 | |||
3504 | /* | ||
3505 | * On error or completion, trans is freed. | ||
3506 | */ | ||
3507 | STATIC int | ||
3508 | xlog_recovery_process_trans( | ||
3509 | struct xlog *log, | ||
3510 | struct xlog_recover *trans, | ||
3511 | xfs_caddr_t dp, | ||
3512 | unsigned int len, | ||
3513 | unsigned int flags, | ||
3514 | int pass) | ||
3515 | { | ||
3516 | int error = 0; | ||
3517 | bool freeit = false; | ||
3518 | |||
3519 | /* mask off ophdr transaction container flags */ | ||
3520 | flags &= ~XLOG_END_TRANS; | ||
3521 | if (flags & XLOG_WAS_CONT_TRANS) | ||
3522 | flags &= ~XLOG_CONTINUE_TRANS; | ||
3523 | |||
3524 | /* | ||
3525 | * Callees must not free the trans structure. We'll decide if we need to | ||
3526 | * free it or not based on the operation being done and its result. | ||
3527 | */ | ||
3528 | switch (flags) { | ||
3529 | /* expected flag values */ | ||
3530 | case 0: | ||
3531 | case XLOG_CONTINUE_TRANS: | ||
3532 | error = xlog_recover_add_to_trans(log, trans, dp, len); | ||
3533 | break; | ||
3534 | case XLOG_WAS_CONT_TRANS: | ||
3535 | error = xlog_recover_add_to_cont_trans(log, trans, dp, len); | ||
3536 | break; | ||
3537 | case XLOG_COMMIT_TRANS: | ||
3538 | error = xlog_recover_commit_trans(log, trans, pass); | ||
3539 | /* success or fail, we are now done with this transaction. */ | ||
3540 | freeit = true; | ||
3541 | break; | ||
3542 | |||
3543 | /* unexpected flag values */ | ||
3544 | case XLOG_UNMOUNT_TRANS: | ||
3545 | /* just skip trans */ | ||
3546 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); | ||
3547 | freeit = true; | ||
3548 | break; | ||
3549 | case XLOG_START_TRANS: | ||
3550 | default: | ||
3551 | xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); | ||
3552 | ASSERT(0); | ||
3553 | error = -EIO; | ||
3554 | break; | ||
3555 | } | ||
3556 | if (error || freeit) | ||
3557 | xlog_recover_free_trans(trans); | ||
3558 | return error; | ||
3559 | } | ||
3560 | |||
3561 | /* | ||
3562 | * Lookup the transaction recovery structure associated with the ID in the | ||
3563 | * current ophdr. If the transaction doesn't exist and the start flag is set in | ||
3564 | * the ophdr, then allocate a new transaction for future ID matches to find. | ||
3565 | * Either way, return what we found during the lookup - an existing transaction | ||
3566 | * or nothing. | ||
3567 | */ | ||
3568 | STATIC struct xlog_recover * | ||
3569 | xlog_recover_ophdr_to_trans( | ||
3570 | struct hlist_head rhash[], | ||
3571 | struct xlog_rec_header *rhead, | ||
3572 | struct xlog_op_header *ohead) | ||
3573 | { | ||
3574 | struct xlog_recover *trans; | ||
3575 | xlog_tid_t tid; | ||
3576 | struct hlist_head *rhp; | ||
3577 | |||
3578 | tid = be32_to_cpu(ohead->oh_tid); | ||
3579 | rhp = &rhash[XLOG_RHASH(tid)]; | ||
3580 | hlist_for_each_entry(trans, rhp, r_list) { | ||
3581 | if (trans->r_log_tid == tid) | ||
3582 | return trans; | ||
3583 | } | ||
3584 | |||
3585 | /* | ||
3586 | * skip over non-start transaction headers - we could be | ||
3587 | * processing slack space before the next transaction starts | ||
3588 | */ | ||
3589 | if (!(ohead->oh_flags & XLOG_START_TRANS)) | ||
3590 | return NULL; | ||
3591 | |||
3592 | ASSERT(be32_to_cpu(ohead->oh_len) == 0); | ||
3593 | |||
3594 | /* | ||
3595 | * This is a new transaction so allocate a new recovery container to | ||
3596 | * hold the recovery ops that will follow. | ||
3597 | */ | ||
3598 | trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP); | ||
3599 | trans->r_log_tid = tid; | ||
3600 | trans->r_lsn = be64_to_cpu(rhead->h_lsn); | ||
3601 | INIT_LIST_HEAD(&trans->r_itemq); | ||
3602 | INIT_HLIST_NODE(&trans->r_list); | ||
3603 | hlist_add_head(&trans->r_list, rhp); | ||
3604 | |||
3605 | /* | ||
3606 | * Nothing more to do for this ophdr. Items to be added to this new | ||
3607 | * transaction will be in subsequent ophdr containers. | ||
3608 | */ | ||
3609 | return NULL; | ||
3610 | } | ||
3611 | |||
3612 | STATIC int | ||
3613 | xlog_recover_process_ophdr( | ||
3614 | struct xlog *log, | ||
3615 | struct hlist_head rhash[], | ||
3616 | struct xlog_rec_header *rhead, | ||
3617 | struct xlog_op_header *ohead, | ||
3618 | xfs_caddr_t dp, | ||
3619 | xfs_caddr_t end, | ||
3620 | int pass) | ||
3621 | { | ||
3622 | struct xlog_recover *trans; | ||
3623 | unsigned int len; | ||
3624 | |||
3625 | /* Do we understand who wrote this op? */ | ||
3626 | if (ohead->oh_clientid != XFS_TRANSACTION && | ||
3627 | ohead->oh_clientid != XFS_LOG) { | ||
3628 | xfs_warn(log->l_mp, "%s: bad clientid 0x%x", | ||
3629 | __func__, ohead->oh_clientid); | ||
3630 | ASSERT(0); | ||
3631 | return -EIO; | ||
3632 | } | ||
3633 | |||
3634 | /* | ||
3635 | * Check the ophdr contains all the data it is supposed to contain. | ||
3636 | */ | ||
3637 | len = be32_to_cpu(ohead->oh_len); | ||
3638 | if (dp + len > end) { | ||
3639 | xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); | ||
3640 | WARN_ON(1); | ||
3641 | return -EIO; | ||
3642 | } | ||
3643 | |||
3644 | trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead); | ||
3645 | if (!trans) { | ||
3646 | /* nothing to do, so skip over this ophdr */ | ||
3647 | return 0; | ||
3648 | } | ||
3649 | |||
3650 | return xlog_recovery_process_trans(log, trans, dp, len, | ||
3651 | ohead->oh_flags, pass); | ||
3652 | } | ||
3653 | |||
3654 | /* | ||
3547 | * There are two valid states of the r_state field. 0 indicates that the | 3655 | * There are two valid states of the r_state field. 0 indicates that the |
3548 | * transaction structure is in a normal state. We have either seen the | 3656 | * transaction structure is in a normal state. We have either seen the |
3549 | * start of the transaction or the last operation we added was not a partial | 3657 | * start of the transaction or the last operation we added was not a partial |
@@ -3560,86 +3668,30 @@ xlog_recover_process_data(
3560 | xfs_caddr_t dp, | 3668 | xfs_caddr_t dp, |
3561 | int pass) | 3669 | int pass) |
3562 | { | 3670 | { |
3563 | xfs_caddr_t lp; | 3671 | struct xlog_op_header *ohead; |
3672 | xfs_caddr_t end; | ||
3564 | int num_logops; | 3673 | int num_logops; |
3565 | xlog_op_header_t *ohead; | ||
3566 | xlog_recover_t *trans; | ||
3567 | xlog_tid_t tid; | ||
3568 | int error; | 3674 | int error; |
3569 | unsigned long hash; | ||
3570 | uint flags; | ||
3571 | 3675 | ||
3572 | lp = dp + be32_to_cpu(rhead->h_len); | 3676 | end = dp + be32_to_cpu(rhead->h_len); |
3573 | num_logops = be32_to_cpu(rhead->h_num_logops); | 3677 | num_logops = be32_to_cpu(rhead->h_num_logops); |
3574 | 3678 | ||
3575 | /* check the log format matches our own - else we can't recover */ | 3679 | /* check the log format matches our own - else we can't recover */ |
3576 | if (xlog_header_check_recover(log->l_mp, rhead)) | 3680 | if (xlog_header_check_recover(log->l_mp, rhead)) |
3577 | return -EIO; | 3681 | return -EIO; |
3578 | 3682 | ||
3579 | while ((dp < lp) && num_logops) { | 3683 | while ((dp < end) && num_logops) { |
3580 | ASSERT(dp + sizeof(xlog_op_header_t) <= lp); | 3684 | |
3581 | ohead = (xlog_op_header_t *)dp; | 3685 | ohead = (struct xlog_op_header *)dp; |
3582 | dp += sizeof(xlog_op_header_t); | 3686 | dp += sizeof(*ohead); |
3583 | if (ohead->oh_clientid != XFS_TRANSACTION && | 3687 | ASSERT(dp <= end); |
3584 | ohead->oh_clientid != XFS_LOG) { | 3688 | |
3585 | xfs_warn(log->l_mp, "%s: bad clientid 0x%x", | 3689 | /* errors will abort recovery */ |
3586 | __func__, ohead->oh_clientid); | 3690 | error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, |
3587 | ASSERT(0); | 3691 | dp, end, pass); |
3588 | return -EIO; | 3692 | if (error) |
3589 | } | 3693 | return error; |
3590 | tid = be32_to_cpu(ohead->oh_tid); | 3694 | |
3591 | hash = XLOG_RHASH(tid); | ||
3592 | trans = xlog_recover_find_tid(&rhash[hash], tid); | ||
3593 | if (trans == NULL) { /* not found; add new tid */ | ||
3594 | if (ohead->oh_flags & XLOG_START_TRANS) | ||
3595 | xlog_recover_new_tid(&rhash[hash], tid, | ||
3596 | be64_to_cpu(rhead->h_lsn)); | ||
3597 | } else { | ||
3598 | if (dp + be32_to_cpu(ohead->oh_len) > lp) { | ||
3599 | xfs_warn(log->l_mp, "%s: bad length 0x%x", | ||
3600 | __func__, be32_to_cpu(ohead->oh_len)); | ||
3601 | WARN_ON(1); | ||
3602 | return -EIO; | ||
3603 | } | ||
3604 | flags = ohead->oh_flags & ~XLOG_END_TRANS; | ||
3605 | if (flags & XLOG_WAS_CONT_TRANS) | ||
3606 | flags &= ~XLOG_CONTINUE_TRANS; | ||
3607 | switch (flags) { | ||
3608 | case XLOG_COMMIT_TRANS: | ||
3609 | error = xlog_recover_commit_trans(log, | ||
3610 | trans, pass); | ||
3611 | break; | ||
3612 | case XLOG_UNMOUNT_TRANS: | ||
3613 | error = xlog_recover_unmount_trans(log); | ||
3614 | break; | ||
3615 | case XLOG_WAS_CONT_TRANS: | ||
3616 | error = xlog_recover_add_to_cont_trans(log, | ||
3617 | trans, dp, | ||
3618 | be32_to_cpu(ohead->oh_len)); | ||
3619 | break; | ||
3620 | case XLOG_START_TRANS: | ||
3621 | xfs_warn(log->l_mp, "%s: bad transaction", | ||
3622 | __func__); | ||
3623 | ASSERT(0); | ||
3624 | error = -EIO; | ||
3625 | break; | ||
3626 | case 0: | ||
3627 | case XLOG_CONTINUE_TRANS: | ||
3628 | error = xlog_recover_add_to_trans(log, trans, | ||
3629 | dp, be32_to_cpu(ohead->oh_len)); | ||
3630 | break; | ||
3631 | default: | ||
3632 | xfs_warn(log->l_mp, "%s: bad flag 0x%x", | ||
3633 | __func__, flags); | ||
3634 | ASSERT(0); | ||
3635 | error = -EIO; | ||
3636 | break; | ||
3637 | } | ||
3638 | if (error) { | ||
3639 | xlog_recover_free_trans(trans); | ||
3640 | return error; | ||
3641 | } | ||
3642 | } | ||
3643 | dp += be32_to_cpu(ohead->oh_len); | 3695 | dp += be32_to_cpu(ohead->oh_len); |
3644 | num_logops--; | 3696 | num_logops--; |
3645 | } | 3697 | } |