diff options
author | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2011-05-16 06:44:48 -0400 |
---|---|---|
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2011-05-16 08:48:48 -0400 |
commit | 7c47bfd0dbb20e5d7fa4e37cfd76bb73d39b32b4 (patch) | |
tree | 277a373b3262229f31d3a2733a55cacaac5e5ef7 /fs/ubifs/recovery.c | |
parent | 617992069513c1e789c707c4d75ff03bf7dd0fb0 (diff) |
UBIFS: always cleanup the recovered LEB
Now that we call 'ubifs_recover_leb()' only for LEBs which are potentially
corrupted (i.e., only for last buds, not for all of them), we can clean up every
LEB, not only those where we find corruption. The reason is unstable bits: even
though the LEB may look good now, it might contain unstable bits which may hit
us a bit later.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'fs/ubifs/recovery.c')
-rw-r--r-- | fs/ubifs/recovery.c | 29 |
1 files changed, 9 insertions, 20 deletions
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 7d922033d666..4d10b6e36ec4 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -609,7 +609,7 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs) | |||
609 | struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | 609 | struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, |
610 | int offs, void *sbuf, int grouped) | 610 | int offs, void *sbuf, int grouped) |
611 | { | 611 | { |
612 | int ret = 0, err, len = c->leb_size - offs, need_clean = 0; | 612 | int ret = 0, err, len = c->leb_size - offs; |
613 | int start = offs; | 613 | int start = offs; |
614 | struct ubifs_scan_leb *sleb; | 614 | struct ubifs_scan_leb *sleb; |
615 | void *buf = sbuf + offs; | 615 | void *buf = sbuf + offs; |
@@ -620,9 +620,6 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | |||
620 | if (IS_ERR(sleb)) | 620 | if (IS_ERR(sleb)) |
621 | return sleb; | 621 | return sleb; |
622 | 622 | ||
623 | if (sleb->ecc) | ||
624 | need_clean = 1; | ||
625 | |||
626 | while (len >= 8) { | 623 | while (len >= 8) { |
627 | dbg_scan("look at LEB %d:%d (%d bytes left)", | 624 | dbg_scan("look at LEB %d:%d (%d bytes left)", |
628 | lnum, offs, len); | 625 | lnum, offs, len); |
@@ -665,21 +662,18 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | |||
665 | } | 662 | } |
666 | 663 | ||
667 | if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) { | 664 | if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) { |
668 | if (is_last_write(c, buf, offs)) { | 665 | if (is_last_write(c, buf, offs)) |
669 | clean_buf(c, &buf, lnum, &offs, &len); | 666 | clean_buf(c, &buf, lnum, &offs, &len); |
670 | need_clean = 1; | 667 | else |
671 | } else | ||
672 | goto corrupted_rescan; | 668 | goto corrupted_rescan; |
673 | } else if (ret == SCANNED_A_CORRUPT_NODE) { | 669 | } else if (ret == SCANNED_A_CORRUPT_NODE) { |
674 | if (no_more_nodes(c, buf, len, lnum, offs)) { | 670 | if (no_more_nodes(c, buf, len, lnum, offs)) |
675 | clean_buf(c, &buf, lnum, &offs, &len); | 671 | clean_buf(c, &buf, lnum, &offs, &len); |
676 | need_clean = 1; | 672 | else |
677 | } else | ||
678 | goto corrupted_rescan; | 673 | goto corrupted_rescan; |
679 | } else if (!is_empty(buf, len)) { | 674 | } else if (!is_empty(buf, len)) { |
680 | if (is_last_write(c, buf, offs)) { | 675 | if (is_last_write(c, buf, offs)) { |
681 | clean_buf(c, &buf, lnum, &offs, &len); | 676 | clean_buf(c, &buf, lnum, &offs, &len); |
682 | need_clean = 1; | ||
683 | } else { | 677 | } else { |
684 | int corruption = first_non_ff(buf, len); | 678 | int corruption = first_non_ff(buf, len); |
685 | 679 | ||
@@ -701,21 +695,16 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, | |||
701 | buf = sbuf + offs; | 695 | buf = sbuf + offs; |
702 | len = c->leb_size - offs; | 696 | len = c->leb_size - offs; |
703 | clean_buf(c, &buf, lnum, &offs, &len); | 697 | clean_buf(c, &buf, lnum, &offs, &len); |
704 | need_clean = 1; | ||
705 | } | 698 | } |
706 | 699 | ||
707 | if (offs % c->min_io_size) { | 700 | if (offs % c->min_io_size) |
708 | clean_buf(c, &buf, lnum, &offs, &len); | 701 | clean_buf(c, &buf, lnum, &offs, &len); |
709 | need_clean = 1; | ||
710 | } | ||
711 | 702 | ||
712 | ubifs_end_scan(c, sleb, lnum, offs); | 703 | ubifs_end_scan(c, sleb, lnum, offs); |
713 | 704 | ||
714 | if (need_clean) { | 705 | err = fix_unclean_leb(c, sleb, start); |
715 | err = fix_unclean_leb(c, sleb, start); | 706 | if (err) |
716 | if (err) | 707 | goto error; |
717 | goto error; | ||
718 | } | ||
719 | 708 | ||
720 | return sleb; | 709 | return sleb; |
721 | 710 | ||