commit 3af91a1256b628d55913324d27fe747c69566749
tree cdbb84d4cce611ba722699d989bf1fecb73a69f1 /fs
parent 9a44fde3435e0c6012cbc9122497bbbd3338434a
parent 098011940a2549ae7182db4bf101c3e3d2b4e6df
author Linus Torvalds <torvalds@linux-foundation.org> 2011-06-03 18:59:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2011-06-03 18:59:32 -0400
Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6
* 'linux-next' of git://git.infradead.org/ubifs-2.6:
UBIFS: fix-up free space earlier
UBIFS: initialize LPT earlier
UBIFS: assert no fixup when writing a node
UBIFS: fix clean znode counter corruption in error cases
UBIFS: fix memory leak on error path
UBIFS: fix shrinker object count reports
UBIFS: fix recovery broken by the previous recovery fix
UBIFS: amend ubifs_recover_leb interface
UBIFS: introduce a "grouped" journal head flag
UBIFS: suppress false error messages
Diffstat (limited to 'fs')
-rw-r--r--  fs/ubifs/io.c       |   2
-rw-r--r--  fs/ubifs/journal.c  |   1
-rw-r--r--  fs/ubifs/orphan.c   |   2
-rw-r--r--  fs/ubifs/recovery.c | 164
-rw-r--r--  fs/ubifs/replay.c   |   3
-rw-r--r--  fs/ubifs/shrinker.c |   6
-rw-r--r--  fs/ubifs/super.c    |  42
-rw-r--r--  fs/ubifs/tnc.c      |   9
-rw-r--r--  fs/ubifs/ubifs.h    |   4
9 files changed, 136 insertions(+), 97 deletions(-)
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 166951e0dcd3..3be645e012c9 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 	ubifs_assert(wbuf->size % c->min_io_size == 0);
 	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
 	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(!c->space_fixup);
 	if (c->leb_size - wbuf->offs >= c->max_write_size)
 		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
 
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
 	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
 	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(!c->space_fixup);
 
 	if (c->ro_error)
 		return -EROFS;
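
The two io.c hunks enforce that nothing goes through the write paths while the free-space fixup is still pending. For illustration, a stand-alone sketch of that invariant — simplified, hypothetical types and helpers, not the real UBIFS code:

#include <stdio.h>

/* Hypothetical stand-ins -- not the real UBIFS structures or helpers. */
struct ctx { int space_fixup; };

static int do_fixup(struct ctx *c)
{
	/* stands in for ubifs_fixup_free_space(); clears the flag on success */
	c->space_fixup = 0;
	return 0;
}

static int write_node(struct ctx *c)
{
	/* mirrors the new ubifs_assert(!c->space_fixup) in both write paths */
	if (c->space_fixup) {
		fprintf(stderr, "BUG: node write while fixup is pending\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ctx c = { .space_fixup = 1 };	/* flag read at mount time */
	if (do_fixup(&c))			/* must finish first ... */
		return 1;
	return write_node(&c) ? 1 : 0;		/* ... then writes are legal */
}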
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 34b1679e6e3a..cef0460f4c54 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -669,6 +669,7 @@ out_free:
 
 out_release:
 	release_head(c, BASEHD);
+	kfree(dent);
 out_ro:
 	ubifs_ro_mode(c, err);
 	if (last_reference)
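
The journal.c hunk is a classic error-path leak fix: the out_release path used to fall through to out_ro without freeing the directory entry. A minimal compilable sketch of the pattern, with hypothetical stubs standing in for the UBIFS helpers:

#include <stdlib.h>

static int reserve_space(void) { return 0; }	/* hypothetical stub */
static int write_node(void)    { return -1; }	/* hypothetical stub */
static void release_head(void) { }		/* hypothetical stub */

static int journal_op(void)
{
	char *dent = malloc(64);

	if (!dent)
		return -1;
	if (reserve_space())
		goto out_free;
	if (write_node())
		goto out_release;
	free(dent);
	return 0;

out_free:
	free(dent);
	return -1;

out_release:
	release_head();
	free(dent);	/* the added kfree(dent) -- this path used to leak */
	/* the real code falls through to out_ro: and marks the FS read-only */
	return -1;
}

int main(void)
{
	return journal_op() ? 1 : 0;
}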
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index bd644bf587a8..a5422fffbd69 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
 		if (IS_ERR(sleb)) {
 			if (PTR_ERR(sleb) == -EUCLEAN)
 				sleb = ubifs_recover_leb(c, lnum, 0,
-							 c->sbuf, 0);
+							 c->sbuf, -1);
 			if (IS_ERR(sleb)) {
 				err = PTR_ERR(sleb);
 				break;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 731d9e2e7b50..783d8e0beb76 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 }
 
 /**
- * drop_last_node - drop the last node or group of nodes.
+ * drop_last_group - drop the last group of nodes.
  * @sleb: scanned LEB information
  * @offs: offset of dropped nodes is returned here
- * @grouped: non-zero if whole group of nodes have to be dropped
  *
  * This is a helper function for 'ubifs_recover_leb()' which drops the last
- * node of the scanned LEB or the last group of nodes if @grouped is not zero.
- * This function returns %1 if a node was dropped and %0 otherwise.
+ * group of nodes of the scanned LEB.
  */
-static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
+static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
 {
-	int dropped = 0;
-
 	while (!list_empty(&sleb->nodes)) {
 		struct ubifs_scan_node *snod;
 		struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
 				  list);
 		ch = snod->node;
 		if (ch->group_type != UBIFS_IN_NODE_GROUP)
-			return dropped;
-		dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+			break;
+
+		dbg_rcvry("dropping grouped node at %d:%d",
+			  sleb->lnum, snod->offs);
+		*offs = snod->offs;
+		list_del(&snod->list);
+		kfree(snod);
+		sleb->nodes_cnt -= 1;
+	}
+}
+
+/**
+ * drop_last_node - drop the last node.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ * @grouped: non-zero if whole group of nodes have to be dropped
+ *
+ * This is a helper function for 'ubifs_recover_leb()' which drops the last
+ * node of the scanned LEB.
+ */
+static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
+{
+	struct ubifs_scan_node *snod;
+
+	if (!list_empty(&sleb->nodes)) {
+		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+				  list);
+
+		dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
 		*offs = snod->offs;
 		list_del(&snod->list);
 		kfree(snod);
 		sleb->nodes_cnt -= 1;
-		dropped = 1;
-		if (!grouped)
-			break;
 	}
-	return dropped;
 }
 
 /**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * @lnum: LEB number
  * @offs: offset
  * @sbuf: LEB-sized buffer to use
- * @grouped: nodes may be grouped for recovery
+ * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
+ *         belong to any journal head)
  *
  * This function does a scan of a LEB, but caters for errors that might have
  * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
 * found, and a negative error code in case of failure.
 */
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-					 int offs, void *sbuf, int grouped)
+					 int offs, void *sbuf, int jhead)
 {
 	int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
+	int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
 	struct ubifs_scan_leb *sleb;
 	void *buf = sbuf + offs;
 
-	dbg_rcvry("%d:%d", lnum, offs);
+	dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
 
 	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
 	if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 		 * Scan quietly until there is an error from which we cannot
 		 * recover
 		 */
-		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
 		if (ret == SCANNED_A_NODE) {
 			/* A valid node, and not a padding node */
 			struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 	 * If nodes are grouped, always drop the incomplete group at
 	 * the end.
 	 */
-	drop_last_node(sleb, &offs, 1);
+	drop_last_group(sleb, &offs);
 
-	/*
-	 * While we are in the middle of the same min. I/O unit keep dropping
-	 * nodes. So basically, what we want is to make sure that the last min.
-	 * I/O unit where we saw the corruption is dropped completely with all
-	 * the uncorrupted node which may possibly sit there.
-	 *
-	 * In other words, let's name the min. I/O unit where the corruption
-	 * starts B, and the previous min. I/O unit A. The below code tries to
-	 * deal with a situation when half of B contains valid nodes or the end
-	 * of a valid node, and the second half of B contains corrupted data or
-	 * garbage. This means that UBIFS had been writing to B just before the
-	 * power cut happened. I do not know how realistic is this scenario
-	 * that half of the min. I/O unit had been written successfully and the
-	 * other half not, but this is possible in our 'failure mode emulation'
-	 * infrastructure at least.
-	 *
-	 * So what is the problem, why we need to drop those nodes? Whey can't
-	 * we just clean-up the second half of B by putting a padding node
-	 * there? We can, and this works fine with one exception which was
-	 * reproduced with power cut emulation testing and happens extremely
-	 * rarely. The description follows, but it is worth noting that that is
-	 * only about the GC head, so we could do this trick only if the bud
-	 * belongs to the GC head, but it does not seem to be worth an
-	 * additional "if" statement.
-	 *
-	 * So, imagine the file-system is full, we run GC which is moving valid
-	 * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
-	 * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
-	 * and will try to continue. Imagine that LEB X is currently the
-	 * dirtiest LEB, and the amount of used space in LEB Y is exactly the
-	 * same as amount of free space in LEB X.
-	 *
-	 * And a power cut happens when nodes are moved from LEB X to LEB Y. We
-	 * are here trying to recover LEB Y which is the GC head LEB. We find
-	 * the min. I/O unit B as described above. Then we clean-up LEB Y by
-	 * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
-	 * fails, because it cannot find a dirty LEB which could be GC'd into
-	 * LEB Y! Even LEB X does not match because the amount of valid nodes
-	 * there does not fit the free space in LEB Y any more! And this is
-	 * because of the padding node which we added to LEB Y. The
-	 * user-visible effect of this which I once observed and analysed is
-	 * that we cannot mount the file-system with -ENOSPC error.
-	 *
-	 * So obviously, to make sure that situation does not happen we should
-	 * free min. I/O unit B in LEB Y completely and the last used min. I/O
-	 * unit in LEB Y should be A. This is basically what the below code
-	 * tries to do.
-	 */
-	while (min_io_unit == round_down(offs, c->min_io_size) &&
-	       min_io_unit != offs &&
-	       drop_last_node(sleb, &offs, grouped));
+	if (jhead == GCHD) {
+		/*
+		 * If this LEB belongs to the GC head then while we are in the
+		 * middle of the same min. I/O unit keep dropping nodes. So
+		 * basically, what we want is to make sure that the last min.
+		 * I/O unit where we saw the corruption is dropped completely
+		 * with all the uncorrupted nodes which may possibly sit there.
+		 *
+		 * In other words, let's name the min. I/O unit where the
+		 * corruption starts B, and the previous min. I/O unit A. The
+		 * below code tries to deal with a situation when half of B
+		 * contains valid nodes or the end of a valid node, and the
+		 * second half of B contains corrupted data or garbage. This
+		 * means that UBIFS had been writing to B just before the power
+		 * cut happened. I do not know how realistic is this scenario
+		 * that half of the min. I/O unit had been written successfully
+		 * and the other half not, but this is possible in our 'failure
+		 * mode emulation' infrastructure at least.
+		 *
+		 * So what is the problem, why we need to drop those nodes? Why
+		 * can't we just clean-up the second half of B by putting a
+		 * padding node there? We can, and this works fine with one
+		 * exception which was reproduced with power cut emulation
+		 * testing and happens extremely rarely.
+		 *
+		 * Imagine the file-system is full, we run GC which starts
+		 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
+		 * the current GC head LEB). The @c->gc_lnum is -1, which means
+		 * that GC will retain LEB X and will try to continue. Imagine
+		 * that LEB X is currently the dirtiest LEB, and the amount of
+		 * used space in LEB Y is exactly the same as amount of free
+		 * space in LEB X.
+		 *
+		 * And a power cut happens when nodes are moved from LEB X to
+		 * LEB Y. We are here trying to recover LEB Y which is the GC
+		 * head LEB. We find the min. I/O unit B as described above.
+		 * Then we clean-up LEB Y by padding min. I/O unit. And later
+		 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
+		 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
+		 * does not match because the amount of valid nodes there does
+		 * not fit the free space in LEB Y any more! And this is
+		 * because of the padding node which we added to LEB Y. The
+		 * user-visible effect of this which I once observed and
+		 * analysed is that we cannot mount the file-system with
+		 * -ENOSPC error.
+		 *
+		 * So obviously, to make sure that situation does not happen we
+		 * should free min. I/O unit B in LEB Y completely and the last
+		 * used min. I/O unit in LEB Y should be A. This is basically
+		 * what the below code tries to do.
+		 */
+		while (offs > min_io_unit)
+			drop_last_node(sleb, &offs);
+	}
 
 	buf = sbuf + offs;
 	len = c->leb_size - offs;
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
 		}
 		ubifs_scan_destroy(sleb);
 	}
-	return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+	return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
 }
 
 /**
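
The net effect of the recovery.c rework is the much simpler GC-head loop "while (offs > min_io_unit) drop_last_node(sleb, &offs);". A stand-alone sketch of that round-down-and-drop logic, using made-up offsets — not UBIFS code:

#include <stdio.h>

static long round_down_to(long offs, long min_io_size)
{
	return offs - (offs % min_io_size);	/* like the kernel's round_down() */
}

int main(void)
{
	long min_io_size = 2048;
	long node_offs[] = { 0, 1024, 3072, 4096, 5120 };	/* node start offsets */
	int n = 5;
	long offs = 6144;		/* end offset after the last good node */
	long corruption = 5200;		/* first corrupted byte (hypothetical) */
	long min_io_unit = round_down_to(corruption, min_io_size);	/* unit B = 4096 */

	/* mirrors: while (offs > min_io_unit) drop_last_node(sleb, &offs); */
	while (n > 0 && offs > min_io_unit)
		offs = node_offs[--n];	/* drop the last node; offs = its start */

	/* prints 4096: unit B is now completely free, A is the last used unit */
	printf("LEB now ends at %ld\n", offs);
	return 0;
}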
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 6617280d1679..5e97161ce4d3 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
		 * these LEBs could possibly be written to at the power cut
		 * time.
		 */
-		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
-					 b->bud->jhead != GCHD);
+		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
	else
		sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
	if (IS_ERR(sleb))
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index ca953a945029..9e1d05666fed 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
 	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
 
 	if (nr == 0)
-		return clean_zn_cnt;
+		/*
+		 * Due to the way UBIFS updates the clean znode counter it may
+		 * temporarily be negative.
+		 */
+		return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
 
 	if (!clean_zn_cnt) {
 		/*
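
The shrinker fix boils down to clamping a transiently negative counter before reporting it as an object count. A tiny sketch, with a plain long standing in for the atomic global counter:

#include <stdio.h>

static long ubifs_clean_cnt = -3;	/* mid-update snapshot, hypothetically */

static long shrinker_count(void)
{
	long n = ubifs_clean_cnt;	/* like atomic_long_read() */

	/* the counter may be transiently negative; never report that */
	return n >= 0 ? n : 1;
}

int main(void)
{
	printf("reported object count: %ld\n", shrinker_count());	/* 1 */
	return 0;
}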
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1ab0d22e4c94..b5aeb5a8ebed 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
 
 		c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
 		c->jheads[i].wbuf.jhead = i;
+		c->jheads[i].grouped = 1;
 	}
 
 	c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
 	/*
 	 * Garbage Collector head likely contains long-term data and
-	 * does not need to be synchronized by timer.
+	 * does not need to be synchronized by timer. Also GC head nodes are
+	 * not grouped.
 	 */
 	c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
 	c->jheads[GCHD].wbuf.no_timer = 1;
+	c->jheads[GCHD].grouped = 0;
 
 	return 0;
 }
@@ -1284,12 +1287,25 @@
 	if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
 		ubifs_msg("recovery needed");
 		c->need_recovery = 1;
-		if (!c->ro_mount) {
-			err = ubifs_recover_inl_heads(c, c->sbuf);
-			if (err)
-				goto out_master;
-		}
-	} else if (!c->ro_mount) {
+	}
+
+	if (c->need_recovery && !c->ro_mount) {
+		err = ubifs_recover_inl_heads(c, c->sbuf);
+		if (err)
+			goto out_master;
+	}
+
+	err = ubifs_lpt_init(c, 1, !c->ro_mount);
+	if (err)
+		goto out_master;
+
+	if (!c->ro_mount && c->space_fixup) {
+		err = ubifs_fixup_free_space(c);
+		if (err)
+			goto out_master;
+	}
+
+	if (!c->ro_mount) {
 		/*
 		 * Set the "dirty" flag so that if we reboot uncleanly we
 		 * will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@
 		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
 		err = ubifs_write_master(c);
 		if (err)
-			goto out_master;
+			goto out_lpt;
 	}
 
-	err = ubifs_lpt_init(c, 1, !c->ro_mount);
-	if (err)
-		goto out_lpt;
-
 	err = dbg_check_idx_size(c, c->bi.old_idx_sz);
 	if (err)
 		goto out_lpt;
@@ -1396,12 +1408,6 @@
 	} else
 		ubifs_assert(c->lst.taken_empty_lebs > 0);
 
-	if (!c->ro_mount && c->space_fixup) {
-		err = ubifs_fixup_free_space(c);
-		if (err)
-			goto out_infos;
-	}
-
 	err = dbg_check_filesystem(c);
 	if (err)
 		goto out_infos;
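
Taken together, the super.c hunks reorder the mount path so the LPT is initialized and free space is fixed up before the first node write (marking the master node dirty) — which is what makes the new io.c assertions safe. A condensed, hypothetical sketch of the new ordering, with error handling collapsed and stand-in helpers:

static int recover_inl_heads(void)	{ return 0; }	/* stand-in */
static int lpt_init(void)		{ return 0; }	/* stand-in */
static int fixup_free_space(void)	{ return 0; }	/* stand-in */
static int write_master_dirty(void)	{ return 0; }	/* stand-in */

static int mount_sketch(int ro_mount, int need_recovery, int space_fixup)
{
	if (need_recovery && !ro_mount && recover_inl_heads())
		return -1;		/* recover index/LPT heads first */
	if (lpt_init())
		return -1;		/* LPT is now initialized earlier */
	if (!ro_mount && space_fixup && fixup_free_space())
		return -1;		/* fix up free space before ... */
	if (!ro_mount && write_master_dirty())
		return -1;		/* ... the first real node write */
	return 0;
}

int main(void)
{
	return mount_sketch(0, 1, 1) ? 1 : 0;
}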
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 8119b1fd8d94..91b4213dde84 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
  */
 void ubifs_tnc_close(struct ubifs_info *c)
 {
-	long clean_freed;
-
 	tnc_destroy_cnext(c);
 	if (c->zroot.znode) {
-		clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
-		atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
+		long n;
+
+		ubifs_destroy_tnc_subtree(c->zroot.znode);
+		n = atomic_long_read(&c->clean_zn_cnt);
+		atomic_long_sub(n, &ubifs_clean_zn_cnt);
 	}
 	kfree(c->gap_lebs);
 	kfree(c->ilebs);
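
The tnc.c fix changes what gets subtracted from the global clean-znode counter at unmount: this file-system's own counter, rather than whatever destroying the TNC subtree happened to return. A small sketch, with plain longs standing in for the atomics:

#include <stdio.h>

static long ubifs_clean_zn_cnt = 10;	/* global, across all mounted file-systems */

struct fs_info { long clean_zn_cnt; };	/* per-file-system counter */

static void tnc_close(struct fs_info *c)
{
	long n = c->clean_zn_cnt;	/* like atomic_long_read(&c->clean_zn_cnt) */

	/* subtract what *this* file-system contributed to the global count */
	ubifs_clean_zn_cnt -= n;
}

int main(void)
{
	struct fs_info c = { .clean_zn_cnt = 4 };

	tnc_close(&c);
	printf("global clean znodes left: %ld\n", ubifs_clean_zn_cnt);	/* 6 */
	return 0;
}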
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a70d7b4ffb25..f79983d6f860 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -722,12 +722,14 @@ struct ubifs_bud {
  * struct ubifs_jhead - journal head.
  * @wbuf: head's write-buffer
  * @buds_list: list of bud LEBs belonging to this journal head
+ * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
  *
  * Note, the @buds list is protected by the @c->buds_lock.
  */
 struct ubifs_jhead {
 	struct ubifs_wbuf wbuf;
 	struct list_head buds_list;
+	unsigned int grouped:1;
 };
 
 /**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
 int ubifs_recover_master_node(struct ubifs_info *c);
 int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-					 int offs, void *sbuf, int grouped);
+					 int offs, void *sbuf, int jhead);
 struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
					     int offs, void *sbuf);
 int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);