diff options
author | Richard Weinberger <richard@nod.at> | 2014-10-07 15:39:20 -0400 |
---|---|---|
committer | Richard Weinberger <richard@nod.at> | 2015-03-26 17:45:59 -0400 |
commit | d141a8ef21ab496ab6f8d188dfe7ca33e16fe798 (patch) | |
tree | 4b7da9b11fd006e78eedcdb45f1f1ae1f98bf7d0 | |
parent | a83832a7c8d023f446ec865926190951bd18a4b1 (diff) |
UBI: Fastmap: Remove eba_orphans logic
This logic is unnecessary: we treat protected PEBs also as used, so this
case must not happen.
If a PEB is found which is in the EBA table but not known as used,
it has to be reported as a fatal error.
Signed-off-by: Richard Weinberger <richard@nod.at>
-rw-r--r-- | drivers/mtd/ubi/fastmap.c | 91 |
1 file changed, 8 insertions, 83 deletions
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 9858bfb22314..8b95c48a002d 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c | |||
@@ -376,7 +376,6 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum) | |||
376 | * @pebs: an array of all PEB numbers in the to be scanned pool | 376 | * @pebs: an array of all PEB numbers in the to be scanned pool |
377 | * @pool_size: size of the pool (number of entries in @pebs) | 377 | * @pool_size: size of the pool (number of entries in @pebs) |
378 | * @max_sqnum: pointer to the maximal sequence number | 378 | * @max_sqnum: pointer to the maximal sequence number |
379 | * @eba_orphans: list of PEBs which need to be scanned | ||
380 | * @free: list of PEBs which are most likely free (and go into @ai->free) | 379 | * @free: list of PEBs which are most likely free (and go into @ai->free) |
381 | * | 380 | * |
382 | * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned. | 381 | * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned. |
@@ -384,12 +383,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum) | |||
384 | */ | 383 | */ |
385 | static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | 384 | static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, |
386 | int *pebs, int pool_size, unsigned long long *max_sqnum, | 385 | int *pebs, int pool_size, unsigned long long *max_sqnum, |
387 | struct list_head *eba_orphans, struct list_head *free) | 386 | struct list_head *free) |
388 | { | 387 | { |
389 | struct ubi_vid_hdr *vh; | 388 | struct ubi_vid_hdr *vh; |
390 | struct ubi_ec_hdr *ech; | 389 | struct ubi_ec_hdr *ech; |
391 | struct ubi_ainf_peb *new_aeb, *tmp_aeb; | 390 | struct ubi_ainf_peb *new_aeb; |
392 | int i, pnum, err, found_orphan, ret = 0; | 391 | int i, pnum, err, ret = 0; |
393 | 392 | ||
394 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 393 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
395 | if (!ech) | 394 | if (!ech) |
@@ -457,18 +456,6 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
457 | if (err == UBI_IO_BITFLIPS) | 456 | if (err == UBI_IO_BITFLIPS) |
458 | scrub = 1; | 457 | scrub = 1; |
459 | 458 | ||
460 | found_orphan = 0; | ||
461 | list_for_each_entry(tmp_aeb, eba_orphans, u.list) { | ||
462 | if (tmp_aeb->pnum == pnum) { | ||
463 | found_orphan = 1; | ||
464 | break; | ||
465 | } | ||
466 | } | ||
467 | if (found_orphan) { | ||
468 | list_del(&tmp_aeb->u.list); | ||
469 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | ||
470 | } | ||
471 | |||
472 | new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, | 459 | new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, |
473 | GFP_KERNEL); | 460 | GFP_KERNEL); |
474 | if (!new_aeb) { | 461 | if (!new_aeb) { |
@@ -543,10 +530,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, | |||
543 | struct ubi_attach_info *ai, | 530 | struct ubi_attach_info *ai, |
544 | struct ubi_fastmap_layout *fm) | 531 | struct ubi_fastmap_layout *fm) |
545 | { | 532 | { |
546 | struct list_head used, eba_orphans, free; | 533 | struct list_head used, free; |
547 | struct ubi_ainf_volume *av; | 534 | struct ubi_ainf_volume *av; |
548 | struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; | 535 | struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; |
549 | struct ubi_ec_hdr *ech; | ||
550 | struct ubi_fm_sb *fmsb; | 536 | struct ubi_fm_sb *fmsb; |
551 | struct ubi_fm_hdr *fmhdr; | 537 | struct ubi_fm_hdr *fmhdr; |
552 | struct ubi_fm_scan_pool *fmpl1, *fmpl2; | 538 | struct ubi_fm_scan_pool *fmpl1, *fmpl2; |
@@ -560,7 +546,6 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, | |||
560 | 546 | ||
561 | INIT_LIST_HEAD(&used); | 547 | INIT_LIST_HEAD(&used); |
562 | INIT_LIST_HEAD(&free); | 548 | INIT_LIST_HEAD(&free); |
563 | INIT_LIST_HEAD(&eba_orphans); | ||
564 | ai->min_ec = UBI_MAX_ERASECOUNTER; | 549 | ai->min_ec = UBI_MAX_ERASECOUNTER; |
565 | 550 | ||
566 | fmsb = (struct ubi_fm_sb *)(fm_raw); | 551 | fmsb = (struct ubi_fm_sb *)(fm_raw); |
@@ -728,28 +713,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, | |||
728 | } | 713 | } |
729 | } | 714 | } |
730 | 715 | ||
731 | /* This can happen if a PEB is already in an EBA known | ||
732 | * by this fastmap but the PEB itself is not in the used | ||
733 | * list. | ||
734 | * In this case the PEB can be within the fastmap pool | ||
735 | * or while writing the fastmap it was in the protection | ||
736 | * queue. | ||
737 | */ | ||
738 | if (!aeb) { | 716 | if (!aeb) { |
739 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, | 717 | ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum); |
740 | GFP_KERNEL); | 718 | goto fail_bad; |
741 | if (!aeb) { | ||
742 | ret = -ENOMEM; | ||
743 | |||
744 | goto fail; | ||
745 | } | ||
746 | |||
747 | aeb->lnum = j; | ||
748 | aeb->pnum = be32_to_cpu(fm_eba->pnum[j]); | ||
749 | aeb->ec = -1; | ||
750 | aeb->scrub = aeb->copy_flag = aeb->sqnum = 0; | ||
751 | list_add_tail(&aeb->u.list, &eba_orphans); | ||
752 | continue; | ||
753 | } | 719 | } |
754 | 720 | ||
755 | aeb->lnum = j; | 721 | aeb->lnum = j; |
@@ -762,49 +728,13 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, | |||
762 | dbg_bld("inserting PEB:%i (LEB %i) to vol %i", | 728 | dbg_bld("inserting PEB:%i (LEB %i) to vol %i", |
763 | aeb->pnum, aeb->lnum, av->vol_id); | 729 | aeb->pnum, aeb->lnum, av->vol_id); |
764 | } | 730 | } |
765 | |||
766 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
767 | if (!ech) { | ||
768 | ret = -ENOMEM; | ||
769 | goto fail; | ||
770 | } | ||
771 | |||
772 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, | ||
773 | u.list) { | ||
774 | int err; | ||
775 | |||
776 | if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { | ||
777 | ubi_err(ubi, "bad PEB in fastmap EBA orphan list"); | ||
778 | ret = UBI_BAD_FASTMAP; | ||
779 | kfree(ech); | ||
780 | goto fail; | ||
781 | } | ||
782 | |||
783 | err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); | ||
784 | if (err && err != UBI_IO_BITFLIPS) { | ||
785 | ubi_err(ubi, "unable to read EC header! PEB:%i err:%i", | ||
786 | tmp_aeb->pnum, err); | ||
787 | ret = err > 0 ? UBI_BAD_FASTMAP : err; | ||
788 | kfree(ech); | ||
789 | |||
790 | goto fail; | ||
791 | } else if (err == UBI_IO_BITFLIPS) | ||
792 | tmp_aeb->scrub = 1; | ||
793 | |||
794 | tmp_aeb->ec = be64_to_cpu(ech->ec); | ||
795 | assign_aeb_to_av(ai, tmp_aeb, av); | ||
796 | } | ||
797 | |||
798 | kfree(ech); | ||
799 | } | 731 | } |
800 | 732 | ||
801 | ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, | 733 | ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free); |
802 | &eba_orphans, &free); | ||
803 | if (ret) | 734 | if (ret) |
804 | goto fail; | 735 | goto fail; |
805 | 736 | ||
806 | ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, | 737 | ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free); |
807 | &eba_orphans, &free); | ||
808 | if (ret) | 738 | if (ret) |
809 | goto fail; | 739 | goto fail; |
810 | 740 | ||
@@ -817,7 +747,6 @@ static int ubi_attach_fastmap(struct ubi_device *ubi, | |||
817 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) | 747 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) |
818 | list_move_tail(&tmp_aeb->u.list, &ai->erase); | 748 | list_move_tail(&tmp_aeb->u.list, &ai->erase); |
819 | 749 | ||
820 | ubi_assert(list_empty(&eba_orphans)); | ||
821 | ubi_assert(list_empty(&free)); | 750 | ubi_assert(list_empty(&free)); |
822 | 751 | ||
823 | /* | 752 | /* |
@@ -839,10 +768,6 @@ fail: | |||
839 | list_del(&tmp_aeb->u.list); | 768 | list_del(&tmp_aeb->u.list); |
840 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | 769 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); |
841 | } | 770 | } |
842 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { | ||
843 | list_del(&tmp_aeb->u.list); | ||
844 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | ||
845 | } | ||
846 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { | 771 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { |
847 | list_del(&tmp_aeb->u.list); | 772 | list_del(&tmp_aeb->u.list); |
848 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | 773 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); |