Diffstat (limited to 'drivers/mtd/ubi/build.c')
-rw-r--r--  drivers/mtd/ubi/build.c  62
1 file changed, 19 insertions, 43 deletions
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f49e49dc5928..65626c1c446d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,33 +672,7 @@ static int io_init(struct ubi_device *ubi)
         ubi->nor_flash = 1;
     }
 
-    /*
-     * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
-     * for these purposes, not @mtd->writesize. At the moment this does not
-     * matter for NAND, because currently @mtd->writebufsize is equivalent to
-     * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
-     * have @mtd->writebufsize which is multiple of @mtd->writesize.
-     *
-     * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
-     * UBI and UBIFS recovery algorithms rely on the fact that if there was
-     * an unclean power cut, then we can find offset of the last corrupted
-     * node, align the offset to @ubi->min_io_size, read the rest of the
-     * eraseblock starting from this offset, and check whether there are
-     * only 0xFF bytes. If yes, then we are probably dealing with a
-     * corruption caused by a power cut, if not, then this is probably some
-     * severe corruption.
-     *
-     * Thus, we have to use the maximum write unit size of the flash, which
-     * is @mtd->writebufsize, because @mtd->writesize is the minimum write
-     * size, not the maximum.
-     */
-    if (ubi->mtd->type == MTD_NANDFLASH)
-        ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
-    else if (ubi->mtd->type == MTD_NORFLASH)
-        ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
-
-    ubi->min_io_size = ubi->mtd->writebufsize;
-
+    ubi->min_io_size = ubi->mtd->writesize;
     ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
     /*
@@ -716,11 +690,25 @@ static int io_init(struct ubi_device *ubi)
     ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
     ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
 
+    ubi->max_write_size = ubi->mtd->writebufsize;
+    /*
+     * Maximum write size has to be greater or equivalent to min. I/O
+     * size, and be multiple of min. I/O size.
+     */
+    if (ubi->max_write_size < ubi->min_io_size ||
+        ubi->max_write_size % ubi->min_io_size ||
+        !is_power_of_2(ubi->max_write_size)) {
+        ubi_err("bad write buffer size %d for %d min. I/O unit",
+            ubi->max_write_size, ubi->min_io_size);
+        return -EINVAL;
+    }
+
     /* Calculate default aligned sizes of EC and VID headers */
     ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
     ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
     dbg_msg("min_io_size %d", ubi->min_io_size);
+    dbg_msg("max_write_size %d", ubi->max_write_size);
     dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
     dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
     dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
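The check added in this hunk rejects an MTD write-buffer size that cannot serve as @ubi->max_write_size. Below is a self-contained sketch of the same three conditions (at least the min. I/O unit, a multiple of it, and a power of two); the helper names and sample values are hypothetical, it is not the kernel code itself.

/* Standalone sketch of the max_write_size sanity check added above. */
#include <stdbool.h>
#include <stdio.h>

static bool is_pow_of_2(unsigned int n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

static int check_max_write_size(int max_write_size, int min_io_size)
{
    if (max_write_size < min_io_size ||
        max_write_size % min_io_size ||
        !is_pow_of_2(max_write_size))
        return -1;    /* io_init() returns -EINVAL in this case */
    return 0;
}

int main(void)
{
    /* e.g. a NOR chip with a 1024-byte write buffer and 256-byte writesize */
    printf("%d\n", check_max_write_size(1024, 256)); /* 0: accepted  */
    printf("%d\n", check_max_write_size(256, 1024)); /* -1: rejected */
    return 0;
}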
@@ -737,7 +725,7 @@ static int io_init(struct ubi_device *ubi)
     }
 
     /* Similar for the data offset */
-    ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+    ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
     ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
 
     dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
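For reference, @ubi->leb_start is the VID header offset plus the VID header size, rounded up to the next min. I/O unit boundary. A small worked example of that round-up follows, assuming a 64-byte VID header, a VID header offset of 2048 and a 2048-byte min. I/O unit (all illustrative values); ALIGN_UP here mirrors the kernel's ALIGN() for power-of-two alignments.

/* Worked example of the leb_start round-up done above. */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* a must be a power of 2 */

int main(void)
{
    int vid_hdr_offset = 2048;  /* assumed: second min. I/O unit of the PEB */
    int vid_hdr_size = 64;      /* assumed UBI_VID_HDR_SIZE */
    int min_io_size = 2048;

    int leb_start = ALIGN_UP(vid_hdr_offset + vid_hdr_size, min_io_size);
    printf("leb_start = %d\n", leb_start);  /* 4096: LEB data starts here */
    return 0;
}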
@@ -949,6 +937,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
     spin_lock_init(&ubi->volumes_lock);
 
     ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+    dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+    dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
 
     err = io_init(ubi);
     if (err)
@@ -963,13 +953,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
     if (!ubi->peb_buf2)
         goto out_free;
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-    mutex_init(&ubi->dbg_buf_mutex);
-    ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
-    if (!ubi->dbg_peb_buf)
-        goto out_free;
-#endif
-
     err = attach_by_scanning(ubi);
     if (err) {
         dbg_err("failed to attach by scanning, error %d", err);
@@ -1017,8 +1000,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
     * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
     */
     spin_lock(&ubi->wl_lock);
-    if (!DBG_DISABLE_BGT)
-        ubi->thread_enabled = 1;
+    ubi->thread_enabled = 1;
     wake_up_process(ubi->bgt_thread);
     spin_unlock(&ubi->wl_lock);
 
@@ -1035,9 +1017,6 @@ out_detach:
 out_free:
     vfree(ubi->peb_buf1);
     vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-    vfree(ubi->dbg_peb_buf);
-#endif
     if (ref)
         put_device(&ubi->dev);
     else
@@ -1108,9 +1087,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
     put_mtd_device(ubi->mtd);
     vfree(ubi->peb_buf1);
     vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-    vfree(ubi->dbg_peb_buf);
-#endif
     ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
     put_device(&ubi->dev);
     return 0;