author    Richard Weinberger <richard@nod.at>    2016-06-23 13:30:38 -0400
committer Richard Weinberger <richard@nod.at>    2016-07-29 17:32:01 -0400
commit    4946784bd3924b1374f05eebff2fd68660bae866 (patch)
tree      309fe1d3d97aeabbc6df3b8b40b89b3be20f839d /drivers/mtd
parent    bc743f34dfa011e62edd0ea4ae8455be06c083b5 (diff)
ubi: Make volume resize power cut aware
When the volume resize operation shrinks a volume, LEBs will be unmapped. Since unmapping will not erase these LEBs immediately, we have to wait for that operation to finish. Otherwise, in case of a power cut right after writing the new volume table, the UBI attach process can find more LEBs than the volume table knows about. This will render the UBI image unattachable.

Fix this issue by waiting for the erase to complete and writing the new volume table afterward.

Cc: <stable@vger.kernel.org>
Reported-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
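For context, the shrink path this patch hardens is normally entered through the UBI_IOCRSVOL ioctl on the UBI character device, which is serviced by ubi_resize_volume(). Below is a minimal userspace sketch of issuing such a shrink; the device node /dev/ubi0, volume id 0 and target size of 1 MiB are illustrative examples, not taken from this commit.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>	/* struct ubi_rsvol_req, UBI_IOCRSVOL */

int main(void)
{
	/* Example request: shrink volume 0 to 1 MiB. */
	struct ubi_rsvol_req req = {
		.vol_id = 0,
		.bytes  = 1024 * 1024,
	};
	int fd = open("/dev/ubi0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ubi0");
		return 1;
	}

	/* The kernel handles this request in ubi_resize_volume(). */
	if (ioctl(fd, UBI_IOCRSVOL, &req) < 0) {
		perror("UBI_IOCRSVOL");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

In practice the same request is issued by the ubirsvol tool from mtd-utils; a power cut during such a shrink, after the new volume table is written but before the unmapped LEBs are erased, is the window this patch closes.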
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/vmt.c  |  25
1 file changed, 18 insertions, 7 deletions
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 10059dfdc1b6..0138f526474a 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 		spin_unlock(&ubi->volumes_lock);
 	}
 
-	/* Change volume table record */
-	vtbl_rec = ubi->vtbl[vol_id];
-	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
-	if (err)
-		goto out_acc;
-
 	if (pebs < 0) {
 		for (i = 0; i < -pebs; i++) {
 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 		spin_unlock(&ubi->volumes_lock);
 	}
 
+	/*
+	 * When we shrink a volume we have to flush all pending (erase) work.
+	 * Otherwise it can happen that upon next attach UBI finds a LEB with
+	 * lnum > highest_lnum and refuses to attach.
+	 */
+	if (pebs < 0) {
+		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+		if (err)
+			goto out_acc;
+	}
+
+	/* Change volume table record */
+	vtbl_rec = ubi->vtbl[vol_id];
+	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+	if (err)
+		goto out_acc;
+
 	vol->reserved_pebs = reserved_pebs;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		vol->used_ebs = reserved_pebs;
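After a successful resize, the new reservation is visible through UBI's sysfs attributes; the reserved_ebs attribute is backed by the vol->reserved_pebs value updated above. A small sketch to confirm the result, assuming the standard /sys/class/ubi layout and the example volume ubi0_0:

#include <stdio.h>

int main(void)
{
	/* Example path: UBI device 0, volume 0. */
	FILE *f = fopen("/sys/class/ubi/ubi0_0/reserved_ebs", "r");
	long reserved_ebs;

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* The attribute reports how many PEBs are reserved for the volume. */
	if (fscanf(f, "%ld", &reserved_ebs) == 1)
		printf("reserved_ebs = %ld\n", reserved_ebs);

	fclose(f);
	return 0;
}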