author		Richard Weinberger <richard@nod.at>	2014-09-23 13:29:05 -0400
committer	Richard Weinberger <richard@nod.at>	2015-03-26 14:30:54 -0400
commit		19371d73c9bd31a8e634ec5a80fc19fcd7714481
tree		e04e6910c0c98500f74afb8b92cb060404518df1
parent		ab6de685221a95392e46d75d0651ac08b8a57689
UBI: Fastmap: Ensure that only one fastmap work is scheduled
If the WL pool runs out of PEBs we schedule a fastmap write to refill it
as soon as possible. Ensure that only one such write is scheduled at a
time, otherwise we might end up in a fastmap write storm, because writing
the fastmap can schedule another write if bitflips are detected.

Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Tanya Brokhman <tlinder@codeaurora.org>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
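The patch below guards schedule_work() with an fm_work_scheduled flag that is set under wl_lock and cleared by the work function once the fastmap update has finished. As a rough illustration only (this is not the kernel code), the following minimal user-space C sketch shows the same "schedule at most once" pattern, with a pthread mutex standing in for wl_lock; the names fake_ubi, try_schedule_refill and refill_done are hypothetical and not part of the patch.

/*
 * Minimal user-space sketch of the guard-flag pattern used by this patch.
 * A flag protected by a lock ensures at most one refill work item is
 * pending at any time.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ubi {
	pthread_mutex_t lock;	/* stands in for ubi->wl_lock */
	bool work_scheduled;	/* stands in for ubi->fm_work_scheduled */
};

/* Called when the pool runs dry: schedule a refill only if none is pending. */
static bool try_schedule_refill(struct fake_ubi *ubi)
{
	bool scheduled = false;

	pthread_mutex_lock(&ubi->lock);
	if (!ubi->work_scheduled) {
		ubi->work_scheduled = true;
		scheduled = true;	/* a real system would queue work here */
	}
	pthread_mutex_unlock(&ubi->lock);

	return scheduled;
}

/* Called by the worker once the refill has finished. */
static void refill_done(struct fake_ubi *ubi)
{
	pthread_mutex_lock(&ubi->lock);
	ubi->work_scheduled = false;
	pthread_mutex_unlock(&ubi->lock);
}

int main(void)
{
	struct fake_ubi ubi = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.work_scheduled = false,
	};

	/* Only the first of these back-to-back requests schedules work. */
	printf("first request scheduled:  %d\n", try_schedule_refill(&ubi));
	printf("second request scheduled: %d\n", try_schedule_refill(&ubi));

	refill_done(&ubi);
	printf("after completion, scheduled again: %d\n", try_schedule_refill(&ubi));

	return 0;
}

Built with cc -pthread, the first call schedules work, the second is a no-op, and only after refill_done() can a new request be scheduled, which mirrors the behaviour the patch establishes for fastmap refills.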
-rw-r--r--	drivers/mtd/ubi/ubi.h	4
-rw-r--r--	drivers/mtd/ubi/wl.c	8
2 files changed, 10 insertions, 2 deletions
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 2251a6c4c8fa..7a33470c0416 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -431,6 +431,7 @@ struct ubi_debug_info {
  * @fm_size: fastmap size in bytes
  * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
  * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -442,7 +443,7 @@ struct ubi_debug_info {
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *	     @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *	     @erroneous, and @erroneous_peb_count fields
+ *	     @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  *            new works from being submitted
@@ -537,6 +538,7 @@ struct ubi_device {
 	void *fm_buf;
 	size_t fm_size;
 	struct work_struct fm_work;
+	int fm_work_scheduled;
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 0bd92d816391..ae174f4ed674 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
 {
 	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
 	ubi_update_fastmap(ubi);
+	spin_lock(&ubi->wl_lock);
+	ubi->fm_work_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
 }
 
 /**
@@ -657,7 +660,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
 		 * Let's fail here and refill/update it as soon as possible. */
-		schedule_work(&ubi->fm_work);
+		if (!ubi->fm_work_scheduled) {
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
 		return NULL;
 	} else {
 		pnum = pool->pebs[pool->used++];