 -rw-r--r--  drivers/mtd/ubi/ubi.h |  3 ++-
 -rw-r--r--  drivers/mtd/ubi/wl.c  | 34 +++++++++++++++++++++++-----------
 2 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7a33470c0416..bc13d14e02c4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -443,7 +443,8 @@ struct ubi_debug_info {
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *	     @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *	     @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
+ *	     @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ *	     and @fm_wl_pool fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  *	      new works from being submitted
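
The kerneldoc update above records the locking contract this patch introduces: @fm_pool and @fm_wl_pool are now manipulated only under @wl_lock. A minimal sketch of the resulting access pattern, distilled from the wl.c hunk below (not a compilable excerpt of the driver):

	spin_lock(&ubi->wl_lock);	/* pool state is only touched under wl_lock */
	if (ubi->fm_pool.used == ubi->fm_pool.size) {
		/* ubi_update_fastmap() can sleep, so the patch drops the
		 * spinlock around the call and re-takes it afterwards */
		spin_unlock(&ubi->wl_lock);
		ubi_update_fastmap(ubi);
		spin_lock(&ubi->wl_lock);
	}
	/* ... consume a PEB from the pool ... */
	spin_unlock(&ubi->wl_lock);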
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index bf66890fefad..2539a12140e7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -626,24 +626,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi)
 {
-	int ret;
+	int ret, retried = 0;
 	struct ubi_fm_pool *pool = &ubi->fm_pool;
 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
 
-	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
-	    wl_pool->used == wl_pool->size)
+again:
+	spin_lock(&ubi->wl_lock);
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronous. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
 		ubi_update_fastmap(ubi);
-
-	/* we got not a single free PEB */
-	if (!pool->size)
-		ret = -ENOSPC;
-	else {
-		spin_lock(&ubi->wl_lock);
-		ret = pool->pebs[pool->used++];
-		prot_queue_add(ubi, ubi->lookuptbl[ret]);
+		spin_lock(&ubi->wl_lock);
+	}
+
+	if (pool->used == pool->size) {
 		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		goto again;
 	}
 
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
 	return ret;
 }
 
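For readability, here is ubi_wl_get_peb() as it reads with the patch applied, assembled purely from the context and '+' lines of the hunk above (no surrounding file content added):

	int ubi_wl_get_peb(struct ubi_device *ubi)
	{
		int ret, retried = 0;
		struct ubi_fm_pool *pool = &ubi->fm_pool;
		struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

	again:
		spin_lock(&ubi->wl_lock);
		/* We check here also for the WL pool because at this point we can
		 * refill the WL pool synchronous. */
		if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
			spin_unlock(&ubi->wl_lock);
			ubi_update_fastmap(ubi);
			spin_lock(&ubi->wl_lock);
		}

		if (pool->used == pool->size) {
			spin_unlock(&ubi->wl_lock);
			if (retried) {
				ubi_err(ubi, "Unable to get a free PEB from user WL pool");
				ret = -ENOSPC;
				goto out;
			}
			retried = 1;
			goto again;
		}

		ubi_assert(pool->used < pool->size);
		ret = pool->pebs[pool->used++];
		prot_queue_add(ubi, ubi->lookuptbl[ret]);
		spin_unlock(&ubi->wl_lock);
	out:
		return ret;
	}

The single retry is the point of the change: if the pool is still empty after one synchronous ubi_update_fastmap() refill, the pools are genuinely exhausted, so the second pass returns -ENOSPC instead of looping forever.
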
@@ -656,7 +668,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
-	if (pool->used == pool->size || !pool->size) {
+	if (pool->used == pool->size) {
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
 		 * Let's fail here and refill/update it as soon as possible. */
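
Note that dropping the !pool->size test is safe: a zero-sized pool already satisfies pool->used == pool->size. And because get_peb_for_wl() runs in atomic context, it cannot refill synchronously the way ubi_wl_get_peb() now does; it fails and defers the fastmap update instead. A sketch of that fail-and-defer pattern, assuming the deferral is driven by the @fm_work_scheduled flag documented in the ubi.h hunk (ubi->fm_work is an assumed name for the work item; the mainline body may differ):

	if (pool->used == pool->size) {
		/* Atomic context: cannot call ubi_update_fastmap() here.
		 * Schedule the refill once and fail this request.
		 * ubi->fm_work is an assumed name for the work item. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];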