author     Richard Weinberger <richard@nod.at>    2014-10-02 09:00:35 -0400
committer  Richard Weinberger <richard@nod.at>    2015-03-26 16:24:24 -0400
commit     d59f21bebe9d0fda34027ff1afda4f2b0d5f1869
tree       4825c6a28771521a6eeff4e926d9b5a39ed7011a /drivers/mtd
parent     399a9feeac83c2f64138c438e41222a12dd71766
UBI: Fastmap: Fix races in ubi_wl_get_peb()
ubi_wl_get_peb() has two problems. First, it reads the pool
size and usage counters without any protection. While reading
a single value would be perfectly fine, it reads multiple
values and compares them. This is racy and can lead to
incorrect pool handling.
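As a minimal illustration (a userspace sketch with pthreads, not
the UBI code itself): the unlocked variant performs two separate
reads that can straddle a concurrent refill, while the locked
variant compares one consistent snapshot.

#include <pthread.h>

struct pool {
        pthread_mutex_t lock;
        int used;
        int size;
};

/* Racy: two separate unlocked reads. A concurrent refill between
 * them makes the comparison mix values from two different pool
 * states (e.g. the old "used" against the new "size"). */
static int pool_is_empty_racy(struct pool *p)
{
        return p->used == p->size;
}

/* Safe: both reads happen under the lock, so they belong to one
 * consistent snapshot -- the analogue of taking wl_lock below. */
static int pool_is_empty_locked(struct pool *p)
{
        int empty;

        pthread_mutex_lock(&p->lock);
        empty = (p->used == p->size);
        pthread_mutex_unlock(&p->lock);
        return empty;
}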
Second, ubi_update_fastmap() is called without wl_lock held, so
the used counter needs to be checked again before it is
incremented: another thread could have consumed all PEBs from
the pool in the meantime, and the counter would go beyond ->size.
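A condensed sketch of the flow the patch introduces (simplified
from the wl.c hunk below; error reporting trimmed). Because the
patch calls ubi_update_fastmap() with wl_lock dropped, the
emptiness check has to be repeated once the lock is re-taken:

        int ret, retried = 0;

again:
        spin_lock(&ubi->wl_lock);
        if (pool->used == pool->size) {
                spin_unlock(&ubi->wl_lock);
                ubi_update_fastmap(ubi);        /* refill; lock dropped */
                spin_lock(&ubi->wl_lock);
        }
        if (pool->used == pool->size) {         /* recheck: other threads may
                                                 * have drained the pool while
                                                 * the lock was not held */
                spin_unlock(&ubi->wl_lock);
                if (retried)
                        return -ENOSPC;         /* give it exactly one retry */
                retried = 1;
                goto again;
        }
        ret = pool->pebs[pool->used++];
        spin_unlock(&ubi->wl_lock);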
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/ubi.h |  3 ++-
-rw-r--r--  drivers/mtd/ubi/wl.c  | 34 +++++++++++++++++++++++-----------
2 files changed, 25 insertions, 12 deletions
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7a33470c0416..bc13d14e02c4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -443,7 +443,8 @@ struct ubi_debug_info {
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *           @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
+ *           @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ *           and @fm_wl_pool fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  *            new works from being submitted
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index bf66890fefad..2539a12140e7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -626,24 +626,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi)
 {
-        int ret;
+        int ret, retried = 0;
         struct ubi_fm_pool *pool = &ubi->fm_pool;
         struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
 
-        if (!pool->size || !wl_pool->size || pool->used == pool->size ||
-            wl_pool->used == wl_pool->size)
+again:
+        spin_lock(&ubi->wl_lock);
+        /* We check here also for the WL pool because at this point we can
+         * refill the WL pool synchronous. */
+        if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+                spin_unlock(&ubi->wl_lock);
                 ubi_update_fastmap(ubi);
-
-        /* we got not a single free PEB */
-        if (!pool->size)
-                ret = -ENOSPC;
-        else {
-                spin_lock(&ubi->wl_lock);
-                ret = pool->pebs[pool->used++];
-                prot_queue_add(ubi, ubi->lookuptbl[ret]);
+                spin_lock(&ubi->wl_lock);
+        }
+
+        if (pool->used == pool->size) {
                 spin_unlock(&ubi->wl_lock);
+                if (retried) {
+                        ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+                        ret = -ENOSPC;
+                        goto out;
+                }
+                retried = 1;
+                goto again;
         }
 
+        ubi_assert(pool->used < pool->size);
+        ret = pool->pebs[pool->used++];
+        prot_queue_add(ubi, ubi->lookuptbl[ret]);
+        spin_unlock(&ubi->wl_lock);
+out:
         return ret;
 }
 
@@ -656,7 +668,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
         int pnum;
 
-        if (pool->used == pool->size || !pool->size) {
+        if (pool->used == pool->size) {
                 /* We cannot update the fastmap here because this
                  * function is called in atomic context.
                  * Let's fail here and refill/update it as soon as possible. */
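
The second hunk works under the constraint stated in its comment:
get_peb_for_wl() runs in atomic context, so it cannot trigger a
refill itself. A rough sketch of that fail-fast convention follows
(hypothetical helper name, not from this patch; the real refill is
done later from sleepable context via ubi_update_fastmap() and
ubi_refill_pools()):

/* Rough sketch (hypothetical helper): in atomic context the pool
 * consumer must fail fast instead of sleeping to refill; callers
 * defer the refill to a later sleepable context. */
static int pool_take_atomic(struct ubi_fm_pool *pool)
{
        if (pool->used == pool->size)   /* empty, and we may not sleep */
                return -ENOSPC;
        return pool->pebs[pool->used++];
}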