about summary refs log tree commit diff stats
diff options
context:
space:
mode:
 drivers/mtd/ubi/eba.c        |  4 ++--
 drivers/mtd/ubi/fastmap-wl.c |  6 ++++--
 drivers/mtd/ubi/fastmap.c    | 14 ++++++++----
 drivers/mtd/ubi/wl.c         | 20 +++++++++++------
 4 files changed, 30 insertions(+), 14 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ddf4e63eed76..95c4048a371e 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1210,6 +1210,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	struct ubi_volume *vol;
 	uint32_t crc;
 
+	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
 	vol_id = be32_to_cpu(vid_hdr->vol_id);
 	lnum = be32_to_cpu(vid_hdr->lnum);
 
@@ -1352,9 +1354,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	}
 
 	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
-	down_read(&ubi->fm_eba_sem);
 	vol->eba_tbl->entries[lnum].pnum = to;
-	up_read(&ubi->fm_eba_sem);
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999dddba..4f0bd6b4422a 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
+	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
 	if (pool->used == pool->size) {
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 
 	wrk->anchor = 1;
 	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
+	__schedule_ubi_work(ubi, wrk);
 	return 0;
 }
 
@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
 	spin_unlock(&ubi->wl_lock);
 
 	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
-	return schedule_erase(ubi, e, vol_id, lnum, torture);
+	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
 }
 
 /**
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 4adffb893376..8bd468332ec0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1492,22 +1492,30 @@ int ubi_update_fastmap(struct ubi_device *ubi)
 	struct ubi_wl_entry *tmp_e;
 
 	down_write(&ubi->fm_protect);
+	down_write(&ubi->work_sem);
+	down_write(&ubi->fm_eba_sem);
 
 	ubi_refill_pools(ubi);
 
 	if (ubi->ro_mode || ubi->fm_disabled) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return 0;
 	}
 
 	ret = ubi_ensure_anchor_pebs(ubi);
 	if (ret) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return ret;
 	}
 
 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
 	if (!new_fm) {
+		up_write(&ubi->fm_eba_sem);
+		up_write(&ubi->work_sem);
 		up_write(&ubi->fm_protect);
 		return -ENOMEM;
 	}
@@ -1616,16 +1624,14 @@ int ubi_update_fastmap(struct ubi_device *ubi)
 		new_fm->e[0] = tmp_e;
 	}
 
-	down_write(&ubi->work_sem);
-	down_write(&ubi->fm_eba_sem);
 	ret = ubi_write_fastmap(ubi, new_fm);
-	up_write(&ubi->fm_eba_sem);
-	up_write(&ubi->work_sem);
 
 	if (ret)
 		goto err;
 
 out_unlock:
+	up_write(&ubi->fm_eba_sem);
+	up_write(&ubi->work_sem);
 	up_write(&ubi->fm_protect);
 	kfree(old_fm);
 	return ret;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 19206ad677b2..b5b8cd6f481c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
  * failure.
  */
 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  int vol_id, int lnum, int torture)
+			  int vol_id, int lnum, int torture, bool nested)
 {
 	struct ubi_work *wl_wrk;
 
@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	wl_wrk->lnum = lnum;
 	wl_wrk->torture = torture;
 
-	schedule_ubi_work(ubi, wl_wrk);
+	if (nested)
+		__schedule_ubi_work(ubi, wl_wrk);
+	else
+		schedule_ubi_work(ubi, wl_wrk);
 	return 0;
 }
 
@@ -663,6 +666,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	vid_hdr = ubi_get_vid_hdr(vidb);
 
+	down_read(&ubi->fm_eba_sem);
 	mutex_lock(&ubi->move_mutex);
 	spin_lock(&ubi->wl_lock);
 	ubi_assert(!ubi->move_from && !ubi->move_to);
@@ -893,6 +897,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	dbg_wl("done");
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	return 0;
 
 	/*
@@ -943,6 +948,7 @@ out_not_moved:
 	}
 
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	return 0;
 
 out_error:
@@ -964,6 +970,7 @@ out_error:
 out_ro:
 	ubi_ro_mode(ubi);
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	ubi_assert(err != 0);
 	return err < 0 ? err : -EIO;
 
@@ -971,6 +978,7 @@ out_cancel:
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 	mutex_unlock(&ubi->move_mutex);
+	up_read(&ubi->fm_eba_sem);
 	ubi_free_vid_buf(vidb);
 	return 0;
 }
@@ -1093,7 +1101,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 		int err1;
 
 		/* Re-schedule the LEB for erasure */
-		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
 		if (err1) {
 			wl_entry_destroy(ubi, e);
 			err = err1;
@@ -1274,7 +1282,7 @@ retry:
 	}
 	spin_unlock(&ubi->wl_lock);
 
-	err = schedule_erase(ubi, e, vol_id, lnum, torture);
+	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
 	if (err) {
 		spin_lock(&ubi->wl_lock);
 		wl_tree_add(e, &ubi->used);
@@ -1565,7 +1573,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 			wl_entry_destroy(ubi, e);
 			goto out_free;
 		}
@@ -1644,7 +1652,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		e->ec = aeb->ec;
 		ubi_assert(!ubi->lookuptbl[e->pnum]);
 		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 			wl_entry_destroy(ubi, e);
 			goto out_free;
 		}