author     Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/mtd/ubi/wl.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--	drivers/mtd/ubi/wl.c	54
1 file changed, 31 insertions, 23 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 97a435672eaf..ff2c4956eeff 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -161,7 +161,7 @@ struct ubi_work {
 	int torture;
 };
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 				     struct rb_root *root);
@@ -613,7 +613,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 	list_add_tail(&wrk->list, &ubi->works);
 	ubi_assert(ubi->works_count >= 0);
 	ubi->works_count += 1;
-	if (ubi->thread_enabled)
+	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
 		wake_up_process(ubi->bgt_thread);
 	spin_unlock(&ubi->wl_lock);
 }
@@ -745,7 +745,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
 	if (err && err != UBI_IO_BITFLIPS) {
-		if (err == UBI_IO_PEB_FREE) {
+		if (err == UBI_IO_FF) {
 			/*
 			 * We are trying to move PEB without a VID header. UBI
 			 * always write VID headers shortly after the PEB was
@@ -759,6 +759,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			dbg_wl("PEB %d has no VID header", e1->pnum);
 			protect = 1;
 			goto out_not_moved;
+		} else if (err == UBI_IO_FF_BITFLIPS) {
+			/*
+			 * The same situation as %UBI_IO_FF, but bit-flips were
+			 * detected. It is better to schedule this PEB for
+			 * scrubbing.
+			 */
+			dbg_wl("PEB %d has no VID header but has bit-flips",
+			       e1->pnum);
+			scrubbing = 1;
+			goto out_not_moved;
 		}
 
 		ubi_err("error %d while reading VID header from PEB %d",
@@ -1354,7 +1364,7 @@ int ubi_thread(void *u)
 
 		spin_lock(&ubi->wl_lock);
 		if (list_empty(&ubi->works) || ubi->ro_mode ||
-		    !ubi->thread_enabled) {
+		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock(&ubi->wl_lock);
 			schedule();
@@ -1468,22 +1478,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->lookuptbl[e->pnum] = e;
 	}
 
-	list_for_each_entry(seb, &si->corr, u.list) {
-		cond_resched();
-
-		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!e)
-			goto out_free;
-
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
-		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(ubi_wl_entry_slab, e);
-			goto out_free;
-		}
-	}
-
 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
@@ -1510,6 +1504,9 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
 		ubi_err("no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, WL_RESERVED_PEBS);
+		if (ubi->corr_peb_count)
+			ubi_err("%d PEBs are corrupted and not used",
+				ubi->corr_peb_count);
 		goto out_free;
 	}
 	ubi->avail_pebs -= WL_RESERVED_PEBS;
@@ -1564,7 +1561,7 @@ void ubi_wl_close(struct ubi_device *ubi)
 	kfree(ubi->lookuptbl);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
@@ -1573,7 +1570,8 @@ void ubi_wl_close(struct ubi_device *ubi)
  * @ec: the erase counter to check
  *
  * This function returns zero if the erase counter of physical eraseblock @pnum
- * is equivalent to @ec, and a negative error code if not or if an error occurred.
+ * is equivalent to @ec, and a negative error code if not or if an error
+ * occurred.
  */
 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
@@ -1581,6 +1579,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 	long long read_ec;
 	struct ubi_ec_hdr *ec_hdr;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
@@ -1617,6 +1618,9 @@ out_free:
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 				     struct rb_root *root)
 {
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	if (in_wl_tree(e, root))
 		return 0;
 
@@ -1639,6 +1643,9 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
 	struct ubi_wl_entry *p;
 	int i;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
 		list_for_each_entry(p, &ubi->pq[i], u.list)
 			if (p == e)
@@ -1649,4 +1656,5 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
 	ubi_dbg_dump_stack();
 	return -EINVAL;
 }
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+
+#endif /* CONFIG_MTD_UBI_DEBUG */