Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-lib.c     2
-rw-r--r--  drivers/mtd/nand/hisi504_nand.c           3
-rw-r--r--  drivers/mtd/ubi/attach.c                 73
-rw-r--r--  drivers/mtd/ubi/build.c                  29
-rw-r--r--  drivers/mtd/ubi/cdev.c                    2
-rw-r--r--  drivers/mtd/ubi/debug.c                 100
-rw-r--r--  drivers/mtd/ubi/debug.h                  12
-rw-r--r--  drivers/mtd/ubi/eba.c                    57
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c            362
-rw-r--r--  drivers/mtd/ubi/fastmap.c               443
-rw-r--r--  drivers/mtd/ubi/io.c                      6
-rw-r--r--  drivers/mtd/ubi/ubi-media.h               2
-rw-r--r--  drivers/mtd/ubi/ubi.h                    85
-rw-r--r--  drivers/mtd/ubi/wl.c                    587
-rw-r--r--  drivers/mtd/ubi/wl.h                     28
15 files changed, 1075 insertions(+), 716 deletions(-)
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 27f272ed502a..43fa16b5f510 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -1105,7 +1105,7 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
 		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
 		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
 	} else
-		dev_err(this->dev, "unknow arch.\n");
+		dev_err(this->dev, "unknown arch.\n");
 	return reg & mask;
 }
 
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 289ad3ac3e80..8dcc7b8fee40 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -758,8 +758,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 
 	hisi_nfc_host_init(host);
 
-	ret = devm_request_irq(dev, irq, hinfc_irq_handle, IRQF_DISABLED,
-			       "nandc", host);
+	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ\n");
 		goto err_res;
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 9d2e16f3150a..68eea5befaf1 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
 		second_is_newer = !second_is_newer;
 	} else {
 		dbg_bld("PEB %d CRC is OK", pnum);
-		bitflips = !!err;
+		bitflips |= !!err;
 	}
 	mutex_unlock(&ubi->buf_mutex);
 
@@ -1301,6 +1301,30 @@ out_ech:
 	return err;
 }
 
+static struct ubi_attach_info *alloc_ai(void)
+{
+	struct ubi_attach_info *ai;
+
+	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+	if (!ai)
+		return ai;
+
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+					       sizeof(struct ubi_ainf_peb),
+					       0, 0, NULL);
+	if (!ai->aeb_slab_cache) {
+		kfree(ai);
+		ai = NULL;
+	}
+
+	return ai;
+}
+
 #ifdef CONFIG_MTD_UBI_FASTMAP
 
 /**
@@ -1313,7 +1337,7 @@ out_ech:
  * UBI_NO_FASTMAP denotes that no fastmap was found.
  * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
  */
-static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
 {
 	int err, pnum, fm_anchor = -1;
 	unsigned long long max_sqnum = 0;
@@ -1334,7 +1358,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		cond_resched();
 
 		dbg_gen("process PEB %d", pnum);
-		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+		err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
 		if (err < 0)
 			goto out_vidh;
 
@@ -1350,7 +1374,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
 	if (fm_anchor < 0)
 		return UBI_NO_FASTMAP;
 
-	return ubi_scan_fastmap(ubi, ai, fm_anchor);
+	destroy_ai(*ai);
+	*ai = alloc_ai();
+	if (!*ai)
+		return -ENOMEM;
+
+	return ubi_scan_fastmap(ubi, *ai, fm_anchor);
 
 out_vidh:
 	ubi_free_vid_hdr(ubi, vidh);
@@ -1362,30 +1391,6 @@ out:
 
 #endif
 
-static struct ubi_attach_info *alloc_ai(const char *slab_name)
-{
-	struct ubi_attach_info *ai;
-
-	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
-	if (!ai)
-		return ai;
-
-	INIT_LIST_HEAD(&ai->corr);
-	INIT_LIST_HEAD(&ai->free);
-	INIT_LIST_HEAD(&ai->erase);
-	INIT_LIST_HEAD(&ai->alien);
-	ai->volumes = RB_ROOT;
-	ai->aeb_slab_cache = kmem_cache_create(slab_name,
-					       sizeof(struct ubi_ainf_peb),
-					       0, 0, NULL);
-	if (!ai->aeb_slab_cache) {
-		kfree(ai);
-		ai = NULL;
-	}
-
-	return ai;
-}
-
 /**
  * ubi_attach - attach an MTD device.
  * @ubi: UBI device descriptor
@@ -1399,7 +1404,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 	int err;
 	struct ubi_attach_info *ai;
 
-	ai = alloc_ai("ubi_aeb_slab_cache");
+	ai = alloc_ai();
 	if (!ai)
 		return -ENOMEM;
 
@@ -1413,11 +1418,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 	if (force_scan)
 		err = scan_all(ubi, ai, 0);
 	else {
-		err = scan_fast(ubi, ai);
-		if (err > 0) {
+		err = scan_fast(ubi, &ai);
+		if (err > 0 || mtd_is_eccerr(err)) {
 			if (err != UBI_NO_FASTMAP) {
 				destroy_ai(ai);
-				ai = alloc_ai("ubi_aeb_slab_cache2");
+				ai = alloc_ai();
 				if (!ai)
 					return -ENOMEM;
 
@@ -1453,10 +1458,10 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 		goto out_wl;
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-	if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
 		struct ubi_attach_info *scan_ai;
 
-		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+		scan_ai = alloc_ai();
 		if (!scan_ai) {
 			err = -ENOMEM;
 			goto out_wl;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index ba01a8d22d28..9690cf9aaef5 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -81,6 +81,7 @@ static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
 #ifdef CONFIG_MTD_UBI_FASTMAP
 /* UBI module parameter to enable fastmap automatically on non-fastmap images */
 static bool fm_autoconvert;
+static bool fm_debug;
 #endif
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
@@ -154,23 +155,22 @@ static struct device_attribute dev_mtd_num =
  */
 int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
 {
+	int ret;
 	struct ubi_notification nt;
 
 	ubi_do_get_device_info(ubi, &nt.di);
 	ubi_do_get_volume_info(ubi, vol, &nt.vi);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
 	switch (ntype) {
 	case UBI_VOLUME_ADDED:
 	case UBI_VOLUME_REMOVED:
 	case UBI_VOLUME_RESIZED:
 	case UBI_VOLUME_RENAMED:
-		if (ubi_update_fastmap(ubi)) {
-			ubi_err(ubi, "Unable to update fastmap!");
-			ubi_ro_mode(ubi);
-		}
+		ret = ubi_update_fastmap(ubi);
+		if (ret)
+			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
 	}
-#endif
+
 	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
 }
 
@@ -950,8 +950,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
 		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
 
-	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
 	ubi->fm_disabled = !fm_autoconvert;
+	if (fm_debug)
+		ubi_enable_dbg_chk_fastmap(ubi);
 
 	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
 	    <= UBI_FM_MAX_START) {
@@ -970,8 +972,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	mutex_init(&ubi->ckvol_mutex);
 	mutex_init(&ubi->device_mutex);
 	spin_lock_init(&ubi->volumes_lock);
-	mutex_init(&ubi->fm_mutex);
-	init_rwsem(&ubi->fm_sem);
+	init_rwsem(&ubi->fm_protect);
+	init_rwsem(&ubi->fm_eba_sem);
 
 	ubi_msg(ubi, "attaching mtd%d", mtd->index);
 
@@ -1115,8 +1117,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	/* If we don't write a new fastmap at detach time we lose all
-	 * EC updates that have been made since the last written fastmap. */
-	ubi_update_fastmap(ubi);
+	 * EC updates that have been made since the last written fastmap.
+	 * In case of fastmap debugging we omit the update to simulate an
+	 * unclean shutdown. */
+	if (!ubi_dbg_chk_fastmap(ubi))
+		ubi_update_fastmap(ubi);
 #endif
 	/*
 	 * Before freeing anything, we have to stop the background thread to
@@ -1501,6 +1506,8 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
 #ifdef CONFIG_MTD_UBI_FASTMAP
 module_param(fm_autoconvert, bool, 0644);
 MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
 #endif
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index d647e504f9b1..d16fccf79179 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
 		/* Validate the request */
 		err = -EINVAL;
 		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
-		    req.bytes < 0 || req.lnum >= vol->usable_leb_size)
+		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
 			break;
 
 		err = get_exclusive(desc);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 7335c9ff9d99..b077e43b5ba9 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -263,7 +263,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
 	struct dentry *dent = file->f_path.dentry;
 	struct ubi_device *ubi;
 	struct ubi_debug_info *d;
-	char buf[3];
+	char buf[8];
 	int val;
 
 	ubi = ubi_get_device(ubi_num);
@@ -275,12 +275,30 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
 		val = d->chk_gen;
 	else if (dent == d->dfs_chk_io)
 		val = d->chk_io;
+	else if (dent == d->dfs_chk_fastmap)
+		val = d->chk_fastmap;
 	else if (dent == d->dfs_disable_bgt)
 		val = d->disable_bgt;
 	else if (dent == d->dfs_emulate_bitflips)
 		val = d->emulate_bitflips;
 	else if (dent == d->dfs_emulate_io_failures)
 		val = d->emulate_io_failures;
+	else if (dent == d->dfs_emulate_power_cut) {
+		snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	} else if (dent == d->dfs_power_cut_min) {
+		snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	} else if (dent == d->dfs_power_cut_max) {
+		snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+		count = simple_read_from_buffer(user_buf, count, ppos,
+						buf, strlen(buf));
+		goto out;
+	}
 	else {
 		count = -EINVAL;
 		goto out;
@@ -309,7 +327,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
 	struct ubi_device *ubi;
 	struct ubi_debug_info *d;
 	size_t buf_size;
-	char buf[8];
+	char buf[8] = {0};
 	int val;
 
 	ubi = ubi_get_device(ubi_num);
@@ -323,6 +341,21 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
 		goto out;
 	}
 
+	if (dent == d->dfs_power_cut_min) {
+		if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+			count = -EINVAL;
+		goto out;
+	} else if (dent == d->dfs_power_cut_max) {
+		if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+			count = -EINVAL;
+		goto out;
+	} else if (dent == d->dfs_emulate_power_cut) {
+		if (kstrtoint(buf, 0, &val) != 0)
+			count = -EINVAL;
+		d->emulate_power_cut = val;
+		goto out;
+	}
+
 	if (buf[0] == '1')
 		val = 1;
 	else if (buf[0] == '0')
@@ -336,6 +369,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
 		d->chk_gen = val;
 	else if (dent == d->dfs_chk_io)
 		d->chk_io = val;
+	else if (dent == d->dfs_chk_fastmap)
+		d->chk_fastmap = val;
 	else if (dent == d->dfs_disable_bgt)
 		d->disable_bgt = val;
 	else if (dent == d->dfs_emulate_bitflips)
@@ -406,6 +441,13 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 		goto out_remove;
 	d->dfs_chk_io = dent;
 
+	fname = "chk_fastmap";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_chk_fastmap = dent;
+
 	fname = "tst_disable_bgt";
 	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
 				   &dfs_fops);
@@ -427,6 +469,27 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 		goto out_remove;
 	d->dfs_emulate_io_failures = dent;
 
+	fname = "tst_emulate_power_cut";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_emulate_power_cut = dent;
+
+	fname = "tst_emulate_power_cut_min";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_power_cut_min = dent;
+
+	fname = "tst_emulate_power_cut_max";
+	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+				   &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	d->dfs_power_cut_max = dent;
+
 	return 0;
 
 out_remove:
@@ -447,3 +510,36 @@ void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 	if (IS_ENABLED(CONFIG_DEBUG_FS))
 		debugfs_remove_recursive(ubi->dbg.dfs_dir);
 }
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+	unsigned int range;
+
+	if ((ubi->dbg.emulate_power_cut & caller) == 0)
+		return 0;
+
+	if (ubi->dbg.power_cut_counter == 0) {
+		ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+		if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+			range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+			ubi->dbg.power_cut_counter += prandom_u32() % range;
+		}
+		return 0;
+	}
+
+	ubi->dbg.power_cut_counter--;
+	if (ubi->dbg.power_cut_counter)
+		return 0;
+
+	ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+	ubi_ro_mode(ubi);
+	return 1;
+}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index cba89fcd1587..eb8985e5c178 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -127,4 +127,16 @@ static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
 {
 	return ubi->dbg.chk_gen;
 }
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+	return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+	ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
 #endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index da4c79259f67..51bca035cd83 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -340,9 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
 
 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
-	down_read(&ubi->fm_sem);
+	down_read(&ubi->fm_eba_sem);
 	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
 out_unlock:
@@ -425,9 +425,10 @@ retry:
 			ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
 				 pnum, vol_id, lnum);
 			err = -EBADMSG;
-		} else
+		} else {
 			err = -EINVAL;
 			ubi_ro_mode(ubi);
+		}
 		}
 		goto out_free;
 	} else if (err == UBI_IO_BITFLIPS)
@@ -566,6 +567,7 @@ retry:
 	new_pnum = ubi_wl_get_peb(ubi);
 	if (new_pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
+		up_read(&ubi->fm_eba_sem);
 		return new_pnum;
 	}
 
@@ -576,13 +578,16 @@ retry:
 	if (err && err != UBI_IO_BITFLIPS) {
 		if (err > 0)
 			err = -EIO;
+		up_read(&ubi->fm_eba_sem);
 		goto out_put;
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-	if (err)
+	if (err) {
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
+	}
 
 	data_size = offset + len;
 	mutex_lock(&ubi->buf_mutex);
@@ -591,8 +596,10 @@ retry:
 	/* Read everything before the area where the write failure happened */
 	if (offset > 0) {
 		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
-		if (err && err != UBI_IO_BITFLIPS)
+		if (err && err != UBI_IO_BITFLIPS) {
+			up_read(&ubi->fm_eba_sem);
 			goto out_unlock;
+		}
 	}
 
 	memcpy(ubi->peb_buf + offset, buf, len);
@@ -600,15 +607,15 @@ retry:
 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
 	if (err) {
 		mutex_unlock(&ubi->buf_mutex);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	mutex_unlock(&ubi->buf_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = new_pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 
 	ubi_msg(ubi, "data was successfully recovered");
@@ -703,6 +710,7 @@ retry:
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
+		up_read(&ubi->fm_eba_sem);
 		return pnum;
 	}
 
@@ -713,6 +721,7 @@ retry:
 	if (err) {
 		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
@@ -721,13 +730,13 @@ retry:
 		if (err) {
 			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
 				 len, offset, vol_id, lnum, pnum);
+			up_read(&ubi->fm_eba_sem);
 			goto write_error;
 		}
 	}
 
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -824,6 +833,7 @@ retry:
 	if (pnum < 0) {
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		leb_write_unlock(ubi, vol_id, lnum);
+		up_read(&ubi->fm_eba_sem);
 		return pnum;
 	}
 
@@ -834,6 +844,7 @@ retry:
 	if (err) {
 		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
@@ -841,13 +852,13 @@ retry:
 	if (err) {
 		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
 			 len, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] < 0);
-	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -899,7 +910,7 @@ write_error:
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 			      int lnum, const void *buf, int len)
 {
-	int err, pnum, tries = 0, vol_id = vol->vol_id;
+	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -942,6 +953,7 @@ retry:
 	pnum = ubi_wl_get_peb(ubi);
 	if (pnum < 0) {
 		err = pnum;
+		up_read(&ubi->fm_eba_sem);
 		goto out_leb_unlock;
 	}
 
@@ -952,6 +964,7 @@ retry:
 	if (err) {
 		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
 			 vol_id, lnum, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
@@ -959,19 +972,20 @@ retry:
 	if (err) {
 		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
 			 len, pnum);
+		up_read(&ubi->fm_eba_sem);
 		goto write_error;
 	}
 
-	if (vol->eba_tbl[lnum] >= 0) {
-		err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
+	old_pnum = vol->eba_tbl[lnum];
+	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_eba_sem);
+
+	if (old_pnum >= 0) {
+		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
 		if (err)
 			goto out_leb_unlock;
 	}
 
-	down_read(&ubi->fm_sem);
-	vol->eba_tbl[lnum] = pnum;
-	up_read(&ubi->fm_sem);
-
 out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
 out_mutex:
@@ -1217,9 +1231,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
-	down_read(&ubi->fm_sem);
+	down_read(&ubi->fm_eba_sem);
 	vol->eba_tbl[lnum] = to;
-	up_read(&ubi->fm_sem);
+	up_read(&ubi->fm_eba_sem);
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
@@ -1418,7 +1432,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 				 * during re-size.
 				 */
 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
-			vol->eba_tbl[aeb->lnum] = aeb->pnum;
+			else
+				vol->eba_tbl[aeb->lnum] = aeb->pnum;
 		}
 	}
 
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 000000000000..b2a665398bca
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+
+	ubi_update_fastmap(ubi);
+	spin_lock(&ubi->wl_lock);
+	ubi->fm_work_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e, *victim = NULL;
+	int max_ec = UBI_MAX_ERASECOUNTER;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb) {
+		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+			victim = e;
+			max_ec = e->ec;
+		}
+	}
+
+	return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+				    struct ubi_fm_pool *pool)
+{
+	int i;
+	struct ubi_wl_entry *e;
+
+	for (i = pool->used; i < pool->size; i++) {
+		e = ubi->lookuptbl[pool->pebs[i]];
+		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
+	}
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb)
+		if (e->pnum < UBI_FM_MAX_START)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+	struct ubi_wl_entry *e = NULL;
+
+	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+		goto out;
+
+	if (anchor)
+		e = find_anchor_wl_entry(&ubi->free);
+	else
+		e = find_mean_wl_entry(ubi, &ubi->free);
+
+	if (!e)
+		goto out;
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/* remove it from the free list,
+	 * the wl subsystem does no longer know this erase block */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+out:
+	return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_wl_entry *e;
+	int enough;
+
+	spin_lock(&ubi->wl_lock);
+
+	return_unused_pool_pebs(ubi, wl_pool);
+	return_unused_pool_pebs(ubi, pool);
+
+	wl_pool->size = 0;
+	pool->size = 0;
+
+	for (;;) {
+		enough = 0;
+		if (pool->size < pool->max_size) {
+			if (!ubi->free.rb_node)
+				break;
+
+			e = wl_get_wle(ubi);
+			if (!e)
+				break;
+
+			pool->pebs[pool->size] = e->pnum;
+			pool->size++;
+		} else
+			enough++;
+
+		if (wl_pool->size < wl_pool->max_size) {
+			if (!ubi->free.rb_node ||
+			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+				break;
+
+			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+			self_check_in_wl_tree(ubi, e, &ubi->free);
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+
+			wl_pool->pebs[wl_pool->size] = e->pnum;
+			wl_pool->size++;
+		} else
+			enough++;
+
+		if (enough == 2)
+			break;
+	}
+
+	wl_pool->used = 0;
+	pool->used = 0;
+
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int ret, retried = 0;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+	down_read(&ubi->fm_eba_sem);
+	spin_lock(&ubi->wl_lock);
+
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronous. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_eba_sem);
+		ret = ubi_update_fastmap(ubi);
+		if (ret) {
+			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+			down_read(&ubi->fm_eba_sem);
+			return -ENOSPC;
+		}
+		down_read(&ubi->fm_eba_sem);
+		spin_lock(&ubi->wl_lock);
+	}
+
+	if (pool->used == pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		up_read(&ubi->fm_eba_sem);
+		goto again;
+	}
+
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
+	return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size) {
+		/* We cannot update the fastmap here because this
+		 * function is called in atomic context.
+		 * Let's fail here and refill/update it as soon as possible. */
+		if (!ubi->fm_work_scheduled) {
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
+		return NULL;
+	}
+
+	pnum = pool->pebs[pool->used++];
+	return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+	struct ubi_work *wrk;
+
+	spin_lock(&ubi->wl_lock);
+	if (ubi->wl_scheduled) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+	ubi->wl_scheduled = 1;
+	spin_unlock(&ubi->wl_lock);
+
+	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+	if (!wrk) {
+		spin_lock(&ubi->wl_lock);
+		ubi->wl_scheduled = 0;
+		spin_unlock(&ubi->wl_lock);
+		return -ENOMEM;
+	}
+
+	wrk->anchor = 1;
+	wrk->func = &wear_leveling_worker;
+	schedule_ubi_work(ubi, wrk);
+	return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+		      int lnum, int torture)
+{
+	struct ubi_wl_entry *e;
+	int vol_id, pnum = fm_e->pnum;
+
+	dbg_wl("PEB %d", pnum);
+
+	ubi_assert(pnum >= 0);
+	ubi_assert(pnum < ubi->peb_count);
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap the very
+	 * first time and writing now a new one. In this case the wl system
+	 * has never seen any PEB used by the original fastmap.
+	 */
+	if (!e) {
+		e = fm_e;
+		ubi_assert(e->ec >= 0);
+		ubi->lookuptbl[pnum] = e;
+	}
+
+	spin_unlock(&ubi->wl_lock);
+
+	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+	return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+	return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+	int i;
+
+	flush_work(&ubi->fm_work);
+	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+	if (ubi->fm) {
+		for (i = 0; i < ubi->fm->used_blocks; i++)
+			kfree(ubi->fm->e[i]);
+	}
+	kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+					       struct ubi_wl_entry *e,
+					       struct rb_root *root) {
+	if (e && !ubi->fm_disabled && !ubi->fm &&
+	    e->pnum < UBI_FM_MAX_START)
+		e = rb_entry(rb_next(root->rb_node),
+			     struct ubi_wl_entry, u.rb);
+
+	return e;
+}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index db3defdfc3c0..02a6de2f53ee 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
  * Author: Richard Weinberger <richard@nod.at>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -17,6 +18,69 @@
 #include "ubi.h"
 
 /**
+ * init_seen - allocate memory for used for debugging.
+ * @ubi: UBI device description object
+ */
+static inline int *init_seen(struct ubi_device *ubi)
+{
+	int *ret;
+
+	if (!ubi_dbg_chk_fastmap(ubi))
+		return NULL;
+
+	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	return ret;
+}
+
+/**
+ * free_seen - free the seen logic integer array.
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void free_seen(int *seen)
+{
+	kfree(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be makred as seen
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+{
+	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+		return;
+
+	seen[pnum] = 1;
+}
+
+/**
+ * self_check_seen - check whether all PEB have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: integer array of @ubi->peb_count size
+ */
+static int self_check_seen(struct ubi_device *ubi, int *seen)
+{
+	int pnum, ret = 0;
+
+	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+		return 0;
+
+	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/**
  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
  * @ubi: UBI device description object
  */
@@ -136,14 +200,15 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
 	if (!av)
 		goto out;
 
-	av->highest_lnum = av->leb_count = 0;
+	av->highest_lnum = av->leb_count = av->used_ebs = 0;
 	av->vol_id = vol_id;
-	av->used_ebs = used_ebs;
 	av->data_pad = data_pad;
 	av->last_data_size = last_eb_bytes;
 	av->compat = 0;
 	av->vol_type = vol_type;
 	av->root = RB_ROOT;
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = used_ebs;
 
 	dbg_bld("found volume (ID %i)", vol_id);
 
@@ -362,6 +427,7 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
 		aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
 		if (aeb->pnum == pnum) {
 			rb_erase(&aeb->u.rb, &av->root);
+			av->leb_count--;
 			kmem_cache_free(ai->aeb_slab_cache, aeb);
 			return;
 		}
@@ -376,7 +442,6 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
  * @pebs: an array of all PEB numbers in the to be scanned pool
  * @pool_size: size of the pool (number of entries in @pebs)
  * @max_sqnum: pointer to the maximal sequence number
- * @eba_orphans: list of PEBs which need to be scanned
  * @free: list of PEBs which are most likely free (and go into @ai->free)
  *
  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
@@ -384,12 +449,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
  */
 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		     int *pebs, int pool_size, unsigned long long *max_sqnum,
-		     struct list_head *eba_orphans, struct list_head *free)
+		     struct list_head *free)
 {
 	struct ubi_vid_hdr *vh;
 	struct ubi_ec_hdr *ech;
-	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
-	int i, pnum, err, found_orphan, ret = 0;
+	struct ubi_ainf_peb *new_aeb;
+	int i, pnum, err, ret = 0;
 
 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 	if (!ech)
@@ -457,18 +522,6 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		if (err == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		found_orphan = 0;
-		list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
-			if (tmp_aeb->pnum == pnum) {
-				found_orphan = 1;
-				break;
-			}
-		}
-		if (found_orphan) {
-			list_del(&tmp_aeb->u.list);
-			kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
-		}
-
 		new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
 					   GFP_KERNEL);
 		if (!new_aeb) {
@@ -543,10 +596,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 				  struct ubi_attach_info *ai,
 				  struct ubi_fastmap_layout *fm)
 {
-	struct list_head used, eba_orphans, free;
+	struct list_head used, free;
 	struct ubi_ainf_volume *av;
 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
-	struct ubi_ec_hdr *ech;
 	struct ubi_fm_sb *fmsb;
 	struct ubi_fm_hdr *fmhdr;
 	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
@@ -560,22 +612,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 
 	INIT_LIST_HEAD(&used);
 	INIT_LIST_HEAD(&free);
-	INIT_LIST_HEAD(&eba_orphans);
-	INIT_LIST_HEAD(&ai->corr);
-	INIT_LIST_HEAD(&ai->free);
-	INIT_LIST_HEAD(&ai->erase);
-	INIT_LIST_HEAD(&ai->alien);
-	ai->volumes = RB_ROOT;
 	ai->min_ec = UBI_MAX_ERASECOUNTER;
 
-	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
-					       sizeof(struct ubi_ainf_peb),
-					       0, 0, NULL);
-	if (!ai->aeb_slab_cache) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	fmsb = (struct ubi_fm_sb *)(fm_raw);
 	ai->max_sqnum = fmsb->sqnum;
 	fm_pos += sizeof(struct ubi_fm_sb);
@@ -741,28 +779,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 			}
 		}
 
-		/* This can happen if a PEB is already in an EBA known
-		 * by this fastmap but the PEB itself is not in the used
-		 * list.
-		 * In this case the PEB can be within the fastmap pool
-		 * or while writing the fastmap it was in the protection
-		 * queue.
-		 */
 		if (!aeb) {
-			aeb = kmem_cache_alloc(ai->aeb_slab_cache,
-					       GFP_KERNEL);
-			if (!aeb) {
-				ret = -ENOMEM;
-
-				goto fail;
-			}
-
-			aeb->lnum = j;
-			aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
-			aeb->ec = -1;
-			aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
-			list_add_tail(&aeb->u.list, &eba_orphans);
-			continue;
+			ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+			goto fail_bad;
 		}
 
 		aeb->lnum = j;
@@ -775,49 +794,13 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
 				aeb->pnum, aeb->lnum, av->vol_id);
 		}
-
-		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-		if (!ech) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
-					 u.list) {
-			int err;
-
-			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
-				ubi_err(ubi, "bad PEB in fastmap EBA orphan list");
-				ret = UBI_BAD_FASTMAP;
-				kfree(ech);
-				goto fail;
-			}
-
-			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
-			if (err && err != UBI_IO_BITFLIPS) {
-				ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
-					tmp_aeb->pnum, err);
-				ret = err > 0 ? UBI_BAD_FASTMAP : err;
-				kfree(ech);
-
-				goto fail;
-			} else if (err == UBI_IO_BITFLIPS)
-				tmp_aeb->scrub = 1;
-
-			tmp_aeb->ec = be64_to_cpu(ech->ec);
-			assign_aeb_to_av(ai, tmp_aeb, av);
-		}
-
-		kfree(ech);
 	}
 
-	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
-			&eba_orphans, &free);
+	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
 	if (ret)
 		goto fail;
 
-	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
-			&eba_orphans, &free);
+	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
 	if (ret)
 		goto fail;
 
@@ -827,8 +810,9 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 		list_move_tail(&tmp_aeb->u.list, &ai->free);
 
-	ubi_assert(list_empty(&used));
-	ubi_assert(list_empty(&eba_orphans));
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+		list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
 	ubi_assert(list_empty(&free));
 
 	/*
@@ -850,10 +834,6 @@ fail:
 		list_del(&tmp_aeb->u.list);
 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 	}
-	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
-		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
-	}
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 		list_del(&tmp_aeb->u.list);
 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
@@ -884,7 +864,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	__be32 crc, tmp_crc;
 	unsigned long long sqnum = 0;
 
-	mutex_lock(&ubi->fm_mutex);
+	down_write(&ubi->fm_protect);
 	memset(ubi->fm_buf, 0, ubi->fm_size);
 
 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
@@ -1075,7 +1055,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	ubi_free_vid_hdr(ubi, vh);
 	kfree(ech);
 out:
-	mutex_unlock(&ubi->fm_mutex);
+	up_write(&ubi->fm_protect);
 	if (ret == UBI_BAD_FASTMAP)
 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
 	return ret;
@@ -1107,13 +1087,14 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	struct ubi_fm_ec *fec;
 	struct ubi_fm_volhdr *fvh;
 	struct ubi_fm_eba *feba;
-	struct rb_node *node;
 	struct ubi_wl_entry *wl_e;
 	struct ubi_volume *vol;
 	struct ubi_vid_hdr *avhdr, *dvhdr;
 	struct ubi_work *ubi_wrk;
+	struct rb_node *tmp_rb;
 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
 	int scrub_peb_count, erase_peb_count;
+	int *seen_pebs = NULL;
 
 	fm_raw = ubi->fm_buf;
 	memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1130,6 +1111,12 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		goto out_kfree;
 	}
 
+	seen_pebs = init_seen(ubi);
+	if (IS_ERR(seen_pebs)) {
+		ret = PTR_ERR(seen_pebs);
+		goto out_kfree;
+	}
+
 	spin_lock(&ubi->volumes_lock);
 	spin_lock(&ubi->wl_lock);
 
@@ -1160,8 +1147,10 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
 	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
 
-	for (i = 0; i < ubi->fm_pool.size; i++)
+	for (i = 0; i < ubi->fm_pool.size; i++) {
 		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+	}
 
 	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
 	fm_pos += sizeof(*fmpl2);
@@ -1169,14 +1158,16 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
 	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
 
-	for (i = 0; i < ubi->fm_wl_pool.size; i++)
+	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
 		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+	}
 
-	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
-		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
 		fec->ec = cpu_to_be32(wl_e->ec);
 
 		free_peb_count++;
1185 } 1176 }
1186 fmh->free_peb_count = cpu_to_be32(free_peb_count); 1177 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1187 1178
1188 for (node = rb_first(&ubi->used); node; node = rb_next(node)) { 1179 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1189 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1190 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1180 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1191 1181
1192 fec->pnum = cpu_to_be32(wl_e->pnum); 1182 fec->pnum = cpu_to_be32(wl_e->pnum);
1183 set_seen(ubi, wl_e->pnum, seen_pebs);
1193 fec->ec = cpu_to_be32(wl_e->ec); 1184 fec->ec = cpu_to_be32(wl_e->ec);
1194 1185
1195 used_peb_count++; 1186 used_peb_count++;
@@ -1197,25 +1188,24 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
 
-	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
-		list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
-			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+	ubi_for_each_protected_peb(ubi, i, wl_e) {
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
-			fec->pnum = cpu_to_be32(wl_e->pnum);
-			fec->ec = cpu_to_be32(wl_e->ec);
+		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
+		fec->ec = cpu_to_be32(wl_e->ec);
 
-			used_peb_count++;
-			fm_pos += sizeof(*fec);
-			ubi_assert(fm_pos <= ubi->fm_size);
-		}
+		used_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
 	}
 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
 
-	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
-		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 
 		fec->pnum = cpu_to_be32(wl_e->pnum);
+		set_seen(ubi, wl_e->pnum, seen_pebs);
 		fec->ec = cpu_to_be32(wl_e->ec);
 
 		scrub_peb_count++;
@@ -1233,6 +1223,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1233 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); 1223 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1234 1224
1235 fec->pnum = cpu_to_be32(wl_e->pnum); 1225 fec->pnum = cpu_to_be32(wl_e->pnum);
1226 set_seen(ubi, wl_e->pnum, seen_pebs);
1236 fec->ec = cpu_to_be32(wl_e->ec); 1227 fec->ec = cpu_to_be32(wl_e->ec);
1237 1228
1238 erase_peb_count++; 1229 erase_peb_count++;
@@ -1292,6 +1283,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1292 1283
1293 for (i = 0; i < new_fm->used_blocks; i++) { 1284 for (i = 0; i < new_fm->used_blocks; i++) {
1294 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum); 1285 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1286 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1295 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec); 1287 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1296 } 1288 }
1297 1289
@@ -1325,11 +1317,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1325 ubi_assert(new_fm); 1317 ubi_assert(new_fm);
1326 ubi->fm = new_fm; 1318 ubi->fm = new_fm;
1327 1319
1320 ret = self_check_seen(ubi, seen_pebs);
1328 dbg_bld("fastmap written!"); 1321 dbg_bld("fastmap written!");
1329 1322
1330out_kfree: 1323out_kfree:
1331 ubi_free_vid_hdr(ubi, avhdr); 1324 ubi_free_vid_hdr(ubi, avhdr);
1332 ubi_free_vid_hdr(ubi, dvhdr); 1325 ubi_free_vid_hdr(ubi, dvhdr);
1326 free_seen(seen_pebs);
1333out: 1327out:
1334 return ret; 1328 return ret;
1335} 1329}
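The set_seen() calls threaded through ubi_write_fastmap() feed a new self-check: every PEB recorded in the fastmap is marked as seen, and self_check_seen() verifies afterwards that no PEB known to the WL sub-system was missed. The helpers are added earlier in fastmap.c, outside this excerpt; the following is a minimal sketch of one way to implement them, assuming a bitmap indexed by PEB number and gated by the new chk_fastmap debug knob:

	/* Sketch only; the in-tree helpers may differ in detail. */
	static unsigned long *init_seen(struct ubi_device *ubi)
	{
		unsigned long *ret;

		if (!ubi_dbg_chk_fastmap(ubi))
			return NULL;

		/* One bit per PEB, all initially "not seen". */
		ret = kcalloc(BITS_TO_LONGS(ubi->peb_count),
			      sizeof(unsigned long), GFP_KERNEL);
		if (!ret)
			return ERR_PTR(-ENOMEM);

		return ret;
	}

	static inline void free_seen(unsigned long *seen)
	{
		kfree(seen);
	}

	static inline void set_seen(struct ubi_device *ubi, int pnum,
				    unsigned long *seen)
	{
		if (!ubi_dbg_chk_fastmap(ubi) || !seen)
			return;

		set_bit(pnum, seen);
	}

	static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
	{
		int pnum, ret = 0;

		if (!ubi_dbg_chk_fastmap(ubi) || !seen)
			return 0;

		/* Every PEB the WL sub-system knows must be in the fastmap. */
		for (pnum = 0; pnum < ubi->peb_count; pnum++) {
			if (ubi->lookuptbl[pnum] && !test_bit(pnum, seen)) {
				ubi_err(ubi, "self-check failed for PEB %d, fastmap does not know it", pnum);
				ret = -EINVAL;
			}
		}

		return ret;
	}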
@@ -1384,31 +1378,87 @@ out:
1384/** 1378/**
1385 * invalidate_fastmap - destroys a fastmap. 1379 * invalidate_fastmap - destroys a fastmap.
1386 * @ubi: UBI device object 1380 * @ubi: UBI device object
1387 * @fm: the fastmap to be destroyed
1388 * 1381 *
 1382 * This function ensures that upon the next UBI attach a full scan
 1383 * is issued. We need this if UBI is about to write a new fastmap
 1384 * but is unable to do so. In this case we have two options:
 1385 * a) make sure that the current fastmap will not be used at
 1386 * attach time and continue, or b) fall back to RO mode to have the
 1387 * current fastmap in a valid state.
1389 * Returns 0 on success, < 0 indicates an internal error. 1388 * Returns 0 on success, < 0 indicates an internal error.
1390 */ 1389 */
1391static int invalidate_fastmap(struct ubi_device *ubi, 1390static int invalidate_fastmap(struct ubi_device *ubi)
1392 struct ubi_fastmap_layout *fm)
1393{ 1391{
1394 int ret; 1392 int ret;
1395 struct ubi_vid_hdr *vh; 1393 struct ubi_fastmap_layout *fm;
1394 struct ubi_wl_entry *e;
1395 struct ubi_vid_hdr *vh = NULL;
1396 1396
1397 ret = erase_block(ubi, fm->e[0]->pnum); 1397 if (!ubi->fm)
1398 if (ret < 0) 1398 return 0;
1399 return ret; 1399
1400 ubi->fm = NULL;
1401
1402 ret = -ENOMEM;
1403 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1404 if (!fm)
1405 goto out;
1400 1406
1401 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); 1407 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1402 if (!vh) 1408 if (!vh)
1403 return -ENOMEM; 1409 goto out_free_fm;
1404 1410
1405 /* deleting the current fastmap SB is not enough, an old SB may exist, 1411 ret = -ENOSPC;
1406 * so create a (corrupted) SB such that fastmap will find it and fall 1412 e = ubi_wl_get_fm_peb(ubi, 1);
1407 * back to scanning mode in any case */ 1413 if (!e)
1414 goto out_free_fm;
1415
1416 /*
 1417 * Create a fake fastmap such that UBI will fall back
1418 * to scanning mode.
1419 */
1408 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1420 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1409 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); 1421 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
1422 if (ret < 0) {
1423 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1424 goto out_free_fm;
1425 }
1426
1427 fm->used_blocks = 1;
1428 fm->e[0] = e;
1429
1430 ubi->fm = fm;
1410 1431
1432out:
1433 ubi_free_vid_hdr(ubi, vh);
1411 return ret; 1434 return ret;
1435
1436out_free_fm:
1437 kfree(fm);
1438 goto out;
1439}
1440
1441/**
1442 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1443 * WL sub-system.
1444 * @ubi: UBI device object
1445 * @fm: fastmap layout object
1446 */
1447static void return_fm_pebs(struct ubi_device *ubi,
1448 struct ubi_fastmap_layout *fm)
1449{
1450 int i;
1451
1452 if (!fm)
1453 return;
1454
1455 for (i = 0; i < fm->used_blocks; i++) {
1456 if (fm->e[i]) {
1457 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1458 fm->to_be_tortured[i]);
1459 fm->e[i] = NULL;
1460 }
1461 }
1412} 1462}
1413 1463
1414/** 1464/**
@@ -1420,45 +1470,32 @@ static int invalidate_fastmap(struct ubi_device *ubi,
1420 */ 1470 */
1421int ubi_update_fastmap(struct ubi_device *ubi) 1471int ubi_update_fastmap(struct ubi_device *ubi)
1422{ 1472{
1423 int ret, i; 1473 int ret, i, j;
1424 struct ubi_fastmap_layout *new_fm, *old_fm; 1474 struct ubi_fastmap_layout *new_fm, *old_fm;
1425 struct ubi_wl_entry *tmp_e; 1475 struct ubi_wl_entry *tmp_e;
1426 1476
1427 mutex_lock(&ubi->fm_mutex); 1477 down_write(&ubi->fm_protect);
1428 1478
1429 ubi_refill_pools(ubi); 1479 ubi_refill_pools(ubi);
1430 1480
1431 if (ubi->ro_mode || ubi->fm_disabled) { 1481 if (ubi->ro_mode || ubi->fm_disabled) {
1432 mutex_unlock(&ubi->fm_mutex); 1482 up_write(&ubi->fm_protect);
1433 return 0; 1483 return 0;
1434 } 1484 }
1435 1485
1436 ret = ubi_ensure_anchor_pebs(ubi); 1486 ret = ubi_ensure_anchor_pebs(ubi);
1437 if (ret) { 1487 if (ret) {
1438 mutex_unlock(&ubi->fm_mutex); 1488 up_write(&ubi->fm_protect);
1439 return ret; 1489 return ret;
1440 } 1490 }
1441 1491
1442 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); 1492 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1443 if (!new_fm) { 1493 if (!new_fm) {
1444 mutex_unlock(&ubi->fm_mutex); 1494 up_write(&ubi->fm_protect);
1445 return -ENOMEM; 1495 return -ENOMEM;
1446 } 1496 }
1447 1497
1448 new_fm->used_blocks = ubi->fm_size / ubi->leb_size; 1498 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1449
1450 for (i = 0; i < new_fm->used_blocks; i++) {
1451 new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1452 if (!new_fm->e[i]) {
1453 while (i--)
1454 kfree(new_fm->e[i]);
1455
1456 kfree(new_fm);
1457 mutex_unlock(&ubi->fm_mutex);
1458 return -ENOMEM;
1459 }
1460 }
1461
1462 old_fm = ubi->fm; 1499 old_fm = ubi->fm;
1463 ubi->fm = NULL; 1500 ubi->fm = NULL;
1464 1501
@@ -1473,37 +1510,49 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1473 tmp_e = ubi_wl_get_fm_peb(ubi, 0); 1510 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1474 spin_unlock(&ubi->wl_lock); 1511 spin_unlock(&ubi->wl_lock);
1475 1512
1476 if (!tmp_e && !old_fm) { 1513 if (!tmp_e) {
1477 int j; 1514 if (old_fm && old_fm->e[i]) {
1478 ubi_err(ubi, "could not get any free erase block"); 1515 ret = erase_block(ubi, old_fm->e[i]->pnum);
1479 1516 if (ret < 0) {
1480 for (j = 1; j < i; j++) 1517 ubi_err(ubi, "could not erase old fastmap PEB");
1481 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); 1518
1482 1519 for (j = 1; j < i; j++) {
1483 ret = -ENOSPC; 1520 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1484 goto err; 1521 j, 0);
1485 } else if (!tmp_e && old_fm) { 1522 new_fm->e[j] = NULL;
1486 ret = erase_block(ubi, old_fm->e[i]->pnum); 1523 }
1487 if (ret < 0) { 1524 goto err;
1488 int j; 1525 }
1489 1526 new_fm->e[i] = old_fm->e[i];
1490 for (j = 1; j < i; j++) 1527 old_fm->e[i] = NULL;
1491 ubi_wl_put_fm_peb(ubi, new_fm->e[j], 1528 } else {
1492 j, 0); 1529 ubi_err(ubi, "could not get any free erase block");
1530
1531 for (j = 1; j < i; j++) {
1532 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1533 new_fm->e[j] = NULL;
1534 }
1493 1535
1494 ubi_err(ubi, "could not erase old fastmap PEB"); 1536 ret = -ENOSPC;
1495 goto err; 1537 goto err;
1496 } 1538 }
1497
1498 new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1499 new_fm->e[i]->ec = old_fm->e[i]->ec;
1500 } else { 1539 } else {
1501 new_fm->e[i]->pnum = tmp_e->pnum; 1540 new_fm->e[i] = tmp_e;
1502 new_fm->e[i]->ec = tmp_e->ec;
1503 1541
1504 if (old_fm) 1542 if (old_fm && old_fm->e[i]) {
1505 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, 1543 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1506 old_fm->to_be_tortured[i]); 1544 old_fm->to_be_tortured[i]);
1545 old_fm->e[i] = NULL;
1546 }
1547 }
1548 }
1549
1550 /* Old fastmap is larger than the new one */
1551 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1552 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1553 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1554 old_fm->to_be_tortured[i]);
1555 old_fm->e[i] = NULL;
1507 } 1556 }
1508 } 1557 }
1509 1558
@@ -1516,67 +1565,67 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1516 if (!tmp_e) { 1565 if (!tmp_e) {
1517 ret = erase_block(ubi, old_fm->e[0]->pnum); 1566 ret = erase_block(ubi, old_fm->e[0]->pnum);
1518 if (ret < 0) { 1567 if (ret < 0) {
1519 int i;
1520 ubi_err(ubi, "could not erase old anchor PEB"); 1568 ubi_err(ubi, "could not erase old anchor PEB");
1521 1569
1522 for (i = 1; i < new_fm->used_blocks; i++) 1570 for (i = 1; i < new_fm->used_blocks; i++) {
1523 ubi_wl_put_fm_peb(ubi, new_fm->e[i], 1571 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1524 i, 0); 1572 i, 0);
1573 new_fm->e[i] = NULL;
1574 }
1525 goto err; 1575 goto err;
1526 } 1576 }
1527 1577 new_fm->e[0] = old_fm->e[0];
1528 new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1529 new_fm->e[0]->ec = ret; 1578 new_fm->e[0]->ec = ret;
1579 old_fm->e[0] = NULL;
1530 } else { 1580 } else {
1531 /* we've got a new anchor PEB, return the old one */ 1581 /* we've got a new anchor PEB, return the old one */
1532 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0, 1582 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1533 old_fm->to_be_tortured[0]); 1583 old_fm->to_be_tortured[0]);
1534 1584 new_fm->e[0] = tmp_e;
1535 new_fm->e[0]->pnum = tmp_e->pnum; 1585 old_fm->e[0] = NULL;
1536 new_fm->e[0]->ec = tmp_e->ec;
1537 } 1586 }
1538 } else { 1587 } else {
1539 if (!tmp_e) { 1588 if (!tmp_e) {
1540 int i;
1541 ubi_err(ubi, "could not find any anchor PEB"); 1589 ubi_err(ubi, "could not find any anchor PEB");
1542 1590
1543 for (i = 1; i < new_fm->used_blocks; i++) 1591 for (i = 1; i < new_fm->used_blocks; i++) {
1544 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); 1592 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1593 new_fm->e[i] = NULL;
1594 }
1545 1595
1546 ret = -ENOSPC; 1596 ret = -ENOSPC;
1547 goto err; 1597 goto err;
1548 } 1598 }
1549 1599 new_fm->e[0] = tmp_e;
1550 new_fm->e[0]->pnum = tmp_e->pnum;
1551 new_fm->e[0]->ec = tmp_e->ec;
1552 } 1600 }
1553 1601
1554 down_write(&ubi->work_sem); 1602 down_write(&ubi->work_sem);
1555 down_write(&ubi->fm_sem); 1603 down_write(&ubi->fm_eba_sem);
1556 ret = ubi_write_fastmap(ubi, new_fm); 1604 ret = ubi_write_fastmap(ubi, new_fm);
1557 up_write(&ubi->fm_sem); 1605 up_write(&ubi->fm_eba_sem);
1558 up_write(&ubi->work_sem); 1606 up_write(&ubi->work_sem);
1559 1607
1560 if (ret) 1608 if (ret)
1561 goto err; 1609 goto err;
1562 1610
1563out_unlock: 1611out_unlock:
1564 mutex_unlock(&ubi->fm_mutex); 1612 up_write(&ubi->fm_protect);
1565 kfree(old_fm); 1613 kfree(old_fm);
1566 return ret; 1614 return ret;
1567 1615
1568err: 1616err:
1569 kfree(new_fm);
1570
1571 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret); 1617 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1572 1618
1573 ret = 0; 1619 ret = invalidate_fastmap(ubi);
1574 if (old_fm) { 1620 if (ret < 0) {
 1575 ret = invalidate_fastmap(ubi, old_fm); 1621 ubi_err(ubi, "Unable to invalidate current fastmap!");
1576 if (ret < 0) 1622 ubi_ro_mode(ubi);
 1577 ubi_err(ubi, "Unable to invalidate current fastmap!"); 1623 } else {
1578 else if (ret) 1624 return_fm_pebs(ubi, old_fm);
1579 ret = 0; 1625 return_fm_pebs(ubi, new_fm);
1626 ret = 0;
1580 } 1627 }
1628
1629 kfree(new_fm);
1581 goto out_unlock; 1630 goto out_unlock;
1582} 1631}
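Three locks now nest inside ubi_update_fastmap(), with the new fm_protect outermost. Extracted from the hunks above, the ordering is:

	/* Lock nesting while a new fastmap is written. */
	down_write(&ubi->fm_protect);	/* shut out ubi_wl_put_peb() and friends */
	down_write(&ubi->work_sem);	/* no works may run concurrently */
	down_write(&ubi->fm_eba_sem);	/* freeze EBA table changes */
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	/* ... error handling, see above ... */
	up_write(&ubi->fm_protect);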
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index ed0bcb35472f..5bbd1f094f4e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -859,6 +859,9 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
859 if (err) 859 if (err)
860 return err; 860 return err;
861 861
862 if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
863 return -EROFS;
864
862 err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); 865 err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
863 return err; 866 return err;
864} 867}
@@ -1106,6 +1109,9 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1106 if (err) 1109 if (err)
1107 return err; 1110 return err;
1108 1111
1112 if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
1113 return -EROFS;
1114
1109 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1115 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1110 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, 1116 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
1111 ubi->vid_hdr_alsize); 1117 ubi->vid_hdr_alsize);
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index ac2b24d1783d..d0d072e7ccd2 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -403,8 +403,6 @@ struct ubi_vtbl_record {
403#define UBI_FM_MIN_POOL_SIZE 8 403#define UBI_FM_MIN_POOL_SIZE 8
404#define UBI_FM_MAX_POOL_SIZE 256 404#define UBI_FM_MAX_POOL_SIZE 256
405 405
406#define UBI_FM_WL_POOL_SIZE 25
407
408/** 406/**
409 * struct ubi_fm_sb - UBI fastmap super block 407 * struct ubi_fm_sb - UBI fastmap super block
410 * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC) 408 * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c5be82d9d345..c998212fc680 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -151,6 +151,17 @@ enum {
151 UBI_BAD_FASTMAP, 151 UBI_BAD_FASTMAP,
152}; 152};
153 153
154/*
155 * Flags for emulate_power_cut in ubi_debug_info
156 *
157 * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
158 * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
159 */
160enum {
161 POWER_CUT_EC_WRITE = 0x01,
162 POWER_CUT_VID_WRITE = 0x02,
163};
164
154/** 165/**
155 * struct ubi_wl_entry - wear-leveling entry. 166 * struct ubi_wl_entry - wear-leveling entry.
156 * @u.rb: link in the corresponding (free/used) RB-tree 167 * @u.rb: link in the corresponding (free/used) RB-tree
@@ -356,30 +367,48 @@ struct ubi_wl_entry;
356 * 367 *
357 * @chk_gen: if UBI general extra checks are enabled 368 * @chk_gen: if UBI general extra checks are enabled
358 * @chk_io: if UBI I/O extra checks are enabled 369 * @chk_io: if UBI I/O extra checks are enabled
370 * @chk_fastmap: if UBI fastmap extra checks are enabled
359 * @disable_bgt: disable the background task for testing purposes 371 * @disable_bgt: disable the background task for testing purposes
360 * @emulate_bitflips: emulate bit-flips for testing purposes 372 * @emulate_bitflips: emulate bit-flips for testing purposes
361 * @emulate_io_failures: emulate write/erase failures for testing purposes 373 * @emulate_io_failures: emulate write/erase failures for testing purposes
374 * @emulate_power_cut: emulate power cut for testing purposes
375 * @power_cut_counter: count down for writes left until emulated power cut
376 * @power_cut_min: minimum number of writes before emulating a power cut
377 * @power_cut_max: maximum number of writes until emulating a power cut
362 * @dfs_dir_name: name of debugfs directory containing files of this UBI device 378 * @dfs_dir_name: name of debugfs directory containing files of this UBI device
363 * @dfs_dir: direntry object of the UBI device debugfs directory 379 * @dfs_dir: direntry object of the UBI device debugfs directory
364 * @dfs_chk_gen: debugfs knob to enable UBI general extra checks 380 * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
365 * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks 381 * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
382 * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
366 * @dfs_disable_bgt: debugfs knob to disable the background task 383 * @dfs_disable_bgt: debugfs knob to disable the background task
367 * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips 384 * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
368 * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures 385 * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
386 * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
387 * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
388 * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
369 */ 389 */
370struct ubi_debug_info { 390struct ubi_debug_info {
371 unsigned int chk_gen:1; 391 unsigned int chk_gen:1;
372 unsigned int chk_io:1; 392 unsigned int chk_io:1;
393 unsigned int chk_fastmap:1;
373 unsigned int disable_bgt:1; 394 unsigned int disable_bgt:1;
374 unsigned int emulate_bitflips:1; 395 unsigned int emulate_bitflips:1;
375 unsigned int emulate_io_failures:1; 396 unsigned int emulate_io_failures:1;
397 unsigned int emulate_power_cut:2;
398 unsigned int power_cut_counter;
399 unsigned int power_cut_min;
400 unsigned int power_cut_max;
376 char dfs_dir_name[UBI_DFS_DIR_LEN + 1]; 401 char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
377 struct dentry *dfs_dir; 402 struct dentry *dfs_dir;
378 struct dentry *dfs_chk_gen; 403 struct dentry *dfs_chk_gen;
379 struct dentry *dfs_chk_io; 404 struct dentry *dfs_chk_io;
405 struct dentry *dfs_chk_fastmap;
380 struct dentry *dfs_disable_bgt; 406 struct dentry *dfs_disable_bgt;
381 struct dentry *dfs_emulate_bitflips; 407 struct dentry *dfs_emulate_bitflips;
382 struct dentry *dfs_emulate_io_failures; 408 struct dentry *dfs_emulate_io_failures;
409 struct dentry *dfs_emulate_power_cut;
410 struct dentry *dfs_power_cut_min;
411 struct dentry *dfs_power_cut_max;
383}; 412};
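The io.c hunks earlier in this patch make ubi_io_write_ec_hdr() and ubi_io_write_vid_hdr() bail out with -EROFS when ubi_dbg_power_cut() fires, so a header write dies exactly where a real power loss would interrupt it. The helper body lives in debug.c (see the diffstat) and is not shown here; a plausible sketch, consistent with the counter fields documented above:

	/*
	 * Assumed shape of the power-cut check; the in-tree version in
	 * debug.c may differ in detail.
	 */
	int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
	{
		unsigned int range;

		if ((ubi->dbg.emulate_power_cut & caller) == 0)
			return 0;

		if (ubi->dbg.power_cut_counter == 0) {
			/* First hit: arm a countdown somewhere in [min, max). */
			ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
			if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
				range = ubi->dbg.power_cut_max -
					ubi->dbg.power_cut_min;
				ubi->dbg.power_cut_counter += prandom_u32() % range;
			}
			return 0;
		}

		ubi->dbg.power_cut_counter--;
		if (ubi->dbg.power_cut_counter)
			return 0;

		return 1;	/* caller emulates the cut by returning -EROFS */
	}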
384 413
385/** 414/**
@@ -426,11 +455,13 @@ struct ubi_debug_info {
426 * @fm_pool: in-memory data structure of the fastmap pool 455 * @fm_pool: in-memory data structure of the fastmap pool
427 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL 456 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
428 * sub-system 457 * sub-system
429 * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf 458 * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
459 * that critical sections cannot be interrupted by ubi_update_fastmap()
430 * @fm_buf: vmalloc()'d buffer which holds the raw fastmap 460 * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
431 * @fm_size: fastmap size in bytes 461 * @fm_size: fastmap size in bytes
432 * @fm_sem: allows ubi_update_fastmap() to block EBA table changes 462 * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
433 * @fm_work: fastmap work queue 463 * @fm_work: fastmap work queue
464 * @fm_work_scheduled: non-zero if fastmap work was scheduled
434 * 465 *
435 * @used: RB-tree of used physical eraseblocks 466 * @used: RB-tree of used physical eraseblocks
436 * @erroneous: RB-tree of erroneous used physical eraseblocks 467 * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -442,7 +473,8 @@ struct ubi_debug_info {
442 * @pq_head: protection queue head 473 * @pq_head: protection queue head
443 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, 474 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
444 * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, 475 * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
445 * @erroneous, and @erroneous_peb_count fields 476 * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
477 * and @fm_wl_pool fields
446 * @move_mutex: serializes eraseblock moves 478 * @move_mutex: serializes eraseblock moves
447 * @work_sem: used to wait for all the scheduled works to finish and prevent 479 * @work_sem: used to wait for all the scheduled works to finish and prevent
448 * new works from being submitted 480 * new works from being submitted
@@ -479,7 +511,7 @@ struct ubi_debug_info {
479 * @vid_hdr_offset: starting offset of the volume identifier header (might be 511 * @vid_hdr_offset: starting offset of the volume identifier header (might be
480 * unaligned) 512 * unaligned)
481 * @vid_hdr_aloffset: starting offset of the VID header aligned to 513 * @vid_hdr_aloffset: starting offset of the VID header aligned to
482 * @hdrs_min_io_size 514 * @hdrs_min_io_size
483 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 515 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
484 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 516 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
485 * not 517 * not
@@ -532,11 +564,12 @@ struct ubi_device {
532 struct ubi_fastmap_layout *fm; 564 struct ubi_fastmap_layout *fm;
533 struct ubi_fm_pool fm_pool; 565 struct ubi_fm_pool fm_pool;
534 struct ubi_fm_pool fm_wl_pool; 566 struct ubi_fm_pool fm_wl_pool;
535 struct rw_semaphore fm_sem; 567 struct rw_semaphore fm_eba_sem;
536 struct mutex fm_mutex; 568 struct rw_semaphore fm_protect;
537 void *fm_buf; 569 void *fm_buf;
538 size_t fm_size; 570 size_t fm_size;
539 struct work_struct fm_work; 571 struct work_struct fm_work;
572 int fm_work_scheduled;
540 573
541 /* Wear-leveling sub-system's stuff */ 574 /* Wear-leveling sub-system's stuff */
542 struct rb_root used; 575 struct rb_root used;
@@ -868,10 +901,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
868 int pnum, const struct ubi_vid_hdr *vid_hdr); 901 int pnum, const struct ubi_vid_hdr *vid_hdr);
869 902
870/* fastmap.c */ 903/* fastmap.c */
904#ifdef CONFIG_MTD_UBI_FASTMAP
871size_t ubi_calc_fm_size(struct ubi_device *ubi); 905size_t ubi_calc_fm_size(struct ubi_device *ubi);
872int ubi_update_fastmap(struct ubi_device *ubi); 906int ubi_update_fastmap(struct ubi_device *ubi);
873int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, 907int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
874 int fm_anchor); 908 int fm_anchor);
909#else
910static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
911#endif
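With the inline stub, fastmap-aware call sites compile unchanged whether or not CONFIG_MTD_UBI_FASTMAP is set. A hypothetical caller, only to illustrate the calling style this enables:

	/* Hypothetical caller; not part of this patch. */
	static int example_commit_point(struct ubi_device *ubi)
	{
		/* Compiles to "return 0" when fastmap support is built out. */
		int err = ubi_update_fastmap(ubi);

		if (err)
			ubi_warn(ubi, "fastmap update failed: %d", err);

		return err;
	}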
875 912
876/* block.c */ 913/* block.c */
877#ifdef CONFIG_MTD_UBI_BLOCK 914#ifdef CONFIG_MTD_UBI_BLOCK
@@ -892,6 +929,42 @@ static inline int ubiblock_remove(struct ubi_volume_info *vi)
892} 929}
893#endif 930#endif
894 931
932/*
933 * ubi_for_each_free_peb - walk the UBI free RB tree.
934 * @ubi: UBI device description object
935 * @e: a pointer to a ubi_wl_entry to use as cursor
 936 * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
937 */
938#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
939 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
940
941/*
942 * ubi_for_each_used_peb - walk the UBI used RB tree.
943 * @ubi: UBI device description object
944 * @e: a pointer to a ubi_wl_entry to use as cursor
 945 * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
946 */
947#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
948 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
949
950/*
 951 * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
952 * @ubi: UBI device description object
953 * @e: a pointer to a ubi_wl_entry to use as cursor
 954 * @tmp_rb: a pointer to a struct rb_node to use as a loop cursor
955 */
956#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
957 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
958
959/*
960 * ubi_for_each_protected_peb - walk the UBI protection queue.
961 * @ubi: UBI device description object
 962 * @i: an integer used as a counter
963 * @e: a pointer to a ubi_wl_entry to use as cursor
964 */
965#define ubi_for_each_protected_peb(ubi, i, e) \
966 for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
967 list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
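ubi_write_fastmap() above is the first user of these iterators. A minimal, hypothetical example of walking the free tree (@free is protected by @wl_lock, so a real caller must hold it):

	/* Hypothetical debug helper; not part of this patch. */
	static int count_free_pebs(struct ubi_device *ubi)
	{
		struct ubi_wl_entry *e;
		struct rb_node *tmp_rb;
		int count = 0;

		ubi_for_each_free_peb(ubi, e, tmp_rb)
			count++;	/* e->pnum and e->ec are valid here */

		return count;
	}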
895 968
896/* 969/*
897 * ubi_rb_for_each_entry - walk an RB-tree. 970 * ubi_rb_for_each_entry - walk an RB-tree.
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8f7bde6a85d6..16214d3d57a4 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -103,6 +103,7 @@
103#include <linux/freezer.h> 103#include <linux/freezer.h>
104#include <linux/kthread.h> 104#include <linux/kthread.h>
105#include "ubi.h" 105#include "ubi.h"
106#include "wl.h"
106 107
107/* Number of physical eraseblocks reserved for wear-leveling purposes */ 108/* Number of physical eraseblocks reserved for wear-leveling purposes */
108#define WL_RESERVED_PEBS 1 109#define WL_RESERVED_PEBS 1
@@ -140,42 +141,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
140static int self_check_in_pq(const struct ubi_device *ubi, 141static int self_check_in_pq(const struct ubi_device *ubi,
141 struct ubi_wl_entry *e); 142 struct ubi_wl_entry *e);
142 143
143#ifdef CONFIG_MTD_UBI_FASTMAP
144/**
145 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
146 * @wrk: the work description object
147 */
148static void update_fastmap_work_fn(struct work_struct *wrk)
149{
150 struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
151 ubi_update_fastmap(ubi);
152}
153
154/**
155 * ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
156 * @ubi: UBI device description object
157 * @pnum: the to be checked PEB
158 */
159static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
160{
161 int i;
162
163 if (!ubi->fm)
164 return 0;
165
166 for (i = 0; i < ubi->fm->used_blocks; i++)
167 if (ubi->fm->e[i]->pnum == pnum)
168 return 1;
169
170 return 0;
171}
172#else
173static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
174{
175 return 0;
176}
177#endif
178
179/** 144/**
180 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 145 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
181 * @e: the wear-leveling entry to add 146 * @e: the wear-leveling entry to add
@@ -213,6 +178,20 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
213} 178}
214 179
215/** 180/**
 181 * wl_entry_destroy - destroy a wear-leveling entry.
 182 * @ubi: UBI device description object
 183 * @e: the wear-leveling entry to destroy
184 *
 185 * This function destroys a wear-leveling entry and removes
 186 * the reference from the lookup table.
187 */
188static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
189{
190 ubi->lookuptbl[e->pnum] = NULL;
191 kmem_cache_free(ubi_wl_entry_slab, e);
192}
193
194/**
216 * do_work - do one pending work. 195 * do_work - do one pending work.
217 * @ubi: UBI device description object 196 * @ubi: UBI device description object
218 * 197 *
@@ -260,33 +239,6 @@ static int do_work(struct ubi_device *ubi)
260} 239}
261 240
262/** 241/**
263 * produce_free_peb - produce a free physical eraseblock.
264 * @ubi: UBI device description object
265 *
266 * This function tries to make a free PEB by means of synchronous execution of
267 * pending works. This may be needed if, for example the background thread is
268 * disabled. Returns zero in case of success and a negative error code in case
269 * of failure.
270 */
271static int produce_free_peb(struct ubi_device *ubi)
272{
273 int err;
274
275 while (!ubi->free.rb_node && ubi->works_count) {
276 spin_unlock(&ubi->wl_lock);
277
278 dbg_wl("do one work synchronously");
279 err = do_work(ubi);
280
281 spin_lock(&ubi->wl_lock);
282 if (err)
283 return err;
284 }
285
286 return 0;
287}
288
289/**
290 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. 242 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
291 * @e: the wear-leveling entry to check 243 * @e: the wear-leveling entry to check
292 * @root: the root of the tree 244 * @root: the root of the tree
@@ -409,119 +361,32 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
409 if (last->ec - first->ec < WL_FREE_MAX_DIFF) { 361 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
410 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); 362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
411 363
412#ifdef CONFIG_MTD_UBI_FASTMAP
413 /* If no fastmap has been written and this WL entry can be used 364 /* If no fastmap has been written and this WL entry can be used
414 * as anchor PEB, hold it back and return the second best 365 * as anchor PEB, hold it back and return the second best
415 * WL entry such that fastmap can use the anchor PEB later. */ 366 * WL entry such that fastmap can use the anchor PEB later. */
416 if (e && !ubi->fm_disabled && !ubi->fm && 367 e = may_reserve_for_fm(ubi, e, root);
417 e->pnum < UBI_FM_MAX_START)
418 e = rb_entry(rb_next(root->rb_node),
419 struct ubi_wl_entry, u.rb);
420#endif
421 } else 368 } else
422 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); 369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
423 370
424 return e; 371 return e;
425} 372}
426 373
427#ifdef CONFIG_MTD_UBI_FASTMAP
428/**
429 * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
430 * @root: the RB-tree where to look for
431 */
432static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
433{
434 struct rb_node *p;
435 struct ubi_wl_entry *e, *victim = NULL;
436 int max_ec = UBI_MAX_ERASECOUNTER;
437
438 ubi_rb_for_each_entry(p, e, root, u.rb) {
439 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
440 victim = e;
441 max_ec = e->ec;
442 }
443 }
444
445 return victim;
446}
447
448static int anchor_pebs_avalible(struct rb_root *root)
449{
450 struct rb_node *p;
451 struct ubi_wl_entry *e;
452
453 ubi_rb_for_each_entry(p, e, root, u.rb)
454 if (e->pnum < UBI_FM_MAX_START)
455 return 1;
456
457 return 0;
458}
459
460/** 374/**
461 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. 375 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
376 * refill_wl_user_pool().
462 * @ubi: UBI device description object 377 * @ubi: UBI device description object
463 * @anchor: This PEB will be used as anchor PEB by fastmap
464 * 378 *
 465 * The function returns a physical erase block with a given maximal number 379 * This function returns a wear-leveling entry in case of success and
466 * and removes it from the wl subsystem. 380 * NULL in case of failure.
467 * Must be called with wl_lock held!
468 */ 381 */
469struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) 382static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
470{ 383{
471 struct ubi_wl_entry *e = NULL;
472
473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
474 goto out;
475
476 if (anchor)
477 e = find_anchor_wl_entry(&ubi->free);
478 else
479 e = find_mean_wl_entry(ubi, &ubi->free);
480
481 if (!e)
482 goto out;
483
484 self_check_in_wl_tree(ubi, e, &ubi->free);
485
486 /* remove it from the free list,
487 * the wl subsystem does no longer know this erase block */
488 rb_erase(&e->u.rb, &ubi->free);
489 ubi->free_count--;
490out:
491 return e;
492}
493#endif
494
495/**
496 * __wl_get_peb - get a physical eraseblock.
497 * @ubi: UBI device description object
498 *
499 * This function returns a physical eraseblock in case of success and a
500 * negative error code in case of failure.
501 */
502static int __wl_get_peb(struct ubi_device *ubi)
503{
504 int err;
505 struct ubi_wl_entry *e; 384 struct ubi_wl_entry *e;
506 385
507retry:
508 if (!ubi->free.rb_node) {
509 if (ubi->works_count == 0) {
510 ubi_err(ubi, "no free eraseblocks");
511 ubi_assert(list_empty(&ubi->works));
512 return -ENOSPC;
513 }
514
515 err = produce_free_peb(ubi);
516 if (err < 0)
517 return err;
518 goto retry;
519 }
520
521 e = find_mean_wl_entry(ubi, &ubi->free); 386 e = find_mean_wl_entry(ubi, &ubi->free);
522 if (!e) { 387 if (!e) {
523 ubi_err(ubi, "no free eraseblocks"); 388 ubi_err(ubi, "no free eraseblocks");
524 return -ENOSPC; 389 return NULL;
525 } 390 }
526 391
527 self_check_in_wl_tree(ubi, e, &ubi->free); 392 self_check_in_wl_tree(ubi, e, &ubi->free);
@@ -533,174 +398,10 @@ retry:
533 rb_erase(&e->u.rb, &ubi->free); 398 rb_erase(&e->u.rb, &ubi->free);
534 ubi->free_count--; 399 ubi->free_count--;
535 dbg_wl("PEB %d EC %d", e->pnum, e->ec); 400 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
536#ifndef CONFIG_MTD_UBI_FASTMAP
537 /* We have to enqueue e only if fastmap is disabled,
538 * is fastmap enabled prot_queue_add() will be called by
539 * ubi_wl_get_peb() after removing e from the pool. */
540 prot_queue_add(ubi, e);
541#endif
542 return e->pnum;
543}
544
545#ifdef CONFIG_MTD_UBI_FASTMAP
546/**
547 * return_unused_pool_pebs - returns unused PEB to the free tree.
548 * @ubi: UBI device description object
549 * @pool: fastmap pool description object
550 */
551static void return_unused_pool_pebs(struct ubi_device *ubi,
552 struct ubi_fm_pool *pool)
553{
554 int i;
555 struct ubi_wl_entry *e;
556
557 for (i = pool->used; i < pool->size; i++) {
558 e = ubi->lookuptbl[pool->pebs[i]];
559 wl_tree_add(e, &ubi->free);
560 ubi->free_count++;
561 }
562}
563
564/**
565 * refill_wl_pool - refills all the fastmap pool used by the
566 * WL sub-system.
567 * @ubi: UBI device description object
568 */
569static void refill_wl_pool(struct ubi_device *ubi)
570{
571 struct ubi_wl_entry *e;
572 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
573
574 return_unused_pool_pebs(ubi, pool);
575
576 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
577 if (!ubi->free.rb_node ||
578 (ubi->free_count - ubi->beb_rsvd_pebs < 5))
579 break;
580
581 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
582 self_check_in_wl_tree(ubi, e, &ubi->free);
583 rb_erase(&e->u.rb, &ubi->free);
584 ubi->free_count--;
585
586 pool->pebs[pool->size] = e->pnum;
587 }
588 pool->used = 0;
589}
590
591/**
592 * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
593 * @ubi: UBI device description object
594 */
595static void refill_wl_user_pool(struct ubi_device *ubi)
596{
597 struct ubi_fm_pool *pool = &ubi->fm_pool;
598
599 return_unused_pool_pebs(ubi, pool);
600
601 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
602 pool->pebs[pool->size] = __wl_get_peb(ubi);
603 if (pool->pebs[pool->size] < 0)
604 break;
605 }
606 pool->used = 0;
607}
608
609/**
610 * ubi_refill_pools - refills all fastmap PEB pools.
611 * @ubi: UBI device description object
612 */
613void ubi_refill_pools(struct ubi_device *ubi)
614{
615 spin_lock(&ubi->wl_lock);
616 refill_wl_pool(ubi);
617 refill_wl_user_pool(ubi);
618 spin_unlock(&ubi->wl_lock);
619}
620
621/* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of
622 * the fastmap pool.
623 */
624int ubi_wl_get_peb(struct ubi_device *ubi)
625{
626 int ret;
627 struct ubi_fm_pool *pool = &ubi->fm_pool;
628 struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
629
630 if (!pool->size || !wl_pool->size || pool->used == pool->size ||
631 wl_pool->used == wl_pool->size)
632 ubi_update_fastmap(ubi);
633
634 /* we got not a single free PEB */
635 if (!pool->size)
636 ret = -ENOSPC;
637 else {
638 spin_lock(&ubi->wl_lock);
639 ret = pool->pebs[pool->used++];
640 prot_queue_add(ubi, ubi->lookuptbl[ret]);
641 spin_unlock(&ubi->wl_lock);
642 }
643
644 return ret;
645}
646
647/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
648 *
649 * @ubi: UBI device description object
650 */
651static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
652{
653 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
654 int pnum;
655
656 if (pool->used == pool->size || !pool->size) {
657 /* We cannot update the fastmap here because this
658 * function is called in atomic context.
659 * Let's fail here and refill/update it as soon as possible. */
660 schedule_work(&ubi->fm_work);
661 return NULL;
662 } else {
663 pnum = pool->pebs[pool->used++];
664 return ubi->lookuptbl[pnum];
665 }
666}
667#else
668static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
669{
670 struct ubi_wl_entry *e;
671
672 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
673 self_check_in_wl_tree(ubi, e, &ubi->free);
674 ubi->free_count--;
675 ubi_assert(ubi->free_count >= 0);
676 rb_erase(&e->u.rb, &ubi->free);
677 401
678 return e; 402 return e;
679} 403}
680 404
681int ubi_wl_get_peb(struct ubi_device *ubi)
682{
683 int peb, err;
684
685 spin_lock(&ubi->wl_lock);
686 peb = __wl_get_peb(ubi);
687 spin_unlock(&ubi->wl_lock);
688
689 if (peb < 0)
690 return peb;
691
692 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
693 ubi->peb_size - ubi->vid_hdr_aloffset);
694 if (err) {
695 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
696 peb);
697 return err;
698 }
699
700 return peb;
701}
702#endif
703
704/** 405/**
705 * prot_queue_del - remove a physical eraseblock from the protection queue. 406 * prot_queue_del - remove a physical eraseblock from the protection queue.
706 * @ubi: UBI device description object 407 * @ubi: UBI device description object
@@ -867,17 +568,6 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
867static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 568static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
868 int shutdown); 569 int shutdown);
869 570
870#ifdef CONFIG_MTD_UBI_FASTMAP
871/**
872 * ubi_is_erase_work - checks whether a work is erase work.
873 * @wrk: The work object to be checked
874 */
875int ubi_is_erase_work(struct ubi_work *wrk)
876{
877 return wrk->func == erase_worker;
878}
879#endif
880
881/** 571/**
882 * schedule_erase - schedule an erase work. 572 * schedule_erase - schedule an erase work.
883 * @ubi: UBI device description object 573 * @ubi: UBI device description object
@@ -895,7 +585,6 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
895 struct ubi_work *wl_wrk; 585 struct ubi_work *wl_wrk;
896 586
897 ubi_assert(e); 587 ubi_assert(e);
898 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
899 588
900 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", 589 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
901 e->pnum, e->ec, torture); 590 e->pnum, e->ec, torture);
@@ -942,51 +631,6 @@ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
942 return erase_worker(ubi, wl_wrk, 0); 631 return erase_worker(ubi, wl_wrk, 0);
943} 632}
944 633
945#ifdef CONFIG_MTD_UBI_FASTMAP
946/**
947 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
948 * sub-system.
949 * see: ubi_wl_put_peb()
950 *
951 * @ubi: UBI device description object
952 * @fm_e: physical eraseblock to return
953 * @lnum: the last used logical eraseblock number for the PEB
954 * @torture: if this physical eraseblock has to be tortured
955 */
956int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
957 int lnum, int torture)
958{
959 struct ubi_wl_entry *e;
960 int vol_id, pnum = fm_e->pnum;
961
962 dbg_wl("PEB %d", pnum);
963
964 ubi_assert(pnum >= 0);
965 ubi_assert(pnum < ubi->peb_count);
966
967 spin_lock(&ubi->wl_lock);
968 e = ubi->lookuptbl[pnum];
969
970 /* This can happen if we recovered from a fastmap the very
971 * first time and writing now a new one. In this case the wl system
972 * has never seen any PEB used by the original fastmap.
973 */
974 if (!e) {
975 e = fm_e;
976 ubi_assert(e->ec >= 0);
977 ubi->lookuptbl[pnum] = e;
978 } else {
979 e->ec = fm_e->ec;
980 kfree(fm_e);
981 }
982
983 spin_unlock(&ubi->wl_lock);
984
985 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
986 return schedule_erase(ubi, e, vol_id, lnum, torture);
987}
988#endif
989
990/** 634/**
991 * wear_leveling_worker - wear-leveling worker function. 635 * wear_leveling_worker - wear-leveling worker function.
992 * @ubi: UBI device description object 636 * @ubi: UBI device description object
@@ -1002,7 +646,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1002 int shutdown) 646 int shutdown)
1003{ 647{
1004 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; 648 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1005 int vol_id = -1, uninitialized_var(lnum); 649 int vol_id = -1, lnum = -1;
1006#ifdef CONFIG_MTD_UBI_FASTMAP 650#ifdef CONFIG_MTD_UBI_FASTMAP
1007 int anchor = wrk->anchor; 651 int anchor = wrk->anchor;
1008#endif 652#endif
@@ -1214,7 +858,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1214 err = do_sync_erase(ubi, e1, vol_id, lnum, 0); 858 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1215 if (err) { 859 if (err) {
1216 if (e2) 860 if (e2)
1217 kmem_cache_free(ubi_wl_entry_slab, e2); 861 wl_entry_destroy(ubi, e2);
1218 goto out_ro; 862 goto out_ro;
1219 } 863 }
1220 864
@@ -1282,8 +926,8 @@ out_error:
1282 spin_unlock(&ubi->wl_lock); 926 spin_unlock(&ubi->wl_lock);
1283 927
1284 ubi_free_vid_hdr(ubi, vid_hdr); 928 ubi_free_vid_hdr(ubi, vid_hdr);
1285 kmem_cache_free(ubi_wl_entry_slab, e1); 929 wl_entry_destroy(ubi, e1);
1286 kmem_cache_free(ubi_wl_entry_slab, e2); 930 wl_entry_destroy(ubi, e2);
1287 931
1288out_ro: 932out_ro:
1289 ubi_ro_mode(ubi); 933 ubi_ro_mode(ubi);
@@ -1369,38 +1013,6 @@ out_unlock:
1369 return err; 1013 return err;
1370} 1014}
1371 1015
1372#ifdef CONFIG_MTD_UBI_FASTMAP
1373/**
1374 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1375 * @ubi: UBI device description object
1376 */
1377int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1378{
1379 struct ubi_work *wrk;
1380
1381 spin_lock(&ubi->wl_lock);
1382 if (ubi->wl_scheduled) {
1383 spin_unlock(&ubi->wl_lock);
1384 return 0;
1385 }
1386 ubi->wl_scheduled = 1;
1387 spin_unlock(&ubi->wl_lock);
1388
1389 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1390 if (!wrk) {
1391 spin_lock(&ubi->wl_lock);
1392 ubi->wl_scheduled = 0;
1393 spin_unlock(&ubi->wl_lock);
1394 return -ENOMEM;
1395 }
1396
1397 wrk->anchor = 1;
1398 wrk->func = &wear_leveling_worker;
1399 schedule_ubi_work(ubi, wrk);
1400 return 0;
1401}
1402#endif
1403
1404/** 1016/**
1405 * erase_worker - physical eraseblock erase worker function. 1017 * erase_worker - physical eraseblock erase worker function.
1406 * @ubi: UBI device description object 1018 * @ubi: UBI device description object
@@ -1425,15 +1037,13 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1425 if (shutdown) { 1037 if (shutdown) {
1426 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1038 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1427 kfree(wl_wrk); 1039 kfree(wl_wrk);
1428 kmem_cache_free(ubi_wl_entry_slab, e); 1040 wl_entry_destroy(ubi, e);
1429 return 0; 1041 return 0;
1430 } 1042 }
1431 1043
1432 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1044 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1433 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); 1045 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1434 1046
1435 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1436
1437 err = sync_erase(ubi, e, wl_wrk->torture); 1047 err = sync_erase(ubi, e, wl_wrk->torture);
1438 if (!err) { 1048 if (!err) {
1439 /* Fine, we've erased it successfully */ 1049 /* Fine, we've erased it successfully */
@@ -1471,7 +1081,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1471 return err; 1081 return err;
1472 } 1082 }
1473 1083
1474 kmem_cache_free(ubi_wl_entry_slab, e); 1084 wl_entry_destroy(ubi, e);
1475 if (err != -EIO) 1085 if (err != -EIO)
1476 /* 1086 /*
1477 * If this is not %-EIO, we have no idea what to do. Scheduling 1087 * If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1563,6 +1173,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1563 ubi_assert(pnum >= 0); 1173 ubi_assert(pnum >= 0);
1564 ubi_assert(pnum < ubi->peb_count); 1174 ubi_assert(pnum < ubi->peb_count);
1565 1175
1176 down_read(&ubi->fm_protect);
1177
1566retry: 1178retry:
1567 spin_lock(&ubi->wl_lock); 1179 spin_lock(&ubi->wl_lock);
1568 e = ubi->lookuptbl[pnum]; 1180 e = ubi->lookuptbl[pnum];
@@ -1593,6 +1205,7 @@ retry:
1593 ubi_assert(!ubi->move_to_put); 1205 ubi_assert(!ubi->move_to_put);
1594 ubi->move_to_put = 1; 1206 ubi->move_to_put = 1;
1595 spin_unlock(&ubi->wl_lock); 1207 spin_unlock(&ubi->wl_lock);
1208 up_read(&ubi->fm_protect);
1596 return 0; 1209 return 0;
1597 } else { 1210 } else {
1598 if (in_wl_tree(e, &ubi->used)) { 1211 if (in_wl_tree(e, &ubi->used)) {
@@ -1614,6 +1227,7 @@ retry:
1614 ubi_err(ubi, "PEB %d not found", pnum); 1227 ubi_err(ubi, "PEB %d not found", pnum);
1615 ubi_ro_mode(ubi); 1228 ubi_ro_mode(ubi);
1616 spin_unlock(&ubi->wl_lock); 1229 spin_unlock(&ubi->wl_lock);
1230 up_read(&ubi->fm_protect);
1617 return err; 1231 return err;
1618 } 1232 }
1619 } 1233 }
@@ -1627,6 +1241,7 @@ retry:
1627 spin_unlock(&ubi->wl_lock); 1241 spin_unlock(&ubi->wl_lock);
1628 } 1242 }
1629 1243
1244 up_read(&ubi->fm_protect);
1630 return err; 1245 return err;
1631} 1246}
1632 1247
@@ -1758,9 +1373,10 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1758 1373
1759/** 1374/**
1760 * tree_destroy - destroy an RB-tree. 1375 * tree_destroy - destroy an RB-tree.
1376 * @ubi: UBI device description object
1761 * @root: the root of the tree to destroy 1377 * @root: the root of the tree to destroy
1762 */ 1378 */
1763static void tree_destroy(struct rb_root *root) 1379static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1764{ 1380{
1765 struct rb_node *rb; 1381 struct rb_node *rb;
1766 struct ubi_wl_entry *e; 1382 struct ubi_wl_entry *e;
@@ -1782,7 +1398,7 @@ static void tree_destroy(struct rb_root *root)
1782 rb->rb_right = NULL; 1398 rb->rb_right = NULL;
1783 } 1399 }
1784 1400
1785 kmem_cache_free(ubi_wl_entry_slab, e); 1401 wl_entry_destroy(ubi, e);
1786 } 1402 }
1787 } 1403 }
1788} 1404}
@@ -1850,6 +1466,9 @@ int ubi_thread(void *u)
1850 */ 1466 */
1851static void shutdown_work(struct ubi_device *ubi) 1467static void shutdown_work(struct ubi_device *ubi)
1852{ 1468{
1469#ifdef CONFIG_MTD_UBI_FASTMAP
1470 flush_work(&ubi->fm_work);
1471#endif
1853 while (!list_empty(&ubi->works)) { 1472 while (!list_empty(&ubi->works)) {
1854 struct ubi_work *wrk; 1473 struct ubi_work *wrk;
1855 1474
@@ -1883,9 +1502,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1883 init_rwsem(&ubi->work_sem); 1502 init_rwsem(&ubi->work_sem);
1884 ubi->max_ec = ai->max_ec; 1503 ubi->max_ec = ai->max_ec;
1885 INIT_LIST_HEAD(&ubi->works); 1504 INIT_LIST_HEAD(&ubi->works);
1886#ifdef CONFIG_MTD_UBI_FASTMAP
1887 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1888#endif
1889 1505
1890 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1506 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1891 1507
@@ -1907,10 +1523,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1907 1523
1908 e->pnum = aeb->pnum; 1524 e->pnum = aeb->pnum;
1909 e->ec = aeb->ec; 1525 e->ec = aeb->ec;
1910 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1911 ubi->lookuptbl[e->pnum] = e; 1526 ubi->lookuptbl[e->pnum] = e;
1912 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1527 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1913 kmem_cache_free(ubi_wl_entry_slab, e); 1528 wl_entry_destroy(ubi, e);
1914 goto out_free; 1529 goto out_free;
1915 } 1530 }
1916 1531
@@ -1928,7 +1543,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1928 e->pnum = aeb->pnum; 1543 e->pnum = aeb->pnum;
1929 e->ec = aeb->ec; 1544 e->ec = aeb->ec;
1930 ubi_assert(e->ec >= 0); 1545 ubi_assert(e->ec >= 0);
1931 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1932 1546
1933 wl_tree_add(e, &ubi->free); 1547 wl_tree_add(e, &ubi->free);
1934 ubi->free_count++; 1548 ubi->free_count++;
@@ -1966,17 +1580,20 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1966 1580
1967 dbg_wl("found %i PEBs", found_pebs); 1581 dbg_wl("found %i PEBs", found_pebs);
1968 1582
1969 if (ubi->fm) 1583 if (ubi->fm) {
1970 ubi_assert(ubi->good_peb_count == \ 1584 ubi_assert(ubi->good_peb_count == \
1971 found_pebs + ubi->fm->used_blocks); 1585 found_pebs + ubi->fm->used_blocks);
1586
1587 for (i = 0; i < ubi->fm->used_blocks; i++) {
1588 e = ubi->fm->e[i];
1589 ubi->lookuptbl[e->pnum] = e;
1590 }
1591 }
1972 else 1592 else
1973 ubi_assert(ubi->good_peb_count == found_pebs); 1593 ubi_assert(ubi->good_peb_count == found_pebs);
1974 1594
1975 reserved_pebs = WL_RESERVED_PEBS; 1595 reserved_pebs = WL_RESERVED_PEBS;
1976#ifdef CONFIG_MTD_UBI_FASTMAP 1596 ubi_fastmap_init(ubi, &reserved_pebs);
1977 /* Reserve enough LEBs to store two fastmaps. */
1978 reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1979#endif
1980 1597
1981 if (ubi->avail_pebs < reserved_pebs) { 1598 if (ubi->avail_pebs < reserved_pebs) {
 1982 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)", 1599 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
@@ -1998,9 +1615,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1998 1615
1999out_free: 1616out_free:
2000 shutdown_work(ubi); 1617 shutdown_work(ubi);
2001 tree_destroy(&ubi->used); 1618 tree_destroy(ubi, &ubi->used);
2002 tree_destroy(&ubi->free); 1619 tree_destroy(ubi, &ubi->free);
2003 tree_destroy(&ubi->scrub); 1620 tree_destroy(ubi, &ubi->scrub);
2004 kfree(ubi->lookuptbl); 1621 kfree(ubi->lookuptbl);
2005 return err; 1622 return err;
2006} 1623}
@@ -2017,7 +1634,7 @@ static void protection_queue_destroy(struct ubi_device *ubi)
2017 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { 1634 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2018 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { 1635 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2019 list_del(&e->u.list); 1636 list_del(&e->u.list);
2020 kmem_cache_free(ubi_wl_entry_slab, e); 1637 wl_entry_destroy(ubi, e);
2021 } 1638 }
2022 } 1639 }
2023} 1640}
@@ -2029,12 +1646,13 @@ static void protection_queue_destroy(struct ubi_device *ubi)
2029void ubi_wl_close(struct ubi_device *ubi) 1646void ubi_wl_close(struct ubi_device *ubi)
2030{ 1647{
2031 dbg_wl("close the WL sub-system"); 1648 dbg_wl("close the WL sub-system");
1649 ubi_fastmap_close(ubi);
2032 shutdown_work(ubi); 1650 shutdown_work(ubi);
2033 protection_queue_destroy(ubi); 1651 protection_queue_destroy(ubi);
2034 tree_destroy(&ubi->used); 1652 tree_destroy(ubi, &ubi->used);
2035 tree_destroy(&ubi->erroneous); 1653 tree_destroy(ubi, &ubi->erroneous);
2036 tree_destroy(&ubi->free); 1654 tree_destroy(ubi, &ubi->free);
2037 tree_destroy(&ubi->scrub); 1655 tree_destroy(ubi, &ubi->scrub);
2038 kfree(ubi->lookuptbl); 1656 kfree(ubi->lookuptbl);
2039} 1657}
2040 1658
@@ -2133,3 +1751,94 @@ static int self_check_in_pq(const struct ubi_device *ubi,
2133 dump_stack(); 1751 dump_stack();
2134 return -EINVAL; 1752 return -EINVAL;
2135} 1753}
1754#ifndef CONFIG_MTD_UBI_FASTMAP
1755static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
1756{
1757 struct ubi_wl_entry *e;
1758
1759 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1760 self_check_in_wl_tree(ubi, e, &ubi->free);
1761 ubi->free_count--;
1762 ubi_assert(ubi->free_count >= 0);
1763 rb_erase(&e->u.rb, &ubi->free);
1764
1765 return e;
1766}
1767
1768/**
1769 * produce_free_peb - produce a free physical eraseblock.
1770 * @ubi: UBI device description object
1771 *
1772 * This function tries to make a free PEB by means of synchronous execution of
 1773 * pending works. This may be needed if, for example, the background thread is
1774 * disabled. Returns zero in case of success and a negative error code in case
1775 * of failure.
1776 */
1777static int produce_free_peb(struct ubi_device *ubi)
1778{
1779 int err;
1780
1781 while (!ubi->free.rb_node && ubi->works_count) {
1782 spin_unlock(&ubi->wl_lock);
1783
1784 dbg_wl("do one work synchronously");
1785 err = do_work(ubi);
1786
1787 spin_lock(&ubi->wl_lock);
1788 if (err)
1789 return err;
1790 }
1791
1792 return 0;
1793}
1794
1795/**
1796 * ubi_wl_get_peb - get a physical eraseblock.
1797 * @ubi: UBI device description object
1798 *
1799 * This function returns a physical eraseblock in case of success and a
1800 * negative error code in case of failure.
1801 * Returns with ubi->fm_eba_sem held in read mode!
1802 */
1803int ubi_wl_get_peb(struct ubi_device *ubi)
1804{
1805 int err;
1806 struct ubi_wl_entry *e;
1807
1808retry:
1809 down_read(&ubi->fm_eba_sem);
1810 spin_lock(&ubi->wl_lock);
1811 if (!ubi->free.rb_node) {
1812 if (ubi->works_count == 0) {
1813 ubi_err(ubi, "no free eraseblocks");
1814 ubi_assert(list_empty(&ubi->works));
1815 spin_unlock(&ubi->wl_lock);
1816 return -ENOSPC;
1817 }
1818
1819 err = produce_free_peb(ubi);
1820 if (err < 0) {
1821 spin_unlock(&ubi->wl_lock);
1822 return err;
1823 }
1824 spin_unlock(&ubi->wl_lock);
1825 up_read(&ubi->fm_eba_sem);
1826 goto retry;
1827
1828 }
1829 e = wl_get_wle(ubi);
1830 prot_queue_add(ubi, e);
1831 spin_unlock(&ubi->wl_lock);
1832
1833 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1834 ubi->peb_size - ubi->vid_hdr_aloffset);
1835 if (err) {
1836 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
1837 return err;
1838 }
1839
1840 return e->pnum;
1841}
1842#else
1843#include "fastmap-wl.c"
1844#endif
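Note the sharpened contract: both the plain variant above and the fastmap variant in fastmap-wl.c return from ubi_wl_get_peb() with fm_eba_sem held in read mode, even on error. A sketch of the expected caller pattern on the EBA side (the matching eba.c changes are outside this excerpt, so names here are illustrative):

	/* Sketch of an EBA-side caller, assuming the new locking contract. */
	static int example_map_leb(struct ubi_device *ubi, struct ubi_volume *vol,
				   int lnum)
	{
		int pnum;

		pnum = ubi_wl_get_peb(ubi);	/* returns with fm_eba_sem held */
		if (pnum < 0) {
			up_read(&ubi->fm_eba_sem);	/* held even on error */
			return pnum;
		}

		/* ... write the VID header and data to @pnum here ... */

		vol->eba_tbl[lnum] = pnum;	/* cannot race a fastmap write */
		up_read(&ubi->fm_eba_sem);

		return 0;
	}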
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 000000000000..bd1f07e5ce9a
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,28 @@
1#ifndef UBI_WL_H
2#define UBI_WL_H
3#ifdef CONFIG_MTD_UBI_FASTMAP
4static int anchor_pebs_avalible(struct rb_root *root);
5static void update_fastmap_work_fn(struct work_struct *wrk);
6static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
7static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
8static void ubi_fastmap_close(struct ubi_device *ubi);
9static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
10{
11 /* Reserve enough LEBs to store two fastmaps. */
12 *count += (ubi->fm_size / ubi->leb_size) * 2;
13 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
14}
15static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
16 struct ubi_wl_entry *e,
17 struct rb_root *root);
18#else /* !CONFIG_MTD_UBI_FASTMAP */
19static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
20static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
21static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
22static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
23 struct ubi_wl_entry *e,
24 struct rb_root *root) {
25 return e;
26}
27#endif /* CONFIG_MTD_UBI_FASTMAP */
28#endif /* UBI_WL_H */
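wl.h is deliberately unusual: it forward-declares static functions whose definitions come either from the tail of wl.c or from fastmap-wl.c, which wl.c includes textually when CONFIG_MTD_UBI_FASTMAP is set. A stripped-down illustration of the pattern, with hypothetical names:

	/* core.c: hypothetical illustration of the wl.c/fastmap-wl.c split */
	#include "ops.h"		/* static prototype: static void helper(void); */

	void frob(void)
	{
		helper();		/* one call site, two possible definitions */
	}

	#ifndef CONFIG_FEATURE
	static void helper(void)
	{
		/* plain implementation */
	}
	#else
	#include "feature-ops.c"	/* defines helper(); static linkage is kept */
	#endif

This keeps the fastmap logic in its own file while still letting it share wl.c's static helpers and locks without exporting any symbols.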