author	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2009-05-24 07:13:34 -0400
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2009-06-02 06:53:35 -0400
commit	b86a2c56e512f46d140a4bcb4e35e8a7d4a99a4b (patch)
tree	59c3e036dfd767b73e700bd7fd8cb4bee15c4f58	/drivers/mtd/ubi/ubi.h
parent	87960c0b12d0c5a0b37e0c79aef77aa1a0b10d44 (diff)
UBI: do not switch to R/O mode on read errors
This patch improves UBI error handling. At the moment UBI switches to R/O mode when the WL worker fails to read the source PEB. This means that the upper layers (e.g., UBIFS) have no chance to unmap the erroneous PEB and fix the error. This patch changes this behaviour and makes UBI put such PEBs into a separate RB-tree, thus preventing the WL worker from hitting the same read errors again and again. However, there is a 10% limit on the maximum number of such PEBs; if there are too many of them, UBI switches to R/O mode.

Additionally, this patch teaches UBI not to panic and switch to R/O mode if, after a PEB has been copied, the target LEB cannot be read back. Instead, UBI now cancels the operation and schedules the target PEB for torturing.

The error paths have been tested by injecting errors into 'ubi_eba_copy_leb()'.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
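To illustrate the new error-handling flow, here is a minimal sketch of what the move path might do when the source PEB cannot be read. The helper name, exact locking, and placement are assumptions for illustration and are not the code of this patch:

/*
 * Illustrative sketch only (not part of this patch): on a read error from
 * the source PEB, park it in the new @erroneous RB-tree so the WL worker
 * stops retrying it, and fall back to R/O mode only when the number of
 * erroneous PEBs exceeds the @max_erroneous limit.
 */
static int handle_source_rd_err(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int too_many;

	spin_lock(&ubi->wl_lock);
	wl_tree_add(e, &ubi->erroneous);	/* keep the WL worker away from it */
	too_many = ++ubi->erroneous_peb_count > ubi->max_erroneous;
	spin_unlock(&ubi->wl_lock);

	if (too_many) {
		/* Too many erroneous PEBs - give up and switch to R/O mode */
		ubi_ro_mode(ubi);
		return -EIO;
	}
	return 0;
}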
Diffstat (limited to 'drivers/mtd/ubi/ubi.h')
-rw-r--r--	drivers/mtd/ubi/ubi.h | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index fd9b20da5b6b..6d929329a8d5 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -105,6 +105,10 @@ enum {
  *
  * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
  *                   PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ *                     PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ *                     PEB
  * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
@@ -112,6 +116,8 @@ enum {
  */
 enum {
 	MOVE_CANCEL_RACE = 1,
+	MOVE_SOURCE_RD_ERR,
+	MOVE_TARGET_RD_ERR,
 	MOVE_TARGET_WR_ERR,
 	MOVE_CANCEL_BITFLIPS,
 };
@@ -334,14 +340,15 @@ struct ubi_wl_entry;
  * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
  * @pq: protection queue (contain physical eraseblocks which are temporarily
  *      protected from the wear-leveling worker)
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
- *           fields
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works and
+ *           @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -361,6 +368,8 @@ struct ubi_wl_entry;
  * @peb_size: physical eraseblock size
  * @bad_peb_count: count of bad physical eraseblocks
  * @good_peb_count: count of good physical eraseblocks
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
  * @min_io_size: minimal input/output unit size of the underlying MTD device
  * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
  * @ro_mode: if the UBI device is in read-only mode
@@ -418,6 +427,7 @@ struct ubi_device {
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
+	struct rb_root erroneous;
 	struct rb_root free;
 	struct rb_root scrub;
 	struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -442,6 +452,8 @@ struct ubi_device {
 	int peb_size;
 	int bad_peb_count;
 	int good_peb_count;
+	int erroneous_peb_count;
+	int max_erroneous;
 	int min_io_size;
 	int hdrs_min_io_size;
 	int ro_mode;
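The 10% limit mentioned in the commit message is held in the new @max_erroneous field. A sketch of how it could be derived during device initialization follows; the exact initialization site and rounding are assumptions, only the 10% figure comes from the commit message:

	/*
	 * Sketch: cap erroneous PEBs at roughly 10% of the device's PEBs,
	 * per the commit message.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;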