author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-18 08:06:55 -0500
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-26 12:15:16 -0500
commit    43f9b25a9cdd7b177f77f026b1461abd1abbd174 (patch)
tree      0c58fa96a8b6050fd2166d67552809cebd435549 /drivers/mtd/ubi/ubi.h
parent    d2c468550915ab2f16149e274a6f0da0b925a748 (diff)
UBI: bugfix: protect from volume removal
When the WL worker is moving an LEB, the volume might occasionally go away. UBI does not handle these situations correctly. This patch introduces a new mutex which serializes the wear-levelling worker and the 'ubi_wl_put_peb()' function. Now, if one puts an LEB whose PEB is being moved, it will wait on the mutex. And because we unmap all LEBs when removing volumes, this makes the volume removal function wait until the LEB movement finishes.

Below is an example of an oops which should be fixed by this patch:

Pid: 9167, comm: io_paral Not tainted (2.6.24-rc5-ubi-2.6.git #2)
EIP: 0060:[<f884a379>] EFLAGS: 00010246 CPU: 0
EIP is at prot_tree_del+0x2a/0x63 [ubi]
EAX: f39a90e0 EBX: 00000000 ECX: 00000000 EDX: 00000134
ESI: f39a90e0 EDI: f39a90e0 EBP: f2d55ddc ESP: f2d55dd4
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process io_paral (pid: 9167, ti=f2d54000 task=f72a8030 task.ti=f2d54000)
Stack: f39a95f8 ef6aae50 f2d55e08 f884a511 f88538e1 f884ecea 00000134 00000000
       f39a9604 f39a95f0 efea8280 00000000 f39a90e0 f2d55e40 f8847261 f8850c3c
       f884eaad 00000001 000000b9 00000134 00000172 000000b9 00000134 00000001
Call Trace:
 [<c0105227>] show_trace_log_lvl+0x1a/0x30
 [<c01052e2>] show_stack_log_lvl+0xa5/0xca
 [<c01053d6>] show_registers+0xcf/0x21b
 [<c0105648>] die+0x126/0x224
 [<c0119a62>] do_page_fault+0x27f/0x60d
 [<c037dd62>] error_code+0x72/0x78
 [<f884a511>] ubi_wl_put_peb+0xf0/0x191 [ubi]
 [<f8847261>] ubi_eba_unmap_leb+0xaf/0xcc [ubi]
 [<f8843c21>] ubi_remove_volume+0x102/0x1e8 [ubi]
 [<f8846077>] ubi_cdev_ioctl+0x22a/0x383 [ubi]
 [<c017d768>] do_ioctl+0x68/0x71
 [<c017d7c6>] vfs_ioctl+0x55/0x271
 [<c017da15>] sys_ioctl+0x33/0x52
 [<c0104152>] sysenter_past_esp+0x5f/0xa5
 =======================

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
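For illustration, a minimal sketch of the put-side serialization described above. The actual code lives in wl.c, which is not part of this diff, so the body below is a simplified assumption of the pattern, not the exact implementation:

	/*
	 * Hypothetical, simplified put path: if the PEB being put is the one
	 * currently under movement, wait for the wear-levelling worker by
	 * taking @move_mutex, then retry.
	 */
	int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
	{
		struct ubi_wl_entry *e;

	retry:
		spin_lock(&ubi->wl_lock);
		e = ubi->lookuptbl[pnum];
		if (e == ubi->move_from) {
			/* The mover owns this PEB - wait until the move finishes */
			spin_unlock(&ubi->wl_lock);
			mutex_lock(&ubi->move_mutex);
			mutex_unlock(&ubi->move_mutex);
			goto retry;
		}
		/* ... normal put path: remove @e from the trees and schedule erasure ... */
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

Because volume removal unmaps every LEB through this path, it cannot complete while a move of one of its PEBs is still in flight.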
Diffstat (limited to 'drivers/mtd/ubi/ubi.h')
-rw-r--r--  drivers/mtd/ubi/ubi.h  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f782d5aa849a..ea9a6990a4dc 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -275,13 +275,13 @@ struct ubi_wl_entry;
  * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
  *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
  *           fields
+ * @move_mutex: serializes eraseblock moves
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
  * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
- * @move_from_put: if the "from" PEB was put
  * @move_to_put: if the "to" PEB was put
  * @works: list of pending works
  * @works_count: count of pending works
@@ -354,12 +354,12 @@ struct ubi_device {
 		struct rb_root aec;
 	} prot;
 	spinlock_t wl_lock;
+	struct mutex move_mutex;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
 	unsigned long long abs_ec;
 	struct ubi_wl_entry *move_from;
 	struct ubi_wl_entry *move_to;
-	int move_from_put;
 	int move_to_put;
 	struct list_head works;
 	int works_count;
@@ -561,8 +561,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
  */
 static inline void ubi_ro_mode(struct ubi_device *ubi)
 {
-	ubi->ro_mode = 1;
-	ubi_warn("switch to read-only mode");
+	if (!ubi->ro_mode) {
+		ubi->ro_mode = 1;
+		ubi_warn("switch to read-only mode");
+	}
 }
 
 /**
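For context, a rough sketch of the mover side of the same pattern (hypothetical and simplified; the wear-levelling worker is implemented in wl.c, which this diff does not touch). The worker holds @move_mutex for the whole copy, so a concurrent 'ubi_wl_put_peb()', and therefore volume removal, blocks until the move is complete:

	static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
					int cancel)
	{
		mutex_lock(&ubi->move_mutex);

		spin_lock(&ubi->wl_lock);
		/* pick @move_from from @used and @move_to from @free ... */
		spin_unlock(&ubi->wl_lock);

		/* copy the LEB contents; this may sleep, hence a mutex rather than a spinlock */

		spin_lock(&ubi->wl_lock);
		ubi->move_from = ubi->move_to = NULL;
		spin_unlock(&ubi->wl_lock);

		mutex_unlock(&ubi->move_mutex);
		return 0;
	}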