author     Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>          2008-12-15 08:07:41 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2008-12-16 03:09:58 -0500
commit     7b6c32daec3bff380ced6822002bc352bdf2c982 (patch)
tree       8c5cb043f18451628151dc2492410fb70999a634 /drivers/mtd/ubi/ubi.h
parent     23553b2c08c9b6e96be98c44feb9c5e640d3e789 (diff)
UBI: simplify PEB protection code
UBI has two RB-trees to implement PEB protection, which is too much for simply
preventing a PEB from being moved for some time. This patch implements the same
protection using lists. The benefits:

1. No need to allocate a protection entry on each PEB get.
2. No need to maintain balanced trees and walk them.

Signed-off-by: Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
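The scheme is easy to model outside the kernel. The sketch below is not the UBI code (the queue is actually driven from drivers/mtd/ubi/wl.c, which is not part of this diff); the helper names, the MAX_PEBS bound and the index-based lists are illustrative only. It shows the two properties the commit message relies on: protecting a PEB is an O(1), allocation-free append to the "youngest" slot, and after UBI_PROT_QUEUE_LEN rotations (one per global erase cycle) the entry falls out of the oldest slot and becomes movable again.

#include <stdio.h>

#define UBI_PROT_QUEUE_LEN 10   /* protection lasts this many global erase cycles */
#define MAX_PEBS 16             /* small bound, model only */

/* Each queue slot holds a singly-linked list of protected PEB numbers. */
static int pq[UBI_PROT_QUEUE_LEN];
static int next_in_slot[MAX_PEBS];
static int pq_head;             /* slot whose protection expires on the next rotation */

static void pq_init(void)
{
        int i;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
                pq[i] = -1;
        for (i = 0; i < MAX_PEBS; i++)
                next_in_slot[i] = -1;
        pq_head = 0;
}

/* Protect a PEB: O(1) append to the "youngest" slot, no allocation needed. */
static void prot_queue_add(int pnum)
{
        int tail = pq_head ? pq_head - 1 : UBI_PROT_QUEUE_LEN - 1;

        next_in_slot[pnum] = pq[tail];
        pq[tail] = pnum;
}

/* One global erase cycle: everything in the oldest slot loses protection. */
static void prot_queue_rotate(void)
{
        int pnum;

        for (pnum = pq[pq_head]; pnum != -1; pnum = next_in_slot[pnum])
                printf("PEB %d: protection over\n", pnum);
        pq[pq_head] = -1;
        pq_head = (pq_head + 1) % UBI_PROT_QUEUE_LEN;
}

int main(void)
{
        int cycle;

        pq_init();
        prot_queue_add(3);                /* protect PEB 3 */
        for (cycle = 1; cycle <= UBI_PROT_QUEUE_LEN; cycle++)
                prot_queue_rotate();      /* PEB 3 is released on the 10th rotation */
        return 0;
}

Compare this with the old scheme removed below, where every protection required allocating an entry and inserting it into two RB-trees, one indexed by PEB number and one by absolute erase counter.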
Diffstat (limited to 'drivers/mtd/ubi/ubi.h')
-rw-r--r--  drivers/mtd/ubi/ubi.h  |  39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 46a4763f8e7c..4a8ec485c91d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
 #define UBI_IO_RETRIES 3
 
 /*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/*
  * Error codes returned by the I/O sub-system.
  *
  * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
@@ -96,6 +103,7 @@ enum {
 /**
  * struct ubi_wl_entry - wear-leveling entry.
  * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
  * @ec: erase counter
  * @pnum: physical eraseblock number
  *
@@ -106,6 +114,7 @@ enum {
 struct ubi_wl_entry {
         union {
                 struct rb_node rb;
+                struct list_head list;
         } u;
         int ec;
         int pnum;
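Note how the new list link goes into the existing union rather than into a new field: a wear-leveling entry lives in exactly one container at a time, either one of the used/free/scrub RB-trees or one slot of the protection queue, so the rb_node and the list_head can share storage. The snippet below is a stand-alone illustration with mocked-up layouts (the real struct rb_node and struct list_head come from <linux/rbtree.h> and <linux/list.h>); since a union is as large as its largest member, adding the list link does not grow struct ubi_wl_entry at all.

#include <stdio.h>

/* Mock-ups with the same member counts as the kernel types (64-bit build). */
struct mock_rb_node   { unsigned long parent_color; void *right, *left; };
struct mock_list_head { void *next, *prev; };

struct mock_wl_entry {
        union {
                struct mock_rb_node rb;      /* while in the used/free/scrub trees */
                struct mock_list_head list;  /* while in the protection queue */
        } u;
        int ec;
        int pnum;
};

int main(void)
{
        printf("rb_node=%zu list_head=%zu union=%zu entry=%zu\n",
               sizeof(struct mock_rb_node),
               sizeof(struct mock_list_head),
               sizeof(((struct mock_wl_entry *)0)->u),
               sizeof(struct mock_wl_entry));
        return 0;
}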
@@ -290,7 +299,7 @@ struct ubi_wl_entry;
  * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
  *
  * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- *                     of UBI ititializetion
+ *                     of UBI initialization
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
@@ -308,18 +317,17 @@ struct ubi_wl_entry;
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
+ * @pq: protection queue (contain physical eraseblocks which are temporarily
+ *      protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
  *           fields
  * @move_mutex: serializes eraseblock moves
- * @work_sem: sycnhronizes the WL worker with use tasks
+ * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
- * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
  * @move_to_put: if the "to" PEB was put
@@ -353,11 +361,11 @@ struct ubi_wl_entry;
  *
  * @peb_buf1: a buffer of PEB size used for different purposes
  * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
+ * @buf_mutex: protects @peb_buf1 and @peb_buf2
  * @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-nameing
+ * @mult_mutex: serializes operations on multiple volumes, like re-naming
  * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @dbg_buf_mutex: protects @dbg_peb_buf
  */
 struct ubi_device {
         struct cdev cdev;
@@ -394,16 +402,13 @@ struct ubi_device {
         struct rb_root used;
         struct rb_root free;
         struct rb_root scrub;
-        struct {
-                struct rb_root pnum;
-                struct rb_root aec;
-        } prot;
+        struct list_head pq[UBI_PROT_QUEUE_LEN];
+        int pq_head;
         spinlock_t wl_lock;
         struct mutex move_mutex;
         struct rw_semaphore work_sem;
         int wl_scheduled;
         struct ubi_wl_entry **lookuptbl;
-        unsigned long long abs_ec;
         struct ubi_wl_entry *move_from;
         struct ubi_wl_entry *move_to;
         int move_to_put;
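For completeness, the sketch below shows roughly how the wear-leveling code is expected to drive the two new fields with the stock <linux/list.h> primitives; it is not a copy of wl.c (which is outside this diff) and the function names are illustrative. It is the in-kernel counterpart of the userspace model shown after the commit message; in the real driver these manipulations must be done under ubi->wl_lock.

#include <linux/list.h>
#include "ubi.h"

/* At attach time every protection-queue slot starts out empty. */
static void sketch_pq_init(struct ubi_device *ubi)
{
        int i;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
                INIT_LIST_HEAD(&ubi->pq[i]);
        ubi->pq_head = 0;
}

/* Protect @e: O(1) append to the "youngest" slot, no allocation. */
static void sketch_pq_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int tail = ubi->pq_head ? ubi->pq_head - 1 : UBI_PROT_QUEUE_LEN - 1;

        list_add_tail(&e->u.list, &ubi->pq[tail]);
}

/* One erase cycle: release the oldest slot, then advance the head. */
static void sketch_pq_rotate(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;

        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                list_del(&e->u.list);
                /* the entry would now go back to the @used RB-tree */
        }
        ubi->pq_head = (ubi->pq_head + 1) % UBI_PROT_QUEUE_LEN;
}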