author     Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>        2008-12-15 08:07:41 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com> 2008-12-16 03:09:58 -0500
commit     7b6c32daec3bff380ced6822002bc352bdf2c982
tree       8c5cb043f18451628151dc2492410fb70999a634 /drivers/mtd
parent     23553b2c08c9b6e96be98c44feb9c5e640d3e789
UBI: simplify PEB protection code
UBI has two RB-trees to implement PEB protection, which is too much for
simply preventing a PEB from being moved for some time. This patch
implements the same thing using lists. The benefits:

1. No need to allocate a protection entry on each PEB get.
2. No need to maintain balanced trees and walk them.

Signed-off-by: Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
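For readers skimming the patch, the sketch below models the rotating protection queue this commit introduces: a fixed-size circular array of per-cycle lists plus a head index. A newly handed-out PEB is placed in the slot just behind the head, and each erase operation releases the head slot and advances it, so a PEB stays protected for roughly UBI_PROT_QUEUE_LEN global erase cycles. This is a minimal userspace illustration, not the kernel code: it uses a plain singly linked list instead of <linux/list.h>, and the names only mirror those used in the patch.

/* Hypothetical userspace model of the protection queue (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

#define UBI_PROT_QUEUE_LEN 10

struct peb {
	int pnum;
	struct peb *next;
};

/* One list per "erase cycle" slot; pq_head is the slot served next. */
static struct peb *pq[UBI_PROT_QUEUE_LEN];
static int pq_head;

/* Protect a PEB: put it into the tail slot (the slot just before the head). */
static void prot_queue_add(struct peb *e)
{
	int pq_tail = pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	e->next = pq[pq_tail];
	pq[pq_tail] = e;
}

/* Called once per erase operation: release everything in the head slot. */
static void serve_prot_queue(void)
{
	struct peb *e = pq[pq_head];

	while (e) {
		struct peb *next = e->next;

		printf("PEB %d: protection over\n", e->pnum);
		free(e);
		e = next;
	}
	pq[pq_head] = NULL;
	pq_head = (pq_head + 1) % UBI_PROT_QUEUE_LEN;
}

int main(void)
{
	struct peb *e = malloc(sizeof(*e));

	e->pnum = 42;
	prot_queue_add(e);
	/* After UBI_PROT_QUEUE_LEN erase cycles the PEB is released. */
	for (int i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		serve_prot_queue();
	return 0;
}

Compared with the old scheme, adding a PEB is a constant-time list insertion with no allocation, and releasing protected PEBs only touches the head slot, which is what the commit message's two benefits refer to.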
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/ubi.h |  39
-rw-r--r--  drivers/mtd/ubi/wl.c  | 364
2 files changed, 162 insertions(+), 241 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 46a4763f8e7c..4a8ec485c91d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
 #define UBI_IO_RETRIES 3
 
 /*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/*
  * Error codes returned by the I/O sub-system.
  *
  * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
@@ -96,6 +103,7 @@ enum {
96/** 103/**
97 * struct ubi_wl_entry - wear-leveling entry. 104 * struct ubi_wl_entry - wear-leveling entry.
98 * @u.rb: link in the corresponding (free/used) RB-tree 105 * @u.rb: link in the corresponding (free/used) RB-tree
106 * @u.list: link in the protection queue
99 * @ec: erase counter 107 * @ec: erase counter
100 * @pnum: physical eraseblock number 108 * @pnum: physical eraseblock number
101 * 109 *
@@ -106,6 +114,7 @@ enum {
 struct ubi_wl_entry {
 	union {
 		struct rb_node rb;
+		struct list_head list;
 	} u;
 	int ec;
 	int pnum;
@@ -290,7 +299,7 @@ struct ubi_wl_entry;
  * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
  *
  * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- *                     of UBI ititializetion
+ *                     of UBI initialization
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
@@ -308,18 +317,17 @@ struct ubi_wl_entry;
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
+ * @pq: protection queue (contain physical eraseblocks which are temporarily
+ *      protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
  *           fields
  * @move_mutex: serializes eraseblock moves
- * @work_sem: sycnhronizes the WL worker with use tasks
+ * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
- * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
  * @move_to_put: if the "to" PEB was put
@@ -353,11 +361,11 @@ struct ubi_wl_entry;
  *
  * @peb_buf1: a buffer of PEB size used for different purposes
  * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
+ * @buf_mutex: protects @peb_buf1 and @peb_buf2
  * @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-nameing
+ * @mult_mutex: serializes operations on multiple volumes, like re-naming
  * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @dbg_buf_mutex: protects @dbg_peb_buf
  */
 struct ubi_device {
 	struct cdev cdev;
@@ -394,16 +402,13 @@ struct ubi_device {
 	struct rb_root used;
 	struct rb_root free;
 	struct rb_root scrub;
-	struct {
-		struct rb_root pnum;
-		struct rb_root aec;
-	} prot;
+	struct list_head pq[UBI_PROT_QUEUE_LEN];
+	int pq_head;
 	spinlock_t wl_lock;
 	struct mutex move_mutex;
 	struct rw_semaphore work_sem;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
-	unsigned long long abs_ec;
 	struct ubi_wl_entry *move_from;
 	struct ubi_wl_entry *move_to;
 	int move_to_put;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 0279bf9dc722..14901cb82c18 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
  * UBI wear-leveling sub-system.
  *
  * This sub-system is responsible for wear-leveling. It works in terms of
- * physical* eraseblocks and erase counters and knows nothing about logical
+ * physical eraseblocks and erase counters and knows nothing about logical
  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
  * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
  *
  * As it was said, for the UBI sub-system all physical eraseblocks are either
  * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in a set of different RB-trees: @wl->used,
- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
+ * (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ *   to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ *   soon, so it makes sense not to move it for some time, but wait; this is
+ *   especially important in case of "short term" physical eraseblocks.
+ *
+ * Physical eraseblocks stay protected only for limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how may (global) erase cycles PEBs are protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up on several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
  *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
@@ -70,9 +101,6 @@
  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
  * pick target PEB with an average EC if our PEB is not very "old". This is a
  * room for future re-works of the WL sub-system.
- *
- * Note: the stuff with protection trees looks too complex and is difficult to
- * understand. Should be fixed.
  */
 
 #include <linux/slab.h>
@@ -85,14 +113,6 @@
 #define WL_RESERVED_PEBS 1
 
 /*
- * How many erase cycles are short term, unknown, and long term physical
- * eraseblocks protected.
- */
-#define ST_PROTECTION 16
-#define U_PROTECTION 10
-#define LT_PROTECTION 4
-
-/*
  * Maximum difference between two erase counters. If this threshold is
  * exceeded, the WL sub-system starts moving data from used physical
  * eraseblocks with low erase counter to free physical eraseblocks with high
@@ -120,64 +140,9 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_prot_entry - PEB protection entry.
- * @rb_pnum: link in the @wl->prot.pnum RB-tree
- * @rb_aec: link in the @wl->prot.aec RB-tree
- * @abs_ec: the absolute erase counter value when the protection ends
- * @e: the wear-leveling entry of the physical eraseblock under protection
- *
- * When the WL sub-system returns a physical eraseblock, the physical
- * eraseblock is protected from being moved for some "time". For this reason,
- * the physical eraseblock is not directly moved from the @wl->free tree to the
- * @wl->used tree. There is one more tree in between where this physical
- * eraseblock is temporarily stored (@wl->prot).
- *
- * All this protection stuff is needed because:
- * o we don't want to move physical eraseblocks just after we have given them
- *   to the user; instead, we first want to let users fill them up with data;
- *
- * o there is a chance that the user will put the physical eraseblock very
- *   soon, so it makes sense not to move it for some time, but wait; this is
- *   especially important in case of "short term" physical eraseblocks.
- *
- * Physical eraseblocks stay protected only for limited time. But the "time" is
- * measured in erase cycles in this case. This is implemented with help of the
- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
- * the @wl->used tree.
- *
- * Protected physical eraseblocks are searched by physical eraseblock number
- * (when they are put) and by the absolute erase counter (to check if it is
- * time to move them to the @wl->used tree). So there are actually 2 RB-trees
- * storing the protected physical eraseblocks: @wl->prot.pnum and
- * @wl->prot.aec. They are referred to as the "protection" trees. The
- * first one is indexed by the physical eraseblock number. The second one is
- * indexed by the absolute erase counter. Both trees store
- * &struct ubi_wl_prot_entry objects.
- *
- * Each physical eraseblock has 2 main states: free and used. The former state
- * corresponds to the @wl->free tree. The latter state is split up on several
- * sub-states:
- * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is temporarily prohibited (@wl->prot.pnum and
- *   @wl->prot.aec trees);
- * o scrubbing is needed (@wl->scrub tree).
- *
- * Depending on the sub-state, wear-leveling entries of the used physical
- * eraseblocks may be kept in one of those trees.
- */
-struct ubi_wl_prot_entry {
-	struct rb_node rb_pnum;
-	struct rb_node rb_aec;
-	unsigned long long abs_ec;
-	struct ubi_wl_entry *e;
-};
-
-/**
  * struct ubi_work - UBI work description data structure.
  * @list: a link in the list of pending works
  * @func: worker function
- * @priv: private data of the worker function
  * @e: physical eraseblock to erase
  * @torture: if the physical eraseblock has to be tortured
  *
@@ -198,9 +163,11 @@ struct ubi_work {
 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 				     struct rb_root *root);
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
 #else
 #define paranoid_check_ec(ubi, pnum, ec) 0
 #define paranoid_check_in_wl_tree(e, root)
+#define paranoid_check_in_pq(ubi, e) 0
 #endif
 
 /**
@@ -355,49 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
 }
 
 /**
- * prot_tree_add - add physical eraseblock to protection trees.
+ * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
- * @pe: protection entry object to use
- * @ec: for how many erase operations this PEB should be protected
  *
- * @wl->lock has to be locked.
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
  */
-static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  struct ubi_wl_prot_entry *pe, int ec)
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
 {
-	struct rb_node **p, *parent = NULL;
-	struct ubi_wl_prot_entry *pe1;
+	int pq_tail = ubi->pq_head - 1;
 
-	pe->e = e;
-	pe->abs_ec = ubi->abs_ec + ec;
-
-	p = &ubi->prot.pnum.rb_node;
-	while (*p) {
-		parent = *p;
-		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
-
-		if (e->pnum < pe1->e->pnum)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&pe->rb_pnum, parent, p);
-	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
-
-	p = &ubi->prot.aec.rb_node;
-	parent = NULL;
-	while (*p) {
-		parent = *p;
-		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
-
-		if (pe->abs_ec < pe1->abs_ec)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&pe->rb_aec, parent, p);
-	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+	if (pq_tail < 0)
+		pq_tail = UBI_PROT_QUEUE_LEN - 1;
+	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
 }
 
 /**
@@ -442,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
 {
-	int err, protect, medium_ec;
+	int err, medium_ec;
 	struct ubi_wl_entry *e, *first, *last;
-	struct ubi_wl_prot_entry *pe;
 
 	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
 		   dtype == UBI_UNKNOWN);
 
-	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-	if (!pe)
-		return -ENOMEM;
-
 retry:
 	spin_lock(&ubi->wl_lock);
 	if (!ubi->free.rb_node) {
@@ -460,16 +397,13 @@ retry:
 			ubi_assert(list_empty(&ubi->works));
 			ubi_err("no free eraseblocks");
 			spin_unlock(&ubi->wl_lock);
-			kfree(pe);
 			return -ENOSPC;
 		}
 		spin_unlock(&ubi->wl_lock);
 
 		err = produce_free_peb(ubi);
-		if (err < 0) {
-			kfree(pe);
+		if (err < 0)
 			return err;
-		}
 		goto retry;
 	}
 
@@ -482,7 +416,6 @@ retry:
 		 * %WL_FREE_MAX_DIFF.
 		 */
 		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-		protect = LT_PROTECTION;
 		break;
 	case UBI_UNKNOWN:
 		/*
@@ -502,7 +435,6 @@ retry:
 			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
 			e = find_wl_entry(&ubi->free, medium_ec);
 		}
-		protect = U_PROTECTION;
 		break;
 	case UBI_SHORTTERM:
 		/*
@@ -510,63 +442,45 @@ retry:
 		 * lowest erase counter as we expect it will be erased soon.
 		 */
 		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
-		protect = ST_PROTECTION;
 		break;
 	default:
-		protect = 0;
-		e = NULL;
 		BUG();
 	}
 
+	paranoid_check_in_wl_tree(e, &ubi->free);
+
 	/*
-	 * Move the physical eraseblock to the protection trees where it will
+	 * Move the physical eraseblock to the protection queue where it will
 	 * be protected from being moved for some time.
 	 */
-	paranoid_check_in_wl_tree(e, &ubi->free);
 	rb_erase(&e->u.rb, &ubi->free);
-	prot_tree_add(ubi, e, pe, protect);
-
-	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
+	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+	prot_queue_add(ubi, e);
 	spin_unlock(&ubi->wl_lock);
-
 	return e->pnum;
 }
 
 /**
- * prot_tree_del - remove a physical eraseblock from the protection trees
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
- * This function returns PEB @pnum from the protection trees and returns zero
- * in case of success and %-ENODEV if the PEB was not found in the protection
- * trees.
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
 */
-static int prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
 {
-	struct rb_node *p;
-	struct ubi_wl_prot_entry *pe = NULL;
-
-	p = ubi->prot.pnum.rb_node;
-	while (p) {
-
-		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
-
-		if (pnum == pe->e->pnum)
-			goto found;
+	struct ubi_wl_entry *e;
 
-		if (pnum < pe->e->pnum)
-			p = p->rb_left;
-		else
-			p = p->rb_right;
-	}
+	e = ubi->lookuptbl[pnum];
+	if (!e)
+		return -ENODEV;
 
-	return -ENODEV;
+	if (paranoid_check_in_pq(ubi, e))
+		return -ENODEV;
 
-found:
-	ubi_assert(pe->e->pnum == pnum);
-	rb_erase(&pe->rb_aec, &ubi->prot.aec);
-	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-	kfree(pe);
+	list_del(&e->u.list);
+	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
 	return 0;
 }
 
@@ -632,47 +546,47 @@ out_free:
 }
 
 /**
- * check_protection_over - check if it is time to stop protecting some PEBs.
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
- * This function is called after each erase operation, when the absolute erase
- * counter is incremented, to check if some physical eraseblock have not to be
- * protected any longer. These physical eraseblocks are moved from the
- * protection trees to the used tree.
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
 */
-static void check_protection_over(struct ubi_device *ubi)
+static void serve_prot_queue(struct ubi_device *ubi)
 {
-	struct ubi_wl_prot_entry *pe;
+	struct ubi_wl_entry *e, *tmp;
+	int count;
 
 	/*
 	 * There may be several protected physical eraseblock to remove,
 	 * process them all.
 	 */
-	while (1) {
-		spin_lock(&ubi->wl_lock);
-		if (!ubi->prot.aec.rb_node) {
-			spin_unlock(&ubi->wl_lock);
-			break;
-		}
-
-		pe = rb_entry(rb_first(&ubi->prot.aec),
-			      struct ubi_wl_prot_entry, rb_aec);
+repeat:
+	count = 0;
+	spin_lock(&ubi->wl_lock);
+	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+		dbg_wl("PEB %d EC %d protection over, move to used tree",
+		       e->pnum, e->ec);
 
-		if (pe->abs_ec > ubi->abs_ec) {
+		list_del(&e->u.list);
+		wl_tree_add(e, &ubi->used);
+		if (count++ > 32) {
+			/*
+			 * Let's be nice and avoid holding the spinlock for
+			 * too long.
+			 */
 			spin_unlock(&ubi->wl_lock);
-			break;
+			cond_resched();
+			goto repeat;
 		}
-
-		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
-		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
-		rb_erase(&pe->rb_aec, &ubi->prot.aec);
-		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-		wl_tree_add(pe->e, &ubi->used);
-		spin_unlock(&ubi->wl_lock);
-
-		kfree(pe);
-		cond_resched();
 	}
+
+	ubi->pq_head += 1;
+	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+		ubi->pq_head = 0;
+	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+	spin_unlock(&ubi->wl_lock);
 }
 
 /**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
- * This function enqueues a work defined by @wrk to the tail of the pending
- * works list.
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
 */
 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 {
@@ -740,7 +654,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
 	int err, scrubbing = 0, torture = 0;
-	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -857,23 +770,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * The LEB has not been moved because the volume is being
 		 * deleted or the PEB has been put meanwhile. We should prevent
 		 * this PEB from being selected for wear-leveling movement
-		 * again, so put it to the protection tree.
+		 * again, so put it to the protection queue.
 		 */
 
 		dbg_wl("canceled moving PEB %d", e1->pnum);
 		ubi_assert(err == 1);
 
-		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-		if (!pe) {
-			err = -ENOMEM;
-			goto out_error;
-		}
-
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		vid_hdr = NULL;
 
 		spin_lock(&ubi->wl_lock);
-		prot_tree_add(ubi, e1, pe, U_PROTECTION);
+		prot_queue_add(ubi, e1);
 		ubi_assert(!ubi->move_to_put);
 		ubi->move_from = ubi->move_to = NULL;
 		ubi->wl_scheduled = 0;
@@ -1075,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	kfree(wl_wrk);
 
 	spin_lock(&ubi->wl_lock);
-	ubi->abs_ec += 1;
 	wl_tree_add(e, &ubi->free);
 	spin_unlock(&ubi->wl_lock);
 
@@ -1083,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	/*
 	 * One more erase operation has happened, take care about
 	 * protected physical eraseblocks.
 	 */
-	check_protection_over(ubi);
+	serve_prot_queue(ubi);
 
 	/* And take care about wear-leveling */
 	err = ensure_wear_leveling(ubi);
@@ -1220,7 +1126,7 @@ retry:
 		paranoid_check_in_wl_tree(e, &ubi->scrub);
 		rb_erase(&e->u.rb, &ubi->scrub);
 	} else {
-		err = prot_tree_del(ubi, e->pnum);
+		err = prot_queue_del(ubi, e->pnum);
 		if (err) {
 			ubi_err("PEB %d not found", pnum);
 			ubi_ro_mode(ubi);
@@ -1284,7 +1190,7 @@ retry:
 	} else {
 		int err;
 
-		err = prot_tree_del(ubi, e->pnum);
+		err = prot_queue_del(ubi, e->pnum);
 		if (err) {
 			ubi_err("PEB %d not found", pnum);
 			ubi_ro_mode(ubi);
@@ -1315,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
 	int err;
 
 	/*
-	 * Erase while the pending works queue is not empty, but not more then
+	 * Erase while the pending works queue is not empty, but not more than
 	 * the number of currently pending works.
 	 */
 	dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1461,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
  */
 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 {
-	int err;
+	int err, i;
 	struct rb_node *rb1, *rb2;
 	struct ubi_scan_volume *sv;
 	struct ubi_scan_leb *seb, *tmp;
 	struct ubi_wl_entry *e;
 
-
 	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
-	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
 	mutex_init(&ubi->move_mutex);
 	init_rwsem(&ubi->work_sem);
@@ -1483,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	if (!ubi->lookuptbl)
 		return err;
 
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+		INIT_LIST_HEAD(&ubi->pq[i]);
+	ubi->pq_head = 0;
+
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
@@ -1577,33 +1485,18 @@ out_free:
 }
 
 /**
- * protection_trees_destroy - destroy the protection RB-trees.
+ * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
-static void protection_trees_destroy(struct ubi_device *ubi)
+static void protection_queue_destroy(struct ubi_device *ubi)
 {
-	struct rb_node *rb;
-	struct ubi_wl_prot_entry *pe;
+	int i;
+	struct ubi_wl_entry *e, *tmp;
 
-	rb = ubi->prot.aec.rb_node;
-	while (rb) {
-		if (rb->rb_left)
-			rb = rb->rb_left;
-		else if (rb->rb_right)
-			rb = rb->rb_right;
-		else {
-			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
-
-			rb = rb_parent(rb);
-			if (rb) {
-				if (rb->rb_left == &pe->rb_aec)
-					rb->rb_left = NULL;
-				else
-					rb->rb_right = NULL;
-			}
-
-			kmem_cache_free(ubi_wl_entry_slab, pe->e);
-			kfree(pe);
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+			list_del(&e->u.list);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1616,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
 {
 	dbg_wl("close the WL sub-system");
 	cancel_pending(ubi);
-	protection_trees_destroy(ubi);
+	protection_queue_destroy(ubi);
 	tree_destroy(&ubi->used);
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
@@ -1686,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 	return 1;
 }
 
+/**
+ * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ *                        queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %1 if it is not.
+ */
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	struct ubi_wl_entry *p;
+	int i;
+
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+		list_for_each_entry(p, &ubi->pq[i], u.list)
+			if (p == e)
+				return 0;
+
+	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+		e->pnum, e->ec);
+	ubi_dbg_dump_stack();
+	return 1;
+}
 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */