author | Richard Weinberger <richard@nod.at> | 2012-09-26 11:51:48 -0400
committer | Artem Bityutskiy <artem.bityutskiy@linux.intel.com> | 2012-10-03 05:29:38 -0400
commit | 8199b901a31b6e89b63842643f644fc05b403b20 (patch)
tree | 39970cc3c3cf43be0b1d28e558b736fad2592ad2 /drivers/mtd
parent | dac6e2087a4143cfc3fc1017bf24b9d4be3055b7 (diff)
UBI: Add fastmap support to the WL sub-system
To make fastmap possible, the WL sub-system needs some changes,
mostly to support the fastmap pools.
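
For context: the pools referenced throughout this patch (ubi->fm_pool,
consumed by ubi_wl_get_peb(), and ubi->fm_wl_pool, consumed by
get_peb_for_wl()) are fixed-size arrays of PEB numbers with a fill level.
A minimal sketch of the pool object follows; field names are taken from
their uses in the diff below, the actual definition lives in ubi.h in
this series, and UBI_FM_MAX_POOL_SIZE is an assumed upper bound:

    /*
     * Sketch of a fastmap PEB pool: refill_wl_pool() and
     * refill_wl_user_pool() fill pebs[0..size-1] from the free tree,
     * consumers take pebs[used++], and once used reaches size the next
     * ubi_update_fastmap() refills the pool via ubi_refill_pools().
     */
    struct ubi_fm_pool {
            int pebs[UBI_FM_MAX_POOL_SIZE]; /* PEBs in this pool */
            int used;     /* number of PEBs already handed out */
            int size;     /* current fill level */
            int max_size; /* maximal size of the pool */
    };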
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r-- | drivers/mtd/ubi/wl.c | 575 |
1 file changed, 531 insertions, 44 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index f4d06dbdb2a9..da7b44998b40 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -141,6 +141,42 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi, | |||
141 | static int self_check_in_pq(const struct ubi_device *ubi, | 141 | static int self_check_in_pq(const struct ubi_device *ubi, |
142 | struct ubi_wl_entry *e); | 142 | struct ubi_wl_entry *e); |
143 | 143 | ||
144 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
145 | /** | ||
146 | * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue | ||
147 | * @wrk: the work description object | ||
148 | */ | ||
149 | static void update_fastmap_work_fn(struct work_struct *wrk) | ||
150 | { | ||
151 | struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work); | ||
152 | ubi_update_fastmap(ubi); | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap. | ||
157 | * @ubi: UBI device description object | ||
158 | * @pnum: the PEB to check | ||
159 | */ | ||
160 | static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) | ||
161 | { | ||
162 | int i; | ||
163 | |||
164 | if (!ubi->fm) | ||
165 | return 0; | ||
166 | |||
167 | for (i = 0; i < ubi->fm->used_blocks; i++) | ||
168 | if (ubi->fm->e[i]->pnum == pnum) | ||
169 | return 1; | ||
170 | |||
171 | return 0; | ||
172 | } | ||
173 | #else | ||
174 | static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | #endif | ||
179 | |||
144 | /** | 180 | /** |
145 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. | 181 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. |
146 | * @e: the wear-leveling entry to add | 182 | * @e: the wear-leveling entry to add |
@@ -237,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi) | |||
237 | { | 273 | { |
238 | int err; | 274 | int err; |
239 | 275 | ||
240 | spin_lock(&ubi->wl_lock); | ||
241 | while (!ubi->free.rb_node) { | 276 | while (!ubi->free.rb_node) { |
242 | spin_unlock(&ubi->wl_lock); | 277 | spin_unlock(&ubi->wl_lock); |
243 | 278 | ||
244 | dbg_wl("do one work synchronously"); | 279 | dbg_wl("do one work synchronously"); |
245 | err = do_work(ubi); | 280 | err = do_work(ubi); |
246 | if (err) | ||
247 | return err; | ||
248 | 281 | ||
249 | spin_lock(&ubi->wl_lock); | 282 | spin_lock(&ubi->wl_lock); |
283 | if (err) | ||
284 | return err; | ||
250 | } | 285 | } |
251 | spin_unlock(&ubi->wl_lock); | ||
252 | 286 | ||
253 | return 0; | 287 | return 0; |
254 | } | 288 | } |
@@ -315,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | |||
315 | 349 | ||
316 | /** | 350 | /** |
317 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. | 351 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. |
352 | * @ubi: UBI device description object | ||
318 | * @root: the RB-tree where to look for | 353 | * @root: the RB-tree where to look for |
319 | * @diff: maximum possible difference from the smallest erase counter | 354 | * @diff: maximum possible difference from the smallest erase counter |
320 | * | 355 | * |
321 | * This function looks for a wear leveling entry with erase counter closest to | 356 | * This function looks for a wear leveling entry with erase counter closest to |
322 | * min + @diff, where min is the smallest erase counter. | 357 | * min + @diff, where min is the smallest erase counter. |
323 | */ | 358 | */ |
324 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) | 359 | static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, |
360 | struct rb_root *root, int diff) | ||
325 | { | 361 | { |
326 | struct rb_node *p; | 362 | struct rb_node *p; |
327 | struct ubi_wl_entry *e; | 363 | struct ubi_wl_entry *e, *prev_e = NULL; |
328 | int max; | 364 | int max; |
329 | 365 | ||
330 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); | 366 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); |
@@ -339,35 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) | |||
339 | p = p->rb_left; | 375 | p = p->rb_left; |
340 | else { | 376 | else { |
341 | p = p->rb_right; | 377 | p = p->rb_right; |
378 | prev_e = e; | ||
342 | e = e1; | 379 | e = e1; |
343 | } | 380 | } |
344 | } | 381 | } |
345 | 382 | ||
383 | /* If no fastmap has been written and this WL entry can be used | ||
384 | * as anchor PEB, hold it back and return the second best WL entry | ||
385 | * such that fastmap can use the anchor PEB later. */ | ||
386 | if (prev_e && !ubi->fm_disabled && | ||
387 | !ubi->fm && e->pnum < UBI_FM_MAX_START) | ||
388 | return prev_e; | ||
389 | |||
346 | return e; | 390 | return e; |
347 | } | 391 | } |
348 | 392 | ||
349 | /** | 393 | /** |
350 | * ubi_wl_get_peb - get a physical eraseblock. | 394 | * find_mean_wl_entry - find wear-leveling entry with medium erase counter. |
395 | * @ubi: UBI device description object | ||
396 | * @root: the RB-tree where to look for | ||
397 | * | ||
398 | * This function looks for a wear leveling entry with medium erase counter, | ||
399 | * but not greater than or equal to the lowest erase counter plus | ||
400 | * %WL_FREE_MAX_DIFF/2. | ||
401 | */ | ||
402 | static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, | ||
403 | struct rb_root *root) | ||
404 | { | ||
405 | struct ubi_wl_entry *e, *first, *last; | ||
406 | |||
407 | first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); | ||
408 | last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); | ||
409 | |||
410 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) { | ||
411 | e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); | ||
412 | |||
413 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
414 | /* If no fastmap has been written and this WL entry can be used | ||
415 | * as anchor PEB, hold it back and return the second best | ||
416 | * WL entry such that fastmap can use the anchor PEB later. */ | ||
417 | if (e && !ubi->fm_disabled && !ubi->fm && | ||
418 | e->pnum < UBI_FM_MAX_START) | ||
419 | e = rb_entry(rb_next(root->rb_node), | ||
420 | struct ubi_wl_entry, u.rb); | ||
421 | #endif | ||
422 | } else | ||
423 | e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); | ||
424 | |||
425 | return e; | ||
426 | } | ||
427 | |||
428 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
429 | /** | ||
430 | * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB. | ||
431 | * @root: the RB-tree where to look for | ||
432 | */ | ||
433 | static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) | ||
434 | { | ||
435 | struct rb_node *p; | ||
436 | struct ubi_wl_entry *e, *victim = NULL; | ||
437 | int max_ec = UBI_MAX_ERASECOUNTER; | ||
438 | |||
439 | ubi_rb_for_each_entry(p, e, root, u.rb) { | ||
440 | if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) { | ||
441 | victim = e; | ||
442 | max_ec = e->ec; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | return victim; | ||
447 | } | ||
448 | |||
449 | static int anchor_pebs_avalible(struct rb_root *root) | ||
450 | { | ||
451 | struct rb_node *p; | ||
452 | struct ubi_wl_entry *e; | ||
453 | |||
454 | ubi_rb_for_each_entry(p, e, root, u.rb) | ||
455 | if (e->pnum < UBI_FM_MAX_START) | ||
456 | return 1; | ||
457 | |||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | /** | ||
462 | * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. | ||
463 | * @ubi: UBI device description object | ||
464 | * @anchor: This PEB will be used as anchor PEB by fastmap | ||
465 | * | ||
466 | * The function returns a free physical eraseblock (an anchor PEB, if | ||
467 | * @anchor is set) and removes it from the wl subsystem. | ||
468 | * Must be called with wl_lock held! | ||
469 | */ | ||
470 | struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) | ||
471 | { | ||
472 | struct ubi_wl_entry *e = NULL; | ||
473 | |||
474 | if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) | ||
475 | goto out; | ||
476 | |||
477 | if (anchor) | ||
478 | e = find_anchor_wl_entry(&ubi->free); | ||
479 | else | ||
480 | e = find_mean_wl_entry(ubi, &ubi->free); | ||
481 | |||
482 | if (!e) | ||
483 | goto out; | ||
484 | |||
485 | self_check_in_wl_tree(ubi, e, &ubi->free); | ||
486 | |||
487 | /* remove it from the free list, | ||
488 | * the wl subsystem no longer knows this erase block */ | ||
489 | rb_erase(&e->u.rb, &ubi->free); | ||
490 | ubi->free_count--; | ||
491 | out: | ||
492 | return e; | ||
493 | } | ||
494 | #endif | ||
495 | |||
496 | /** | ||
497 | * __wl_get_peb - get a physical eraseblock. | ||
351 | * @ubi: UBI device description object | 498 | * @ubi: UBI device description object |
352 | * | 499 | * |
353 | * This function returns a physical eraseblock in case of success and a | 500 | * This function returns a physical eraseblock in case of success and a |
354 | * negative error code in case of failure. Might sleep. | 501 | * negative error code in case of failure. Might sleep. |
355 | */ | 502 | */ |
356 | int ubi_wl_get_peb(struct ubi_device *ubi) | 503 | static int __wl_get_peb(struct ubi_device *ubi) |
357 | { | 504 | { |
358 | int err; | 505 | int err; |
359 | struct ubi_wl_entry *e, *first, *last; | 506 | struct ubi_wl_entry *e; |
360 | 507 | ||
361 | retry: | 508 | retry: |
362 | spin_lock(&ubi->wl_lock); | ||
363 | if (!ubi->free.rb_node) { | 509 | if (!ubi->free.rb_node) { |
364 | if (ubi->works_count == 0) { | 510 | if (ubi->works_count == 0) { |
365 | ubi_assert(list_empty(&ubi->works)); | ||
366 | ubi_err("no free eraseblocks"); | 511 | ubi_err("no free eraseblocks"); |
367 | spin_unlock(&ubi->wl_lock); | 512 | ubi_assert(list_empty(&ubi->works)); |
368 | return -ENOSPC; | 513 | return -ENOSPC; |
369 | } | 514 | } |
370 | spin_unlock(&ubi->wl_lock); | ||
371 | 515 | ||
372 | err = produce_free_peb(ubi); | 516 | err = produce_free_peb(ubi); |
373 | if (err < 0) | 517 | if (err < 0) |
@@ -375,13 +519,11 @@ retry: | |||
375 | goto retry; | 519 | goto retry; |
376 | } | 520 | } |
377 | 521 | ||
378 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); | 522 | e = find_mean_wl_entry(ubi, &ubi->free); |
379 | last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); | 523 | if (!e) { |
380 | 524 | ubi_err("no free eraseblocks"); | |
381 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) | 525 | return -ENOSPC; |
382 | e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); | 526 | } |
383 | else | ||
384 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); | ||
385 | 527 | ||
386 | self_check_in_wl_tree(ubi, e, &ubi->free); | 528 | self_check_in_wl_tree(ubi, e, &ubi->free); |
387 | 529 | ||
@@ -390,10 +532,14 @@ retry: | |||
390 | * be protected from being moved for some time. | 532 | * be protected from being moved for some time. |
391 | */ | 533 | */ |
392 | rb_erase(&e->u.rb, &ubi->free); | 534 | rb_erase(&e->u.rb, &ubi->free); |
535 | ubi->free_count--; | ||
393 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); | 536 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); |
537 | #ifndef CONFIG_MTD_UBI_FASTMAP | ||
538 | /* We have to enqueue e only if fastmap is disabled, | ||
539 | * if fastmap is enabled, prot_queue_add() will be called by | ||
540 | * ubi_wl_get_peb() after removing e from the pool. */ | ||
394 | prot_queue_add(ubi, e); | 541 | prot_queue_add(ubi, e); |
395 | spin_unlock(&ubi->wl_lock); | 542 | #endif |
396 | |||
397 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, | 543 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, |
398 | ubi->peb_size - ubi->vid_hdr_aloffset); | 544 | ubi->peb_size - ubi->vid_hdr_aloffset); |
399 | if (err) { | 545 | if (err) { |
@@ -404,6 +550,150 @@ retry: | |||
404 | return e->pnum; | 550 | return e->pnum; |
405 | } | 551 | } |
406 | 552 | ||
553 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
554 | /** | ||
555 | * return_unused_pool_pebs - returns unused PEBs to the free tree. | ||
556 | * @ubi: UBI device description object | ||
557 | * @pool: fastmap pool description object | ||
558 | */ | ||
559 | static void return_unused_pool_pebs(struct ubi_device *ubi, | ||
560 | struct ubi_fm_pool *pool) | ||
561 | { | ||
562 | int i; | ||
563 | struct ubi_wl_entry *e; | ||
564 | |||
565 | for (i = pool->used; i < pool->size; i++) { | ||
566 | e = ubi->lookuptbl[pool->pebs[i]]; | ||
567 | wl_tree_add(e, &ubi->free); | ||
568 | ubi->free_count++; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * refill_wl_pool - refills the fastmap pool used by the | ||
574 | * WL sub-system. | ||
575 | * @ubi: UBI device description object | ||
576 | */ | ||
577 | static void refill_wl_pool(struct ubi_device *ubi) | ||
578 | { | ||
579 | struct ubi_wl_entry *e; | ||
580 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; | ||
581 | |||
582 | return_unused_pool_pebs(ubi, pool); | ||
583 | |||
584 | for (pool->size = 0; pool->size < pool->max_size; pool->size++) { | ||
585 | if (!ubi->free.rb_node || | ||
586 | (ubi->free_count - ubi->beb_rsvd_pebs < 5)) | ||
587 | break; | ||
588 | |||
589 | e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); | ||
590 | self_check_in_wl_tree(ubi, e, &ubi->free); | ||
591 | rb_erase(&e->u.rb, &ubi->free); | ||
592 | ubi->free_count--; | ||
593 | |||
594 | pool->pebs[pool->size] = e->pnum; | ||
595 | } | ||
596 | pool->used = 0; | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb. | ||
601 | * @ubi: UBI device description object | ||
602 | */ | ||
603 | static void refill_wl_user_pool(struct ubi_device *ubi) | ||
604 | { | ||
605 | struct ubi_fm_pool *pool = &ubi->fm_pool; | ||
606 | |||
607 | return_unused_pool_pebs(ubi, pool); | ||
608 | |||
609 | for (pool->size = 0; pool->size < pool->max_size; pool->size++) { | ||
610 | if (!ubi->free.rb_node || | ||
611 | (ubi->free_count - ubi->beb_rsvd_pebs < 1)) | ||
612 | break; | ||
613 | |||
614 | pool->pebs[pool->size] = __wl_get_peb(ubi); | ||
615 | if (pool->pebs[pool->size] < 0) | ||
616 | break; | ||
617 | } | ||
618 | pool->used = 0; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * ubi_refill_pools - refills all fastmap PEB pools. | ||
623 | * @ubi: UBI device description object | ||
624 | */ | ||
625 | void ubi_refill_pools(struct ubi_device *ubi) | ||
626 | { | ||
627 | spin_lock(&ubi->wl_lock); | ||
628 | refill_wl_pool(ubi); | ||
629 | refill_wl_user_pool(ubi); | ||
630 | spin_unlock(&ubi->wl_lock); | ||
631 | } | ||
632 | |||
633 | /* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of | ||
634 | * the fastmap pool. | ||
635 | */ | ||
636 | int ubi_wl_get_peb(struct ubi_device *ubi) | ||
637 | { | ||
638 | int ret; | ||
639 | struct ubi_fm_pool *pool = &ubi->fm_pool; | ||
640 | struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool; | ||
641 | |||
642 | if (!pool->size || !wl_pool->size || pool->used == pool->size || | ||
643 | wl_pool->used == wl_pool->size) | ||
644 | ubi_update_fastmap(ubi); | ||
645 | |||
646 | /* we did not get a single free PEB */ | ||
647 | if (!pool->size) | ||
648 | ret = -ENOSPC; | ||
649 | else { | ||
650 | spin_lock(&ubi->wl_lock); | ||
651 | ret = pool->pebs[pool->used++]; | ||
652 | prot_queue_add(ubi, ubi->lookuptbl[ret]); | ||
653 | spin_unlock(&ubi->wl_lock); | ||
654 | } | ||
655 | |||
656 | return ret; | ||
657 | } | ||
658 | |||
659 | /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system. | ||
660 | * | ||
661 | * @ubi: UBI device description object | ||
662 | */ | ||
663 | static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) | ||
664 | { | ||
665 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; | ||
666 | int pnum; | ||
667 | |||
668 | if (pool->used == pool->size || !pool->size) { | ||
669 | /* We cannot update the fastmap here because this | ||
670 | * function is called in atomic context. | ||
671 | * Let's fail here and refill/update it as soon as possible. */ | ||
672 | schedule_work(&ubi->fm_work); | ||
673 | return NULL; | ||
674 | } else { | ||
675 | pnum = pool->pebs[pool->used++]; | ||
676 | return ubi->lookuptbl[pnum]; | ||
677 | } | ||
678 | } | ||
679 | #else | ||
680 | static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) | ||
681 | { | ||
682 | return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); | ||
683 | } | ||
684 | |||
685 | int ubi_wl_get_peb(struct ubi_device *ubi) | ||
686 | { | ||
687 | int peb; | ||
688 | |||
689 | spin_lock(&ubi->wl_lock); | ||
690 | peb = __wl_get_peb(ubi); | ||
691 | spin_unlock(&ubi->wl_lock); | ||
692 | |||
693 | return peb; | ||
694 | } | ||
695 | #endif | ||
696 | |||
407 | /** | 697 | /** |
408 | * prot_queue_del - remove a physical eraseblock from the protection queue. | 698 | * prot_queue_del - remove a physical eraseblock from the protection queue. |
409 | * @ubi: UBI device description object | 699 | * @ubi: UBI device description object |
@@ -534,14 +824,14 @@ repeat: | |||
534 | } | 824 | } |
535 | 825 | ||
536 | /** | 826 | /** |
537 | * schedule_ubi_work - schedule a work. | 827 | * __schedule_ubi_work - schedule a work. |
538 | * @ubi: UBI device description object | 828 | * @ubi: UBI device description object |
539 | * @wrk: the work to schedule | 829 | * @wrk: the work to schedule |
540 | * | 830 | * |
541 | * This function adds a work defined by @wrk to the tail of the pending works | 831 | * This function adds a work defined by @wrk to the tail of the pending works |
542 | * list. | 832 | * list. Can only be used if ubi->work_sem is already held in read mode! |
543 | */ | 833 | */ |
544 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | 834 | static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) |
545 | { | 835 | { |
546 | spin_lock(&ubi->wl_lock); | 836 | spin_lock(&ubi->wl_lock); |
547 | list_add_tail(&wrk->list, &ubi->works); | 837 | list_add_tail(&wrk->list, &ubi->works); |
@@ -552,9 +842,35 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | |||
552 | spin_unlock(&ubi->wl_lock); | 842 | spin_unlock(&ubi->wl_lock); |
553 | } | 843 | } |
554 | 844 | ||
845 | /** | ||
846 | * schedule_ubi_work - schedule a work. | ||
847 | * @ubi: UBI device description object | ||
848 | * @wrk: the work to schedule | ||
849 | * | ||
850 | * This function adds a work defined by @wrk to the tail of the pending works | ||
851 | * list. | ||
852 | */ | ||
853 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | ||
854 | { | ||
855 | down_read(&ubi->work_sem); | ||
856 | __schedule_ubi_work(ubi, wrk); | ||
857 | up_read(&ubi->work_sem); | ||
858 | } | ||
859 | |||
555 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | 860 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, |
556 | int cancel); | 861 | int cancel); |
557 | 862 | ||
863 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
864 | /** | ||
865 | * ubi_is_erase_work - checks whether a work is erase work. | ||
866 | * @wrk: The work object to be checked | ||
867 | */ | ||
868 | int ubi_is_erase_work(struct ubi_work *wrk) | ||
869 | { | ||
870 | return wrk->func == erase_worker; | ||
871 | } | ||
872 | #endif | ||
873 | |||
558 | /** | 874 | /** |
559 | * schedule_erase - schedule an erase work. | 875 | * schedule_erase - schedule an erase work. |
560 | * @ubi: UBI device description object | 876 | * @ubi: UBI device description object |
@@ -571,6 +887,9 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
571 | { | 887 | { |
572 | struct ubi_work *wl_wrk; | 888 | struct ubi_work *wl_wrk; |
573 | 889 | ||
890 | ubi_assert(e); | ||
891 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
892 | |||
574 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | 893 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", |
575 | e->pnum, e->ec, torture); | 894 | e->pnum, e->ec, torture); |
576 | 895 | ||
@@ -589,6 +908,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
589 | } | 908 | } |
590 | 909 | ||
591 | /** | 910 | /** |
911 | * do_sync_erase - run the erase worker synchronously. | ||
912 | * @ubi: UBI device description object | ||
913 | * @e: the WL entry of the physical eraseblock to erase | ||
914 | * @vol_id: the volume ID that last used this PEB | ||
915 | * @lnum: the last used logical eraseblock number for the PEB | ||
916 | * @torture: if the physical eraseblock has to be tortured | ||
917 | * | ||
918 | */ | ||
919 | static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | ||
920 | int vol_id, int lnum, int torture) | ||
921 | { | ||
922 | struct ubi_work *wl_wrk; | ||
923 | |||
924 | dbg_wl("sync erase of PEB %i", e->pnum); | ||
925 | |||
926 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | ||
927 | if (!wl_wrk) | ||
928 | return -ENOMEM; | ||
929 | |||
930 | wl_wrk->e = e; | ||
931 | wl_wrk->vol_id = vol_id; | ||
932 | wl_wrk->lnum = lnum; | ||
933 | wl_wrk->torture = torture; | ||
934 | |||
935 | return erase_worker(ubi, wl_wrk, 0); | ||
936 | } | ||
937 | |||
938 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
939 | /** | ||
940 | * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling | ||
941 | * sub-system. | ||
942 | * see: ubi_wl_put_peb() | ||
943 | * | ||
944 | * @ubi: UBI device description object | ||
945 | * @fm_e: physical eraseblock to return | ||
946 | * @lnum: the last used logical eraseblock number for the PEB | ||
947 | * @torture: if this physical eraseblock has to be tortured | ||
948 | */ | ||
949 | int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, | ||
950 | int lnum, int torture) | ||
951 | { | ||
952 | struct ubi_wl_entry *e; | ||
953 | int vol_id, pnum = fm_e->pnum; | ||
954 | |||
955 | dbg_wl("PEB %d", pnum); | ||
956 | |||
957 | ubi_assert(pnum >= 0); | ||
958 | ubi_assert(pnum < ubi->peb_count); | ||
959 | |||
960 | spin_lock(&ubi->wl_lock); | ||
961 | e = ubi->lookuptbl[pnum]; | ||
962 | |||
963 | /* This can happen if we recovered from a fastmap the very | ||
964 | * first time and are now writing a new one. In this case the wl system | ||
965 | * has never seen any PEB used by the original fastmap. | ||
966 | */ | ||
967 | if (!e) { | ||
968 | e = fm_e; | ||
969 | ubi_assert(e->ec >= 0); | ||
970 | ubi->lookuptbl[pnum] = e; | ||
971 | } else { | ||
972 | e->ec = fm_e->ec; | ||
973 | kfree(fm_e); | ||
974 | } | ||
975 | |||
976 | spin_unlock(&ubi->wl_lock); | ||
977 | |||
978 | vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; | ||
979 | return schedule_erase(ubi, e, vol_id, lnum, torture); | ||
980 | } | ||
981 | #endif | ||
982 | |||
983 | /** | ||
592 | * wear_leveling_worker - wear-leveling worker function. | 984 | * wear_leveling_worker - wear-leveling worker function. |
593 | * @ubi: UBI device description object | 985 | * @ubi: UBI device description object |
594 | * @wrk: the work object | 986 | * @wrk: the work object |
@@ -603,6 +995,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
603 | { | 995 | { |
604 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; | 996 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
605 | int vol_id = -1, uninitialized_var(lnum); | 997 | int vol_id = -1, uninitialized_var(lnum); |
998 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
999 | int anchor = wrk->anchor; | ||
1000 | #endif | ||
606 | struct ubi_wl_entry *e1, *e2; | 1001 | struct ubi_wl_entry *e1, *e2; |
607 | struct ubi_vid_hdr *vid_hdr; | 1002 | struct ubi_vid_hdr *vid_hdr; |
608 | 1003 | ||
@@ -636,14 +1031,35 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
636 | goto out_cancel; | 1031 | goto out_cancel; |
637 | } | 1032 | } |
638 | 1033 | ||
1034 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1035 | /* Check whether we need to produce an anchor PEB */ | ||
1036 | if (!anchor) | ||
1037 | anchor = !anchor_pebs_avalible(&ubi->free); | ||
1038 | |||
1039 | if (anchor) { | ||
1040 | e1 = find_anchor_wl_entry(&ubi->used); | ||
1041 | if (!e1) | ||
1042 | goto out_cancel; | ||
1043 | e2 = get_peb_for_wl(ubi); | ||
1044 | if (!e2) | ||
1045 | goto out_cancel; | ||
1046 | |||
1047 | self_check_in_wl_tree(ubi, e1, &ubi->used); | ||
1048 | rb_erase(&e1->u.rb, &ubi->used); | ||
1049 | dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); | ||
1050 | } else if (!ubi->scrub.rb_node) { | ||
1051 | #else | ||
639 | if (!ubi->scrub.rb_node) { | 1052 | if (!ubi->scrub.rb_node) { |
1053 | #endif | ||
640 | /* | 1054 | /* |
641 | * Now pick the least worn-out used physical eraseblock and a | 1055 | * Now pick the least worn-out used physical eraseblock and a |
642 | * highly worn-out free physical eraseblock. If the erase | 1056 | * highly worn-out free physical eraseblock. If the erase |
643 | * counters differ much enough, start wear-leveling. | 1057 | * counters differ much enough, start wear-leveling. |
644 | */ | 1058 | */ |
645 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 1059 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
646 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1060 | e2 = get_peb_for_wl(ubi); |
1061 | if (!e2) | ||
1062 | goto out_cancel; | ||
647 | 1063 | ||
648 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { | 1064 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { |
649 | dbg_wl("no WL needed: min used EC %d, max free EC %d", | 1065 | dbg_wl("no WL needed: min used EC %d, max free EC %d", |
@@ -658,14 +1074,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
658 | /* Perform scrubbing */ | 1074 | /* Perform scrubbing */ |
659 | scrubbing = 1; | 1075 | scrubbing = 1; |
660 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); | 1076 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); |
661 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1077 | e2 = get_peb_for_wl(ubi); |
1078 | if (!e2) | ||
1079 | goto out_cancel; | ||
1080 | |||
662 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); | 1081 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); |
663 | rb_erase(&e1->u.rb, &ubi->scrub); | 1082 | rb_erase(&e1->u.rb, &ubi->scrub); |
664 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); | 1083 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); |
665 | } | 1084 | } |
666 | 1085 | ||
667 | self_check_in_wl_tree(ubi, e2, &ubi->free); | ||
668 | rb_erase(&e2->u.rb, &ubi->free); | ||
669 | ubi->move_from = e1; | 1086 | ubi->move_from = e1; |
670 | ubi->move_to = e2; | 1087 | ubi->move_to = e2; |
671 | spin_unlock(&ubi->wl_lock); | 1088 | spin_unlock(&ubi->wl_lock); |
@@ -782,7 +1199,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
782 | ubi->move_to_put = ubi->wl_scheduled = 0; | 1199 | ubi->move_to_put = ubi->wl_scheduled = 0; |
783 | spin_unlock(&ubi->wl_lock); | 1200 | spin_unlock(&ubi->wl_lock); |
784 | 1201 | ||
785 | err = schedule_erase(ubi, e1, vol_id, lnum, 0); | 1202 | err = do_sync_erase(ubi, e1, vol_id, lnum, 0); |
786 | if (err) { | 1203 | if (err) { |
787 | kmem_cache_free(ubi_wl_entry_slab, e1); | 1204 | kmem_cache_free(ubi_wl_entry_slab, e1); |
788 | if (e2) | 1205 | if (e2) |
@@ -797,7 +1214,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
797 | */ | 1214 | */ |
798 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", | 1215 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", |
799 | e2->pnum, vol_id, lnum); | 1216 | e2->pnum, vol_id, lnum); |
800 | err = schedule_erase(ubi, e2, vol_id, lnum, 0); | 1217 | err = do_sync_erase(ubi, e2, vol_id, lnum, 0); |
801 | if (err) { | 1218 | if (err) { |
802 | kmem_cache_free(ubi_wl_entry_slab, e2); | 1219 | kmem_cache_free(ubi_wl_entry_slab, e2); |
803 | goto out_ro; | 1220 | goto out_ro; |
@@ -836,7 +1253,7 @@ out_not_moved: | |||
836 | spin_unlock(&ubi->wl_lock); | 1253 | spin_unlock(&ubi->wl_lock); |
837 | 1254 | ||
838 | ubi_free_vid_hdr(ubi, vid_hdr); | 1255 | ubi_free_vid_hdr(ubi, vid_hdr); |
839 | err = schedule_erase(ubi, e2, vol_id, lnum, torture); | 1256 | err = do_sync_erase(ubi, e2, vol_id, lnum, torture); |
840 | if (err) { | 1257 | if (err) { |
841 | kmem_cache_free(ubi_wl_entry_slab, e2); | 1258 | kmem_cache_free(ubi_wl_entry_slab, e2); |
842 | goto out_ro; | 1259 | goto out_ro; |
@@ -877,12 +1294,13 @@ out_cancel: | |||
877 | /** | 1294 | /** |
878 | * ensure_wear_leveling - schedule wear-leveling if it is needed. | 1295 | * ensure_wear_leveling - schedule wear-leveling if it is needed. |
879 | * @ubi: UBI device description object | 1296 | * @ubi: UBI device description object |
1297 | * @nested: set to non-zero if this function is called from a UBI worker | ||
880 | * | 1298 | * |
881 | * This function checks if it is time to start wear-leveling and schedules it | 1299 | * This function checks if it is time to start wear-leveling and schedules it |
882 | * if yes. This function returns zero in case of success and a negative error | 1300 | * if yes. This function returns zero in case of success and a negative error |
883 | * code in case of failure. | 1301 | * code in case of failure. |
884 | */ | 1302 | */ |
885 | static int ensure_wear_leveling(struct ubi_device *ubi) | 1303 | static int ensure_wear_leveling(struct ubi_device *ubi, int nested) |
886 | { | 1304 | { |
887 | int err = 0; | 1305 | int err = 0; |
888 | struct ubi_wl_entry *e1; | 1306 | struct ubi_wl_entry *e1; |
@@ -910,7 +1328,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
910 | * %UBI_WL_THRESHOLD. | 1328 | * %UBI_WL_THRESHOLD. |
911 | */ | 1329 | */ |
912 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 1330 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
913 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1331 | e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); |
914 | 1332 | ||
915 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) | 1333 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) |
916 | goto out_unlock; | 1334 | goto out_unlock; |
@@ -927,8 +1345,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
927 | goto out_cancel; | 1345 | goto out_cancel; |
928 | } | 1346 | } |
929 | 1347 | ||
1348 | wrk->anchor = 0; | ||
930 | wrk->func = &wear_leveling_worker; | 1349 | wrk->func = &wear_leveling_worker; |
931 | schedule_ubi_work(ubi, wrk); | 1350 | if (nested) |
1351 | __schedule_ubi_work(ubi, wrk); | ||
1352 | else | ||
1353 | schedule_ubi_work(ubi, wrk); | ||
932 | return err; | 1354 | return err; |
933 | 1355 | ||
934 | out_cancel: | 1356 | out_cancel: |
@@ -939,6 +1361,38 @@ out_unlock: | |||
939 | return err; | 1361 | return err; |
940 | } | 1362 | } |
941 | 1363 | ||
1364 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1365 | /** | ||
1366 | * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB. | ||
1367 | * @ubi: UBI device description object | ||
1368 | */ | ||
1369 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi) | ||
1370 | { | ||
1371 | struct ubi_work *wrk; | ||
1372 | |||
1373 | spin_lock(&ubi->wl_lock); | ||
1374 | if (ubi->wl_scheduled) { | ||
1375 | spin_unlock(&ubi->wl_lock); | ||
1376 | return 0; | ||
1377 | } | ||
1378 | ubi->wl_scheduled = 1; | ||
1379 | spin_unlock(&ubi->wl_lock); | ||
1380 | |||
1381 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | ||
1382 | if (!wrk) { | ||
1383 | spin_lock(&ubi->wl_lock); | ||
1384 | ubi->wl_scheduled = 0; | ||
1385 | spin_unlock(&ubi->wl_lock); | ||
1386 | return -ENOMEM; | ||
1387 | } | ||
1388 | |||
1389 | wrk->anchor = 1; | ||
1390 | wrk->func = &wear_leveling_worker; | ||
1391 | schedule_ubi_work(ubi, wrk); | ||
1392 | return 0; | ||
1393 | } | ||
1394 | #endif | ||
1395 | |||
942 | /** | 1396 | /** |
943 | * erase_worker - physical eraseblock erase worker function. | 1397 | * erase_worker - physical eraseblock erase worker function. |
944 | * @ubi: UBI device description object | 1398 | * @ubi: UBI device description object |
@@ -969,6 +1423,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
969 | dbg_wl("erase PEB %d EC %d LEB %d:%d", | 1423 | dbg_wl("erase PEB %d EC %d LEB %d:%d", |
970 | pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); | 1424 | pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); |
971 | 1425 | ||
1426 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1427 | |||
972 | err = sync_erase(ubi, e, wl_wrk->torture); | 1428 | err = sync_erase(ubi, e, wl_wrk->torture); |
973 | if (!err) { | 1429 | if (!err) { |
974 | /* Fine, we've erased it successfully */ | 1430 | /* Fine, we've erased it successfully */ |
@@ -976,6 +1432,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
976 | 1432 | ||
977 | spin_lock(&ubi->wl_lock); | 1433 | spin_lock(&ubi->wl_lock); |
978 | wl_tree_add(e, &ubi->free); | 1434 | wl_tree_add(e, &ubi->free); |
1435 | ubi->free_count++; | ||
979 | spin_unlock(&ubi->wl_lock); | 1436 | spin_unlock(&ubi->wl_lock); |
980 | 1437 | ||
981 | /* | 1438 | /* |
@@ -985,7 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
985 | serve_prot_queue(ubi); | 1442 | serve_prot_queue(ubi); |
986 | 1443 | ||
987 | /* And take care about wear-leveling */ | 1444 | /* And take care about wear-leveling */ |
988 | err = ensure_wear_leveling(ubi); | 1445 | err = ensure_wear_leveling(ubi, 1); |
989 | return err; | 1446 | return err; |
990 | } | 1447 | } |
991 | 1448 | ||
@@ -1223,7 +1680,7 @@ retry: | |||
1223 | * Technically scrubbing is the same as wear-leveling, so it is done | 1680 | * Technically scrubbing is the same as wear-leveling, so it is done |
1224 | * by the WL worker. | 1681 | * by the WL worker. |
1225 | */ | 1682 | */ |
1226 | return ensure_wear_leveling(ubi); | 1683 | return ensure_wear_leveling(ubi, 0); |
1227 | } | 1684 | } |
1228 | 1685 | ||
1229 | /** | 1686 | /** |
@@ -1404,7 +1861,7 @@ static void cancel_pending(struct ubi_device *ubi) | |||
1404 | */ | 1861 | */ |
1405 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1862 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1406 | { | 1863 | { |
1407 | int err, i; | 1864 | int err, i, reserved_pebs, found_pebs = 0; |
1408 | struct rb_node *rb1, *rb2; | 1865 | struct rb_node *rb1, *rb2; |
1409 | struct ubi_ainf_volume *av; | 1866 | struct ubi_ainf_volume *av; |
1410 | struct ubi_ainf_peb *aeb, *tmp; | 1867 | struct ubi_ainf_peb *aeb, *tmp; |
@@ -1416,6 +1873,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1416 | init_rwsem(&ubi->work_sem); | 1873 | init_rwsem(&ubi->work_sem); |
1417 | ubi->max_ec = ai->max_ec; | 1874 | ubi->max_ec = ai->max_ec; |
1418 | INIT_LIST_HEAD(&ubi->works); | 1875 | INIT_LIST_HEAD(&ubi->works); |
1876 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1877 | INIT_WORK(&ubi->fm_work, update_fastmap_work_fn); | ||
1878 | #endif | ||
1419 | 1879 | ||
1420 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); | 1880 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); |
1421 | 1881 | ||
@@ -1437,13 +1897,17 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1437 | 1897 | ||
1438 | e->pnum = aeb->pnum; | 1898 | e->pnum = aeb->pnum; |
1439 | e->ec = aeb->ec; | 1899 | e->ec = aeb->ec; |
1900 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1440 | ubi->lookuptbl[e->pnum] = e; | 1901 | ubi->lookuptbl[e->pnum] = e; |
1441 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { | 1902 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { |
1442 | kmem_cache_free(ubi_wl_entry_slab, e); | 1903 | kmem_cache_free(ubi_wl_entry_slab, e); |
1443 | goto out_free; | 1904 | goto out_free; |
1444 | } | 1905 | } |
1906 | |||
1907 | found_pebs++; | ||
1445 | } | 1908 | } |
1446 | 1909 | ||
1910 | ubi->free_count = 0; | ||
1447 | list_for_each_entry(aeb, &ai->free, u.list) { | 1911 | list_for_each_entry(aeb, &ai->free, u.list) { |
1448 | cond_resched(); | 1912 | cond_resched(); |
1449 | 1913 | ||
@@ -1454,8 +1918,14 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1454 | e->pnum = aeb->pnum; | 1918 | e->pnum = aeb->pnum; |
1455 | e->ec = aeb->ec; | 1919 | e->ec = aeb->ec; |
1456 | ubi_assert(e->ec >= 0); | 1920 | ubi_assert(e->ec >= 0); |
1921 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1922 | |||
1457 | wl_tree_add(e, &ubi->free); | 1923 | wl_tree_add(e, &ubi->free); |
1924 | ubi->free_count++; | ||
1925 | |||
1458 | ubi->lookuptbl[e->pnum] = e; | 1926 | ubi->lookuptbl[e->pnum] = e; |
1927 | |||
1928 | found_pebs++; | ||
1459 | } | 1929 | } |
1460 | 1930 | ||
1461 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1931 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
@@ -1469,6 +1939,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1469 | e->pnum = aeb->pnum; | 1939 | e->pnum = aeb->pnum; |
1470 | e->ec = aeb->ec; | 1940 | e->ec = aeb->ec; |
1471 | ubi->lookuptbl[e->pnum] = e; | 1941 | ubi->lookuptbl[e->pnum] = e; |
1942 | |||
1472 | if (!aeb->scrub) { | 1943 | if (!aeb->scrub) { |
1473 | dbg_wl("add PEB %d EC %d to the used tree", | 1944 | dbg_wl("add PEB %d EC %d to the used tree", |
1474 | e->pnum, e->ec); | 1945 | e->pnum, e->ec); |
@@ -1478,22 +1949,38 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1478 | e->pnum, e->ec); | 1949 | e->pnum, e->ec); |
1479 | wl_tree_add(e, &ubi->scrub); | 1950 | wl_tree_add(e, &ubi->scrub); |
1480 | } | 1951 | } |
1952 | |||
1953 | found_pebs++; | ||
1481 | } | 1954 | } |
1482 | } | 1955 | } |
1483 | 1956 | ||
1484 | if (ubi->avail_pebs < WL_RESERVED_PEBS) { | 1957 | dbg_wl("found %i PEBs", found_pebs); |
1958 | |||
1959 | if (ubi->fm) | ||
1960 | ubi_assert(ubi->good_peb_count == \ | ||
1961 | found_pebs + ubi->fm->used_blocks); | ||
1962 | else | ||
1963 | ubi_assert(ubi->good_peb_count == found_pebs); | ||
1964 | |||
1965 | reserved_pebs = WL_RESERVED_PEBS; | ||
1966 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1967 | /* Reserve enough PEBs to store two fastmaps. */ | ||
1968 | reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2; | ||
1969 | #endif | ||
1970 | |||
1971 | if (ubi->avail_pebs < reserved_pebs) { | ||
1485 | ubi_err("no enough physical eraseblocks (%d, need %d)", | 1972 | ubi_err("no enough physical eraseblocks (%d, need %d)", |
1486 | ubi->avail_pebs, WL_RESERVED_PEBS); | 1973 | ubi->avail_pebs, reserved_pebs); |
1487 | if (ubi->corr_peb_count) | 1974 | if (ubi->corr_peb_count) |
1488 | ubi_err("%d PEBs are corrupted and not used", | 1975 | ubi_err("%d PEBs are corrupted and not used", |
1489 | ubi->corr_peb_count); | 1976 | ubi->corr_peb_count); |
1490 | goto out_free; | 1977 | goto out_free; |
1491 | } | 1978 | } |
1492 | ubi->avail_pebs -= WL_RESERVED_PEBS; | 1979 | ubi->avail_pebs -= reserved_pebs; |
1493 | ubi->rsvd_pebs += WL_RESERVED_PEBS; | 1980 | ubi->rsvd_pebs += reserved_pebs; |
1494 | 1981 | ||
1495 | /* Schedule wear-leveling if needed */ | 1982 | /* Schedule wear-leveling if needed */ |
1496 | err = ensure_wear_leveling(ubi); | 1983 | err = ensure_wear_leveling(ubi, 0); |
1497 | if (err) | 1984 | if (err) |
1498 | goto out_free; | 1985 | goto out_free; |
1499 | 1986 | ||
@@ -1572,7 +2059,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) | |||
1572 | } | 2059 | } |
1573 | 2060 | ||
1574 | read_ec = be64_to_cpu(ec_hdr->ec); | 2061 | read_ec = be64_to_cpu(ec_hdr->ec); |
1575 | if (ec != read_ec) { | 2062 | if (ec != read_ec && read_ec - ec > 1) { |
1576 | ubi_err("self-check failed for PEB %d", pnum); | 2063 | ubi_err("self-check failed for PEB %d", pnum); |
1577 | ubi_err("read EC is %lld, should be %d", read_ec, ec); | 2064 | ubi_err("read EC is %lld, should be %d", read_ec, ec); |
1578 | dump_stack(); | 2065 | dump_stack(); |