author		Richard Weinberger <richard@nod.at>	2014-11-10 12:21:51 -0500
committer	Richard Weinberger <richard@nod.at>	2015-03-26 17:47:28 -0400
commit		78d6d497a648cc04ae26b27af1e01ce3e41a9c72 (patch)
tree		2edaa099bd72458ae3356c92e58c59cb31049d0b
parent		479c2c0cac0c1d23655df15cf039b4f8e3623d23 (diff)
UBI: Move fastmap specific functions out of wl.c
Fastmap is tightly connected to the WL sub-system; many fastmap-specific
functions live in wl.c. To get rid of most #ifdefs in wl.c, move these
functions into a new file and include it into wl.c.

Signed-off-by: Richard Weinberger <richard@nod.at>
-rw-r--r--	drivers/mtd/ubi/fastmap-wl.c	361
-rw-r--r--	drivers/mtd/ubi/wl.c		560
-rw-r--r--	drivers/mtd/ubi/wl.h		18
3 files changed, 476 insertions(+), 463 deletions(-)
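The interesting part of this patch is how it eliminates the #ifdef CONFIG_MTD_UBI_FASTMAP blocks: instead of compiling fastmap-wl.c as a separate object, wl.c textually #includes it at its bottom when fastmap is enabled, so both files form one translation unit and the static helpers in wl.c remain visible to the fastmap code. A minimal sketch of the pattern follows; the names demo.c, feature-impl.c, CONFIG_FEATURE, and get_resource() are illustrative, not from the patch:

/* demo.c -- build the plain variant with `gcc demo.c`; for the feature
 * variant, create feature-impl.c as described in the comment below and
 * build with `gcc -DCONFIG_FEATURE demo.c`. */
#include <stdio.h>

static int shared_helper(int x)	/* static, yet visible to the included .c file */
{
	return x * 2;
}

#ifndef CONFIG_FEATURE
static int get_resource(void)	/* plain implementation */
{
	return shared_helper(1);
}
#else
/* feature-impl.c would contain something like:
 *	static int get_resource(void) { return shared_helper(21); }
 * Because the file is #included rather than compiled separately, the two
 * files share a single translation unit and static symbols resolve both
 * ways without any cross-file linkage. */
#include "feature-impl.c"
#endif

int main(void)
{
	printf("get_resource() = %d\n", get_resource());
	return 0;
}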
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 000000000000..88a400c52418
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+	ubi_update_fastmap(ubi);
+	spin_lock(&ubi->wl_lock);
+	ubi->fm_work_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the to be checked PEB
+ */
+static int is_fm_block(struct ubi_device *ubi, int pnum)
+{
+	int i;
+
+	if (!ubi->fm)
+		return 0;
+
+	for (i = 0; i < ubi->fm->used_blocks; i++)
+		if (ubi->fm->e[i]->pnum == pnum)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e, *victim = NULL;
+	int max_ec = UBI_MAX_ERASECOUNTER;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb) {
+		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+			victim = e;
+			max_ec = e->ec;
+		}
+	}
+
+	return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+				    struct ubi_fm_pool *pool)
+{
+	int i;
+	struct ubi_wl_entry *e;
+
+	for (i = pool->used; i < pool->size; i++) {
+		e = ubi->lookuptbl[pool->pebs[i]];
+		wl_tree_add(e, &ubi->free);
+		ubi->free_count++;
+	}
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+	struct rb_node *p;
+	struct ubi_wl_entry *e;
+
+	ubi_rb_for_each_entry(p, e, root, u.rb)
+		if (e->pnum < UBI_FM_MAX_START)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+	struct ubi_wl_entry *e = NULL;
+
+	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+		goto out;
+
+	if (anchor)
+		e = find_anchor_wl_entry(&ubi->free);
+	else
+		e = find_mean_wl_entry(ubi, &ubi->free);
+
+	if (!e)
+		goto out;
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/* remove it from the free list,
+	 * the wl subsystem does no longer know this erase block */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+out:
+	return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_wl_entry *e;
+	int enough;
+
+	spin_lock(&ubi->wl_lock);
+
+	return_unused_pool_pebs(ubi, wl_pool);
+	return_unused_pool_pebs(ubi, pool);
+
+	wl_pool->size = 0;
+	pool->size = 0;
+
+	for (;;) {
+		enough = 0;
+		if (pool->size < pool->max_size) {
+			if (!ubi->free.rb_node)
+				break;
+
+			e = wl_get_wle(ubi);
+			if (!e)
+				break;
+
+			pool->pebs[pool->size] = e->pnum;
+			pool->size++;
+		} else
+			enough++;
+
+		if (wl_pool->size < wl_pool->max_size) {
+			if (!ubi->free.rb_node ||
+			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+				break;
+
+			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+			self_check_in_wl_tree(ubi, e, &ubi->free);
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+
+			wl_pool->pebs[wl_pool->size] = e->pnum;
+			wl_pool->size++;
+		} else
+			enough++;
+
+		if (enough == 2)
+			break;
+	}
+
+	wl_pool->used = 0;
+	pool->used = 0;
+
+	spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int ret, retried = 0;
+	struct ubi_fm_pool *pool = &ubi->fm_pool;
+	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+	down_read(&ubi->fm_eba_sem);
+	spin_lock(&ubi->wl_lock);
+
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronous. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_eba_sem);
+		ret = ubi_update_fastmap(ubi);
+		if (ret) {
+			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+			down_read(&ubi->fm_eba_sem);
+			return -ENOSPC;
+		}
+		down_read(&ubi->fm_eba_sem);
+		spin_lock(&ubi->wl_lock);
+	}
+
+	if (pool->used == pool->size) {
+		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		up_read(&ubi->fm_eba_sem);
+		goto again;
+	}
+
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
+	return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size) {
+		/* We cannot update the fastmap here because this
+		 * function is called in atomic context.
+		 * Let's fail here and refill/update it as soon as possible. */
+		if (!ubi->fm_work_scheduled) {
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
+		return NULL;
+	} else {
+		pnum = pool->pebs[pool->used++];
+		return ubi->lookuptbl[pnum];
+	}
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+	struct ubi_work *wrk;
+
+	spin_lock(&ubi->wl_lock);
+	if (ubi->wl_scheduled) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+	ubi->wl_scheduled = 1;
+	spin_unlock(&ubi->wl_lock);
+
+	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+	if (!wrk) {
+		spin_lock(&ubi->wl_lock);
+		ubi->wl_scheduled = 0;
+		spin_unlock(&ubi->wl_lock);
+		return -ENOMEM;
+	}
+
+	wrk->anchor = 1;
+	wrk->func = &wear_leveling_worker;
+	schedule_ubi_work(ubi, wrk);
+	return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+		      int lnum, int torture)
+{
+	struct ubi_wl_entry *e;
+	int vol_id, pnum = fm_e->pnum;
+
+	dbg_wl("PEB %d", pnum);
+
+	ubi_assert(pnum >= 0);
+	ubi_assert(pnum < ubi->peb_count);
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap the very
+	 * first time and writing now a new one. In this case the wl system
+	 * has never seen any PEB used by the original fastmap.
+	 */
+	if (!e) {
+		e = fm_e;
+		ubi_assert(e->ec >= 0);
+		ubi->lookuptbl[pnum] = e;
+	}
+
+	spin_unlock(&ubi->wl_lock);
+
+	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+	return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+	return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+	int i;
+
+	flush_work(&ubi->fm_work);
+	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+	if (ubi->fm) {
+		for (i = 0; i < ubi->fm->used_blocks; i++)
+			kfree(ubi->fm->e[i]);
+	}
+	kfree(ubi->fm);
+}
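Note how ubi_refill_pools() above tops up both pools in a single loop: each iteration grows whichever pool still has room, bumps `enough` once per pool that is already full, and exits when both pools are full (enough == 2) or the free tree cannot supply more PEBs. A standalone sketch of that termination pattern under simplified assumptions; the pool struct and take_free() below are illustrative stand-ins, not UBI API:

#include <stdio.h>

struct pool { int size, max_size; };

static int free_count = 6;	/* pretend supply of free PEBs */

static int take_free(void)	/* stands in for wl_get_wle()/find_wl_entry() */
{
	return free_count > 0 ? free_count-- : 0;
}

static void refill(struct pool *a, struct pool *b)
{
	int enough;

	a->size = 0;
	b->size = 0;
	for (;;) {
		enough = 0;
		if (a->size < a->max_size) {
			if (!take_free())
				break;		/* supply exhausted */
			a->size++;
		} else
			enough++;

		if (b->size < b->max_size) {
			if (!take_free())
				break;
			b->size++;
		} else
			enough++;

		if (enough == 2)		/* both pools full */
			break;
	}
}

int main(void)
{
	struct pool a = { 0, 4 }, b = { 0, 4 };

	refill(&a, &b);
	printf("a=%d b=%d (free left: %d)\n", a.size, b.size, free_count);
	return 0;
}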
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 83848324daa2..7d49f42ca2d5 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -103,6 +103,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include "ubi.h"
+#include "wl.h"
 
 /* Number of physical eraseblocks reserved for wear-leveling purposes */
 #define WL_RESERVED_PEBS 1
@@ -140,45 +141,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
 static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
- * @wrk: the work description object
- */
-static void update_fastmap_work_fn(struct work_struct *wrk)
-{
-	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
-	ubi_update_fastmap(ubi);
-	spin_lock(&ubi->wl_lock);
-	ubi->fm_work_scheduled = 0;
-	spin_unlock(&ubi->wl_lock);
-}
-
-/**
- * ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
- * @ubi: UBI device description object
- * @pnum: the to be checked PEB
- */
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
-	int i;
-
-	if (!ubi->fm)
-		return 0;
-
-	for (i = 0; i < ubi->fm->used_blocks; i++)
-		if (ubi->fm->e[i]->pnum == pnum)
-			return 1;
-
-	return 0;
-}
-#else
-static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
-{
-	return 0;
-}
-#endif
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -277,33 +239,6 @@ static int do_work(struct ubi_device *ubi)
 }
 
 /**
- * produce_free_peb - produce a free physical eraseblock.
- * @ubi: UBI device description object
- *
- * This function tries to make a free PEB by means of synchronous execution of
- * pending works. This may be needed if, for example the background thread is
- * disabled. Returns zero in case of success and a negative error code in case
- * of failure.
- */
-static int produce_free_peb(struct ubi_device *ubi)
-{
-	int err;
-
-	while (!ubi->free.rb_node && ubi->works_count) {
-		spin_unlock(&ubi->wl_lock);
-
-		dbg_wl("do one work synchronously");
-		err = do_work(ubi);
-
-		spin_lock(&ubi->wl_lock);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/**
  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
  * @e: the wear-leveling entry to check
  * @root: the root of the tree
@@ -441,76 +376,8 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
 	return e;
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
- * @root: the RB-tree where to look for
- */
-static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
-{
-	struct rb_node *p;
-	struct ubi_wl_entry *e, *victim = NULL;
-	int max_ec = UBI_MAX_ERASECOUNTER;
-
-	ubi_rb_for_each_entry(p, e, root, u.rb) {
-		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
-			victim = e;
-			max_ec = e->ec;
-		}
-	}
-
-	return victim;
-}
-
-static int anchor_pebs_avalible(struct rb_root *root)
-{
-	struct rb_node *p;
-	struct ubi_wl_entry *e;
-
-	ubi_rb_for_each_entry(p, e, root, u.rb)
-		if (e->pnum < UBI_FM_MAX_START)
-			return 1;
-
-	return 0;
-}
-
-/**
- * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
- * @ubi: UBI device description object
- * @anchor: This PEB will be used as anchor PEB by fastmap
- *
- * The function returns a physical erase block with a given maximal number
- * and removes it from the wl subsystem.
- * Must be called with wl_lock held!
- */
-struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
-{
-	struct ubi_wl_entry *e = NULL;
-
-	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-		goto out;
-
-	if (anchor)
-		e = find_anchor_wl_entry(&ubi->free);
-	else
-		e = find_mean_wl_entry(ubi, &ubi->free);
-
-	if (!e)
-		goto out;
-
-	self_check_in_wl_tree(ubi, e, &ubi->free);
-
-	/* remove it from the free list,
-	 * the wl subsystem does no longer know this erase block */
-	rb_erase(&e->u.rb, &ubi->free);
-	ubi->free_count--;
-out:
-	return e;
-}
-#endif
-
 /**
- * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
  * refill_wl_user_pool().
  * @ubi: UBI device description object
  *
@@ -541,228 +408,6 @@ static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
 }
 
 /**
- * wl_get_peb - get a physical eraseblock.
- * @ubi: UBI device description object
- *
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure.
- * It is the low level component of ubi_wl_get_peb() in the non-fastmap
- * case.
- */
-static int wl_get_peb(struct ubi_device *ubi)
-{
-	int err;
-	struct ubi_wl_entry *e;
-
-retry:
-	if (!ubi->free.rb_node) {
-		if (ubi->works_count == 0) {
-			ubi_err(ubi, "no free eraseblocks");
-			ubi_assert(list_empty(&ubi->works));
-			return -ENOSPC;
-		}
-
-		err = produce_free_peb(ubi);
-		if (err < 0)
-			return err;
-		goto retry;
-	}
-
-	e = wl_get_wle(ubi);
-	prot_queue_add(ubi, e);
-
-	return e->pnum;
-}
-
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * return_unused_pool_pebs - returns unused PEB to the free tree.
- * @ubi: UBI device description object
- * @pool: fastmap pool description object
- */
-static void return_unused_pool_pebs(struct ubi_device *ubi,
-				    struct ubi_fm_pool *pool)
-{
-	int i;
-	struct ubi_wl_entry *e;
-
-	for (i = pool->used; i < pool->size; i++) {
-		e = ubi->lookuptbl[pool->pebs[i]];
-		wl_tree_add(e, &ubi->free);
-		ubi->free_count++;
-	}
-}
-
-/**
- * ubi_refill_pools - refills all fastmap PEB pools.
- * @ubi: UBI device description object
- */
-void ubi_refill_pools(struct ubi_device *ubi)
-{
-	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
-	struct ubi_fm_pool *pool = &ubi->fm_pool;
-	struct ubi_wl_entry *e;
-	int enough;
-
-	spin_lock(&ubi->wl_lock);
-
-	return_unused_pool_pebs(ubi, wl_pool);
-	return_unused_pool_pebs(ubi, pool);
-
-	wl_pool->size = 0;
-	pool->size = 0;
-
-	for (;;) {
-		enough = 0;
-		if (pool->size < pool->max_size) {
-			if (!ubi->free.rb_node ||
-			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
-				break;
-
-			e = wl_get_wle(ubi);
-			if (!e)
-				break;
-
-			pool->pebs[pool->size] = e->pnum;
-			pool->size++;
-		} else
-			enough++;
-
-		if (wl_pool->size < wl_pool->max_size) {
-			if (!ubi->free.rb_node ||
-			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
-				break;
-
-			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
-			self_check_in_wl_tree(ubi, e, &ubi->free);
-			rb_erase(&e->u.rb, &ubi->free);
-			ubi->free_count--;
-
-			wl_pool->pebs[wl_pool->size] = e->pnum;
-			wl_pool->size++;
-		} else
-			enough++;
-
-		if (enough == 2)
-			break;
-	}
-
-	wl_pool->used = 0;
-	pool->used = 0;
-
-	spin_unlock(&ubi->wl_lock);
-}
-
-/* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of
- * the fastmap pool.
- * Returns with ubi->fm_eba_sem held in read mode!
- */
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
-	int ret, retried = 0;
-	struct ubi_fm_pool *pool = &ubi->fm_pool;
-	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
-
-again:
-	down_read(&ubi->fm_eba_sem);
-	spin_lock(&ubi->wl_lock);
-	/* We check here also for the WL pool because at this point we can
-	 * refill the WL pool synchronous. */
-	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
-		spin_unlock(&ubi->wl_lock);
-		up_read(&ubi->fm_eba_sem);
-		ret = ubi_update_fastmap(ubi);
-		if (ret) {
-			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
-			down_read(&ubi->fm_eba_sem);
-			return -ENOSPC;
-		}
-		down_read(&ubi->fm_eba_sem);
-		spin_lock(&ubi->wl_lock);
-	}
-
-	if (pool->used == pool->size) {
-		spin_unlock(&ubi->wl_lock);
-		if (retried) {
-			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
-			ret = -ENOSPC;
-			goto out;
-		}
-		retried = 1;
-		up_read(&ubi->fm_eba_sem);
-		goto again;
-	}
-
-	ubi_assert(pool->used < pool->size);
-	ret = pool->pebs[pool->used++];
-	prot_queue_add(ubi, ubi->lookuptbl[ret]);
-	spin_unlock(&ubi->wl_lock);
-out:
-	return ret;
-}
-
-/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
- *
- * @ubi: UBI device description object
- */
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
-	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
-	int pnum;
-
-	if (pool->used == pool->size) {
-		/* We cannot update the fastmap here because this
-		 * function is called in atomic context.
-		 * Let's fail here and refill/update it as soon as possible. */
-		if (!ubi->fm_work_scheduled) {
-			ubi->fm_work_scheduled = 1;
-			schedule_work(&ubi->fm_work);
-		}
-		return NULL;
-	} else {
-		pnum = pool->pebs[pool->used++];
-		return ubi->lookuptbl[pnum];
-	}
-}
-#else
-static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
-{
-	struct ubi_wl_entry *e;
-
-	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
-	self_check_in_wl_tree(ubi, e, &ubi->free);
-	ubi->free_count--;
-	ubi_assert(ubi->free_count >= 0);
-	rb_erase(&e->u.rb, &ubi->free);
-
-	return e;
-}
-
-int ubi_wl_get_peb(struct ubi_device *ubi)
-{
-	int peb, err;
-
-	spin_lock(&ubi->wl_lock);
-	peb = wl_get_peb(ubi);
-	spin_unlock(&ubi->wl_lock);
-	down_read(&ubi->fm_eba_sem);
-
-	if (peb < 0)
-		return peb;
-
-	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
-				    ubi->peb_size - ubi->vid_hdr_aloffset);
-	if (err) {
-		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
-			peb);
-		return err;
-	}
-
-	return peb;
-}
-#endif
-
-/**
  * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
@@ -928,17 +573,6 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 			int shutdown);
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_is_erase_work - checks whether a work is erase work.
- * @wrk: The work object to be checked
- */
-int ubi_is_erase_work(struct ubi_work *wrk)
-{
-	return wrk->func == erase_worker;
-}
-#endif
-
 /**
  * schedule_erase - schedule an erase work.
  * @ubi: UBI device description object
@@ -956,7 +590,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	struct ubi_work *wl_wrk;
 
 	ubi_assert(e);
-	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+	ubi_assert(!is_fm_block(ubi, e->pnum));
 
 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
 	       e->pnum, e->ec, torture);
@@ -1003,48 +637,6 @@ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	return erase_worker(ubi, wl_wrk, 0);
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
- * sub-system.
- * see: ubi_wl_put_peb()
- *
- * @ubi: UBI device description object
- * @fm_e: physical eraseblock to return
- * @lnum: the last used logical eraseblock number for the PEB
- * @torture: if this physical eraseblock has to be tortured
- */
-int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
-		      int lnum, int torture)
-{
-	struct ubi_wl_entry *e;
-	int vol_id, pnum = fm_e->pnum;
-
-	dbg_wl("PEB %d", pnum);
-
-	ubi_assert(pnum >= 0);
-	ubi_assert(pnum < ubi->peb_count);
-
-	spin_lock(&ubi->wl_lock);
-	e = ubi->lookuptbl[pnum];
-
-	/* This can happen if we recovered from a fastmap the very
-	 * first time and writing now a new one. In this case the wl system
-	 * has never seen any PEB used by the original fastmap.
-	 */
-	if (!e) {
-		e = fm_e;
-		ubi_assert(e->ec >= 0);
-		ubi->lookuptbl[pnum] = e;
-	}
-
-	spin_unlock(&ubi->wl_lock);
-
-	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
-	return schedule_erase(ubi, e, vol_id, lnum, torture);
-}
-#endif
-
 /**
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
@@ -1427,38 +1019,6 @@ out_unlock:
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_FASTMAP
-/**
- * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
- * @ubi: UBI device description object
- */
-int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
-{
-	struct ubi_work *wrk;
-
-	spin_lock(&ubi->wl_lock);
-	if (ubi->wl_scheduled) {
-		spin_unlock(&ubi->wl_lock);
-		return 0;
-	}
-	ubi->wl_scheduled = 1;
-	spin_unlock(&ubi->wl_lock);
-
-	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
-	if (!wrk) {
-		spin_lock(&ubi->wl_lock);
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-		return -ENOMEM;
-	}
-
-	wrk->anchor = 1;
-	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
-	return 0;
-}
-#endif
-
 /**
  * erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
@@ -1490,7 +1050,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	dbg_wl("erase PEB %d EC %d LEB %d:%d",
 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
 
-	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+	ubi_assert(!is_fm_block(ubi, e->pnum));
 
 	err = sync_erase(ubi, e, wl_wrk->torture);
 	if (!err) {
@@ -1974,7 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
-		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+		ubi_assert(!is_fm_block(ubi, e->pnum));
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
 			wl_entry_destroy(ubi, e);
@@ -1995,7 +1555,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi_assert(e->ec >= 0);
-		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+		ubi_assert(!is_fm_block(ubi, e->pnum));
 
 		wl_tree_add(e, &ubi->free);
 		ubi->free_count++;
@@ -2095,23 +1655,6 @@ static void protection_queue_destroy(struct ubi_device *ubi)
 	}
 }
 
-static void ubi_fastmap_close(struct ubi_device *ubi)
-{
-#ifdef CONFIG_MTD_UBI_FASTMAP
-	int i;
-
-	flush_work(&ubi->fm_work);
-	return_unused_pool_pebs(ubi, &ubi->fm_pool);
-	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
-
-	if (ubi->fm) {
-		for (i = 0; i < ubi->fm->used_blocks; i++)
-			kfree(ubi->fm->e[i]);
-	}
-	kfree(ubi->fm);
-#endif
-}
-
 /**
  * ubi_wl_close - close the wear-leveling sub-system.
  * @ubi: UBI device description object
@@ -2224,3 +1767,94 @@ static int self_check_in_pq(const struct ubi_device *ubi,
 	dump_stack();
 	return -EINVAL;
 }
+#ifndef CONFIG_MTD_UBI_FASTMAP
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+
+	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+	ubi->free_count--;
+	ubi_assert(ubi->free_count >= 0);
+	rb_erase(&e->u.rb, &ubi->free);
+
+	return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+	int err;
+
+	while (!ubi->free.rb_node && ubi->works_count) {
+		spin_unlock(&ubi->wl_lock);
+
+		dbg_wl("do one work synchronously");
+		err = do_work(ubi);
+
+		spin_lock(&ubi->wl_lock);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+	int err;
+	struct ubi_wl_entry *e;
+
+retry:
+	down_read(&ubi->fm_eba_sem);
+	spin_lock(&ubi->wl_lock);
+	if (!ubi->free.rb_node) {
+		if (ubi->works_count == 0) {
+			ubi_err(ubi, "no free eraseblocks");
+			ubi_assert(list_empty(&ubi->works));
+			spin_unlock(&ubi->wl_lock);
+			return -ENOSPC;
+		}
+
+		err = produce_free_peb(ubi);
+		if (err < 0) {
+			spin_unlock(&ubi->wl_lock);
+			return err;
+		}
+		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->fm_eba_sem);
+		goto retry;
+
+	}
+	e = wl_get_wle(ubi);
+	prot_queue_add(ubi, e);
+	spin_unlock(&ubi->wl_lock);
+
+	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+				    ubi->peb_size - ubi->vid_hdr_aloffset);
+	if (err) {
+		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+		return err;
+	}
+
+	return e->pnum;
+}
+#else
+#include "fastmap-wl.c"
+#endif
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
new file mode 100644
index 000000000000..db8681477d96
--- /dev/null
+++ b/drivers/mtd/ubi/wl.h
@@ -0,0 +1,18 @@
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static int is_fm_block(struct ubi_device *ubi, int pnum);
+static int anchor_pebs_avalible(struct rb_root *root);
+static void update_fastmap_work_fn(struct work_struct *wrk);
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline int is_fm_block(struct ubi_device *ubi, int pnum)
+{
+	return 0;
+}
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */
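wl.h is unusual in that it forward-declares static functions. That only works because, within this patch, the header is included by wl.c alone, and fastmap-wl.c is in turn #included into wl.c, so all of these symbols live in a single translation unit; the prototypes merely let code early in wl.c call functions whose definitions appear only after the #include at the bottom of the file. A minimal sketch of the arrangement, with illustrative names standing in for the real files:

/* one file standing in for wl.c + wl.h + fastmap-wl.c combined */
#include <stdio.h>

static int helper(void);	/* the "wl.h" part: forward declaration of a static */

int main(void)			/* the "wl.c" part: call site precedes the definition */
{
	printf("helper() = %d\n", helper());
	return 0;
}

/* the "fastmap-wl.c" part, textually included at the bottom in the real patch */
static int helper(void)
{
	return 42;
}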