author:    David Woodhouse <dwmw2@infradead.org>  2007-10-14 09:29:00 -0400
committer: David Woodhouse <dwmw2@infradead.org>  2007-10-14 09:29:00 -0400
commit:    e3d18658d4f28e4783e1bb1c41e9134c9e5db0a9
tree:      d4fe529460e98522775bc0ecca557dcc76866999
parent:    a2e1b833d9e0231d67e722b7e2f4d79daf919baf
parent:    cc5f4f28755b3b152297fd7bc3e03781dd8008e2
Merge branch 'master' of git://git.infradead.org/~dedekind/ubi-2.6
-rw-r--r--  drivers/mtd/ubi/build.c |  28
-rw-r--r--  drivers/mtd/ubi/debug.c |  37
-rw-r--r--  drivers/mtd/ubi/debug.h |   2
-rw-r--r--  drivers/mtd/ubi/eba.c   | 131
-rw-r--r--  drivers/mtd/ubi/io.c    |  75
-rw-r--r--  drivers/mtd/ubi/kapi.c  |   9
-rw-r--r--  drivers/mtd/ubi/scan.c  |  45
-rw-r--r--  drivers/mtd/ubi/scan.h  |   8
-rw-r--r--  drivers/mtd/ubi/ubi.h   |  42
-rw-r--r--  drivers/mtd/ubi/vmt.c   |  70
-rw-r--r--  drivers/mtd/ubi/vtbl.c  |   8
-rw-r--r--  drivers/mtd/ubi/wl.c    | 136
12 files changed, 274 insertions(+), 317 deletions(-)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 1cb22bfae750..023653977a1a 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -565,7 +565,7 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, | |||
565 | } | 565 | } |
566 | 566 | ||
567 | ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), | 567 | ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), |
568 | GFP_KERNEL); | 568 | GFP_KERNEL); |
569 | if (!ubi) { | 569 | if (!ubi) { |
570 | err = -ENOMEM; | 570 | err = -ENOMEM; |
571 | goto out_mtd; | 571 | goto out_mtd; |
@@ -583,6 +583,22 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, | |||
583 | if (err) | 583 | if (err) |
584 | goto out_free; | 584 | goto out_free; |
585 | 585 | ||
586 | mutex_init(&ubi->buf_mutex); | ||
587 | ubi->peb_buf1 = vmalloc(ubi->peb_size); | ||
588 | if (!ubi->peb_buf1) | ||
589 | goto out_free; | ||
590 | |||
591 | ubi->peb_buf2 = vmalloc(ubi->peb_size); | ||
592 | if (!ubi->peb_buf2) | ||
593 | goto out_free; | ||
594 | |||
595 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
596 | mutex_init(&ubi->dbg_buf_mutex); | ||
597 | ubi->dbg_peb_buf = vmalloc(ubi->peb_size); | ||
598 | if (!ubi->dbg_peb_buf) | ||
599 | goto out_free; | ||
600 | #endif | ||
601 | |||
586 | err = attach_by_scanning(ubi); | 602 | err = attach_by_scanning(ubi); |
587 | if (err) { | 603 | if (err) { |
588 | dbg_err("failed to attach by scanning, error %d", err); | 604 | dbg_err("failed to attach by scanning, error %d", err); |
@@ -630,6 +646,11 @@ out_detach: | |||
630 | ubi_wl_close(ubi); | 646 | ubi_wl_close(ubi); |
631 | vfree(ubi->vtbl); | 647 | vfree(ubi->vtbl); |
632 | out_free: | 648 | out_free: |
649 | vfree(ubi->peb_buf1); | ||
650 | vfree(ubi->peb_buf2); | ||
651 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
652 | vfree(ubi->dbg_peb_buf); | ||
653 | #endif | ||
633 | kfree(ubi); | 654 | kfree(ubi); |
634 | out_mtd: | 655 | out_mtd: |
635 | put_mtd_device(mtd); | 656 | put_mtd_device(mtd); |
@@ -651,6 +672,11 @@ static void detach_mtd_dev(struct ubi_device *ubi) | |||
651 | ubi_wl_close(ubi); | 672 | ubi_wl_close(ubi); |
652 | vfree(ubi->vtbl); | 673 | vfree(ubi->vtbl); |
653 | put_mtd_device(ubi->mtd); | 674 | put_mtd_device(ubi->mtd); |
675 | vfree(ubi->peb_buf1); | ||
676 | vfree(ubi->peb_buf2); | ||
677 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
678 | vfree(ubi->dbg_peb_buf); | ||
679 | #endif | ||
654 | kfree(ubi_devices[ubi_num]); | 680 | kfree(ubi_devices[ubi_num]); |
655 | ubi_devices[ubi_num] = NULL; | 681 | ubi_devices[ubi_num] = NULL; |
656 | ubi_devices_cnt -= 1; | 682 | ubi_devices_cnt -= 1; |
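
For orientation: the buffers wired up above are consumed in later hunks, where recover_peb(), torture_peb() and ubi_eba_copy_leb() borrow a preallocated PEB-sized buffer under @buf_mutex instead of vmalloc()ing one per operation. A minimal sketch of that pattern, assuming the new ubi_device fields; the wrapper function name is hypothetical, not from the patch:

/* Hypothetical consumer of ubi->peb_buf1; the real users are the eba.c and
 * io.c functions changed below. The mutex serializes access to the shared
 * scratch buffer. */
static int read_whole_peb(struct ubi_device *ubi, int pnum)
{
	int err;

	mutex_lock(&ubi->buf_mutex);
	err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
	mutex_unlock(&ubi->buf_mutex);
	return err;
}
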
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 310341e5cd43..56956ec2845f 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -42,7 +42,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) | |||
42 | dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); | 42 | dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); |
43 | dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); | 43 | dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); |
44 | dbg_msg("erase counter header hexdump:"); | 44 | dbg_msg("erase counter header hexdump:"); |
45 | ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE); | 45 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
46 | ec_hdr, UBI_EC_HDR_SIZE, 1); | ||
46 | } | 47 | } |
47 | 48 | ||
48 | /** | 49 | /** |
@@ -187,38 +188,4 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) | |||
187 | dbg_msg("the 1st 16 characters of the name: %s", nm); | 188 | dbg_msg("the 1st 16 characters of the name: %s", nm); |
188 | } | 189 | } |
189 | 190 | ||
190 | #define BYTES_PER_LINE 32 | ||
191 | |||
192 | /** | ||
193 | * ubi_dbg_hexdump - dump a buffer. | ||
194 | * @ptr: the buffer to dump | ||
195 | * @size: buffer size which must be multiple of 4 bytes | ||
196 | */ | ||
197 | void ubi_dbg_hexdump(const void *ptr, int size) | ||
198 | { | ||
199 | int i, k = 0, rows, columns; | ||
200 | const uint8_t *p = ptr; | ||
201 | |||
202 | size = ALIGN(size, 4); | ||
203 | rows = size/BYTES_PER_LINE + size % BYTES_PER_LINE; | ||
204 | for (i = 0; i < rows; i++) { | ||
205 | int j; | ||
206 | |||
207 | cond_resched(); | ||
208 | columns = min(size - k, BYTES_PER_LINE) / 4; | ||
209 | if (columns == 0) | ||
210 | break; | ||
211 | printk(KERN_DEBUG "%5d: ", i * BYTES_PER_LINE); | ||
212 | for (j = 0; j < columns; j++) { | ||
213 | int n, N; | ||
214 | |||
215 | N = size - k > 4 ? 4 : size - k; | ||
216 | for (n = 0; n < N; n++) | ||
217 | printk("%02x", p[k++]); | ||
218 | printk(" "); | ||
219 | } | ||
220 | printk("\n"); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | 191 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ |
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index ff8f39548cd8..467722eb618b 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -59,7 +59,6 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx); | |||
59 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); | 59 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); |
60 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); | 60 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); |
61 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); | 61 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); |
62 | void ubi_dbg_hexdump(const void *buf, int size); | ||
63 | 62 | ||
64 | #else | 63 | #else |
65 | 64 | ||
@@ -72,7 +71,6 @@ void ubi_dbg_hexdump(const void *buf, int size); | |||
72 | #define ubi_dbg_dump_sv(sv) ({}) | 71 | #define ubi_dbg_dump_sv(sv) ({}) |
73 | #define ubi_dbg_dump_seb(seb, type) ({}) | 72 | #define ubi_dbg_dump_seb(seb, type) ({}) |
74 | #define ubi_dbg_dump_mkvol_req(req) ({}) | 73 | #define ubi_dbg_dump_mkvol_req(req) ({}) |
75 | #define ubi_dbg_hexdump(buf, size) ({}) | ||
76 | 74 | ||
77 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | 75 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ |
78 | 76 | ||
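
The two hunks above drop UBI's private ubi_dbg_hexdump() in favour of the kernel's generic print_hex_dump() helper. A minimal sketch of the replacement call, wrapped in an illustrative helper whose name is not from the patch:

#include <linux/kernel.h>	/* print_hex_dump(), DUMP_PREFIX_OFFSET */

/* Dump a buffer the way the new UBI call sites do: KERN_DEBUG level, empty
 * prefix string, offsets printed, 32 bytes per row, grouped byte-by-byte,
 * ASCII column enabled. */
static void dbg_dump_buf(const void *buf, size_t len)
{
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
}
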
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7c5e29eaf118..1297732f4db9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -46,6 +46,9 @@ | |||
46 | #include <linux/err.h> | 46 | #include <linux/err.h> |
47 | #include "ubi.h" | 47 | #include "ubi.h" |
48 | 48 | ||
49 | /* Number of physical eraseblocks reserved for atomic LEB change operation */ | ||
50 | #define EBA_RESERVED_PEBS 1 | ||
51 | |||
49 | /** | 52 | /** |
50 | * struct ltree_entry - an entry in the lock tree. | 53 | * struct ltree_entry - an entry in the lock tree. |
51 | * @rb: links RB-tree nodes | 54 | * @rb: links RB-tree nodes |
@@ -157,7 +160,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, | |||
157 | { | 160 | { |
158 | struct ltree_entry *le, *le1, *le_free; | 161 | struct ltree_entry *le, *le1, *le_free; |
159 | 162 | ||
160 | le = kmem_cache_alloc(ltree_slab, GFP_KERNEL); | 163 | le = kmem_cache_alloc(ltree_slab, GFP_NOFS); |
161 | if (!le) | 164 | if (!le) |
162 | return ERR_PTR(-ENOMEM); | 165 | return ERR_PTR(-ENOMEM); |
163 | 166 | ||
@@ -397,7 +400,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, | |||
397 | 400 | ||
398 | retry: | 401 | retry: |
399 | if (check) { | 402 | if (check) { |
400 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 403 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
401 | if (!vid_hdr) { | 404 | if (!vid_hdr) { |
402 | err = -ENOMEM; | 405 | err = -ENOMEM; |
403 | goto out_unlock; | 406 | goto out_unlock; |
@@ -495,16 +498,18 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | |||
495 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | 498 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; |
496 | struct ubi_volume *vol = ubi->volumes[idx]; | 499 | struct ubi_volume *vol = ubi->volumes[idx]; |
497 | struct ubi_vid_hdr *vid_hdr; | 500 | struct ubi_vid_hdr *vid_hdr; |
498 | unsigned char *new_buf; | ||
499 | 501 | ||
500 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 502 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
501 | if (!vid_hdr) { | 503 | if (!vid_hdr) { |
502 | return -ENOMEM; | 504 | return -ENOMEM; |
503 | } | 505 | } |
504 | 506 | ||
507 | mutex_lock(&ubi->buf_mutex); | ||
508 | |||
505 | retry: | 509 | retry: |
506 | new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); | 510 | new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); |
507 | if (new_pnum < 0) { | 511 | if (new_pnum < 0) { |
512 | mutex_unlock(&ubi->buf_mutex); | ||
508 | ubi_free_vid_hdr(ubi, vid_hdr); | 513 | ubi_free_vid_hdr(ubi, vid_hdr); |
509 | return new_pnum; | 514 | return new_pnum; |
510 | } | 515 | } |
@@ -524,31 +529,22 @@ retry: | |||
524 | goto write_error; | 529 | goto write_error; |
525 | 530 | ||
526 | data_size = offset + len; | 531 | data_size = offset + len; |
527 | new_buf = vmalloc(data_size); | 532 | memset(ubi->peb_buf1 + offset, 0xFF, len); |
528 | if (!new_buf) { | ||
529 | err = -ENOMEM; | ||
530 | goto out_put; | ||
531 | } | ||
532 | memset(new_buf + offset, 0xFF, len); | ||
533 | 533 | ||
534 | /* Read everything before the area where the write failure happened */ | 534 | /* Read everything before the area where the write failure happened */ |
535 | if (offset > 0) { | 535 | if (offset > 0) { |
536 | err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset); | 536 | err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); |
537 | if (err && err != UBI_IO_BITFLIPS) { | 537 | if (err && err != UBI_IO_BITFLIPS) |
538 | vfree(new_buf); | ||
539 | goto out_put; | 538 | goto out_put; |
540 | } | ||
541 | } | 539 | } |
542 | 540 | ||
543 | memcpy(new_buf + offset, buf, len); | 541 | memcpy(ubi->peb_buf1 + offset, buf, len); |
544 | 542 | ||
545 | err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size); | 543 | err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); |
546 | if (err) { | 544 | if (err) |
547 | vfree(new_buf); | ||
548 | goto write_error; | 545 | goto write_error; |
549 | } | ||
550 | 546 | ||
551 | vfree(new_buf); | 547 | mutex_unlock(&ubi->buf_mutex); |
552 | ubi_free_vid_hdr(ubi, vid_hdr); | 548 | ubi_free_vid_hdr(ubi, vid_hdr); |
553 | 549 | ||
554 | vol->eba_tbl[lnum] = new_pnum; | 550 | vol->eba_tbl[lnum] = new_pnum; |
@@ -558,6 +554,7 @@ retry: | |||
558 | return 0; | 554 | return 0; |
559 | 555 | ||
560 | out_put: | 556 | out_put: |
557 | mutex_unlock(&ubi->buf_mutex); | ||
561 | ubi_wl_put_peb(ubi, new_pnum, 1); | 558 | ubi_wl_put_peb(ubi, new_pnum, 1); |
562 | ubi_free_vid_hdr(ubi, vid_hdr); | 559 | ubi_free_vid_hdr(ubi, vid_hdr); |
563 | return err; | 560 | return err; |
@@ -570,6 +567,7 @@ write_error: | |||
570 | ubi_warn("failed to write to PEB %d", new_pnum); | 567 | ubi_warn("failed to write to PEB %d", new_pnum); |
571 | ubi_wl_put_peb(ubi, new_pnum, 1); | 568 | ubi_wl_put_peb(ubi, new_pnum, 1); |
572 | if (++tries > UBI_IO_RETRIES) { | 569 | if (++tries > UBI_IO_RETRIES) { |
570 | mutex_unlock(&ubi->buf_mutex); | ||
573 | ubi_free_vid_hdr(ubi, vid_hdr); | 571 | ubi_free_vid_hdr(ubi, vid_hdr); |
574 | return err; | 572 | return err; |
575 | } | 573 | } |
@@ -627,7 +625,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, | |||
627 | * The logical eraseblock is not mapped. We have to get a free physical | 625 | * The logical eraseblock is not mapped. We have to get a free physical |
628 | * eraseblock and write the volume identifier header there first. | 626 | * eraseblock and write the volume identifier header there first. |
629 | */ | 627 | */ |
630 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 628 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
631 | if (!vid_hdr) { | 629 | if (!vid_hdr) { |
632 | leb_write_unlock(ubi, vol_id, lnum); | 630 | leb_write_unlock(ubi, vol_id, lnum); |
633 | return -ENOMEM; | 631 | return -ENOMEM; |
@@ -738,7 +736,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, | |||
738 | else | 736 | else |
739 | ubi_assert(len % ubi->min_io_size == 0); | 737 | ubi_assert(len % ubi->min_io_size == 0); |
740 | 738 | ||
741 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 739 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
742 | if (!vid_hdr) | 740 | if (!vid_hdr) |
743 | return -ENOMEM; | 741 | return -ENOMEM; |
744 | 742 | ||
@@ -832,6 +830,9 @@ write_error: | |||
832 | * data, which has to be aligned. This function guarantees that in case of an | 830 | * data, which has to be aligned. This function guarantees that in case of an |
833 | * unclean reboot the old contents is preserved. Returns zero in case of | 831 | * unclean reboot the old contents is preserved. Returns zero in case of |
834 | * success and a negative error code in case of failure. | 832 | * success and a negative error code in case of failure. |
833 | * | ||
834 | * UBI reserves one LEB for the "atomic LEB change" operation, so only one | ||
835 | * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. | ||
835 | */ | 836 | */ |
836 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, | 837 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, |
837 | const void *buf, int len, int dtype) | 838 | const void *buf, int len, int dtype) |
@@ -844,15 +845,14 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, | |||
844 | if (ubi->ro_mode) | 845 | if (ubi->ro_mode) |
845 | return -EROFS; | 846 | return -EROFS; |
846 | 847 | ||
847 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 848 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
848 | if (!vid_hdr) | 849 | if (!vid_hdr) |
849 | return -ENOMEM; | 850 | return -ENOMEM; |
850 | 851 | ||
852 | mutex_lock(&ubi->alc_mutex); | ||
851 | err = leb_write_lock(ubi, vol_id, lnum); | 853 | err = leb_write_lock(ubi, vol_id, lnum); |
852 | if (err) { | 854 | if (err) |
853 | ubi_free_vid_hdr(ubi, vid_hdr); | 855 | goto out_mutex; |
854 | return err; | ||
855 | } | ||
856 | 856 | ||
857 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 857 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
858 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 858 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
@@ -869,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, | |||
869 | retry: | 869 | retry: |
870 | pnum = ubi_wl_get_peb(ubi, dtype); | 870 | pnum = ubi_wl_get_peb(ubi, dtype); |
871 | if (pnum < 0) { | 871 | if (pnum < 0) { |
872 | ubi_free_vid_hdr(ubi, vid_hdr); | 872 | err = pnum; |
873 | leb_write_unlock(ubi, vol_id, lnum); | 873 | goto out_leb_unlock; |
874 | return pnum; | ||
875 | } | 874 | } |
876 | 875 | ||
877 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", | 876 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", |
@@ -893,17 +892,18 @@ retry: | |||
893 | 892 | ||
894 | if (vol->eba_tbl[lnum] >= 0) { | 893 | if (vol->eba_tbl[lnum] >= 0) { |
895 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); | 894 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); |
896 | if (err) { | 895 | if (err) |
897 | ubi_free_vid_hdr(ubi, vid_hdr); | 896 | goto out_leb_unlock; |
898 | leb_write_unlock(ubi, vol_id, lnum); | ||
899 | return err; | ||
900 | } | ||
901 | } | 897 | } |
902 | 898 | ||
903 | vol->eba_tbl[lnum] = pnum; | 899 | vol->eba_tbl[lnum] = pnum; |
900 | |||
901 | out_leb_unlock: | ||
904 | leb_write_unlock(ubi, vol_id, lnum); | 902 | leb_write_unlock(ubi, vol_id, lnum); |
903 | out_mutex: | ||
904 | mutex_unlock(&ubi->alc_mutex); | ||
905 | ubi_free_vid_hdr(ubi, vid_hdr); | 905 | ubi_free_vid_hdr(ubi, vid_hdr); |
906 | return 0; | 906 | return err; |
907 | 907 | ||
908 | write_error: | 908 | write_error: |
909 | if (err != -EIO || !ubi->bad_allowed) { | 909 | if (err != -EIO || !ubi->bad_allowed) { |
@@ -913,17 +913,13 @@ write_error: | |||
913 | * mode just in case. | 913 | * mode just in case. |
914 | */ | 914 | */ |
915 | ubi_ro_mode(ubi); | 915 | ubi_ro_mode(ubi); |
916 | leb_write_unlock(ubi, vol_id, lnum); | 916 | goto out_leb_unlock; |
917 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
918 | return err; | ||
919 | } | 917 | } |
920 | 918 | ||
921 | err = ubi_wl_put_peb(ubi, pnum, 1); | 919 | err = ubi_wl_put_peb(ubi, pnum, 1); |
922 | if (err || ++tries > UBI_IO_RETRIES) { | 920 | if (err || ++tries > UBI_IO_RETRIES) { |
923 | ubi_ro_mode(ubi); | 921 | ubi_ro_mode(ubi); |
924 | leb_write_unlock(ubi, vol_id, lnum); | 922 | goto out_leb_unlock; |
925 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
926 | return err; | ||
927 | } | 923 | } |
928 | 924 | ||
929 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); | 925 | vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); |
@@ -965,7 +961,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
965 | int err, vol_id, lnum, data_size, aldata_size, pnum, idx; | 961 | int err, vol_id, lnum, data_size, aldata_size, pnum, idx; |
966 | struct ubi_volume *vol; | 962 | struct ubi_volume *vol; |
967 | uint32_t crc; | 963 | uint32_t crc; |
968 | void *buf, *buf1 = NULL; | ||
969 | 964 | ||
970 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 965 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
971 | lnum = be32_to_cpu(vid_hdr->lnum); | 966 | lnum = be32_to_cpu(vid_hdr->lnum); |
@@ -979,19 +974,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
979 | data_size = aldata_size = | 974 | data_size = aldata_size = |
980 | ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); | 975 | ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); |
981 | 976 | ||
982 | buf = vmalloc(aldata_size); | ||
983 | if (!buf) | ||
984 | return -ENOMEM; | ||
985 | |||
986 | /* | 977 | /* |
987 | * We do not want anybody to write to this logical eraseblock while we | 978 | * We do not want anybody to write to this logical eraseblock while we |
988 | * are moving it, so we lock it. | 979 | * are moving it, so we lock it. |
989 | */ | 980 | */ |
990 | err = leb_write_lock(ubi, vol_id, lnum); | 981 | err = leb_write_lock(ubi, vol_id, lnum); |
991 | if (err) { | 982 | if (err) |
992 | vfree(buf); | ||
993 | return err; | 983 | return err; |
994 | } | 984 | |
985 | mutex_lock(&ubi->buf_mutex); | ||
995 | 986 | ||
996 | /* | 987 | /* |
997 | * But the logical eraseblock might have been put by this time. | 988 | * But the logical eraseblock might have been put by this time. |
@@ -1023,7 +1014,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1023 | /* OK, now the LEB is locked and we can safely start moving it */ | 1014 | /* OK, now the LEB is locked and we can safely start moving it */ |
1024 | 1015 | ||
1025 | dbg_eba("read %d bytes of data", aldata_size); | 1016 | dbg_eba("read %d bytes of data", aldata_size); |
1026 | err = ubi_io_read_data(ubi, buf, from, 0, aldata_size); | 1017 | err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); |
1027 | if (err && err != UBI_IO_BITFLIPS) { | 1018 | if (err && err != UBI_IO_BITFLIPS) { |
1028 | ubi_warn("error %d while reading data from PEB %d", | 1019 | ubi_warn("error %d while reading data from PEB %d", |
1029 | err, from); | 1020 | err, from); |
@@ -1042,10 +1033,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1042 | */ | 1033 | */ |
1043 | if (vid_hdr->vol_type == UBI_VID_DYNAMIC) | 1034 | if (vid_hdr->vol_type == UBI_VID_DYNAMIC) |
1044 | aldata_size = data_size = | 1035 | aldata_size = data_size = |
1045 | ubi_calc_data_len(ubi, buf, data_size); | 1036 | ubi_calc_data_len(ubi, ubi->peb_buf1, data_size); |
1046 | 1037 | ||
1047 | cond_resched(); | 1038 | cond_resched(); |
1048 | crc = crc32(UBI_CRC32_INIT, buf, data_size); | 1039 | crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size); |
1049 | cond_resched(); | 1040 | cond_resched(); |
1050 | 1041 | ||
1051 | /* | 1042 | /* |
@@ -1076,23 +1067,18 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1076 | } | 1067 | } |
1077 | 1068 | ||
1078 | if (data_size > 0) { | 1069 | if (data_size > 0) { |
1079 | err = ubi_io_write_data(ubi, buf, to, 0, aldata_size); | 1070 | err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); |
1080 | if (err) | 1071 | if (err) |
1081 | goto out_unlock; | 1072 | goto out_unlock; |
1082 | 1073 | ||
1074 | cond_resched(); | ||
1075 | |||
1083 | /* | 1076 | /* |
1084 | * We've written the data and are going to read it back to make | 1077 | * We've written the data and are going to read it back to make |
1085 | * sure it was written correctly. | 1078 | * sure it was written correctly. |
1086 | */ | 1079 | */ |
1087 | buf1 = vmalloc(aldata_size); | ||
1088 | if (!buf1) { | ||
1089 | err = -ENOMEM; | ||
1090 | goto out_unlock; | ||
1091 | } | ||
1092 | |||
1093 | cond_resched(); | ||
1094 | 1080 | ||
1095 | err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size); | 1081 | err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); |
1096 | if (err) { | 1082 | if (err) { |
1097 | if (err != UBI_IO_BITFLIPS) | 1083 | if (err != UBI_IO_BITFLIPS) |
1098 | ubi_warn("cannot read data back from PEB %d", | 1084 | ubi_warn("cannot read data back from PEB %d", |
@@ -1102,7 +1088,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1102 | 1088 | ||
1103 | cond_resched(); | 1089 | cond_resched(); |
1104 | 1090 | ||
1105 | if (memcmp(buf, buf1, aldata_size)) { | 1091 | if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { |
1106 | ubi_warn("read data back from PEB %d - it is different", | 1092 | ubi_warn("read data back from PEB %d - it is different", |
1107 | to); | 1093 | to); |
1108 | goto out_unlock; | 1094 | goto out_unlock; |
@@ -1112,16 +1098,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1112 | ubi_assert(vol->eba_tbl[lnum] == from); | 1098 | ubi_assert(vol->eba_tbl[lnum] == from); |
1113 | vol->eba_tbl[lnum] = to; | 1099 | vol->eba_tbl[lnum] = to; |
1114 | 1100 | ||
1115 | leb_write_unlock(ubi, vol_id, lnum); | ||
1116 | vfree(buf); | ||
1117 | vfree(buf1); | ||
1118 | |||
1119 | return 0; | ||
1120 | |||
1121 | out_unlock: | 1101 | out_unlock: |
1102 | mutex_unlock(&ubi->buf_mutex); | ||
1122 | leb_write_unlock(ubi, vol_id, lnum); | 1103 | leb_write_unlock(ubi, vol_id, lnum); |
1123 | vfree(buf); | ||
1124 | vfree(buf1); | ||
1125 | return err; | 1104 | return err; |
1126 | } | 1105 | } |
1127 | 1106 | ||
@@ -1144,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1144 | dbg_eba("initialize EBA unit"); | 1123 | dbg_eba("initialize EBA unit"); |
1145 | 1124 | ||
1146 | spin_lock_init(&ubi->ltree_lock); | 1125 | spin_lock_init(&ubi->ltree_lock); |
1126 | mutex_init(&ubi->alc_mutex); | ||
1147 | ubi->ltree = RB_ROOT; | 1127 | ubi->ltree = RB_ROOT; |
1148 | 1128 | ||
1149 | if (ubi_devices_cnt == 0) { | 1129 | if (ubi_devices_cnt == 0) { |
@@ -1205,6 +1185,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1205 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; | 1185 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; |
1206 | } | 1186 | } |
1207 | 1187 | ||
1188 | if (ubi->avail_pebs < EBA_RESERVED_PEBS) { | ||
1189 | ubi_err("no enough physical eraseblocks (%d, need %d)", | ||
1190 | ubi->avail_pebs, EBA_RESERVED_PEBS); | ||
1191 | err = -ENOSPC; | ||
1192 | goto out_free; | ||
1193 | } | ||
1194 | ubi->avail_pebs -= EBA_RESERVED_PEBS; | ||
1195 | ubi->rsvd_pebs += EBA_RESERVED_PEBS; | ||
1196 | |||
1208 | dbg_eba("EBA unit is initialized"); | 1197 | dbg_eba("EBA unit is initialized"); |
1209 | return 0; | 1198 | return 0; |
1210 | 1199 | ||
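
Pulling the eba.c hunks together, atomic LEB changes are now serialized by @alc_mutex (matching the single PEB set aside via EBA_RESERVED_PEBS), with the per-LEB write lock taken inside it and both released in reverse order. A condensed sketch under those assumptions; the function name and body are illustrative, not the real implementation:

/* Illustrative only: the locking order the reworked
 * ubi_eba_atomic_leb_change() follows. */
static int atomic_change_sketch(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err;

	mutex_lock(&ubi->alc_mutex);	/* one atomic LEB change at a time */
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	/* ... write the new contents to a spare PEB, update eba_tbl[] ... */

	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	return err;
}
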
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index b0d8f4cede97..7c304eec78b5 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -98,8 +98,8 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, | |||
98 | static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); | 98 | static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); |
99 | static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, | 99 | static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, |
100 | const struct ubi_vid_hdr *vid_hdr); | 100 | const struct ubi_vid_hdr *vid_hdr); |
101 | static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, | 101 | static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, |
102 | int offset, int len); | 102 | int len); |
103 | #else | 103 | #else |
104 | #define paranoid_check_not_bad(ubi, pnum) 0 | 104 | #define paranoid_check_not_bad(ubi, pnum) 0 |
105 | #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 | 105 | #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 |
@@ -202,8 +202,8 @@ retry: | |||
202 | * Note, in case of an error, it is possible that something was still written | 202 | * Note, in case of an error, it is possible that something was still written |
203 | * to the flash media, but may be some garbage. | 203 | * to the flash media, but may be some garbage. |
204 | */ | 204 | */ |
205 | int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, | 205 | int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, |
206 | int offset, int len) | 206 | int len) |
207 | { | 207 | { |
208 | int err; | 208 | int err; |
209 | size_t written; | 209 | size_t written; |
@@ -285,7 +285,7 @@ static void erase_callback(struct erase_info *ei) | |||
285 | * zero in case of success and a negative error code in case of failure. If | 285 | * zero in case of success and a negative error code in case of failure. If |
286 | * %-EIO is returned, the physical eraseblock most probably went bad. | 286 | * %-EIO is returned, the physical eraseblock most probably went bad. |
287 | */ | 287 | */ |
288 | static int do_sync_erase(const struct ubi_device *ubi, int pnum) | 288 | static int do_sync_erase(struct ubi_device *ubi, int pnum) |
289 | { | 289 | { |
290 | int err, retries = 0; | 290 | int err, retries = 0; |
291 | struct erase_info ei; | 291 | struct erase_info ei; |
@@ -377,29 +377,25 @@ static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; | |||
377 | * test, a positive number of erase operations done if the test was | 377 | * test, a positive number of erase operations done if the test was |
378 | * successfully passed, and other negative error codes in case of other errors. | 378 | * successfully passed, and other negative error codes in case of other errors. |
379 | */ | 379 | */ |
380 | static int torture_peb(const struct ubi_device *ubi, int pnum) | 380 | static int torture_peb(struct ubi_device *ubi, int pnum) |
381 | { | 381 | { |
382 | void *buf; | ||
383 | int err, i, patt_count; | 382 | int err, i, patt_count; |
384 | 383 | ||
385 | buf = vmalloc(ubi->peb_size); | ||
386 | if (!buf) | ||
387 | return -ENOMEM; | ||
388 | |||
389 | patt_count = ARRAY_SIZE(patterns); | 384 | patt_count = ARRAY_SIZE(patterns); |
390 | ubi_assert(patt_count > 0); | 385 | ubi_assert(patt_count > 0); |
391 | 386 | ||
387 | mutex_lock(&ubi->buf_mutex); | ||
392 | for (i = 0; i < patt_count; i++) { | 388 | for (i = 0; i < patt_count; i++) { |
393 | err = do_sync_erase(ubi, pnum); | 389 | err = do_sync_erase(ubi, pnum); |
394 | if (err) | 390 | if (err) |
395 | goto out; | 391 | goto out; |
396 | 392 | ||
397 | /* Make sure the PEB contains only 0xFF bytes */ | 393 | /* Make sure the PEB contains only 0xFF bytes */ |
398 | err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); | 394 | err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size); |
399 | if (err) | 395 | if (err) |
400 | goto out; | 396 | goto out; |
401 | 397 | ||
402 | err = check_pattern(buf, 0xFF, ubi->peb_size); | 398 | err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size); |
403 | if (err == 0) { | 399 | if (err == 0) { |
404 | ubi_err("erased PEB %d, but a non-0xFF byte found", | 400 | ubi_err("erased PEB %d, but a non-0xFF byte found", |
405 | pnum); | 401 | pnum); |
@@ -408,17 +404,17 @@ static int torture_peb(const struct ubi_device *ubi, int pnum) | |||
408 | } | 404 | } |
409 | 405 | ||
410 | /* Write a pattern and check it */ | 406 | /* Write a pattern and check it */ |
411 | memset(buf, patterns[i], ubi->peb_size); | 407 | memset(ubi->peb_buf1, patterns[i], ubi->peb_size); |
412 | err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size); | 408 | err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size); |
413 | if (err) | 409 | if (err) |
414 | goto out; | 410 | goto out; |
415 | 411 | ||
416 | memset(buf, ~patterns[i], ubi->peb_size); | 412 | memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size); |
417 | err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); | 413 | err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size); |
418 | if (err) | 414 | if (err) |
419 | goto out; | 415 | goto out; |
420 | 416 | ||
421 | err = check_pattern(buf, patterns[i], ubi->peb_size); | 417 | err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size); |
422 | if (err == 0) { | 418 | if (err == 0) { |
423 | ubi_err("pattern %x checking failed for PEB %d", | 419 | ubi_err("pattern %x checking failed for PEB %d", |
424 | patterns[i], pnum); | 420 | patterns[i], pnum); |
@@ -430,14 +426,17 @@ static int torture_peb(const struct ubi_device *ubi, int pnum) | |||
430 | err = patt_count; | 426 | err = patt_count; |
431 | 427 | ||
432 | out: | 428 | out: |
433 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) | 429 | mutex_unlock(&ubi->buf_mutex); |
430 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { | ||
434 | /* | 431 | /* |
435 | * If a bit-flip or data integrity error was detected, the test | 432 | * If a bit-flip or data integrity error was detected, the test |
436 | * has not passed because it happened on a freshly erased | 433 | * has not passed because it happened on a freshly erased |
437 | * physical eraseblock which means something is wrong with it. | 434 | * physical eraseblock which means something is wrong with it. |
438 | */ | 435 | */ |
436 | ubi_err("read problems on freshly erased PEB %d, must be bad", | ||
437 | pnum); | ||
439 | err = -EIO; | 438 | err = -EIO; |
440 | vfree(buf); | 439 | } |
441 | return err; | 440 | return err; |
442 | } | 441 | } |
443 | 442 | ||
@@ -457,7 +456,7 @@ out: | |||
457 | * codes in case of other errors. Note, %-EIO means that the physical | 456 | * codes in case of other errors. Note, %-EIO means that the physical |
458 | * eraseblock is bad. | 457 | * eraseblock is bad. |
459 | */ | 458 | */ |
460 | int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture) | 459 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) |
461 | { | 460 | { |
462 | int err, ret = 0; | 461 | int err, ret = 0; |
463 | 462 | ||
@@ -614,7 +613,7 @@ bad: | |||
614 | * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; | 613 | * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; |
615 | * o a negative error code in case of failure. | 614 | * o a negative error code in case of failure. |
616 | */ | 615 | */ |
617 | int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, | 616 | int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, |
618 | struct ubi_ec_hdr *ec_hdr, int verbose) | 617 | struct ubi_ec_hdr *ec_hdr, int verbose) |
619 | { | 618 | { |
620 | int err, read_err = 0; | 619 | int err, read_err = 0; |
@@ -720,7 +719,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, | |||
720 | * case of failure. If %-EIO is returned, the physical eraseblock most probably | 719 | * case of failure. If %-EIO is returned, the physical eraseblock most probably |
721 | * went bad. | 720 | * went bad. |
722 | */ | 721 | */ |
723 | int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, | 722 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
724 | struct ubi_ec_hdr *ec_hdr) | 723 | struct ubi_ec_hdr *ec_hdr) |
725 | { | 724 | { |
726 | int err; | 725 | int err; |
@@ -886,7 +885,7 @@ bad: | |||
886 | * header there); | 885 | * header there); |
887 | * o a negative error code in case of failure. | 886 | * o a negative error code in case of failure. |
888 | */ | 887 | */ |
889 | int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, | 888 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
890 | struct ubi_vid_hdr *vid_hdr, int verbose) | 889 | struct ubi_vid_hdr *vid_hdr, int verbose) |
891 | { | 890 | { |
892 | int err, read_err = 0; | 891 | int err, read_err = 0; |
@@ -993,7 +992,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, | |||
993 | * case of failure. If %-EIO is returned, the physical eraseblock probably went | 992 | * case of failure. If %-EIO is returned, the physical eraseblock probably went |
994 | * bad. | 993 | * bad. |
995 | */ | 994 | */ |
996 | int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, | 995 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
997 | struct ubi_vid_hdr *vid_hdr) | 996 | struct ubi_vid_hdr *vid_hdr) |
998 | { | 997 | { |
999 | int err; | 998 | int err; |
@@ -1096,7 +1095,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) | |||
1096 | uint32_t crc, hdr_crc; | 1095 | uint32_t crc, hdr_crc; |
1097 | struct ubi_ec_hdr *ec_hdr; | 1096 | struct ubi_ec_hdr *ec_hdr; |
1098 | 1097 | ||
1099 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1098 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
1100 | if (!ec_hdr) | 1099 | if (!ec_hdr) |
1101 | return -ENOMEM; | 1100 | return -ENOMEM; |
1102 | 1101 | ||
@@ -1176,7 +1175,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | |||
1176 | struct ubi_vid_hdr *vid_hdr; | 1175 | struct ubi_vid_hdr *vid_hdr; |
1177 | void *p; | 1176 | void *p; |
1178 | 1177 | ||
1179 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 1178 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
1180 | if (!vid_hdr) | 1179 | if (!vid_hdr) |
1181 | return -ENOMEM; | 1180 | return -ENOMEM; |
1182 | 1181 | ||
@@ -1216,44 +1215,40 @@ exit: | |||
1216 | * @offset of the physical eraseblock @pnum, %1 if not, and a negative error | 1215 | * @offset of the physical eraseblock @pnum, %1 if not, and a negative error |
1217 | * code if an error occurred. | 1216 | * code if an error occurred. |
1218 | */ | 1217 | */ |
1219 | static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, | 1218 | static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, |
1220 | int offset, int len) | 1219 | int len) |
1221 | { | 1220 | { |
1222 | size_t read; | 1221 | size_t read; |
1223 | int err; | 1222 | int err; |
1224 | void *buf; | ||
1225 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; | 1223 | loff_t addr = (loff_t)pnum * ubi->peb_size + offset; |
1226 | 1224 | ||
1227 | buf = vmalloc(len); | 1225 | mutex_lock(&ubi->dbg_buf_mutex); |
1228 | if (!buf) | 1226 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf); |
1229 | return -ENOMEM; | ||
1230 | memset(buf, 0, len); | ||
1231 | |||
1232 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); | ||
1233 | if (err && err != -EUCLEAN) { | 1227 | if (err && err != -EUCLEAN) { |
1234 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | 1228 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " |
1235 | "read %zd bytes", err, len, pnum, offset, read); | 1229 | "read %zd bytes", err, len, pnum, offset, read); |
1236 | goto error; | 1230 | goto error; |
1237 | } | 1231 | } |
1238 | 1232 | ||
1239 | err = check_pattern(buf, 0xFF, len); | 1233 | err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); |
1240 | if (err == 0) { | 1234 | if (err == 0) { |
1241 | ubi_err("flash region at PEB %d:%d, length %d does not " | 1235 | ubi_err("flash region at PEB %d:%d, length %d does not " |
1242 | "contain all 0xFF bytes", pnum, offset, len); | 1236 | "contain all 0xFF bytes", pnum, offset, len); |
1243 | goto fail; | 1237 | goto fail; |
1244 | } | 1238 | } |
1239 | mutex_unlock(&ubi->dbg_buf_mutex); | ||
1245 | 1240 | ||
1246 | vfree(buf); | ||
1247 | return 0; | 1241 | return 0; |
1248 | 1242 | ||
1249 | fail: | 1243 | fail: |
1250 | ubi_err("paranoid check failed for PEB %d", pnum); | 1244 | ubi_err("paranoid check failed for PEB %d", pnum); |
1251 | dbg_msg("hex dump of the %d-%d region", offset, offset + len); | 1245 | dbg_msg("hex dump of the %d-%d region", offset, offset + len); |
1252 | ubi_dbg_hexdump(buf, len); | 1246 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
1247 | ubi->dbg_peb_buf, len, 1); | ||
1253 | err = 1; | 1248 | err = 1; |
1254 | error: | 1249 | error: |
1255 | ubi_dbg_dump_stack(); | 1250 | ubi_dbg_dump_stack(); |
1256 | vfree(buf); | 1251 | mutex_unlock(&ubi->dbg_buf_mutex); |
1257 | return err; | 1252 | return err; |
1258 | } | 1253 | } |
1259 | 1254 | ||
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 4a458e83e4e9..03c774f41549 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -99,16 +99,21 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) | |||
99 | { | 99 | { |
100 | int err; | 100 | int err; |
101 | struct ubi_volume_desc *desc; | 101 | struct ubi_volume_desc *desc; |
102 | struct ubi_device *ubi = ubi_devices[ubi_num]; | 102 | struct ubi_device *ubi; |
103 | struct ubi_volume *vol; | 103 | struct ubi_volume *vol; |
104 | 104 | ||
105 | dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); | 105 | dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); |
106 | 106 | ||
107 | err = -ENODEV; | 107 | err = -ENODEV; |
108 | if (ubi_num < 0) | ||
109 | return ERR_PTR(err); | ||
110 | |||
111 | ubi = ubi_devices[ubi_num]; | ||
112 | |||
108 | if (!try_module_get(THIS_MODULE)) | 113 | if (!try_module_get(THIS_MODULE)) |
109 | return ERR_PTR(err); | 114 | return ERR_PTR(err); |
110 | 115 | ||
111 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi) | 116 | if (ubi_num >= UBI_MAX_DEVICES || !ubi) |
112 | goto out_put; | 117 | goto out_put; |
113 | 118 | ||
114 | err = -EINVAL; | 119 | err = -EINVAL; |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 29c41eeb09fe..c7b0afc9d280 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -45,8 +45,7 @@ | |||
45 | #include "ubi.h" | 45 | #include "ubi.h" |
46 | 46 | ||
47 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | 47 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
48 | static int paranoid_check_si(const struct ubi_device *ubi, | 48 | static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si); |
49 | struct ubi_scan_info *si); | ||
50 | #else | 49 | #else |
51 | #define paranoid_check_si(ubi, si) 0 | 50 | #define paranoid_check_si(ubi, si) 0 |
52 | #endif | 51 | #endif |
@@ -259,14 +258,13 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id, | |||
259 | * o bit 2 is cleared: the older LEB is not corrupted; | 258 | * o bit 2 is cleared: the older LEB is not corrupted; |
260 | * o bit 2 is set: the older LEB is corrupted. | 259 | * o bit 2 is set: the older LEB is corrupted. |
261 | */ | 260 | */ |
262 | static int compare_lebs(const struct ubi_device *ubi, | 261 | static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, |
263 | const struct ubi_scan_leb *seb, int pnum, | 262 | int pnum, const struct ubi_vid_hdr *vid_hdr) |
264 | const struct ubi_vid_hdr *vid_hdr) | ||
265 | { | 263 | { |
266 | void *buf; | 264 | void *buf; |
267 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; | 265 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; |
268 | uint32_t data_crc, crc; | 266 | uint32_t data_crc, crc; |
269 | struct ubi_vid_hdr *vidh = NULL; | 267 | struct ubi_vid_hdr *vh = NULL; |
270 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); | 268 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); |
271 | 269 | ||
272 | if (seb->sqnum == 0 && sqnum2 == 0) { | 270 | if (seb->sqnum == 0 && sqnum2 == 0) { |
@@ -323,11 +321,11 @@ static int compare_lebs(const struct ubi_device *ubi, | |||
323 | } else { | 321 | } else { |
324 | pnum = seb->pnum; | 322 | pnum = seb->pnum; |
325 | 323 | ||
326 | vidh = ubi_zalloc_vid_hdr(ubi); | 324 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
327 | if (!vidh) | 325 | if (!vh) |
328 | return -ENOMEM; | 326 | return -ENOMEM; |
329 | 327 | ||
330 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); | 328 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); |
331 | if (err) { | 329 | if (err) { |
332 | if (err == UBI_IO_BITFLIPS) | 330 | if (err == UBI_IO_BITFLIPS) |
333 | bitflips = 1; | 331 | bitflips = 1; |
@@ -341,7 +339,7 @@ static int compare_lebs(const struct ubi_device *ubi, | |||
341 | } | 339 | } |
342 | } | 340 | } |
343 | 341 | ||
344 | if (!vidh->copy_flag) { | 342 | if (!vh->copy_flag) { |
345 | /* It is not a copy, so it is newer */ | 343 | /* It is not a copy, so it is newer */ |
346 | dbg_bld("first PEB %d is newer, copy_flag is unset", | 344 | dbg_bld("first PEB %d is newer, copy_flag is unset", |
347 | pnum); | 345 | pnum); |
@@ -349,7 +347,7 @@ static int compare_lebs(const struct ubi_device *ubi, | |||
349 | goto out_free_vidh; | 347 | goto out_free_vidh; |
350 | } | 348 | } |
351 | 349 | ||
352 | vid_hdr = vidh; | 350 | vid_hdr = vh; |
353 | } | 351 | } |
354 | 352 | ||
355 | /* Read the data of the copy and check the CRC */ | 353 | /* Read the data of the copy and check the CRC */ |
@@ -379,7 +377,7 @@ static int compare_lebs(const struct ubi_device *ubi, | |||
379 | } | 377 | } |
380 | 378 | ||
381 | vfree(buf); | 379 | vfree(buf); |
382 | ubi_free_vid_hdr(ubi, vidh); | 380 | ubi_free_vid_hdr(ubi, vh); |
383 | 381 | ||
384 | if (second_is_newer) | 382 | if (second_is_newer) |
385 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); | 383 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); |
@@ -391,7 +389,7 @@ static int compare_lebs(const struct ubi_device *ubi, | |||
391 | out_free_buf: | 389 | out_free_buf: |
392 | vfree(buf); | 390 | vfree(buf); |
393 | out_free_vidh: | 391 | out_free_vidh: |
394 | ubi_free_vid_hdr(ubi, vidh); | 392 | ubi_free_vid_hdr(ubi, vh); |
395 | ubi_assert(err < 0); | 393 | ubi_assert(err < 0); |
396 | return err; | 394 | return err; |
397 | } | 395 | } |
@@ -413,7 +411,7 @@ out_free_vidh: | |||
413 | * to be picked, while the older one has to be dropped. This function returns | 411 | * to be picked, while the older one has to be dropped. This function returns |
414 | * zero in case of success and a negative error code in case of failure. | 412 | * zero in case of success and a negative error code in case of failure. |
415 | */ | 413 | */ |
416 | int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, | 414 | int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, |
417 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, | 415 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, |
418 | int bitflips) | 416 | int bitflips) |
419 | { | 417 | { |
@@ -667,16 +665,12 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv) | |||
667 | * function returns zero in case of success and a negative error code in case | 665 | * function returns zero in case of success and a negative error code in case |
668 | * of failure. | 666 | * of failure. |
669 | */ | 667 | */ |
670 | int ubi_scan_erase_peb(const struct ubi_device *ubi, | 668 | int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, |
671 | const struct ubi_scan_info *si, int pnum, int ec) | 669 | int pnum, int ec) |
672 | { | 670 | { |
673 | int err; | 671 | int err; |
674 | struct ubi_ec_hdr *ec_hdr; | 672 | struct ubi_ec_hdr *ec_hdr; |
675 | 673 | ||
676 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
677 | if (!ec_hdr) | ||
678 | return -ENOMEM; | ||
679 | |||
680 | if ((long long)ec >= UBI_MAX_ERASECOUNTER) { | 674 | if ((long long)ec >= UBI_MAX_ERASECOUNTER) { |
681 | /* | 675 | /* |
682 | * Erase counter overflow. Upgrade UBI and use 64-bit | 676 | * Erase counter overflow. Upgrade UBI and use 64-bit |
@@ -686,6 +680,10 @@ int ubi_scan_erase_peb(const struct ubi_device *ubi, | |||
686 | return -EINVAL; | 680 | return -EINVAL; |
687 | } | 681 | } |
688 | 682 | ||
683 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
684 | if (!ec_hdr) | ||
685 | return -ENOMEM; | ||
686 | |||
689 | ec_hdr->ec = cpu_to_be64(ec); | 687 | ec_hdr->ec = cpu_to_be64(ec); |
690 | 688 | ||
691 | err = ubi_io_sync_erase(ubi, pnum, 0); | 689 | err = ubi_io_sync_erase(ubi, pnum, 0); |
@@ -712,7 +710,7 @@ out_free: | |||
712 | * This function returns scanning physical eraseblock information in case of | 710 | * This function returns scanning physical eraseblock information in case of |
713 | * success and an error code in case of failure. | 711 | * success and an error code in case of failure. |
714 | */ | 712 | */ |
715 | struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, | 713 | struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, |
716 | struct ubi_scan_info *si) | 714 | struct ubi_scan_info *si) |
717 | { | 715 | { |
718 | int err = 0, i; | 716 | int err = 0, i; |
@@ -948,7 +946,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) | |||
948 | if (!ech) | 946 | if (!ech) |
949 | goto out_si; | 947 | goto out_si; |
950 | 948 | ||
951 | vidh = ubi_zalloc_vid_hdr(ubi); | 949 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
952 | if (!vidh) | 950 | if (!vidh) |
953 | goto out_ech; | 951 | goto out_ech; |
954 | 952 | ||
@@ -1110,8 +1108,7 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) | |||
1110 | * This function returns zero if the scanning information is all right, %1 if | 1108 | * This function returns zero if the scanning information is all right, %1 if |
1111 | * not and a negative error code if an error occurred. | 1109 | * not and a negative error code if an error occurred. |
1112 | */ | 1110 | */ |
1113 | static int paranoid_check_si(const struct ubi_device *ubi, | 1111 | static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si) |
1114 | struct ubi_scan_info *si) | ||
1115 | { | 1112 | { |
1116 | int pnum, err, vols_found = 0; | 1113 | int pnum, err, vols_found = 0; |
1117 | struct rb_node *rb1, *rb2; | 1114 | struct rb_node *rb1, *rb2; |
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 140e82e26534..46d444af471a 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -147,7 +147,7 @@ static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv, | |||
147 | list_add_tail(&seb->u.list, list); | 147 | list_add_tail(&seb->u.list, list); |
148 | } | 148 | } |
149 | 149 | ||
150 | int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, | 150 | int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, |
151 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, | 151 | int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, |
152 | int bitflips); | 152 | int bitflips); |
153 | struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, | 153 | struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, |
@@ -155,10 +155,10 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, | |||
155 | struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, | 155 | struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, |
156 | int lnum); | 156 | int lnum); |
157 | void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv); | 157 | void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv); |
158 | struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, | 158 | struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, |
159 | struct ubi_scan_info *si); | 159 | struct ubi_scan_info *si); |
160 | int ubi_scan_erase_peb(const struct ubi_device *ubi, | 160 | int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, |
161 | const struct ubi_scan_info *si, int pnum, int ec); | 161 | int pnum, int ec); |
162 | struct ubi_scan_info *ubi_scan(struct ubi_device *ubi); | 162 | struct ubi_scan_info *ubi_scan(struct ubi_device *ubi); |
163 | void ubi_scan_destroy_si(struct ubi_scan_info *si); | 163 | void ubi_scan_destroy_si(struct ubi_scan_info *si); |
164 | 164 | ||
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5959f91be240..5e941a633030 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -221,14 +221,15 @@ struct ubi_wl_entry; | |||
221 | * @vtbl_slots: how many slots are available in the volume table | 221 | * @vtbl_slots: how many slots are available in the volume table |
222 | * @vtbl_size: size of the volume table in bytes | 222 | * @vtbl_size: size of the volume table in bytes |
223 | * @vtbl: in-RAM volume table copy | 223 | * @vtbl: in-RAM volume table copy |
224 | * @vtbl_mutex: protects on-flash volume table | ||
224 | * | 225 | * |
225 | * @max_ec: current highest erase counter value | 226 | * @max_ec: current highest erase counter value |
226 | * @mean_ec: current mean erase counter value | 227 | * @mean_ec: current mean erase counter value |
227 | * | 228 | * |
228 | * global_sqnum: global sequence number | 229 | * @global_sqnum: global sequence number |
229 | * @ltree_lock: protects the lock tree and @global_sqnum | 230 | * @ltree_lock: protects the lock tree and @global_sqnum |
230 | * @ltree: the lock tree | 231 | * @ltree: the lock tree |
231 | * @vtbl_mutex: protects on-flash volume table | 232 | * @alc_mutex: serializes "atomic LEB change" operations |
232 | * | 233 | * |
233 | * @used: RB-tree of used physical eraseblocks | 234 | * @used: RB-tree of used physical eraseblocks |
234 | * @free: RB-tree of free physical eraseblocks | 235 | * @free: RB-tree of free physical eraseblocks |
@@ -274,6 +275,12 @@ struct ubi_wl_entry; | |||
274 | * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or | 275 | * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or |
275 | * not | 276 | * not |
276 | * @mtd: MTD device descriptor | 277 | * @mtd: MTD device descriptor |
278 | * | ||
279 | * @peb_buf1: a buffer of PEB size used for different purposes | ||
280 | * @peb_buf2: another buffer of PEB size used for different purposes | ||
281 | * @buf_mutex: proptects @peb_buf1 and @peb_buf2 | ||
282 | * @dbg_peb_buf: buffer of PEB size used for debugging | ||
283 | * @dbg_buf_mutex: proptects @dbg_peb_buf | ||
277 | */ | 284 | */ |
278 | struct ubi_device { | 285 | struct ubi_device { |
279 | struct cdev cdev; | 286 | struct cdev cdev; |
@@ -302,6 +309,7 @@ struct ubi_device { | |||
302 | unsigned long long global_sqnum; | 309 | unsigned long long global_sqnum; |
303 | spinlock_t ltree_lock; | 310 | spinlock_t ltree_lock; |
304 | struct rb_root ltree; | 311 | struct rb_root ltree; |
312 | struct mutex alc_mutex; | ||
305 | 313 | ||
306 | /* Wear-leveling unit's stuff */ | 314 | /* Wear-leveling unit's stuff */ |
307 | struct rb_root used; | 315 | struct rb_root used; |
@@ -343,6 +351,14 @@ struct ubi_device { | |||
343 | int vid_hdr_shift; | 351 | int vid_hdr_shift; |
344 | int bad_allowed; | 352 | int bad_allowed; |
345 | struct mtd_info *mtd; | 353 | struct mtd_info *mtd; |
354 | |||
355 | void *peb_buf1; | ||
356 | void *peb_buf2; | ||
357 | struct mutex buf_mutex; | ||
358 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
359 | void *dbg_peb_buf; | ||
360 | struct mutex dbg_buf_mutex; | ||
361 | #endif | ||
346 | }; | 362 | }; |
347 | 363 | ||
348 | extern struct file_operations ubi_cdev_operations; | 364 | extern struct file_operations ubi_cdev_operations; |
@@ -409,18 +425,18 @@ void ubi_wl_close(struct ubi_device *ubi); | |||
409 | /* io.c */ | 425 | /* io.c */ |
410 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | 426 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, |
411 | int len); | 427 | int len); |
412 | int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, | 428 | int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, |
413 | int offset, int len); | 429 | int len); |
414 | int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture); | 430 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); |
415 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); | 431 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); |
416 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); | 432 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); |
417 | int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, | 433 | int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, |
418 | struct ubi_ec_hdr *ec_hdr, int verbose); | 434 | struct ubi_ec_hdr *ec_hdr, int verbose); |
419 | int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, | 435 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
420 | struct ubi_ec_hdr *ec_hdr); | 436 | struct ubi_ec_hdr *ec_hdr); |
421 | int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, | 437 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
422 | struct ubi_vid_hdr *vid_hdr, int verbose); | 438 | struct ubi_vid_hdr *vid_hdr, int verbose); |
423 | int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, | 439 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
424 | struct ubi_vid_hdr *vid_hdr); | 440 | struct ubi_vid_hdr *vid_hdr); |
425 | 441 | ||
426 | /* | 442 | /* |
@@ -439,16 +455,18 @@ int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, | |||
439 | /** | 455 | /** |
440 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. | 456 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. |
441 | * @ubi: UBI device description object | 457 | * @ubi: UBI device description object |
458 | * @gfp_flags: GFP flags to allocate with | ||
442 | * | 459 | * |
443 | * This function returns a pointer to the newly allocated and zero-filled | 460 | * This function returns a pointer to the newly allocated and zero-filled |
444 | * volume identifier header object in case of success and %NULL in case of | 461 | * volume identifier header object in case of success and %NULL in case of |
445 | * failure. | 462 | * failure. |
446 | */ | 463 | */ |
447 | static inline struct ubi_vid_hdr *ubi_zalloc_vid_hdr(const struct ubi_device *ubi) | 464 | static inline struct ubi_vid_hdr * |
465 | ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) | ||
448 | { | 466 | { |
449 | void *vid_hdr; | 467 | void *vid_hdr; |
450 | 468 | ||
451 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL); | 469 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); |
452 | if (!vid_hdr) | 470 | if (!vid_hdr) |
453 | return NULL; | 471 | return NULL; |
454 | 472 | ||
@@ -492,7 +510,7 @@ static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, | |||
492 | * the beginning of the logical eraseblock, not to the beginning of the | 510 | * the beginning of the logical eraseblock, not to the beginning of the |
493 | * physical eraseblock. | 511 | * physical eraseblock. |
494 | */ | 512 | */ |
495 | static inline int ubi_io_write_data(const struct ubi_device *ubi, const void *buf, | 513 | static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, |
496 | int pnum, int offset, int len) | 514 | int pnum, int offset, int len) |
497 | { | 515 | { |
498 | ubi_assert(offset >= 0); | 516 | ubi_assert(offset >= 0); |
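
The ubi_zalloc_vid_hdr() prototype above now takes the GFP flags from the caller: in the hunks elsewhere in this commit the I/O paths pass GFP_NOFS, while the attach-time scan in scan.c keeps GFP_KERNEL. An illustrative caller, with a hypothetical function name:

/* Hypothetical caller showing the new allocation-flag convention on an I/O
 * path that may be reached from filesystem writeback. */
static int vid_hdr_example(struct ubi_device *ubi)
{
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;
	/* ... fill in and write the header here ... */
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
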
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index ea0d5c825ab4..88629a320c2b 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -37,21 +37,21 @@ static ssize_t vol_attribute_show(struct device *dev, | |||
37 | struct device_attribute *attr, char *buf); | 37 | struct device_attribute *attr, char *buf); |
38 | 38 | ||
39 | /* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */ | 39 | /* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */ |
40 | static struct device_attribute vol_reserved_ebs = | 40 | static struct device_attribute attr_vol_reserved_ebs = |
41 | __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL); | 41 | __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL); |
42 | static struct device_attribute vol_type = | 42 | static struct device_attribute attr_vol_type = |
43 | __ATTR(type, S_IRUGO, vol_attribute_show, NULL); | 43 | __ATTR(type, S_IRUGO, vol_attribute_show, NULL); |
44 | static struct device_attribute vol_name = | 44 | static struct device_attribute attr_vol_name = |
45 | __ATTR(name, S_IRUGO, vol_attribute_show, NULL); | 45 | __ATTR(name, S_IRUGO, vol_attribute_show, NULL); |
46 | static struct device_attribute vol_corrupted = | 46 | static struct device_attribute attr_vol_corrupted = |
47 | __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL); | 47 | __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL); |
48 | static struct device_attribute vol_alignment = | 48 | static struct device_attribute attr_vol_alignment = |
49 | __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL); | 49 | __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL); |
50 | static struct device_attribute vol_usable_eb_size = | 50 | static struct device_attribute attr_vol_usable_eb_size = |
51 | __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL); | 51 | __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL); |
52 | static struct device_attribute vol_data_bytes = | 52 | static struct device_attribute attr_vol_data_bytes = |
53 | __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL); | 53 | __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL); |
54 | static struct device_attribute vol_upd_marker = | 54 | static struct device_attribute attr_vol_upd_marker = |
55 | __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL); | 55 | __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL); |
56 | 56 | ||
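Only the C identifiers gain an attr_ prefix; the sysfs file names, which come from the first __ATTR() argument, are unchanged. For reference, a read-only attribute of this kind roughly expands as follows (sketch, not one of the UBI definitions):

    static struct device_attribute attr_example =
            __ATTR(example, S_IRUGO, vol_attribute_show, NULL);
    /* roughly equivalent to:
     * { .attr = { .name = "example", .mode = S_IRUGO },
     *   .show = vol_attribute_show, .store = NULL }
     */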
57 | /* | 57 | /* |
@@ -78,23 +78,27 @@ static ssize_t vol_attribute_show(struct device *dev, | |||
78 | spin_unlock(&vol->ubi->volumes_lock); | 78 | spin_unlock(&vol->ubi->volumes_lock); |
79 | return -ENODEV; | 79 | return -ENODEV; |
80 | } | 80 | } |
81 | if (attr == &vol_reserved_ebs) | 81 | if (attr == &attr_vol_reserved_ebs) |
82 | ret = sprintf(buf, "%d\n", vol->reserved_pebs); | 82 | ret = sprintf(buf, "%d\n", vol->reserved_pebs); |
83 | else if (attr == &vol_type) { | 83 | else if (attr == &attr_vol_type) { |
84 | const char *tp; | 84 | const char *tp; |
85 | tp = vol->vol_type == UBI_DYNAMIC_VOLUME ? "dynamic" : "static"; | 85 | |
86 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | ||
87 | tp = "dynamic"; | ||
88 | else | ||
89 | tp = "static"; | ||
86 | ret = sprintf(buf, "%s\n", tp); | 90 | ret = sprintf(buf, "%s\n", tp); |
87 | } else if (attr == &vol_name) | 91 | } else if (attr == &attr_vol_name) |
88 | ret = sprintf(buf, "%s\n", vol->name); | 92 | ret = sprintf(buf, "%s\n", vol->name); |
89 | else if (attr == &vol_corrupted) | 93 | else if (attr == &attr_vol_corrupted) |
90 | ret = sprintf(buf, "%d\n", vol->corrupted); | 94 | ret = sprintf(buf, "%d\n", vol->corrupted); |
91 | else if (attr == &vol_alignment) | 95 | else if (attr == &attr_vol_alignment) |
92 | ret = sprintf(buf, "%d\n", vol->alignment); | 96 | ret = sprintf(buf, "%d\n", vol->alignment); |
93 | else if (attr == &vol_usable_eb_size) { | 97 | else if (attr == &attr_vol_usable_eb_size) { |
94 | ret = sprintf(buf, "%d\n", vol->usable_leb_size); | 98 | ret = sprintf(buf, "%d\n", vol->usable_leb_size); |
95 | } else if (attr == &vol_data_bytes) | 99 | } else if (attr == &attr_vol_data_bytes) |
96 | ret = sprintf(buf, "%lld\n", vol->used_bytes); | 100 | ret = sprintf(buf, "%lld\n", vol->used_bytes); |
97 | else if (attr == &vol_upd_marker) | 101 | else if (attr == &attr_vol_upd_marker) |
98 | ret = sprintf(buf, "%d\n", vol->upd_marker); | 102 | ret = sprintf(buf, "%d\n", vol->upd_marker); |
99 | else | 103 | else |
100 | BUG(); | 104 | BUG(); |
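From user space each attribute is a small text file under the directory named in the comment above; a self-contained example of reading one of them (the ubi0_0 path is hypothetical and depends on the attached device and volume):

    #include <stdio.h>

    int main(void)
    {
            char type[16];
            FILE *f = fopen("/sys/class/ubi/ubi0_0/type", "r"); /* hypothetical path */

            if (!f)
                    return 1;
            if (fgets(type, sizeof(type), f))
                    printf("volume type: %s", type); /* "dynamic" or "static" */
            fclose(f);
            return 0;
    }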
@@ -126,28 +130,28 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) | |||
126 | { | 130 | { |
127 | int err; | 131 | int err; |
128 | 132 | ||
129 | err = device_create_file(&vol->dev, &vol_reserved_ebs); | 133 | err = device_create_file(&vol->dev, &attr_vol_reserved_ebs); |
130 | if (err) | 134 | if (err) |
131 | return err; | 135 | return err; |
132 | err = device_create_file(&vol->dev, &vol_type); | 136 | err = device_create_file(&vol->dev, &attr_vol_type); |
133 | if (err) | 137 | if (err) |
134 | return err; | 138 | return err; |
135 | err = device_create_file(&vol->dev, &vol_name); | 139 | err = device_create_file(&vol->dev, &attr_vol_name); |
136 | if (err) | 140 | if (err) |
137 | return err; | 141 | return err; |
138 | err = device_create_file(&vol->dev, &vol_corrupted); | 142 | err = device_create_file(&vol->dev, &attr_vol_corrupted); |
139 | if (err) | 143 | if (err) |
140 | return err; | 144 | return err; |
141 | err = device_create_file(&vol->dev, &vol_alignment); | 145 | err = device_create_file(&vol->dev, &attr_vol_alignment); |
142 | if (err) | 146 | if (err) |
143 | return err; | 147 | return err; |
144 | err = device_create_file(&vol->dev, &vol_usable_eb_size); | 148 | err = device_create_file(&vol->dev, &attr_vol_usable_eb_size); |
145 | if (err) | 149 | if (err) |
146 | return err; | 150 | return err; |
147 | err = device_create_file(&vol->dev, &vol_data_bytes); | 151 | err = device_create_file(&vol->dev, &attr_vol_data_bytes); |
148 | if (err) | 152 | if (err) |
149 | return err; | 153 | return err; |
150 | err = device_create_file(&vol->dev, &vol_upd_marker); | 154 | err = device_create_file(&vol->dev, &attr_vol_upd_marker); |
151 | if (err) | 155 | if (err) |
152 | return err; | 156 | return err; |
153 | return 0; | 157 | return 0; |
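volume_sysfs_init() creates the files one by one and returns on the first failure; volume_sysfs_close() below removes them all. A common alternative in the kernel is to bundle the attributes into a group so that creation and removal are single calls; a sketch under that assumption (not what this driver does):

    static struct attribute *vol_attrs[] = {
            &attr_vol_reserved_ebs.attr,
            &attr_vol_type.attr,
            &attr_vol_name.attr,
            /* ... remaining attributes ... */
            NULL,
    };

    static const struct attribute_group vol_attr_group = {
            .attrs = vol_attrs,
    };

    /* err = sysfs_create_group(&vol->dev.kobj, &vol_attr_group); */
    /* sysfs_remove_group(&vol->dev.kobj, &vol_attr_group);       */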
@@ -159,14 +163,14 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) | |||
159 | */ | 163 | */ |
160 | static void volume_sysfs_close(struct ubi_volume *vol) | 164 | static void volume_sysfs_close(struct ubi_volume *vol) |
161 | { | 165 | { |
162 | device_remove_file(&vol->dev, &vol_upd_marker); | 166 | device_remove_file(&vol->dev, &attr_vol_upd_marker); |
163 | device_remove_file(&vol->dev, &vol_data_bytes); | 167 | device_remove_file(&vol->dev, &attr_vol_data_bytes); |
164 | device_remove_file(&vol->dev, &vol_usable_eb_size); | 168 | device_remove_file(&vol->dev, &attr_vol_usable_eb_size); |
165 | device_remove_file(&vol->dev, &vol_alignment); | 169 | device_remove_file(&vol->dev, &attr_vol_alignment); |
166 | device_remove_file(&vol->dev, &vol_corrupted); | 170 | device_remove_file(&vol->dev, &attr_vol_corrupted); |
167 | device_remove_file(&vol->dev, &vol_name); | 171 | device_remove_file(&vol->dev, &attr_vol_name); |
168 | device_remove_file(&vol->dev, &vol_type); | 172 | device_remove_file(&vol->dev, &attr_vol_type); |
169 | device_remove_file(&vol->dev, &vol_reserved_ebs); | 173 | device_remove_file(&vol->dev, &attr_vol_reserved_ebs); |
170 | device_unregister(&vol->dev); | 174 | device_unregister(&vol->dev); |
171 | } | 175 | } |
172 | 176 | ||
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index bc5df50813d6..25b3bd61c7ec 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -254,7 +254,7 @@ bad: | |||
254 | * This function returns zero in case of success and a negative error code in | 254 | * This function returns zero in case of success and a negative error code in |
255 | * case of failure. | 255 | * case of failure. |
256 | */ | 256 | */ |
257 | static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si, | 257 | static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si, |
258 | int copy, void *vtbl) | 258 | int copy, void *vtbl) |
259 | { | 259 | { |
260 | int err, tries = 0; | 260 | int err, tries = 0; |
@@ -264,7 +264,7 @@ static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si, | |||
264 | 264 | ||
265 | ubi_msg("create volume table (copy #%d)", copy + 1); | 265 | ubi_msg("create volume table (copy #%d)", copy + 1); |
266 | 266 | ||
267 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 267 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
268 | if (!vid_hdr) | 268 | if (!vid_hdr) |
269 | return -ENOMEM; | 269 | return -ENOMEM; |
270 | 270 | ||
@@ -339,7 +339,7 @@ out_free: | |||
339 | * not corrupted, and recovering from corruptions if needed. Returns volume | 339 | * not corrupted, and recovering from corruptions if needed. Returns volume |
340 | * table in case of success and a negative error code in case of failure. | 340 | * table in case of success and a negative error code in case of failure. |
341 | */ | 341 | */ |
342 | static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi, | 342 | static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, |
343 | struct ubi_scan_info *si, | 343 | struct ubi_scan_info *si, |
344 | struct ubi_scan_volume *sv) | 344 | struct ubi_scan_volume *sv) |
345 | { | 345 | { |
@@ -453,7 +453,7 @@ out_free: | |||
453 | * This function returns volume table contents in case of success and a | 453 | * This function returns volume table contents in case of success and a |
454 | * negative error code in case of failure. | 454 | * negative error code in case of failure. |
455 | */ | 455 | */ |
456 | static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi, | 456 | static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi, |
457 | struct ubi_scan_info *si) | 457 | struct ubi_scan_info *si) |
458 | { | 458 | { |
459 | int i; | 459 | int i; |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index a5a9b8d87302..a4f1bf33164a 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -208,7 +208,7 @@ struct ubi_work { | |||
208 | }; | 208 | }; |
209 | 209 | ||
210 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | 210 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
211 | static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec); | 211 | static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); |
212 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, | 212 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, |
213 | struct rb_root *root); | 213 | struct rb_root *root); |
214 | #else | 214 | #else |
@@ -220,17 +220,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, | |||
220 | static struct kmem_cache *wl_entries_slab; | 220 | static struct kmem_cache *wl_entries_slab; |
221 | 221 | ||
222 | /** | 222 | /** |
223 | * tree_empty - a helper function to check if an RB-tree is empty. | ||
224 | * @root: the root of the tree | ||
225 | * | ||
226 | * This function returns non-zero if the RB-tree is empty and zero if not. | ||
227 | */ | ||
228 | static inline int tree_empty(struct rb_root *root) | ||
229 | { | ||
230 | return root->rb_node == NULL; | ||
231 | } | ||
232 | |||
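The tree_empty() wrapper is removed and every call site below now tests root->rb_node directly, which is the same check the generic rbtree helper performs. A minimal illustration:

    #include <linux/types.h>
    #include <linux/rbtree.h>

    static bool wl_tree_is_empty(struct rb_root *root)
    {
            /* an rb_root with no nodes has a NULL rb_node pointer */
            return !root->rb_node;          /* equivalent to RB_EMPTY_ROOT(root) */
    }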
233 | /** | ||
234 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. | 223 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. |
235 | * @e: the wear-leveling entry to add | 224 | * @e: the wear-leveling entry to add |
236 | * @root: the root of the tree | 225 | * @root: the root of the tree |
@@ -266,45 +255,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) | |||
266 | rb_insert_color(&e->rb, root); | 255 | rb_insert_color(&e->rb, root); |
267 | } | 256 | } |
268 | 257 | ||
269 | |||
270 | /* | ||
271 | * Helper functions to add and delete wear-leveling entries from different | ||
272 | * trees. | ||
273 | */ | ||
274 | |||
275 | static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | ||
276 | { | ||
277 | wl_tree_add(e, &ubi->free); | ||
278 | } | ||
279 | static inline void used_tree_add(struct ubi_device *ubi, | ||
280 | struct ubi_wl_entry *e) | ||
281 | { | ||
282 | wl_tree_add(e, &ubi->used); | ||
283 | } | ||
284 | static inline void scrub_tree_add(struct ubi_device *ubi, | ||
285 | struct ubi_wl_entry *e) | ||
286 | { | ||
287 | wl_tree_add(e, &ubi->scrub); | ||
288 | } | ||
289 | static inline void free_tree_del(struct ubi_device *ubi, | ||
290 | struct ubi_wl_entry *e) | ||
291 | { | ||
292 | paranoid_check_in_wl_tree(e, &ubi->free); | ||
293 | rb_erase(&e->rb, &ubi->free); | ||
294 | } | ||
295 | static inline void used_tree_del(struct ubi_device *ubi, | ||
296 | struct ubi_wl_entry *e) | ||
297 | { | ||
298 | paranoid_check_in_wl_tree(e, &ubi->used); | ||
299 | rb_erase(&e->rb, &ubi->used); | ||
300 | } | ||
301 | static inline void scrub_tree_del(struct ubi_device *ubi, | ||
302 | struct ubi_wl_entry *e) | ||
303 | { | ||
304 | paranoid_check_in_wl_tree(e, &ubi->scrub); | ||
305 | rb_erase(&e->rb, &ubi->scrub); | ||
306 | } | ||
307 | |||
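The per-tree add/del wrappers removed here are open-coded at their call sites, which keeps the paranoid check visible right next to the rb_erase(). The replacement pattern as it now appears below:

    /* was used_tree_del(ubi, e): */
    paranoid_check_in_wl_tree(e, &ubi->used);
    rb_erase(&e->rb, &ubi->used);

    /* was free_tree_add(ubi, e): */
    wl_tree_add(e, &ubi->free);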
308 | /** | 258 | /** |
309 | * do_work - do one pending work. | 259 | * do_work - do one pending work. |
310 | * @ubi: UBI device description object | 260 | * @ubi: UBI device description object |
@@ -358,7 +308,7 @@ static int produce_free_peb(struct ubi_device *ubi) | |||
358 | int err; | 308 | int err; |
359 | 309 | ||
360 | spin_lock(&ubi->wl_lock); | 310 | spin_lock(&ubi->wl_lock); |
361 | while (tree_empty(&ubi->free)) { | 311 | while (!ubi->free.rb_node) { |
362 | spin_unlock(&ubi->wl_lock); | 312 | spin_unlock(&ubi->wl_lock); |
363 | 313 | ||
364 | dbg_wl("do one work synchronously"); | 314 | dbg_wl("do one work synchronously"); |
@@ -508,13 +458,13 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) | |||
508 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || | 458 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || |
509 | dtype == UBI_UNKNOWN); | 459 | dtype == UBI_UNKNOWN); |
510 | 460 | ||
511 | pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL); | 461 | pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); |
512 | if (!pe) | 462 | if (!pe) |
513 | return -ENOMEM; | 463 | return -ENOMEM; |
514 | 464 | ||
515 | retry: | 465 | retry: |
516 | spin_lock(&ubi->wl_lock); | 466 | spin_lock(&ubi->wl_lock); |
517 | if (tree_empty(&ubi->free)) { | 467 | if (!ubi->free.rb_node) { |
518 | if (ubi->works_count == 0) { | 468 | if (ubi->works_count == 0) { |
519 | ubi_assert(list_empty(&ubi->works)); | 469 | ubi_assert(list_empty(&ubi->works)); |
520 | ubi_err("no free eraseblocks"); | 470 | ubi_err("no free eraseblocks"); |
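Allocations in this file switch from GFP_KERNEL to GFP_NOFS, presumably because the wear-leveling paths can be entered from a filesystem running on top of UBI, where a GFP_KERNEL allocation could recurse into filesystem reclaim. The pattern, as in the hunk above:

    /* allocation that may run on behalf of a filesystem: forbid the
     * allocator from calling back into filesystem code to reclaim memory */
    pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
    if (!pe)
            return -ENOMEM;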
@@ -585,7 +535,8 @@ retry: | |||
585 | * Move the physical eraseblock to the protection trees where it will | 535 | * Move the physical eraseblock to the protection trees where it will |
586 | * be protected from being moved for some time. | 536 | * be protected from being moved for some time. |
587 | */ | 537 | */ |
588 | free_tree_del(ubi, e); | 538 | paranoid_check_in_wl_tree(e, &ubi->free); |
539 | rb_erase(&e->rb, &ubi->free); | ||
589 | prot_tree_add(ubi, e, pe, protect); | 540 | prot_tree_add(ubi, e, pe, protect); |
590 | 541 | ||
591 | dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); | 542 | dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); |
@@ -645,7 +596,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur | |||
645 | if (err > 0) | 596 | if (err > 0) |
646 | return -EINVAL; | 597 | return -EINVAL; |
647 | 598 | ||
648 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 599 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
649 | if (!ec_hdr) | 600 | if (!ec_hdr) |
650 | return -ENOMEM; | 601 | return -ENOMEM; |
651 | 602 | ||
@@ -704,7 +655,7 @@ static void check_protection_over(struct ubi_device *ubi) | |||
704 | */ | 655 | */ |
705 | while (1) { | 656 | while (1) { |
706 | spin_lock(&ubi->wl_lock); | 657 | spin_lock(&ubi->wl_lock); |
707 | if (tree_empty(&ubi->prot.aec)) { | 658 | if (!ubi->prot.aec.rb_node) { |
708 | spin_unlock(&ubi->wl_lock); | 659 | spin_unlock(&ubi->wl_lock); |
709 | break; | 660 | break; |
710 | } | 661 | } |
@@ -721,7 +672,7 @@ static void check_protection_over(struct ubi_device *ubi) | |||
721 | pe->e->pnum, ubi->abs_ec, pe->abs_ec); | 672 | pe->e->pnum, ubi->abs_ec, pe->abs_ec); |
722 | rb_erase(&pe->rb_aec, &ubi->prot.aec); | 673 | rb_erase(&pe->rb_aec, &ubi->prot.aec); |
723 | rb_erase(&pe->rb_pnum, &ubi->prot.pnum); | 674 | rb_erase(&pe->rb_pnum, &ubi->prot.pnum); |
724 | used_tree_add(ubi, pe->e); | 675 | wl_tree_add(pe->e, &ubi->used); |
725 | spin_unlock(&ubi->wl_lock); | 676 | spin_unlock(&ubi->wl_lock); |
726 | 677 | ||
727 | kfree(pe); | 678 | kfree(pe); |
@@ -768,7 +719,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
768 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | 719 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", |
769 | e->pnum, e->ec, torture); | 720 | e->pnum, e->ec, torture); |
770 | 721 | ||
771 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | 722 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
772 | if (!wl_wrk) | 723 | if (!wl_wrk) |
773 | return -ENOMEM; | 724 | return -ENOMEM; |
774 | 725 | ||
@@ -802,7 +753,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
802 | if (cancel) | 753 | if (cancel) |
803 | return 0; | 754 | return 0; |
804 | 755 | ||
805 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 756 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
806 | if (!vid_hdr) | 757 | if (!vid_hdr) |
807 | return -ENOMEM; | 758 | return -ENOMEM; |
808 | 759 | ||
@@ -812,8 +763,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
812 | * Only one WL worker at a time is supported in this implementation, so | 763 |
813 | * make sure a PEB is not being moved already. | 764 | * make sure a PEB is not being moved already. |
814 | */ | 765 | */ |
815 | if (ubi->move_to || tree_empty(&ubi->free) || | 766 | if (ubi->move_to || !ubi->free.rb_node || |
816 | (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) { | 767 | (!ubi->used.rb_node && !ubi->scrub.rb_node)) { |
817 | /* | 768 | /* |
818 | * Only one WL worker at a time is supported in this | 769 |
819 | * implementation, so if a LEB is already being moved, cancel. | 770 | * implementation, so if a LEB is already being moved, cancel. |
@@ -828,14 +779,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
828 | * triggered again. | 779 | * triggered again. |
829 | */ | 780 | */ |
830 | dbg_wl("cancel WL, a list is empty: free %d, used %d", | 781 | dbg_wl("cancel WL, a list is empty: free %d, used %d", |
831 | tree_empty(&ubi->free), tree_empty(&ubi->used)); | 782 | !ubi->free.rb_node, !ubi->used.rb_node); |
832 | ubi->wl_scheduled = 0; | 783 | ubi->wl_scheduled = 0; |
833 | spin_unlock(&ubi->wl_lock); | 784 | spin_unlock(&ubi->wl_lock); |
834 | ubi_free_vid_hdr(ubi, vid_hdr); | 785 | ubi_free_vid_hdr(ubi, vid_hdr); |
835 | return 0; | 786 | return 0; |
836 | } | 787 | } |
837 | 788 | ||
838 | if (tree_empty(&ubi->scrub)) { | 789 | if (!ubi->scrub.rb_node) { |
839 | /* | 790 | /* |
840 | * Now pick the least worn-out used physical eraseblock and a | 791 | * Now pick the least worn-out used physical eraseblock and a |
841 | * highly worn-out free physical eraseblock. If the erase | 792 | * highly worn-out free physical eraseblock. If the erase |
@@ -852,17 +803,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
852 | ubi_free_vid_hdr(ubi, vid_hdr); | 803 | ubi_free_vid_hdr(ubi, vid_hdr); |
853 | return 0; | 804 | return 0; |
854 | } | 805 | } |
855 | used_tree_del(ubi, e1); | 806 | paranoid_check_in_wl_tree(e1, &ubi->used); |
807 | rb_erase(&e1->rb, &ubi->used); | ||
856 | dbg_wl("move PEB %d EC %d to PEB %d EC %d", | 808 | dbg_wl("move PEB %d EC %d to PEB %d EC %d", |
857 | e1->pnum, e1->ec, e2->pnum, e2->ec); | 809 | e1->pnum, e1->ec, e2->pnum, e2->ec); |
858 | } else { | 810 | } else { |
859 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); | 811 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); |
860 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 812 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); |
861 | scrub_tree_del(ubi, e1); | 813 | paranoid_check_in_wl_tree(e1, &ubi->scrub); |
814 | rb_erase(&e1->rb, &ubi->scrub); | ||
862 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); | 815 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); |
863 | } | 816 | } |
864 | 817 | ||
865 | free_tree_del(ubi, e2); | 818 | paranoid_check_in_wl_tree(e2, &ubi->free); |
819 | rb_erase(&e2->rb, &ubi->free); | ||
866 | ubi_assert(!ubi->move_from && !ubi->move_to); | 820 | ubi_assert(!ubi->move_from && !ubi->move_to); |
867 | ubi_assert(!ubi->move_to_put && !ubi->move_from_put); | 821 | ubi_assert(!ubi->move_to_put && !ubi->move_from_put); |
868 | ubi->move_from = e1; | 822 | ubi->move_from = e1; |
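In the hunk above the worker pairs the eraseblock to move (e1) with a free target (e2): when nothing needs scrubbing, e1 is the least-worn used PEB and e2 a more worn free one chosen by find_wl_entry(); when scrubbing is pending, e1 comes from the scrub tree instead. Condensed from the diff (the used-branch selection of e1 lies outside the visible lines and is an assumption):

    if (!ubi->scrub.rb_node) {
            e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); /* assumed */
            e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
            paranoid_check_in_wl_tree(e1, &ubi->used);
            rb_erase(&e1->rb, &ubi->used);
    } else {
            e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
            e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
            paranoid_check_in_wl_tree(e1, &ubi->scrub);
            rb_erase(&e1->rb, &ubi->scrub);
    }
    paranoid_check_in_wl_tree(e2, &ubi->free);
    rb_erase(&e2->rb, &ubi->free);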
@@ -908,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
908 | ubi_free_vid_hdr(ubi, vid_hdr); | 862 | ubi_free_vid_hdr(ubi, vid_hdr); |
909 | spin_lock(&ubi->wl_lock); | 863 | spin_lock(&ubi->wl_lock); |
910 | if (!ubi->move_to_put) | 864 | if (!ubi->move_to_put) |
911 | used_tree_add(ubi, e2); | 865 | wl_tree_add(e2, &ubi->used); |
912 | else | 866 | else |
913 | put = 1; | 867 | put = 1; |
914 | ubi->move_from = ubi->move_to = NULL; | 868 | ubi->move_from = ubi->move_to = NULL; |
@@ -953,7 +907,7 @@ error: | |||
953 | if (ubi->move_from_put) | 907 | if (ubi->move_from_put) |
954 | put = 1; | 908 | put = 1; |
955 | else | 909 | else |
956 | used_tree_add(ubi, e1); | 910 | wl_tree_add(e1, &ubi->used); |
957 | ubi->move_from = ubi->move_to = NULL; | 911 | ubi->move_from = ubi->move_to = NULL; |
958 | ubi->move_from_put = ubi->move_to_put = 0; | 912 | ubi->move_from_put = ubi->move_to_put = 0; |
959 | spin_unlock(&ubi->wl_lock); | 913 | spin_unlock(&ubi->wl_lock); |
@@ -1005,8 +959,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
1005 | * If the ubi->scrub tree is not empty, scrubbing is needed, and the | 959 | * If the ubi->scrub tree is not empty, scrubbing is needed, and the |
1006 | * WL worker has to be scheduled anyway. | 960 |
1007 | */ | 961 | */ |
1008 | if (tree_empty(&ubi->scrub)) { | 962 | if (!ubi->scrub.rb_node) { |
1009 | if (tree_empty(&ubi->used) || tree_empty(&ubi->free)) | 963 | if (!ubi->used.rb_node || !ubi->free.rb_node) |
1010 | /* No physical eraseblocks - no deal */ | 964 | /* No physical eraseblocks - no deal */ |
1011 | goto out_unlock; | 965 | goto out_unlock; |
1012 | 966 | ||
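Besides "both trees non-empty", the worker is only scheduled when the erase-counter spread is large enough; the check just past this hunk compares the lowest-EC used PEB against a worn free PEB and the UBI_WL_THRESHOLD constant. A hedged reconstruction (the exact lines are not part of this hunk):

    e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
    e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

    if (e2->ec - e1->ec < UBI_WL_THRESHOLD)
            goto out_unlock;        /* wear is still balanced enough */
    dbg_wl("schedule wear-leveling");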
@@ -1028,7 +982,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
1028 | ubi->wl_scheduled = 1; | 982 | ubi->wl_scheduled = 1; |
1029 | spin_unlock(&ubi->wl_lock); | 983 | spin_unlock(&ubi->wl_lock); |
1030 | 984 | ||
1031 | wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | 985 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
1032 | if (!wrk) { | 986 | if (!wrk) { |
1033 | err = -ENOMEM; | 987 | err = -ENOMEM; |
1034 | goto out_cancel; | 988 | goto out_cancel; |
@@ -1079,7 +1033,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
1079 | 1033 | ||
1080 | spin_lock(&ubi->wl_lock); | 1034 | spin_lock(&ubi->wl_lock); |
1081 | ubi->abs_ec += 1; | 1035 | ubi->abs_ec += 1; |
1082 | free_tree_add(ubi, e); | 1036 | wl_tree_add(e, &ubi->free); |
1083 | spin_unlock(&ubi->wl_lock); | 1037 | spin_unlock(&ubi->wl_lock); |
1084 | 1038 | ||
1085 | /* | 1039 | /* |
@@ -1093,6 +1047,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
1093 | return err; | 1047 | return err; |
1094 | } | 1048 | } |
1095 | 1049 | ||
1050 | ubi_err("failed to erase PEB %d, error %d", pnum, err); | ||
1096 | kfree(wl_wrk); | 1051 | kfree(wl_wrk); |
1097 | kmem_cache_free(wl_entries_slab, e); | 1052 | kmem_cache_free(wl_entries_slab, e); |
1098 | 1053 | ||
@@ -1211,11 +1166,13 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) | |||
1211 | spin_unlock(&ubi->wl_lock); | 1166 | spin_unlock(&ubi->wl_lock); |
1212 | return 0; | 1167 | return 0; |
1213 | } else { | 1168 | } else { |
1214 | if (in_wl_tree(e, &ubi->used)) | 1169 | if (in_wl_tree(e, &ubi->used)) { |
1215 | used_tree_del(ubi, e); | 1170 | paranoid_check_in_wl_tree(e, &ubi->used); |
1216 | else if (in_wl_tree(e, &ubi->scrub)) | 1171 | rb_erase(&e->rb, &ubi->used); |
1217 | scrub_tree_del(ubi, e); | 1172 | } else if (in_wl_tree(e, &ubi->scrub)) { |
1218 | else | 1173 | paranoid_check_in_wl_tree(e, &ubi->scrub); |
1174 | rb_erase(&e->rb, &ubi->scrub); | ||
1175 | } else | ||
1219 | prot_tree_del(ubi, e->pnum); | 1176 | prot_tree_del(ubi, e->pnum); |
1220 | } | 1177 | } |
1221 | spin_unlock(&ubi->wl_lock); | 1178 | spin_unlock(&ubi->wl_lock); |
@@ -1223,7 +1180,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) | |||
1223 | err = schedule_erase(ubi, e, torture); | 1180 | err = schedule_erase(ubi, e, torture); |
1224 | if (err) { | 1181 | if (err) { |
1225 | spin_lock(&ubi->wl_lock); | 1182 | spin_lock(&ubi->wl_lock); |
1226 | used_tree_add(ubi, e); | 1183 | wl_tree_add(e, &ubi->used); |
1227 | spin_unlock(&ubi->wl_lock); | 1184 | spin_unlock(&ubi->wl_lock); |
1228 | } | 1185 | } |
1229 | 1186 | ||
@@ -1267,12 +1224,13 @@ retry: | |||
1267 | goto retry; | 1224 | goto retry; |
1268 | } | 1225 | } |
1269 | 1226 | ||
1270 | if (in_wl_tree(e, &ubi->used)) | 1227 | if (in_wl_tree(e, &ubi->used)) { |
1271 | used_tree_del(ubi, e); | 1228 | paranoid_check_in_wl_tree(e, &ubi->used); |
1272 | else | 1229 | rb_erase(&e->rb, &ubi->used); |
1230 | } else | ||
1273 | prot_tree_del(ubi, pnum); | 1231 | prot_tree_del(ubi, pnum); |
1274 | 1232 | ||
1275 | scrub_tree_add(ubi, e); | 1233 | wl_tree_add(e, &ubi->scrub); |
1276 | spin_unlock(&ubi->wl_lock); | 1234 | spin_unlock(&ubi->wl_lock); |
1277 | 1235 | ||
1278 | /* | 1236 | /* |
@@ -1488,7 +1446,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1488 | e->pnum = seb->pnum; | 1446 | e->pnum = seb->pnum; |
1489 | e->ec = seb->ec; | 1447 | e->ec = seb->ec; |
1490 | ubi_assert(e->ec >= 0); | 1448 | ubi_assert(e->ec >= 0); |
1491 | free_tree_add(ubi, e); | 1449 | wl_tree_add(e, &ubi->free); |
1492 | ubi->lookuptbl[e->pnum] = e; | 1450 | ubi->lookuptbl[e->pnum] = e; |
1493 | } | 1451 | } |
1494 | 1452 | ||
@@ -1522,16 +1480,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1522 | if (!seb->scrub) { | 1480 | if (!seb->scrub) { |
1523 | dbg_wl("add PEB %d EC %d to the used tree", | 1481 | dbg_wl("add PEB %d EC %d to the used tree", |
1524 | e->pnum, e->ec); | 1482 | e->pnum, e->ec); |
1525 | used_tree_add(ubi, e); | 1483 | wl_tree_add(e, &ubi->used); |
1526 | } else { | 1484 | } else { |
1527 | dbg_wl("add PEB %d EC %d to the scrub tree", | 1485 | dbg_wl("add PEB %d EC %d to the scrub tree", |
1528 | e->pnum, e->ec); | 1486 | e->pnum, e->ec); |
1529 | scrub_tree_add(ubi, e); | 1487 | wl_tree_add(e, &ubi->scrub); |
1530 | } | 1488 | } |
1531 | } | 1489 | } |
1532 | } | 1490 | } |
1533 | 1491 | ||
1534 | if (WL_RESERVED_PEBS > ubi->avail_pebs) { | 1492 | if (ubi->avail_pebs < WL_RESERVED_PEBS) { |
1535 | ubi_err("not enough physical eraseblocks (%d, need %d)", | 1493 |
1536 | ubi->avail_pebs, WL_RESERVED_PEBS); | 1494 | ubi->avail_pebs, WL_RESERVED_PEBS); |
1537 | goto out_free; | 1495 | goto out_free; |
@@ -1624,13 +1582,13 @@ void ubi_wl_close(struct ubi_device *ubi) | |||
1624 | * is equivalent to @ec, %1 if not, and a negative error code if an error | 1582 | * is equivalent to @ec, %1 if not, and a negative error code if an error |
1625 | * occurred. | 1583 | * occurred. |
1626 | */ | 1584 | */ |
1627 | static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec) | 1585 | static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec) |
1628 | { | 1586 | { |
1629 | int err; | 1587 | int err; |
1630 | long long read_ec; | 1588 | long long read_ec; |
1631 | struct ubi_ec_hdr *ec_hdr; | 1589 | struct ubi_ec_hdr *ec_hdr; |
1632 | 1590 | ||
1633 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1591 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
1634 | if (!ec_hdr) | 1592 | if (!ec_hdr) |
1635 | return -ENOMEM; | 1593 | return -ENOMEM; |
1636 | 1594 | ||