Diffstat (limited to 'drivers/mtd/ubi/eba.c')
 drivers/mtd/ubi/eba.c | 321
 1 file changed, 173 insertions(+), 148 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..7ce91ca742b1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
  * logical eraseblock it is locked for reading or writing. The per-logical
  * eraseblock locking is implemented by means of the lock tree. The lock tree
  * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
  * (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
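
For reference: after this rename the entry type is no longer defined in this
file; judging by the ubi_ prefix and by the local definition removed in the
next hunk, it presumably moves to ubi.h. A sketch of the entry built from the
removed fields (placement and comments are assumptions, the fields are
verbatim):

	struct ubi_ltree_entry {
		struct rb_node rb;          /* links RB-tree nodes */
		int vol_id;                 /* volume ID of the locked LEB */
		int lnum;                   /* locked logical eraseblock number */
		int users;                  /* tasks using or waiting for this LEB */
		struct rw_semaphore mutex;  /* read/write access serialization */
	};
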
@@ -50,29 +50,6 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- *         the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
-	struct rb_node rb;
-	int vol_id;
-	int lnum;
-	int users;
-	struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
  */
 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
 {
-	if (vol_id == UBI_LAYOUT_VOL_ID)
+	if (vol_id == UBI_LAYOUT_VOLUME_ID)
 		return UBI_LAYOUT_VOLUME_COMPAT;
 	return 0;
 }
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  * @vol_id: volume ID
  * @lnum: logical eraseblock number
  *
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  * object if the logical eraseblock is locked and %NULL if it is not.
  * @ubi->ltree_lock has to be locked.
  */
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
-					int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+					    int lnum)
 {
 	struct rb_node *p;
 
 	p = ubi->ltree.rb_node;
 	while (p) {
-		struct ltree_entry *le;
+		struct ubi_ltree_entry *le;
 
-		le = rb_entry(p, struct ltree_entry, rb);
+		le = rb_entry(p, struct ubi_ltree_entry, rb);
 
 		if (vol_id < le->vol_id)
 			p = p->rb_left;
@@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  * failed.
  */
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
-					   int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+					       int vol_id, int lnum)
 {
-	struct ltree_entry *le, *le1, *le_free;
+	struct ubi_ltree_entry *le, *le1, *le_free;
 
-	le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
 	if (!le)
 		return ERR_PTR(-ENOMEM);
 
+	le->users = 0;
+	init_rwsem(&le->mutex);
 	le->vol_id = vol_id;
 	le->lnum = lnum;
 
@@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 	p = &ubi->ltree.rb_node;
 	while (*p) {
 		parent = *p;
-		le1 = rb_entry(parent, struct ltree_entry, rb);
+		le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
 
 		if (vol_id < le1->vol_id)
 			p = &(*p)->rb_left;
@@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 	spin_unlock(&ubi->ltree_lock);
 
 	if (le_free)
-		kmem_cache_free(ltree_slab, le_free);
+		kfree(le_free);
 
 	return le;
 }
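
Note the shape 'ltree_add_entry()' takes after these hunks: the entry is
allocated with kmalloc() and fully initialized before @ubi->ltree_lock is
taken (the slab constructor that used to set @users and @mutex is deleted
further down), and if a concurrent task inserted an entry first, the spare
one is freed after the lock is dropped. A minimal userspace model of that
pattern, with illustrative names only:

	#include <pthread.h>
	#include <stdlib.h>

	struct entry { int vol_id, lnum, users; };

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct entry *get_entry(struct entry **slot, int vol_id, int lnum)
	{
		struct entry *le, *spare = NULL;

		le = malloc(sizeof(*le));  /* "sleeping" allocation, done unlocked */
		if (!le)
			return NULL;
		le->vol_id = vol_id;       /* no constructor cache any more, so */
		le->lnum = lnum;           /* every field is initialized on     */
		le->users = 0;             /* every allocation                  */

		pthread_mutex_lock(&table_lock);
		if (*slot) {               /* lost the race: reuse the winner's */
			spare = le;        /* entry and discard ours            */
			le = *slot;
		} else
			*slot = le;
		le->users += 1;
		pthread_mutex_unlock(&table_lock);

		free(spare);               /* free(NULL) is a no-op */
		return le;
	}
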
@@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
  */
 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free = 0;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_read(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kfree(le);
 }
 
 /**
@@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 }
 
 /**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+	int free;
+	struct ubi_ltree_entry *le;
+
+	le = ltree_add_entry(ubi, vol_id, lnum);
+	if (IS_ERR(le))
+		return PTR_ERR(le);
+	if (down_write_trylock(&le->mutex))
+		return 0;
+
+	/* Contention, cancel */
+	spin_lock(&ubi->ltree_lock);
+	le->users -= 1;
+	ubi_assert(le->users >= 0);
+	if (le->users == 0) {
+		rb_erase(&le->rb, &ubi->ltree);
+		free = 1;
+	} else
+		free = 0;
+	spin_unlock(&ubi->ltree_lock);
+	if (free)
+		kfree(le);
+
+	return 1;
+}
+
+/**
  * leb_write_unlock - unlock logical eraseblock.
  * @ubi: UBI device description object
  * @vol_id: volume ID
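
The calling convention for the new helper, modelled on how the
'ubi_eba_copy_leb()' hunks further down consume it (the wrapper name here is
hypothetical, not driver code):

	static int try_move_leb(struct ubi_device *ubi, int vol_id, int lnum)
	{
		int err;

		err = leb_write_trylock(ubi, vol_id, lnum);
		if (err)
			return err; /* <0: hard error; 1: contention, back off */

		/* LEB is exclusively locked here; do the work, then drop it */

		leb_write_unlock(ubi, vol_id, lnum);
		return 0;
	}
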
@@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_write(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kfree(le);
 }
 
 /**
  * ubi_eba_unmap_leb - un-map logical eraseblock.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  *
  * This function un-maps logical eraseblock @lnum and schedules corresponding
  * physical eraseblock for erasure. Returns zero in case of success and a
  * negative error code in case of failure.
  */
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+		      int lnum)
 {
-	int idx = vol_id2idx(ubi, vol_id), err, pnum;
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, vol_id = vol->vol_id;
 
 	if (ubi->ro_mode)
 		return -EROFS;
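
The same signature change repeats for all the EBA entry points below: callers
now pass the &struct ubi_volume they already hold instead of a bare volume ID,
so each function derives @vol_id from @vol->vol_id rather than doing the
vol_id2idx()/@ubi->volumes[] lookup itself. A hypothetical call-site
adjustment (the real callers are outside this diff):

	/* before */
	err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);

	/* after */
	err = ubi_eba_unmap_leb(ubi, vol, lnum);
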
@@ -349,7 +366,7 @@ out_unlock:
 /**
  * ubi_eba_read_leb - read data.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: buffer to store the read data
  * @offset: offset from where to read
@@ -365,12 +382,11 @@ out_unlock:
  * returned for any volume type if an ECC error was detected by the MTD device
  * driver. Other negative error cored may be returned in case of other errors.
  */
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
-		     int offset, int len, int check)
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+		     void *buf, int offset, int len, int check)
 {
-	int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
+	int err, pnum, scrub = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
-	struct ubi_volume *vol = ubi->volumes[idx];
 	uint32_t uninitialized_var(crc);
 
 	err = leb_read_lock(ubi, vol_id, lnum);
@@ -578,7 +594,7 @@ write_error:
 /**
  * ubi_eba_write_leb - write data to dynamic volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: the data to write
  * @offset: offset within the logical eraseblock where to write
@@ -586,15 +602,14 @@ write_error:
  * @dtype: data type
  *
  * This function writes data to logical eraseblock @lnum of a dynamic volume
- * @vol_id. Returns zero in case of success and a negative error code in case
+ * @vol. Returns zero in case of success and a negative error code in case
  * of failure. In case of error, it is possible that something was still
  * written to the flash media, but may be some garbage.
  */
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		      const void *buf, int offset, int len, int dtype)
 {
-	int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 
 	if (ubi->ro_mode)
@@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
 	if (err) {
 		ubi_warn("failed to write data to PEB %d", pnum);
 		if (err == -EIO && ubi->bad_allowed)
-			err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
+			err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+					  offset, len);
 		if (err)
 			ubi_ro_mode(ubi);
 	}
@@ -656,11 +672,14 @@ retry:
 		goto write_error;
 	}
 
-	err = ubi_io_write_data(ubi, buf, pnum, offset, len);
-	if (err) {
-		ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
-			 "PEB %d", len, offset, vol_id, lnum, pnum);
-		goto write_error;
+	if (len) {
+		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+		if (err) {
+			ubi_warn("failed to write %d bytes at offset %d of "
+				 "LEB %d:%d, PEB %d", len, offset, vol_id,
+				 lnum, pnum);
+			goto write_error;
+		}
 	}
 
 	vol->eba_tbl[lnum] = pnum;
@@ -698,7 +717,7 @@ write_error:
 /**
  * ubi_eba_write_leb_st - write data to static volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
@@ -706,7 +725,7 @@ write_error:
  * @used_ebs: how many logical eraseblocks will this volume contain
  *
  * This function writes data to logical eraseblock @lnum of static volume
- * @vol_id. The @used_ebs argument should contain total number of logical
+ * @vol. The @used_ebs argument should contain total number of logical
  * eraseblock in this static volume.
  *
  * When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,12 +737,11 @@ write_error:
  * volumes. This function returns zero in case of success and a negative error
  * code in case of failure.
  */
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
-			 const void *buf, int len, int dtype, int used_ebs)
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+			 int lnum, const void *buf, int len, int dtype,
+			 int used_ebs)
 {
-	int err, pnum, tries = 0, data_size = len;
-	int idx = vol_id2idx(ubi, vol_id);
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -819,7 +837,7 @@ write_error:
 /*
  * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
@@ -834,17 +852,27 @@ write_error:
  * UBI reserves one LEB for the "atomic LEB change" operation, so only one
  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
-			      const void *buf, int len, int dtype)
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			      int lnum, const void *buf, int len, int dtype)
 {
-	int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
 	if (ubi->ro_mode)
 		return -EROFS;
 
+	if (len == 0) {
+		/*
+		 * Special case when data length is zero. In this case the LEB
+		 * has to be unmapped and mapped somewhere else.
+		 */
+		err = ubi_eba_unmap_leb(ubi, vol, lnum);
+		if (err)
+			return err;
+		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+	}
+
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
 		return -ENOMEM;
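
With that hunk, a zero-length atomic change degenerates into "unmap, then
write a zero-length LEB", and per the "if (len)" hunk above the fresh PEB
gets only its headers written, no data. A hedged usage sketch (variables
assumed in scope):

	/* atomically empty LEB @lnum: no data buffer needed */
	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, NULL, 0, dtype);
	if (err)
		return err;
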
@@ -928,20 +956,6 @@ write_error:
 }
 
 /**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-	struct ltree_entry *le = obj;
-
-	le->users = 0;
-	init_rwsem(&le->mutex);
-}
-
-/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
  *
  * This function copies logical eraseblock from physical eraseblock @from to
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
- * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
- * was canceled because bit-flips were detected at the target PEB, and a
- * negative error code in case of failure.
+ * function. Returns:
+ *   o %0 in case of success;
+ *   o %1 if the operation was canceled and should be tried later (e.g.,
+ *     because a bit-flip was detected at the target PEB);
+ *   o %2 if the volume is being deleted and this LEB should not be moved.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr)
 {
-	int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
+	int err, vol_id, lnum, data_size, aldata_size, idx;
 	struct ubi_volume *vol;
 	uint32_t crc;
 
@@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	data_size = aldata_size =
 		ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
 
-	/*
-	 * We do not want anybody to write to this logical eraseblock while we
-	 * are moving it, so we lock it.
-	 */
-	err = leb_write_lock(ubi, vol_id, lnum);
-	if (err)
-		return err;
-
-	mutex_lock(&ubi->buf_mutex);
-
-	/*
-	 * But the logical eraseblock might have been put by this time.
-	 * Cancel if it is true.
-	 */
 	idx = vol_id2idx(ubi, vol_id);
-
+	spin_lock(&ubi->volumes_lock);
 	/*
-	 * We may race with volume deletion/re-size, so we have to hold
-	 * @ubi->volumes_lock.
+	 * Note, we may race with volume deletion, which means that the volume
+	 * this logical eraseblock belongs to might be being deleted. Since the
+	 * volume deletion unmaps all the volume's logical eraseblocks, it will
+	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
 	 */
-	spin_lock(&ubi->volumes_lock);
 	vol = ubi->volumes[idx];
 	if (!vol) {
-		dbg_eba("volume %d was removed meanwhile", vol_id);
+		/* No need to do further work, cancel */
+		dbg_eba("volume %d is being removed, cancel", vol_id);
 		spin_unlock(&ubi->volumes_lock);
-		goto out_unlock;
+		return 2;
 	}
+	spin_unlock(&ubi->volumes_lock);
 
-	pnum = vol->eba_tbl[lnum];
-	if (pnum != from) {
-		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-			"PEB %d, cancel", vol_id, lnum, from, pnum);
-		spin_unlock(&ubi->volumes_lock);
-		goto out_unlock;
+	/*
+	 * We do not want anybody to write to this logical eraseblock while we
+	 * are moving it, so lock it.
+	 *
+	 * Note, we are using non-waiting locking here, because we cannot sleep
+	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+	 * unmapping the LEB which is mapped to the PEB we are going to move
+	 * (@from). This task locks the LEB and goes to sleep in the
+	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+	 * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
+	 * LEB is already locked, we just do not move it and return %1.
+	 */
+	err = leb_write_trylock(ubi, vol_id, lnum);
+	if (err) {
+		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+		return err;
 	}
-	spin_unlock(&ubi->volumes_lock);
 
-	/* OK, now the LEB is locked and we can safely start moving it */
+	/*
+	 * The LEB might have been put meanwhile, and the task which put it is
+	 * probably waiting on @ubi->move_mutex. No need to continue the work,
+	 * cancel it.
+	 */
+	if (vol->eba_tbl[lnum] != from) {
+		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+			"PEB %d, cancel", vol_id, lnum, from,
+			vol->eba_tbl[lnum]);
+		err = 1;
+		goto out_unlock_leb;
+	}
 
+	/*
+	 * OK, now the LEB is locked and we can safely start moving it. Since
+	 * this function utilizes the @ubi->peb_buf1 buffer which is shared
+	 * with some other functions, lock the buffer by taking the
+	 * @ubi->buf_mutex.
+	 */
+	mutex_lock(&ubi->buf_mutex);
 	dbg_eba("read %d bytes of data", aldata_size);
 	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
 	if (err && err != UBI_IO_BITFLIPS) {
 		ubi_warn("error %d while reading data from PEB %d",
 			 err, from);
-		goto out_unlock;
+		goto out_unlock_buf;
 	}
 
 	/*
@@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
 	if (err)
-		goto out_unlock;
+		goto out_unlock_buf;
 
 	cond_resched();
 
@@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	if (err) {
 		if (err != UBI_IO_BITFLIPS)
 			ubi_warn("cannot read VID header back from PEB %d", to);
-		goto out_unlock;
+		else
+			err = 1;
+		goto out_unlock_buf;
 	}
 
 	if (data_size > 0) {
 		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
 		if (err)
-			goto out_unlock;
+			goto out_unlock_buf;
 
 		cond_resched();
 
@@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 			if (err != UBI_IO_BITFLIPS)
 				ubi_warn("cannot read data back from PEB %d",
 					 to);
-			goto out_unlock;
+			else
+				err = 1;
+			goto out_unlock_buf;
 		}
 
 		cond_resched();
@@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
 			ubi_warn("read data back from PEB %d - it is different",
 				 to);
-			goto out_unlock;
+			goto out_unlock_buf;
 		}
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
 	vol->eba_tbl[lnum] = to;
 
-out_unlock:
+out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
 	leb_write_unlock(ubi, vol_id, lnum);
 	return err;
 }
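
How the wear-levelling code consumes the new 0/1/2 contract is not part of
this diff; a hedged sketch of the dispatch a caller in wl.c would presumably
perform ('e1'/'e2' and the labels are hypothetical):

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err < 0)
		goto out_error;   /* hard I/O or memory failure */
	if (err == 1)
		goto out_cancel;  /* contention or target bit-flip: retry later */
	if (err == 2)
		goto out_cancel;  /* volume being deleted: drop this move */
	/* err == 0: the LEB now lives in e2->pnum */
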
@@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
-	if (ubi_devices_cnt == 0) {
-		ltree_slab = kmem_cache_create("ubi_ltree_slab",
-					       sizeof(struct ltree_entry), 0,
-					       0, &ltree_entry_ctor);
-		if (!ltree_slab)
-			return -ENOMEM;
-	}
-
 	ubi->global_sqnum = si->max_sqnum + 1;
 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
@@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		}
 	}
 
+	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err("no enough physical eraseblocks (%d, need %d)",
+			ubi->avail_pebs, EBA_RESERVED_PEBS);
+		err = -ENOSPC;
+		goto out_free;
+	}
+	ubi->avail_pebs -= EBA_RESERVED_PEBS;
+	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
 	if (ubi->bad_allowed) {
 		ubi_calculate_reserved(ubi);
 
@@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
 	}
 
-	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
-		ubi_err("no enough physical eraseblocks (%d, need %d)",
-			ubi->avail_pebs, EBA_RESERVED_PEBS);
-		err = -ENOSPC;
-		goto out_free;
-	}
-	ubi->avail_pebs -= EBA_RESERVED_PEBS;
-	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
-
 	dbg_eba("EBA unit is initialized");
 	return 0;
 
@@ -1202,8 +1231,6 @@ out_free:
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(ltree_slab);
 	return err;
 }
 
@@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(ltree_slab);
 }