author     Linus Torvalds <torvalds@linux-foundation.org>   2016-10-11 13:49:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-10-11 13:49:44 -0400
commit     4c609922a3ae0248597785d1f9adc8f142a80aef (patch)
tree       7fad8817f40c9c5b4700795f39c22fdc6626fb2d
parent     1689c73a739d094b544c680b0dfdebe52ffee8fb (diff)
parent     ec037dfcc064f5f81982c78e95bab783568ae35f (diff)
Merge tag 'upstream-4.9-rc1' of git://git.infradead.org/linux-ubifs
Pull UBI/UBIFS updates from Richard Weinberger:
"This pull request contains:
- Fixes for both UBI and UBIFS
- overlayfs support (O_TMPFILE, RENAME_WHITEOUT/EXCHANGE)
- Code refactoring for the upcoming MLC support"
[ Ugh, we just got rid of the "rename2()" naming for the extended rename
functionality. And this re-introduces it in ubifs with the cross-
renaming and whiteout support.
But rather than do any re-organizations in the merge itself, the
naming can be cleaned up later ]
* tag 'upstream-4.9-rc1' of git://git.infradead.org/linux-ubifs: (27 commits)
UBIFS: improve function-level documentation
ubifs: fix host xattr_len when changing xattr
ubifs: Use move variable in ubifs_rename()
ubifs: Implement RENAME_EXCHANGE
ubifs: Implement RENAME_WHITEOUT
ubifs: Implement O_TMPFILE
ubi: Fix Fastmap's update_vol()
ubi: Fix races around ubi_refill_pools()
ubi: Deal with interrupted erasures in WL
UBI: introduce the VID buffer concept
UBI: hide EBA internals
UBI: provide an helper to query LEB information
UBI: provide an helper to check whether a LEB is mapped or not
UBI: add an helper to check lnum validity
UBI: simplify LEB write and atomic LEB change code
UBI: simplify recover_peb() code
UBI: move the global ech and vidh variables into struct ubi_attach_info
UBI: provide helpers to allocate and free aeb elements
UBI: fastmap: use ubi_io_{read, write}_data() instead of ubi_io_{read, write}()
UBI: fastmap: use ubi_rb_for_each_entry() in unmap_peb()
...
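
As a rough illustration of what the new ubifs features above mean for userspace, the sketch below exercises O_TMPFILE and RENAME_EXCHANGE on a UBIFS mount (assumed at /mnt/ubifs, with an existing entry /mnt/ubifs/old) and notes where RENAME_WHITEOUT fits. It assumes glibc 2.28 or newer, which exposes renameat2() and the RENAME_* flags via <stdio.h>; error handling is minimal and the paths are made up for the example.

/* Sketch only, not part of this merge: userspace view of the VFS
 * features UBIFS gains here (O_TMPFILE, RENAME_EXCHANGE/WHITEOUT). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];

	/* O_TMPFILE: write an unnamed file, then give it a name atomically. */
	int fd = open("/mnt/ubifs", O_TMPFILE | O_WRONLY, 0600);
	if (fd < 0)
		return 1;
	if (write(fd, "hello\n", 6) != 6)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/mnt/ubifs/new", AT_SYMLINK_FOLLOW))
		return 1;
	close(fd);

	/* RENAME_EXCHANGE: atomically swap two existing directory entries. */
	if (renameat2(AT_FDCWD, "/mnt/ubifs/new",
		      AT_FDCWD, "/mnt/ubifs/old", RENAME_EXCHANGE))
		return 1;

	/* RENAME_WHITEOUT (what overlayfs uses when deleting on a copied-up
	 * layer) would instead leave a whiteout entry at the source name:
	 *   renameat2(AT_FDCWD, src, AT_FDCWD, dst, RENAME_WHITEOUT);
	 */
	return 0;
}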
 drivers/mtd/ubi/attach.c     | 304
 drivers/mtd/ubi/build.c      |   2
 drivers/mtd/ubi/cdev.c       |   6
 drivers/mtd/ubi/eba.c        | 649
 drivers/mtd/ubi/fastmap-wl.c |   6
 drivers/mtd/ubi/fastmap.c    | 203
 drivers/mtd/ubi/io.c         |  39
 drivers/mtd/ubi/kapi.c       |  16
 drivers/mtd/ubi/ubi.h        | 132
 drivers/mtd/ubi/vmt.c        |  40
 drivers/mtd/ubi/vtbl.c       |  17
 drivers/mtd/ubi/wl.c         |  60
 fs/ubifs/dir.c               | 239
 fs/ubifs/file.c              |   2
 fs/ubifs/gc.c                |   2
 fs/ubifs/journal.c           | 188
 fs/ubifs/lprops.c            |   2
 fs/ubifs/lpt_commit.c        |   4
 fs/ubifs/replay.c            |   2
 fs/ubifs/ubifs.h             |   8
 fs/ubifs/xattr.c             |   6
 21 files changed, 1300 insertions(+), 627 deletions(-)
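
One goal of the refactoring below ("UBI: hide EBA internals", "UBI: provide an helper to query LEB information") is that code outside eba.c no longer indexes vol->eba_tbl[] directly but goes through accessors. A minimal sketch of what a caller looks like after this series, using only the helpers added in the eba.c diff; the surrounding function and the -ENODATA return value are hypothetical:

/* Sketch only, not part of the diff: resolve a LEB to its backing PEB
 * through the new accessors instead of touching vol->eba_tbl directly. */
static int example_leb_to_peb(struct ubi_volume *vol, int lnum)
{
	struct ubi_eba_leb_desc ldesc;

	if (!ubi_eba_is_mapped(vol, lnum))
		return -ENODATA;	/* LEB has no PEB behind it */

	ubi_eba_get_ldesc(vol, lnum, &ldesc);
	return ldesc.pnum;		/* PEB number backing this LEB */
}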
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 903becd31410..93ceea4f27d5 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -91,9 +91,132 @@
91 | 91 | ||
92 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); | 92 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); |
93 | 93 | ||
94 | /* Temporary variables used during scanning */ | 94 | #define AV_FIND BIT(0) |
95 | static struct ubi_ec_hdr *ech; | 95 | #define AV_ADD BIT(1) |
96 | static struct ubi_vid_hdr *vidh; | 96 | #define AV_FIND_OR_ADD (AV_FIND | AV_ADD) |
97 | |||
98 | /** | ||
99 | * find_or_add_av - internal function to find a volume, add a volume or do | ||
100 | * both (find and add if missing). | ||
101 | * @ai: attaching information | ||
102 | * @vol_id: the requested volume ID | ||
103 | * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the | ||
104 | * expected operation. If only %AV_ADD is set, -EEXIST is returned | ||
105 | * if the volume already exists. If only %AV_FIND is set, NULL is | ||
106 | * returned if the volume does not exist. And if both flags are | ||
107 | * set, the helper first tries to find an existing volume, and if | ||
108 | * it does not exist it creates a new one. | ||
109 | * @created: in value used to inform the caller whether it's a newly created | ||
110 | * volume or not. | ||
111 | * | ||
112 | * This function returns a pointer to a volume description or an ERR_PTR if | ||
113 | * the operation failed. It can also return NULL if only %AV_FIND is set and | ||
114 | * the volume does not exist. | ||
115 | */ | ||
116 | static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai, | ||
117 | int vol_id, unsigned int flags, | ||
118 | bool *created) | ||
119 | { | ||
120 | struct ubi_ainf_volume *av; | ||
121 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | ||
122 | |||
123 | /* Walk the volume RB-tree to look if this volume is already present */ | ||
124 | while (*p) { | ||
125 | parent = *p; | ||
126 | av = rb_entry(parent, struct ubi_ainf_volume, rb); | ||
127 | |||
128 | if (vol_id == av->vol_id) { | ||
129 | *created = false; | ||
130 | |||
131 | if (!(flags & AV_FIND)) | ||
132 | return ERR_PTR(-EEXIST); | ||
133 | |||
134 | return av; | ||
135 | } | ||
136 | |||
137 | if (vol_id > av->vol_id) | ||
138 | p = &(*p)->rb_left; | ||
139 | else | ||
140 | p = &(*p)->rb_right; | ||
141 | } | ||
142 | |||
143 | if (!(flags & AV_ADD)) | ||
144 | return NULL; | ||
145 | |||
146 | /* The volume is absent - add it */ | ||
147 | av = kzalloc(sizeof(*av), GFP_KERNEL); | ||
148 | if (!av) | ||
149 | return ERR_PTR(-ENOMEM); | ||
150 | |||
151 | av->vol_id = vol_id; | ||
152 | |||
153 | if (vol_id > ai->highest_vol_id) | ||
154 | ai->highest_vol_id = vol_id; | ||
155 | |||
156 | rb_link_node(&av->rb, parent, p); | ||
157 | rb_insert_color(&av->rb, &ai->volumes); | ||
158 | ai->vols_found += 1; | ||
159 | *created = true; | ||
160 | dbg_bld("added volume %d", vol_id); | ||
161 | return av; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * ubi_find_or_add_av - search for a volume in the attaching information and | ||
166 | * add one if it does not exist. | ||
167 | * @ai: attaching information | ||
168 | * @vol_id: the requested volume ID | ||
169 | * @created: whether the volume has been created or not | ||
170 | * | ||
171 | * This function returns a pointer to the new volume description or an | ||
172 | * ERR_PTR if the operation failed. | ||
173 | */ | ||
174 | static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai, | ||
175 | int vol_id, bool *created) | ||
176 | { | ||
177 | return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created); | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * ubi_alloc_aeb - allocate an aeb element | ||
182 | * @ai: attaching information | ||
183 | * @pnum: physical eraseblock number | ||
184 | * @ec: erase counter of the physical eraseblock | ||
185 | * | ||
186 | * Allocate an aeb object and initialize the pnum and ec information. | ||
187 | * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are | ||
188 | * initialized to zero. | ||
189 | * Note that the element is not added in any list or RB tree. | ||
190 | */ | ||
191 | struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum, | ||
192 | int ec) | ||
193 | { | ||
194 | struct ubi_ainf_peb *aeb; | ||
195 | |||
196 | aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL); | ||
197 | if (!aeb) | ||
198 | return NULL; | ||
199 | |||
200 | aeb->pnum = pnum; | ||
201 | aeb->ec = ec; | ||
202 | aeb->vol_id = UBI_UNKNOWN; | ||
203 | aeb->lnum = UBI_UNKNOWN; | ||
204 | |||
205 | return aeb; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * ubi_free_aeb - free an aeb element | ||
210 | * @ai: attaching information | ||
211 | * @aeb: the element to free | ||
212 | * | ||
213 | * Free an aeb object. The caller must have removed the element from any list | ||
214 | * or RB tree. | ||
215 | */ | ||
216 | void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb) | ||
217 | { | ||
218 | kmem_cache_free(ai->aeb_slab_cache, aeb); | ||
219 | } | ||
97 | 220 | ||
98 | /** | 221 | /** |
99 | * add_to_list - add physical eraseblock to a list. | 222 | * add_to_list - add physical eraseblock to a list. |
@@ -131,14 +254,12 @@ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id, | |||
131 | } else | 254 | } else |
132 | BUG(); | 255 | BUG(); |
133 | 256 | ||
134 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 257 | aeb = ubi_alloc_aeb(ai, pnum, ec); |
135 | if (!aeb) | 258 | if (!aeb) |
136 | return -ENOMEM; | 259 | return -ENOMEM; |
137 | 260 | ||
138 | aeb->pnum = pnum; | ||
139 | aeb->vol_id = vol_id; | 261 | aeb->vol_id = vol_id; |
140 | aeb->lnum = lnum; | 262 | aeb->lnum = lnum; |
141 | aeb->ec = ec; | ||
142 | if (to_head) | 263 | if (to_head) |
143 | list_add(&aeb->u.list, list); | 264 | list_add(&aeb->u.list, list); |
144 | else | 265 | else |
@@ -163,13 +284,11 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) | |||
163 | 284 | ||
164 | dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); | 285 | dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); |
165 | 286 | ||
166 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 287 | aeb = ubi_alloc_aeb(ai, pnum, ec); |
167 | if (!aeb) | 288 | if (!aeb) |
168 | return -ENOMEM; | 289 | return -ENOMEM; |
169 | 290 | ||
170 | ai->corr_peb_count += 1; | 291 | ai->corr_peb_count += 1; |
171 | aeb->pnum = pnum; | ||
172 | aeb->ec = ec; | ||
173 | list_add(&aeb->u.list, &ai->corr); | 292 | list_add(&aeb->u.list, &ai->corr); |
174 | return 0; | 293 | return 0; |
175 | } | 294 | } |
@@ -192,14 +311,12 @@ static int add_fastmap(struct ubi_attach_info *ai, int pnum, | |||
192 | { | 311 | { |
193 | struct ubi_ainf_peb *aeb; | 312 | struct ubi_ainf_peb *aeb; |
194 | 313 | ||
195 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 314 | aeb = ubi_alloc_aeb(ai, pnum, ec); |
196 | if (!aeb) | 315 | if (!aeb) |
197 | return -ENOMEM; | 316 | return -ENOMEM; |
198 | 317 | ||
199 | aeb->pnum = pnum; | 318 | aeb->vol_id = be32_to_cpu(vid_hdr->vol_id); |
200 | aeb->vol_id = be32_to_cpu(vidh->vol_id); | 319 | aeb->sqnum = be64_to_cpu(vid_hdr->sqnum); |
201 | aeb->sqnum = be64_to_cpu(vidh->sqnum); | ||
202 | aeb->ec = ec; | ||
203 | list_add(&aeb->u.list, &ai->fastmap); | 320 | list_add(&aeb->u.list, &ai->fastmap); |
204 | 321 | ||
205 | dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum, | 322 | dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum, |
@@ -294,44 +411,20 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, | |||
294 | const struct ubi_vid_hdr *vid_hdr) | 411 | const struct ubi_vid_hdr *vid_hdr) |
295 | { | 412 | { |
296 | struct ubi_ainf_volume *av; | 413 | struct ubi_ainf_volume *av; |
297 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | 414 | bool created; |
298 | 415 | ||
299 | ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); | 416 | ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); |
300 | 417 | ||
301 | /* Walk the volume RB-tree to look if this volume is already present */ | 418 | av = ubi_find_or_add_av(ai, vol_id, &created); |
302 | while (*p) { | 419 | if (IS_ERR(av) || !created) |
303 | parent = *p; | 420 | return av; |
304 | av = rb_entry(parent, struct ubi_ainf_volume, rb); | ||
305 | |||
306 | if (vol_id == av->vol_id) | ||
307 | return av; | ||
308 | |||
309 | if (vol_id > av->vol_id) | ||
310 | p = &(*p)->rb_left; | ||
311 | else | ||
312 | p = &(*p)->rb_right; | ||
313 | } | ||
314 | |||
315 | /* The volume is absent - add it */ | ||
316 | av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); | ||
317 | if (!av) | ||
318 | return ERR_PTR(-ENOMEM); | ||
319 | 421 | ||
320 | av->highest_lnum = av->leb_count = 0; | ||
321 | av->vol_id = vol_id; | ||
322 | av->root = RB_ROOT; | ||
323 | av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); | 422 | av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); |
324 | av->data_pad = be32_to_cpu(vid_hdr->data_pad); | 423 | av->data_pad = be32_to_cpu(vid_hdr->data_pad); |
325 | av->compat = vid_hdr->compat; | 424 | av->compat = vid_hdr->compat; |
326 | av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME | 425 | av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME |
327 | : UBI_STATIC_VOLUME; | 426 | : UBI_STATIC_VOLUME; |
328 | if (vol_id > ai->highest_vol_id) | ||
329 | ai->highest_vol_id = vol_id; | ||
330 | 427 | ||
331 | rb_link_node(&av->rb, parent, p); | ||
332 | rb_insert_color(&av->rb, &ai->volumes); | ||
333 | ai->vols_found += 1; | ||
334 | dbg_bld("added volume %d", vol_id); | ||
335 | return av; | 428 | return av; |
336 | } | 429 | } |
337 | 430 | ||
@@ -360,7 +453,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | |||
360 | { | 453 | { |
361 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; | 454 | int len, err, second_is_newer, bitflips = 0, corrupted = 0; |
362 | uint32_t data_crc, crc; | 455 | uint32_t data_crc, crc; |
363 | struct ubi_vid_hdr *vh = NULL; | 456 | struct ubi_vid_io_buf *vidb = NULL; |
364 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); | 457 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); |
365 | 458 | ||
366 | if (sqnum2 == aeb->sqnum) { | 459 | if (sqnum2 == aeb->sqnum) { |
@@ -403,12 +496,12 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | |||
403 | return bitflips << 1; | 496 | return bitflips << 1; |
404 | } | 497 | } |
405 | 498 | ||
406 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 499 | vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
407 | if (!vh) | 500 | if (!vidb) |
408 | return -ENOMEM; | 501 | return -ENOMEM; |
409 | 502 | ||
410 | pnum = aeb->pnum; | 503 | pnum = aeb->pnum; |
411 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | 504 | err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); |
412 | if (err) { | 505 | if (err) { |
413 | if (err == UBI_IO_BITFLIPS) | 506 | if (err == UBI_IO_BITFLIPS) |
414 | bitflips = 1; | 507 | bitflips = 1; |
@@ -422,7 +515,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | |||
422 | } | 515 | } |
423 | } | 516 | } |
424 | 517 | ||
425 | vid_hdr = vh; | 518 | vid_hdr = ubi_get_vid_hdr(vidb); |
426 | } | 519 | } |
427 | 520 | ||
428 | /* Read the data of the copy and check the CRC */ | 521 | /* Read the data of the copy and check the CRC */ |
@@ -448,7 +541,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | |||
448 | } | 541 | } |
449 | mutex_unlock(&ubi->buf_mutex); | 542 | mutex_unlock(&ubi->buf_mutex); |
450 | 543 | ||
451 | ubi_free_vid_hdr(ubi, vh); | 544 | ubi_free_vid_buf(vidb); |
452 | 545 | ||
453 | if (second_is_newer) | 546 | if (second_is_newer) |
454 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); | 547 | dbg_bld("second PEB %d is newer, copy_flag is set", pnum); |
@@ -460,7 +553,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | |||
460 | out_unlock: | 553 | out_unlock: |
461 | mutex_unlock(&ubi->buf_mutex); | 554 | mutex_unlock(&ubi->buf_mutex); |
462 | out_free_vidh: | 555 | out_free_vidh: |
463 | ubi_free_vid_hdr(ubi, vh); | 556 | ubi_free_vid_buf(vidb); |
464 | return err; | 557 | return err; |
465 | } | 558 | } |
466 | 559 | ||
@@ -605,12 +698,10 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | |||
605 | if (err) | 698 | if (err) |
606 | return err; | 699 | return err; |
607 | 700 | ||
608 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 701 | aeb = ubi_alloc_aeb(ai, pnum, ec); |
609 | if (!aeb) | 702 | if (!aeb) |
610 | return -ENOMEM; | 703 | return -ENOMEM; |
611 | 704 | ||
612 | aeb->ec = ec; | ||
613 | aeb->pnum = pnum; | ||
614 | aeb->vol_id = vol_id; | 705 | aeb->vol_id = vol_id; |
615 | aeb->lnum = lnum; | 706 | aeb->lnum = lnum; |
616 | aeb->scrub = bitflips; | 707 | aeb->scrub = bitflips; |
@@ -629,6 +720,21 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | |||
629 | } | 720 | } |
630 | 721 | ||
631 | /** | 722 | /** |
723 | * ubi_add_av - add volume to the attaching information. | ||
724 | * @ai: attaching information | ||
725 | * @vol_id: the requested volume ID | ||
726 | * | ||
727 | * This function returns a pointer to the new volume description or an | ||
728 | * ERR_PTR if the operation failed. | ||
729 | */ | ||
730 | struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id) | ||
731 | { | ||
732 | bool created; | ||
733 | |||
734 | return find_or_add_av(ai, vol_id, AV_ADD, &created); | ||
735 | } | ||
736 | |||
737 | /** | ||
632 | * ubi_find_av - find volume in the attaching information. | 738 | * ubi_find_av - find volume in the attaching information. |
633 | * @ai: attaching information | 739 | * @ai: attaching information |
634 | * @vol_id: the requested volume ID | 740 | * @vol_id: the requested volume ID |
@@ -639,24 +745,15 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | |||
639 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | 745 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, |
640 | int vol_id) | 746 | int vol_id) |
641 | { | 747 | { |
642 | struct ubi_ainf_volume *av; | 748 | bool created; |
643 | struct rb_node *p = ai->volumes.rb_node; | ||
644 | |||
645 | while (p) { | ||
646 | av = rb_entry(p, struct ubi_ainf_volume, rb); | ||
647 | |||
648 | if (vol_id == av->vol_id) | ||
649 | return av; | ||
650 | |||
651 | if (vol_id > av->vol_id) | ||
652 | p = p->rb_left; | ||
653 | else | ||
654 | p = p->rb_right; | ||
655 | } | ||
656 | 749 | ||
657 | return NULL; | 750 | return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND, |
751 | &created); | ||
658 | } | 752 | } |
659 | 753 | ||
754 | static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, | ||
755 | struct list_head *list); | ||
756 | |||
660 | /** | 757 | /** |
661 | * ubi_remove_av - delete attaching information about a volume. | 758 | * ubi_remove_av - delete attaching information about a volume. |
662 | * @ai: attaching information | 759 | * @ai: attaching information |
@@ -664,19 +761,10 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | |||
664 | */ | 761 | */ |
665 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) | 762 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) |
666 | { | 763 | { |
667 | struct rb_node *rb; | ||
668 | struct ubi_ainf_peb *aeb; | ||
669 | |||
670 | dbg_bld("remove attaching information about volume %d", av->vol_id); | 764 | dbg_bld("remove attaching information about volume %d", av->vol_id); |
671 | 765 | ||
672 | while ((rb = rb_first(&av->root))) { | ||
673 | aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); | ||
674 | rb_erase(&aeb->u.rb, &av->root); | ||
675 | list_add_tail(&aeb->u.list, &ai->erase); | ||
676 | } | ||
677 | |||
678 | rb_erase(&av->rb, &ai->volumes); | 766 | rb_erase(&av->rb, &ai->volumes); |
679 | kfree(av); | 767 | destroy_av(ai, av, &ai->erase); |
680 | ai->vols_found -= 1; | 768 | ai->vols_found -= 1; |
681 | } | 769 | } |
682 | 770 | ||
@@ -866,6 +954,9 @@ static bool vol_ignored(int vol_id) | |||
866 | static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, | 954 | static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, |
867 | int pnum, bool fast) | 955 | int pnum, bool fast) |
868 | { | 956 | { |
957 | struct ubi_ec_hdr *ech = ai->ech; | ||
958 | struct ubi_vid_io_buf *vidb = ai->vidb; | ||
959 | struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); | ||
869 | long long ec; | 960 | long long ec; |
870 | int err, bitflips = 0, vol_id = -1, ec_err = 0; | 961 | int err, bitflips = 0, vol_id = -1, ec_err = 0; |
871 | 962 | ||
@@ -963,7 +1054,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
963 | 1054 | ||
964 | /* OK, we've done with the EC header, let's look at the VID header */ | 1055 | /* OK, we've done with the EC header, let's look at the VID header */ |
965 | 1056 | ||
966 | err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); | 1057 | err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); |
967 | if (err < 0) | 1058 | if (err < 0) |
968 | return err; | 1059 | return err; |
969 | switch (err) { | 1060 | switch (err) { |
@@ -1191,10 +1282,12 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1191 | * destroy_av - free volume attaching information. | 1282 | * destroy_av - free volume attaching information. |
1192 | * @av: volume attaching information | 1283 | * @av: volume attaching information |
1193 | * @ai: attaching information | 1284 | * @ai: attaching information |
1285 | * @list: put the aeb elements in there if !NULL, otherwise free them | ||
1194 | * | 1286 | * |
1195 | * This function destroys the volume attaching information. | 1287 | * This function destroys the volume attaching information. |
1196 | */ | 1288 | */ |
1197 | static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) | 1289 | static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, |
1290 | struct list_head *list) | ||
1198 | { | 1291 | { |
1199 | struct ubi_ainf_peb *aeb; | 1292 | struct ubi_ainf_peb *aeb; |
1200 | struct rb_node *this = av->root.rb_node; | 1293 | struct rb_node *this = av->root.rb_node; |
@@ -1214,7 +1307,10 @@ static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) | |||
1214 | this->rb_right = NULL; | 1307 | this->rb_right = NULL; |
1215 | } | 1308 | } |
1216 | 1309 | ||
1217 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1310 | if (list) |
1311 | list_add_tail(&aeb->u.list, list); | ||
1312 | else | ||
1313 | ubi_free_aeb(ai, aeb); | ||
1218 | } | 1314 | } |
1219 | } | 1315 | } |
1220 | kfree(av); | 1316 | kfree(av); |
@@ -1232,23 +1328,23 @@ static void destroy_ai(struct ubi_attach_info *ai) | |||
1232 | 1328 | ||
1233 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { | 1329 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { |
1234 | list_del(&aeb->u.list); | 1330 | list_del(&aeb->u.list); |
1235 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1331 | ubi_free_aeb(ai, aeb); |
1236 | } | 1332 | } |
1237 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { | 1333 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { |
1238 | list_del(&aeb->u.list); | 1334 | list_del(&aeb->u.list); |
1239 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1335 | ubi_free_aeb(ai, aeb); |
1240 | } | 1336 | } |
1241 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { | 1337 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { |
1242 | list_del(&aeb->u.list); | 1338 | list_del(&aeb->u.list); |
1243 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1339 | ubi_free_aeb(ai, aeb); |
1244 | } | 1340 | } |
1245 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { | 1341 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { |
1246 | list_del(&aeb->u.list); | 1342 | list_del(&aeb->u.list); |
1247 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1343 | ubi_free_aeb(ai, aeb); |
1248 | } | 1344 | } |
1249 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) { | 1345 | list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) { |
1250 | list_del(&aeb->u.list); | 1346 | list_del(&aeb->u.list); |
1251 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 1347 | ubi_free_aeb(ai, aeb); |
1252 | } | 1348 | } |
1253 | 1349 | ||
1254 | /* Destroy the volume RB-tree */ | 1350 | /* Destroy the volume RB-tree */ |
@@ -1269,7 +1365,7 @@ static void destroy_ai(struct ubi_attach_info *ai) | |||
1269 | rb->rb_right = NULL; | 1365 | rb->rb_right = NULL; |
1270 | } | 1366 | } |
1271 | 1367 | ||
1272 | destroy_av(ai, av); | 1368 | destroy_av(ai, av, NULL); |
1273 | } | 1369 | } |
1274 | } | 1370 | } |
1275 | 1371 | ||
@@ -1297,12 +1393,12 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
1297 | 1393 | ||
1298 | err = -ENOMEM; | 1394 | err = -ENOMEM; |
1299 | 1395 | ||
1300 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1396 | ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
1301 | if (!ech) | 1397 | if (!ai->ech) |
1302 | return err; | 1398 | return err; |
1303 | 1399 | ||
1304 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 1400 | ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
1305 | if (!vidh) | 1401 | if (!ai->vidb) |
1306 | goto out_ech; | 1402 | goto out_ech; |
1307 | 1403 | ||
1308 | for (pnum = start; pnum < ubi->peb_count; pnum++) { | 1404 | for (pnum = start; pnum < ubi->peb_count; pnum++) { |
@@ -1351,15 +1447,15 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
1351 | if (err) | 1447 | if (err) |
1352 | goto out_vidh; | 1448 | goto out_vidh; |
1353 | 1449 | ||
1354 | ubi_free_vid_hdr(ubi, vidh); | 1450 | ubi_free_vid_buf(ai->vidb); |
1355 | kfree(ech); | 1451 | kfree(ai->ech); |
1356 | 1452 | ||
1357 | return 0; | 1453 | return 0; |
1358 | 1454 | ||
1359 | out_vidh: | 1455 | out_vidh: |
1360 | ubi_free_vid_hdr(ubi, vidh); | 1456 | ubi_free_vid_buf(ai->vidb); |
1361 | out_ech: | 1457 | out_ech: |
1362 | kfree(ech); | 1458 | kfree(ai->ech); |
1363 | return err; | 1459 | return err; |
1364 | } | 1460 | } |
1365 | 1461 | ||
@@ -1411,12 +1507,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) | |||
1411 | if (!scan_ai) | 1507 | if (!scan_ai) |
1412 | goto out; | 1508 | goto out; |
1413 | 1509 | ||
1414 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1510 | scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
1415 | if (!ech) | 1511 | if (!scan_ai->ech) |
1416 | goto out_ai; | 1512 | goto out_ai; |
1417 | 1513 | ||
1418 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 1514 | scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
1419 | if (!vidh) | 1515 | if (!scan_ai->vidb) |
1420 | goto out_ech; | 1516 | goto out_ech; |
1421 | 1517 | ||
1422 | for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { | 1518 | for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { |
@@ -1428,8 +1524,8 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) | |||
1428 | goto out_vidh; | 1524 | goto out_vidh; |
1429 | } | 1525 | } |
1430 | 1526 | ||
1431 | ubi_free_vid_hdr(ubi, vidh); | 1527 | ubi_free_vid_buf(scan_ai->vidb); |
1432 | kfree(ech); | 1528 | kfree(scan_ai->ech); |
1433 | 1529 | ||
1434 | if (scan_ai->force_full_scan) | 1530 | if (scan_ai->force_full_scan) |
1435 | err = UBI_NO_FASTMAP; | 1531 | err = UBI_NO_FASTMAP; |
@@ -1449,9 +1545,9 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) | |||
1449 | return err; | 1545 | return err; |
1450 | 1546 | ||
1451 | out_vidh: | 1547 | out_vidh: |
1452 | ubi_free_vid_hdr(ubi, vidh); | 1548 | ubi_free_vid_buf(scan_ai->vidb); |
1453 | out_ech: | 1549 | out_ech: |
1454 | kfree(ech); | 1550 | kfree(scan_ai->ech); |
1455 | out_ai: | 1551 | out_ai: |
1456 | destroy_ai(scan_ai); | 1552 | destroy_ai(scan_ai); |
1457 | out: | 1553 | out: |
@@ -1573,6 +1669,8 @@ out_ai: | |||
1573 | */ | 1669 | */ |
1574 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1670 | static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1575 | { | 1671 | { |
1672 | struct ubi_vid_io_buf *vidb = ai->vidb; | ||
1673 | struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); | ||
1576 | int pnum, err, vols_found = 0; | 1674 | int pnum, err, vols_found = 0; |
1577 | struct rb_node *rb1, *rb2; | 1675 | struct rb_node *rb1, *rb2; |
1578 | struct ubi_ainf_volume *av; | 1676 | struct ubi_ainf_volume *av; |
@@ -1708,7 +1806,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1708 | 1806 | ||
1709 | last_aeb = aeb; | 1807 | last_aeb = aeb; |
1710 | 1808 | ||
1711 | err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); | 1809 | err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1); |
1712 | if (err && err != UBI_IO_BITFLIPS) { | 1810 | if (err && err != UBI_IO_BITFLIPS) { |
1713 | ubi_err(ubi, "VID header is not OK (%d)", | 1811 | ubi_err(ubi, "VID header is not OK (%d)", |
1714 | err); | 1812 | err); |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0680516bb472..85d54f37e28f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -574,7 +574,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
574 | 574 | ||
575 | for (i = ubi->vtbl_slots; | 575 | for (i = ubi->vtbl_slots; |
576 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 576 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
577 | kfree(ubi->volumes[i]->eba_tbl); | 577 | ubi_eba_replace_table(ubi->volumes[i], NULL); |
578 | kfree(ubi->volumes[i]); | 578 | kfree(ubi->volumes[i]); |
579 | } | 579 | } |
580 | } | 580 | } |
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index ee2b74d1d1b5..45c329694a5e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -416,7 +416,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
416 | } | 416 | } |
417 | 417 | ||
418 | rsvd_bytes = (long long)vol->reserved_pebs * | 418 | rsvd_bytes = (long long)vol->reserved_pebs * |
419 | ubi->leb_size-vol->data_pad; | 419 | vol->usable_leb_size; |
420 | if (bytes < 0 || bytes > rsvd_bytes) { | 420 | if (bytes < 0 || bytes > rsvd_bytes) { |
421 | err = -EINVAL; | 421 | err = -EINVAL; |
422 | break; | 422 | break; |
@@ -454,7 +454,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, | |||
454 | 454 | ||
455 | /* Validate the request */ | 455 | /* Validate the request */ |
456 | err = -EINVAL; | 456 | err = -EINVAL; |
457 | if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || | 457 | if (!ubi_leb_valid(vol, req.lnum) || |
458 | req.bytes < 0 || req.bytes > vol->usable_leb_size) | 458 | req.bytes < 0 || req.bytes > vol->usable_leb_size) |
459 | break; | 459 | break; |
460 | 460 | ||
@@ -485,7 +485,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, | |||
485 | break; | 485 | break; |
486 | } | 486 | } |
487 | 487 | ||
488 | if (lnum < 0 || lnum >= vol->reserved_pebs) { | 488 | if (!ubi_leb_valid(vol, lnum)) { |
489 | err = -EINVAL; | 489 | err = -EINVAL; |
490 | break; | 490 | break; |
491 | } | 491 | } |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ebf517271d29..95c4048a371e 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -50,6 +50,30 @@
50 | #define EBA_RESERVED_PEBS 1 | 50 | #define EBA_RESERVED_PEBS 1 |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * struct ubi_eba_entry - structure encoding a single LEB -> PEB association | ||
54 | * @pnum: the physical eraseblock number attached to the LEB | ||
55 | * | ||
56 | * This structure is encoding a LEB -> PEB association. Note that the LEB | ||
57 | * number is not stored here, because it is the index used to access the | ||
58 | * entries table. | ||
59 | */ | ||
60 | struct ubi_eba_entry { | ||
61 | int pnum; | ||
62 | }; | ||
63 | |||
64 | /** | ||
65 | * struct ubi_eba_table - LEB -> PEB association information | ||
66 | * @entries: the LEB to PEB mapping (one entry per LEB). | ||
67 | * | ||
68 | * This structure is private to the EBA logic and should be kept here. | ||
69 | * It is encoding the LEB to PEB association table, and is subject to | ||
70 | * changes. | ||
71 | */ | ||
72 | struct ubi_eba_table { | ||
73 | struct ubi_eba_entry *entries; | ||
74 | }; | ||
75 | |||
76 | /** | ||
53 | * next_sqnum - get next sequence number. | 77 | * next_sqnum - get next sequence number. |
54 | * @ubi: UBI device description object | 78 | * @ubi: UBI device description object |
55 | * | 79 | * |
@@ -84,6 +108,110 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) | |||
84 | } | 108 | } |
85 | 109 | ||
86 | /** | 110 | /** |
111 | * ubi_eba_get_ldesc - get information about a LEB | ||
112 | * @vol: volume description object | ||
113 | * @lnum: logical eraseblock number | ||
114 | * @ldesc: the LEB descriptor to fill | ||
115 | * | ||
116 | * Used to query information about a specific LEB. | ||
117 | * It is currently only returning the physical position of the LEB, but will be | ||
118 | * extended to provide more information. | ||
119 | */ | ||
120 | void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum, | ||
121 | struct ubi_eba_leb_desc *ldesc) | ||
122 | { | ||
123 | ldesc->lnum = lnum; | ||
124 | ldesc->pnum = vol->eba_tbl->entries[lnum].pnum; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * ubi_eba_create_table - allocate a new EBA table and initialize it with all | ||
129 | * LEBs unmapped | ||
130 | * @vol: volume containing the EBA table to copy | ||
131 | * @nentries: number of entries in the table | ||
132 | * | ||
133 | * Allocate a new EBA table and initialize it with all LEBs unmapped. | ||
134 | * Returns a valid pointer if it succeed, an ERR_PTR() otherwise. | ||
135 | */ | ||
136 | struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol, | ||
137 | int nentries) | ||
138 | { | ||
139 | struct ubi_eba_table *tbl; | ||
140 | int err = -ENOMEM; | ||
141 | int i; | ||
142 | |||
143 | tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); | ||
144 | if (!tbl) | ||
145 | return ERR_PTR(-ENOMEM); | ||
146 | |||
147 | tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries), | ||
148 | GFP_KERNEL); | ||
149 | if (!tbl->entries) | ||
150 | goto err; | ||
151 | |||
152 | for (i = 0; i < nentries; i++) | ||
153 | tbl->entries[i].pnum = UBI_LEB_UNMAPPED; | ||
154 | |||
155 | return tbl; | ||
156 | |||
157 | err: | ||
158 | kfree(tbl->entries); | ||
159 | kfree(tbl); | ||
160 | |||
161 | return ERR_PTR(err); | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * ubi_eba_destroy_table - destroy an EBA table | ||
166 | * @tbl: the table to destroy | ||
167 | * | ||
168 | * Destroy an EBA table. | ||
169 | */ | ||
170 | void ubi_eba_destroy_table(struct ubi_eba_table *tbl) | ||
171 | { | ||
172 | if (!tbl) | ||
173 | return; | ||
174 | |||
175 | kfree(tbl->entries); | ||
176 | kfree(tbl); | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * ubi_eba_copy_table - copy the EBA table attached to vol into another table | ||
181 | * @vol: volume containing the EBA table to copy | ||
182 | * @dst: destination | ||
183 | * @nentries: number of entries to copy | ||
184 | * | ||
185 | * Copy the EBA table stored in vol into the one pointed by dst. | ||
186 | */ | ||
187 | void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst, | ||
188 | int nentries) | ||
189 | { | ||
190 | struct ubi_eba_table *src; | ||
191 | int i; | ||
192 | |||
193 | ubi_assert(dst && vol && vol->eba_tbl); | ||
194 | |||
195 | src = vol->eba_tbl; | ||
196 | |||
197 | for (i = 0; i < nentries; i++) | ||
198 | dst->entries[i].pnum = src->entries[i].pnum; | ||
199 | } | ||
200 | |||
201 | /** | ||
202 | * ubi_eba_replace_table - assign a new EBA table to a volume | ||
203 | * @vol: volume containing the EBA table to copy | ||
204 | * @tbl: new EBA table | ||
205 | * | ||
206 | * Assign a new EBA table to the volume and release the old one. | ||
207 | */ | ||
208 | void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl) | ||
209 | { | ||
210 | ubi_eba_destroy_table(vol->eba_tbl); | ||
211 | vol->eba_tbl = tbl; | ||
212 | } | ||
213 | |||
214 | /** | ||
87 | * ltree_lookup - look up the lock tree. | 215 | * ltree_lookup - look up the lock tree. |
88 | * @ubi: UBI device description object | 216 | * @ubi: UBI device description object |
89 | * @vol_id: volume ID | 217 | * @vol_id: volume ID |
@@ -312,6 +440,18 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) | |||
312 | } | 440 | } |
313 | 441 | ||
314 | /** | 442 | /** |
443 | * ubi_eba_is_mapped - check if a LEB is mapped. | ||
444 | * @vol: volume description object | ||
445 | * @lnum: logical eraseblock number | ||
446 | * | ||
447 | * This function returns true if the LEB is mapped, false otherwise. | ||
448 | */ | ||
449 | bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum) | ||
450 | { | ||
451 | return vol->eba_tbl->entries[lnum].pnum >= 0; | ||
452 | } | ||
453 | |||
454 | /** | ||
315 | * ubi_eba_unmap_leb - un-map logical eraseblock. | 455 | * ubi_eba_unmap_leb - un-map logical eraseblock. |
316 | * @ubi: UBI device description object | 456 | * @ubi: UBI device description object |
317 | * @vol: volume description object | 457 | * @vol: volume description object |
@@ -333,7 +473,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | |||
333 | if (err) | 473 | if (err) |
334 | return err; | 474 | return err; |
335 | 475 | ||
336 | pnum = vol->eba_tbl[lnum]; | 476 | pnum = vol->eba_tbl->entries[lnum].pnum; |
337 | if (pnum < 0) | 477 | if (pnum < 0) |
338 | /* This logical eraseblock is already unmapped */ | 478 | /* This logical eraseblock is already unmapped */ |
339 | goto out_unlock; | 479 | goto out_unlock; |
@@ -341,7 +481,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | |||
341 | dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); | 481 | dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); |
342 | 482 | ||
343 | down_read(&ubi->fm_eba_sem); | 483 | down_read(&ubi->fm_eba_sem); |
344 | vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; | 484 | vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED; |
345 | up_read(&ubi->fm_eba_sem); | 485 | up_read(&ubi->fm_eba_sem); |
346 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); | 486 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); |
347 | 487 | ||
@@ -373,6 +513,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
373 | void *buf, int offset, int len, int check) | 513 | void *buf, int offset, int len, int check) |
374 | { | 514 | { |
375 | int err, pnum, scrub = 0, vol_id = vol->vol_id; | 515 | int err, pnum, scrub = 0, vol_id = vol->vol_id; |
516 | struct ubi_vid_io_buf *vidb; | ||
376 | struct ubi_vid_hdr *vid_hdr; | 517 | struct ubi_vid_hdr *vid_hdr; |
377 | uint32_t uninitialized_var(crc); | 518 | uint32_t uninitialized_var(crc); |
378 | 519 | ||
@@ -380,7 +521,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
380 | if (err) | 521 | if (err) |
381 | return err; | 522 | return err; |
382 | 523 | ||
383 | pnum = vol->eba_tbl[lnum]; | 524 | pnum = vol->eba_tbl->entries[lnum].pnum; |
384 | if (pnum < 0) { | 525 | if (pnum < 0) { |
385 | /* | 526 | /* |
386 | * The logical eraseblock is not mapped, fill the whole buffer | 527 | * The logical eraseblock is not mapped, fill the whole buffer |
@@ -403,13 +544,15 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
403 | 544 | ||
404 | retry: | 545 | retry: |
405 | if (check) { | 546 | if (check) { |
406 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 547 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
407 | if (!vid_hdr) { | 548 | if (!vidb) { |
408 | err = -ENOMEM; | 549 | err = -ENOMEM; |
409 | goto out_unlock; | 550 | goto out_unlock; |
410 | } | 551 | } |
411 | 552 | ||
412 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | 553 | vid_hdr = ubi_get_vid_hdr(vidb); |
554 | |||
555 | err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1); | ||
413 | if (err && err != UBI_IO_BITFLIPS) { | 556 | if (err && err != UBI_IO_BITFLIPS) { |
414 | if (err > 0) { | 557 | if (err > 0) { |
415 | /* | 558 | /* |
@@ -455,7 +598,7 @@ retry: | |||
455 | ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); | 598 | ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); |
456 | 599 | ||
457 | crc = be32_to_cpu(vid_hdr->data_crc); | 600 | crc = be32_to_cpu(vid_hdr->data_crc); |
458 | ubi_free_vid_hdr(ubi, vid_hdr); | 601 | ubi_free_vid_buf(vidb); |
459 | } | 602 | } |
460 | 603 | ||
461 | err = ubi_io_read_data(ubi, buf, pnum, offset, len); | 604 | err = ubi_io_read_data(ubi, buf, pnum, offset, len); |
@@ -492,7 +635,7 @@ retry: | |||
492 | return err; | 635 | return err; |
493 | 636 | ||
494 | out_free: | 637 | out_free: |
495 | ubi_free_vid_hdr(ubi, vid_hdr); | 638 | ubi_free_vid_buf(vidb); |
496 | out_unlock: | 639 | out_unlock: |
497 | leb_read_unlock(ubi, vol_id, lnum); | 640 | leb_read_unlock(ubi, vol_id, lnum); |
498 | return err; | 641 | return err; |
@@ -554,49 +697,47 @@ int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol, | |||
554 | } | 697 | } |
555 | 698 | ||
556 | /** | 699 | /** |
557 | * recover_peb - recover from write failure. | 700 | * try_recover_peb - try to recover from write failure. |
558 | * @ubi: UBI device description object | 701 | * @vol: volume description object |
559 | * @pnum: the physical eraseblock to recover | 702 | * @pnum: the physical eraseblock to recover |
560 | * @vol_id: volume ID | ||
561 | * @lnum: logical eraseblock number | 703 | * @lnum: logical eraseblock number |
562 | * @buf: data which was not written because of the write failure | 704 | * @buf: data which was not written because of the write failure |
563 | * @offset: offset of the failed write | 705 | * @offset: offset of the failed write |
564 | * @len: how many bytes should have been written | 706 | * @len: how many bytes should have been written |
707 | * @vidb: VID buffer | ||
708 | * @retry: whether the caller should retry in case of failure | ||
565 | * | 709 | * |
566 | * This function is called in case of a write failure and moves all good data | 710 | * This function is called in case of a write failure and moves all good data |
567 | * from the potentially bad physical eraseblock to a good physical eraseblock. | 711 | * from the potentially bad physical eraseblock to a good physical eraseblock. |
568 | * This function also writes the data which was not written due to the failure. | 712 | * This function also writes the data which was not written due to the failure. |
569 | * Returns new physical eraseblock number in case of success, and a negative | 713 | * Returns 0 in case of success, and a negative error code in case of failure. |
570 | * error code in case of failure. | 714 | * In case of failure, the %retry parameter is set to false if this is a fatal |
715 | * error (retrying won't help), and true otherwise. | ||
571 | */ | 716 | */ |
572 | static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | 717 | static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum, |
573 | const void *buf, int offset, int len) | 718 | const void *buf, int offset, int len, |
719 | struct ubi_vid_io_buf *vidb, bool *retry) | ||
574 | { | 720 | { |
575 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | 721 | struct ubi_device *ubi = vol->ubi; |
576 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
577 | struct ubi_vid_hdr *vid_hdr; | 722 | struct ubi_vid_hdr *vid_hdr; |
723 | int new_pnum, err, vol_id = vol->vol_id, data_size; | ||
578 | uint32_t crc; | 724 | uint32_t crc; |
579 | 725 | ||
580 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 726 | *retry = false; |
581 | if (!vid_hdr) | ||
582 | return -ENOMEM; | ||
583 | 727 | ||
584 | retry: | ||
585 | new_pnum = ubi_wl_get_peb(ubi); | 728 | new_pnum = ubi_wl_get_peb(ubi); |
586 | if (new_pnum < 0) { | 729 | if (new_pnum < 0) { |
587 | ubi_free_vid_hdr(ubi, vid_hdr); | 730 | err = new_pnum; |
588 | up_read(&ubi->fm_eba_sem); | 731 | goto out_put; |
589 | return new_pnum; | ||
590 | } | 732 | } |
591 | 733 | ||
592 | ubi_msg(ubi, "recover PEB %d, move data to PEB %d", | 734 | ubi_msg(ubi, "recover PEB %d, move data to PEB %d", |
593 | pnum, new_pnum); | 735 | pnum, new_pnum); |
594 | 736 | ||
595 | err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); | 737 | err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1); |
596 | if (err && err != UBI_IO_BITFLIPS) { | 738 | if (err && err != UBI_IO_BITFLIPS) { |
597 | if (err > 0) | 739 | if (err > 0) |
598 | err = -EIO; | 740 | err = -EIO; |
599 | up_read(&ubi->fm_eba_sem); | ||
600 | goto out_put; | 741 | goto out_put; |
601 | } | 742 | } |
602 | 743 | ||
@@ -608,12 +749,12 @@ retry: | |||
608 | /* Read everything before the area where the write failure happened */ | 749 | /* Read everything before the area where the write failure happened */ |
609 | if (offset > 0) { | 750 | if (offset > 0) { |
610 | err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); | 751 | err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); |
611 | if (err && err != UBI_IO_BITFLIPS) { | 752 | if (err && err != UBI_IO_BITFLIPS) |
612 | up_read(&ubi->fm_eba_sem); | ||
613 | goto out_unlock; | 753 | goto out_unlock; |
614 | } | ||
615 | } | 754 | } |
616 | 755 | ||
756 | *retry = true; | ||
757 | |||
617 | memcpy(ubi->peb_buf + offset, buf, len); | 758 | memcpy(ubi->peb_buf + offset, buf, len); |
618 | 759 | ||
619 | data_size = offset + len; | 760 | data_size = offset + len; |
@@ -622,50 +763,140 @@ retry: | |||
622 | vid_hdr->copy_flag = 1; | 763 | vid_hdr->copy_flag = 1; |
623 | vid_hdr->data_size = cpu_to_be32(data_size); | 764 | vid_hdr->data_size = cpu_to_be32(data_size); |
624 | vid_hdr->data_crc = cpu_to_be32(crc); | 765 | vid_hdr->data_crc = cpu_to_be32(crc); |
625 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | 766 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb); |
626 | if (err) { | 767 | if (err) |
627 | mutex_unlock(&ubi->buf_mutex); | 768 | goto out_unlock; |
628 | up_read(&ubi->fm_eba_sem); | ||
629 | goto write_error; | ||
630 | } | ||
631 | 769 | ||
632 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); | 770 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); |
633 | if (err) { | ||
634 | mutex_unlock(&ubi->buf_mutex); | ||
635 | up_read(&ubi->fm_eba_sem); | ||
636 | goto write_error; | ||
637 | } | ||
638 | 771 | ||
772 | out_unlock: | ||
639 | mutex_unlock(&ubi->buf_mutex); | 773 | mutex_unlock(&ubi->buf_mutex); |
640 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
641 | 774 | ||
642 | vol->eba_tbl[lnum] = new_pnum; | 775 | if (!err) |
776 | vol->eba_tbl->entries[lnum].pnum = new_pnum; | ||
777 | |||
778 | out_put: | ||
643 | up_read(&ubi->fm_eba_sem); | 779 | up_read(&ubi->fm_eba_sem); |
644 | ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); | ||
645 | 780 | ||
646 | ubi_msg(ubi, "data was successfully recovered"); | 781 | if (!err) { |
647 | return 0; | 782 | ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); |
783 | ubi_msg(ubi, "data was successfully recovered"); | ||
784 | } else if (new_pnum >= 0) { | ||
785 | /* | ||
786 | * Bad luck? This physical eraseblock is bad too? Crud. Let's | ||
787 | * try to get another one. | ||
788 | */ | ||
789 | ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); | ||
790 | ubi_warn(ubi, "failed to write to PEB %d", new_pnum); | ||
791 | } | ||
648 | 792 | ||
649 | out_unlock: | ||
650 | mutex_unlock(&ubi->buf_mutex); | ||
651 | out_put: | ||
652 | ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); | ||
653 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
654 | return err; | 793 | return err; |
794 | } | ||
655 | 795 | ||
656 | write_error: | 796 | /** |
657 | /* | 797 | * recover_peb - recover from write failure. |
658 | * Bad luck? This physical eraseblock is bad too? Crud. Let's try to | 798 | * @ubi: UBI device description object |
659 | * get another one. | 799 | * @pnum: the physical eraseblock to recover |
660 | */ | 800 | * @vol_id: volume ID |
661 | ubi_warn(ubi, "failed to write to PEB %d", new_pnum); | 801 | * @lnum: logical eraseblock number |
662 | ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); | 802 | * @buf: data which was not written because of the write failure |
663 | if (++tries > UBI_IO_RETRIES) { | 803 | * @offset: offset of the failed write |
664 | ubi_free_vid_hdr(ubi, vid_hdr); | 804 | * @len: how many bytes should have been written |
665 | return err; | 805 | * |
806 | * This function is called in case of a write failure and moves all good data | ||
807 | * from the potentially bad physical eraseblock to a good physical eraseblock. | ||
808 | * This function also writes the data which was not written due to the failure. | ||
809 | * Returns 0 in case of success, and a negative error code in case of failure. | ||
810 | * This function tries %UBI_IO_RETRIES before giving up. | ||
811 | */ | ||
812 | static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | ||
813 | const void *buf, int offset, int len) | ||
814 | { | ||
815 | int err, idx = vol_id2idx(ubi, vol_id), tries; | ||
816 | struct ubi_volume *vol = ubi->volumes[idx]; | ||
817 | struct ubi_vid_io_buf *vidb; | ||
818 | |||
819 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); | ||
820 | if (!vidb) | ||
821 | return -ENOMEM; | ||
822 | |||
823 | for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { | ||
824 | bool retry; | ||
825 | |||
826 | err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb, | ||
827 | &retry); | ||
828 | if (!err || !retry) | ||
829 | break; | ||
830 | |||
831 | ubi_msg(ubi, "try again"); | ||
666 | } | 832 | } |
667 | ubi_msg(ubi, "try again"); | 833 | |
668 | goto retry; | 834 | ubi_free_vid_buf(vidb); |
835 | |||
836 | return err; | ||
837 | } | ||
838 | |||
839 | /** | ||
840 | * try_write_vid_and_data - try to write VID header and data to a new PEB. | ||
841 | * @vol: volume description object | ||
842 | * @lnum: logical eraseblock number | ||
843 | * @vidb: the VID buffer to write | ||
844 | * @buf: buffer containing the data | ||
845 | * @offset: where to start writing data | ||
846 | * @len: how many bytes should be written | ||
847 | * | ||
848 | * This function tries to write VID header and data belonging to logical | ||
849 | * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero | ||
850 | * in case of success and a negative error code in case of failure. | ||
851 | * In case of error, it is possible that something was still written to the | ||
852 | * flash media, but may be some garbage. | ||
853 | */ | ||
854 | static int try_write_vid_and_data(struct ubi_volume *vol, int lnum, | ||
855 | struct ubi_vid_io_buf *vidb, const void *buf, | ||
856 | int offset, int len) | ||
857 | { | ||
858 | struct ubi_device *ubi = vol->ubi; | ||
859 | int pnum, opnum, err, vol_id = vol->vol_id; | ||
860 | |||
861 | pnum = ubi_wl_get_peb(ubi); | ||
862 | if (pnum < 0) { | ||
863 | err = pnum; | ||
864 | goto out_put; | ||
865 | } | ||
866 | |||
867 | opnum = vol->eba_tbl->entries[lnum].pnum; | ||
868 | |||
869 | dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", | ||
870 | len, offset, vol_id, lnum, pnum); | ||
871 | |||
872 | err = ubi_io_write_vid_hdr(ubi, pnum, vidb); | ||
873 | if (err) { | ||
874 | ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", | ||
875 | vol_id, lnum, pnum); | ||
876 | goto out_put; | ||
877 | } | ||
878 | |||
879 | if (len) { | ||
880 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | ||
881 | if (err) { | ||
882 | ubi_warn(ubi, | ||
883 | "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", | ||
884 | len, offset, vol_id, lnum, pnum); | ||
885 | goto out_put; | ||
886 | } | ||
887 | } | ||
888 | |||
889 | vol->eba_tbl->entries[lnum].pnum = pnum; | ||
890 | |||
891 | out_put: | ||
892 | up_read(&ubi->fm_eba_sem); | ||
893 | |||
894 | if (err && pnum >= 0) | ||
895 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); | ||
896 | else if (!err && opnum >= 0) | ||
897 | err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0); | ||
898 | |||
899 | return err; | ||
669 | } | 900 | } |
670 | 901 | ||
671 | /** | 902 | /** |
@@ -681,11 +912,13 @@ write_error: | |||
681 | * @vol. Returns zero in case of success and a negative error code in case | 912 | * @vol. Returns zero in case of success and a negative error code in case |
682 | * of failure. In case of error, it is possible that something was still | 913 | * of failure. In case of error, it is possible that something was still |
683 | * written to the flash media, but may be some garbage. | 914 | * written to the flash media, but may be some garbage. |
915 | * This function retries %UBI_IO_RETRIES times before giving up. | ||
684 | */ | 916 | */ |
685 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 917 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
686 | const void *buf, int offset, int len) | 918 | const void *buf, int offset, int len) |
687 | { | 919 | { |
688 | int err, pnum, tries = 0, vol_id = vol->vol_id; | 920 | int err, pnum, tries, vol_id = vol->vol_id; |
921 | struct ubi_vid_io_buf *vidb; | ||
689 | struct ubi_vid_hdr *vid_hdr; | 922 | struct ubi_vid_hdr *vid_hdr; |
690 | 923 | ||
691 | if (ubi->ro_mode) | 924 | if (ubi->ro_mode) |
@@ -695,7 +928,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
695 | if (err) | 928 | if (err) |
696 | return err; | 929 | return err; |
697 | 930 | ||
698 | pnum = vol->eba_tbl[lnum]; | 931 | pnum = vol->eba_tbl->entries[lnum].pnum; |
699 | if (pnum >= 0) { | 932 | if (pnum >= 0) { |
700 | dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", | 933 | dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", |
701 | len, offset, vol_id, lnum, pnum); | 934 | len, offset, vol_id, lnum, pnum); |
@@ -706,23 +939,23 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
706 | if (err == -EIO && ubi->bad_allowed) | 939 | if (err == -EIO && ubi->bad_allowed) |
707 | err = recover_peb(ubi, pnum, vol_id, lnum, buf, | 940 | err = recover_peb(ubi, pnum, vol_id, lnum, buf, |
708 | offset, len); | 941 | offset, len); |
709 | if (err) | ||
710 | ubi_ro_mode(ubi); | ||
711 | } | 942 | } |
712 | leb_write_unlock(ubi, vol_id, lnum); | 943 | |
713 | return err; | 944 | goto out; |
714 | } | 945 | } |
715 | 946 | ||
716 | /* | 947 | /* |
717 | * The logical eraseblock is not mapped. We have to get a free physical | 948 | * The logical eraseblock is not mapped. We have to get a free physical |
718 | * eraseblock and write the volume identifier header there first. | 949 | * eraseblock and write the volume identifier header there first. |
719 | */ | 950 | */ |
720 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 951 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
721 | if (!vid_hdr) { | 952 | if (!vidb) { |
722 | leb_write_unlock(ubi, vol_id, lnum); | 953 | leb_write_unlock(ubi, vol_id, lnum); |
723 | return -ENOMEM; | 954 | return -ENOMEM; |
724 | } | 955 | } |
725 | 956 | ||
957 | vid_hdr = ubi_get_vid_hdr(vidb); | ||
958 | |||
726 | vid_hdr->vol_type = UBI_VID_DYNAMIC; | 959 | vid_hdr->vol_type = UBI_VID_DYNAMIC; |
727 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 960 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
728 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 961 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
@@ -730,67 +963,30 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
730 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); | 963 | vid_hdr->compat = ubi_get_compat(ubi, vol_id); |
731 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); | 964 | vid_hdr->data_pad = cpu_to_be32(vol->data_pad); |
732 | 965 | ||
733 | retry: | 966 | for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { |
734 | pnum = ubi_wl_get_peb(ubi); | 967 | err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len); |
735 | if (pnum < 0) { | 968 | if (err != -EIO || !ubi->bad_allowed) |
736 | ubi_free_vid_hdr(ubi, vid_hdr); | 969 | break; |
737 | leb_write_unlock(ubi, vol_id, lnum); | ||
738 | up_read(&ubi->fm_eba_sem); | ||
739 | return pnum; | ||
740 | } | ||
741 | |||
742 | dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", | ||
743 | len, offset, vol_id, lnum, pnum); | ||
744 | |||
745 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | ||
746 | if (err) { | ||
747 | ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", | ||
748 | vol_id, lnum, pnum); | ||
749 | up_read(&ubi->fm_eba_sem); | ||
750 | goto write_error; | ||
751 | } | ||
752 | 970 | ||
753 | if (len) { | 971 | /* |
754 | err = ubi_io_write_data(ubi, buf, pnum, offset, len); | 972 | * Fortunately, this is the first write operation to this |
755 | if (err) { | 973 | * physical eraseblock, so just put it and request a new one. |
756 | ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", | 974 | * We assume that if this physical eraseblock went bad, the |
757 | len, offset, vol_id, lnum, pnum); | 975 | * erase code will handle that. |
758 | up_read(&ubi->fm_eba_sem); | 976 | */ |
759 | goto write_error; | 977 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
760 | } | 978 | ubi_msg(ubi, "try another PEB"); |
761 | } | 979 | } |
762 | 980 | ||
763 | vol->eba_tbl[lnum] = pnum; | 981 | ubi_free_vid_buf(vidb); |
764 | up_read(&ubi->fm_eba_sem); | ||
765 | |||
766 | leb_write_unlock(ubi, vol_id, lnum); | ||
767 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
768 | return 0; | ||
769 | 982 | ||
770 | write_error: | 983 | out: |
771 | if (err != -EIO || !ubi->bad_allowed) { | 984 | if (err) |
772 | ubi_ro_mode(ubi); | 985 | ubi_ro_mode(ubi); |
773 | leb_write_unlock(ubi, vol_id, lnum); | ||
774 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
775 | return err; | ||
776 | } | ||
777 | 986 | ||
778 | /* | 987 | leb_write_unlock(ubi, vol_id, lnum); |
779 | * Fortunately, this is the first write operation to this physical | ||
780 | * eraseblock, so just put it and request a new one. We assume that if | ||
781 | * this physical eraseblock went bad, the erase code will handle that. | ||
782 | */ | ||
783 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); | ||
784 | if (err || ++tries > UBI_IO_RETRIES) { | ||
785 | ubi_ro_mode(ubi); | ||
786 | leb_write_unlock(ubi, vol_id, lnum); | ||
787 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
788 | return err; | ||
789 | } | ||
790 | 988 | ||
791 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 989 | return err; |
792 | ubi_msg(ubi, "try another PEB"); | ||
793 | goto retry; | ||
794 | } | 990 | } |
795 | 991 | ||
796 | /** | 992 | /** |
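
The hunk above folds the old retry:/goto write_error paths into one bounded loop around try_write_vid_and_data(), with the read-only fallback applied once at the end. A minimal standalone sketch of that retry shape follows; the types and the write stub are made up for illustration and are not the kernel code (UBI_IO_RETRIES is given a placeholder value here).

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define UBI_IO_RETRIES 3                /* placeholder value for illustration */

/* Stub standing in for try_write_vid_and_data(): fail with -EIO twice. */
static int try_write(int attempt)
{
        return (attempt < 2) ? -EIO : 0;
}

static int write_with_retries(bool bad_allowed)
{
        int err = 0, tries;

        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
                err = try_write(tries);
                if (err != -EIO || !bad_allowed)
                        break;          /* fatal error, or no bad-block handling */
                /* the kernel refreshes the VID sequence number here; the stub
                 * simply retries with another attempt */
                printf("try another PEB\n");
        }

        return err;                     /* caller switches to read-only if still set */
}

int main(void)
{
        printf("result: %d\n", write_with_retries(true));
        return 0;
}
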
@@ -818,7 +1014,8 @@ write_error: | |||
818 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | 1014 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, |
819 | int lnum, const void *buf, int len, int used_ebs) | 1015 | int lnum, const void *buf, int len, int used_ebs) |
820 | { | 1016 | { |
821 | int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; | 1017 | int err, tries, data_size = len, vol_id = vol->vol_id; |
1018 | struct ubi_vid_io_buf *vidb; | ||
822 | struct ubi_vid_hdr *vid_hdr; | 1019 | struct ubi_vid_hdr *vid_hdr; |
823 | uint32_t crc; | 1020 | uint32_t crc; |
824 | 1021 | ||
@@ -831,15 +1028,15 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | |||
831 | else | 1028 | else |
832 | ubi_assert(!(len & (ubi->min_io_size - 1))); | 1029 | ubi_assert(!(len & (ubi->min_io_size - 1))); |
833 | 1030 | ||
834 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 1031 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
835 | if (!vid_hdr) | 1032 | if (!vidb) |
836 | return -ENOMEM; | 1033 | return -ENOMEM; |
837 | 1034 | ||
1035 | vid_hdr = ubi_get_vid_hdr(vidb); | ||
1036 | |||
838 | err = leb_write_lock(ubi, vol_id, lnum); | 1037 | err = leb_write_lock(ubi, vol_id, lnum); |
839 | if (err) { | 1038 | if (err) |
840 | ubi_free_vid_hdr(ubi, vid_hdr); | 1039 | goto out; |
841 | return err; | ||
842 | } | ||
843 | 1040 | ||
844 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 1041 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
845 | vid_hdr->vol_id = cpu_to_be32(vol_id); | 1042 | vid_hdr->vol_id = cpu_to_be32(vol_id); |
@@ -853,66 +1050,26 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | |||
853 | vid_hdr->used_ebs = cpu_to_be32(used_ebs); | 1050 | vid_hdr->used_ebs = cpu_to_be32(used_ebs); |
854 | vid_hdr->data_crc = cpu_to_be32(crc); | 1051 | vid_hdr->data_crc = cpu_to_be32(crc); |
855 | 1052 | ||
856 | retry: | 1053 | ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0); |
857 | pnum = ubi_wl_get_peb(ubi); | ||
858 | if (pnum < 0) { | ||
859 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
860 | leb_write_unlock(ubi, vol_id, lnum); | ||
861 | up_read(&ubi->fm_eba_sem); | ||
862 | return pnum; | ||
863 | } | ||
864 | 1054 | ||
865 | dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", | 1055 | for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { |
866 | len, vol_id, lnum, pnum, used_ebs); | 1056 | err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); |
1057 | if (err != -EIO || !ubi->bad_allowed) | ||
1058 | break; | ||
867 | 1059 | ||
868 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | 1060 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
869 | if (err) { | 1061 | ubi_msg(ubi, "try another PEB"); |
870 | ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", | ||
871 | vol_id, lnum, pnum); | ||
872 | up_read(&ubi->fm_eba_sem); | ||
873 | goto write_error; | ||
874 | } | 1062 | } |
875 | 1063 | ||
876 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | 1064 | if (err) |
877 | if (err) { | 1065 | ubi_ro_mode(ubi); |
878 | ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", | ||
879 | len, pnum); | ||
880 | up_read(&ubi->fm_eba_sem); | ||
881 | goto write_error; | ||
882 | } | ||
883 | |||
884 | ubi_assert(vol->eba_tbl[lnum] < 0); | ||
885 | vol->eba_tbl[lnum] = pnum; | ||
886 | up_read(&ubi->fm_eba_sem); | ||
887 | 1066 | ||
888 | leb_write_unlock(ubi, vol_id, lnum); | 1067 | leb_write_unlock(ubi, vol_id, lnum); |
889 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
890 | return 0; | ||
891 | |||
892 | write_error: | ||
893 | if (err != -EIO || !ubi->bad_allowed) { | ||
894 | /* | ||
895 | * This flash device does not admit of bad eraseblocks or | ||
896 | * something nasty and unexpected happened. Switch to read-only | ||
897 | * mode just in case. | ||
898 | */ | ||
899 | ubi_ro_mode(ubi); | ||
900 | leb_write_unlock(ubi, vol_id, lnum); | ||
901 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
902 | return err; | ||
903 | } | ||
904 | 1068 | ||
905 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); | 1069 | out: |
906 | if (err || ++tries > UBI_IO_RETRIES) { | 1070 | ubi_free_vid_buf(vidb); |
907 | ubi_ro_mode(ubi); | ||
908 | leb_write_unlock(ubi, vol_id, lnum); | ||
909 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
910 | return err; | ||
911 | } | ||
912 | 1071 | ||
913 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 1072 | return err; |
914 | ubi_msg(ubi, "try another PEB"); | ||
915 | goto retry; | ||
916 | } | 1073 | } |
917 | 1074 | ||
918 | /* | 1075 | /* |
@@ -935,7 +1092,8 @@ write_error: | |||
935 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | 1092 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
936 | int lnum, const void *buf, int len) | 1093 | int lnum, const void *buf, int len) |
937 | { | 1094 | { |
938 | int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id; | 1095 | int err, tries, vol_id = vol->vol_id; |
1096 | struct ubi_vid_io_buf *vidb; | ||
939 | struct ubi_vid_hdr *vid_hdr; | 1097 | struct ubi_vid_hdr *vid_hdr; |
940 | uint32_t crc; | 1098 | uint32_t crc; |
941 | 1099 | ||
@@ -953,10 +1111,12 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | |||
953 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); | 1111 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); |
954 | } | 1112 | } |
955 | 1113 | ||
956 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 1114 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
957 | if (!vid_hdr) | 1115 | if (!vidb) |
958 | return -ENOMEM; | 1116 | return -ENOMEM; |
959 | 1117 | ||
1118 | vid_hdr = ubi_get_vid_hdr(vidb); | ||
1119 | |||
960 | mutex_lock(&ubi->alc_mutex); | 1120 | mutex_lock(&ubi->alc_mutex); |
961 | err = leb_write_lock(ubi, vol_id, lnum); | 1121 | err = leb_write_lock(ubi, vol_id, lnum); |
962 | if (err) | 1122 | if (err) |
@@ -974,70 +1134,31 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | |||
974 | vid_hdr->copy_flag = 1; | 1134 | vid_hdr->copy_flag = 1; |
975 | vid_hdr->data_crc = cpu_to_be32(crc); | 1135 | vid_hdr->data_crc = cpu_to_be32(crc); |
976 | 1136 | ||
977 | retry: | 1137 | dbg_eba("change LEB %d:%d", vol_id, lnum); |
978 | pnum = ubi_wl_get_peb(ubi); | ||
979 | if (pnum < 0) { | ||
980 | err = pnum; | ||
981 | up_read(&ubi->fm_eba_sem); | ||
982 | goto out_leb_unlock; | ||
983 | } | ||
984 | |||
985 | dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", | ||
986 | vol_id, lnum, vol->eba_tbl[lnum], pnum); | ||
987 | 1138 | ||
988 | err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); | 1139 | for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { |
989 | if (err) { | 1140 | err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); |
990 | ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", | 1141 | if (err != -EIO || !ubi->bad_allowed) |
991 | vol_id, lnum, pnum); | 1142 | break; |
992 | up_read(&ubi->fm_eba_sem); | ||
993 | goto write_error; | ||
994 | } | ||
995 | 1143 | ||
996 | err = ubi_io_write_data(ubi, buf, pnum, 0, len); | 1144 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
997 | if (err) { | 1145 | ubi_msg(ubi, "try another PEB"); |
998 | ubi_warn(ubi, "failed to write %d bytes of data to PEB %d", | ||
999 | len, pnum); | ||
1000 | up_read(&ubi->fm_eba_sem); | ||
1001 | goto write_error; | ||
1002 | } | 1146 | } |
1003 | 1147 | ||
1004 | old_pnum = vol->eba_tbl[lnum]; | 1148 | /* |
1005 | vol->eba_tbl[lnum] = pnum; | 1149 | * This flash device does not admit of bad eraseblocks or |
1006 | up_read(&ubi->fm_eba_sem); | 1150 | * something nasty and unexpected happened. Switch to read-only |
1007 | 1151 | * mode just in case. | |
1008 | if (old_pnum >= 0) { | 1152 | */ |
1009 | err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0); | 1153 | if (err) |
1010 | if (err) | 1154 | ubi_ro_mode(ubi); |
1011 | goto out_leb_unlock; | ||
1012 | } | ||
1013 | 1155 | ||
1014 | out_leb_unlock: | ||
1015 | leb_write_unlock(ubi, vol_id, lnum); | 1156 | leb_write_unlock(ubi, vol_id, lnum); |
1157 | |||
1016 | out_mutex: | 1158 | out_mutex: |
1017 | mutex_unlock(&ubi->alc_mutex); | 1159 | mutex_unlock(&ubi->alc_mutex); |
1018 | ubi_free_vid_hdr(ubi, vid_hdr); | 1160 | ubi_free_vid_buf(vidb); |
1019 | return err; | 1161 | return err; |
1020 | |||
1021 | write_error: | ||
1022 | if (err != -EIO || !ubi->bad_allowed) { | ||
1023 | /* | ||
1024 | * This flash device does not admit of bad eraseblocks or | ||
1025 | * something nasty and unexpected happened. Switch to read-only | ||
1026 | * mode just in case. | ||
1027 | */ | ||
1028 | ubi_ro_mode(ubi); | ||
1029 | goto out_leb_unlock; | ||
1030 | } | ||
1031 | |||
1032 | err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); | ||
1033 | if (err || ++tries > UBI_IO_RETRIES) { | ||
1034 | ubi_ro_mode(ubi); | ||
1035 | goto out_leb_unlock; | ||
1036 | } | ||
1037 | |||
1038 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
1039 | ubi_msg(ubi, "try another PEB"); | ||
1040 | goto retry; | ||
1041 | } | 1162 | } |
1042 | 1163 | ||
1043 | /** | 1164 | /** |
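
ubi_eba_atomic_leb_change() keeps its alc_mutex/LEB-lock nesting but now unwinds through the single out_mutex label shown above. Below is a standalone sketch of that goto-based cleanup ordering, using pthread mutexes as rough stand-ins for the kernel primitives; it is not the kernel locking code.

#include <pthread.h>

static pthread_mutex_t alc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t leb_lock  = PTHREAD_MUTEX_INITIALIZER;

static int do_change(void) { return 0; }        /* stand-in for the write loop */

static int atomic_leb_change(void)
{
        int err;

        pthread_mutex_lock(&alc_mutex);
        err = pthread_mutex_trylock(&leb_lock); /* rough stand-in for leb_write_lock();
                                                 * non-zero means we failed to take it */
        if (err)
                goto out_mutex;                 /* never drop a lock we never took */

        err = do_change();

        pthread_mutex_unlock(&leb_lock);
out_mutex:
        pthread_mutex_unlock(&alc_mutex);
        return err;
}
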
@@ -1082,12 +1203,15 @@ static int is_error_sane(int err) | |||
1082 | * o a negative error code in case of failure. | 1203 | * o a negative error code in case of failure. |
1083 | */ | 1204 | */ |
1084 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 1205 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
1085 | struct ubi_vid_hdr *vid_hdr) | 1206 | struct ubi_vid_io_buf *vidb) |
1086 | { | 1207 | { |
1087 | int err, vol_id, lnum, data_size, aldata_size, idx; | 1208 | int err, vol_id, lnum, data_size, aldata_size, idx; |
1209 | struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); | ||
1088 | struct ubi_volume *vol; | 1210 | struct ubi_volume *vol; |
1089 | uint32_t crc; | 1211 | uint32_t crc; |
1090 | 1212 | ||
1213 | ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); | ||
1214 | |||
1091 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 1215 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
1092 | lnum = be32_to_cpu(vid_hdr->lnum); | 1216 | lnum = be32_to_cpu(vid_hdr->lnum); |
1093 | 1217 | ||
@@ -1142,9 +1266,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1142 | * probably waiting on @ubi->move_mutex. No need to continue the work, | 1266 | * probably waiting on @ubi->move_mutex. No need to continue the work, |
1143 | * cancel it. | 1267 | * cancel it. |
1144 | */ | 1268 | */ |
1145 | if (vol->eba_tbl[lnum] != from) { | 1269 | if (vol->eba_tbl->entries[lnum].pnum != from) { |
1146 | dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel", | 1270 | dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel", |
1147 | vol_id, lnum, from, vol->eba_tbl[lnum]); | 1271 | vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum); |
1148 | err = MOVE_CANCEL_RACE; | 1272 | err = MOVE_CANCEL_RACE; |
1149 | goto out_unlock_leb; | 1273 | goto out_unlock_leb; |
1150 | } | 1274 | } |
@@ -1196,7 +1320,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1196 | } | 1320 | } |
1197 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 1321 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
1198 | 1322 | ||
1199 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); | 1323 | err = ubi_io_write_vid_hdr(ubi, to, vidb); |
1200 | if (err) { | 1324 | if (err) { |
1201 | if (err == -EIO) | 1325 | if (err == -EIO) |
1202 | err = MOVE_TARGET_WR_ERR; | 1326 | err = MOVE_TARGET_WR_ERR; |
@@ -1206,7 +1330,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1206 | cond_resched(); | 1330 | cond_resched(); |
1207 | 1331 | ||
1208 | /* Read the VID header back and check if it was written correctly */ | 1332 | /* Read the VID header back and check if it was written correctly */ |
1209 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); | 1333 | err = ubi_io_read_vid_hdr(ubi, to, vidb, 1); |
1210 | if (err) { | 1334 | if (err) { |
1211 | if (err != UBI_IO_BITFLIPS) { | 1335 | if (err != UBI_IO_BITFLIPS) { |
1212 | ubi_warn(ubi, "error %d while reading VID header back from PEB %d", | 1336 | ubi_warn(ubi, "error %d while reading VID header back from PEB %d", |
@@ -1229,10 +1353,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
1229 | cond_resched(); | 1353 | cond_resched(); |
1230 | } | 1354 | } |
1231 | 1355 | ||
1232 | ubi_assert(vol->eba_tbl[lnum] == from); | 1356 | ubi_assert(vol->eba_tbl->entries[lnum].pnum == from); |
1233 | down_read(&ubi->fm_eba_sem); | 1357 | vol->eba_tbl->entries[lnum].pnum = to; |
1234 | vol->eba_tbl[lnum] = to; | ||
1235 | up_read(&ubi->fm_eba_sem); | ||
1236 | 1358 | ||
1237 | out_unlock_buf: | 1359 | out_unlock_buf: |
1238 | mutex_unlock(&ubi->buf_mutex); | 1360 | mutex_unlock(&ubi->buf_mutex); |
@@ -1388,7 +1510,7 @@ out_free: | |||
1388 | */ | 1510 | */ |
1389 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1511 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1390 | { | 1512 | { |
1391 | int i, j, err, num_volumes; | 1513 | int i, err, num_volumes; |
1392 | struct ubi_ainf_volume *av; | 1514 | struct ubi_ainf_volume *av; |
1393 | struct ubi_volume *vol; | 1515 | struct ubi_volume *vol; |
1394 | struct ubi_ainf_peb *aeb; | 1516 | struct ubi_ainf_peb *aeb; |
@@ -1404,35 +1526,39 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1404 | num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; | 1526 | num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; |
1405 | 1527 | ||
1406 | for (i = 0; i < num_volumes; i++) { | 1528 | for (i = 0; i < num_volumes; i++) { |
1529 | struct ubi_eba_table *tbl; | ||
1530 | |||
1407 | vol = ubi->volumes[i]; | 1531 | vol = ubi->volumes[i]; |
1408 | if (!vol) | 1532 | if (!vol) |
1409 | continue; | 1533 | continue; |
1410 | 1534 | ||
1411 | cond_resched(); | 1535 | cond_resched(); |
1412 | 1536 | ||
1413 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), | 1537 | tbl = ubi_eba_create_table(vol, vol->reserved_pebs); |
1414 | GFP_KERNEL); | 1538 | if (IS_ERR(tbl)) { |
1415 | if (!vol->eba_tbl) { | 1539 | err = PTR_ERR(tbl); |
1416 | err = -ENOMEM; | ||
1417 | goto out_free; | 1540 | goto out_free; |
1418 | } | 1541 | } |
1419 | 1542 | ||
1420 | for (j = 0; j < vol->reserved_pebs; j++) | 1543 | ubi_eba_replace_table(vol, tbl); |
1421 | vol->eba_tbl[j] = UBI_LEB_UNMAPPED; | ||
1422 | 1544 | ||
1423 | av = ubi_find_av(ai, idx2vol_id(ubi, i)); | 1545 | av = ubi_find_av(ai, idx2vol_id(ubi, i)); |
1424 | if (!av) | 1546 | if (!av) |
1425 | continue; | 1547 | continue; |
1426 | 1548 | ||
1427 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { | 1549 | ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { |
1428 | if (aeb->lnum >= vol->reserved_pebs) | 1550 | if (aeb->lnum >= vol->reserved_pebs) { |
1429 | /* | 1551 | /* |
1430 | * This may happen in case of an unclean reboot | 1552 | * This may happen in case of an unclean reboot |
1431 | * during re-size. | 1553 | * during re-size. |
1432 | */ | 1554 | */ |
1433 | ubi_move_aeb_to_list(av, aeb, &ai->erase); | 1555 | ubi_move_aeb_to_list(av, aeb, &ai->erase); |
1434 | else | 1556 | } else { |
1435 | vol->eba_tbl[aeb->lnum] = aeb->pnum; | 1557 | struct ubi_eba_entry *entry; |
1558 | |||
1559 | entry = &vol->eba_tbl->entries[aeb->lnum]; | ||
1560 | entry->pnum = aeb->pnum; | ||
1561 | } | ||
1436 | } | 1562 | } |
1437 | } | 1563 | } |
1438 | 1564 | ||
@@ -1469,8 +1595,7 @@ out_free: | |||
1469 | for (i = 0; i < num_volumes; i++) { | 1595 | for (i = 0; i < num_volumes; i++) { |
1470 | if (!ubi->volumes[i]) | 1596 | if (!ubi->volumes[i]) |
1471 | continue; | 1597 | continue; |
1472 | kfree(ubi->volumes[i]->eba_tbl); | 1598 | ubi_eba_replace_table(ubi->volumes[i], NULL); |
1473 | ubi->volumes[i]->eba_tbl = NULL; | ||
1474 | } | 1599 | } |
1475 | return err; | 1600 | return err; |
1476 | } | 1601 | } |
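
The eba.c changes stop exposing a bare int *eba_tbl and route accesses through a table object created by ubi_eba_create_table() and installed with ubi_eba_replace_table(). Below is a simplified standalone model of that layout; the real structures and helpers carry more state, so treat this as a sketch rather than the kernel implementation.

#include <stdlib.h>

#define LEB_UNMAPPED -1

/* Toy equivalents of the new EBA table objects. */
struct eba_entry { int pnum; };
struct eba_table { struct eba_entry *entries; int nentries; };
struct volume    { struct eba_table *eba_tbl; int reserved_pebs; };

static struct eba_table *eba_create_table(int nentries)
{
        struct eba_table *tbl = malloc(sizeof(*tbl));
        int i;

        if (!tbl)
                return NULL;

        tbl->entries = malloc(nentries * sizeof(*tbl->entries));
        if (!tbl->entries) {
                free(tbl);
                return NULL;
        }

        for (i = 0; i < nentries; i++)
                tbl->entries[i].pnum = LEB_UNMAPPED;
        tbl->nentries = nentries;

        return tbl;
}

/* Install a new table and drop the old one, mirroring how ubi_eba_init()
 * now calls ubi_eba_replace_table() instead of freeing an int array. */
static void eba_replace_table(struct volume *vol, struct eba_table *tbl)
{
        if (vol->eba_tbl) {
                free(vol->eba_tbl->entries);
                free(vol->eba_tbl);
        }
        vol->eba_tbl = tbl;
}
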
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 30d3999dddba..4f0bd6b4422a 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c | |||
@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) | |||
262 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; | 262 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; |
263 | int pnum; | 263 | int pnum; |
264 | 264 | ||
265 | ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); | ||
266 | |||
265 | if (pool->used == pool->size) { | 267 | if (pool->used == pool->size) { |
266 | /* We cannot update the fastmap here because this | 268 | /* We cannot update the fastmap here because this |
267 | * function is called in atomic context. | 269 | * function is called in atomic context. |
@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) | |||
303 | 305 | ||
304 | wrk->anchor = 1; | 306 | wrk->anchor = 1; |
305 | wrk->func = &wear_leveling_worker; | 307 | wrk->func = &wear_leveling_worker; |
306 | schedule_ubi_work(ubi, wrk); | 308 | __schedule_ubi_work(ubi, wrk); |
307 | return 0; | 309 | return 0; |
308 | } | 310 | } |
309 | 311 | ||
@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, | |||
344 | spin_unlock(&ubi->wl_lock); | 346 | spin_unlock(&ubi->wl_lock); |
345 | 347 | ||
346 | vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; | 348 | vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; |
347 | return schedule_erase(ubi, e, vol_id, lnum, torture); | 349 | return schedule_erase(ubi, e, vol_id, lnum, torture, true); |
348 | } | 350 | } |
349 | 351 | ||
350 | /** | 352 | /** |
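
The functional additions in fastmap-wl.c are the fm_eba_sem assertion in get_peb_for_wl() and the switch to __schedule_ubi_work() plus the extra schedule_erase() argument. The assertion pattern itself is the interesting part; here is a hypothetical, stripped-down analogue that uses a plain flag instead of rwsem_is_locked().

#include <assert.h>
#include <stdbool.h>

/* Toy device: callers must hold the "EBA" lock before taking a PEB from the
 * wear-leveling pool; the kernel states this with ubi_assert(rwsem_is_locked()). */
struct dev { bool eba_locked; int next_free_peb; };

static int get_peb_for_wl(struct dev *d)
{
        assert(d->eba_locked);          /* document and enforce the locking rule */
        return d->next_free_peb++;
}
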
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 48eb55f344eb..d6384d965788 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c | |||
@@ -110,21 +110,23 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi) | |||
110 | * Returns a new struct ubi_vid_hdr on success. | 110 | * Returns a new struct ubi_vid_hdr on success. |
111 | * NULL indicates out of memory. | 111 | * NULL indicates out of memory. |
112 | */ | 112 | */ |
113 | static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id) | 113 | static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id) |
114 | { | 114 | { |
115 | struct ubi_vid_hdr *new; | 115 | struct ubi_vid_io_buf *new; |
116 | struct ubi_vid_hdr *vh; | ||
116 | 117 | ||
117 | new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 118 | new = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
118 | if (!new) | 119 | if (!new) |
119 | goto out; | 120 | goto out; |
120 | 121 | ||
121 | new->vol_type = UBI_VID_DYNAMIC; | 122 | vh = ubi_get_vid_hdr(new); |
122 | new->vol_id = cpu_to_be32(vol_id); | 123 | vh->vol_type = UBI_VID_DYNAMIC; |
124 | vh->vol_id = cpu_to_be32(vol_id); | ||
123 | 125 | ||
124 | /* UBI implementations without fastmap support have to delete the | 126 | /* UBI implementations without fastmap support have to delete the |
125 | * fastmap. | 127 | * fastmap. |
126 | */ | 128 | */ |
127 | new->compat = UBI_COMPAT_DELETE; | 129 | vh->compat = UBI_COMPAT_DELETE; |
128 | 130 | ||
129 | out: | 131 | out: |
130 | return new; | 132 | return new; |
@@ -145,12 +147,10 @@ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list, | |||
145 | { | 147 | { |
146 | struct ubi_ainf_peb *aeb; | 148 | struct ubi_ainf_peb *aeb; |
147 | 149 | ||
148 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); | 150 | aeb = ubi_alloc_aeb(ai, pnum, ec); |
149 | if (!aeb) | 151 | if (!aeb) |
150 | return -ENOMEM; | 152 | return -ENOMEM; |
151 | 153 | ||
152 | aeb->pnum = pnum; | ||
153 | aeb->ec = ec; | ||
154 | aeb->lnum = -1; | 154 | aeb->lnum = -1; |
155 | aeb->scrub = scrub; | 155 | aeb->scrub = scrub; |
156 | aeb->copy_flag = aeb->sqnum = 0; | 156 | aeb->copy_flag = aeb->sqnum = 0; |
@@ -186,40 +186,19 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, | |||
186 | int last_eb_bytes) | 186 | int last_eb_bytes) |
187 | { | 187 | { |
188 | struct ubi_ainf_volume *av; | 188 | struct ubi_ainf_volume *av; |
189 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | ||
190 | |||
191 | while (*p) { | ||
192 | parent = *p; | ||
193 | av = rb_entry(parent, struct ubi_ainf_volume, rb); | ||
194 | |||
195 | if (vol_id > av->vol_id) | ||
196 | p = &(*p)->rb_left; | ||
197 | else if (vol_id < av->vol_id) | ||
198 | p = &(*p)->rb_right; | ||
199 | else | ||
200 | return ERR_PTR(-EINVAL); | ||
201 | } | ||
202 | 189 | ||
203 | av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL); | 190 | av = ubi_add_av(ai, vol_id); |
204 | if (!av) | 191 | if (IS_ERR(av)) |
205 | goto out; | 192 | return av; |
206 | 193 | ||
207 | av->highest_lnum = av->leb_count = av->used_ebs = 0; | ||
208 | av->vol_id = vol_id; | ||
209 | av->data_pad = data_pad; | 194 | av->data_pad = data_pad; |
210 | av->last_data_size = last_eb_bytes; | 195 | av->last_data_size = last_eb_bytes; |
211 | av->compat = 0; | 196 | av->compat = 0; |
212 | av->vol_type = vol_type; | 197 | av->vol_type = vol_type; |
213 | av->root = RB_ROOT; | ||
214 | if (av->vol_type == UBI_STATIC_VOLUME) | 198 | if (av->vol_type == UBI_STATIC_VOLUME) |
215 | av->used_ebs = used_ebs; | 199 | av->used_ebs = used_ebs; |
216 | 200 | ||
217 | dbg_bld("found volume (ID %i)", vol_id); | 201 | dbg_bld("found volume (ID %i)", vol_id); |
218 | |||
219 | rb_link_node(&av->rb, parent, p); | ||
220 | rb_insert_color(&av->rb, &ai->volumes); | ||
221 | |||
222 | out: | ||
223 | return av; | 202 | return av; |
224 | } | 203 | } |
225 | 204 | ||
@@ -297,7 +276,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
297 | */ | 276 | */ |
298 | if (aeb->pnum == new_aeb->pnum) { | 277 | if (aeb->pnum == new_aeb->pnum) { |
299 | ubi_assert(aeb->lnum == new_aeb->lnum); | 278 | ubi_assert(aeb->lnum == new_aeb->lnum); |
300 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | 279 | ubi_free_aeb(ai, new_aeb); |
301 | 280 | ||
302 | return 0; | 281 | return 0; |
303 | } | 282 | } |
@@ -308,13 +287,10 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
308 | 287 | ||
309 | /* new_aeb is newer */ | 288 | /* new_aeb is newer */ |
310 | if (cmp_res & 1) { | 289 | if (cmp_res & 1) { |
311 | victim = kmem_cache_alloc(ai->aeb_slab_cache, | 290 | victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec); |
312 | GFP_KERNEL); | ||
313 | if (!victim) | 291 | if (!victim) |
314 | return -ENOMEM; | 292 | return -ENOMEM; |
315 | 293 | ||
316 | victim->ec = aeb->ec; | ||
317 | victim->pnum = aeb->pnum; | ||
318 | list_add_tail(&victim->u.list, &ai->erase); | 294 | list_add_tail(&victim->u.list, &ai->erase); |
319 | 295 | ||
320 | if (av->highest_lnum == be32_to_cpu(new_vh->lnum)) | 296 | if (av->highest_lnum == be32_to_cpu(new_vh->lnum)) |
@@ -328,7 +304,8 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
328 | aeb->pnum = new_aeb->pnum; | 304 | aeb->pnum = new_aeb->pnum; |
329 | aeb->copy_flag = new_vh->copy_flag; | 305 | aeb->copy_flag = new_vh->copy_flag; |
330 | aeb->scrub = new_aeb->scrub; | 306 | aeb->scrub = new_aeb->scrub; |
331 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | 307 | aeb->sqnum = new_aeb->sqnum; |
308 | ubi_free_aeb(ai, new_aeb); | ||
332 | 309 | ||
333 | /* new_aeb is older */ | 310 | /* new_aeb is older */ |
334 | } else { | 311 | } else { |
@@ -370,41 +347,24 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
370 | struct ubi_vid_hdr *new_vh, | 347 | struct ubi_vid_hdr *new_vh, |
371 | struct ubi_ainf_peb *new_aeb) | 348 | struct ubi_ainf_peb *new_aeb) |
372 | { | 349 | { |
373 | struct ubi_ainf_volume *av, *tmp_av = NULL; | 350 | int vol_id = be32_to_cpu(new_vh->vol_id); |
374 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | 351 | struct ubi_ainf_volume *av; |
375 | int found = 0; | ||
376 | 352 | ||
377 | if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID || | 353 | if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) { |
378 | be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) { | 354 | ubi_free_aeb(ai, new_aeb); |
379 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | ||
380 | 355 | ||
381 | return 0; | 356 | return 0; |
382 | } | 357 | } |
383 | 358 | ||
384 | /* Find the volume this SEB belongs to */ | 359 | /* Find the volume this SEB belongs to */ |
385 | while (*p) { | 360 | av = ubi_find_av(ai, vol_id); |
386 | parent = *p; | 361 | if (!av) { |
387 | tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); | ||
388 | |||
389 | if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id) | ||
390 | p = &(*p)->rb_left; | ||
391 | else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id) | ||
392 | p = &(*p)->rb_right; | ||
393 | else { | ||
394 | found = 1; | ||
395 | break; | ||
396 | } | ||
397 | } | ||
398 | |||
399 | if (found) | ||
400 | av = tmp_av; | ||
401 | else { | ||
402 | ubi_err(ubi, "orphaned volume in fastmap pool!"); | 362 | ubi_err(ubi, "orphaned volume in fastmap pool!"); |
403 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | 363 | ubi_free_aeb(ai, new_aeb); |
404 | return UBI_BAD_FASTMAP; | 364 | return UBI_BAD_FASTMAP; |
405 | } | 365 | } |
406 | 366 | ||
407 | ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id); | 367 | ubi_assert(vol_id == av->vol_id); |
408 | 368 | ||
409 | return update_vol(ubi, ai, av, new_vh, new_aeb); | 369 | return update_vol(ubi, ai, av, new_vh, new_aeb); |
410 | } | 370 | } |
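
process_pool_aeb() now delegates the volume lookup to ubi_find_av() instead of walking the RB-tree by hand. A generic standalone version of that kind of keyed tree lookup is sketched below, using an ordinary binary search tree rather than the kernel's rb-tree machinery.

#include <stddef.h>

/* Toy volume tree keyed by vol_id; stands in for the attach-info tree that
 * ubi_find_av() now searches on behalf of its callers. */
struct av_node { int vol_id; struct av_node *left, *right; };

static struct av_node *find_av(struct av_node *root, int vol_id)
{
        while (root) {
                if (vol_id < root->vol_id)
                        root = root->left;
                else if (vol_id > root->vol_id)
                        root = root->right;
                else
                        return root;
        }
        return NULL;    /* orphaned volume: the caller frees the aeb and bails out */
}
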
@@ -423,16 +383,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum) | |||
423 | struct rb_node *node, *node2; | 383 | struct rb_node *node, *node2; |
424 | struct ubi_ainf_peb *aeb; | 384 | struct ubi_ainf_peb *aeb; |
425 | 385 | ||
426 | for (node = rb_first(&ai->volumes); node; node = rb_next(node)) { | 386 | ubi_rb_for_each_entry(node, av, &ai->volumes, rb) { |
427 | av = rb_entry(node, struct ubi_ainf_volume, rb); | 387 | ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) { |
428 | |||
429 | for (node2 = rb_first(&av->root); node2; | ||
430 | node2 = rb_next(node2)) { | ||
431 | aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); | ||
432 | if (aeb->pnum == pnum) { | 388 | if (aeb->pnum == pnum) { |
433 | rb_erase(&aeb->u.rb, &av->root); | 389 | rb_erase(&aeb->u.rb, &av->root); |
434 | av->leb_count--; | 390 | av->leb_count--; |
435 | kmem_cache_free(ai->aeb_slab_cache, aeb); | 391 | ubi_free_aeb(ai, aeb); |
436 | return; | 392 | return; |
437 | } | 393 | } |
438 | } | 394 | } |
@@ -455,6 +411,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
455 | __be32 *pebs, int pool_size, unsigned long long *max_sqnum, | 411 | __be32 *pebs, int pool_size, unsigned long long *max_sqnum, |
456 | struct list_head *free) | 412 | struct list_head *free) |
457 | { | 413 | { |
414 | struct ubi_vid_io_buf *vb; | ||
458 | struct ubi_vid_hdr *vh; | 415 | struct ubi_vid_hdr *vh; |
459 | struct ubi_ec_hdr *ech; | 416 | struct ubi_ec_hdr *ech; |
460 | struct ubi_ainf_peb *new_aeb; | 417 | struct ubi_ainf_peb *new_aeb; |
@@ -464,12 +421,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
464 | if (!ech) | 421 | if (!ech) |
465 | return -ENOMEM; | 422 | return -ENOMEM; |
466 | 423 | ||
467 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 424 | vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
468 | if (!vh) { | 425 | if (!vb) { |
469 | kfree(ech); | 426 | kfree(ech); |
470 | return -ENOMEM; | 427 | return -ENOMEM; |
471 | } | 428 | } |
472 | 429 | ||
430 | vh = ubi_get_vid_hdr(vb); | ||
431 | |||
473 | dbg_bld("scanning fastmap pool: size = %i", pool_size); | 432 | dbg_bld("scanning fastmap pool: size = %i", pool_size); |
474 | 433 | ||
475 | /* | 434 | /* |
@@ -510,15 +469,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
510 | goto out; | 469 | goto out; |
511 | } | 470 | } |
512 | 471 | ||
513 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | 472 | err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0); |
514 | if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { | 473 | if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { |
515 | unsigned long long ec = be64_to_cpu(ech->ec); | 474 | unsigned long long ec = be64_to_cpu(ech->ec); |
516 | unmap_peb(ai, pnum); | 475 | unmap_peb(ai, pnum); |
517 | dbg_bld("Adding PEB to free: %i", pnum); | 476 | dbg_bld("Adding PEB to free: %i", pnum); |
477 | |||
518 | if (err == UBI_IO_FF_BITFLIPS) | 478 | if (err == UBI_IO_FF_BITFLIPS) |
519 | add_aeb(ai, free, pnum, ec, 1); | 479 | scrub = 1; |
520 | else | 480 | |
521 | add_aeb(ai, free, pnum, ec, 0); | 481 | add_aeb(ai, free, pnum, ec, scrub); |
522 | continue; | 482 | continue; |
523 | } else if (err == 0 || err == UBI_IO_BITFLIPS) { | 483 | } else if (err == 0 || err == UBI_IO_BITFLIPS) { |
524 | dbg_bld("Found non empty PEB:%i in pool", pnum); | 484 | dbg_bld("Found non empty PEB:%i in pool", pnum); |
@@ -526,15 +486,12 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
526 | if (err == UBI_IO_BITFLIPS) | 486 | if (err == UBI_IO_BITFLIPS) |
527 | scrub = 1; | 487 | scrub = 1; |
528 | 488 | ||
529 | new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, | 489 | new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec)); |
530 | GFP_KERNEL); | ||
531 | if (!new_aeb) { | 490 | if (!new_aeb) { |
532 | ret = -ENOMEM; | 491 | ret = -ENOMEM; |
533 | goto out; | 492 | goto out; |
534 | } | 493 | } |
535 | 494 | ||
536 | new_aeb->ec = be64_to_cpu(ech->ec); | ||
537 | new_aeb->pnum = pnum; | ||
538 | new_aeb->lnum = be32_to_cpu(vh->lnum); | 495 | new_aeb->lnum = be32_to_cpu(vh->lnum); |
539 | new_aeb->sqnum = be64_to_cpu(vh->sqnum); | 496 | new_aeb->sqnum = be64_to_cpu(vh->sqnum); |
540 | new_aeb->copy_flag = vh->copy_flag; | 497 | new_aeb->copy_flag = vh->copy_flag; |
@@ -558,7 +515,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
558 | } | 515 | } |
559 | 516 | ||
560 | out: | 517 | out: |
561 | ubi_free_vid_hdr(ubi, vh); | 518 | ubi_free_vid_buf(vb); |
562 | kfree(ech); | 519 | kfree(ech); |
563 | return ret; | 520 | return ret; |
564 | } | 521 | } |
@@ -841,11 +798,11 @@ fail_bad: | |||
841 | fail: | 798 | fail: |
842 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) { | 799 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) { |
843 | list_del(&tmp_aeb->u.list); | 800 | list_del(&tmp_aeb->u.list); |
844 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | 801 | ubi_free_aeb(ai, tmp_aeb); |
845 | } | 802 | } |
846 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { | 803 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { |
847 | list_del(&tmp_aeb->u.list); | 804 | list_del(&tmp_aeb->u.list); |
848 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | 805 | ubi_free_aeb(ai, tmp_aeb); |
849 | } | 806 | } |
850 | 807 | ||
851 | return ret; | 808 | return ret; |
@@ -886,6 +843,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
886 | struct ubi_attach_info *scan_ai) | 843 | struct ubi_attach_info *scan_ai) |
887 | { | 844 | { |
888 | struct ubi_fm_sb *fmsb, *fmsb2; | 845 | struct ubi_fm_sb *fmsb, *fmsb2; |
846 | struct ubi_vid_io_buf *vb; | ||
889 | struct ubi_vid_hdr *vh; | 847 | struct ubi_vid_hdr *vh; |
890 | struct ubi_ec_hdr *ech; | 848 | struct ubi_ec_hdr *ech; |
891 | struct ubi_fastmap_layout *fm; | 849 | struct ubi_fastmap_layout *fm; |
@@ -919,7 +877,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
919 | goto out; | 877 | goto out; |
920 | } | 878 | } |
921 | 879 | ||
922 | ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); | 880 | ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb)); |
923 | if (ret && ret != UBI_IO_BITFLIPS) | 881 | if (ret && ret != UBI_IO_BITFLIPS) |
924 | goto free_fm_sb; | 882 | goto free_fm_sb; |
925 | else if (ret == UBI_IO_BITFLIPS) | 883 | else if (ret == UBI_IO_BITFLIPS) |
@@ -961,12 +919,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
961 | goto free_fm_sb; | 919 | goto free_fm_sb; |
962 | } | 920 | } |
963 | 921 | ||
964 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 922 | vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
965 | if (!vh) { | 923 | if (!vb) { |
966 | ret = -ENOMEM; | 924 | ret = -ENOMEM; |
967 | goto free_hdr; | 925 | goto free_hdr; |
968 | } | 926 | } |
969 | 927 | ||
928 | vh = ubi_get_vid_hdr(vb); | ||
929 | |||
970 | for (i = 0; i < used_blocks; i++) { | 930 | for (i = 0; i < used_blocks; i++) { |
971 | int image_seq; | 931 | int image_seq; |
972 | 932 | ||
@@ -1009,7 +969,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
1009 | goto free_hdr; | 969 | goto free_hdr; |
1010 | } | 970 | } |
1011 | 971 | ||
1012 | ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | 972 | ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0); |
1013 | if (ret && ret != UBI_IO_BITFLIPS) { | 973 | if (ret && ret != UBI_IO_BITFLIPS) { |
1014 | ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)", | 974 | ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)", |
1015 | i, pnum); | 975 | i, pnum); |
@@ -1037,8 +997,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
1037 | if (sqnum < be64_to_cpu(vh->sqnum)) | 997 | if (sqnum < be64_to_cpu(vh->sqnum)) |
1038 | sqnum = be64_to_cpu(vh->sqnum); | 998 | sqnum = be64_to_cpu(vh->sqnum); |
1039 | 999 | ||
1040 | ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, | 1000 | ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i), |
1041 | ubi->leb_start, ubi->leb_size); | 1001 | pnum, 0, ubi->leb_size); |
1042 | if (ret && ret != UBI_IO_BITFLIPS) { | 1002 | if (ret && ret != UBI_IO_BITFLIPS) { |
1043 | ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, " | 1003 | ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, " |
1044 | "err: %i)", i, pnum, ret); | 1004 | "err: %i)", i, pnum, ret); |
@@ -1099,7 +1059,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
1099 | ubi->fm_disabled = 0; | 1059 | ubi->fm_disabled = 0; |
1100 | ubi->fast_attach = 1; | 1060 | ubi->fast_attach = 1; |
1101 | 1061 | ||
1102 | ubi_free_vid_hdr(ubi, vh); | 1062 | ubi_free_vid_buf(vb); |
1103 | kfree(ech); | 1063 | kfree(ech); |
1104 | out: | 1064 | out: |
1105 | up_write(&ubi->fm_protect); | 1065 | up_write(&ubi->fm_protect); |
@@ -1108,7 +1068,7 @@ out: | |||
1108 | return ret; | 1068 | return ret; |
1109 | 1069 | ||
1110 | free_hdr: | 1070 | free_hdr: |
1111 | ubi_free_vid_hdr(ubi, vh); | 1071 | ubi_free_vid_buf(vb); |
1112 | kfree(ech); | 1072 | kfree(ech); |
1113 | free_fm_sb: | 1073 | free_fm_sb: |
1114 | kfree(fmsb); | 1074 | kfree(fmsb); |
@@ -1136,6 +1096,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1136 | struct ubi_fm_eba *feba; | 1096 | struct ubi_fm_eba *feba; |
1137 | struct ubi_wl_entry *wl_e; | 1097 | struct ubi_wl_entry *wl_e; |
1138 | struct ubi_volume *vol; | 1098 | struct ubi_volume *vol; |
1099 | struct ubi_vid_io_buf *avbuf, *dvbuf; | ||
1139 | struct ubi_vid_hdr *avhdr, *dvhdr; | 1100 | struct ubi_vid_hdr *avhdr, *dvhdr; |
1140 | struct ubi_work *ubi_wrk; | 1101 | struct ubi_work *ubi_wrk; |
1141 | struct rb_node *tmp_rb; | 1102 | struct rb_node *tmp_rb; |
@@ -1146,18 +1107,21 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1146 | fm_raw = ubi->fm_buf; | 1107 | fm_raw = ubi->fm_buf; |
1147 | memset(ubi->fm_buf, 0, ubi->fm_size); | 1108 | memset(ubi->fm_buf, 0, ubi->fm_size); |
1148 | 1109 | ||
1149 | avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); | 1110 | avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID); |
1150 | if (!avhdr) { | 1111 | if (!avbuf) { |
1151 | ret = -ENOMEM; | 1112 | ret = -ENOMEM; |
1152 | goto out; | 1113 | goto out; |
1153 | } | 1114 | } |
1154 | 1115 | ||
1155 | dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID); | 1116 | dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID); |
1156 | if (!dvhdr) { | 1117 | if (!dvbuf) { |
1157 | ret = -ENOMEM; | 1118 | ret = -ENOMEM; |
1158 | goto out_kfree; | 1119 | goto out_kfree; |
1159 | } | 1120 | } |
1160 | 1121 | ||
1122 | avhdr = ubi_get_vid_hdr(avbuf); | ||
1123 | dvhdr = ubi_get_vid_hdr(dvbuf); | ||
1124 | |||
1161 | seen_pebs = init_seen(ubi); | 1125 | seen_pebs = init_seen(ubi); |
1162 | if (IS_ERR(seen_pebs)) { | 1126 | if (IS_ERR(seen_pebs)) { |
1163 | ret = PTR_ERR(seen_pebs); | 1127 | ret = PTR_ERR(seen_pebs); |
@@ -1306,8 +1270,12 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1306 | fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs); | 1270 | fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs); |
1307 | ubi_assert(fm_pos <= ubi->fm_size); | 1271 | ubi_assert(fm_pos <= ubi->fm_size); |
1308 | 1272 | ||
1309 | for (j = 0; j < vol->reserved_pebs; j++) | 1273 | for (j = 0; j < vol->reserved_pebs; j++) { |
1310 | feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]); | 1274 | struct ubi_eba_leb_desc ldesc; |
1275 | |||
1276 | ubi_eba_get_ldesc(vol, j, &ldesc); | ||
1277 | feba->pnum[j] = cpu_to_be32(ldesc.pnum); | ||
1278 | } | ||
1311 | 1279 | ||
1312 | feba->reserved_pebs = cpu_to_be32(j); | 1280 | feba->reserved_pebs = cpu_to_be32(j); |
1313 | feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC); | 1281 | feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC); |
@@ -1322,7 +1290,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1322 | spin_unlock(&ubi->volumes_lock); | 1290 | spin_unlock(&ubi->volumes_lock); |
1323 | 1291 | ||
1324 | dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); | 1292 | dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); |
1325 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); | 1293 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf); |
1326 | if (ret) { | 1294 | if (ret) { |
1327 | ubi_err(ubi, "unable to write vid_hdr to fastmap SB!"); | 1295 | ubi_err(ubi, "unable to write vid_hdr to fastmap SB!"); |
1328 | goto out_kfree; | 1296 | goto out_kfree; |
@@ -1343,7 +1311,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1343 | dvhdr->lnum = cpu_to_be32(i); | 1311 | dvhdr->lnum = cpu_to_be32(i); |
1344 | dbg_bld("writing fastmap data to PEB %i sqnum %llu", | 1312 | dbg_bld("writing fastmap data to PEB %i sqnum %llu", |
1345 | new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); | 1313 | new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); |
1346 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); | 1314 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf); |
1347 | if (ret) { | 1315 | if (ret) { |
1348 | ubi_err(ubi, "unable to write vid_hdr to PEB %i!", | 1316 | ubi_err(ubi, "unable to write vid_hdr to PEB %i!", |
1349 | new_fm->e[i]->pnum); | 1317 | new_fm->e[i]->pnum); |
@@ -1352,8 +1320,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1352 | } | 1320 | } |
1353 | 1321 | ||
1354 | for (i = 0; i < new_fm->used_blocks; i++) { | 1322 | for (i = 0; i < new_fm->used_blocks; i++) { |
1355 | ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), | 1323 | ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size), |
1356 | new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); | 1324 | new_fm->e[i]->pnum, 0, ubi->leb_size); |
1357 | if (ret) { | 1325 | if (ret) { |
1358 | ubi_err(ubi, "unable to write fastmap to PEB %i!", | 1326 | ubi_err(ubi, "unable to write fastmap to PEB %i!", |
1359 | new_fm->e[i]->pnum); | 1327 | new_fm->e[i]->pnum); |
@@ -1368,8 +1336,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi, | |||
1368 | dbg_bld("fastmap written!"); | 1336 | dbg_bld("fastmap written!"); |
1369 | 1337 | ||
1370 | out_kfree: | 1338 | out_kfree: |
1371 | ubi_free_vid_hdr(ubi, avhdr); | 1339 | ubi_free_vid_buf(avbuf); |
1372 | ubi_free_vid_hdr(ubi, dvhdr); | 1340 | ubi_free_vid_buf(dvbuf); |
1373 | free_seen(seen_pebs); | 1341 | free_seen(seen_pebs); |
1374 | out: | 1342 | out: |
1375 | return ret; | 1343 | return ret; |
@@ -1439,7 +1407,8 @@ static int invalidate_fastmap(struct ubi_device *ubi) | |||
1439 | int ret; | 1407 | int ret; |
1440 | struct ubi_fastmap_layout *fm; | 1408 | struct ubi_fastmap_layout *fm; |
1441 | struct ubi_wl_entry *e; | 1409 | struct ubi_wl_entry *e; |
1442 | struct ubi_vid_hdr *vh = NULL; | 1410 | struct ubi_vid_io_buf *vb = NULL; |
1411 | struct ubi_vid_hdr *vh; | ||
1443 | 1412 | ||
1444 | if (!ubi->fm) | 1413 | if (!ubi->fm) |
1445 | return 0; | 1414 | return 0; |
@@ -1451,10 +1420,12 @@ static int invalidate_fastmap(struct ubi_device *ubi) | |||
1451 | if (!fm) | 1420 | if (!fm) |
1452 | goto out; | 1421 | goto out; |
1453 | 1422 | ||
1454 | vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); | 1423 | vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID); |
1455 | if (!vh) | 1424 | if (!vb) |
1456 | goto out_free_fm; | 1425 | goto out_free_fm; |
1457 | 1426 | ||
1427 | vh = ubi_get_vid_hdr(vb); | ||
1428 | |||
1458 | ret = -ENOSPC; | 1429 | ret = -ENOSPC; |
1459 | e = ubi_wl_get_fm_peb(ubi, 1); | 1430 | e = ubi_wl_get_fm_peb(ubi, 1); |
1460 | if (!e) | 1431 | if (!e) |
@@ -1465,7 +1436,7 @@ static int invalidate_fastmap(struct ubi_device *ubi) | |||
1465 | * to scanning mode. | 1436 | * to scanning mode. |
1466 | */ | 1437 | */ |
1467 | vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 1438 | vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
1468 | ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh); | 1439 | ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb); |
1469 | if (ret < 0) { | 1440 | if (ret < 0) { |
1470 | ubi_wl_put_fm_peb(ubi, e, 0, 0); | 1441 | ubi_wl_put_fm_peb(ubi, e, 0, 0); |
1471 | goto out_free_fm; | 1442 | goto out_free_fm; |
@@ -1477,7 +1448,7 @@ static int invalidate_fastmap(struct ubi_device *ubi) | |||
1477 | ubi->fm = fm; | 1448 | ubi->fm = fm; |
1478 | 1449 | ||
1479 | out: | 1450 | out: |
1480 | ubi_free_vid_hdr(ubi, vh); | 1451 | ubi_free_vid_buf(vb); |
1481 | return ret; | 1452 | return ret; |
1482 | 1453 | ||
1483 | out_free_fm: | 1454 | out_free_fm: |
@@ -1522,22 +1493,30 @@ int ubi_update_fastmap(struct ubi_device *ubi) | |||
1522 | struct ubi_wl_entry *tmp_e; | 1493 | struct ubi_wl_entry *tmp_e; |
1523 | 1494 | ||
1524 | down_write(&ubi->fm_protect); | 1495 | down_write(&ubi->fm_protect); |
1496 | down_write(&ubi->work_sem); | ||
1497 | down_write(&ubi->fm_eba_sem); | ||
1525 | 1498 | ||
1526 | ubi_refill_pools(ubi); | 1499 | ubi_refill_pools(ubi); |
1527 | 1500 | ||
1528 | if (ubi->ro_mode || ubi->fm_disabled) { | 1501 | if (ubi->ro_mode || ubi->fm_disabled) { |
1502 | up_write(&ubi->fm_eba_sem); | ||
1503 | up_write(&ubi->work_sem); | ||
1529 | up_write(&ubi->fm_protect); | 1504 | up_write(&ubi->fm_protect); |
1530 | return 0; | 1505 | return 0; |
1531 | } | 1506 | } |
1532 | 1507 | ||
1533 | ret = ubi_ensure_anchor_pebs(ubi); | 1508 | ret = ubi_ensure_anchor_pebs(ubi); |
1534 | if (ret) { | 1509 | if (ret) { |
1510 | up_write(&ubi->fm_eba_sem); | ||
1511 | up_write(&ubi->work_sem); | ||
1535 | up_write(&ubi->fm_protect); | 1512 | up_write(&ubi->fm_protect); |
1536 | return ret; | 1513 | return ret; |
1537 | } | 1514 | } |
1538 | 1515 | ||
1539 | new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); | 1516 | new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); |
1540 | if (!new_fm) { | 1517 | if (!new_fm) { |
1518 | up_write(&ubi->fm_eba_sem); | ||
1519 | up_write(&ubi->work_sem); | ||
1541 | up_write(&ubi->fm_protect); | 1520 | up_write(&ubi->fm_protect); |
1542 | return -ENOMEM; | 1521 | return -ENOMEM; |
1543 | } | 1522 | } |
@@ -1646,16 +1625,14 @@ int ubi_update_fastmap(struct ubi_device *ubi) | |||
1646 | new_fm->e[0] = tmp_e; | 1625 | new_fm->e[0] = tmp_e; |
1647 | } | 1626 | } |
1648 | 1627 | ||
1649 | down_write(&ubi->work_sem); | ||
1650 | down_write(&ubi->fm_eba_sem); | ||
1651 | ret = ubi_write_fastmap(ubi, new_fm); | 1628 | ret = ubi_write_fastmap(ubi, new_fm); |
1652 | up_write(&ubi->fm_eba_sem); | ||
1653 | up_write(&ubi->work_sem); | ||
1654 | 1629 | ||
1655 | if (ret) | 1630 | if (ret) |
1656 | goto err; | 1631 | goto err; |
1657 | 1632 | ||
1658 | out_unlock: | 1633 | out_unlock: |
1634 | up_write(&ubi->fm_eba_sem); | ||
1635 | up_write(&ubi->work_sem); | ||
1659 | up_write(&ubi->fm_protect); | 1636 | up_write(&ubi->fm_protect); |
1660 | kfree(old_fm); | 1637 | kfree(old_fm); |
1661 | return ret; | 1638 | return ret; |
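
The ubi_update_fastmap() hunks widen the locking: work_sem and fm_eba_sem are now taken together with fm_protect before ubi_refill_pools() runs, and every exit path releases them in reverse order. A standalone sketch of that acquire-up-front, release-in-reverse shape using pthread rwlocks; the names follow the kernel fields but the bodies are stubs, not the kernel code.

#include <pthread.h>

static pthread_rwlock_t fm_protect = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t work_sem   = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t fm_eba_sem = PTHREAD_RWLOCK_INITIALIZER;

static void refill_pools(void)  { /* stub */ }
static int  write_fastmap(void) { return 0; }   /* stub */

static int update_fastmap(void)
{
        int ret;

        pthread_rwlock_wrlock(&fm_protect);
        pthread_rwlock_wrlock(&work_sem);
        pthread_rwlock_wrlock(&fm_eba_sem);

        refill_pools();                 /* now runs with both semaphores held */
        ret = write_fastmap();

        pthread_rwlock_unlock(&fm_eba_sem);
        pthread_rwlock_unlock(&work_sem);
        pthread_rwlock_unlock(&fm_protect);
        return ret;
}
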
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index ff8cafe1e5cd..b6fb8f945c21 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -502,6 +502,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) | |||
502 | loff_t addr; | 502 | loff_t addr; |
503 | uint32_t data = 0; | 503 | uint32_t data = 0; |
504 | struct ubi_ec_hdr ec_hdr; | 504 | struct ubi_ec_hdr ec_hdr; |
505 | struct ubi_vid_io_buf vidb; | ||
505 | 506 | ||
506 | /* | 507 | /* |
507 | * Note, we cannot generally define VID header buffers on stack, | 508 | * Note, we cannot generally define VID header buffers on stack, |
@@ -528,7 +529,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) | |||
528 | goto error; | 529 | goto error; |
529 | } | 530 | } |
530 | 531 | ||
531 | err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); | 532 | ubi_init_vid_buf(ubi, &vidb, &vid_hdr); |
533 | ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb)); | ||
534 | |||
535 | err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0); | ||
532 | if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && | 536 | if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && |
533 | err != UBI_IO_FF){ | 537 | err != UBI_IO_FF){ |
534 | addr += ubi->vid_hdr_aloffset; | 538 | addr += ubi->vid_hdr_aloffset; |
@@ -995,12 +999,11 @@ bad: | |||
995 | * ubi_io_read_vid_hdr - read and check a volume identifier header. | 999 | * ubi_io_read_vid_hdr - read and check a volume identifier header. |
996 | * @ubi: UBI device description object | 1000 | * @ubi: UBI device description object |
997 | * @pnum: physical eraseblock number to read from | 1001 | * @pnum: physical eraseblock number to read from |
998 | * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume | 1002 | * @vidb: the volume identifier buffer to store data in |
999 | * identifier header | ||
1000 | * @verbose: be verbose if the header is corrupted or wasn't found | 1003 | * @verbose: be verbose if the header is corrupted or wasn't found |
1001 | * | 1004 | * |
1002 | * This function reads the volume identifier header from physical eraseblock | 1005 | * This function reads the volume identifier header from physical eraseblock |
1003 | * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read | 1006 | * @pnum and stores it in @vidb. It also checks CRC checksum of the read |
1004 | * volume identifier header. The error codes are the same as in | 1007 | * volume identifier header. The error codes are the same as in |
1005 | * 'ubi_io_read_ec_hdr()'. | 1008 | * 'ubi_io_read_ec_hdr()'. |
1006 | * | 1009 | * |
@@ -1008,16 +1011,16 @@ bad: | |||
1008 | * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'. | 1011 | * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'. |
1009 | */ | 1012 | */ |
1010 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | 1013 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
1011 | struct ubi_vid_hdr *vid_hdr, int verbose) | 1014 | struct ubi_vid_io_buf *vidb, int verbose) |
1012 | { | 1015 | { |
1013 | int err, read_err; | 1016 | int err, read_err; |
1014 | uint32_t crc, magic, hdr_crc; | 1017 | uint32_t crc, magic, hdr_crc; |
1015 | void *p; | 1018 | struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); |
1019 | void *p = vidb->buffer; | ||
1016 | 1020 | ||
1017 | dbg_io("read VID header from PEB %d", pnum); | 1021 | dbg_io("read VID header from PEB %d", pnum); |
1018 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 1022 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
1019 | 1023 | ||
1020 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | ||
1021 | read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1024 | read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1022 | ubi->vid_hdr_shift + UBI_VID_HDR_SIZE); | 1025 | ubi->vid_hdr_shift + UBI_VID_HDR_SIZE); |
1023 | if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) | 1026 | if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) |
@@ -1080,23 +1083,24 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | |||
1080 | * ubi_io_write_vid_hdr - write a volume identifier header. | 1083 | * ubi_io_write_vid_hdr - write a volume identifier header. |
1081 | * @ubi: UBI device description object | 1084 | * @ubi: UBI device description object |
1082 | * @pnum: the physical eraseblock number to write to | 1085 | * @pnum: the physical eraseblock number to write to |
1083 | * @vid_hdr: the volume identifier header to write | 1086 | * @vidb: the volume identifier buffer to write |
1084 | * | 1087 | * |
1085 | * This function writes the volume identifier header described by @vid_hdr to | 1088 | * This function writes the volume identifier header described by @vid_hdr to |
1086 | * physical eraseblock @pnum. This function automatically fills the | 1089 | * physical eraseblock @pnum. This function automatically fills the |
1087 | * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates | 1090 | * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates |
1088 | * header CRC checksum and stores it at vid_hdr->hdr_crc. | 1091 | * header CRC checksum and stores it at vidb->hdr->hdr_crc. |
1089 | * | 1092 | * |
1090 | * This function returns zero in case of success and a negative error code in | 1093 | * This function returns zero in case of success and a negative error code in |
1091 | * case of failure. If %-EIO is returned, the physical eraseblock probably went | 1094 | * case of failure. If %-EIO is returned, the physical eraseblock probably went |
1092 | * bad. | 1095 | * bad. |
1093 | */ | 1096 | */ |
1094 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, | 1097 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
1095 | struct ubi_vid_hdr *vid_hdr) | 1098 | struct ubi_vid_io_buf *vidb) |
1096 | { | 1099 | { |
1100 | struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); | ||
1097 | int err; | 1101 | int err; |
1098 | uint32_t crc; | 1102 | uint32_t crc; |
1099 | void *p; | 1103 | void *p = vidb->buffer; |
1100 | 1104 | ||
1101 | dbg_io("write VID header to PEB %d", pnum); | 1105 | dbg_io("write VID header to PEB %d", pnum); |
1102 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); | 1106 | ubi_assert(pnum >= 0 && pnum < ubi->peb_count); |
@@ -1117,7 +1121,6 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, | |||
1117 | if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE)) | 1121 | if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE)) |
1118 | return -EROFS; | 1122 | return -EROFS; |
1119 | 1123 | ||
1120 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | ||
1121 | err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1124 | err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1122 | ubi->vid_hdr_alsize); | 1125 | ubi->vid_hdr_alsize); |
1123 | return err; | 1126 | return err; |
@@ -1283,17 +1286,19 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | |||
1283 | { | 1286 | { |
1284 | int err; | 1287 | int err; |
1285 | uint32_t crc, hdr_crc; | 1288 | uint32_t crc, hdr_crc; |
1289 | struct ubi_vid_io_buf *vidb; | ||
1286 | struct ubi_vid_hdr *vid_hdr; | 1290 | struct ubi_vid_hdr *vid_hdr; |
1287 | void *p; | 1291 | void *p; |
1288 | 1292 | ||
1289 | if (!ubi_dbg_chk_io(ubi)) | 1293 | if (!ubi_dbg_chk_io(ubi)) |
1290 | return 0; | 1294 | return 0; |
1291 | 1295 | ||
1292 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 1296 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
1293 | if (!vid_hdr) | 1297 | if (!vidb) |
1294 | return -ENOMEM; | 1298 | return -ENOMEM; |
1295 | 1299 | ||
1296 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | 1300 | vid_hdr = ubi_get_vid_hdr(vidb); |
1301 | p = vidb->buffer; | ||
1297 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1302 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1298 | ubi->vid_hdr_alsize); | 1303 | ubi->vid_hdr_alsize); |
1299 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) | 1304 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
@@ -1314,7 +1319,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | |||
1314 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); | 1319 | err = self_check_vid_hdr(ubi, pnum, vid_hdr); |
1315 | 1320 | ||
1316 | exit: | 1321 | exit: |
1317 | ubi_free_vid_hdr(ubi, vid_hdr); | 1322 | ubi_free_vid_buf(vidb); |
1318 | return err; | 1323 | return err; |
1319 | } | 1324 | } |
1320 | 1325 | ||
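
In io.c the raw I/O pointer is now simply vidb->buffer, replacing the old "(char *)vid_hdr - ubi->vid_hdr_shift" arithmetic. The small program below only illustrates why the two are the same address once the header is placed at buffer + shift; the shift value and layout here are assumptions for illustration, not the kernel's.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct vid_hdr { uint32_t magic; };
struct vid_io_buf { struct vid_hdr *hdr; void *buffer; };

int main(void)
{
        size_t shift = 64;              /* arbitrary example shift */
        struct vid_io_buf vb;

        vb.buffer = calloc(1, shift + sizeof(struct vid_hdr));
        vb.hdr = (struct vid_hdr *)((char *)vb.buffer + shift);

        /* old-style pointer arithmetic lands back on the buffer start */
        assert((char *)vb.hdr - shift == (char *)vb.buffer);

        free(vb.buffer);
        return 0;
}
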
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index a9e2cef7c95c..88b1897aeb40 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -538,7 +538,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
538 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 538 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
539 | return -EROFS; | 539 | return -EROFS; |
540 | 540 | ||
541 | if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || | 541 | if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 || |
542 | offset + len > vol->usable_leb_size || | 542 | offset + len > vol->usable_leb_size || |
543 | offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) | 543 | offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) |
544 | return -EINVAL; | 544 | return -EINVAL; |
@@ -583,7 +583,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
583 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 583 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
584 | return -EROFS; | 584 | return -EROFS; |
585 | 585 | ||
586 | if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || | 586 | if (!ubi_leb_valid(vol, lnum) || len < 0 || |
587 | len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) | 587 | len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) |
588 | return -EINVAL; | 588 | return -EINVAL; |
589 | 589 | ||
@@ -620,7 +620,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) | |||
620 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 620 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
621 | return -EROFS; | 621 | return -EROFS; |
622 | 622 | ||
623 | if (lnum < 0 || lnum >= vol->reserved_pebs) | 623 | if (!ubi_leb_valid(vol, lnum)) |
624 | return -EINVAL; | 624 | return -EINVAL; |
625 | 625 | ||
626 | if (vol->upd_marker) | 626 | if (vol->upd_marker) |
@@ -680,7 +680,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) | |||
680 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 680 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
681 | return -EROFS; | 681 | return -EROFS; |
682 | 682 | ||
683 | if (lnum < 0 || lnum >= vol->reserved_pebs) | 683 | if (!ubi_leb_valid(vol, lnum)) |
684 | return -EINVAL; | 684 | return -EINVAL; |
685 | 685 | ||
686 | if (vol->upd_marker) | 686 | if (vol->upd_marker) |
@@ -716,13 +716,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum) | |||
716 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 716 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
717 | return -EROFS; | 717 | return -EROFS; |
718 | 718 | ||
719 | if (lnum < 0 || lnum >= vol->reserved_pebs) | 719 | if (!ubi_leb_valid(vol, lnum)) |
720 | return -EINVAL; | 720 | return -EINVAL; |
721 | 721 | ||
722 | if (vol->upd_marker) | 722 | if (vol->upd_marker) |
723 | return -EBADF; | 723 | return -EBADF; |
724 | 724 | ||
725 | if (vol->eba_tbl[lnum] >= 0) | 725 | if (ubi_eba_is_mapped(vol, lnum)) |
726 | return -EBADMSG; | 726 | return -EBADMSG; |
727 | 727 | ||
728 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); | 728 | return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); |
@@ -751,13 +751,13 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) | |||
751 | 751 | ||
752 | dbg_gen("test LEB %d:%d", vol->vol_id, lnum); | 752 | dbg_gen("test LEB %d:%d", vol->vol_id, lnum); |
753 | 753 | ||
754 | if (lnum < 0 || lnum >= vol->reserved_pebs) | 754 | if (!ubi_leb_valid(vol, lnum)) |
755 | return -EINVAL; | 755 | return -EINVAL; |
756 | 756 | ||
757 | if (vol->upd_marker) | 757 | if (vol->upd_marker) |
758 | return -EBADF; | 758 | return -EBADF; |
759 | 759 | ||
760 | return vol->eba_tbl[lnum] >= 0; | 760 | return ubi_eba_is_mapped(vol, lnum); |
761 | } | 761 | } |
762 | EXPORT_SYMBOL_GPL(ubi_is_mapped); | 762 | EXPORT_SYMBOL_GPL(ubi_is_mapped); |
763 | 763 | ||
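
The kapi.c hunks above swap the open-coded lnum range checks and direct eba_tbl[] peeks for the new ubi_leb_valid() and ubi_eba_is_mapped() helpers declared further down in ubi.h. As a minimal sketch of how the EBA side of that contract could look once the table is opaque (the entries/pnum field names are assumptions for illustration, not taken from this diff):

/* Sketch only: the real struct ubi_eba_table layout is private to eba.c. */
struct ubi_eba_entry {
	int pnum;			/* backing PEB, or UBI_LEB_UNMAPPED */
};

struct ubi_eba_table {
	struct ubi_eba_entry *entries;	/* one slot per reserved LEB */
};

bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
	/* Callers are expected to have checked ubi_leb_valid(vol, lnum). */
	return vol->eba_tbl->entries[lnum].pnum >= 0;
}
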
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index b616a115c9d3..697dbcba7371 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -167,6 +167,17 @@ enum { | |||
167 | }; | 167 | }; |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the | ||
171 | * flash. | ||
172 | * @hdr: a pointer to the VID header stored in buffer | ||
173 | * @buffer: underlying buffer | ||
174 | */ | ||
175 | struct ubi_vid_io_buf { | ||
176 | struct ubi_vid_hdr *hdr; | ||
177 | void *buffer; | ||
178 | }; | ||
179 | |||
180 | /** | ||
170 | * struct ubi_wl_entry - wear-leveling entry. | 181 | * struct ubi_wl_entry - wear-leveling entry. |
171 | * @u.rb: link in the corresponding (free/used) RB-tree | 182 | * @u.rb: link in the corresponding (free/used) RB-tree |
172 | * @u.list: link in the protection queue | 183 | * @u.list: link in the protection queue |
@@ -267,6 +278,21 @@ struct ubi_fm_pool { | |||
267 | }; | 278 | }; |
268 | 279 | ||
269 | /** | 280 | /** |
281 | * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor | ||
282 | * @lnum: the logical eraseblock number | ||
283 | * @pnum: the physical eraseblock where the LEB can be found | ||
284 | * | ||
285 | * This structure is here to hide EBA's internals from other parts of the | ||
286 | * UBI implementation. | ||
287 | * | ||
288 | * One can query the position of a LEB by calling ubi_eba_get_ldesc(). | ||
289 | */ | ||
290 | struct ubi_eba_leb_desc { | ||
291 | int lnum; | ||
292 | int pnum; | ||
293 | }; | ||
294 | |||
295 | /** | ||
270 | * struct ubi_volume - UBI volume description data structure. | 296 | * struct ubi_volume - UBI volume description data structure. |
271 | * @dev: device object to make use of the the Linux device model | 297 | * @dev: device object to make use of the the Linux device model |
272 | * @cdev: character device object to create character device | 298 | * @cdev: character device object to create character device |
@@ -344,7 +370,7 @@ struct ubi_volume { | |||
344 | long long upd_received; | 370 | long long upd_received; |
345 | void *upd_buf; | 371 | void *upd_buf; |
346 | 372 | ||
347 | int *eba_tbl; | 373 | struct ubi_eba_table *eba_tbl; |
348 | unsigned int checked:1; | 374 | unsigned int checked:1; |
349 | unsigned int corrupted:1; | 375 | unsigned int corrupted:1; |
350 | unsigned int upd_marker:1; | 376 | unsigned int upd_marker:1; |
@@ -724,6 +750,8 @@ struct ubi_ainf_volume { | |||
724 | * @ec_sum: a temporary variable used when calculating @mean_ec | 750 | * @ec_sum: a temporary variable used when calculating @mean_ec |
725 | * @ec_count: a temporary variable used when calculating @mean_ec | 751 | * @ec_count: a temporary variable used when calculating @mean_ec |
726 | * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects | 752 | * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects |
753 | * @ech: temporary EC header. Only available during scan | ||
754 | * @vidb: temporary VID buffer. Only available during scan | ||
727 | * | 755 | * |
728 | * This data structure contains the result of attaching an MTD device and may | 756 | * This data structure contains the result of attaching an MTD device and may |
729 | * be used by other UBI sub-systems to build final UBI data structures, further | 757 | * be used by other UBI sub-systems to build final UBI data structures, further |
@@ -752,6 +780,8 @@ struct ubi_attach_info { | |||
752 | uint64_t ec_sum; | 780 | uint64_t ec_sum; |
753 | int ec_count; | 781 | int ec_count; |
754 | struct kmem_cache *aeb_slab_cache; | 782 | struct kmem_cache *aeb_slab_cache; |
783 | struct ubi_ec_hdr *ech; | ||
784 | struct ubi_vid_io_buf *vidb; | ||
755 | }; | 785 | }; |
756 | 786 | ||
757 | /** | 787 | /** |
@@ -792,8 +822,12 @@ extern struct mutex ubi_devices_mutex; | |||
792 | extern struct blocking_notifier_head ubi_notifiers; | 822 | extern struct blocking_notifier_head ubi_notifiers; |
793 | 823 | ||
794 | /* attach.c */ | 824 | /* attach.c */ |
825 | struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum, | ||
826 | int ec); | ||
827 | void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb); | ||
795 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | 828 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, |
796 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); | 829 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); |
830 | struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id); | ||
797 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | 831 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, |
798 | int vol_id); | 832 | int vol_id); |
799 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); | 833 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); |
@@ -835,7 +869,21 @@ void ubi_update_reserved(struct ubi_device *ubi); | |||
835 | void ubi_calculate_reserved(struct ubi_device *ubi); | 869 | void ubi_calculate_reserved(struct ubi_device *ubi); |
836 | int ubi_check_pattern(const void *buf, uint8_t patt, int size); | 870 | int ubi_check_pattern(const void *buf, uint8_t patt, int size); |
837 | 871 | ||
872 | static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum) | ||
873 | { | ||
874 | return lnum >= 0 && lnum < vol->reserved_pebs; | ||
875 | } | ||
876 | |||
838 | /* eba.c */ | 877 | /* eba.c */ |
878 | struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol, | ||
879 | int nentries); | ||
880 | void ubi_eba_destroy_table(struct ubi_eba_table *tbl); | ||
881 | void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst, | ||
882 | int nentries); | ||
883 | void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl); | ||
884 | void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum, | ||
885 | struct ubi_eba_leb_desc *ldesc); | ||
886 | bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum); | ||
839 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | 887 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, |
840 | int lnum); | 888 | int lnum); |
841 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | 889 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
@@ -850,7 +898,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | |||
850 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | 898 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
851 | int lnum, const void *buf, int len); | 899 | int lnum, const void *buf, int len); |
852 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 900 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
853 | struct ubi_vid_hdr *vid_hdr); | 901 | struct ubi_vid_io_buf *vidb); |
854 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); | 902 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
855 | unsigned long long ubi_next_sqnum(struct ubi_device *ubi); | 903 | unsigned long long ubi_next_sqnum(struct ubi_device *ubi); |
856 | int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, | 904 | int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, |
@@ -885,9 +933,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | |||
885 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, | 933 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
886 | struct ubi_ec_hdr *ec_hdr); | 934 | struct ubi_ec_hdr *ec_hdr); |
887 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | 935 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
888 | struct ubi_vid_hdr *vid_hdr, int verbose); | 936 | struct ubi_vid_io_buf *vidb, int verbose); |
889 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, | 937 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
890 | struct ubi_vid_hdr *vid_hdr); | 938 | struct ubi_vid_io_buf *vidb); |
891 | 939 | ||
892 | /* build.c */ | 940 | /* build.c */ |
893 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, | 941 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
@@ -1008,44 +1056,68 @@ static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av, | |||
1008 | } | 1056 | } |
1009 | 1057 | ||
1010 | /** | 1058 | /** |
1011 | * ubi_zalloc_vid_hdr - allocate a volume identifier header object. | 1059 | * ubi_init_vid_buf - Initialize a VID buffer |
1012 | * @ubi: UBI device description object | 1060 | * @ubi: the UBI device |
1013 | * @gfp_flags: GFP flags to allocate with | 1061 | * @vidb: the VID buffer to initialize |
1014 | * | 1062 | * @buf: the underlying buffer |
1015 | * This function returns a pointer to the newly allocated and zero-filled | ||
1016 | * volume identifier header object in case of success and %NULL in case of | ||
1017 | * failure. | ||
1018 | */ | 1063 | */ |
1019 | static inline struct ubi_vid_hdr * | 1064 | static inline void ubi_init_vid_buf(const struct ubi_device *ubi, |
1020 | ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) | 1065 | struct ubi_vid_io_buf *vidb, |
1066 | void *buf) | ||
1021 | { | 1067 | { |
1022 | void *vid_hdr; | 1068 | if (buf) |
1069 | memset(buf, 0, ubi->vid_hdr_alsize); | ||
1023 | 1070 | ||
1024 | vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); | 1071 | vidb->buffer = buf; |
1025 | if (!vid_hdr) | 1072 | vidb->hdr = buf + ubi->vid_hdr_shift; |
1073 | } | ||
1074 | |||
1075 | /** | ||
1076 | * ubi_alloc_vid_buf - Allocate a VID buffer | ||
1077 | * @ubi: the UBI device | ||
1078 | * @gfp_flags: GFP flags to use for the allocation | ||
1079 | */ | ||
1080 | static inline struct ubi_vid_io_buf * | ||
1081 | ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags) | ||
1082 | { | ||
1083 | struct ubi_vid_io_buf *vidb; | ||
1084 | void *buf; | ||
1085 | |||
1086 | vidb = kzalloc(sizeof(*vidb), gfp_flags); | ||
1087 | if (!vidb) | ||
1088 | return NULL; | ||
1089 | |||
1090 | buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags); | ||
1091 | if (!buf) { | ||
1092 | kfree(vidb); | ||
1026 | return NULL; | 1093 | return NULL; |
1094 | } | ||
1027 | 1095 | ||
1028 | /* | 1096 | ubi_init_vid_buf(ubi, vidb, buf); |
1029 | * VID headers may be stored at un-aligned flash offsets, so we shift | 1097 | |
1030 | * the pointer. | 1098 | return vidb; |
1031 | */ | ||
1032 | return vid_hdr + ubi->vid_hdr_shift; | ||
1033 | } | 1099 | } |
1034 | 1100 | ||
1035 | /** | 1101 | /** |
1036 | * ubi_free_vid_hdr - free a volume identifier header object. | 1102 | * ubi_free_vid_buf - Free a VID buffer |
1037 | * @ubi: UBI device description object | 1103 | * @vidb: the VID buffer to free |
1038 | * @vid_hdr: the object to free | ||
1039 | */ | 1104 | */ |
1040 | static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, | 1105 | static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb) |
1041 | struct ubi_vid_hdr *vid_hdr) | ||
1042 | { | 1106 | { |
1043 | void *p = vid_hdr; | 1107 | if (!vidb) |
1044 | |||
1045 | if (!p) | ||
1046 | return; | 1108 | return; |
1047 | 1109 | ||
1048 | kfree(p - ubi->vid_hdr_shift); | 1110 | kfree(vidb->buffer); |
1111 | kfree(vidb); | ||
1112 | } | ||
1113 | |||
1114 | /** | ||
1115 | * ubi_get_vid_hdr - Get the VID header attached to a VID buffer | ||
1116 | * @vidb: VID buffer | ||
1117 | */ | ||
1118 | static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb) | ||
1119 | { | ||
1120 | return vidb->hdr; | ||
1049 | } | 1121 | } |
1050 | 1122 | ||
1051 | /* | 1123 | /* |
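
Taken together, the inline helpers above replace the old shifted-pointer ubi_zalloc_vid_hdr()/ubi_free_vid_hdr() pair with an explicit buffer object. A typical caller now follows the pattern below (a minimal sketch built only from the helpers shown in this hunk; example_read_vol_id is a hypothetical function and error handling is reduced to the essentials):

static int example_read_vol_id(struct ubi_device *ubi, int pnum, u32 *vol_id)
{
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int err;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);	/* kmalloc + header shift handled internally */
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);		/* header view into vidb->buffer */

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);	/* I/O now takes the buffer object */
	if (err && err != UBI_IO_BITFLIPS)
		goto out;

	*vol_id = be32_to_cpu(vid_hdr->vol_id);
	err = 0;
out:
	ubi_free_vid_buf(vidb);				/* frees buffer and wrapper, NULL-safe */
	return err;
}
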
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 0138f526474a..7ac78c13dd1c 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -138,7 +138,7 @@ static void vol_release(struct device *dev) | |||
138 | { | 138 | { |
139 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); | 139 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); |
140 | 140 | ||
141 | kfree(vol->eba_tbl); | 141 | ubi_eba_replace_table(vol, NULL); |
142 | kfree(vol); | 142 | kfree(vol); |
143 | } | 143 | } |
144 | 144 | ||
@@ -158,6 +158,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
158 | int i, err, vol_id = req->vol_id, do_free = 1; | 158 | int i, err, vol_id = req->vol_id, do_free = 1; |
159 | struct ubi_volume *vol; | 159 | struct ubi_volume *vol; |
160 | struct ubi_vtbl_record vtbl_rec; | 160 | struct ubi_vtbl_record vtbl_rec; |
161 | struct ubi_eba_table *eba_tbl = NULL; | ||
161 | dev_t dev; | 162 | dev_t dev; |
162 | 163 | ||
163 | if (ubi->ro_mode) | 164 | if (ubi->ro_mode) |
@@ -241,14 +242,13 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
241 | if (err) | 242 | if (err) |
242 | goto out_acc; | 243 | goto out_acc; |
243 | 244 | ||
244 | vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL); | 245 | eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs); |
245 | if (!vol->eba_tbl) { | 246 | if (IS_ERR(eba_tbl)) { |
246 | err = -ENOMEM; | 247 | err = PTR_ERR(eba_tbl); |
247 | goto out_acc; | 248 | goto out_acc; |
248 | } | 249 | } |
249 | 250 | ||
250 | for (i = 0; i < vol->reserved_pebs; i++) | 251 | ubi_eba_replace_table(vol, eba_tbl); |
251 | vol->eba_tbl[i] = UBI_LEB_UNMAPPED; | ||
252 | 252 | ||
253 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | 253 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { |
254 | vol->used_ebs = vol->reserved_pebs; | 254 | vol->used_ebs = vol->reserved_pebs; |
@@ -329,7 +329,7 @@ out_cdev: | |||
329 | cdev_del(&vol->cdev); | 329 | cdev_del(&vol->cdev); |
330 | out_mapping: | 330 | out_mapping: |
331 | if (do_free) | 331 | if (do_free) |
332 | kfree(vol->eba_tbl); | 332 | ubi_eba_destroy_table(eba_tbl); |
333 | out_acc: | 333 | out_acc: |
334 | spin_lock(&ubi->volumes_lock); | 334 | spin_lock(&ubi->volumes_lock); |
335 | ubi->rsvd_pebs -= vol->reserved_pebs; | 335 | ubi->rsvd_pebs -= vol->reserved_pebs; |
@@ -427,10 +427,11 @@ out_unlock: | |||
427 | */ | 427 | */ |
428 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | 428 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) |
429 | { | 429 | { |
430 | int i, err, pebs, *new_mapping; | 430 | int i, err, pebs; |
431 | struct ubi_volume *vol = desc->vol; | 431 | struct ubi_volume *vol = desc->vol; |
432 | struct ubi_device *ubi = vol->ubi; | 432 | struct ubi_device *ubi = vol->ubi; |
433 | struct ubi_vtbl_record vtbl_rec; | 433 | struct ubi_vtbl_record vtbl_rec; |
434 | struct ubi_eba_table *new_eba_tbl = NULL; | ||
434 | int vol_id = vol->vol_id; | 435 | int vol_id = vol->vol_id; |
435 | 436 | ||
436 | if (ubi->ro_mode) | 437 | if (ubi->ro_mode) |
@@ -450,12 +451,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
450 | if (reserved_pebs == vol->reserved_pebs) | 451 | if (reserved_pebs == vol->reserved_pebs) |
451 | return 0; | 452 | return 0; |
452 | 453 | ||
453 | new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL); | 454 | new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs); |
454 | if (!new_mapping) | 455 | if (IS_ERR(new_eba_tbl)) |
455 | return -ENOMEM; | 456 | return PTR_ERR(new_eba_tbl); |
456 | |||
457 | for (i = 0; i < reserved_pebs; i++) | ||
458 | new_mapping[i] = UBI_LEB_UNMAPPED; | ||
459 | 457 | ||
460 | spin_lock(&ubi->volumes_lock); | 458 | spin_lock(&ubi->volumes_lock); |
461 | if (vol->ref_count > 1) { | 459 | if (vol->ref_count > 1) { |
@@ -481,10 +479,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
481 | } | 479 | } |
482 | ubi->avail_pebs -= pebs; | 480 | ubi->avail_pebs -= pebs; |
483 | ubi->rsvd_pebs += pebs; | 481 | ubi->rsvd_pebs += pebs; |
484 | for (i = 0; i < vol->reserved_pebs; i++) | 482 | ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs); |
485 | new_mapping[i] = vol->eba_tbl[i]; | 483 | ubi_eba_replace_table(vol, new_eba_tbl); |
486 | kfree(vol->eba_tbl); | ||
487 | vol->eba_tbl = new_mapping; | ||
488 | spin_unlock(&ubi->volumes_lock); | 484 | spin_unlock(&ubi->volumes_lock); |
489 | } | 485 | } |
490 | 486 | ||
@@ -498,10 +494,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
498 | ubi->rsvd_pebs += pebs; | 494 | ubi->rsvd_pebs += pebs; |
499 | ubi->avail_pebs -= pebs; | 495 | ubi->avail_pebs -= pebs; |
500 | ubi_update_reserved(ubi); | 496 | ubi_update_reserved(ubi); |
501 | for (i = 0; i < reserved_pebs; i++) | 497 | ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs); |
502 | new_mapping[i] = vol->eba_tbl[i]; | 498 | ubi_eba_replace_table(vol, new_eba_tbl); |
503 | kfree(vol->eba_tbl); | ||
504 | vol->eba_tbl = new_mapping; | ||
505 | spin_unlock(&ubi->volumes_lock); | 499 | spin_unlock(&ubi->volumes_lock); |
506 | } | 500 | } |
507 | 501 | ||
@@ -543,7 +537,7 @@ out_acc: | |||
543 | spin_unlock(&ubi->volumes_lock); | 537 | spin_unlock(&ubi->volumes_lock); |
544 | } | 538 | } |
545 | out_free: | 539 | out_free: |
546 | kfree(new_mapping); | 540 | kfree(new_eba_tbl); |
547 | return err; | 541 | return err; |
548 | } | 542 | } |
549 | 543 | ||
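
The vmt.c conversion above is the main consumer of the new table API: build a fresh table, copy the live mappings under ubi->volumes_lock, then switch the volume over in one step. Stripped of locking and error paths, the resize flow reduces to roughly the sketch below (a simplified illustration of the pattern in the hunk, not a drop-in replacement; the reserved_pebs accounting is handled elsewhere in the real code):

static int example_swap_eba_table(struct ubi_volume *vol, int new_reserved_pebs)
{
	struct ubi_eba_table *new_tbl;
	int to_copy = min(vol->reserved_pebs, new_reserved_pebs);

	new_tbl = ubi_eba_create_table(vol, new_reserved_pebs);
	if (IS_ERR(new_tbl))
		return PTR_ERR(new_tbl);

	/* Carry over the existing LEB->PEB mappings ... */
	ubi_eba_copy_table(vol, new_tbl, to_copy);
	/* ... then install the new table; the old one is released by the
	 * replace helper, as the vol_release() change above implies. */
	ubi_eba_replace_table(vol, new_tbl);

	return 0;
}
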
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index d85c19762160..263743e7b741 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -299,15 +299,18 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
299 | int copy, void *vtbl) | 299 | int copy, void *vtbl) |
300 | { | 300 | { |
301 | int err, tries = 0; | 301 | int err, tries = 0; |
302 | struct ubi_vid_io_buf *vidb; | ||
302 | struct ubi_vid_hdr *vid_hdr; | 303 | struct ubi_vid_hdr *vid_hdr; |
303 | struct ubi_ainf_peb *new_aeb; | 304 | struct ubi_ainf_peb *new_aeb; |
304 | 305 | ||
305 | dbg_gen("create volume table (copy #%d)", copy + 1); | 306 | dbg_gen("create volume table (copy #%d)", copy + 1); |
306 | 307 | ||
307 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | 308 | vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); |
308 | if (!vid_hdr) | 309 | if (!vidb) |
309 | return -ENOMEM; | 310 | return -ENOMEM; |
310 | 311 | ||
312 | vid_hdr = ubi_get_vid_hdr(vidb); | ||
313 | |||
311 | retry: | 314 | retry: |
312 | new_aeb = ubi_early_get_peb(ubi, ai); | 315 | new_aeb = ubi_early_get_peb(ubi, ai); |
313 | if (IS_ERR(new_aeb)) { | 316 | if (IS_ERR(new_aeb)) { |
@@ -324,7 +327,7 @@ retry: | |||
324 | vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); | 327 | vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); |
325 | 328 | ||
326 | /* The EC header is already there, write the VID header */ | 329 | /* The EC header is already there, write the VID header */ |
327 | err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr); | 330 | err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb); |
328 | if (err) | 331 | if (err) |
329 | goto write_error; | 332 | goto write_error; |
330 | 333 | ||
@@ -338,8 +341,8 @@ retry: | |||
338 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. | 341 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. |
339 | */ | 342 | */ |
340 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); | 343 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); |
341 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | 344 | ubi_free_aeb(ai, new_aeb); |
342 | ubi_free_vid_hdr(ubi, vid_hdr); | 345 | ubi_free_vid_buf(vidb); |
343 | return err; | 346 | return err; |
344 | 347 | ||
345 | write_error: | 348 | write_error: |
@@ -351,9 +354,9 @@ write_error: | |||
351 | list_add(&new_aeb->u.list, &ai->erase); | 354 | list_add(&new_aeb->u.list, &ai->erase); |
352 | goto retry; | 355 | goto retry; |
353 | } | 356 | } |
354 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | 357 | ubi_free_aeb(ai, new_aeb); |
355 | out_free: | 358 | out_free: |
356 | ubi_free_vid_hdr(ubi, vid_hdr); | 359 | ubi_free_vid_buf(vidb); |
357 | return err; | 360 | return err; |
358 | 361 | ||
359 | } | 362 | } |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index f4533266d7b2..b5b8cd6f481c 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
580 | * failure. | 580 | * failure. |
581 | */ | 581 | */ |
582 | static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | 582 | static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, |
583 | int vol_id, int lnum, int torture) | 583 | int vol_id, int lnum, int torture, bool nested) |
584 | { | 584 | { |
585 | struct ubi_work *wl_wrk; | 585 | struct ubi_work *wl_wrk; |
586 | 586 | ||
@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
599 | wl_wrk->lnum = lnum; | 599 | wl_wrk->lnum = lnum; |
600 | wl_wrk->torture = torture; | 600 | wl_wrk->torture = torture; |
601 | 601 | ||
602 | schedule_ubi_work(ubi, wl_wrk); | 602 | if (nested) |
603 | __schedule_ubi_work(ubi, wl_wrk); | ||
604 | else | ||
605 | schedule_ubi_work(ubi, wl_wrk); | ||
603 | return 0; | 606 | return 0; |
604 | } | 607 | } |
605 | 608 | ||
@@ -644,11 +647,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
644 | int shutdown) | 647 | int shutdown) |
645 | { | 648 | { |
646 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; | 649 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
647 | int vol_id = -1, lnum = -1; | 650 | int erase = 0, keep = 0, vol_id = -1, lnum = -1; |
648 | #ifdef CONFIG_MTD_UBI_FASTMAP | 651 | #ifdef CONFIG_MTD_UBI_FASTMAP |
649 | int anchor = wrk->anchor; | 652 | int anchor = wrk->anchor; |
650 | #endif | 653 | #endif |
651 | struct ubi_wl_entry *e1, *e2; | 654 | struct ubi_wl_entry *e1, *e2; |
655 | struct ubi_vid_io_buf *vidb; | ||
652 | struct ubi_vid_hdr *vid_hdr; | 656 | struct ubi_vid_hdr *vid_hdr; |
653 | int dst_leb_clean = 0; | 657 | int dst_leb_clean = 0; |
654 | 658 | ||
@@ -656,10 +660,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
656 | if (shutdown) | 660 | if (shutdown) |
657 | return 0; | 661 | return 0; |
658 | 662 | ||
659 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 663 | vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); |
660 | if (!vid_hdr) | 664 | if (!vidb) |
661 | return -ENOMEM; | 665 | return -ENOMEM; |
662 | 666 | ||
667 | vid_hdr = ubi_get_vid_hdr(vidb); | ||
668 | |||
669 | down_read(&ubi->fm_eba_sem); | ||
663 | mutex_lock(&ubi->move_mutex); | 670 | mutex_lock(&ubi->move_mutex); |
664 | spin_lock(&ubi->wl_lock); | 671 | spin_lock(&ubi->wl_lock); |
665 | ubi_assert(!ubi->move_from && !ubi->move_to); | 672 | ubi_assert(!ubi->move_from && !ubi->move_to); |
@@ -753,7 +760,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
753 | * which is being moved was unmapped. | 760 | * which is being moved was unmapped. |
754 | */ | 761 | */ |
755 | 762 | ||
756 | err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); | 763 | err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0); |
757 | if (err && err != UBI_IO_BITFLIPS) { | 764 | if (err && err != UBI_IO_BITFLIPS) { |
758 | dst_leb_clean = 1; | 765 | dst_leb_clean = 1; |
759 | if (err == UBI_IO_FF) { | 766 | if (err == UBI_IO_FF) { |
@@ -780,6 +787,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
780 | e1->pnum); | 787 | e1->pnum); |
781 | scrubbing = 1; | 788 | scrubbing = 1; |
782 | goto out_not_moved; | 789 | goto out_not_moved; |
790 | } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) { | ||
791 | /* | ||
792 | * While a full scan would detect interrupted erasures | ||
793 | * at attach time, we can face them here when attached from | ||
794 | * Fastmap. | ||
795 | */ | ||
796 | dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure", | ||
797 | e1->pnum); | ||
798 | erase = 1; | ||
799 | goto out_not_moved; | ||
783 | } | 800 | } |
784 | 801 | ||
785 | ubi_err(ubi, "error %d while reading VID header from PEB %d", | 802 | ubi_err(ubi, "error %d while reading VID header from PEB %d", |
@@ -790,7 +807,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
790 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 807 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
791 | lnum = be32_to_cpu(vid_hdr->lnum); | 808 | lnum = be32_to_cpu(vid_hdr->lnum); |
792 | 809 | ||
793 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); | 810 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb); |
794 | if (err) { | 811 | if (err) { |
795 | if (err == MOVE_CANCEL_RACE) { | 812 | if (err == MOVE_CANCEL_RACE) { |
796 | /* | 813 | /* |
@@ -815,6 +832,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
815 | * Target PEB had bit-flips or write error - torture it. | 832 | * Target PEB had bit-flips or write error - torture it. |
816 | */ | 833 | */ |
817 | torture = 1; | 834 | torture = 1; |
835 | keep = 1; | ||
818 | goto out_not_moved; | 836 | goto out_not_moved; |
819 | } | 837 | } |
820 | 838 | ||
@@ -847,7 +865,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
847 | if (scrubbing) | 865 | if (scrubbing) |
848 | ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", | 866 | ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", |
849 | e1->pnum, vol_id, lnum, e2->pnum); | 867 | e1->pnum, vol_id, lnum, e2->pnum); |
850 | ubi_free_vid_hdr(ubi, vid_hdr); | 868 | ubi_free_vid_buf(vidb); |
851 | 869 | ||
852 | spin_lock(&ubi->wl_lock); | 870 | spin_lock(&ubi->wl_lock); |
853 | if (!ubi->move_to_put) { | 871 | if (!ubi->move_to_put) { |
@@ -879,6 +897,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
879 | 897 | ||
880 | dbg_wl("done"); | 898 | dbg_wl("done"); |
881 | mutex_unlock(&ubi->move_mutex); | 899 | mutex_unlock(&ubi->move_mutex); |
900 | up_read(&ubi->fm_eba_sem); | ||
882 | return 0; | 901 | return 0; |
883 | 902 | ||
884 | /* | 903 | /* |
@@ -901,7 +920,7 @@ out_not_moved: | |||
901 | ubi->erroneous_peb_count += 1; | 920 | ubi->erroneous_peb_count += 1; |
902 | } else if (scrubbing) | 921 | } else if (scrubbing) |
903 | wl_tree_add(e1, &ubi->scrub); | 922 | wl_tree_add(e1, &ubi->scrub); |
904 | else | 923 | else if (keep) |
905 | wl_tree_add(e1, &ubi->used); | 924 | wl_tree_add(e1, &ubi->used); |
906 | if (dst_leb_clean) { | 925 | if (dst_leb_clean) { |
907 | wl_tree_add(e2, &ubi->free); | 926 | wl_tree_add(e2, &ubi->free); |
@@ -913,7 +932,7 @@ out_not_moved: | |||
913 | ubi->wl_scheduled = 0; | 932 | ubi->wl_scheduled = 0; |
914 | spin_unlock(&ubi->wl_lock); | 933 | spin_unlock(&ubi->wl_lock); |
915 | 934 | ||
916 | ubi_free_vid_hdr(ubi, vid_hdr); | 935 | ubi_free_vid_buf(vidb); |
917 | if (dst_leb_clean) { | 936 | if (dst_leb_clean) { |
918 | ensure_wear_leveling(ubi, 1); | 937 | ensure_wear_leveling(ubi, 1); |
919 | } else { | 938 | } else { |
@@ -922,7 +941,14 @@ out_not_moved: | |||
922 | goto out_ro; | 941 | goto out_ro; |
923 | } | 942 | } |
924 | 943 | ||
944 | if (erase) { | ||
945 | err = do_sync_erase(ubi, e1, vol_id, lnum, 1); | ||
946 | if (err) | ||
947 | goto out_ro; | ||
948 | } | ||
949 | |||
925 | mutex_unlock(&ubi->move_mutex); | 950 | mutex_unlock(&ubi->move_mutex); |
951 | up_read(&ubi->fm_eba_sem); | ||
926 | return 0; | 952 | return 0; |
927 | 953 | ||
928 | out_error: | 954 | out_error: |
@@ -937,13 +963,14 @@ out_error: | |||
937 | ubi->move_to_put = ubi->wl_scheduled = 0; | 963 | ubi->move_to_put = ubi->wl_scheduled = 0; |
938 | spin_unlock(&ubi->wl_lock); | 964 | spin_unlock(&ubi->wl_lock); |
939 | 965 | ||
940 | ubi_free_vid_hdr(ubi, vid_hdr); | 966 | ubi_free_vid_buf(vidb); |
941 | wl_entry_destroy(ubi, e1); | 967 | wl_entry_destroy(ubi, e1); |
942 | wl_entry_destroy(ubi, e2); | 968 | wl_entry_destroy(ubi, e2); |
943 | 969 | ||
944 | out_ro: | 970 | out_ro: |
945 | ubi_ro_mode(ubi); | 971 | ubi_ro_mode(ubi); |
946 | mutex_unlock(&ubi->move_mutex); | 972 | mutex_unlock(&ubi->move_mutex); |
973 | up_read(&ubi->fm_eba_sem); | ||
947 | ubi_assert(err != 0); | 974 | ubi_assert(err != 0); |
948 | return err < 0 ? err : -EIO; | 975 | return err < 0 ? err : -EIO; |
949 | 976 | ||
@@ -951,7 +978,8 @@ out_cancel: | |||
951 | ubi->wl_scheduled = 0; | 978 | ubi->wl_scheduled = 0; |
952 | spin_unlock(&ubi->wl_lock); | 979 | spin_unlock(&ubi->wl_lock); |
953 | mutex_unlock(&ubi->move_mutex); | 980 | mutex_unlock(&ubi->move_mutex); |
954 | ubi_free_vid_hdr(ubi, vid_hdr); | 981 | up_read(&ubi->fm_eba_sem); |
982 | ubi_free_vid_buf(vidb); | ||
955 | return 0; | 983 | return 0; |
956 | } | 984 | } |
957 | 985 | ||
@@ -1073,7 +1101,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) | |||
1073 | int err1; | 1101 | int err1; |
1074 | 1102 | ||
1075 | /* Re-schedule the LEB for erasure */ | 1103 | /* Re-schedule the LEB for erasure */ |
1076 | err1 = schedule_erase(ubi, e, vol_id, lnum, 0); | 1104 | err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); |
1077 | if (err1) { | 1105 | if (err1) { |
1078 | wl_entry_destroy(ubi, e); | 1106 | wl_entry_destroy(ubi, e); |
1079 | err = err1; | 1107 | err = err1; |
@@ -1254,7 +1282,7 @@ retry: | |||
1254 | } | 1282 | } |
1255 | spin_unlock(&ubi->wl_lock); | 1283 | spin_unlock(&ubi->wl_lock); |
1256 | 1284 | ||
1257 | err = schedule_erase(ubi, e, vol_id, lnum, torture); | 1285 | err = schedule_erase(ubi, e, vol_id, lnum, torture, false); |
1258 | if (err) { | 1286 | if (err) { |
1259 | spin_lock(&ubi->wl_lock); | 1287 | spin_lock(&ubi->wl_lock); |
1260 | wl_tree_add(e, &ubi->used); | 1288 | wl_tree_add(e, &ubi->used); |
@@ -1545,7 +1573,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1545 | e->pnum = aeb->pnum; | 1573 | e->pnum = aeb->pnum; |
1546 | e->ec = aeb->ec; | 1574 | e->ec = aeb->ec; |
1547 | ubi->lookuptbl[e->pnum] = e; | 1575 | ubi->lookuptbl[e->pnum] = e; |
1548 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { | 1576 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { |
1549 | wl_entry_destroy(ubi, e); | 1577 | wl_entry_destroy(ubi, e); |
1550 | goto out_free; | 1578 | goto out_free; |
1551 | } | 1579 | } |
@@ -1624,7 +1652,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1624 | e->ec = aeb->ec; | 1652 | e->ec = aeb->ec; |
1625 | ubi_assert(!ubi->lookuptbl[e->pnum]); | 1653 | ubi_assert(!ubi->lookuptbl[e->pnum]); |
1626 | ubi->lookuptbl[e->pnum] = e; | 1654 | ubi->lookuptbl[e->pnum] = e; |
1627 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { | 1655 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { |
1628 | wl_entry_destroy(ubi, e); | 1656 | wl_entry_destroy(ubi, e); |
1629 | goto out_free; | 1657 | goto out_free; |
1630 | } | 1658 | } |
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 64902702b17d..c8f60df2733e 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
@@ -301,6 +301,95 @@ out_budg: | |||
301 | return err; | 301 | return err; |
302 | } | 302 | } |
303 | 303 | ||
304 | static int do_tmpfile(struct inode *dir, struct dentry *dentry, | ||
305 | umode_t mode, struct inode **whiteout) | ||
306 | { | ||
307 | struct inode *inode; | ||
308 | struct ubifs_info *c = dir->i_sb->s_fs_info; | ||
309 | struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1}; | ||
310 | struct ubifs_budget_req ino_req = { .dirtied_ino = 1 }; | ||
311 | struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir); | ||
312 | int err, instantiated = 0; | ||
313 | |||
314 | /* | ||
315 | * Budget request settings: new dirty inode, new direntry, | ||
316 | * budget for dirtied inode will be released via writeback. | ||
317 | */ | ||
318 | |||
319 | dbg_gen("dent '%pd', mode %#hx in dir ino %lu", | ||
320 | dentry, mode, dir->i_ino); | ||
321 | |||
322 | err = ubifs_budget_space(c, &req); | ||
323 | if (err) | ||
324 | return err; | ||
325 | |||
326 | err = ubifs_budget_space(c, &ino_req); | ||
327 | if (err) { | ||
328 | ubifs_release_budget(c, &req); | ||
329 | return err; | ||
330 | } | ||
331 | |||
332 | inode = ubifs_new_inode(c, dir, mode); | ||
333 | if (IS_ERR(inode)) { | ||
334 | err = PTR_ERR(inode); | ||
335 | goto out_budg; | ||
336 | } | ||
337 | ui = ubifs_inode(inode); | ||
338 | |||
339 | if (whiteout) { | ||
340 | init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); | ||
341 | ubifs_assert(inode->i_op == &ubifs_file_inode_operations); | ||
342 | } | ||
343 | |||
344 | err = ubifs_init_security(dir, inode, &dentry->d_name); | ||
345 | if (err) | ||
346 | goto out_inode; | ||
347 | |||
348 | mutex_lock(&ui->ui_mutex); | ||
349 | insert_inode_hash(inode); | ||
350 | |||
351 | if (whiteout) { | ||
352 | mark_inode_dirty(inode); | ||
353 | drop_nlink(inode); | ||
354 | *whiteout = inode; | ||
355 | } else { | ||
356 | d_tmpfile(dentry, inode); | ||
357 | } | ||
358 | ubifs_assert(ui->dirty); | ||
359 | |||
360 | instantiated = 1; | ||
361 | mutex_unlock(&ui->ui_mutex); | ||
362 | |||
363 | mutex_lock(&dir_ui->ui_mutex); | ||
364 | err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0); | ||
365 | if (err) | ||
366 | goto out_cancel; | ||
367 | mutex_unlock(&dir_ui->ui_mutex); | ||
368 | |||
369 | ubifs_release_budget(c, &req); | ||
370 | |||
371 | return 0; | ||
372 | |||
373 | out_cancel: | ||
374 | mutex_unlock(&dir_ui->ui_mutex); | ||
375 | out_inode: | ||
376 | make_bad_inode(inode); | ||
377 | if (!instantiated) | ||
378 | iput(inode); | ||
379 | out_budg: | ||
380 | ubifs_release_budget(c, &req); | ||
381 | if (!instantiated) | ||
382 | ubifs_release_budget(c, &ino_req); | ||
383 | ubifs_err(c, "cannot create temporary file, error %d", err); | ||
384 | return err; | ||
385 | } | ||
386 | |||
387 | static int ubifs_tmpfile(struct inode *dir, struct dentry *dentry, | ||
388 | umode_t mode) | ||
389 | { | ||
390 | return do_tmpfile(dir, dentry, mode, NULL); | ||
391 | } | ||
392 | |||
304 | /** | 393 | /** |
305 | * vfs_dent_type - get VFS directory entry type. | 394 | * vfs_dent_type - get VFS directory entry type. |
306 | * @type: UBIFS directory entry type | 395 | * @type: UBIFS directory entry type |
@@ -927,37 +1016,43 @@ out_budg: | |||
927 | } | 1016 | } |
928 | 1017 | ||
929 | /** | 1018 | /** |
930 | * lock_3_inodes - a wrapper for locking three UBIFS inodes. | 1019 | * lock_4_inodes - a wrapper for locking four UBIFS inodes. |
931 | * @inode1: first inode | 1020 | * @inode1: first inode |
932 | * @inode2: second inode | 1021 | * @inode2: second inode |
933 | * @inode3: third inode | 1022 | * @inode3: third inode |
1023 | * @inode4: fourth inode | ||
934 | * | 1024 | * |
935 | * This function is used for 'ubifs_rename()' and @inode1 may be the same as | 1025 | * This function is used for 'ubifs_rename()' and @inode1 may be the same as |
936 | * @inode2 whereas @inode3 may be %NULL. | 1026 | * @inode2 whereas @inode3 and @inode4 may be %NULL. |
937 | * | 1027 | * |
938 | * We do not implement any tricks to guarantee strict lock ordering, because | 1028 | * We do not implement any tricks to guarantee strict lock ordering, because |
939 | * VFS has already done it for us on the @i_mutex. So this is just a simple | 1029 | * VFS has already done it for us on the @i_mutex. So this is just a simple |
940 | * wrapper function. | 1030 | * wrapper function. |
941 | */ | 1031 | */ |
942 | static void lock_3_inodes(struct inode *inode1, struct inode *inode2, | 1032 | static void lock_4_inodes(struct inode *inode1, struct inode *inode2, |
943 | struct inode *inode3) | 1033 | struct inode *inode3, struct inode *inode4) |
944 | { | 1034 | { |
945 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); | 1035 | mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1); |
946 | if (inode2 != inode1) | 1036 | if (inode2 != inode1) |
947 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); | 1037 | mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2); |
948 | if (inode3) | 1038 | if (inode3) |
949 | mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3); | 1039 | mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3); |
1040 | if (inode4) | ||
1041 | mutex_lock_nested(&ubifs_inode(inode4)->ui_mutex, WB_MUTEX_4); | ||
950 | } | 1042 | } |
951 | 1043 | ||
952 | /** | 1044 | /** |
953 | * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename. | 1045 | * unlock_4_inodes - a wrapper for unlocking four UBIFS inodes for rename. |
954 | * @inode1: first inode | 1046 | * @inode1: first inode |
955 | * @inode2: second inode | 1047 | * @inode2: second inode |
956 | * @inode3: third inode | 1048 | * @inode3: third inode |
1049 | * @inode4: fourth inode | ||
957 | */ | 1050 | */ |
958 | static void unlock_3_inodes(struct inode *inode1, struct inode *inode2, | 1051 | static void unlock_4_inodes(struct inode *inode1, struct inode *inode2, |
959 | struct inode *inode3) | 1052 | struct inode *inode3, struct inode *inode4) |
960 | { | 1053 | { |
1054 | if (inode4) | ||
1055 | mutex_unlock(&ubifs_inode(inode4)->ui_mutex); | ||
961 | if (inode3) | 1056 | if (inode3) |
962 | mutex_unlock(&ubifs_inode(inode3)->ui_mutex); | 1057 | mutex_unlock(&ubifs_inode(inode3)->ui_mutex); |
963 | if (inode1 != inode2) | 1058 | if (inode1 != inode2) |
@@ -972,7 +1067,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
972 | struct ubifs_info *c = old_dir->i_sb->s_fs_info; | 1067 | struct ubifs_info *c = old_dir->i_sb->s_fs_info; |
973 | struct inode *old_inode = d_inode(old_dentry); | 1068 | struct inode *old_inode = d_inode(old_dentry); |
974 | struct inode *new_inode = d_inode(new_dentry); | 1069 | struct inode *new_inode = d_inode(new_dentry); |
1070 | struct inode *whiteout = NULL; | ||
975 | struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode); | 1071 | struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode); |
1072 | struct ubifs_inode *whiteout_ui = NULL; | ||
976 | int err, release, sync = 0, move = (new_dir != old_dir); | 1073 | int err, release, sync = 0, move = (new_dir != old_dir); |
977 | int is_dir = S_ISDIR(old_inode->i_mode); | 1074 | int is_dir = S_ISDIR(old_inode->i_mode); |
978 | int unlink = !!new_inode; | 1075 | int unlink = !!new_inode; |
@@ -997,15 +1094,13 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
997 | * separately. | 1094 | * separately. |
998 | */ | 1095 | */ |
999 | 1096 | ||
1000 | dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu", | 1097 | dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x", |
1001 | old_dentry, old_inode->i_ino, old_dir->i_ino, | 1098 | old_dentry, old_inode->i_ino, old_dir->i_ino, |
1002 | new_dentry, new_dir->i_ino); | 1099 | new_dentry, new_dir->i_ino, flags); |
1003 | ubifs_assert(inode_is_locked(old_dir)); | 1100 | |
1004 | ubifs_assert(inode_is_locked(new_dir)); | ||
1005 | if (unlink) | 1101 | if (unlink) |
1006 | ubifs_assert(inode_is_locked(new_inode)); | 1102 | ubifs_assert(inode_is_locked(new_inode)); |
1007 | 1103 | ||
1008 | |||
1009 | if (unlink && is_dir) { | 1104 | if (unlink && is_dir) { |
1010 | err = check_dir_empty(c, new_inode); | 1105 | err = check_dir_empty(c, new_inode); |
1011 | if (err) | 1106 | if (err) |
@@ -1021,7 +1116,32 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1021 | return err; | 1116 | return err; |
1022 | } | 1117 | } |
1023 | 1118 | ||
1024 | lock_3_inodes(old_dir, new_dir, new_inode); | 1119 | if (flags & RENAME_WHITEOUT) { |
1120 | union ubifs_dev_desc *dev = NULL; | ||
1121 | |||
1122 | dev = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS); | ||
1123 | if (!dev) { | ||
1124 | ubifs_release_budget(c, &req); | ||
1125 | ubifs_release_budget(c, &ino_req); | ||
1126 | return -ENOMEM; | ||
1127 | } | ||
1128 | |||
1129 | err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout); | ||
1130 | if (err) { | ||
1131 | ubifs_release_budget(c, &req); | ||
1132 | ubifs_release_budget(c, &ino_req); | ||
1133 | kfree(dev); | ||
1134 | return err; | ||
1135 | } | ||
1136 | |||
1137 | whiteout->i_state |= I_LINKABLE; | ||
1138 | whiteout_ui = ubifs_inode(whiteout); | ||
1139 | whiteout_ui->data = dev; | ||
1140 | whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0)); | ||
1141 | ubifs_assert(!whiteout_ui->dirty); | ||
1142 | } | ||
1143 | |||
1144 | lock_4_inodes(old_dir, new_dir, new_inode, whiteout); | ||
1025 | 1145 | ||
1026 | /* | 1146 | /* |
1027 | * Like most other Unix systems, set the @i_ctime for inodes on a | 1147 | * Like most other Unix systems, set the @i_ctime for inodes on a |
@@ -1091,12 +1211,34 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1091 | if (unlink && IS_SYNC(new_inode)) | 1211 | if (unlink && IS_SYNC(new_inode)) |
1092 | sync = 1; | 1212 | sync = 1; |
1093 | } | 1213 | } |
1094 | err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry, | 1214 | |
1215 | if (whiteout) { | ||
1216 | struct ubifs_budget_req wht_req = { .dirtied_ino = 1, | ||
1217 | .dirtied_ino_d = \ | ||
1218 | ALIGN(ubifs_inode(whiteout)->data_len, 8) }; | ||
1219 | |||
1220 | err = ubifs_budget_space(c, &wht_req); | ||
1221 | if (err) { | ||
1222 | ubifs_release_budget(c, &req); | ||
1223 | ubifs_release_budget(c, &ino_req); | ||
1224 | kfree(whiteout_ui->data); | ||
1225 | whiteout_ui->data_len = 0; | ||
1226 | iput(whiteout); | ||
1227 | return err; | ||
1228 | } | ||
1229 | |||
1230 | inc_nlink(whiteout); | ||
1231 | mark_inode_dirty(whiteout); | ||
1232 | whiteout->i_state &= ~I_LINKABLE; | ||
1233 | iput(whiteout); | ||
1234 | } | ||
1235 | |||
1236 | err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry, whiteout, | ||
1095 | sync); | 1237 | sync); |
1096 | if (err) | 1238 | if (err) |
1097 | goto out_cancel; | 1239 | goto out_cancel; |
1098 | 1240 | ||
1099 | unlock_3_inodes(old_dir, new_dir, new_inode); | 1241 | unlock_4_inodes(old_dir, new_dir, new_inode, whiteout); |
1100 | ubifs_release_budget(c, &req); | 1242 | ubifs_release_budget(c, &req); |
1101 | 1243 | ||
1102 | mutex_lock(&old_inode_ui->ui_mutex); | 1244 | mutex_lock(&old_inode_ui->ui_mutex); |
@@ -1129,12 +1271,74 @@ out_cancel: | |||
1129 | inc_nlink(old_dir); | 1271 | inc_nlink(old_dir); |
1130 | } | 1272 | } |
1131 | } | 1273 | } |
1132 | unlock_3_inodes(old_dir, new_dir, new_inode); | 1274 | if (whiteout) { |
1275 | drop_nlink(whiteout); | ||
1276 | iput(whiteout); | ||
1277 | } | ||
1278 | unlock_4_inodes(old_dir, new_dir, new_inode, whiteout); | ||
1133 | ubifs_release_budget(c, &ino_req); | 1279 | ubifs_release_budget(c, &ino_req); |
1134 | ubifs_release_budget(c, &req); | 1280 | ubifs_release_budget(c, &req); |
1135 | return err; | 1281 | return err; |
1136 | } | 1282 | } |
1137 | 1283 | ||
1284 | static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry, | ||
1285 | struct inode *new_dir, struct dentry *new_dentry) | ||
1286 | { | ||
1287 | struct ubifs_info *c = old_dir->i_sb->s_fs_info; | ||
1288 | struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1, | ||
1289 | .dirtied_ino = 2 }; | ||
1290 | int sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir); | ||
1291 | struct inode *fst_inode = d_inode(old_dentry); | ||
1292 | struct inode *snd_inode = d_inode(new_dentry); | ||
1293 | struct timespec time; | ||
1294 | int err; | ||
1295 | |||
1296 | ubifs_assert(fst_inode && snd_inode); | ||
1297 | |||
1298 | lock_4_inodes(old_dir, new_dir, NULL, NULL); | ||
1299 | |||
1300 | time = ubifs_current_time(old_dir); | ||
1301 | fst_inode->i_ctime = time; | ||
1302 | snd_inode->i_ctime = time; | ||
1303 | old_dir->i_mtime = old_dir->i_ctime = time; | ||
1304 | new_dir->i_mtime = new_dir->i_ctime = time; | ||
1305 | |||
1306 | if (old_dir != new_dir) { | ||
1307 | if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) { | ||
1308 | inc_nlink(new_dir); | ||
1309 | drop_nlink(old_dir); | ||
1310 | } | ||
1311 | else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) { | ||
1312 | drop_nlink(new_dir); | ||
1313 | inc_nlink(old_dir); | ||
1314 | } | ||
1315 | } | ||
1316 | |||
1317 | err = ubifs_jnl_xrename(c, old_dir, old_dentry, new_dir, new_dentry, | ||
1318 | sync); | ||
1319 | |||
1320 | unlock_4_inodes(old_dir, new_dir, NULL, NULL); | ||
1321 | ubifs_release_budget(c, &req); | ||
1322 | |||
1323 | return err; | ||
1324 | } | ||
1325 | |||
1326 | static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry, | ||
1327 | struct inode *new_dir, struct dentry *new_dentry, | ||
1328 | unsigned int flags) | ||
1329 | { | ||
1330 | if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE)) | ||
1331 | return -EINVAL; | ||
1332 | |||
1333 | ubifs_assert(inode_is_locked(old_dir)); | ||
1334 | ubifs_assert(inode_is_locked(new_dir)); | ||
1335 | |||
1336 | if (flags & RENAME_EXCHANGE) | ||
1337 | return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry); | ||
1338 | |||
1339 | return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); | ||
1340 | } | ||
1341 | |||
1138 | int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, | 1342 | int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1139 | struct kstat *stat) | 1343 | struct kstat *stat) |
1140 | { | 1344 | { |
@@ -1183,13 +1387,14 @@ const struct inode_operations ubifs_dir_inode_operations = { | |||
1183 | .mkdir = ubifs_mkdir, | 1387 | .mkdir = ubifs_mkdir, |
1184 | .rmdir = ubifs_rmdir, | 1388 | .rmdir = ubifs_rmdir, |
1185 | .mknod = ubifs_mknod, | 1389 | .mknod = ubifs_mknod, |
1186 | .rename = ubifs_rename, | 1390 | .rename = ubifs_rename2, |
1187 | .setattr = ubifs_setattr, | 1391 | .setattr = ubifs_setattr, |
1188 | .getattr = ubifs_getattr, | 1392 | .getattr = ubifs_getattr, |
1189 | .listxattr = ubifs_listxattr, | 1393 | .listxattr = ubifs_listxattr, |
1190 | #ifdef CONFIG_UBIFS_ATIME_SUPPORT | 1394 | #ifdef CONFIG_UBIFS_ATIME_SUPPORT |
1191 | .update_time = ubifs_update_time, | 1395 | .update_time = ubifs_update_time, |
1192 | #endif | 1396 | #endif |
1397 | .tmpfile = ubifs_tmpfile, | ||
1193 | }; | 1398 | }; |
1194 | 1399 | ||
1195 | const struct file_operations ubifs_dir_operations = { | 1400 | const struct file_operations ubifs_dir_operations = { |
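
On the UBIFS side, the dir.c changes wire the new journal helpers into the VFS: .tmpfile for O_TMPFILE, and the ubifs_rename2() dispatcher that routes RENAME_EXCHANGE to ubifs_xrename() and passes RENAME_WHITEOUT down into ubifs_rename(), which builds the whiteout via do_tmpfile(). From userspace the new behaviour is reachable as sketched below (paths are placeholders and a glibc exposing the renameat2() wrapper is assumed; whiteouts are normally created by overlayfs from kernel context and are shown here purely as an illustration):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* O_TMPFILE: an unlinked, unnamed file on the UBIFS mount. */
	int fd = open("/mnt/ubifs", O_TMPFILE | O_RDWR, 0600);
	if (fd < 0)
		perror("O_TMPFILE");

	/* RENAME_EXCHANGE: atomically swap two directory entries. */
	if (renameat2(AT_FDCWD, "/mnt/ubifs/a",
		      AT_FDCWD, "/mnt/ubifs/b", RENAME_EXCHANGE) < 0)
		perror("RENAME_EXCHANGE");

	/* RENAME_WHITEOUT: rename and leave a whiteout at the old name. */
	if (renameat2(AT_FDCWD, "/mnt/ubifs/c",
		      AT_FDCWD, "/mnt/ubifs/d", RENAME_WHITEOUT) < 0)
		perror("RENAME_WHITEOUT");

	if (fd >= 0)
		close(fd);
	return 0;
}
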
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index a746982fbcda..b4fbeefba246 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
@@ -1397,7 +1397,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time, | |||
1397 | #endif | 1397 | #endif |
1398 | 1398 | ||
1399 | /** | 1399 | /** |
1400 | * update_ctime - update mtime and ctime of an inode. | 1400 | * update_mctime - update mtime and ctime of an inode. |
1401 | * @inode: inode to update | 1401 | * @inode: inode to update |
1402 | * | 1402 | * |
1403 | * This function updates mtime and ctime of the inode if it is not equivalent to | 1403 | * This function updates mtime and ctime of the inode if it is not equivalent to |
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 821b34816976..e845c64b6ce1 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c | |||
@@ -113,7 +113,7 @@ static int switch_gc_head(struct ubifs_info *c) | |||
113 | * data_nodes_cmp - compare 2 data nodes. | 113 | * data_nodes_cmp - compare 2 data nodes. |
114 | * @priv: UBIFS file-system description object | 114 | * @priv: UBIFS file-system description object |
115 | * @a: first data node | 115 | * @a: first data node |
116 | * @a: second data node | 116 | * @b: second data node |
117 | * | 117 | * |
118 | * This function compares data nodes @a and @b. Returns %1 if @a has greater | 118 | * This function compares data nodes @a and @b. Returns %1 if @a has greater |
119 | * inode or block number, and %-1 otherwise. | 119 | * inode or block number, and %-1 otherwise. |
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 0b9da5b6e0f9..91bc76dc559e 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c | |||
@@ -908,6 +908,147 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode) | |||
908 | } | 908 | } |
909 | 909 | ||
910 | /** | 910 | /** |
911 | * ubifs_jnl_xrename - cross rename two directory entries. | ||
912 | * @c: UBIFS file-system description object | ||
913 | * @fst_dir: parent inode of 1st directory entry to exchange | ||
914 | * @fst_dentry: 1st directory entry to exchange | ||
915 | * @snd_dir: parent inode of 2nd directory entry to exchange | ||
916 | * @snd_dentry: 2nd directory entry to exchange | ||
917 | * @sync: non-zero if the write-buffer has to be synchronized | ||
918 | * | ||
919 | * This function implements the cross rename operation which may involve | ||
920 | * writing 2 inodes and 2 directory entries. It marks the written inodes as clean | ||
921 | * and returns zero on success. In case of failure, a negative error code is | ||
922 | * returned. | ||
923 | */ | ||
924 | int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, | ||
925 | const struct dentry *fst_dentry, | ||
926 | const struct inode *snd_dir, | ||
927 | const struct dentry *snd_dentry, int sync) | ||
928 | { | ||
929 | union ubifs_key key; | ||
930 | struct ubifs_dent_node *dent1, *dent2; | ||
931 | int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ; | ||
932 | int aligned_dlen1, aligned_dlen2; | ||
933 | int twoparents = (fst_dir != snd_dir); | ||
934 | const struct inode *fst_inode = d_inode(fst_dentry); | ||
935 | const struct inode *snd_inode = d_inode(snd_dentry); | ||
936 | void *p; | ||
937 | |||
938 | dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu", | ||
939 | fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino); | ||
940 | |||
941 | ubifs_assert(ubifs_inode(fst_dir)->data_len == 0); | ||
942 | ubifs_assert(ubifs_inode(snd_dir)->data_len == 0); | ||
943 | ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); | ||
944 | ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); | ||
945 | |||
946 | dlen1 = UBIFS_DENT_NODE_SZ + snd_dentry->d_name.len + 1; | ||
947 | dlen2 = UBIFS_DENT_NODE_SZ + fst_dentry->d_name.len + 1; | ||
948 | aligned_dlen1 = ALIGN(dlen1, 8); | ||
949 | aligned_dlen2 = ALIGN(dlen2, 8); | ||
950 | |||
951 | len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8); | ||
952 | if (twoparents) | ||
953 | len += plen; | ||
954 | |||
955 | dent1 = kmalloc(len, GFP_NOFS); | ||
956 | if (!dent1) | ||
957 | return -ENOMEM; | ||
958 | |||
959 | /* Make reservation before allocating sequence numbers */ | ||
960 | err = make_reservation(c, BASEHD, len); | ||
961 | if (err) | ||
962 | goto out_free; | ||
963 | |||
964 | /* Make new dent for 1st entry */ | ||
965 | dent1->ch.node_type = UBIFS_DENT_NODE; | ||
966 | dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, &snd_dentry->d_name); | ||
967 | dent1->inum = cpu_to_le64(fst_inode->i_ino); | ||
968 | dent1->type = get_dent_type(fst_inode->i_mode); | ||
969 | dent1->nlen = cpu_to_le16(snd_dentry->d_name.len); | ||
970 | memcpy(dent1->name, snd_dentry->d_name.name, snd_dentry->d_name.len); | ||
971 | dent1->name[snd_dentry->d_name.len] = '\0'; | ||
972 | zero_dent_node_unused(dent1); | ||
973 | ubifs_prep_grp_node(c, dent1, dlen1, 0); | ||
974 | |||
975 | /* Make new dent for 2nd entry */ | ||
976 | dent2 = (void *)dent1 + aligned_dlen1; | ||
977 | dent2->ch.node_type = UBIFS_DENT_NODE; | ||
978 | dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, &fst_dentry->d_name); | ||
979 | dent2->inum = cpu_to_le64(snd_inode->i_ino); | ||
980 | dent2->type = get_dent_type(snd_inode->i_mode); | ||
981 | dent2->nlen = cpu_to_le16(fst_dentry->d_name.len); | ||
982 | memcpy(dent2->name, fst_dentry->d_name.name, fst_dentry->d_name.len); | ||
983 | dent2->name[fst_dentry->d_name.len] = '\0'; | ||
984 | zero_dent_node_unused(dent2); | ||
985 | ubifs_prep_grp_node(c, dent2, dlen2, 0); | ||
986 | |||
987 | p = (void *)dent2 + aligned_dlen2; | ||
988 | if (!twoparents) | ||
989 | pack_inode(c, p, fst_dir, 1); | ||
990 | else { | ||
991 | pack_inode(c, p, fst_dir, 0); | ||
992 | p += ALIGN(plen, 8); | ||
993 | pack_inode(c, p, snd_dir, 1); | ||
994 | } | ||
995 | |||
996 | err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync); | ||
997 | if (err) | ||
998 | goto out_release; | ||
999 | if (!sync) { | ||
1000 | struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; | ||
1001 | |||
1002 | ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino); | ||
1003 | ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino); | ||
1004 | } | ||
1005 | release_head(c, BASEHD); | ||
1006 | |||
1007 | dent_key_init(c, &key, snd_dir->i_ino, &snd_dentry->d_name); | ||
1008 | err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &snd_dentry->d_name); | ||
1009 | if (err) | ||
1010 | goto out_ro; | ||
1011 | |||
1012 | offs += aligned_dlen1; | ||
1013 | dent_key_init(c, &key, fst_dir->i_ino, &fst_dentry->d_name); | ||
1014 | err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &fst_dentry->d_name); | ||
1015 | if (err) | ||
1016 | goto out_ro; | ||
1017 | |||
1018 | offs += aligned_dlen2; | ||
1019 | |||
1020 | ino_key_init(c, &key, fst_dir->i_ino); | ||
1021 | err = ubifs_tnc_add(c, &key, lnum, offs, plen); | ||
1022 | if (err) | ||
1023 | goto out_ro; | ||
1024 | |||
1025 | if (twoparents) { | ||
1026 | offs += ALIGN(plen, 8); | ||
1027 | ino_key_init(c, &key, snd_dir->i_ino); | ||
1028 | err = ubifs_tnc_add(c, &key, lnum, offs, plen); | ||
1029 | if (err) | ||
1030 | goto out_ro; | ||
1031 | } | ||
1032 | |||
1033 | finish_reservation(c); | ||
1034 | |||
1035 | mark_inode_clean(c, ubifs_inode(fst_dir)); | ||
1036 | if (twoparents) | ||
1037 | mark_inode_clean(c, ubifs_inode(snd_dir)); | ||
1038 | kfree(dent1); | ||
1039 | return 0; | ||
1040 | |||
1041 | out_release: | ||
1042 | release_head(c, BASEHD); | ||
1043 | out_ro: | ||
1044 | ubifs_ro_mode(c, err); | ||
1045 | finish_reservation(c); | ||
1046 | out_free: | ||
1047 | kfree(dent1); | ||
1048 | return err; | ||
1049 | } | ||
1050 | |||
1051 | /** | ||
911 | * ubifs_jnl_rename - rename a directory entry. | 1052 | * ubifs_jnl_rename - rename a directory entry. |
912 | * @c: UBIFS file-system description object | 1053 | * @c: UBIFS file-system description object |
913 | * @old_dir: parent inode of directory entry to rename | 1054 | * @old_dir: parent inode of directory entry to rename |
@@ -917,14 +1058,15 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode) | |||
917 | * @sync: non-zero if the write-buffer has to be synchronized | 1058 | * @sync: non-zero if the write-buffer has to be synchronized |
918 | * | 1059 | * |
919 | * This function implements the re-name operation which may involve writing up | 1060 | * This function implements the re-name operation which may involve writing up |
920 | * to 3 inodes and 2 directory entries. It marks the written inodes as clean | 1061 | * to 4 inodes and 2 directory entries. It marks the written inodes as clean |
921 | * and returns zero on success. In case of failure, a negative error code is | 1062 | * and returns zero on success. In case of failure, a negative error code is |
922 | * returned. | 1063 | * returned. |
923 | */ | 1064 | */ |
924 | int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | 1065 | int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, |
925 | const struct dentry *old_dentry, | 1066 | const struct dentry *old_dentry, |
926 | const struct inode *new_dir, | 1067 | const struct inode *new_dir, |
927 | const struct dentry *new_dentry, int sync) | 1068 | const struct dentry *new_dentry, |
1069 | const struct inode *whiteout, int sync) | ||
928 | { | 1070 | { |
929 | void *p; | 1071 | void *p; |
930 | union ubifs_key key; | 1072 | union ubifs_key key; |
@@ -958,7 +1100,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | |||
958 | aligned_dlen1 = ALIGN(dlen1, 8); | 1100 | aligned_dlen1 = ALIGN(dlen1, 8); |
959 | aligned_dlen2 = ALIGN(dlen2, 8); | 1101 | aligned_dlen2 = ALIGN(dlen2, 8); |
960 | len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8); | 1102 | len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8); |
961 | if (old_dir != new_dir) | 1103 | if (move) |
962 | len += plen; | 1104 | len += plen; |
963 | dent = kmalloc(len, GFP_NOFS); | 1105 | dent = kmalloc(len, GFP_NOFS); |
964 | if (!dent) | 1106 | if (!dent) |
@@ -980,13 +1122,19 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | |||
980 | zero_dent_node_unused(dent); | 1122 | zero_dent_node_unused(dent); |
981 | ubifs_prep_grp_node(c, dent, dlen1, 0); | 1123 | ubifs_prep_grp_node(c, dent, dlen1, 0); |
982 | 1124 | ||
983 | /* Make deletion dent */ | ||
984 | dent2 = (void *)dent + aligned_dlen1; | 1125 | dent2 = (void *)dent + aligned_dlen1; |
985 | dent2->ch.node_type = UBIFS_DENT_NODE; | 1126 | dent2->ch.node_type = UBIFS_DENT_NODE; |
986 | dent_key_init_flash(c, &dent2->key, old_dir->i_ino, | 1127 | dent_key_init_flash(c, &dent2->key, old_dir->i_ino, |
987 | &old_dentry->d_name); | 1128 | &old_dentry->d_name); |
988 | dent2->inum = 0; | 1129 | |
989 | dent2->type = DT_UNKNOWN; | 1130 | if (whiteout) { |
1131 | dent2->inum = cpu_to_le64(whiteout->i_ino); | ||
1132 | dent2->type = get_dent_type(whiteout->i_mode); | ||
1133 | } else { | ||
1134 | /* Make deletion dent */ | ||
1135 | dent2->inum = 0; | ||
1136 | dent2->type = DT_UNKNOWN; | ||
1137 | } | ||
990 | dent2->nlen = cpu_to_le16(old_dentry->d_name.len); | 1138 | dent2->nlen = cpu_to_le16(old_dentry->d_name.len); |
991 | memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len); | 1139 | memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len); |
992 | dent2->name[old_dentry->d_name.len] = '\0'; | 1140 | dent2->name[old_dentry->d_name.len] = '\0'; |
@@ -1035,16 +1183,26 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | |||
1035 | if (err) | 1183 | if (err) |
1036 | goto out_ro; | 1184 | goto out_ro; |
1037 | 1185 | ||
1038 | err = ubifs_add_dirt(c, lnum, dlen2); | 1186 | offs += aligned_dlen1; |
1039 | if (err) | 1187 | if (whiteout) { |
1040 | goto out_ro; | 1188 | dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name); |
1189 | err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &old_dentry->d_name); | ||
1190 | if (err) | ||
1191 | goto out_ro; | ||
1041 | 1192 | ||
1042 | dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name); | 1193 | ubifs_delete_orphan(c, whiteout->i_ino); |
1043 | err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name); | 1194 | } else { |
1044 | if (err) | 1195 | err = ubifs_add_dirt(c, lnum, dlen2); |
1045 | goto out_ro; | 1196 | if (err) |
1197 | goto out_ro; | ||
1198 | |||
1199 | dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name); | ||
1200 | err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name); | ||
1201 | if (err) | ||
1202 | goto out_ro; | ||
1203 | } | ||
1046 | 1204 | ||
1047 | offs += aligned_dlen1 + aligned_dlen2; | 1205 | offs += aligned_dlen2; |
1048 | if (new_inode) { | 1206 | if (new_inode) { |
1049 | ino_key_init(c, &key, new_inode->i_ino); | 1207 | ino_key_init(c, &key, new_inode->i_ino); |
1050 | err = ubifs_tnc_add(c, &key, lnum, offs, ilen); | 1208 | err = ubifs_tnc_add(c, &key, lnum, offs, ilen); |
@@ -1058,7 +1216,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | |||
1058 | if (err) | 1216 | if (err) |
1059 | goto out_ro; | 1217 | goto out_ro; |
1060 | 1218 | ||
1061 | if (old_dir != new_dir) { | 1219 | if (move) { |
1062 | offs += ALIGN(plen, 8); | 1220 | offs += ALIGN(plen, 8); |
1063 | ino_key_init(c, &key, new_dir->i_ino); | 1221 | ino_key_init(c, &key, new_dir->i_ino); |
1064 | err = ubifs_tnc_add(c, &key, lnum, offs, plen); | 1222 | err = ubifs_tnc_add(c, &key, lnum, offs, plen); |
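Editor's note: with the whiteout support added above, dent2 no longer always encodes a deletion. When a whiteout inode is passed in, dent2 carries the whiteout's inode number and type instead of the deletion marker (inum 0, DT_UNKNOWN); on the TNC side the same test selects ubifs_tnc_add_nm() for the old name plus ubifs_delete_orphan() instead of ubifs_add_dirt() plus ubifs_tnc_remove_nm(). The following standalone sketch models just the dent2 encoding decision; the types, names and DT_* values are illustrative stand-ins, not the kernel ones.

/*
 * Sketch only: models how the second dent node in the rename journal
 * entry is encoded after the change above.
 */
#include <stdint.h>
#include <stdio.h>

struct dent_sketch {
	uint64_t inum;	/* 0 means "deletion dent" */
	int type;	/* DT_*-style type; 0 == unknown */
};

/* whiteout_ino == 0 models a plain rename (no whiteout inode) */
static struct dent_sketch encode_dent2(uint64_t whiteout_ino, int whiteout_type)
{
	struct dent_sketch d;

	if (whiteout_ino) {
		/* old name keeps resolving, now to the whiteout inode */
		d.inum = whiteout_ino;
		d.type = whiteout_type;
	} else {
		/* classic behaviour: a deletion dent for the old name */
		d.inum = 0;
		d.type = 0;
	}
	return d;
}

int main(void)
{
	struct dent_sketch plain = encode_dent2(0, 0);
	struct dent_sketch wht = encode_dent2(1234, 2 /* e.g. DT_CHR */);

	printf("plain rename:    inum=%llu type=%d\n",
	       (unsigned long long)plain.inum, plain.type);
	printf("whiteout rename: inum=%llu type=%d\n",
	       (unsigned long long)wht.inum, wht.type);
	return 0;
}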
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index a0011aa3a779..6c3a1abd0e22 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c | |||
@@ -636,7 +636,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, | |||
636 | /** | 636 | /** |
637 | * ubifs_get_lp_stats - get lprops statistics. | 637 | * ubifs_get_lp_stats - get lprops statistics. |
638 | * @c: UBIFS file-system description object | 638 | * @c: UBIFS file-system description object |
639 | * @st: return statistics | 639 | * @lst: return statistics |
640 | */ | 640 | */ |
641 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) | 641 | void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) |
642 | { | 642 | { |
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index ce89bdc3eb02..235654c2fe89 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
@@ -34,7 +34,6 @@ static int dbg_populate_lsave(struct ubifs_info *c); | |||
34 | 34 | ||
35 | /** | 35 | /** |
36 | * first_dirty_cnode - find first dirty cnode. | 36 | * first_dirty_cnode - find first dirty cnode. |
37 | * @c: UBIFS file-system description object | ||
38 | * @nnode: nnode at which to start | 37 | * @nnode: nnode at which to start |
39 | * | 38 | * |
40 | * This function returns the first dirty cnode or %NULL if there is not one. | 39 | * This function returns the first dirty cnode or %NULL if there is not one. |
@@ -1623,7 +1622,6 @@ static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum, | |||
1623 | * dbg_check_ltab_lnum - check the ltab for a LPT LEB number. | 1622 | * dbg_check_ltab_lnum - check the ltab for a LPT LEB number. |
1624 | * @c: the UBIFS file-system description object | 1623 | * @c: the UBIFS file-system description object |
1625 | * @lnum: LEB number where node was written | 1624 | * @lnum: LEB number where node was written |
1626 | * @offs: offset where node was written | ||
1627 | * | 1625 | * |
1628 | * This function returns %0 on success and a negative error code on failure. | 1626 | * This function returns %0 on success and a negative error code on failure. |
1629 | */ | 1627 | */ |
@@ -1870,7 +1868,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) | |||
1870 | } | 1868 | } |
1871 | 1869 | ||
1872 | /** | 1870 | /** |
1873 | * ubifs_dump_lpt_leb - dump an LPT LEB. | 1871 | * dump_lpt_leb - dump an LPT LEB. |
1874 | * @c: UBIFS file-system description object | 1872 | * @c: UBIFS file-system description object |
1875 | * @lnum: LEB number to dump | 1873 | * @lnum: LEB number to dump |
1876 | * | 1874 | * |
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 3ca4540130b5..fb0f44cd1e28 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c | |||
@@ -267,7 +267,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r) | |||
267 | * replay_entries_cmp - compare 2 replay entries. | 267 | * replay_entries_cmp - compare 2 replay entries. |
268 | * @priv: UBIFS file-system description object | 268 | * @priv: UBIFS file-system description object |
269 | * @a: first replay entry | 269 | * @a: first replay entry |
270 | * @a: second replay entry | 270 | * @b: second replay entry |
271 | * | 271 | * |
272 | * This is a comparios function for 'list_sort()' which compares 2 replay | 272 | * This is a comparios function for 'list_sort()' which compares 2 replay |
273 | * entries @a and @b by comparing their sequence numer. Returns %1 if @a has | 273 | * entries @a and @b by comparing their sequence numer. Returns %1 if @a has |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4617d459022a..096035eb29d0 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -157,6 +157,7 @@ enum { | |||
157 | WB_MUTEX_1 = 0, | 157 | WB_MUTEX_1 = 0, |
158 | WB_MUTEX_2 = 1, | 158 | WB_MUTEX_2 = 1, |
159 | WB_MUTEX_3 = 2, | 159 | WB_MUTEX_3 = 2, |
160 | WB_MUTEX_4 = 3, | ||
160 | }; | 161 | }; |
161 | 162 | ||
162 | /* | 163 | /* |
@@ -1520,10 +1521,15 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, | |||
1520 | const union ubifs_key *key, const void *buf, int len); | 1521 | const union ubifs_key *key, const void *buf, int len); |
1521 | int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode); | 1522 | int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode); |
1522 | int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode); | 1523 | int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode); |
1524 | int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, | ||
1525 | const struct dentry *fst_dentry, | ||
1526 | const struct inode *snd_dir, | ||
1527 | const struct dentry *snd_dentry, int sync); | ||
1523 | int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, | 1528 | int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, |
1524 | const struct dentry *old_dentry, | 1529 | const struct dentry *old_dentry, |
1525 | const struct inode *new_dir, | 1530 | const struct inode *new_dir, |
1526 | const struct dentry *new_dentry, int sync); | 1531 | const struct dentry *new_dentry, |
1532 | const struct inode *whiteout, int sync); | ||
1527 | int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, | 1533 | int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, |
1528 | loff_t old_size, loff_t new_size); | 1534 | loff_t old_size, loff_t new_size); |
1529 | int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, | 1535 | int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, |
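Editor's note: the new ubifs_jnl_xrename() prototype and the extra whiteout argument to ubifs_jnl_rename() imply a caller-side split between exchange renames and (possibly whiteout-creating) ordinary renames. The sketch below is an assumption about how a caller might dispatch on the rename flags; it is not the in-tree fs/ubifs/dir.c code, which this excerpt does not show, and the helper name is made up for illustration.

/*
 * Illustrative dispatch only -- assumed caller-side usage of the two
 * journal helpers declared above, within a kernel-tree build context.
 * 'whiteout' is NULL except for a whiteout-style rename, where the
 * caller is assumed to have prepared the whiteout inode already.
 */
#include <linux/fs.h>	/* RENAME_EXCHANGE */
#include "ubifs.h"	/* prototypes declared above */

static int rename_dispatch_sketch(struct ubifs_info *c,
				  const struct inode *old_dir,
				  const struct dentry *old_dentry,
				  const struct inode *new_dir,
				  const struct dentry *new_dentry,
				  const struct inode *whiteout,
				  unsigned int flags, int sync)
{
	if (flags & RENAME_EXCHANGE)
		/* swap two existing entries; no deletion dent is written */
		return ubifs_jnl_xrename(c, old_dir, old_dentry,
					 new_dir, new_dentry, sync);

	/* plain rename, or whiteout rename when 'whiteout' is non-NULL */
	return ubifs_jnl_rename(c, old_dir, old_dentry,
				new_dir, new_dentry, whiteout, sync);
}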
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 11a004114eba..6c2f4d41ed73 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -200,6 +200,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, | |||
200 | struct ubifs_inode *host_ui = ubifs_inode(host); | 200 | struct ubifs_inode *host_ui = ubifs_inode(host); |
201 | struct ubifs_inode *ui = ubifs_inode(inode); | 201 | struct ubifs_inode *ui = ubifs_inode(inode); |
202 | void *buf = NULL; | 202 | void *buf = NULL; |
203 | int old_size; | ||
203 | struct ubifs_budget_req req = { .dirtied_ino = 2, | 204 | struct ubifs_budget_req req = { .dirtied_ino = 2, |
204 | .dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) }; | 205 | .dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) }; |
205 | 206 | ||
@@ -217,12 +218,13 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, | |||
217 | kfree(ui->data); | 218 | kfree(ui->data); |
218 | ui->data = buf; | 219 | ui->data = buf; |
219 | inode->i_size = ui->ui_size = size; | 220 | inode->i_size = ui->ui_size = size; |
221 | old_size = ui->data_len; | ||
220 | ui->data_len = size; | 222 | ui->data_len = size; |
221 | mutex_unlock(&ui->ui_mutex); | 223 | mutex_unlock(&ui->ui_mutex); |
222 | 224 | ||
223 | mutex_lock(&host_ui->ui_mutex); | 225 | mutex_lock(&host_ui->ui_mutex); |
224 | host->i_ctime = ubifs_current_time(host); | 226 | host->i_ctime = ubifs_current_time(host); |
225 | host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len); | 227 | host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); |
226 | host_ui->xattr_size += CALC_XATTR_BYTES(size); | 228 | host_ui->xattr_size += CALC_XATTR_BYTES(size); |
227 | 229 | ||
228 | /* | 230 | /* |
@@ -241,7 +243,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, | |||
241 | 243 | ||
242 | out_cancel: | 244 | out_cancel: |
243 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); | 245 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); |
244 | host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); | 246 | host_ui->xattr_size += CALC_XATTR_BYTES(old_size); |
245 | mutex_unlock(&host_ui->ui_mutex); | 247 | mutex_unlock(&host_ui->ui_mutex); |
246 | make_bad_inode(inode); | 248 | make_bad_inode(inode); |
247 | out_free: | 249 | out_free: |
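Editor's note: the change_xattr() hunk above fixes the host inode's xattr_size accounting. Previously the code subtracted CALC_XATTR_BYTES(ui->data_len) after ui->data_len had already been overwritten with the new size, so the host was uncharged by the new size and recharged by the new size, and xattr_size never reflected the change when the attribute grew or shrank. Snapshotting the old length first makes the adjustment use the old and new sizes as intended. The arithmetic sketch below reproduces both orderings with hypothetical sizes and a simplified CALC_XATTR_BYTES (the real macro also accounts for node overhead).

/*
 * Sketch only: models the xattr_size bookkeeping fixed above.
 */
#include <stdio.h>

#define ALIGN8(x) (((x) + 7) & ~7)
#define CALC_XATTR_BYTES_SKETCH(len) (ALIGN8(len))

int main(void)
{
	int host_xattr_size = CALC_XATTR_BYTES_SKETCH(100); /* one xattr, old value 100 bytes */
	int data_len = 100;                                 /* ui->data_len before the change */
	int new_size = 40;                                  /* new xattr value length */

	/* Buggy order: data_len is overwritten first, then used for the subtraction */
	int buggy = host_xattr_size;
	int buggy_data_len = new_size;                      /* ui->data_len = size */
	buggy -= CALC_XATTR_BYTES_SKETCH(buggy_data_len);   /* removes the NEW size */
	buggy += CALC_XATTR_BYTES_SKETCH(new_size);

	/* Fixed order: remember the old length before overwriting it */
	int fixed = host_xattr_size;
	int old_size = data_len;                            /* snapshot, as in the hunk */
	data_len = new_size;
	fixed -= CALC_XATTR_BYTES_SKETCH(old_size);         /* removes the OLD size */
	fixed += CALC_XATTR_BYTES_SKETCH(new_size);

	printf("buggy host xattr_size: %d (size change never tracked)\n", buggy);
	printf("fixed host xattr_size: %d\n", fixed);
	printf("ui->data_len is now %d\n", data_len);
	return 0;
}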