author    Boris Brezillon <boris.brezillon@free-electrons.com>  2016-09-16 10:59:18 -0400
committer Richard Weinberger <richard@nod.at>                   2016-10-02 16:48:14 -0400
commit    91f4285fe389a2729efcd5db642d7652d8f27a40 (patch)
tree      b2c5374b3457c9a8b955b3a63d3651e2cfb1736b /drivers/mtd/ubi/fastmap.c
parent    fcbb6af17bda4b3856a1f4c302da5d0d7bf9a0f9 (diff)
UBI: provide helpers to allocate and free aeb elements
This not only hides the aeb allocation internals (which is always good in case we ever want to change the allocation scheme), but also lets us factor out the initialization of some common fields (ec and pnum).

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
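For context, the ubi_alloc_aeb()/ubi_free_aeb() helpers used below are introduced by this commit in attach.c and declared in ubi.h, which fall outside the fastmap.c-only diffstat shown here. A minimal sketch of what they might look like, inferred from the call sites being converted (the exact upstream definitions may differ):

/*
 * Sketch only: the real definitions added by this commit live in
 * attach.c / ubi.h and are not part of this fastmap.c-limited diff.
 */
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
				   int ec)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return NULL;

	/* Common fields factored out of the callers. */
	aeb->pnum = pnum;
	aeb->ec = ec;

	return aeb;
}

void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
{
	kmem_cache_free(ai->aeb_slab_cache, aeb);
}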
Diffstat (limited to 'drivers/mtd/ubi/fastmap.c')
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 28
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 7a07b8b53081..25e80a749a52 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -145,12 +145,10 @@ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
 {
 	struct ubi_ainf_peb *aeb;
 
-	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	aeb = ubi_alloc_aeb(ai, pnum, ec);
 	if (!aeb)
 		return -ENOMEM;
 
-	aeb->pnum = pnum;
-	aeb->ec = ec;
 	aeb->lnum = -1;
 	aeb->scrub = scrub;
 	aeb->copy_flag = aeb->sqnum = 0;
@@ -276,7 +274,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	 */
 	if (aeb->pnum == new_aeb->pnum) {
 		ubi_assert(aeb->lnum == new_aeb->lnum);
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+		ubi_free_aeb(ai, new_aeb);
 
 		return 0;
 	}
@@ -287,13 +285,10 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 
 	/* new_aeb is newer */
 	if (cmp_res & 1) {
-		victim = kmem_cache_alloc(ai->aeb_slab_cache,
-					  GFP_KERNEL);
+		victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
 		if (!victim)
 			return -ENOMEM;
 
-		victim->ec = aeb->ec;
-		victim->pnum = aeb->pnum;
 		list_add_tail(&victim->u.list, &ai->erase);
 
 		if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
@@ -307,7 +302,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		aeb->pnum = new_aeb->pnum;
 		aeb->copy_flag = new_vh->copy_flag;
 		aeb->scrub = new_aeb->scrub;
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+		ubi_free_aeb(ai, new_aeb);
 
 	/* new_aeb is older */
 	} else {
@@ -353,7 +348,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	struct ubi_ainf_volume *av;
 
 	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+		ubi_free_aeb(ai, new_aeb);
 
 		return 0;
 	}
@@ -362,7 +357,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	av = ubi_find_av(ai, vol_id);
 	if (!av) {
 		ubi_err(ubi, "orphaned volume in fastmap pool!");
-		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+		ubi_free_aeb(ai, new_aeb);
 		return UBI_BAD_FASTMAP;
 	}
 
@@ -390,7 +385,7 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
 		if (aeb->pnum == pnum) {
 			rb_erase(&aeb->u.rb, &av->root);
 			av->leb_count--;
-			kmem_cache_free(ai->aeb_slab_cache, aeb);
+			ubi_free_aeb(ai, aeb);
 			return;
 		}
 	}
@@ -485,15 +480,12 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		if (err == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
-					   GFP_KERNEL);
+		new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
 		if (!new_aeb) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
-		new_aeb->ec = be64_to_cpu(ech->ec);
-		new_aeb->pnum = pnum;
 		new_aeb->lnum = be32_to_cpu(vh->lnum);
 		new_aeb->sqnum = be64_to_cpu(vh->sqnum);
 		new_aeb->copy_flag = vh->copy_flag;
@@ -800,11 +792,11 @@ fail_bad:
 fail:
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
 		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		ubi_free_aeb(ai, tmp_aeb);
 	}
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 		list_del(&tmp_aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		ubi_free_aeb(ai, tmp_aeb);
 	}
 
 	return ret;