author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-16 05:49:01 -0500
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-26 12:15:14 -0500
commit    06b68ba15671f32a3aa3bbddf04b0d2dd7fbf902
tree      67024857173069023835148077775e8c9e381d8a
parent    3a8d4642861fb69b62401949e490c0bcb19ceb40
UBI: create ubi_wl_entry slab on initialization
Similarly to the ltree_entry slab, it makes more sense to create and destroy
the ubi_wl_entry slab on module initialization/exit.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
-rw-r--r--  drivers/mtd/ubi/build.c  13
-rw-r--r--  drivers/mtd/ubi/ubi.h    17
-rw-r--r--  drivers/mtd/ubi/wl.c     58
3 files changed, 44 insertions, 44 deletions
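The patch adopts the standard kernel idiom of pairing kmem_cache_create() in the module's init function with kmem_cache_destroy() in the exit function and in the init error-unwind path, instead of creating and destroying the cache based on how many devices are attached. A minimal standalone sketch of that idiom follows; the my_entry/my_slab names are illustrative stand-ins, not part of this patch:

#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative payload type; stands in for struct ubi_wl_entry. */
struct my_entry {
	int ec;
	int pnum;
};

static struct kmem_cache *my_slab;

static int __init my_init(void)
{
	/*
	 * Create the cache once, at module load. Objects are then
	 * allocated with kmem_cache_alloc(my_slab, GFP_KERNEL) and
	 * released with kmem_cache_free(my_slab, obj).
	 */
	my_slab = kmem_cache_create("my_slab", sizeof(struct my_entry),
				    0, 0, NULL);
	if (!my_slab)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	/* Destroy the cache exactly once, at module unload. */
	kmem_cache_destroy(my_slab);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

With the cache owned by the module, the per-device paths in wl.c no longer need the ubi_devices_cnt checks that previously decided when to create or destroy wl_entries_slab, as the diff below shows.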
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 44c852144a9c..7f6820becf10 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -70,6 +70,10 @@ struct class *ubi_class;
 /* Slab cache for lock-tree entries */
 struct kmem_cache *ubi_ltree_slab;
 
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
 {
@@ -732,6 +736,12 @@ static int __init ubi_init(void)
 	if (!ubi_ltree_slab)
 		goto out_version;
 
+	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+					      sizeof(struct ubi_wl_entry),
+					      0, 0, NULL);
+	if (!ubi_wl_entry_slab)
+		goto out_ltree;
+
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
 		struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -747,6 +757,8 @@ static int __init ubi_init(void)
 out_detach:
 	for (k = 0; k < i; k++)
 		detach_mtd_dev(ubi_devices[k]);
+	kmem_cache_destroy(ubi_wl_entry_slab);
+out_ltree:
 	kmem_cache_destroy(ubi_ltree_slab);
 out_version:
 	class_remove_file(ubi_class, &ubi_version);
@@ -762,6 +774,7 @@ static void __exit ubi_exit(void)
 
 	for (i = 0; i < n; i++)
 		detach_mtd_dev(ubi_devices[i]);
+	kmem_cache_destroy(ubi_wl_entry_slab);
 	kmem_cache_destroy(ubi_ltree_slab);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0f2ea81b3122..b7c93173e77b 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -98,6 +98,22 @@ extern int ubi_devices_cnt;
 extern struct ubi_device *ubi_devices[];
 
 /**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+	struct rb_node rb;
+	int ec;
+	int pnum;
+};
+
+/**
  * struct ubi_ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
  * @vol_id: volume ID of the locked logical eraseblock
@@ -382,6 +398,7 @@ struct ubi_device {
 };
 
 extern struct kmem_cache *ubi_ltree_slab;
+extern struct kmem_cache *ubi_wl_entry_slab;
 extern struct file_operations ubi_cdev_operations;
 extern struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a405d40faa23 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
-/**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
  * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -878,14 +860,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
+			kmem_cache_free(ubi_wl_entry_slab, e2);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e1, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
+		kmem_cache_free(ubi_wl_entry_slab, e1);
 		ubi_ro_mode(ubi);
 	}
 
@@ -920,14 +902,14 @@ error:
 		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
 		err = schedule_erase(ubi, e1, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
+			kmem_cache_free(ubi_wl_entry_slab, e1);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e2, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
+		kmem_cache_free(ubi_wl_entry_slab, e2);
 		ubi_ro_mode(ubi);
 	}
 
@@ -1020,7 +1002,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1031,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1294,7 +1276,7 @@ static void tree_destroy(struct rb_root *root)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1407,14 +1389,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		return err;
 	}
 
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
@@ -1423,7 +1397,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1431,7 +1405,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1413,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1453,7 +1427,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1461,7 +1435,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1470,7 +1444,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
@@ -1510,8 +1484,6 @@ out_free:
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
 
@@ -1541,7 +1513,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1565,8 +1537,6 @@ void ubi_wl_close(struct ubi_device *ubi)
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID