Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mtd/ubi/build.c | 26
-rw-r--r--  drivers/mtd/ubi/eba.c   | 85
-rw-r--r--  drivers/mtd/ubi/ubi.h   | 23
3 files changed, 67 insertions, 67 deletions
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5490a73deca5..44c852144a9c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -67,6 +67,9 @@ struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
 
+/* Slab cache for lock-tree entries */
+struct kmem_cache *ubi_ltree_slab;
+
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
 {
@@ -687,6 +690,20 @@ static void detach_mtd_dev(struct ubi_device *ubi)
 	ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
 }
 
+/**
+ * ltree_entry_ctor - lock tree entries slab cache constructor.
+ * @obj: the lock-tree entry to construct
+ * @cache: the lock tree entry slab cache
+ * @flags: constructor flags
+ */
+static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
+{
+	struct ubi_ltree_entry *le = obj;
+
+	le->users = 0;
+	init_rwsem(&le->mutex);
+}
+
 static int __init ubi_init(void)
 {
 	int err, i, k;
@@ -709,6 +726,12 @@ static int __init ubi_init(void)
 	if (err)
 		goto out_class;
 
+	ubi_ltree_slab = kmem_cache_create("ubi_ltree_slab",
+					   sizeof(struct ubi_ltree_entry), 0,
+					   0, &ltree_entry_ctor);
+	if (!ubi_ltree_slab)
+		goto out_version;
+
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
 		struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -724,6 +747,8 @@ static int __init ubi_init(void)
 out_detach:
 	for (k = 0; k < i; k++)
 		detach_mtd_dev(ubi_devices[k]);
+	kmem_cache_destroy(ubi_ltree_slab);
+out_version:
 	class_remove_file(ubi_class, &ubi_version);
 out_class:
 	class_destroy(ubi_class);
@@ -737,6 +762,7 @@ static void __exit ubi_exit(void)
 
 	for (i = 0; i < n; i++)
 		detach_mtd_dev(ubi_devices[i]);
+	kmem_cache_destroy(ubi_ltree_slab);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
 }
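The init-path changes above use the kernel's constructor-backed slab pattern: the cache is created once at module load, the constructor runs when a slab page is populated (not on every kmem_cache_alloc()), and objects must therefore be freed back in their constructed state. A minimal sketch of the same pattern, assuming the 2.6.24-era two-argument constructor prototype; the demo_* names are hypothetical:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/* Hypothetical stand-in for struct ubi_ltree_entry */
struct demo_entry {
	int users;
	struct rw_semaphore mutex;
};

static struct kmem_cache *demo_slab;

/* Runs once per object when its slab page is allocated, not per alloc */
static void demo_entry_ctor(struct kmem_cache *cache, void *obj)
{
	struct demo_entry *e = obj;

	e->users = 0;
	init_rwsem(&e->mutex);
}

static int __init demo_init(void)
{
	demo_slab = kmem_cache_create("demo_slab", sizeof(struct demo_entry),
				      0, 0, &demo_entry_ctor);
	if (!demo_slab)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	/* Legal only after every object has been freed back to the cache */
	kmem_cache_destroy(demo_slab);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This is also why ubi_eba_init_scan() below no longer needs its ubi_devices_cnt checks: ownership of the cache now lives entirely in ubi_init()/ubi_exit(), and an entry always returns to the cache with @users back at zero and its rw_semaphore released, i.e. in its constructed state.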
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index c87db07bcd0c..5fdb31bc5636 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
  * logical eraseblock it is locked for reading or writing. The per-logical
  * eraseblock locking is implemented by means of the lock tree. The lock tree
  * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
  * (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- *         the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
-	struct rb_node rb;
-	int vol_id;
-	int lnum;
-	int users;
-	struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  * @vol_id: volume ID
  * @lnum: logical eraseblock number
  *
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  * object if the logical eraseblock is locked and %NULL if it is not.
  * @ubi->ltree_lock has to be locked.
  */
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
-					int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+					    int lnum)
 {
 	struct rb_node *p;
 
 	p = ubi->ltree.rb_node;
 	while (p) {
-		struct ltree_entry *le;
+		struct ubi_ltree_entry *le;
 
-		le = rb_entry(p, struct ltree_entry, rb);
+		le = rb_entry(p, struct ubi_ltree_entry, rb);
 
 		if (vol_id < le->vol_id)
 			p = p->rb_left;
@@ -155,12 +132,12 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  * failed.
  */
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
-					   int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+					       int vol_id, int lnum)
 {
-	struct ltree_entry *le, *le1, *le_free;
+	struct ubi_ltree_entry *le, *le1, *le_free;
 
-	le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+	le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
 	if (!le)
 		return ERR_PTR(-ENOMEM);
 
@@ -189,7 +166,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 	p = &ubi->ltree.rb_node;
 	while (*p) {
 		parent = *p;
-		le1 = rb_entry(parent, struct ltree_entry, rb);
+		le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
 
 		if (vol_id < le1->vol_id)
 			p = &(*p)->rb_left;
@@ -211,7 +188,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 	spin_unlock(&ubi->ltree_lock);
 
 	if (le_free)
-		kmem_cache_free(ltree_slab, le_free);
+		kmem_cache_free(ubi_ltree_slab, le_free);
 
 	return le;
 }
@@ -227,7 +204,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
  */
 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -245,7 +222,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free = 0;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +236,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_read(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -273,7 +250,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -291,7 +268,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,7 +283,7 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_write(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -931,20 +908,6 @@ write_error:
 }
 
 /**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-	struct ltree_entry *le = obj;
-
-	le->users = 0;
-	init_rwsem(&le->mutex);
-}
-
-/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -1128,14 +1091,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
-	if (ubi_devices_cnt == 0) {
-		ltree_slab = kmem_cache_create("ubi_ltree_slab",
-					       sizeof(struct ltree_entry), 0,
-					       0, &ltree_entry_ctor);
-		if (!ltree_slab)
-			return -ENOMEM;
-	}
-
 	ubi->global_sqnum = si->max_sqnum + 1;
 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
@@ -1205,8 +1160,6 @@ out_free:
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(ltree_slab);
 	return err;
 }
 
@@ -1225,6 +1178,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(ltree_slab);
 }
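Note that the ltree_lookup() hunk above only shows the @vol_id comparisons; entries are ordered lexicographically by the (@vol_id, @lnum) pair, so the descent falls through to @lnum when the volume IDs match. A sketch of the complete two-key walk, assuming &struct ubi_ltree_entry as now declared in ubi.h below; pair_lookup() is a hypothetical name:

#include <linux/rbtree.h>
#include "ubi.h"

static struct ubi_ltree_entry *pair_lookup(struct rb_root *root,
					   int vol_id, int lnum)
{
	struct rb_node *p = root->rb_node;

	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else if (lnum < le->lnum)
			p = p->rb_left;
		else if (lnum > le->lnum)
			p = p->rb_right;
		else
			return le;	/* exact (vol_id, lnum) match */
	}
	return NULL;
}

ltree_add_entry() must mirror the same comparison order when choosing an insertion point, otherwise lookups and insertions would disagree about where a given pair lives in the tree.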
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 318ce2543fb8..0f2ea81b3122 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -97,6 +97,28 @@ enum {
 extern int ubi_devices_cnt;
 extern struct ubi_device *ubi_devices[];
 
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ *         the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is being locked - corresponding
+ * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+	struct rb_node rb;
+	int vol_id;
+	int lnum;
+	int users;
+	struct rw_semaphore mutex;
+};
+
 struct ubi_volume_desc;
 
 /**
@@ -359,6 +381,7 @@ struct ubi_device {
 #endif
 };
 
+extern struct kmem_cache *ubi_ltree_slab;
 extern struct file_operations ubi_cdev_operations;
 extern struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;
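With the structure exported here, @users and @mutex together form a reference-counted sleeping lock per (@vol_id, @lnum) pair. A hedged sketch of how a reader in the EBA unit uses the leb_read_lock()/leb_read_unlock() pair from eba.c above; read_leb_example() is a hypothetical caller with error handling trimmed:

/* Hypothetical caller; relies only on functions shown in eba.c above. */
static int read_leb_example(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err;

	/*
	 * leb_read_lock() adds (or finds) the lock-tree entry, bumps
	 * @users under @ubi->ltree_lock and takes @mutex for reading;
	 * multiple readers of the same LEB may hold it concurrently.
	 */
	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	/* ... the (vol_id, lnum) -> PEB mapping is stable here ... */

	/*
	 * leb_read_unlock() drops @mutex and returns the entry to
	 * ubi_ltree_slab once @users falls back to zero.
	 */
	leb_read_unlock(ubi, vol_id, lnum);
	return 0;
}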