path: root/drivers/mtd/ubi/eba.c
Diffstat (limited to 'drivers/mtd/ubi/eba.c')
-rw-r--r--  drivers/mtd/ubi/eba.c  |  85
1 file changed, 18 insertions, 67 deletions
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index c87db07bcd0c..5fdb31bc5636 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
  * logical eraseblock it is locked for reading or writing. The per-logical
  * eraseblock locking is implemented by means of the lock tree. The lock tree
  * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
  * (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- *         the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
-        struct rb_node rb;
-        int vol_id;
-        int lnum;
-        int users;
-        struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  * @vol_id: volume ID
  * @lnum: logical eraseblock number
  *
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  * object if the logical eraseblock is locked and %NULL if it is not.
  * @ubi->ltree_lock has to be locked.
  */
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
-                                        int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+                                            int lnum)
 {
         struct rb_node *p;
 
         p = ubi->ltree.rb_node;
         while (p) {
-                struct ltree_entry *le;
+                struct ubi_ltree_entry *le;
 
-                le = rb_entry(p, struct ltree_entry, rb);
+                le = rb_entry(p, struct ubi_ltree_entry, rb);
 
                 if (vol_id < le->vol_id)
                         p = p->rb_left;
@@ -155,12 +132,12 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  * failed.
  */
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
-                                           int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+                                               int vol_id, int lnum)
 {
-        struct ltree_entry *le, *le1, *le_free;
+        struct ubi_ltree_entry *le, *le1, *le_free;
 
-        le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+        le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
         if (!le)
                 return ERR_PTR(-ENOMEM);
 
@@ -189,7 +166,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
         p = &ubi->ltree.rb_node;
         while (*p) {
                 parent = *p;
-                le1 = rb_entry(parent, struct ltree_entry, rb);
+                le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
 
                 if (vol_id < le1->vol_id)
                         p = &(*p)->rb_left;
@@ -211,7 +188,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
         spin_unlock(&ubi->ltree_lock);
 
         if (le_free)
-                kmem_cache_free(ltree_slab, le_free);
+                kmem_cache_free(ubi_ltree_slab, le_free);
 
         return le;
 }
@@ -227,7 +204,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
  */
 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-        struct ltree_entry *le;
+        struct ubi_ltree_entry *le;
 
         le = ltree_add_entry(ubi, vol_id, lnum);
         if (IS_ERR(le))
@@ -245,7 +222,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
         int free = 0;
-        struct ltree_entry *le;
+        struct ubi_ltree_entry *le;
 
         spin_lock(&ubi->ltree_lock);
         le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +236,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
         up_read(&le->mutex);
         if (free)
-                kmem_cache_free(ltree_slab, le);
+                kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -273,7 +250,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-        struct ltree_entry *le;
+        struct ubi_ltree_entry *le;
 
         le = ltree_add_entry(ubi, vol_id, lnum);
         if (IS_ERR(le))
@@ -291,7 +268,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
         int free;
-        struct ltree_entry *le;
+        struct ubi_ltree_entry *le;
 
         spin_lock(&ubi->ltree_lock);
         le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,7 +283,7 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
         up_write(&le->mutex);
         if (free)
-                kmem_cache_free(ltree_slab, le);
+                kmem_cache_free(ubi_ltree_slab, le);
 }
 
 /**
@@ -931,20 +908,6 @@ write_error:
 }
 
 /**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-        struct ltree_entry *le = obj;
-
-        le->users = 0;
-        init_rwsem(&le->mutex);
-}
-
-/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -1128,14 +1091,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
         mutex_init(&ubi->alc_mutex);
         ubi->ltree = RB_ROOT;
 
-        if (ubi_devices_cnt == 0) {
-                ltree_slab = kmem_cache_create("ubi_ltree_slab",
-                                               sizeof(struct ltree_entry), 0,
-                                               0, &ltree_entry_ctor);
-                if (!ltree_slab)
-                        return -ENOMEM;
-        }
-
         ubi->global_sqnum = si->max_sqnum + 1;
         num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
@@ -1205,8 +1160,6 @@ out_free:
                         continue;
                 kfree(ubi->volumes[i]->eba_tbl);
         }
-        if (ubi_devices_cnt == 0)
-                kmem_cache_destroy(ltree_slab);
         return err;
 }
 
@@ -1225,6 +1178,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
                         continue;
                 kfree(ubi->volumes[i]->eba_tbl);
         }
-        if (ubi_devices_cnt == 1)
-                kmem_cache_destroy(ltree_slab);
 }
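
Note: the struct and slab deleted above are renamed and shared, not removed. Below is a minimal sketch of the counterpart this diff implies elsewhere in the UBI code; the field layout is copied verbatim from the struct ltree_entry removed from eba.c, while the placement in drivers/mtd/ubi/ubi.h and the extern declaration are assumptions, since only the new names ubi_ltree_entry and ubi_ltree_slab are visible in this file.

#include <linux/rbtree.h>   /* struct rb_node */
#include <linux/rwsem.h>    /* struct rw_semaphore */
#include <linux/slab.h>     /* struct kmem_cache */

/* Sketch: assumed to live in ubi.h after this change. The fields are
 * exactly those of the struct ltree_entry removed from eba.c above. */
struct ubi_ltree_entry {
        struct rb_node rb;         /* links RB-tree nodes in @ubi->ltree */
        int vol_id;                /* volume ID of the locked logical eraseblock */
        int lnum;                  /* locked logical eraseblock number */
        int users;                 /* tasks using or waiting for this eraseblock */
        struct rw_semaphore mutex; /* serializes read/write access to the LEB */
};

/* Assumed single UBI-wide cache: the deleted ubi_devices_cnt checks in
 * ubi_eba_init_scan() and ubi_eba_close() show that creating and
 * destroying the cache is no longer a per-device EBA job, so it is
 * presumably owned by the UBI core now. */
extern struct kmem_cache *ubi_ltree_slab;

The locking protocol itself is untouched by this diff: ltree_add_entry() still finds or inserts an entry under @ubi->ltree_lock, leb_read_lock()/leb_write_lock() then take le->mutex for reading or writing, and the unlock paths return the entry to the cache once it has no remaining users. Every hunk is a mechanical rename of ltree_entry to ubi_ltree_entry and ltree_slab to ubi_ltree_slab, which together with the dropped definitions accounts for the 18 insertions against 67 deletions in the diffstat.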