author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-01-16 05:11:54 -0500
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-01-25 09:41:24 -0500
commit    b9a06623d9d0c6dff758d525ceb0d9e2bba8f7d6 (patch)
tree      9107daa5cf1aa527603c7d33d55ca45dbe760d5a /drivers/mtd/ubi/eba.c
parent    4fac9f698404a5cd50b978fbdb7e54235353c215 (diff)
UBI: get rid of ubi_ltree_slab
This slab cache is not really needed since the number of objects is low, and the constructor does not make much sense because we allocate objects while doing I/O, which is far slower than the allocation itself.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
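For illustration, here is a minimal sketch of the allocation pattern the patch switches to: the entry is obtained with plain kmalloc(), and the fields the removed slab constructor would otherwise have set up (le->users and le->mutex, per the first hunk) are initialized explicitly. The struct and function names below are simplified stand-ins for illustration only, not the actual eba.c code:

#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/err.h>

/* Simplified stand-in for struct ubi_ltree_entry (the real one lives in
 * drivers/mtd/ubi/ubi.h); names here are illustrative, not the driver's. */
struct ltree_entry_sketch {
	int users;
	int vol_id;
	int lnum;
	struct rw_semaphore mutex;
};

static struct ltree_entry_sketch *alloc_entry_sketch(int vol_id, int lnum)
{
	struct ltree_entry_sketch *le;

	/* Plain kmalloc() replaces kmem_cache_alloc(): the allocation sits on
	 * the I/O path, so setting up a few fields here costs little compared
	 * to the I/O itself. */
	le = kmalloc(sizeof(*le), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	/* Initialization the slab constructor used to provide is now explicit. */
	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	return le;
}

The matching free path then becomes a plain kfree(), as the remaining hunks show.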
Diffstat (limited to 'drivers/mtd/ubi/eba.c')
-rw-r--r--  drivers/mtd/ubi/eba.c  12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 85297cde4ac5..7c05c6e1abc7 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -137,10 +137,12 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
 {
 	struct ubi_ltree_entry *le, *le1, *le_free;
 
-	le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
+	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
 	if (!le)
 		return ERR_PTR(-ENOMEM);
 
+	le->users = 0;
+	init_rwsem(&le->mutex);
 	le->vol_id = vol_id;
 	le->lnum = lnum;
 
@@ -188,7 +190,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
 	spin_unlock(&ubi->ltree_lock);
 
 	if (le_free)
-		kmem_cache_free(ubi_ltree_slab, le_free);
+		kfree(le_free);
 
 	return le;
 }
@@ -236,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_read(&le->mutex);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 }
 
 /**
@@ -292,7 +294,7 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 		free = 0;
 	spin_unlock(&ubi->ltree_lock);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 
 	return 1;
 }
@@ -321,7 +323,7 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_write(&le->mutex);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 }
 
 /**