author	Andreas Gruenbacher <agruen@suse.de>	2010-07-19 12:19:41 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-09 16:48:45 -0400
commit	2aec7c523291621ebb68ba8e0bd9b52a26bb76ee (patch)
tree	5be94d61cb157c0482c4e4005e438844c0312dd0
parent	365b18189789bfa1acd9939e6312b8a4b4577b28 (diff)
mbcache: Remove unused features
The mbcache code was written to support a variable number of indexes, but all the existing users use exactly one index. Simplify the code to support only that case. There are also no users of the cache entry free operation, and none of the users keep extra data in cache entries. Remove those features as well.

Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
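For illustration only (not part of the commit message): a minimal sketch of how a caller's calls change under the single-index API, using the functions exactly as they appear in the diff below. The cache name and local variables are placeholders, and error handling is elided.

	/* before: entry size, index count and a free callback are passed in,
	   and every lookup names index 0 explicitly */
	cache = mb_cache_create("example_xattr", NULL,
				sizeof(struct mb_cache_entry) +
				sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
				1, 6);
	error = mb_cache_entry_insert(ce, bdev, block, &hash);
	ce = mb_cache_entry_find_first(cache, 0, bdev, hash);

	/* after: one implicit index, a plain key, no callbacks */
	cache = mb_cache_create("example_xattr", 6);
	error = mb_cache_entry_insert(ce, bdev, block, hash);
	ce = mb_cache_entry_find_first(cache, bdev, hash);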
-rw-r--r--	fs/ext2/xattr.c	12
-rw-r--r--	fs/ext3/xattr.c	12
-rw-r--r--	fs/ext4/xattr.c	12
-rw-r--r--	fs/mbcache.c	141
-rw-r--r--	include/linux/mbcache.h	20
5 files changed, 60 insertions, 137 deletions
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 5ab87e6edffc..8c29ae15129e 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -843,7 +843,7 @@ ext2_xattr_cache_insert(struct buffer_head *bh)
 	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
 	if (!ce)
 		return -ENOMEM;
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
 	if (error) {
 		mb_cache_entry_free(ce);
 		if (error == -EBUSY) {
@@ -917,8 +917,8 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 		return NULL;	/* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
 again:
-	ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
-				       inode->i_sb->s_bdev, hash);
+	ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
+				       hash);
 	while (ce) {
 		struct buffer_head *bh;
 
@@ -950,7 +950,7 @@ again:
 			unlock_buffer(bh);
 			brelse(bh);
 		}
-		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
 	}
 	return NULL;
 }
@@ -1026,9 +1026,7 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header,
 int __init
 init_ext2_xattr(void)
 {
-	ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
-		sizeof(struct mb_cache_entry) +
-		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+	ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);
 	if (!ext2_xattr_cache)
 		return -ENOMEM;
 	return 0;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 71fb8d65e54c..e69dc6dfaa89 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -1139,7 +1139,7 @@ ext3_xattr_cache_insert(struct buffer_head *bh)
 		ea_bdebug(bh, "out of memory");
 		return;
 	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
 	if (error) {
 		mb_cache_entry_free(ce);
 		if (error == -EBUSY) {
@@ -1211,8 +1211,8 @@ ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
 		return NULL;	/* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
 again:
-	ce = mb_cache_entry_find_first(ext3_xattr_cache, 0,
-				       inode->i_sb->s_bdev, hash);
+	ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
+				       hash);
 	while (ce) {
 		struct buffer_head *bh;
 
@@ -1237,7 +1237,7 @@ again:
 			return bh;
 		}
 		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
 	}
 	return NULL;
 }
@@ -1313,9 +1313,7 @@ static void ext3_xattr_rehash(struct ext3_xattr_header *header,
 int __init
 init_ext3_xattr(void)
 {
-	ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
-		sizeof(struct mb_cache_entry) +
-		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+	ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
 	if (!ext3_xattr_cache)
 		return -ENOMEM;
 	return 0;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 04338009793a..1c93198353e7 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1418,7 +1418,7 @@ ext4_xattr_cache_insert(struct buffer_head *bh)
 		ea_bdebug(bh, "out of memory");
 		return;
 	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
 	if (error) {
 		mb_cache_entry_free(ce);
 		if (error == -EBUSY) {
@@ -1490,8 +1490,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
 		return NULL;	/* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
 again:
-	ce = mb_cache_entry_find_first(ext4_xattr_cache, 0,
-				       inode->i_sb->s_bdev, hash);
+	ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
+				       hash);
 	while (ce) {
 		struct buffer_head *bh;
 
@@ -1515,7 +1515,7 @@ again:
 			return bh;
 		}
 		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
 	}
 	return NULL;
 }
@@ -1591,9 +1591,7 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
 int __init
 init_ext4_xattr(void)
 {
-	ext4_xattr_cache = mb_cache_create("ext4_xattr", NULL,
-		sizeof(struct mb_cache_entry) +
-		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+	ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
 	if (!ext4_xattr_cache)
 		return -ENOMEM;
 	return 0;
diff --git a/fs/mbcache.c b/fs/mbcache.c
index e28f21b95344..8a2cbd823079 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -79,15 +79,11 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
 struct mb_cache {
 	struct list_head	c_cache_list;
 	const char		*c_name;
-	struct mb_cache_op	c_op;
 	atomic_t		c_entry_count;
 	int			c_bucket_bits;
-#ifndef MB_CACHE_INDEXES_COUNT
-	int			c_indexes_count;
-#endif
 	struct kmem_cache	*c_entry_cache;
 	struct list_head	*c_block_hash;
-	struct list_head	*c_indexes_hash[0];
+	struct list_head	*c_index_hash;
 };
 
 
@@ -101,16 +97,6 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-static inline int
-mb_cache_indexes(struct mb_cache *cache)
-{
-#ifdef MB_CACHE_INDEXES_COUNT
-	return MB_CACHE_INDEXES_COUNT;
-#else
-	return cache->c_indexes_count;
-#endif
-}
-
 /*
  * What the mbcache registers as to get shrunk dynamically.
  */
@@ -132,12 +118,9 @@ __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 static void
 __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 {
-	int n;
-
 	if (__mb_cache_entry_is_hashed(ce)) {
 		list_del_init(&ce->e_block_list);
-		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
-			list_del(&ce->e_indexes[n].o_list);
+		list_del(&ce->e_index.o_list);
 	}
 }
 
@@ -148,16 +131,8 @@ __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
 	struct mb_cache *cache = ce->e_cache;
 
 	mb_assert(!(ce->e_used || ce->e_queued));
-	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
-		/* free failed -- put back on the lru list
-		   for freeing later. */
-		spin_lock(&mb_cache_spinlock);
-		list_add(&ce->e_lru_list, &mb_cache_lru_list);
-		spin_unlock(&mb_cache_spinlock);
-	} else {
-		kmem_cache_free(cache->c_entry_cache, ce);
-		atomic_dec(&cache->c_entry_count);
-	}
+	kmem_cache_free(cache->c_entry_cache, ce);
+	atomic_dec(&cache->c_entry_count);
 }
 
 
@@ -243,72 +218,49 @@ out:
  * memory was available.
  *
  * @name: name of the cache (informal)
- * @cache_op: contains the callback called when freeing a cache entry
- * @entry_size: The size of a cache entry, including
- *              struct mb_cache_entry
- * @indexes_count: number of additional indexes in the cache. Must equal
- *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
- *                 hardwired.
  * @bucket_bits: log2(number of hash buckets)
  */
 struct mb_cache *
-mb_cache_create(const char *name, struct mb_cache_op *cache_op,
-		size_t entry_size, int indexes_count, int bucket_bits)
+mb_cache_create(const char *name, int bucket_bits)
 {
-	int m=0, n, bucket_count = 1 << bucket_bits;
+	int n, bucket_count = 1 << bucket_bits;
 	struct mb_cache *cache = NULL;
 
-	if(entry_size < sizeof(struct mb_cache_entry) +
-	   indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
-		return NULL;
-
-	cache = kmalloc(sizeof(struct mb_cache) +
-			indexes_count * sizeof(struct list_head), GFP_KERNEL);
+	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
 	if (!cache)
-		goto fail;
+		return NULL;
 	cache->c_name = name;
-	cache->c_op.free = NULL;
-	if (cache_op)
-		cache->c_op.free = cache_op->free;
 	atomic_set(&cache->c_entry_count, 0);
 	cache->c_bucket_bits = bucket_bits;
-#ifdef MB_CACHE_INDEXES_COUNT
-	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
-#else
-	cache->c_indexes_count = indexes_count;
-#endif
 	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
 				      GFP_KERNEL);
 	if (!cache->c_block_hash)
 		goto fail;
 	for (n=0; n<bucket_count; n++)
 		INIT_LIST_HEAD(&cache->c_block_hash[n]);
-	for (m=0; m<indexes_count; m++) {
-		cache->c_indexes_hash[m] = kmalloc(bucket_count *
-						   sizeof(struct list_head),
-						   GFP_KERNEL);
-		if (!cache->c_indexes_hash[m])
-			goto fail;
-		for (n=0; n<bucket_count; n++)
-			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
-	}
-	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
+	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
+				      GFP_KERNEL);
+	if (!cache->c_index_hash)
+		goto fail;
+	for (n=0; n<bucket_count; n++)
+		INIT_LIST_HEAD(&cache->c_index_hash[n]);
+	cache->c_entry_cache = kmem_cache_create(name,
+		sizeof(struct mb_cache_entry), 0,
 		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
 	if (!cache->c_entry_cache)
-		goto fail;
+		goto fail2;
 
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
 	return cache;
 
+fail2:
+	kfree(cache->c_index_hash);
+
 fail:
-	if (cache) {
-		while (--m >= 0)
-			kfree(cache->c_indexes_hash[m]);
-		kfree(cache->c_block_hash);
-		kfree(cache);
-	}
+	kfree(cache->c_block_hash);
+	kfree(cache);
 	return NULL;
 }
 
@@ -357,7 +309,6 @@ mb_cache_destroy(struct mb_cache *cache)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
-	int n;
 
 	spin_lock(&mb_cache_spinlock);
 	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
@@ -384,8 +335,7 @@ mb_cache_destroy(struct mb_cache *cache)
 
 	kmem_cache_destroy(cache->c_entry_cache);
 
-	for (n=0; n < mb_cache_indexes(cache); n++)
-		kfree(cache->c_indexes_hash[n]);
+	kfree(cache->c_index_hash);
 	kfree(cache->c_block_hash);
 	kfree(cache);
 }
@@ -429,17 +379,16 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
  *
  * @bdev: device the cache entry belongs to
  * @block: block number
- * @keys: array of additional keys. There must be indexes_count entries
- *        in the array (as specified when creating the cache).
+ * @key: lookup key
  */
 int
 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
-		      sector_t block, unsigned int keys[])
+		      sector_t block, unsigned int key)
 {
 	struct mb_cache *cache = ce->e_cache;
 	unsigned int bucket;
 	struct list_head *l;
-	int error = -EBUSY, n;
+	int error = -EBUSY;
 
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
 			   cache->c_bucket_bits);
@@ -454,12 +403,9 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
 	ce->e_bdev = bdev;
 	ce->e_block = block;
 	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
-	for (n=0; n<mb_cache_indexes(cache); n++) {
-		ce->e_indexes[n].o_key = keys[n];
-		bucket = hash_long(keys[n], cache->c_bucket_bits);
-		list_add(&ce->e_indexes[n].o_list,
-			 &cache->c_indexes_hash[n][bucket]);
-	}
+	ce->e_index.o_key = key;
+	bucket = hash_long(key, cache->c_bucket_bits);
+	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
 	error = 0;
 out:
 	spin_unlock(&mb_cache_spinlock);
@@ -555,13 +501,12 @@ cleanup:
 
 static struct mb_cache_entry *
 __mb_cache_entry_find(struct list_head *l, struct list_head *head,
-		      int index, struct block_device *bdev, unsigned int key)
+		      struct block_device *bdev, unsigned int key)
 {
 	while (l != head) {
 		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry,
-				   e_indexes[index].o_list);
-		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+			list_entry(l, struct mb_cache_entry, e_index.o_list);
+		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
 			DEFINE_WAIT(wait);
 
 			if (!list_empty(&ce->e_lru_list))
@@ -603,23 +548,20 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
  * returned cache entry is locked for shared access ("multiple readers").
  *
  * @cache: the cache to search
- * @index: the number of the additonal index to search (0<=index<indexes_count)
  * @bdev: the device the cache entry should belong to
  * @key: the key in the index
  */
 struct mb_cache_entry *
-mb_cache_entry_find_first(struct mb_cache *cache, int index,
-			  struct block_device *bdev, unsigned int key)
+mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
+			  unsigned int key)
 {
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
 	struct list_head *l;
 	struct mb_cache_entry *ce;
 
-	mb_assert(index < mb_cache_indexes(cache));
 	spin_lock(&mb_cache_spinlock);
-	l = cache->c_indexes_hash[index][bucket].next;
-	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-				   index, bdev, key);
+	l = cache->c_index_hash[bucket].next;
+	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
 	spin_unlock(&mb_cache_spinlock);
 	return ce;
 }
@@ -640,12 +582,11 @@ mb_cache_entry_find_first(struct mb_cache *cache, int index,
  *      }
  *
  * @prev: The previous match
- * @index: the number of the additonal index to search (0<=index<indexes_count)
  * @bdev: the device the cache entry should belong to
  * @key: the key in the index
  */
 struct mb_cache_entry *
-mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
+mb_cache_entry_find_next(struct mb_cache_entry *prev,
 			 struct block_device *bdev, unsigned int key)
 {
 	struct mb_cache *cache = prev->e_cache;
@@ -653,11 +594,9 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
 	struct list_head *l;
 	struct mb_cache_entry *ce;
 
-	mb_assert(index < mb_cache_indexes(cache));
 	spin_lock(&mb_cache_spinlock);
-	l = prev->e_indexes[index].o_list.next;
-	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-				   index, bdev, key);
+	l = prev->e_index.o_list.next;
+	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
 	__mb_cache_entry_release_unlock(prev);
 	return ce;
 }
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index a09b84e4fdb4..54cbbac1e71d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -4,9 +4,6 @@
   (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */
 
-/* Hardwire the number of additional indexes */
-#define MB_CACHE_INDEXES_COUNT 1
-
 struct mb_cache_entry {
 	struct list_head e_lru_list;
 	struct mb_cache *e_cache;
@@ -18,17 +15,12 @@ struct mb_cache_entry {
 	struct {
 		struct list_head o_list;
 		unsigned int o_key;
-	} e_indexes[0];
-};
-
-struct mb_cache_op {
-	int (*free)(struct mb_cache_entry *, gfp_t);
+	} e_index;
 };
 
 /* Functions on caches */
 
-struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t,
-				  int, int);
+struct mb_cache *mb_cache_create(const char *, int);
 void mb_cache_shrink(struct block_device *);
 void mb_cache_destroy(struct mb_cache *);
 
@@ -36,17 +28,15 @@ void mb_cache_destroy(struct mb_cache *);
 
 struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
 int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
-			  sector_t, unsigned int[]);
+			  sector_t, unsigned int);
 void mb_cache_entry_release(struct mb_cache_entry *);
 void mb_cache_entry_free(struct mb_cache_entry *);
 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
 					   struct block_device *,
 					   sector_t);
-#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
-struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, int,
+struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
 						  struct block_device *,
 						  unsigned int);
-struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, int,
+struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *,
 						 struct block_device *,
 						 unsigned int);
-#endif