Diffstat (limited to 'fs/mbcache.c')
 -rw-r--r--  fs/mbcache.c  173
 1 file changed, 53 insertions(+), 120 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index ec88ff3d04a9..cf4e6cdfd15b 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -79,15 +79,11 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
 struct mb_cache {
         struct list_head                c_cache_list;
         const char                      *c_name;
-        struct mb_cache_op              c_op;
         atomic_t                        c_entry_count;
         int                             c_bucket_bits;
-#ifndef MB_CACHE_INDEXES_COUNT
-        int                             c_indexes_count;
-#endif
         struct kmem_cache               *c_entry_cache;
         struct list_head                *c_block_hash;
-        struct list_head                *c_indexes_hash[0];
+        struct list_head                *c_index_hash;
 };
 
 
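The hunks below collapse mbcache's variable number of per-entry indexes down to exactly one. The matching header change is outside this diffstat (which is limited to fs/mbcache.c), but the e_index.o_list and e_index.o_key accesses in this file imply that struct mb_cache_entry in include/linux/mbcache.h ends up roughly like this sketch; the fields other than e_index are assumed from the pre-patch layout:

    /* Hedged sketch of the post-patch entry; the authoritative definition
     * lives in include/linux/mbcache.h, which this diffstat does not show. */
    struct mb_cache_entry {
            struct list_head        e_lru_list;
            struct mb_cache         *e_cache;
            unsigned short          e_used;
            unsigned short          e_queued;
            struct block_device     *e_bdev;
            sector_t                e_block;
            struct list_head        e_block_list;   /* per-device block hash chain */
            struct {
                    struct list_head o_list;        /* c_index_hash chain */
                    unsigned int     o_key;
            } e_index;                              /* was: e_indexes[0] */
    };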
@@ -101,21 +97,11 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-static inline int
-mb_cache_indexes(struct mb_cache *cache)
-{
-#ifdef MB_CACHE_INDEXES_COUNT
-        return MB_CACHE_INDEXES_COUNT;
-#else
-        return cache->c_indexes_count;
-#endif
-}
-
 /*
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 static struct shrinker mb_cache_shrinker = {
         .shrink = mb_cache_shrink_fn,
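The added struct shrinker argument matches the shrinker callback interface of this kernel generation, which hands the registered shrinker back to its ->shrink method; mbcache simply ignores it. For context, registration happens elsewhere in the file, unchanged by this patch, and assuming the usual module-init pattern looks roughly like:

    /* Illustrative sketch only; the real registration site is not in this diff. */
    static int __init init_mbcache(void)
    {
            register_shrinker(&mb_cache_shrinker);
            return 0;
    }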
@@ -132,12 +118,9 @@ __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 static void
 __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 {
-        int n;
-
         if (__mb_cache_entry_is_hashed(ce)) {
                 list_del_init(&ce->e_block_list);
-                for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
-                        list_del(&ce->e_indexes[n].o_list);
+                list_del(&ce->e_index.o_list);
         }
 }
 
@@ -148,16 +131,8 @@ __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
         struct mb_cache *cache = ce->e_cache;
 
         mb_assert(!(ce->e_used || ce->e_queued));
-        if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
-                /* free failed -- put back on the lru list
-                   for freeing later. */
-                spin_lock(&mb_cache_spinlock);
-                list_add(&ce->e_lru_list, &mb_cache_lru_list);
-                spin_unlock(&mb_cache_spinlock);
-        } else {
-                kmem_cache_free(cache->c_entry_cache, ce);
-                atomic_dec(&cache->c_entry_count);
-        }
+        kmem_cache_free(cache->c_entry_cache, ce);
+        atomic_dec(&cache->c_entry_count);
 }
 
 
@@ -191,31 +166,22 @@ forget:
  * This function is called by the kernel memory management when memory
  * gets low.
  *
+ * @shrink: (ignored)
  * @nr_to_scan: Number of objects to scan
  * @gfp_mask: (ignored)
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
         LIST_HEAD(free_list);
-        struct list_head *l, *ltmp;
+        struct mb_cache *cache;
+        struct mb_cache_entry *entry, *tmp;
         int count = 0;
 
-        spin_lock(&mb_cache_spinlock);
-        list_for_each(l, &mb_cache_list) {
-                struct mb_cache *cache =
-                        list_entry(l, struct mb_cache, c_cache_list);
-                mb_debug("cache %s (%d)", cache->c_name,
-                         atomic_read(&cache->c_entry_count));
-                count += atomic_read(&cache->c_entry_count);
-        }
         mb_debug("trying to free %d entries", nr_to_scan);
-        if (nr_to_scan == 0) {
-                spin_unlock(&mb_cache_spinlock);
-                goto out;
-        }
+        spin_lock(&mb_cache_spinlock);
         while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                 struct mb_cache_entry *ce =
                         list_entry(mb_cache_lru_list.next,
@@ -223,12 +189,15 @@ mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
                 list_move_tail(&ce->e_lru_list, &free_list);
                 __mb_cache_entry_unhash(ce);
         }
+        list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+                mb_debug("cache %s (%d)", cache->c_name,
+                         atomic_read(&cache->c_entry_count));
+                count += atomic_read(&cache->c_entry_count);
+        }
         spin_unlock(&mb_cache_spinlock);
-        list_for_each_safe(l, ltmp, &free_list) {
-                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-                                                   e_lru_list), gfp_mask);
+        list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+                __mb_cache_entry_forget(entry, gfp_mask);
         }
-out:
         return (count / 100) * sysctl_vfs_cache_pressure;
 }
 
@@ -242,72 +211,49 @@ out:
  * memory was available.
  *
  * @name: name of the cache (informal)
- * @cache_op: contains the callback called when freeing a cache entry
- * @entry_size: The size of a cache entry, including
- *              struct mb_cache_entry
- * @indexes_count: number of additional indexes in the cache. Must equal
- *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
- *                 hardwired.
  * @bucket_bits: log2(number of hash buckets)
  */
 struct mb_cache *
-mb_cache_create(const char *name, struct mb_cache_op *cache_op,
-                size_t entry_size, int indexes_count, int bucket_bits)
+mb_cache_create(const char *name, int bucket_bits)
 {
-        int m=0, n, bucket_count = 1 << bucket_bits;
+        int n, bucket_count = 1 << bucket_bits;
         struct mb_cache *cache = NULL;
 
-        if(entry_size < sizeof(struct mb_cache_entry) +
-           indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
-                return NULL;
-
-        cache = kmalloc(sizeof(struct mb_cache) +
-                        indexes_count * sizeof(struct list_head), GFP_KERNEL);
+        cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
         if (!cache)
-                goto fail;
+                return NULL;
         cache->c_name = name;
-        cache->c_op.free = NULL;
-        if (cache_op)
-                cache->c_op.free = cache_op->free;
         atomic_set(&cache->c_entry_count, 0);
         cache->c_bucket_bits = bucket_bits;
-#ifdef MB_CACHE_INDEXES_COUNT
-        mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
-#else
-        cache->c_indexes_count = indexes_count;
-#endif
         cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                       GFP_KERNEL);
         if (!cache->c_block_hash)
                 goto fail;
         for (n=0; n<bucket_count; n++)
                 INIT_LIST_HEAD(&cache->c_block_hash[n]);
-        for (m=0; m<indexes_count; m++) {
-                cache->c_indexes_hash[m] = kmalloc(bucket_count *
-                                                   sizeof(struct list_head),
-                                                   GFP_KERNEL);
-                if (!cache->c_indexes_hash[m])
-                        goto fail;
-                for (n=0; n<bucket_count; n++)
-                        INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
-        }
-        cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
+        cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
+                                      GFP_KERNEL);
+        if (!cache->c_index_hash)
+                goto fail;
+        for (n=0; n<bucket_count; n++)
+                INIT_LIST_HEAD(&cache->c_index_hash[n]);
+        cache->c_entry_cache = kmem_cache_create(name,
+                sizeof(struct mb_cache_entry), 0,
                 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
         if (!cache->c_entry_cache)
-                goto fail;
+                goto fail2;
 
         spin_lock(&mb_cache_spinlock);
         list_add(&cache->c_cache_list, &mb_cache_list);
         spin_unlock(&mb_cache_spinlock);
         return cache;
 
+fail2:
+        kfree(cache->c_index_hash);
+
 fail:
-        if (cache) {
-                while (--m >= 0)
-                        kfree(cache->c_indexes_hash[m]);
-                kfree(cache->c_block_hash);
-                kfree(cache);
-        }
+        kfree(cache->c_block_hash);
+        kfree(cache);
         return NULL;
 }
 
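With the constructor narrowed to a name and a bucket count, callers no longer pass a free callback, an entry size, or an index count. A filesystem creating its xattr cache (for example ext2's, which is outside this diffstat) would change along these hypothetical lines:

    /* Before (sketch): entry size and the single extra index spelled out. */
    ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
            sizeof(struct mb_cache_entry) +
            sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);

    /* After (sketch): only the name and log2(number of hash buckets). */
    ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);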
@@ -356,7 +302,6 @@ mb_cache_destroy(struct mb_cache *cache)
 {
         LIST_HEAD(free_list);
         struct list_head *l, *ltmp;
-        int n;
 
         spin_lock(&mb_cache_spinlock);
         list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
@@ -383,8 +328,7 @@ mb_cache_destroy(struct mb_cache *cache)
 
         kmem_cache_destroy(cache->c_entry_cache);
 
-        for (n=0; n < mb_cache_indexes(cache); n++)
-                kfree(cache->c_indexes_hash[n]);
+        kfree(cache->c_index_hash);
         kfree(cache->c_block_hash);
         kfree(cache);
 }
@@ -428,17 +372,16 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
  *
  * @bdev: device the cache entry belongs to
  * @block: block number
- * @keys: array of additional keys. There must be indexes_count entries
- *        in the array (as specified when creating the cache).
+ * @key: lookup key
  */
 int
 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
-                      sector_t block, unsigned int keys[])
+                      sector_t block, unsigned int key)
 {
         struct mb_cache *cache = ce->e_cache;
         unsigned int bucket;
         struct list_head *l;
-        int error = -EBUSY, n;
+        int error = -EBUSY;
 
         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                            cache->c_bucket_bits);
@@ -453,12 +396,9 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
         ce->e_bdev = bdev;
         ce->e_block = block;
         list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
-        for (n=0; n<mb_cache_indexes(cache); n++) {
-                ce->e_indexes[n].o_key = keys[n];
-                bucket = hash_long(keys[n], cache->c_bucket_bits);
-                list_add(&ce->e_indexes[n].o_list,
-                         &cache->c_indexes_hash[n][bucket]);
-        }
+        ce->e_index.o_key = key;
+        bucket = hash_long(key, cache->c_bucket_bits);
+        list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
         error = 0;
 out:
         spin_unlock(&mb_cache_spinlock);
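Insertion now takes the key by value rather than as a one-element array. A caller that previously wrapped its hash in an array would change roughly as follows (ce, bdev, block, and hash are placeholder names):

    /* Before (sketch): keys[] with exactly one element under the old API. */
    error = mb_cache_entry_insert(ce, bdev, block, &hash);

    /* After (sketch): the single key is passed directly. */
    error = mb_cache_entry_insert(ce, bdev, block, hash);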
@@ -554,13 +494,12 @@ cleanup:
 
 static struct mb_cache_entry *
 __mb_cache_entry_find(struct list_head *l, struct list_head *head,
-                      int index, struct block_device *bdev, unsigned int key)
+                      struct block_device *bdev, unsigned int key)
 {
         while (l != head) {
                 struct mb_cache_entry *ce =
-                        list_entry(l, struct mb_cache_entry,
-                                   e_indexes[index].o_list);
-                if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+                        list_entry(l, struct mb_cache_entry, e_index.o_list);
+                if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
                         DEFINE_WAIT(wait);
 
                         if (!list_empty(&ce->e_lru_list))
@@ -602,23 +541,20 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
  * returned cache entry is locked for shared access ("multiple readers").
  *
  * @cache: the cache to search
- * @index: the number of the additonal index to search (0<=index<indexes_count)
  * @bdev: the device the cache entry should belong to
  * @key: the key in the index
  */
 struct mb_cache_entry *
-mb_cache_entry_find_first(struct mb_cache *cache, int index,
-                          struct block_device *bdev, unsigned int key)
+mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
+                          unsigned int key)
 {
         unsigned int bucket = hash_long(key, cache->c_bucket_bits);
         struct list_head *l;
         struct mb_cache_entry *ce;
 
-        mb_assert(index < mb_cache_indexes(cache));
         spin_lock(&mb_cache_spinlock);
-        l = cache->c_indexes_hash[index][bucket].next;
-        ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-                                   index, bdev, key);
+        l = cache->c_index_hash[bucket].next;
+        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
         spin_unlock(&mb_cache_spinlock);
         return ce;
 }
@@ -639,12 +575,11 @@ mb_cache_entry_find_first(struct mb_cache *cache, int index,
  * }
  *
  * @prev: The previous match
- * @index: the number of the additonal index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
 struct mb_cache_entry *
-mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
+mb_cache_entry_find_next(struct mb_cache_entry *prev,
                          struct block_device *bdev, unsigned int key)
 {
         struct mb_cache *cache = prev->e_cache;
@@ -652,11 +587,9 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
         struct list_head *l;
         struct mb_cache_entry *ce;
 
-        mb_assert(index < mb_cache_indexes(cache));
         spin_lock(&mb_cache_spinlock);
-        l = prev->e_indexes[index].o_list.next;
-        ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-                                   index, bdev, key);
+        l = prev->e_index.o_list.next;
+        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
         __mb_cache_entry_release_unlock(prev);
         return ce;
 }
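With the index argument gone from both lookup functions, the iteration pattern referenced in the kernel-doc above reduces to the following sketch (cache, bdev, and key are placeholders; each returned entry is held for shared access until it is released or passed back to find_next):

    struct mb_cache_entry *entry;

    entry = mb_cache_entry_find_first(cache, bdev, key);
    while (entry) {
            /* ... use entry ... */
            entry = mb_cache_entry_find_next(entry, bdev, key);
    }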
