Diffstat (limited to 'fs/mbcache.c')
-rw-r--r--   fs/mbcache.c   196
1 file changed, 73 insertions(+), 123 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index e28f21b95344..93444747237b 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -79,15 +79,12 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
 struct mb_cache {
 	struct list_head c_cache_list;
 	const char *c_name;
-	struct mb_cache_op c_op;
 	atomic_t c_entry_count;
+	int c_max_entries;
 	int c_bucket_bits;
-#ifndef MB_CACHE_INDEXES_COUNT
-	int c_indexes_count;
-#endif
 	struct kmem_cache *c_entry_cache;
 	struct list_head *c_block_hash;
-	struct list_head *c_indexes_hash[0];
+	struct list_head *c_index_hash;
 };
 
 
@@ -101,16 +98,6 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-static inline int
-mb_cache_indexes(struct mb_cache *cache)
-{
-#ifdef MB_CACHE_INDEXES_COUNT
-	return MB_CACHE_INDEXES_COUNT;
-#else
-	return cache->c_indexes_count;
-#endif
-}
-
 /*
  * What the mbcache registers as to get shrunk dynamically.
  */
@@ -132,12 +119,9 @@ __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 static void
 __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 {
-	int n;
-
 	if (__mb_cache_entry_is_hashed(ce)) {
 		list_del_init(&ce->e_block_list);
-		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
-			list_del(&ce->e_indexes[n].o_list);
+		list_del(&ce->e_index.o_list);
 	}
 }
 
@@ -148,16 +132,8 @@ __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
 	struct mb_cache *cache = ce->e_cache;
 
 	mb_assert(!(ce->e_used || ce->e_queued));
-	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
-		/* free failed -- put back on the lru list
-		   for freeing later. */
-		spin_lock(&mb_cache_spinlock);
-		list_add(&ce->e_lru_list, &mb_cache_lru_list);
-		spin_unlock(&mb_cache_spinlock);
-	} else {
-		kmem_cache_free(cache->c_entry_cache, ce);
-		atomic_dec(&cache->c_entry_count);
-	}
+	kmem_cache_free(cache->c_entry_cache, ce);
+	atomic_dec(&cache->c_entry_count);
 }
 
 
@@ -201,22 +177,12 @@ static int
 mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
-	struct list_head *l, *ltmp;
+	struct mb_cache *cache;
+	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
 
-	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &mb_cache_list) {
-		struct mb_cache *cache =
-			list_entry(l, struct mb_cache, c_cache_list);
-		mb_debug("cache %s (%d)", cache->c_name,
-			 atomic_read(&cache->c_entry_count));
-		count += atomic_read(&cache->c_entry_count);
-	}
 	mb_debug("trying to free %d entries", nr_to_scan);
-	if (nr_to_scan == 0) {
-		spin_unlock(&mb_cache_spinlock);
-		goto out;
-	}
+	spin_lock(&mb_cache_spinlock);
 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
 		struct mb_cache_entry *ce =
 			list_entry(mb_cache_lru_list.next,
@@ -224,12 +190,15 @@ mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
 	}
+	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+		mb_debug("cache %s (%d)", cache->c_name,
+			 atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_safe(l, ltmp, &free_list) {
-		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-						   e_lru_list), gfp_mask);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
-out:
 	return (count / 100) * sysctl_vfs_cache_pressure;
 }
 
@@ -243,72 +212,55 @@ out:
  * memory was available.
  *
  * @name: name of the cache (informal)
- * @cache_op: contains the callback called when freeing a cache entry
- * @entry_size: The size of a cache entry, including
- *              struct mb_cache_entry
- * @indexes_count: number of additional indexes in the cache. Must equal
- *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
- *                 hardwired.
  * @bucket_bits: log2(number of hash buckets)
  */
 struct mb_cache *
-mb_cache_create(const char *name, struct mb_cache_op *cache_op,
-		size_t entry_size, int indexes_count, int bucket_bits)
+mb_cache_create(const char *name, int bucket_bits)
 {
-	int m=0, n, bucket_count = 1 << bucket_bits;
+	int n, bucket_count = 1 << bucket_bits;
 	struct mb_cache *cache = NULL;
 
-	if(entry_size < sizeof(struct mb_cache_entry) +
-	   indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
-		return NULL;
-
-	cache = kmalloc(sizeof(struct mb_cache) +
-			indexes_count * sizeof(struct list_head), GFP_KERNEL);
+	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
 	if (!cache)
-		goto fail;
+		return NULL;
 	cache->c_name = name;
-	cache->c_op.free = NULL;
-	if (cache_op)
-		cache->c_op.free = cache_op->free;
 	atomic_set(&cache->c_entry_count, 0);
 	cache->c_bucket_bits = bucket_bits;
-#ifdef MB_CACHE_INDEXES_COUNT
-	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
-#else
-	cache->c_indexes_count = indexes_count;
-#endif
 	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
 	if (!cache->c_block_hash)
 		goto fail;
 	for (n=0; n<bucket_count; n++)
 		INIT_LIST_HEAD(&cache->c_block_hash[n]);
-	for (m=0; m<indexes_count; m++) {
-		cache->c_indexes_hash[m] = kmalloc(bucket_count *
-						   sizeof(struct list_head),
-						   GFP_KERNEL);
-		if (!cache->c_indexes_hash[m])
-			goto fail;
-		for (n=0; n<bucket_count; n++)
-			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
-	}
-	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
+	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
+				      GFP_KERNEL);
+	if (!cache->c_index_hash)
+		goto fail;
+	for (n=0; n<bucket_count; n++)
+		INIT_LIST_HEAD(&cache->c_index_hash[n]);
+	cache->c_entry_cache = kmem_cache_create(name,
+		sizeof(struct mb_cache_entry), 0,
 		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
 	if (!cache->c_entry_cache)
-		goto fail;
+		goto fail2;
+
+	/*
+	 * Set an upper limit on the number of cache entries so that the hash
+	 * chains won't grow too long.
+	 */
+	cache->c_max_entries = bucket_count << 4;
 
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
 	return cache;
 
+fail2:
+	kfree(cache->c_index_hash);
+
 fail:
-	if (cache) {
-		while (--m >= 0)
-			kfree(cache->c_indexes_hash[m]);
-		kfree(cache->c_block_hash);
-		kfree(cache);
-	}
+	kfree(cache->c_block_hash);
+	kfree(cache);
 	return NULL;
 }
 
@@ -357,7 +309,6 @@ mb_cache_destroy(struct mb_cache *cache)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
-	int n;
 
 	spin_lock(&mb_cache_spinlock);
 	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
@@ -384,13 +335,11 @@ mb_cache_destroy(struct mb_cache *cache)
 
 	kmem_cache_destroy(cache->c_entry_cache);
 
-	for (n=0; n < mb_cache_indexes(cache); n++)
-		kfree(cache->c_indexes_hash[n]);
+	kfree(cache->c_index_hash);
 	kfree(cache->c_block_hash);
 	kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -402,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-	struct mb_cache_entry *ce;
+	struct mb_cache_entry *ce = NULL;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-	if (ce) {
+	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+		spin_lock(&mb_cache_spinlock);
+		if (!list_empty(&mb_cache_lru_list)) {
+			ce = list_entry(mb_cache_lru_list.next,
+					struct mb_cache_entry, e_lru_list);
+			list_del_init(&ce->e_lru_list);
+			__mb_cache_entry_unhash(ce);
+		}
+		spin_unlock(&mb_cache_spinlock);
+	}
+	if (!ce) {
+		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+		if (!ce)
+			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
-		ce->e_used = 1 + MB_CACHE_WRITER;
 		ce->e_queued = 0;
 	}
+	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
 
@@ -429,17 +390,16 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
  *
  * @bdev: device the cache entry belongs to
  * @block: block number
- * @keys: array of additional keys. There must be indexes_count entries
- *        in the array (as specified when creating the cache).
+ * @key: lookup key
  */
 int
 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
-		      sector_t block, unsigned int keys[])
+		      sector_t block, unsigned int key)
 {
 	struct mb_cache *cache = ce->e_cache;
 	unsigned int bucket;
 	struct list_head *l;
-	int error = -EBUSY, n;
+	int error = -EBUSY;
 
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
@@ -555,13 +512,12 @@ cleanup:
 
 static struct mb_cache_entry *
 __mb_cache_entry_find(struct list_head *l, struct list_head *head,
-		      int index, struct block_device *bdev, unsigned int key)
+		      struct block_device *bdev, unsigned int key)
 {
 	while (l != head) {
 		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry,
-				   e_indexes[index].o_list);
-		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+			list_entry(l, struct mb_cache_entry, e_index.o_list);
+		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
 			DEFINE_WAIT(wait);
 
 			if (!list_empty(&ce->e_lru_list))
@@ -603,23 +559,20 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
  * returned cache entry is locked for shared access ("multiple readers").
  *
  * @cache: the cache to search
- * @index: the number of the additonal index to search (0<=index<indexes_count)
  * @bdev: the device the cache entry should belong to
  * @key: the key in the index
  */
 struct mb_cache_entry *
-mb_cache_entry_find_first(struct mb_cache *cache, int index,
-			  struct block_device *bdev, unsigned int key)
+mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
+			  unsigned int key)
 {
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
 	struct list_head *l;
 	struct mb_cache_entry *ce;
 
-	mb_assert(index < mb_cache_indexes(cache));
 	spin_lock(&mb_cache_spinlock);
-	l = cache->c_indexes_hash[index][bucket].next;
-	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-				   index, bdev, key);
+	l = cache->c_index_hash[bucket].next;
+	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
 	spin_unlock(&mb_cache_spinlock);
 	return ce;
 }
@@ -640,12 +593,11 @@ mb_cache_entry_find_first(struct mb_cache *cache, int index,
  * }
  *
  * @prev: The previous match
- * @index: the number of the additonal index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
 struct mb_cache_entry *
-mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
+mb_cache_entry_find_next(struct mb_cache_entry *prev,
 			 struct block_device *bdev, unsigned int key)
 {
 	struct mb_cache *cache = prev->e_cache;
@@ -653,11 +605,9 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
 	struct list_head *l;
 	struct mb_cache_entry *ce;
 
-	mb_assert(index < mb_cache_indexes(cache));
 	spin_lock(&mb_cache_spinlock);
-	l = prev->e_indexes[index].o_list.next;
-	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
-				   index, bdev, key);
+	l = prev->e_index.o_list.next;
+	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
 	__mb_cache_entry_release_unlock(prev);
 	return ce;
 }
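
After this change each cache entry carries exactly one index, so callers pass a single key rather than a keys[] array, and mb_cache_create() no longer takes a cache_op, entry size, or index count. The sketch below shows how a caller might drive the simplified API; the mb_cache_create(), mb_cache_entry_alloc(), mb_cache_entry_insert(), mb_cache_entry_find_first() and mb_cache_entry_find_next() signatures come from the hunks above, while mb_cache_entry_release() and the my_* helpers are assumptions for illustration and are not part of this diff.

/*
 * Sketch only: single-index mbcache usage after this patch.
 * mb_cache_entry_release() is assumed from the unchanged parts of
 * fs/mbcache.c; my_block_is_still_valid() is a hypothetical caller check.
 */
static struct mb_cache *my_cache;

static int my_cache_init(void)
{
	/* Only the name and log2(number of hash buckets) are passed now. */
	my_cache = mb_cache_create("my_xattr_cache", 6);
	return my_cache ? 0 : -ENOMEM;
}

static int my_cache_add(struct block_device *bdev, sector_t block,
			unsigned int key)
{
	struct mb_cache_entry *ce;
	int error;

	/* The new entry comes back held for writing (1 + MB_CACHE_WRITER). */
	ce = mb_cache_entry_alloc(my_cache, GFP_NOFS);
	if (!ce)
		return -ENOMEM;
	/* A single key replaces the old keys[] array. */
	error = mb_cache_entry_insert(ce, bdev, block, key);
	mb_cache_entry_release(ce);
	return error;
}

static sector_t my_cache_lookup(struct block_device *bdev, unsigned int key)
{
	struct mb_cache_entry *ce;

	/*
	 * mb_cache_entry_find_next() releases the previous entry, so at most
	 * one entry is held at a time; the caller releases the one it keeps.
	 * Returning 0 means "not found" in this simplified sketch.
	 */
	ce = mb_cache_entry_find_first(my_cache, bdev, key);
	while (ce) {
		if (my_block_is_still_valid(bdev, ce->e_block)) {
			sector_t block = ce->e_block;

			mb_cache_entry_release(ce);
			return block;
		}
		ce = mb_cache_entry_find_next(ce, bdev, key);
	}
	return 0;
}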