diff options
author | Joel Becker <joel.becker@oracle.com> | 2009-02-10 19:05:07 -0500 |
---|---|---|
committer | Joel Becker <joel.becker@oracle.com> | 2009-09-04 19:07:47 -0400 |
commit | 47460d65a483529b3bc2bf6ccf461ad45f94df83 (patch) | |
tree | 0727cae9477749e5f2596e86253a210e79c96a83 /fs/ocfs2/uptodate.c | |
parent | 8379e7c46cc48f51197dd663fc6676f47f2a1e71 (diff) |
ocfs2: Make the ocfs2_caching_info structure self-contained.
We want to use the ocfs2_caching_info structure in places that are not
inodes. To do that, it can no longer rely on referencing the inode
directly.
This patch moves the flags to ocfs2_caching_info->ci_flags, stores
pointers to the parent's locks on the ocfs2_caching_info, and renames
the constants and flags to reflect its independent state.
Signed-off-by: Joel Becker <joel.becker@oracle.com>
Diffstat (limited to 'fs/ocfs2/uptodate.c')
-rw-r--r-- | fs/ocfs2/uptodate.c | 99 |
1 file changed, 51 insertions, 48 deletions
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 187b99ff0368..8dbc457ba236 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c | |||
@@ -75,12 +75,13 @@ struct ocfs2_meta_cache_item { | |||
75 | 75 | ||
76 | static struct kmem_cache *ocfs2_uptodate_cachep = NULL; | 76 | static struct kmem_cache *ocfs2_uptodate_cachep = NULL; |
77 | 77 | ||
78 | void ocfs2_metadata_cache_init(struct inode *inode) | 78 | void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci, |
79 | spinlock_t *cache_lock, | ||
80 | struct mutex *io_mutex) | ||
79 | { | 81 | { |
80 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 82 | ci->ci_lock = cache_lock; |
81 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | 83 | ci->ci_io_mutex = io_mutex; |
82 | 84 | ci->ci_flags |= OCFS2_CACHE_FL_INLINE; | |
83 | oi->ip_flags |= OCFS2_INODE_CACHE_INLINE; | ||
84 | ci->ci_num_cached = 0; | 85 | ci->ci_num_cached = 0; |
85 | } | 86 | } |
86 | 87 | ||
@@ -119,8 +120,8 @@ void ocfs2_metadata_cache_purge(struct inode *inode) | |||
119 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | 120 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; |
120 | struct rb_root root = RB_ROOT; | 121 | struct rb_root root = RB_ROOT; |
121 | 122 | ||
122 | spin_lock(&oi->ip_lock); | 123 | spin_lock(ci->ci_lock); |
123 | tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE); | 124 | tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); |
124 | to_purge = ci->ci_num_cached; | 125 | to_purge = ci->ci_num_cached; |
125 | 126 | ||
126 | mlog(0, "Purge %u %s items from Inode %llu\n", to_purge, | 127 | mlog(0, "Purge %u %s items from Inode %llu\n", to_purge, |
@@ -132,8 +133,8 @@ void ocfs2_metadata_cache_purge(struct inode *inode) | |||
132 | if (tree) | 133 | if (tree) |
133 | root = ci->ci_cache.ci_tree; | 134 | root = ci->ci_cache.ci_tree; |
134 | 135 | ||
135 | ocfs2_metadata_cache_init(inode); | 136 | ocfs2_metadata_cache_init(ci, ci->ci_lock, ci->ci_io_mutex); |
136 | spin_unlock(&oi->ip_lock); | 137 | spin_unlock(ci->ci_lock); |
137 | 138 | ||
138 | purged = ocfs2_purge_copied_metadata_tree(&root); | 139 | purged = ocfs2_purge_copied_metadata_tree(&root); |
139 | /* If possible, track the number wiped so that we can more | 140 | /* If possible, track the number wiped so that we can more |
@@ -187,22 +188,23 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi, | |||
187 | { | 188 | { |
188 | int index = -1; | 189 | int index = -1; |
189 | struct ocfs2_meta_cache_item *item = NULL; | 190 | struct ocfs2_meta_cache_item *item = NULL; |
191 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | ||
190 | 192 | ||
191 | spin_lock(&oi->ip_lock); | 193 | spin_lock(ci->ci_lock); |
192 | 194 | ||
193 | mlog(0, "Inode %llu, query block %llu (inline = %u)\n", | 195 | mlog(0, "Inode %llu, query block %llu (inline = %u)\n", |
194 | (unsigned long long)oi->ip_blkno, | 196 | (unsigned long long)oi->ip_blkno, |
195 | (unsigned long long) bh->b_blocknr, | 197 | (unsigned long long) bh->b_blocknr, |
196 | !!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE)); | 198 | !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); |
197 | 199 | ||
198 | if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) | 200 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) |
199 | index = ocfs2_search_cache_array(&oi->ip_metadata_cache, | 201 | index = ocfs2_search_cache_array(&oi->ip_metadata_cache, |
200 | bh->b_blocknr); | 202 | bh->b_blocknr); |
201 | else | 203 | else |
202 | item = ocfs2_search_cache_tree(&oi->ip_metadata_cache, | 204 | item = ocfs2_search_cache_tree(&oi->ip_metadata_cache, |
203 | bh->b_blocknr); | 205 | bh->b_blocknr); |
204 | 206 | ||
205 | spin_unlock(&oi->ip_lock); | 207 | spin_unlock(ci->ci_lock); |
206 | 208 | ||
207 | mlog(0, "index = %d, item = %p\n", index, item); | 209 | mlog(0, "index = %d, item = %p\n", index, item); |
208 | 210 | ||
@@ -235,7 +237,7 @@ int ocfs2_buffer_uptodate(struct inode *inode, | |||
235 | 237 | ||
236 | /* | 238 | /* |
237 | * Determine whether a buffer is currently out on a read-ahead request. | 239 | * Determine whether a buffer is currently out on a read-ahead request. |
238 | * ip_io_sem should be held to serialize submitters with the logic here. | 240 | * ci_io_sem should be held to serialize submitters with the logic here. |
239 | */ | 241 | */ |
240 | int ocfs2_buffer_read_ahead(struct inode *inode, | 242 | int ocfs2_buffer_read_ahead(struct inode *inode, |
241 | struct buffer_head *bh) | 243 | struct buffer_head *bh) |
@@ -247,7 +249,7 @@ int ocfs2_buffer_read_ahead(struct inode *inode, | |||
247 | static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, | 249 | static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, |
248 | sector_t block) | 250 | sector_t block) |
249 | { | 251 | { |
250 | BUG_ON(ci->ci_num_cached >= OCFS2_INODE_MAX_CACHE_ARRAY); | 252 | BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); |
251 | 253 | ||
252 | mlog(0, "block %llu takes position %u\n", (unsigned long long) block, | 254 | mlog(0, "block %llu takes position %u\n", (unsigned long long) block, |
253 | ci->ci_num_cached); | 255 | ci->ci_num_cached); |
@@ -295,13 +297,13 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci, | |||
295 | static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi, | 297 | static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi, |
296 | struct ocfs2_caching_info *ci) | 298 | struct ocfs2_caching_info *ci) |
297 | { | 299 | { |
298 | assert_spin_locked(&oi->ip_lock); | 300 | assert_spin_locked(ci->ci_lock); |
299 | 301 | ||
300 | return (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) && | 302 | return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) && |
301 | (ci->ci_num_cached < OCFS2_INODE_MAX_CACHE_ARRAY); | 303 | (ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY); |
302 | } | 304 | } |
303 | 305 | ||
304 | /* tree should be exactly OCFS2_INODE_MAX_CACHE_ARRAY wide. NULL the | 306 | /* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the |
305 | * pointers in tree after we use them - this allows caller to detect | 307 | * pointers in tree after we use them - this allows caller to detect |
306 | * when to free in case of error. */ | 308 | * when to free in case of error. */ |
307 | static void ocfs2_expand_cache(struct ocfs2_inode_info *oi, | 309 | static void ocfs2_expand_cache(struct ocfs2_inode_info *oi, |
@@ -310,32 +312,32 @@ static void ocfs2_expand_cache(struct ocfs2_inode_info *oi, | |||
310 | int i; | 312 | int i; |
311 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | 313 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; |
312 | 314 | ||
313 | mlog_bug_on_msg(ci->ci_num_cached != OCFS2_INODE_MAX_CACHE_ARRAY, | 315 | mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY, |
314 | "Inode %llu, num cached = %u, should be %u\n", | 316 | "Inode %llu, num cached = %u, should be %u\n", |
315 | (unsigned long long)oi->ip_blkno, ci->ci_num_cached, | 317 | (unsigned long long)oi->ip_blkno, ci->ci_num_cached, |
316 | OCFS2_INODE_MAX_CACHE_ARRAY); | 318 | OCFS2_CACHE_INFO_MAX_ARRAY); |
317 | mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE), | 319 | mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE), |
318 | "Inode %llu not marked as inline anymore!\n", | 320 | "Inode %llu not marked as inline anymore!\n", |
319 | (unsigned long long)oi->ip_blkno); | 321 | (unsigned long long)oi->ip_blkno); |
320 | assert_spin_locked(&oi->ip_lock); | 322 | assert_spin_locked(ci->ci_lock); |
321 | 323 | ||
322 | /* Be careful to initialize the tree members *first* because | 324 | /* Be careful to initialize the tree members *first* because |
323 | * once the ci_tree is used, the array is junk... */ | 325 | * once the ci_tree is used, the array is junk... */ |
324 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) | 326 | for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) |
325 | tree[i]->c_block = ci->ci_cache.ci_array[i]; | 327 | tree[i]->c_block = ci->ci_cache.ci_array[i]; |
326 | 328 | ||
327 | oi->ip_flags &= ~OCFS2_INODE_CACHE_INLINE; | 329 | ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE; |
328 | ci->ci_cache.ci_tree = RB_ROOT; | 330 | ci->ci_cache.ci_tree = RB_ROOT; |
329 | /* this will be set again by __ocfs2_insert_cache_tree */ | 331 | /* this will be set again by __ocfs2_insert_cache_tree */ |
330 | ci->ci_num_cached = 0; | 332 | ci->ci_num_cached = 0; |
331 | 333 | ||
332 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { | 334 | for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { |
333 | __ocfs2_insert_cache_tree(ci, tree[i]); | 335 | __ocfs2_insert_cache_tree(ci, tree[i]); |
334 | tree[i] = NULL; | 336 | tree[i] = NULL; |
335 | } | 337 | } |
336 | 338 | ||
337 | mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", | 339 | mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", |
338 | (unsigned long long)oi->ip_blkno, oi->ip_flags, ci->ci_num_cached); | 340 | (unsigned long long)oi->ip_blkno, ci->ci_flags, ci->ci_num_cached); |
339 | } | 341 | } |
340 | 342 | ||
341 | /* Slow path function - memory allocation is necessary. See the | 343 | /* Slow path function - memory allocation is necessary. See the |
@@ -347,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
347 | int i; | 349 | int i; |
348 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | 350 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; |
349 | struct ocfs2_meta_cache_item *new = NULL; | 351 | struct ocfs2_meta_cache_item *new = NULL; |
350 | struct ocfs2_meta_cache_item *tree[OCFS2_INODE_MAX_CACHE_ARRAY] = | 352 | struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = |
351 | { NULL, }; | 353 | { NULL, }; |
352 | 354 | ||
353 | mlog(0, "Inode %llu, block %llu, expand = %d\n", | 355 | mlog(0, "Inode %llu, block %llu, expand = %d\n", |
@@ -364,7 +366,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
364 | if (expand_tree) { | 366 | if (expand_tree) { |
365 | /* Do *not* allocate an array here - the removal code | 367 | /* Do *not* allocate an array here - the removal code |
366 | * has no way of tracking that. */ | 368 | * has no way of tracking that. */ |
367 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { | 369 | for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { |
368 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, | 370 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, |
369 | GFP_NOFS); | 371 | GFP_NOFS); |
370 | if (!tree[i]) { | 372 | if (!tree[i]) { |
@@ -376,13 +378,13 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
376 | } | 378 | } |
377 | } | 379 | } |
378 | 380 | ||
379 | spin_lock(&oi->ip_lock); | 381 | spin_lock(ci->ci_lock); |
380 | if (ocfs2_insert_can_use_array(oi, ci)) { | 382 | if (ocfs2_insert_can_use_array(oi, ci)) { |
381 | mlog(0, "Someone cleared the tree underneath us\n"); | 383 | mlog(0, "Someone cleared the tree underneath us\n"); |
382 | /* Ok, items were removed from the cache in between | 384 | /* Ok, items were removed from the cache in between |
383 | * locks. Detect this and revert back to the fast path */ | 385 | * locks. Detect this and revert back to the fast path */ |
384 | ocfs2_append_cache_array(ci, block); | 386 | ocfs2_append_cache_array(ci, block); |
385 | spin_unlock(&oi->ip_lock); | 387 | spin_unlock(ci->ci_lock); |
386 | goto out_free; | 388 | goto out_free; |
387 | } | 389 | } |
388 | 390 | ||
@@ -390,7 +392,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
390 | ocfs2_expand_cache(oi, tree); | 392 | ocfs2_expand_cache(oi, tree); |
391 | 393 | ||
392 | __ocfs2_insert_cache_tree(ci, new); | 394 | __ocfs2_insert_cache_tree(ci, new); |
393 | spin_unlock(&oi->ip_lock); | 395 | spin_unlock(ci->ci_lock); |
394 | 396 | ||
395 | new = NULL; | 397 | new = NULL; |
396 | out_free: | 398 | out_free: |
@@ -400,14 +402,14 @@ out_free: | |||
400 | /* If these were used, then ocfs2_expand_cache re-set them to | 402 | /* If these were used, then ocfs2_expand_cache re-set them to |
401 | * NULL for us. */ | 403 | * NULL for us. */ |
402 | if (tree[0]) { | 404 | if (tree[0]) { |
403 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) | 405 | for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) |
404 | if (tree[i]) | 406 | if (tree[i]) |
405 | kmem_cache_free(ocfs2_uptodate_cachep, | 407 | kmem_cache_free(ocfs2_uptodate_cachep, |
406 | tree[i]); | 408 | tree[i]); |
407 | } | 409 | } |
408 | } | 410 | } |
409 | 411 | ||
410 | /* Item insertion is guarded by ip_io_mutex, so the insertion path takes | 412 | /* Item insertion is guarded by ci_io_mutex, so the insertion path takes |
411 | * advantage of this by not rechecking for a duplicate insert during | 413 | * advantage of this by not rechecking for a duplicate insert during |
412 | * the slow case. Additionally, if the cache needs to be bumped up to | 414 | * the slow case. Additionally, if the cache needs to be bumped up to |
413 | * a tree, the code will not recheck after acquiring the lock -- | 415 | * a tree, the code will not recheck after acquiring the lock -- |
@@ -442,42 +444,43 @@ void ocfs2_set_buffer_uptodate(struct inode *inode, | |||
442 | (unsigned long long)bh->b_blocknr); | 444 | (unsigned long long)bh->b_blocknr); |
443 | 445 | ||
444 | /* No need to recheck under spinlock - insertion is guarded by | 446 | /* No need to recheck under spinlock - insertion is guarded by |
445 | * ip_io_mutex */ | 447 | * ci_io_mutex */ |
446 | spin_lock(&oi->ip_lock); | 448 | spin_lock(ci->ci_lock); |
447 | if (ocfs2_insert_can_use_array(oi, ci)) { | 449 | if (ocfs2_insert_can_use_array(oi, ci)) { |
448 | /* Fast case - it's an array and there's a free | 450 | /* Fast case - it's an array and there's a free |
449 | * spot. */ | 451 | * spot. */ |
450 | ocfs2_append_cache_array(ci, bh->b_blocknr); | 452 | ocfs2_append_cache_array(ci, bh->b_blocknr); |
451 | spin_unlock(&oi->ip_lock); | 453 | spin_unlock(ci->ci_lock); |
452 | return; | 454 | return; |
453 | } | 455 | } |
454 | 456 | ||
455 | expand = 0; | 457 | expand = 0; |
456 | if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) { | 458 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { |
457 | /* We need to bump things up to a tree. */ | 459 | /* We need to bump things up to a tree. */ |
458 | expand = 1; | 460 | expand = 1; |
459 | } | 461 | } |
460 | spin_unlock(&oi->ip_lock); | 462 | spin_unlock(ci->ci_lock); |
461 | 463 | ||
462 | __ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand); | 464 | __ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand); |
463 | } | 465 | } |
464 | 466 | ||
465 | /* Called against a newly allocated buffer. Most likely nobody should | 467 | /* Called against a newly allocated buffer. Most likely nobody should |
466 | * be able to read this sort of metadata while it's still being | 468 | * be able to read this sort of metadata while it's still being |
467 | * allocated, but this is careful to take ip_io_mutex anyway. */ | 469 | * allocated, but this is careful to take ci_io_mutex anyway. */ |
468 | void ocfs2_set_new_buffer_uptodate(struct inode *inode, | 470 | void ocfs2_set_new_buffer_uptodate(struct inode *inode, |
469 | struct buffer_head *bh) | 471 | struct buffer_head *bh) |
470 | { | 472 | { |
471 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 473 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
474 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | ||
472 | 475 | ||
473 | /* This should definitely *not* exist in our cache */ | 476 | /* This should definitely *not* exist in our cache */ |
474 | BUG_ON(ocfs2_buffer_cached(oi, bh)); | 477 | BUG_ON(ocfs2_buffer_cached(oi, bh)); |
475 | 478 | ||
476 | set_buffer_uptodate(bh); | 479 | set_buffer_uptodate(bh); |
477 | 480 | ||
478 | mutex_lock(&oi->ip_io_mutex); | 481 | mutex_lock(ci->ci_io_mutex); |
479 | ocfs2_set_buffer_uptodate(inode, bh); | 482 | ocfs2_set_buffer_uptodate(inode, bh); |
480 | mutex_unlock(&oi->ip_io_mutex); | 483 | mutex_unlock(ci->ci_io_mutex); |
481 | } | 484 | } |
482 | 485 | ||
483 | /* Requires ip_lock. */ | 486 | /* Requires ip_lock. */ |
@@ -487,7 +490,7 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci, | |||
487 | sector_t *array = ci->ci_cache.ci_array; | 490 | sector_t *array = ci->ci_cache.ci_array; |
488 | int bytes; | 491 | int bytes; |
489 | 492 | ||
490 | BUG_ON(index < 0 || index >= OCFS2_INODE_MAX_CACHE_ARRAY); | 493 | BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY); |
491 | BUG_ON(index >= ci->ci_num_cached); | 494 | BUG_ON(index >= ci->ci_num_cached); |
492 | BUG_ON(!ci->ci_num_cached); | 495 | BUG_ON(!ci->ci_num_cached); |
493 | 496 | ||
@@ -523,13 +526,13 @@ static void ocfs2_remove_block_from_cache(struct inode *inode, | |||
523 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 526 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
524 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; | 527 | struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; |
525 | 528 | ||
526 | spin_lock(&oi->ip_lock); | 529 | spin_lock(ci->ci_lock); |
527 | mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n", | 530 | mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n", |
528 | (unsigned long long)oi->ip_blkno, | 531 | (unsigned long long)oi->ip_blkno, |
529 | (unsigned long long) block, ci->ci_num_cached, | 532 | (unsigned long long) block, ci->ci_num_cached, |
530 | oi->ip_flags & OCFS2_INODE_CACHE_INLINE); | 533 | ci->ci_flags & OCFS2_CACHE_FL_INLINE); |
531 | 534 | ||
532 | if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) { | 535 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { |
533 | index = ocfs2_search_cache_array(ci, block); | 536 | index = ocfs2_search_cache_array(ci, block); |
534 | if (index != -1) | 537 | if (index != -1) |
535 | ocfs2_remove_metadata_array(ci, index); | 538 | ocfs2_remove_metadata_array(ci, index); |
@@ -538,7 +541,7 @@ static void ocfs2_remove_block_from_cache(struct inode *inode, | |||
538 | if (item) | 541 | if (item) |
539 | ocfs2_remove_metadata_tree(ci, item); | 542 | ocfs2_remove_metadata_tree(ci, item); |
540 | } | 543 | } |
541 | spin_unlock(&oi->ip_lock); | 544 | spin_unlock(ci->ci_lock); |
542 | 545 | ||
543 | if (item) | 546 | if (item) |
544 | kmem_cache_free(ocfs2_uptodate_cachep, item); | 547 | kmem_cache_free(ocfs2_uptodate_cachep, item); |
@@ -577,7 +580,7 @@ int __init init_ocfs2_uptodate_cache(void) | |||
577 | return -ENOMEM; | 580 | return -ENOMEM; |
578 | 581 | ||
579 | mlog(0, "%u inlined cache items per inode.\n", | 582 | mlog(0, "%u inlined cache items per inode.\n", |
580 | OCFS2_INODE_MAX_CACHE_ARRAY); | 583 | OCFS2_CACHE_INFO_MAX_ARRAY); |
581 | 584 | ||
582 | return 0; | 585 | return 0; |
583 | } | 586 | } |