author     Joel Becker <joel.becker@oracle.com>  2009-02-10 22:00:37 -0500
committer  Joel Becker <joel.becker@oracle.com>  2009-09-04 19:07:48 -0400
commit     6e5a3d7538ad4e46a976862f593faf65750e37cc
tree       e87ce6d69bdbcce23eed0a195a7f80a59c01d3d9  /fs/ocfs2/uptodate.c
parent     47460d65a483529b3bc2bf6ccf461ad45f94df83
ocfs2: Change metadata caching locks to an operations structure.
We don't really want to cart around too many new fields on the
ocfs2_caching_info structure.  So let's wrap all our access of the
parent object in a set of operations.  One pointer on caching_info,
and more flexibility to boot.

Signed-off-by: Joel Becker <joel.becker@oracle.com>
Diffstat (limited to 'fs/ocfs2/uptodate.c')
 fs/ocfs2/uptodate.c | 129
 1 file changed, 85 insertions(+), 44 deletions(-)
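
Before reading the diff: the wrappers below consume the new operations
vector through five hooks (co_owner() plus two lock pairs).  The structure
itself is defined outside this file, so the following is only an inferred
sketch from the call sites in this diff; the field comments are assumptions
based on the spinlock and mutex the hooks replace, not the authoritative
definition.

/* Inferred sketch, not the authoritative definition.  Only the five
 * hooks that appear in this file are shown. */
struct ocfs2_caching_operations {
	/* A u64 identifying the owning structure, used in log messages. */
	u64	(*co_owner)(struct ocfs2_caching_info *ci);

	/* Lock/unlock the cache data.  These replace the old ci_lock
	 * spinlock, so presumably they must not sleep. */
	void	(*co_cache_lock)(struct ocfs2_caching_info *ci);
	void	(*co_cache_unlock)(struct ocfs2_caching_info *ci);

	/* Lock/unlock around disk I/O.  These replace the old
	 * ci_io_mutex, so presumably they may sleep. */
	void	(*co_io_lock)(struct ocfs2_caching_info *ci);
	void	(*co_io_unlock)(struct ocfs2_caching_info *ci);
};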
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 8dbc457ba236..226d0429fd7f 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -75,12 +75,48 @@ struct ocfs2_meta_cache_item {
 
 static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
 
+static u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
+{
+	BUG_ON(!ci || !ci->ci_ops);
+
+	return ci->ci_ops->co_owner(ci);
+}
+
+static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
+{
+	BUG_ON(!ci || !ci->ci_ops);
+
+	ci->ci_ops->co_cache_lock(ci);
+}
+
+static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
+{
+	BUG_ON(!ci || !ci->ci_ops);
+
+	ci->ci_ops->co_cache_unlock(ci);
+}
+
+static void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
+{
+	BUG_ON(!ci || !ci->ci_ops);
+
+	ci->ci_ops->co_io_lock(ci);
+}
+
+static void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
+{
+	BUG_ON(!ci || !ci->ci_ops);
+
+	ci->ci_ops->co_io_unlock(ci);
+}
+
+
 void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
-			       spinlock_t *cache_lock,
-			       struct mutex *io_mutex)
+			       const struct ocfs2_caching_operations *ops)
 {
-	ci->ci_lock = cache_lock;
-	ci->ci_io_mutex = io_mutex;
+	BUG_ON(!ops);
+
+	ci->ci_ops = ops;
 	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
 	ci->ci_num_cached = 0;
 }
@@ -120,12 +156,15 @@ void ocfs2_metadata_cache_purge(struct inode *inode)
 	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
 	struct rb_root root = RB_ROOT;
 
-	spin_lock(ci->ci_lock);
+	BUG_ON(!ci || !ci->ci_ops);
+
+	ocfs2_metadata_cache_lock(ci);
 	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
 	to_purge = ci->ci_num_cached;
 
-	mlog(0, "Purge %u %s items from Inode %llu\n", to_purge,
-	     tree ? "array" : "tree", (unsigned long long)oi->ip_blkno);
+	mlog(0, "Purge %u %s items from Owner %llu\n", to_purge,
+	     tree ? "array" : "tree",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci));
 
 	/* If we're a tree, save off the root so that we can safely
 	 * initialize the cache. We do the work to free tree members
@@ -133,16 +172,17 @@ void ocfs2_metadata_cache_purge(struct inode *inode)
 	if (tree)
 		root = ci->ci_cache.ci_tree;
 
-	ocfs2_metadata_cache_init(ci, ci->ci_lock, ci->ci_io_mutex);
-	spin_unlock(ci->ci_lock);
+	ocfs2_metadata_cache_init(ci, ci->ci_ops);
+	ocfs2_metadata_cache_unlock(ci);
 
 	purged = ocfs2_purge_copied_metadata_tree(&root);
 	/* If possible, track the number wiped so that we can more
 	 * easily detect counting errors. Unfortunately, this is only
 	 * meaningful for trees. */
 	if (tree && purged != to_purge)
-		mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n",
-		     (unsigned long long)oi->ip_blkno, to_purge, purged);
+		mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
+		     (unsigned long long)ocfs2_metadata_cache_owner(ci),
+		     to_purge, purged);
 }
 
 /* Returns the index in the cache array, -1 if not found.
@@ -190,10 +230,10 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
 	struct ocfs2_meta_cache_item *item = NULL;
 	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
 
-	spin_lock(ci->ci_lock);
+	ocfs2_metadata_cache_lock(ci);
 
-	mlog(0, "Inode %llu, query block %llu (inline = %u)\n",
-	     (unsigned long long)oi->ip_blkno,
+	mlog(0, "Owner %llu, query block %llu (inline = %u)\n",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
 	     (unsigned long long) bh->b_blocknr,
 	     !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
 
@@ -204,7 +244,7 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
 		item = ocfs2_search_cache_tree(&oi->ip_metadata_cache,
 					       bh->b_blocknr);
 
-	spin_unlock(ci->ci_lock);
+	ocfs2_metadata_cache_unlock(ci);
 
 	mlog(0, "index = %d, item = %p\n", index, item);
 
@@ -294,18 +334,19 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
 	ci->ci_num_cached++;
 }
 
+/* co_cache_lock() must be held */
 static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi,
 					     struct ocfs2_caching_info *ci)
 {
-	assert_spin_locked(ci->ci_lock);
-
 	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
 		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
 }
 
 /* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
  * pointers in tree after we use them - this allows caller to detect
- * when to free in case of error. */
+ * when to free in case of error.
+ *
+ * The co_cache_lock() must be held. */
 static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
 			       struct ocfs2_meta_cache_item **tree)
 {
@@ -313,13 +354,12 @@ static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
 	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
 
 	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
-			"Inode %llu, num cached = %u, should be %u\n",
-			(unsigned long long)oi->ip_blkno, ci->ci_num_cached,
-			OCFS2_CACHE_INFO_MAX_ARRAY);
+			"Owner %llu, num cached = %u, should be %u\n",
+			(unsigned long long)ocfs2_metadata_cache_owner(ci),
+			ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
 	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
-			"Inode %llu not marked as inline anymore!\n",
-			(unsigned long long)oi->ip_blkno);
-	assert_spin_locked(ci->ci_lock);
+			"Owner %llu not marked as inline anymore!\n",
+			(unsigned long long)ocfs2_metadata_cache_owner(ci));
 
 	/* Be careful to initialize the tree members *first* because
 	 * once the ci_tree is used, the array is junk... */
@@ -337,7 +377,8 @@ static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
 	}
 
 	mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
-	     (unsigned long long)oi->ip_blkno, ci->ci_flags, ci->ci_num_cached);
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
+	     ci->ci_flags, ci->ci_num_cached);
 }
 
 /* Slow path function - memory allocation is necessary. See the
@@ -352,8 +393,8 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
 	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
 		{ NULL, };
 
-	mlog(0, "Inode %llu, block %llu, expand = %d\n",
-	     (unsigned long long)oi->ip_blkno,
+	mlog(0, "Owner %llu, block %llu, expand = %d\n",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
 	     (unsigned long long)block, expand_tree);
 
 	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
@@ -378,13 +419,13 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
 		}
 	}
 
-	spin_lock(ci->ci_lock);
+	ocfs2_metadata_cache_lock(ci);
 	if (ocfs2_insert_can_use_array(oi, ci)) {
 		mlog(0, "Someone cleared the tree underneath us\n");
 		/* Ok, items were removed from the cache in between
 		 * locks. Detect this and revert back to the fast path */
 		ocfs2_append_cache_array(ci, block);
-		spin_unlock(ci->ci_lock);
+		ocfs2_metadata_cache_unlock(ci);
 		goto out_free;
 	}
 
@@ -392,7 +433,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
 		ocfs2_expand_cache(oi, tree);
 
 	__ocfs2_insert_cache_tree(ci, new);
-	spin_unlock(ci->ci_lock);
+	ocfs2_metadata_cache_unlock(ci);
 
 	new = NULL;
 out_free:
@@ -409,7 +450,7 @@ out_free:
 	}
 }
 
-/* Item insertion is guarded by ci_io_mutex, so the insertion path takes
+/* Item insertion is guarded by co_io_lock(), so the insertion path takes
  * advantage of this by not rechecking for a duplicate insert during
  * the slow case. Additionally, if the cache needs to be bumped up to
  * a tree, the code will not recheck after acquiring the lock --
@@ -439,18 +480,18 @@ void ocfs2_set_buffer_uptodate(struct inode *inode,
 	if (ocfs2_buffer_cached(oi, bh))
 		return;
 
-	mlog(0, "Inode %llu, inserting block %llu\n",
-	     (unsigned long long)oi->ip_blkno,
+	mlog(0, "Owner %llu, inserting block %llu\n",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
 	     (unsigned long long)bh->b_blocknr);
 
 	/* No need to recheck under spinlock - insertion is guarded by
-	 * ci_io_mutex */
-	spin_lock(ci->ci_lock);
+	 * co_io_lock() */
+	ocfs2_metadata_cache_lock(ci);
 	if (ocfs2_insert_can_use_array(oi, ci)) {
 		/* Fast case - it's an array and there's a free
 		 * spot. */
 		ocfs2_append_cache_array(ci, bh->b_blocknr);
-		spin_unlock(ci->ci_lock);
+		ocfs2_metadata_cache_unlock(ci);
 		return;
 	}
 
@@ -459,14 +500,14 @@ void ocfs2_set_buffer_uptodate(struct inode *inode,
 		/* We need to bump things up to a tree. */
 		expand = 1;
 	}
-	spin_unlock(ci->ci_lock);
+	ocfs2_metadata_cache_unlock(ci);
 
 	__ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
 }
 
 /* Called against a newly allocated buffer. Most likely nobody should
  * be able to read this sort of metadata while it's still being
- * allocated, but this is careful to take ci_io_mutex anyway. */
+ * allocated, but this is careful to take co_io_lock() anyway. */
 void ocfs2_set_new_buffer_uptodate(struct inode *inode,
 				   struct buffer_head *bh)
 {
@@ -478,9 +519,9 @@ void ocfs2_set_new_buffer_uptodate(struct inode *inode,
 
 	set_buffer_uptodate(bh);
 
-	mutex_lock(ci->ci_io_mutex);
+	ocfs2_metadata_cache_io_lock(ci);
 	ocfs2_set_buffer_uptodate(inode, bh);
-	mutex_unlock(ci->ci_io_mutex);
+	ocfs2_metadata_cache_io_unlock(ci);
 }
 
 /* Requires ip_lock. */
@@ -526,9 +567,9 @@ static void ocfs2_remove_block_from_cache(struct inode *inode,
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
 	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
 
-	spin_lock(ci->ci_lock);
-	mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n",
-	     (unsigned long long)oi->ip_blkno,
+	ocfs2_metadata_cache_lock(ci);
+	mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
 	     (unsigned long long) block, ci->ci_num_cached,
 	     ci->ci_flags & OCFS2_CACHE_FL_INLINE);
 
@@ -541,7 +582,7 @@ static void ocfs2_remove_block_from_cache(struct inode *inode,
 		if (item)
 			ocfs2_remove_metadata_tree(ci, item);
 	}
-	spin_unlock(ci->ci_lock);
+	ocfs2_metadata_cache_unlock(ci);
 
 	if (item)
 		kmem_cache_free(ocfs2_uptodate_cachep, item);
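
For context, here is a hedged sketch of how an owner might wire these
operations up.  The inode side is not part of this file's diff, so the
names below (cache_info_to_inode() and the ocfs2_inode_* helpers) are
illustrative, not taken from this patch; the pattern is simply what the
new ocfs2_metadata_cache_init() signature asks for: map each hook back
to the owner's existing ip_lock spinlock and ip_io_mutex.

/* Illustrative only: an inode-backed implementation of the hooks.
 * cache_info_to_inode() is an assumed container_of() helper from the
 * embedded caching_info back to its owning ocfs2_inode_info. */
static struct ocfs2_inode_info *cache_info_to_inode(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_inode_info, ip_metadata_cache);
}

static u64 ocfs2_inode_cache_owner(struct ocfs2_caching_info *ci)
{
	/* The inode's block number identifies it in log messages. */
	return cache_info_to_inode(ci)->ip_blkno;
}

static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
{
	spin_lock(&cache_info_to_inode(ci)->ip_lock);
}

static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
{
	spin_unlock(&cache_info_to_inode(ci)->ip_lock);
}

static void ocfs2_inode_cache_io_lock(struct ocfs2_caching_info *ci)
{
	mutex_lock(&cache_info_to_inode(ci)->ip_io_mutex);
}

static void ocfs2_inode_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	mutex_unlock(&cache_info_to_inode(ci)->ip_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_inode_caching_ops = {
	.co_owner		= ocfs2_inode_cache_owner,
	.co_cache_lock		= ocfs2_inode_cache_lock,
	.co_cache_unlock	= ocfs2_inode_cache_unlock,
	.co_io_lock		= ocfs2_inode_cache_io_lock,
	.co_io_unlock		= ocfs2_inode_cache_io_unlock,
};

An owner would then initialize its cache once, e.g.
ocfs2_metadata_cache_init(&oi->ip_metadata_cache, &ocfs2_inode_caching_ops),
and every lock and log site in uptodate.c stays owner-agnostic.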