author		Chris Mason <chris.mason@oracle.com>	2008-07-28 15:32:51 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit		017e5369eb353559d68a11d4a718faa634533821 (patch)
tree		c339f2f4a59e403c7f9bfa8d137663c6bf260537	/fs/btrfs/transaction.c
parent		31153d81284934601d08110ac7698fd9a535e4c0 (diff)
Btrfs: Leaf reference cache update
This changes the reference cache to make a single cache per root instead of
one cache per transaction, and to key by the byte number of the disk block
instead of the keys inside.

This makes it much less likely to have cache misses if a snapshot or something
has an extra reference on a higher node or a leaf while the first transaction
that added the leaf into the cache is dropping.

Some throttling is added to functions that free blocks heavily so they wait
for old transactions to drop.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
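For context on the rekeying described above: each btrfs_root now owns a single leaf-reference cache indexed by the block's byte number (bytenr) rather than by the keys stored in the leaf, so a snapshot that still references the same physical block hits the same cache entry. The snippet below is only a minimal userspace sketch of that lookup scheme under those assumptions; the names (struct leaf_ref, ref_cache_add, ref_cache_lookup) are hypothetical, and the real cache in fs/btrfs/ref-cache.c is more involved (rbtree, locking, actual back references).

/*
 * Hypothetical userspace sketch of a per-root reference cache keyed by
 * the byte number of a disk block.  None of these names come from btrfs.
 */
#include <stdio.h>
#include <stdlib.h>

struct leaf_ref {
	unsigned long long bytenr;	/* key: byte offset of the leaf on disk */
	struct leaf_ref *next;		/* simple list in place of an rbtree */
};

struct ref_cache {
	struct leaf_ref *head;		/* one cache per root, not per transaction */
};

/* Remember a reference entry for the leaf that starts at @bytenr. */
static void ref_cache_add(struct ref_cache *cache, unsigned long long bytenr)
{
	struct leaf_ref *ref = malloc(sizeof(*ref));

	if (!ref)
		return;
	ref->bytenr = bytenr;
	ref->next = cache->head;
	cache->head = ref;
}

/*
 * Look up by bytenr: a snapshot pointing at the same physical block finds
 * the same entry even though its keys may differ, which is why keying by
 * bytenr avoids the cache misses the commit message describes.
 */
static struct leaf_ref *ref_cache_lookup(struct ref_cache *cache,
					 unsigned long long bytenr)
{
	struct leaf_ref *ref;

	for (ref = cache->head; ref; ref = ref->next)
		if (ref->bytenr == bytenr)
			return ref;
	return NULL;
}

int main(void)
{
	struct ref_cache cache = { NULL };

	ref_cache_add(&cache, 16384);
	printf("16384 cached: %s\n", ref_cache_lookup(&cache, 16384) ? "yes" : "no");
	printf("32768 cached: %s\n", ref_cache_lookup(&cache, 32768) ? "yes" : "no");
	return 0;
}

The throttling half of the change is visible directly in the hunks below: __btrfs_end_transaction() now sleeps on fs_info->transaction_throttle while fs_info->throttles is elevated, and drop_dirty_roots() wakes that queue as old snapshots finish dropping.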
Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r--	fs/btrfs/transaction.c	34
1 file changed, 12 insertions(+), 22 deletions(-)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 543e5ee4033a..fcef3cae0c92 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -36,7 +36,6 @@ struct dirty_root {
 	struct list_head list;
 	struct btrfs_root *root;
 	struct btrfs_root *latest_root;
-	struct btrfs_leaf_ref_tree ref_tree;
 };
 
 static noinline void put_transaction(struct btrfs_transaction *transaction)
@@ -108,13 +107,13 @@ static noinline int record_root_in_trans(struct btrfs_root *root)
 
 			dirty->latest_root = root;
 			INIT_LIST_HEAD(&dirty->list);
-			btrfs_leaf_ref_tree_init(&dirty->ref_tree);
-			dirty->ref_tree.generation = running_trans_id;
 
 			root->commit_root = btrfs_root_node(root);
-			root->ref_tree = &dirty->ref_tree;
+			root->dirty_root = dirty;
 
 			memcpy(dirty->root, root, sizeof(*root));
+			dirty->root->ref_tree = &root->ref_tree_struct;
+
 			spin_lock_init(&dirty->root->node_lock);
 			mutex_init(&dirty->root->objectid_mutex);
 			dirty->root->node = root->commit_root;
@@ -217,12 +216,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (waitqueue_active(&cur_trans->writer_wait))
 		wake_up(&cur_trans->writer_wait);
 
-	if (0 && cur_trans->in_commit && throttle) {
+	if (throttle && atomic_read(&root->fs_info->throttles)) {
 		DEFINE_WAIT(wait);
 		mutex_unlock(&root->fs_info->trans_mutex);
 		prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
 				TASK_UNINTERRUPTIBLE);
-		schedule();
+		if (atomic_read(&root->fs_info->throttles))
+			schedule();
 		finish_wait(&root->fs_info->transaction_throttle, &wait);
 		mutex_lock(&root->fs_info->trans_mutex);
 	}
@@ -333,6 +333,8 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
 		update_cowonly_root(trans, root);
+		if (root->fs_info->closing)
+			btrfs_remove_leaf_refs(root);
 	}
 	return 0;
 }
@@ -346,10 +348,8 @@ int btrfs_add_dead_root(struct btrfs_root *root,
 	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
 	if (!dirty)
 		return -ENOMEM;
-	btrfs_leaf_ref_tree_init(&dirty->ref_tree);
 	dirty->root = root;
 	dirty->latest_root = latest;
-	root->ref_tree = NULL;
 	list_add(&dirty->list, dead_list);
 	return 0;
 }
@@ -379,18 +379,14 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
 					     BTRFS_ROOT_TRANS_TAG);
 
 			BUG_ON(!root->ref_tree);
-			dirty = container_of(root->ref_tree, struct dirty_root,
-					     ref_tree);
+			dirty = root->dirty_root;
 
 			if (root->commit_root == root->node) {
 				WARN_ON(root->node->start !=
 					btrfs_root_bytenr(&root->root_item));
 
-				BUG_ON(!btrfs_leaf_ref_tree_empty(
-						root->ref_tree));
 				free_extent_buffer(root->commit_root);
 				root->commit_root = NULL;
-				root->ref_tree = NULL;
 
 				kfree(dirty->root);
 				kfree(dirty);
@@ -410,7 +406,6 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
 			       sizeof(struct btrfs_disk_key));
 			root->root_item.drop_level = 0;
 			root->commit_root = NULL;
-			root->ref_tree = NULL;
 			root->root_key.offset = root->fs_info->generation;
 			btrfs_set_root_bytenr(&root->root_item,
 					      root->node->start);
@@ -485,7 +480,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 	while(!list_empty(list)) {
 		struct btrfs_root *root;
 
-		dirty = list_entry(list->next, struct dirty_root, list);
+		dirty = list_entry(list->prev, struct dirty_root, list);
 		list_del_init(&dirty->list);
 
 		num_bytes = btrfs_root_used(&dirty->root->root_item);
@@ -507,7 +502,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 			if (err)
 				ret = err;
 			nr = trans->blocks_used;
-			ret = btrfs_end_transaction_throttle(trans, tree_root);
+			ret = btrfs_end_transaction(trans, tree_root);
 			BUG_ON(ret);
 
 			mutex_unlock(&root->fs_info->drop_mutex);
@@ -517,6 +512,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 		}
 		BUG_ON(ret);
 		atomic_dec(&root->fs_info->throttles);
+		wake_up(&root->fs_info->transaction_throttle);
 
 		mutex_lock(&root->fs_info->alloc_mutex);
 		num_bytes -= btrfs_root_used(&dirty->root->root_item);
@@ -539,8 +535,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 		ret = btrfs_end_transaction(trans, tree_root);
 		BUG_ON(ret);
 
-		btrfs_remove_leaf_refs(dirty->root);
-
 		free_extent_buffer(dirty->root->node);
 		kfree(dirty->root);
 		kfree(dirty);
@@ -725,10 +719,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 				 &dirty_fs_roots);
 	BUG_ON(ret);
 
-	spin_lock(&root->fs_info->ref_cache_lock);
-	root->fs_info->running_ref_cache_size = 0;
-	spin_unlock(&root->fs_info->ref_cache_lock);
-
 	ret = btrfs_commit_tree_roots(trans, root);
 	BUG_ON(ret);
 