aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-12-17 14:51:42 -0500
committerChris Mason <chris.mason@oracle.com>2008-12-17 14:51:42 -0500
commitcad321ad529400c6ab24c501a67c3be720a0744c (patch)
tree7ac2b81a914329b7ac78b4f2a4275d62484ef45e /fs/btrfs/extent_io.c
parent87b29b208c6c38f3446d2de6ece946e2459052cf (diff)
Btrfs: shift all end_io work to thread pools
bio_end_io for reads without checksumming on and btree writes were happening without using async thread pools. This means the extent_io.c code had to use spin_lock_irq and friends on the rb tree locks for extent state. There were some irq safe vs unsafe lock inversions between the delalloc lock and the extent state locks. This patch gets rid of them by moving all end_io code into the thread pools. To avoid contention and deadlocks between the data end_io processing and the metadata end_io processing yet another thread pool is added to finish off metadata writes. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c51
1 file changed, 24 insertions, 27 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 607f5ff2791c..25ce2d18e5b4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -477,7 +477,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
477 struct extent_state *state; 477 struct extent_state *state;
478 struct extent_state *prealloc = NULL; 478 struct extent_state *prealloc = NULL;
479 struct rb_node *node; 479 struct rb_node *node;
480 unsigned long flags;
481 int err; 480 int err;
482 int set = 0; 481 int set = 0;
483 482
@@ -488,7 +487,7 @@ again:
488 return -ENOMEM; 487 return -ENOMEM;
489 } 488 }
490 489
491 spin_lock_irqsave(&tree->lock, flags); 490 spin_lock(&tree->lock);
492 /* 491 /*
493 * this search will find the extents that end after 492 * this search will find the extents that end after
494 * our range starts 493 * our range starts
@@ -559,7 +558,7 @@ again:
559 goto search_again; 558 goto search_again;
560 559
561out: 560out:
562 spin_unlock_irqrestore(&tree->lock, flags); 561 spin_unlock(&tree->lock);
563 if (prealloc) 562 if (prealloc)
564 free_extent_state(prealloc); 563 free_extent_state(prealloc);
565 564
@@ -568,7 +567,7 @@ out:
568search_again: 567search_again:
569 if (start > end) 568 if (start > end)
570 goto out; 569 goto out;
571 spin_unlock_irqrestore(&tree->lock, flags); 570 spin_unlock(&tree->lock);
572 if (mask & __GFP_WAIT) 571 if (mask & __GFP_WAIT)
573 cond_resched(); 572 cond_resched();
574 goto again; 573 goto again;
@@ -582,9 +581,9 @@ static int wait_on_state(struct extent_io_tree *tree,
582{ 581{
583 DEFINE_WAIT(wait); 582 DEFINE_WAIT(wait);
584 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); 583 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
585 spin_unlock_irq(&tree->lock); 584 spin_unlock(&tree->lock);
586 schedule(); 585 schedule();
587 spin_lock_irq(&tree->lock); 586 spin_lock(&tree->lock);
588 finish_wait(&state->wq, &wait); 587 finish_wait(&state->wq, &wait);
589 return 0; 588 return 0;
590} 589}
@@ -599,7 +598,7 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
599 struct extent_state *state; 598 struct extent_state *state;
600 struct rb_node *node; 599 struct rb_node *node;
601 600
602 spin_lock_irq(&tree->lock); 601 spin_lock(&tree->lock);
603again: 602again:
604 while (1) { 603 while (1) {
605 /* 604 /*
@@ -628,13 +627,13 @@ again:
628 break; 627 break;
629 628
630 if (need_resched()) { 629 if (need_resched()) {
631 spin_unlock_irq(&tree->lock); 630 spin_unlock(&tree->lock);
632 cond_resched(); 631 cond_resched();
633 spin_lock_irq(&tree->lock); 632 spin_lock(&tree->lock);
634 } 633 }
635 } 634 }
636out: 635out:
637 spin_unlock_irq(&tree->lock); 636 spin_unlock(&tree->lock);
638 return 0; 637 return 0;
639} 638}
640EXPORT_SYMBOL(wait_extent_bit); 639EXPORT_SYMBOL(wait_extent_bit);
@@ -668,7 +667,6 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int b
668 struct extent_state *state; 667 struct extent_state *state;
669 struct extent_state *prealloc = NULL; 668 struct extent_state *prealloc = NULL;
670 struct rb_node *node; 669 struct rb_node *node;
671 unsigned long flags;
672 int err = 0; 670 int err = 0;
673 int set; 671 int set;
674 u64 last_start; 672 u64 last_start;
@@ -680,7 +678,7 @@ again:
680 return -ENOMEM; 678 return -ENOMEM;
681 } 679 }
682 680
683 spin_lock_irqsave(&tree->lock, flags); 681 spin_lock(&tree->lock);
684 /* 682 /*
685 * this search will find all the extents that end after 683 * this search will find all the extents that end after
686 * our range starts. 684 * our range starts.
@@ -800,7 +798,7 @@ again:
800 goto search_again; 798 goto search_again;
801 799
802out: 800out:
803 spin_unlock_irqrestore(&tree->lock, flags); 801 spin_unlock(&tree->lock);
804 if (prealloc) 802 if (prealloc)
805 free_extent_state(prealloc); 803 free_extent_state(prealloc);
806 804
@@ -809,7 +807,7 @@ out:
809search_again: 807search_again:
810 if (start > end) 808 if (start > end)
811 goto out; 809 goto out;
812 spin_unlock_irqrestore(&tree->lock, flags); 810 spin_unlock(&tree->lock);
813 if (mask & __GFP_WAIT) 811 if (mask & __GFP_WAIT)
814 cond_resched(); 812 cond_resched();
815 goto again; 813 goto again;
@@ -1021,7 +1019,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1021 struct extent_state *state; 1019 struct extent_state *state;
1022 int ret = 1; 1020 int ret = 1;
1023 1021
1024 spin_lock_irq(&tree->lock); 1022 spin_lock(&tree->lock);
1025 /* 1023 /*
1026 * this search will find all the extents that end after 1024 * this search will find all the extents that end after
1027 * our range starts. 1025 * our range starts.
@@ -1044,7 +1042,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1044 break; 1042 break;
1045 } 1043 }
1046out: 1044out:
1047 spin_unlock_irq(&tree->lock); 1045 spin_unlock(&tree->lock);
1048 return ret; 1046 return ret;
1049} 1047}
1050EXPORT_SYMBOL(find_first_extent_bit); 1048EXPORT_SYMBOL(find_first_extent_bit);
@@ -1097,7 +1095,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1097 u64 found = 0; 1095 u64 found = 0;
1098 u64 total_bytes = 0; 1096 u64 total_bytes = 0;
1099 1097
1100 spin_lock_irq(&tree->lock); 1098 spin_lock(&tree->lock);
1101 1099
1102 /* 1100 /*
1103 * this search will find all the extents that end after 1101 * this search will find all the extents that end after
@@ -1134,7 +1132,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1134 break; 1132 break;
1135 } 1133 }
1136out: 1134out:
1137 spin_unlock_irq(&tree->lock); 1135 spin_unlock(&tree->lock);
1138 return found; 1136 return found;
1139} 1137}
1140 1138
@@ -1391,7 +1389,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
1391 return 0; 1389 return 0;
1392 } 1390 }
1393 1391
1394 spin_lock_irq(&tree->lock); 1392 spin_lock(&tree->lock);
1395 if (cur_start == 0 && bits == EXTENT_DIRTY) { 1393 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1396 total_bytes = tree->dirty_bytes; 1394 total_bytes = tree->dirty_bytes;
1397 goto out; 1395 goto out;
@@ -1424,7 +1422,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
1424 break; 1422 break;
1425 } 1423 }
1426out: 1424out:
1427 spin_unlock_irq(&tree->lock); 1425 spin_unlock(&tree->lock);
1428 return total_bytes; 1426 return total_bytes;
1429} 1427}
1430 1428
@@ -1501,7 +1499,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1501 struct extent_state *state; 1499 struct extent_state *state;
1502 int ret = 0; 1500 int ret = 0;
1503 1501
1504 spin_lock_irq(&tree->lock); 1502 spin_lock(&tree->lock);
1505 /* 1503 /*
1506 * this search will find all the extents that end after 1504 * this search will find all the extents that end after
1507 * our range starts. 1505 * our range starts.
@@ -1518,7 +1516,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1518 } 1516 }
1519 state->private = private; 1517 state->private = private;
1520out: 1518out:
1521 spin_unlock_irq(&tree->lock); 1519 spin_unlock(&tree->lock);
1522 return ret; 1520 return ret;
1523} 1521}
1524 1522
@@ -1528,7 +1526,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1528 struct extent_state *state; 1526 struct extent_state *state;
1529 int ret = 0; 1527 int ret = 0;
1530 1528
1531 spin_lock_irq(&tree->lock); 1529 spin_lock(&tree->lock);
1532 /* 1530 /*
1533 * this search will find all the extents that end after 1531 * this search will find all the extents that end after
1534 * our range starts. 1532 * our range starts.
@@ -1545,7 +1543,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1545 } 1543 }
1546 *private = state->private; 1544 *private = state->private;
1547out: 1545out:
1548 spin_unlock_irq(&tree->lock); 1546 spin_unlock(&tree->lock);
1549 return ret; 1547 return ret;
1550} 1548}
1551 1549
@@ -1561,9 +1559,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1561 struct extent_state *state = NULL; 1559 struct extent_state *state = NULL;
1562 struct rb_node *node; 1560 struct rb_node *node;
1563 int bitset = 0; 1561 int bitset = 0;
1564 unsigned long flags;
1565 1562
1566 spin_lock_irqsave(&tree->lock, flags); 1563 spin_lock(&tree->lock);
1567 node = tree_search(tree, start); 1564 node = tree_search(tree, start);
1568 while (node && start <= end) { 1565 while (node && start <= end) {
1569 state = rb_entry(node, struct extent_state, rb_node); 1566 state = rb_entry(node, struct extent_state, rb_node);
@@ -1594,7 +1591,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1594 break; 1591 break;
1595 } 1592 }
1596 } 1593 }
1597 spin_unlock_irqrestore(&tree->lock, flags); 1594 spin_unlock(&tree->lock);
1598 return bitset; 1595 return bitset;
1599} 1596}
1600EXPORT_SYMBOL(test_range_bit); 1597EXPORT_SYMBOL(test_range_bit);