author    Chris Mason <chris.mason@oracle.com>  2008-12-17 14:51:42 -0500
committer Chris Mason <chris.mason@oracle.com>  2008-12-17 14:51:42 -0500
commit    cad321ad529400c6ab24c501a67c3be720a0744c
tree      7ac2b81a914329b7ac78b4f2a4275d62484ef45e
parent    87b29b208c6c38f3446d2de6ece946e2459052cf
Btrfs: shift all end_io work to thread pools
bio_end_io processing for reads with checksumming turned off, and for btree
writes, was happening without using the async thread pools. This meant the
extent_io.c code had to use spin_lock_irq and friends on the rb tree locks
for extent state.

There were some irq-safe vs irq-unsafe lock inversions between the delalloc
lock and the extent state locks. This patch gets rid of them by moving all
end_io work into the thread pools.

To avoid contention and deadlocks between the data end_io processing and the
metadata end_io processing, yet another thread pool is added to finish off
metadata writes.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
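The core of the patch is the dispatch change in end_workqueue_bio() below:
every bio completion is handed to one of four worker pools, keyed on read vs
write and data vs metadata, and is never finished inline. A minimal userspace
sketch of that pattern follows; it is not btrfs code — POSIX threads stand in
for btrfs_workers, and every name in it is invented for the example:

/* endio_pools.c -- toy analogue of the btrfs end_io worker pools.
 * Build: cc -pthread -o endio_pools endio_pools.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work {
        struct work *next;
        bool write;              /* completion of a write "bio"?        */
        bool metadata;           /* btree block rather than file data?  */
};

struct pool {
        const char *name;
        pthread_mutex_t lock;
        pthread_cond_t cond;
        struct work *head;
        pthread_t thread;
};

static void pool_queue(struct pool *p, struct work *w)
{
        pthread_mutex_lock(&p->lock);
        w->next = p->head;
        p->head = w;
        pthread_cond_signal(&p->cond);
        pthread_mutex_unlock(&p->lock);
}

static void *pool_main(void *arg)
{
        struct pool *p = arg;

        for (;;) {
                pthread_mutex_lock(&p->lock);
                while (!p->head)
                        pthread_cond_wait(&p->cond, &p->lock);
                struct work *w = p->head;
                p->head = w->next;
                pthread_mutex_unlock(&p->lock);

                /* The completion work runs here, in thread context, so
                 * any locks it takes never need the irq-safe variants. */
                printf("%s: finished %s %s\n", p->name,
                       w->metadata ? "metadata" : "data",
                       w->write ? "write" : "read");
                free(w);
        }
        return NULL;
}

/* Analogue of end_workqueue_bio(): never finish the work inline; pick a
 * pool so data and metadata completions cannot block one another. */
static void end_io(struct pool pools[4], struct work *w)
{
        pool_queue(&pools[(w->write ? 2 : 0) + (w->metadata ? 1 : 0)], w);
}

int main(void)
{
        static const char *names[] = {
                "endio", "endio-meta", "endio-write", "endio-meta-write",
        };
        struct pool pools[4];

        for (int i = 0; i < 4; i++) {
                pools[i] = (struct pool){ .name = names[i] };
                pthread_mutex_init(&pools[i].lock, NULL);
                pthread_cond_init(&pools[i].cond, NULL);
                pthread_create(&pools[i].thread, NULL, pool_main, &pools[i]);
        }

        for (int i = 0; i < 8; i++) {
                struct work *w = calloc(1, sizeof(*w));
                w->write = i & 2;
                w->metadata = i & 1;
                end_io(pools, w);
        }
        sleep(1);                /* crude drain; a real program would join */
        return 0;
}

Keeping metadata writes in their own pool, as end_io() does here, is what the
new endio-meta-write pool buys the real patch: data and metadata write
completions can no longer wait on each other.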
-rw-r--r--  fs/btrfs/ctree.h     |  1
-rw-r--r--  fs/btrfs/disk-io.c   | 39
-rw-r--r--  fs/btrfs/extent_io.c | 51
-rw-r--r--  fs/btrfs/inode.c     | 12
4 files changed, 57 insertions(+), 46 deletions(-)
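The extent_io.c hunks below are the payoff: once no end_io handler runs in
interrupt context, tree->lock never has to mask interrupts, so
spin_lock_irq()/spin_lock_irqsave() become plain spin_lock(). The bug class
being removed is a lock-order inversion; here is a deliberately broken
userspace toy showing its basic shape (ordinary mutexes stand in for the two
kernel locks, and the real case adds the interrupt-context dimension on top
of this — the program is expected to deadlock):

/* inversion.c -- toy AB-BA lock inversion; this program deadlocks.
 * Build: cc -pthread -o inversion inversion.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t delalloc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t extent_lock = PTHREAD_MUTEX_INITIALIZER;

static void *writeback_path(void *unused)
{
        (void)unused;
        for (;;) {               /* order: delalloc, then extent */
                pthread_mutex_lock(&delalloc_lock);
                pthread_mutex_lock(&extent_lock);
                pthread_mutex_unlock(&extent_lock);
                pthread_mutex_unlock(&delalloc_lock);
        }
        return NULL;
}

static void *endio_path(void *unused)
{
        (void)unused;
        for (;;) {               /* order: extent, then delalloc */
                pthread_mutex_lock(&extent_lock);
                pthread_mutex_lock(&delalloc_lock);
                pthread_mutex_unlock(&delalloc_lock);
                pthread_mutex_unlock(&extent_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, writeback_path, NULL);
        pthread_create(&b, NULL, endio_path, NULL);
        puts("spinning on opposite lock orders; expect a deadlock shortly");
        pthread_join(a, NULL);   /* never returns once both threads stall */
        return 0;
}

Moving every completion into worker threads gives all paths one consistent,
process-context locking environment, which is what lets the irq-safe lock
variants go away.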
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b89999de4564..ccea0648e106 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -758,6 +758,7 @@ struct btrfs_fs_info {
        struct btrfs_workers delalloc_workers;
        struct btrfs_workers endio_workers;
        struct btrfs_workers endio_meta_workers;
+       struct btrfs_workers endio_meta_write_workers;
        struct btrfs_workers endio_write_workers;
        struct btrfs_workers submit_workers;
        /*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 541a8279ac71..04f8d7080b17 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -447,8 +447,12 @@ static void end_workqueue_bio(struct bio *bio, int err)
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & (1 << BIO_RW)) {
-               btrfs_queue_worker(&fs_info->endio_write_workers,
-                                  &end_io_wq->work);
+               if (end_io_wq->metadata)
+                       btrfs_queue_worker(&fs_info->endio_meta_write_workers,
+                                          &end_io_wq->work);
+               else
+                       btrfs_queue_worker(&fs_info->endio_write_workers,
+                                          &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
@@ -624,23 +628,24 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                  int mirror_num, unsigned long bio_flags)
 {
-       /*
-        * kthread helpers are used to submit writes so that checksumming
-        * can happen in parallel across all CPUs
-        */
+       int ret;
+
+       ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+                                 bio, 1);
+       BUG_ON(ret);
+
        if (!(rw & (1 << BIO_RW))) {
-               int ret;
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
-               ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
-                                         bio, 1);
-               BUG_ON(ret);
-
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }
+       /*
+        * kthread helpers are used to submit writes so that checksumming
+        * can happen in parallel across all CPUs
+        */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   __btree_submit_bio_start,
@@ -1350,12 +1355,13 @@ static void end_workqueue_fn(struct btrfs_work *work)
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

-       /* metadata bios are special because the whole tree block must
+       /* metadata bio reads are special because the whole tree block must
         * be checksummed at once. This makes sure the entire block is in
         * ram and up to date before trying to verify things. For
         * blocksize <= pagesize, it is basically a noop
         */
-       if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
+       if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+           !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
                return;
@@ -1668,6 +1674,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                           fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
                           fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->endio_meta_write_workers,
+                          "endio-meta-write", fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size);

@@ -1677,6 +1685,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         */
        fs_info->endio_workers.idle_thresh = 4;
        fs_info->endio_write_workers.idle_thresh = 64;
+       fs_info->endio_meta_write_workers.idle_thresh = 64;

        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->submit_workers, 1);
@@ -1685,6 +1694,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->endio_meta_workers,
                            fs_info->thread_pool_size);
+       btrfs_start_workers(&fs_info->endio_meta_write_workers,
+                           fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->endio_write_workers,
                            fs_info->thread_pool_size);

@@ -1866,6 +1877,7 @@ fail_sb_buffer:
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
+       btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
@@ -2253,6 +2265,7 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
+       btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 607f5ff2791c..25ce2d18e5b4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -477,7 +477,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
-       unsigned long flags;
        int err;
        int set = 0;

@@ -488,7 +487,7 @@ again:
                return -ENOMEM;
        }

-       spin_lock_irqsave(&tree->lock, flags);
+       spin_lock(&tree->lock);
        /*
         * this search will find the extents that end after
         * our range starts
@@ -559,7 +558,7 @@ again:
                goto search_again;

 out:
-       spin_unlock_irqrestore(&tree->lock, flags);
+       spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

@@ -568,7 +567,7 @@ out:
 search_again:
        if (start > end)
                goto out;
-       spin_unlock_irqrestore(&tree->lock, flags);
+       spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
@@ -582,9 +581,9 @@ static int wait_on_state(struct extent_io_tree *tree,
 {
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        schedule();
-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
 }
@@ -599,7 +598,7 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
        struct extent_state *state;
        struct rb_node *node;

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
 again:
        while (1) {
                /*
@@ -628,13 +627,13 @@ again:
                        break;

                if (need_resched()) {
-                       spin_unlock_irq(&tree->lock);
+                       spin_unlock(&tree->lock);
                        cond_resched();
-                       spin_lock_irq(&tree->lock);
+                       spin_lock(&tree->lock);
                }
        }
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return 0;
 }
 EXPORT_SYMBOL(wait_extent_bit);
@@ -668,7 +667,6 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int b
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
-       unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
@@ -680,7 +678,7 @@ again:
                return -ENOMEM;
        }

-       spin_lock_irqsave(&tree->lock, flags);
+       spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
@@ -800,7 +798,7 @@ again:
                goto search_again;

 out:
-       spin_unlock_irqrestore(&tree->lock, flags);
+       spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

@@ -809,7 +807,7 @@ out:
 search_again:
        if (start > end)
                goto out;
-       spin_unlock_irqrestore(&tree->lock, flags);
+       spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
@@ -1021,7 +1019,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
        struct extent_state *state;
        int ret = 1;

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
@@ -1044,7 +1042,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                break;
        }
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return ret;
 }
 EXPORT_SYMBOL(find_first_extent_bit);
@@ -1097,7 +1095,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
        u64 found = 0;
        u64 total_bytes = 0;

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);

        /*
         * this search will find all the extents that end after
@@ -1134,7 +1132,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                break;
        }
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return found;
 }

@@ -1391,7 +1389,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
                return 0;
        }

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
@@ -1424,7 +1422,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
                break;
        }
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return total_bytes;
 }

@@ -1501,7 +1499,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
        struct extent_state *state;
        int ret = 0;

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
@@ -1518,7 +1516,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
        }
        state->private = private;
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return ret;
 }

@@ -1528,7 +1526,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
        struct extent_state *state;
        int ret = 0;

-       spin_lock_irq(&tree->lock);
+       spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
@@ -1545,7 +1543,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
        }
        *private = state->private;
 out:
-       spin_unlock_irq(&tree->lock);
+       spin_unlock(&tree->lock);
        return ret;
 }

@@ -1561,9 +1559,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
        struct extent_state *state = NULL;
        struct rb_node *node;
        int bitset = 0;
-       unsigned long flags;

-       spin_lock_irqsave(&tree->lock, flags);
+       spin_lock(&tree->lock);
        node = tree_search(tree, start);
        while (node && start <= end) {
                state = rb_entry(node, struct extent_state, rb_node);
@@ -1594,7 +1591,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
                        break;
                }
        }
-       spin_unlock_irqrestore(&tree->lock, flags);
+       spin_unlock(&tree->lock);
        return bitset;
 }
 EXPORT_SYMBOL(test_range_bit);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0577e77e661d..068bad463387 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1282,8 +1282,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 }

 /*
- * extent_io.c submission hook. This does the right thing for csum calculation on write,
- * or reading the csums from the tree before a read
+ * extent_io.c submission hook. This does the right thing for csum calculation
+ * on write, or reading the csums from the tree before a read
  */
 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                          int mirror_num, unsigned long bio_flags)
@@ -1292,11 +1292,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        int ret = 0;
        int skip_sum;

+       skip_sum = btrfs_test_flag(inode, NODATASUM);
+
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);

-       skip_sum = btrfs_test_flag(inode, NODATASUM);
-
        if (!(rw & (1 << BIO_RW))) {
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
@@ -1648,13 +1648,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
                       failrec->logical, failrec->len);
        failrec->last_mirror++;
        if (!state) {
-               spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
+               spin_lock(&BTRFS_I(inode)->io_tree.lock);
                state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
                                                    failrec->start,
                                                    EXTENT_LOCKED);
                if (state && state->start != failrec->start)
                        state = NULL;
-               spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
+               spin_unlock(&BTRFS_I(inode)->io_tree.lock);
        }
        if (!state || failrec->last_mirror > num_copies) {
                set_state_private(failure_tree, failrec->start, 0);