commit 531f4b1ae5e0fc8c9b3f03838218e5ea178f80d3
Author:    Chris Mason <chris.mason@oracle.com>  2011-11-06 03:05:08 -0500
Committer: Chris Mason <chris.mason@oracle.com>  2011-11-06 03:05:08 -0500
tree       63efdfe9b192243fefb76be3921b9a2aaa26291e
parent     c06a0e120a4e381a1c291c1fce3c6155c5791cae
parent     7a26285eea8eb92e0088db011571d887d4551b0f

Merge branch 'for-chris' of git://github.com/sensille/linux into integration

Conflicts:
	fs/btrfs/ctree.h

Signed-off-by: Chris Mason <chris.mason@oracle.com>
 fs/btrfs/Makefile    |   3
 fs/btrfs/ctree.h     |  21
 fs/btrfs/disk-io.c   |  84
 fs/btrfs/disk-io.h   |   2
 fs/btrfs/extent_io.c |   9
 fs/btrfs/extent_io.h |   4
 fs/btrfs/reada.c     | 949
 fs/btrfs/scrub.c     | 112
 fs/btrfs/volumes.c   |   8
 fs/btrfs/volumes.h   |   8
 10 files changed, 1130 insertions(+), 70 deletions(-)
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 40e6ac08c21f..bdd6fb238ce1 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -7,6 +7,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
-	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
+	   reada.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6bb34fc1ff22..b9ba59ff9292 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1074,6 +1074,7 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_freespace_worker;
 	struct btrfs_workers submit_workers;
 	struct btrfs_workers caching_workers;
+	struct btrfs_workers readahead_workers;
 
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
@@ -1158,6 +1159,10 @@ struct btrfs_fs_info {
 
 	struct btrfs_delayed_root *delayed_root;
 
+	/* readahead tree */
+	spinlock_t reada_lock;
+	struct radix_tree_root reada_tree;
+
 	/* next backup root to be overwritten */
 	int backup_root_index;
 };
@@ -2812,4 +2817,20 @@ int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 			 struct btrfs_scrub_progress *progress);
 
+/* reada.c */
+struct reada_control {
+	struct btrfs_root *root;	/* tree to prefetch */
+	struct btrfs_key key_start;
+	struct btrfs_key key_end;	/* exclusive */
+	atomic_t elems;
+	struct kref refcnt;
+	wait_queue_head_t wait;
+};
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+			      struct btrfs_key *start, struct btrfs_key *end);
+int btrfs_reada_wait(void *handle);
+void btrfs_reada_detach(void *handle);
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			 u64 start, int err);
+
 #endif
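
The interface added to ctree.h is deliberately small: a caller names a key range on a tree, gets back an opaque reada_control handle, and then either blocks until the prefetch completes or detaches and lets it finish in the background. A minimal sketch of an in-kernel caller (the function name and the fs_root argument are hypothetical, for illustration only):

	/*
	 * Illustrative sketch: prefetch an entire tree, then block until done.
	 * 'fs_root' is a hypothetical struct btrfs_root * the caller holds.
	 */
	static void example_prefetch_tree(struct btrfs_root *fs_root)
	{
		struct reada_control *rc;
		struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
		struct btrfs_key end = {
			.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
		};

		rc = btrfs_reada_add(fs_root, &start, &end);
		if (IS_ERR(rc))
			return;	/* only ERR_PTR(-ENOMEM) is possible here */

		btrfs_reada_wait(rc);	/* consumes the handle... */
		/* ...or btrfs_reada_detach(rc) to let it run in background */
	}

Note that key_end is exclusive, and that exactly one of btrfs_reada_wait() and btrfs_reada_detach() must be called to drop the handle's reference.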
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 23b6776477b7..cedfbfb278eb 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -366,7 +366,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 	while (1) {
-		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
+		ret = read_extent_buffer_pages(io_tree, eb, start,
+					       WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
@@ -607,11 +608,47 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
 	end = eb->start + end - 1;
 err:
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, ret);
+	}
+
 	free_extent_buffer(eb);
 out:
 	return ret;
 }
 
+static int btree_io_failed_hook(struct bio *failed_bio,
+				struct page *page, u64 start, u64 end,
+				struct extent_state *state)
+{
+	struct extent_io_tree *tree;
+	unsigned long len;
+	struct extent_buffer *eb;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+
+	len = page->private >> 2;
+	WARN_ON(len == 0);
+
+	eb = alloc_extent_buffer(tree, start, len, page);
+	if (eb == NULL)
+		goto out;
+
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, -EIO);
+	}
+
+out:
+	return -EIO;	/* we fixed nothing */
+}
+
 static void end_workqueue_bio(struct bio *bio, int err)
 {
 	struct end_io_wq *end_io_wq = bio->bi_private;
@@ -973,11 +1010,43 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 	if (!buf)
 		return 0;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-				 buf, 0, 0, btree_get_extent, 0);
+				 buf, 0, WAIT_NONE, btree_get_extent, 0);
 	free_extent_buffer(buf);
 	return ret;
 }
 
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb)
+{
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
+	int ret;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return 0;
+
+	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
+
+	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+				       btree_get_extent, mirror_num);
+	if (ret) {
+		free_extent_buffer(buf);
+		return ret;
+	}
+
+	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
+		free_extent_buffer(buf);
+		return -EIO;
+	} else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+		*eb = buf;
+	} else {
+		free_extent_buffer(buf);
+	}
+	return 0;
+}
+
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
@@ -1904,6 +1973,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->trans_no_join = 0;
 	fs_info->free_chunk_space = 0;
 
+	/* readahead state */
+	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
+	spin_lock_init(&fs_info->reada_lock);
+
 	fs_info->thread_pool_size = min_t(unsigned long,
 				num_online_cpus() + 2, 8);
 
@@ -2103,6 +2176,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -2113,6 +2189,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
+	fs_info->readahead_workers.idle_thresh = 2;
 
 	btrfs_start_workers(&fs_info->workers, 1);
 	btrfs_start_workers(&fs_info->generic_worker, 1);
@@ -2126,6 +2203,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
 	btrfs_start_workers(&fs_info->delayed_workers, 1);
 	btrfs_start_workers(&fs_info->caching_workers, 1);
+	btrfs_start_workers(&fs_info->readahead_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2855,6 +2933,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->submit_workers);
 	btrfs_stop_workers(&fs_info->delayed_workers);
 	btrfs_stop_workers(&fs_info->caching_workers);
+	btrfs_stop_workers(&fs_info->readahead_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -3363,6 +3442,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
+	.readpage_io_failed_hook = btree_io_failed_hook,
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
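
reada_tree_block_flagged() above has three distinct outcomes a caller must separate: a nonzero return means the read failed outright (or the buffer was found corrupt), zero with *eb set means the buffer was already uptodate and a reference is handed back, and zero with *eb untouched means the bios were merely submitted, with completion reported later through btree_readpage_end_io_hook(). A sketch of the dispatch, mirroring what reada_start_machine_dev() in reada.c (below) does with it:

	/* sketch: consuming the three outcomes of reada_tree_block_flagged() */
	struct extent_buffer *eb = NULL;
	int err;

	err = reada_tree_block_flagged(root, bytenr, blocksize, mirror_num, &eb);
	if (err) {
		/* not even started, or corrupt: report the error to the hook */
		__readahead_hook(root, NULL, bytenr, err);
	} else if (eb) {
		/* already uptodate: process now, drop the returned reference */
		__readahead_hook(root, eb, eb->start, 0);
		free_extent_buffer(eb);
	}
	/* else: read is in flight; the end_io hook will call back later */

(__readahead_hook() is static to reada.c; the snippet only illustrates the contract.)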
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e678539c8519..c99d0a8f13fa 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -40,6 +40,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 			 u32 blocksize, u64 parent_transid);
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 			 u64 parent_transid);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						   u64 bytenr, u32 blocksize);
 int clean_tree_block(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cc3c58970d4e..c12705682c65 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1919,7 +1919,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		if (!uptodate && tree->ops &&
 		    tree->ops->readpage_io_failed_hook) {
 			ret = tree->ops->readpage_io_failed_hook(bio, page,
-							 start, end, NULL);
+							 start, end, state);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -3551,8 +3551,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 }
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb,
-			     u64 start, int wait,
+			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num)
 {
 	unsigned long i;
@@ -3588,7 +3587,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if (!wait) {
+		if (wait == WAIT_NONE) {
 			if (!trylock_page(page))
 				goto unlock_exit;
 		} else {
@@ -3632,7 +3631,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (bio)
 		submit_one_bio(READ, bio, mirror_num, bio_flags);
 
-	if (ret || !wait)
+	if (ret || wait != WAIT_COMPLETE)
 		return ret;
 
 	for (i = start_i; i < num_pages; i++) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cbd4824a7c94..697570eed9e8 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -33,6 +33,7 @@
 #define EXTENT_BUFFER_BLOCKING 1
 #define EXTENT_BUFFER_DIRTY 2
 #define EXTENT_BUFFER_CORRUPT 3
+#define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -252,6 +253,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
+#define WAIT_NONE	0
+#define WAIT_COMPLETE	1
+#define WAIT_PAGE_LOCK	2
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
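
The former boolean wait parameter of read_extent_buffer_pages() is now a three-way mode: WAIT_NONE only trylocks pages and silently gives up on contention, WAIT_COMPLETE blocks until the read has finished, and the new WAIT_PAGE_LOCK blocks to take the page locks but returns right after the bios are submitted, leaving completion to the end_io hooks. The three call sites in this series, roughly:

	/* opportunistic prefetch (readahead_tree_block): skip busy pages */
	read_extent_buffer_pages(io_tree, eb, 0, WAIT_NONE,
				 btree_get_extent, 0);

	/* normal read path (btree_read_extent_buffer_pages): wait for data */
	ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE,
				       btree_get_extent, mirror_num);

	/* readahead (reada_tree_block_flagged): submit, end_io reports later */
	ret = read_extent_buffer_pages(io_tree, eb, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);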
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
new file mode 100644
index 000000000000..2b701d082227
--- /dev/null
+++ b/fs/btrfs/reada.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright (C) 2011 STRATO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#undef DEBUG
+
+/*
+ * This is the implementation for the generic read ahead framework.
+ *
+ * To trigger a readahead, btrfs_reada_add must be called. It will start
+ * a read ahead for the given range [start, end) on tree root. The returned
+ * handle can either be used to wait on the readahead to finish
+ * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
+ *
+ * The read ahead works as follows:
+ * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
+ * reada_start_machine will then search for extents to prefetch and trigger
+ * some reads. When a read finishes for a node, all contained node/leaf
+ * pointers that lie in the given range will also be enqueued. The reads will
+ * be triggered in sequential order, thus giving a big win over a naive
+ * enumeration. It will also make use of multi-device layouts. Each disk
+ * will have its own read pointer and all disks will be utilized in parallel.
+ * Also, no two disks will read both sides of a mirror simultaneously, as this
+ * would waste seeking capacity. Instead both disks will read different parts
+ * of the filesystem.
+ * Any number of readaheads can be started in parallel. The read order will be
+ * determined globally, i.e. 2 parallel readaheads will normally finish faster
+ * than the same 2 started one after another.
+ */
+
+#define MAX_MIRRORS 2
+#define MAX_IN_FLIGHT 6
+
+struct reada_extctl {
+	struct list_head list;
+	struct reada_control *rc;
+	u64 generation;
+};
+
+struct reada_extent {
+	u64 logical;
+	struct btrfs_key top;
+	u32 blocksize;
+	int err;
+	struct list_head extctl;
+	struct kref refcnt;
+	spinlock_t lock;
+	struct reada_zone *zones[MAX_MIRRORS];
+	int nzones;
+	struct btrfs_device *scheduled_for;
+};
+
+struct reada_zone {
+	u64 start;
+	u64 end;
+	u64 elems;
+	struct list_head list;
+	spinlock_t lock;
+	int locked;
+	struct btrfs_device *device;
+	struct btrfs_device *devs[MAX_MIRRORS]; /* full list, incl self */
+	int ndevs;
+	struct kref refcnt;
+};
+
+struct reada_machine_work {
+	struct btrfs_work work;
+	struct btrfs_fs_info *fs_info;
+};
+
+static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
+static void reada_control_release(struct kref *kref);
+static void reada_zone_release(struct kref *kref);
+static void reada_start_machine(struct btrfs_fs_info *fs_info);
+static void __reada_start_machine(struct btrfs_fs_info *fs_info);
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+			   struct btrfs_key *top, int level, u64 generation);
+
+/* recurses */
+/* in case of err, eb might be NULL */
+static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			    u64 start, int err)
+{
+	int level = 0;
+	int nritems;
+	int i;
+	u64 bytenr;
+	u64 generation;
+	struct reada_extent *re;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct list_head list;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	struct btrfs_device *for_dev;
+
+	if (eb)
+		level = btrfs_header_level(eb);
+
+	/* find extent */
+	spin_lock(&fs_info->reada_lock);
+	re = radix_tree_lookup(&fs_info->reada_tree, index);
+	if (re)
+		kref_get(&re->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (!re)
+		return -1;
+
+	spin_lock(&re->lock);
+	/*
+	 * just take the full list from the extent. afterwards we
+	 * don't need the lock anymore
+	 */
+	list_replace_init(&re->extctl, &list);
+	for_dev = re->scheduled_for;
+	re->scheduled_for = NULL;
+	spin_unlock(&re->lock);
+
+	if (err == 0) {
+		nritems = level ? btrfs_header_nritems(eb) : 0;
+		generation = btrfs_header_generation(eb);
+		/*
+		 * FIXME: currently we just set nritems to 0 if this is a leaf,
+		 * effectively ignoring the content. As a next step we could
+		 * trigger more readahead depending on the content, e.g.
+		 * fetch the checksums for the extents in the leaf.
+		 */
+	} else {
+		/*
+		 * this is the error case, the extent buffer has not been
+		 * read correctly. We won't access anything from it and
+		 * just cleanup our data structures. Effectively this will
+		 * cut the branch below this node from read ahead.
+		 */
+		nritems = 0;
+		generation = 0;
+	}
+
+	for (i = 0; i < nritems; i++) {
+		struct reada_extctl *rec;
+		u64 n_gen;
+		struct btrfs_key key;
+		struct btrfs_key next_key;
+
+		btrfs_node_key_to_cpu(eb, &key, i);
+		if (i + 1 < nritems)
+			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
+		else
+			next_key = re->top;
+		bytenr = btrfs_node_blockptr(eb, i);
+		n_gen = btrfs_node_ptr_generation(eb, i);
+
+		list_for_each_entry(rec, &list, list) {
+			struct reada_control *rc = rec->rc;
+
+			/*
+			 * if the generation doesn't match, just ignore this
+			 * extctl. This will probably cut off a branch from
+			 * prefetch. Alternatively one could start a new (sub-)
+			 * prefetch for this branch, starting again from root.
+			 * FIXME: move the generation check out of this loop
+			 */
+#ifdef DEBUG
+			if (rec->generation != generation) {
+				printk(KERN_DEBUG "generation mismatch for "
+						"(%llu,%d,%llu) %llu != %llu\n",
+				       key.objectid, key.type, key.offset,
+				       rec->generation, generation);
+			}
+#endif
+			if (rec->generation == generation &&
+			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
+			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
+				reada_add_block(rc, bytenr, &next_key,
+						level - 1, n_gen);
+		}
+	}
+	/*
+	 * free extctl records
+	 */
+	while (!list_empty(&list)) {
+		struct reada_control *rc;
+		struct reada_extctl *rec;
+
+		rec = list_first_entry(&list, struct reada_extctl, list);
+		list_del(&rec->list);
+		rc = rec->rc;
+		kfree(rec);
+
+		kref_get(&rc->refcnt);
+		if (atomic_dec_and_test(&rc->elems)) {
+			kref_put(&rc->refcnt, reada_control_release);
+			wake_up(&rc->wait);
+		}
+		kref_put(&rc->refcnt, reada_control_release);
+
+		reada_extent_put(fs_info, re);	/* one ref for each entry */
+	}
+	reada_extent_put(fs_info, re);	/* our ref */
+	if (for_dev)
+		atomic_dec(&for_dev->reada_in_flight);
+
+	return 0;
+}
+
+/*
+ * start is passed separately in case eb is NULL, which may be the case with
+ * failed I/O
+ */
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			 u64 start, int err)
+{
+	int ret;
+
+	ret = __readahead_hook(root, eb, start, err);
+
+	reada_start_machine(root->fs_info);
+
+	return ret;
+}
+
+static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
+					  struct btrfs_device *dev, u64 logical,
+					  struct btrfs_multi_bio *multi)
+{
+	int ret;
+	int looped = 0;
+	struct reada_zone *zone;
+	struct btrfs_block_group_cache *cache = NULL;
+	u64 start;
+	u64 end;
+	int i;
+
+again:
+	zone = NULL;
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
+				     logical >> PAGE_CACHE_SHIFT, 1);
+	if (ret == 1)
+		kref_get(&zone->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (ret == 1) {
+		if (logical >= zone->start && logical < zone->end)
+			return zone;
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+
+	if (looped)
+		return NULL;
+
+	cache = btrfs_lookup_block_group(fs_info, logical);
+	if (!cache)
+		return NULL;
+
+	start = cache->key.objectid;
+	end = start + cache->key.offset - 1;
+	btrfs_put_block_group(cache);
+
+	zone = kzalloc(sizeof(*zone), GFP_NOFS);
+	if (!zone)
+		return NULL;
+
+	zone->start = start;
+	zone->end = end;
+	INIT_LIST_HEAD(&zone->list);
+	spin_lock_init(&zone->lock);
+	zone->locked = 0;
+	kref_init(&zone->refcnt);
+	zone->elems = 0;
+	zone->device = dev; /* our device always sits at index 0 */
+	for (i = 0; i < multi->num_stripes; ++i) {
+		/* bounds have already been checked */
+		zone->devs[i] = multi->stripes[i].dev;
+	}
+	zone->ndevs = multi->num_stripes;
+
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_insert(&dev->reada_zones,
+				(unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+				zone);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (ret) {
+		kfree(zone);
+		looped = 1;
+		goto again;
+	}
+
+	return zone;
+}
+
+static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+					      u64 logical,
+					      struct btrfs_key *top, int level)
+{
+	int ret;
+	int looped = 0;
+	struct reada_extent *re = NULL;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_device *dev;
+	u32 blocksize;
+	u64 length;
+	int nzones = 0;
+	int i;
+	unsigned long index = logical >> PAGE_CACHE_SHIFT;
+
+again:
+	spin_lock(&fs_info->reada_lock);
+	re = radix_tree_lookup(&fs_info->reada_tree, index);
+	if (re)
+		kref_get(&re->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (re || looped)
+		return re;
+
+	re = kzalloc(sizeof(*re), GFP_NOFS);
+	if (!re)
+		return NULL;
+
+	blocksize = btrfs_level_size(root, level);
+	re->logical = logical;
+	re->blocksize = blocksize;
+	re->top = *top;
+	INIT_LIST_HEAD(&re->extctl);
+	spin_lock_init(&re->lock);
+	kref_init(&re->refcnt);
+
+	/*
+	 * map block
+	 */
+	length = blocksize;
+	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &multi, 0);
+	if (ret || !multi || length < blocksize)
+		goto error;
+
+	if (multi->num_stripes > MAX_MIRRORS) {
+		printk(KERN_ERR "btrfs readahead: more than %d copies not "
+				"supported", MAX_MIRRORS);
+		goto error;
+	}
+
+	for (nzones = 0; nzones < multi->num_stripes; ++nzones) {
+		struct reada_zone *zone;
+
+		dev = multi->stripes[nzones].dev;
+		zone = reada_find_zone(fs_info, dev, logical, multi);
+		if (!zone)
+			break;
+
+		re->zones[nzones] = zone;
+		spin_lock(&zone->lock);
+		if (!zone->elems)
+			kref_get(&zone->refcnt);
+		++zone->elems;
+		spin_unlock(&zone->lock);
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	re->nzones = nzones;
+	if (nzones == 0) {
+		/* not a single zone found, error and out */
+		goto error;
+	}
+
+	/* insert extent in reada_tree + all per-device trees, all or nothing */
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
+	if (ret) {
+		spin_unlock(&fs_info->reada_lock);
+		if (ret != -ENOMEM) {
+			/* someone inserted the extent in the meantime */
+			looped = 1;
+		}
+		goto error;
+	}
+	for (i = 0; i < nzones; ++i) {
+		dev = multi->stripes[i].dev;
+		ret = radix_tree_insert(&dev->reada_extents, index, re);
+		if (ret) {
+			while (--i >= 0) {
+				dev = multi->stripes[i].dev;
+				BUG_ON(dev == NULL);
+				radix_tree_delete(&dev->reada_extents, index);
+			}
+			BUG_ON(fs_info == NULL);
+			radix_tree_delete(&fs_info->reada_tree, index);
+			spin_unlock(&fs_info->reada_lock);
+			goto error;
+		}
+	}
+	spin_unlock(&fs_info->reada_lock);
+
+	return re;
+
+error:
+	while (nzones) {
+		struct reada_zone *zone;
+
+		--nzones;
+		zone = re->zones[nzones];
+		kref_get(&zone->refcnt);
+		spin_lock(&zone->lock);
+		--zone->elems;
+		if (zone->elems == 0) {
+			/*
+			 * no fs_info->reada_lock needed, as this can't be
+			 * the last ref
+			 */
+			kref_put(&zone->refcnt, reada_zone_release);
+		}
+		spin_unlock(&zone->lock);
+
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	kfree(re);
+	if (looped)
+		goto again;
+	return NULL;
+}
+
+static void reada_kref_dummy(struct kref *kr)
+{
+}
+
+static void reada_extent_put(struct btrfs_fs_info *fs_info,
+			     struct reada_extent *re)
+{
+	int i;
+	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+
+	spin_lock(&fs_info->reada_lock);
+	if (!kref_put(&re->refcnt, reada_kref_dummy)) {
+		spin_unlock(&fs_info->reada_lock);
+		return;
+	}
+
+	radix_tree_delete(&fs_info->reada_tree, index);
+	for (i = 0; i < re->nzones; ++i) {
+		struct reada_zone *zone = re->zones[i];
+
+		radix_tree_delete(&zone->device->reada_extents, index);
+	}
+
+	spin_unlock(&fs_info->reada_lock);
+
+	for (i = 0; i < re->nzones; ++i) {
+		struct reada_zone *zone = re->zones[i];
+
+		kref_get(&zone->refcnt);
+		spin_lock(&zone->lock);
+		--zone->elems;
+		if (zone->elems == 0) {
+			/* no fs_info->reada_lock needed, as this can't be
+			 * the last ref */
+			kref_put(&zone->refcnt, reada_zone_release);
+		}
+		spin_unlock(&zone->lock);
+
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	if (re->scheduled_for)
+		atomic_dec(&re->scheduled_for->reada_in_flight);
+
+	kfree(re);
+}
+
+static void reada_zone_release(struct kref *kref)
+{
+	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+
+	radix_tree_delete(&zone->device->reada_zones,
+			  zone->end >> PAGE_CACHE_SHIFT);
+
+	kfree(zone);
+}
+
+static void reada_control_release(struct kref *kref)
+{
+	struct reada_control *rc = container_of(kref, struct reada_control,
+						refcnt);
+
+	kfree(rc);
+}
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+			   struct btrfs_key *top, int level, u64 generation)
+{
+	struct btrfs_root *root = rc->root;
+	struct reada_extent *re;
+	struct reada_extctl *rec;
+
+	re = reada_find_extent(root, logical, top, level); /* takes one ref */
+	if (!re)
+		return -1;
+
+	rec = kzalloc(sizeof(*rec), GFP_NOFS);
+	if (!rec) {
+		reada_extent_put(root->fs_info, re);
+		return -1;
+	}
+
+	rec->rc = rc;
+	rec->generation = generation;
+	atomic_inc(&rc->elems);
+
+	spin_lock(&re->lock);
+	list_add_tail(&rec->list, &re->extctl);
+	spin_unlock(&re->lock);
+
+	/* leave the ref on the extent */
+
+	return 0;
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
+{
+	int i;
+	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+
+	for (i = 0; i < zone->ndevs; ++i) {
+		struct reada_zone *peer;
+		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
+		if (peer && peer->device != zone->device)
+			peer->locked = lock;
+	}
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static int reada_pick_zone(struct btrfs_device *dev)
+{
+	struct reada_zone *top_zone = NULL;
+	struct reada_zone *top_locked_zone = NULL;
+	u64 top_elems = 0;
+	u64 top_locked_elems = 0;
+	unsigned long index = 0;
+	int ret;
+
+	if (dev->reada_curr_zone) {
+		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
+		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
+		dev->reada_curr_zone = NULL;
+	}
+	/* pick the zone with the most elements */
+	while (1) {
+		struct reada_zone *zone;
+
+		ret = radix_tree_gang_lookup(&dev->reada_zones,
+					     (void **)&zone, index, 1);
+		if (ret == 0)
+			break;
+		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+		if (zone->locked) {
+			if (zone->elems > top_locked_elems) {
+				top_locked_elems = zone->elems;
+				top_locked_zone = zone;
+			}
+		} else {
+			if (zone->elems > top_elems) {
+				top_elems = zone->elems;
+				top_zone = zone;
+			}
+		}
+	}
+	if (top_zone)
+		dev->reada_curr_zone = top_zone;
+	else if (top_locked_zone)
+		dev->reada_curr_zone = top_locked_zone;
+	else
+		return 0;
+
+	dev->reada_next = dev->reada_curr_zone->start;
+	kref_get(&dev->reada_curr_zone->refcnt);
+	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
+
+	return 1;
+}
+
+static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
+				   struct btrfs_device *dev)
+{
+	struct reada_extent *re = NULL;
+	int mirror_num = 0;
+	struct extent_buffer *eb = NULL;
+	u64 logical;
+	u32 blocksize;
+	int ret;
+	int i;
+	int need_kick = 0;
+
+	spin_lock(&fs_info->reada_lock);
+	if (dev->reada_curr_zone == NULL) {
+		ret = reada_pick_zone(dev);
+		if (!ret) {
+			spin_unlock(&fs_info->reada_lock);
+			return 0;
+		}
+	}
+	/*
+	 * FIXME currently we issue the reads one extent at a time. If we have
+	 * a contiguous block of extents, we could also coagulate them or use
+	 * plugging to speed things up
+	 */
+	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
+		ret = reada_pick_zone(dev);
+		if (!ret) {
+			spin_unlock(&fs_info->reada_lock);
+			return 0;
+		}
+		re = NULL;
+		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+	}
+	if (ret == 0) {
+		spin_unlock(&fs_info->reada_lock);
+		return 0;
+	}
+	dev->reada_next = re->logical + re->blocksize;
+	kref_get(&re->refcnt);
+
+	spin_unlock(&fs_info->reada_lock);
+
+	/*
+	 * find mirror num
+	 */
+	for (i = 0; i < re->nzones; ++i) {
+		if (re->zones[i]->device == dev) {
+			mirror_num = i + 1;
+			break;
+		}
+	}
+	logical = re->logical;
+	blocksize = re->blocksize;
+
+	spin_lock(&re->lock);
+	if (re->scheduled_for == NULL) {
+		re->scheduled_for = dev;
+		need_kick = 1;
+	}
+	spin_unlock(&re->lock);
+
+	reada_extent_put(fs_info, re);
+
+	if (!need_kick)
+		return 0;
+
+	atomic_inc(&dev->reada_in_flight);
+	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
+				       mirror_num, &eb);
+	if (ret)
+		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
+	else if (eb)
+		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);
+
+	if (eb)
+		free_extent_buffer(eb);
+
+	return 1;
+}
+
+static void reada_start_machine_worker(struct btrfs_work *work)
+{
+	struct reada_machine_work *rmw;
+	struct btrfs_fs_info *fs_info;
+
+	rmw = container_of(work, struct reada_machine_work, work);
+	fs_info = rmw->fs_info;
+
+	kfree(rmw);
+
+	__reada_start_machine(fs_info);
+}
+
+static void __reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	u64 enqueued;
+	u64 total = 0;
+	int i;
+
+	do {
+		enqueued = 0;
+		list_for_each_entry(device, &fs_devices->devices, dev_list) {
+			if (atomic_read(&device->reada_in_flight) <
+			    MAX_IN_FLIGHT)
+				enqueued += reada_start_machine_dev(fs_info,
+								    device);
+		}
+		total += enqueued;
+	} while (enqueued && total < 10000);
+
+	if (enqueued == 0)
+		return;
+
+	/*
+	 * If everything is already in the cache, this is effectively single
+	 * threaded. To a) not hold the caller for too long and b) to utilize
+	 * more cores, we broke the loop above after 10000 iterations and now
+	 * enqueue to workers to finish it. This will distribute the load to
+	 * the cores.
+	 */
+	for (i = 0; i < 2; ++i)
+		reada_start_machine(fs_info);
+}
+
+static void reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+	struct reada_machine_work *rmw;
+
+	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+	if (!rmw) {
+		/* FIXME we cannot handle this properly right now */
+		BUG();
+	}
+	rmw->work.func = reada_start_machine_worker;
+	rmw->fs_info = fs_info;
+
+	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
+}
+
+#ifdef DEBUG
+static void dump_devs(struct btrfs_fs_info *fs_info, int all)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	unsigned long index;
+	int ret;
+	int i;
+	int j;
+	int cnt;
+
+	spin_lock(&fs_info->reada_lock);
+	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
+			atomic_read(&device->reada_in_flight));
+		index = 0;
+		while (1) {
+			struct reada_zone *zone;
+			ret = radix_tree_gang_lookup(&device->reada_zones,
+						     (void **)&zone, index, 1);
+			if (ret == 0)
+				break;
+			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
+				"%d devs", zone->start, zone->end, zone->elems,
+				zone->locked);
+			for (j = 0; j < zone->ndevs; ++j) {
+				printk(KERN_CONT " %lld",
+					zone->devs[j]->devid);
+			}
+			if (device->reada_curr_zone == zone)
+				printk(KERN_CONT " curr off %llu",
+					device->reada_next - zone->start);
+			printk(KERN_CONT "\n");
+			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+		}
+		cnt = 0;
+		index = 0;
+		while (all) {
+			struct reada_extent *re = NULL;
+
+			ret = radix_tree_gang_lookup(&device->reada_extents,
+						     (void **)&re, index, 1);
+			if (ret == 0)
+				break;
+			printk(KERN_DEBUG
+				"  re: logical %llu size %u empty %d for %lld",
+				re->logical, re->blocksize,
+				list_empty(&re->extctl), re->scheduled_for ?
+				re->scheduled_for->devid : -1);
+
+			for (i = 0; i < re->nzones; ++i) {
+				printk(KERN_CONT " zone %llu-%llu devs",
+					re->zones[i]->start,
+					re->zones[i]->end);
+				for (j = 0; j < re->zones[i]->ndevs; ++j) {
+					printk(KERN_CONT " %lld",
+						re->zones[i]->devs[j]->devid);
+				}
+			}
+			printk(KERN_CONT "\n");
+			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			if (++cnt > 15)
+				break;
+		}
+	}
+
+	index = 0;
+	cnt = 0;
+	while (all) {
+		struct reada_extent *re = NULL;
+
+		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
+					     index, 1);
+		if (ret == 0)
+			break;
+		if (!re->scheduled_for) {
+			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			continue;
+		}
+		printk(KERN_DEBUG
+			"re: logical %llu size %u list empty %d for %lld",
+			re->logical, re->blocksize, list_empty(&re->extctl),
+			re->scheduled_for ? re->scheduled_for->devid : -1);
+		for (i = 0; i < re->nzones; ++i) {
+			printk(KERN_CONT " zone %llu-%llu devs",
+				re->zones[i]->start,
+				re->zones[i]->end);
+			for (j = 0; j < re->zones[i]->ndevs; ++j) {
+				printk(KERN_CONT " %lld",
+					re->zones[i]->devs[j]->devid);
+			}
+		}
+		printk(KERN_CONT "\n");
+		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+	}
+	spin_unlock(&fs_info->reada_lock);
+}
+#endif
+
+/*
+ * interface
+ */
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+			struct btrfs_key *key_start, struct btrfs_key *key_end)
+{
+	struct reada_control *rc;
+	u64 start;
+	u64 generation;
+	int level;
+	struct extent_buffer *node;
+	static struct btrfs_key max_key = {
+		.objectid = (u64)-1,
+		.type = (u8)-1,
+		.offset = (u64)-1
+	};
+
+	rc = kzalloc(sizeof(*rc), GFP_NOFS);
+	if (!rc)
+		return ERR_PTR(-ENOMEM);
+
+	rc->root = root;
+	rc->key_start = *key_start;
+	rc->key_end = *key_end;
+	atomic_set(&rc->elems, 0);
+	init_waitqueue_head(&rc->wait);
+	kref_init(&rc->refcnt);
+	kref_get(&rc->refcnt); /* one ref for having elements */
+
+	node = btrfs_root_node(root);
+	start = node->start;
+	level = btrfs_header_level(node);
+	generation = btrfs_header_generation(node);
+	free_extent_buffer(node);
+
+	reada_add_block(rc, start, &max_key, level, generation);
+
+	reada_start_machine(root->fs_info);
+
+	return rc;
+}
+
+#ifdef DEBUG
+int btrfs_reada_wait(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	while (atomic_read(&rc->elems)) {
+		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
+				   5 * HZ);
+		dump_devs(rc->root->fs_info,
+			  atomic_read(&rc->elems) < 10 ? 1 : 0);
+	}
+
+	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+
+	kref_put(&rc->refcnt, reada_control_release);
+
+	return 0;
+}
+#else
+int btrfs_reada_wait(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	while (atomic_read(&rc->elems)) {
+		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
+	}
+
+	kref_put(&rc->refcnt, reada_control_release);
+
+	return 0;
+}
+#endif
+
+void btrfs_reada_detach(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	kref_put(&rc->refcnt, reada_control_release);
+}
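
A convention worth keeping in mind while reading reada.c: all three radix trees (fs_info->reada_tree, dev->reada_extents, dev->reada_zones) are keyed by page index, not byte offset. Extents are inserted under the page index of their logical start; zones under the page index of their end. Keying zones by their end is what lets a gang lookup starting from any logical address inside a zone find that zone as its first result, which both reada_find_zone() and the sequential scan in reada_start_machine_dev() rely on. Condensed from the code above:

	/* keying convention used throughout reada.c */
	unsigned long ext_index  = re->logical >> PAGE_CACHE_SHIFT;
	unsigned long zone_index = zone->end >> PAGE_CACHE_SHIFT;

	radix_tree_insert(&fs_info->reada_tree, ext_index, re);
	radix_tree_insert(&dev->reada_extents, ext_index, re);
	radix_tree_insert(&dev->reada_zones, zone_index, zone);

	/*
	 * a gang lookup from any address inside the zone returns that zone,
	 * because its end index is the first index >= the search start
	 */
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);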
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 69a600f07763..5bc4ec827b3d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -29,15 +29,12 @@
  * any can be found.
  *
  * Future enhancements:
- * - To enhance the performance, better read-ahead strategies for the
- *   extent-tree can be employed.
  * - In case an unrepairable extent is encountered, track which files are
  *   affected and report them
  * - In case of a read error on files with nodatasum, map the file and read
  *   the extent to trigger a writeback of the good copy
  * - track and record media errors, throw out bad devices
  * - add a mode to also read unallocated space
- * - make the prefetch cancellable
  */
 
 struct scrub_bio;
@@ -741,13 +738,16 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	int slot;
 	int i;
 	u64 nstripes;
-	int start_stripe;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	u64 physical;
 	u64 logical;
 	u64 generation;
 	u64 mirror_num;
+	struct reada_control *reada1;
+	struct reada_control *reada2;
+	struct btrfs_key key_start;
+	struct btrfs_key key_end;
 
 	u64 increment = map->stripe_len;
 	u64 offset;
@@ -779,81 +779,67 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 2;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
 	/*
-	 * find all extents for each stripe and just read them to get
-	 * them into the page cache
-	 * FIXME: we can do better. build a more intelligent prefetching
+	 * trigger the readahead for the extent tree and the csum tree and
+	 * wait for completion. During readahead, the scrub is officially
+	 * paused to not hold off transaction commits
 	 */
 	logical = base + offset;
-	physical = map->stripes[num].physical;
-	ret = 0;
-	for (i = 0; i < nstripes; ++i) {
-		key.objectid = logical;
-		key.type = BTRFS_EXTENT_ITEM_KEY;
-		key.offset = (u64)0;
-
-		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (ret < 0)
-			goto out_noplug;
-
-		/*
-		 * we might miss half an extent here, but that doesn't matter,
-		 * as it's only the prefetch
-		 */
-		while (1) {
-			l = path->nodes[0];
-			slot = path->slots[0];
-			if (slot >= btrfs_header_nritems(l)) {
-				ret = btrfs_next_leaf(root, path);
-				if (ret == 0)
-					continue;
-				if (ret < 0)
-					goto out_noplug;
-
-				break;
-			}
-			btrfs_item_key_to_cpu(l, &key, slot);
-
-			if (key.objectid >= logical + map->stripe_len)
-				break;
-
-			path->slots[0]++;
-		}
-		btrfs_release_path(path);
-		logical += increment;
-		physical += map->stripe_len;
-		cond_resched();
-	}
+	wait_event(sdev->list_wait,
+		   atomic_read(&sdev->in_flight) == 0);
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	/* FIXME it might be better to start readahead at commit root */
+	key_start.objectid = logical;
+	key_start.type = BTRFS_EXTENT_ITEM_KEY;
+	key_start.offset = (u64)0;
+	key_end.objectid = base + offset + nstripes * increment;
+	key_end.type = BTRFS_EXTENT_ITEM_KEY;
+	key_end.offset = (u64)0;
+	reada1 = btrfs_reada_add(root, &key_start, &key_end);
+
+	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_start.type = BTRFS_EXTENT_CSUM_KEY;
+	key_start.offset = logical;
+	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_end.type = BTRFS_EXTENT_CSUM_KEY;
+	key_end.offset = base + offset + nstripes * increment;
+	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
+
+	if (!IS_ERR(reada1))
+		btrfs_reada_wait(reada1);
+	if (!IS_ERR(reada2))
+		btrfs_reada_wait(reada2);
+
+	mutex_lock(&fs_info->scrub_lock);
+	while (atomic_read(&fs_info->scrub_pause_req)) {
+		mutex_unlock(&fs_info->scrub_lock);
+		wait_event(fs_info->scrub_pause_wait,
+			   atomic_read(&fs_info->scrub_pause_req) == 0);
+		mutex_lock(&fs_info->scrub_lock);
+	}
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	wake_up(&fs_info->scrub_pause_wait);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
 	 * the scrub. This might currently (crc32) end up to be about 1MB
 	 */
-	start_stripe = 0;
 	blk_start_plug(&plug);
-again:
-	logical = base + offset + start_stripe * increment;
-	for (i = start_stripe; i < nstripes; ++i) {
-		ret = btrfs_lookup_csums_range(csum_root, logical,
-					       logical + map->stripe_len - 1,
-					       &sdev->csum_list, 1);
-		if (ret)
-			goto out;
-
-		logical += increment;
-		cond_resched();
-	}
+
 	/*
 	 * now find all extents for each stripe and scrub them
 	 */
-	logical = base + offset + start_stripe * increment;
-	physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+	logical = base + offset;
+	physical = map->stripes[num].physical;
 	ret = 0;
-	for (i = start_stripe; i < nstripes; ++i) {
+	for (i = 0; i < nstripes; ++i) {
 		/*
 		 * canceled?
 		 */
@@ -882,11 +868,14 @@ again:
 		atomic_dec(&fs_info->scrubs_paused);
 		mutex_unlock(&fs_info->scrub_lock);
 		wake_up(&fs_info->scrub_pause_wait);
-		scrub_free_csums(sdev);
-		start_stripe = i;
-		goto again;
 		}
 
+		ret = btrfs_lookup_csums_range(csum_root, logical,
+					       logical + map->stripe_len - 1,
+					       &sdev->csum_list, 1);
+		if (ret)
+			goto out;
+
 		key.objectid = logical;
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 		key.offset = (u64)0;
@@ -982,7 +971,6 @@ next:
 
 out:
 	blk_finish_plug(&plug);
-out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c3b45564048e..f1685a2b45c8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -366,6 +366,14 @@ static noinline int device_list_add(const char *path,
 	}
 	INIT_LIST_HEAD(&device->dev_alloc_list);
 
+	/* init readahead state */
+	spin_lock_init(&device->reada_lock);
+	device->reada_curr_zone = NULL;
+	atomic_set(&device->reada_in_flight, 0);
+	device->reada_next = 0;
+	INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
+	INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
+
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_add_rcu(&device->dev_list, &fs_devices->devices);
 	mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6d866db4e177..2a751246188a 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -92,6 +92,14 @@ struct btrfs_device {
 	struct btrfs_work work;
 	struct rcu_head rcu;
 	struct work_struct rcu_work;
+
+	/* readahead state */
+	spinlock_t reada_lock;
+	atomic_t reada_in_flight;
+	u64 reada_next;
+	struct reada_zone *reada_curr_zone;
+	struct radix_tree_root reada_zones;
+	struct radix_tree_root reada_extents;
 };
 
 struct btrfs_fs_devices {
