path: root/fs/btrfs/volumes.c
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--  fs/btrfs/volumes.c  125
1 file changed, 83 insertions(+), 42 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5dbefd11b4af..7eda483d7b5a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -260,7 +260,7 @@ loop_lock:
 		num_run++;
 		batch_run++;
 
-		if (bio_sync(cur))
+		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
 			num_sync_run++;
 
 		if (need_resched()) {
@@ -276,7 +276,7 @@ loop_lock:
 		 * is now congested. Back off and let other work structs
 		 * run instead
 		 */
-		if (pending && bdi_write_congested(bdi) && batch_run > 32 &&
+		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
 		    fs_info->fs_devices->open_devices > 1) {
 			struct io_context *ioc;
 
@@ -446,8 +446,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 			goto error;
 
 		device->name = kstrdup(orig_dev->name, GFP_NOFS);
-		if (!device->name)
+		if (!device->name) {
+			kfree(device);
 			goto error;
+		}
 
 		device->devid = orig_dev->devid;
 		device->work.func = pending_bios_fn;
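
The hunk above plugs a small leak in clone_fs_devices(): when kstrdup() of the device name fails, the freshly allocated device is now freed before jumping to the error label instead of being leaked. A minimal userspace sketch of the same allocate/duplicate/clean-up pattern (the struct and helper names below are illustrative, not the btrfs ones):

#include <stdlib.h>
#include <string.h>

struct fake_device {
	char *name;
};

/* Clone a record; free the partially built object if the second allocation fails. */
static struct fake_device *clone_device(const char *orig_name)
{
	struct fake_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return NULL;

	dev->name = strdup(orig_name);
	if (!dev->name) {
		free(dev);	/* mirrors the added kfree(device) in the hunk above */
		return NULL;
	}
	return dev;
}
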
@@ -719,10 +721,9 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
-					 struct btrfs_device *device,
-					 u64 num_bytes, u64 *start,
-					 u64 *max_avail)
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 *start, u64 *max_avail)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -1736,6 +1737,10 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	extent_root = root->fs_info->extent_root;
 	em_tree = &root->fs_info->mapping_tree.map_tree;
 
+	ret = btrfs_can_relocate(extent_root, chunk_offset);
+	if (ret)
+		return -ENOSPC;
+
 	/* step one, relocate all the extents inside this chunk */
 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
 	BUG_ON(ret);
@@ -1749,9 +1754,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	 * step two, delete the device extents and the
 	 * chunk tree entries
 	 */
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	BUG_ON(em->start > chunk_offset ||
 	       em->start + em->len < chunk_offset);
@@ -1780,9 +1785,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 	BUG_ON(ret);
 
-	spin_lock(&em_tree->lock);
+	write_lock(&em_tree->lock);
 	remove_extent_mapping(em_tree, em);
-	spin_unlock(&em_tree->lock);
+	write_unlock(&em_tree->lock);
 
 	kfree(map);
 	em->bdev = NULL;
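
This pair of hunks, like the matching ones later in the file, converts the extent map tree from a single spinlock to reader/writer locking: lookup_extent_mapping() callers now take the lock shared, while add_extent_mapping()/remove_extent_mapping() callers take it exclusive. A rough userspace analogue of that pattern using pthread rwlocks (the types and helpers below are placeholders, not the real extent_map_tree API):

#include <pthread.h>
#include <stddef.h>

struct fake_em_tree {
	pthread_rwlock_t lock;
	void *entry;			/* the real tree is an rbtree of extent_map */
};

static void *fake_lookup(struct fake_em_tree *tree)
{
	void *em;

	pthread_rwlock_rdlock(&tree->lock);	/* read_lock(): lookups may run concurrently */
	em = tree->entry;
	pthread_rwlock_unlock(&tree->lock);	/* read_unlock() */
	return em;
}

static void fake_remove(struct fake_em_tree *tree, void *em)
{
	pthread_rwlock_wrlock(&tree->lock);	/* write_lock(): mutations are exclusive */
	if (tree->entry == em)
		tree->entry = NULL;
	pthread_rwlock_unlock(&tree->lock);	/* write_unlock() */
}
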
@@ -1807,12 +1812,15 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 	struct btrfs_key found_key;
 	u64 chunk_tree = chunk_root->root_key.objectid;
 	u64 chunk_type;
+	bool retried = false;
+	int failed = 0;
 	int ret;
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
+again:
 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
 	key.offset = (u64)-1;
 	key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -1842,7 +1850,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
 						   found_key.objectid,
 						   found_key.offset);
-			BUG_ON(ret);
+			if (ret == -ENOSPC)
+				failed++;
+			else if (ret)
+				BUG();
 		}
 
 		if (found_key.offset == 0)
@@ -1850,6 +1861,14 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 		key.offset = found_key.offset - 1;
 	}
 	ret = 0;
+	if (failed && !retried) {
+		failed = 0;
+		retried = true;
+		goto again;
+	} else if (failed && retried) {
+		WARN_ON(1);
+		ret = -ENOSPC;
+	}
 error:
 	btrfs_free_path(path);
 	return ret;
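
The retry logic added above makes btrfs_relocate_sys_chunks() tolerate -ENOSPC from individual chunk relocations: failures are counted, the whole chunk-tree walk is repeated once (earlier relocations may have freed space), and only a second full pass of failures is reported as -ENOSPC. A self-contained sketch of that pattern, with a stubbed relocate_one() standing in for btrfs_relocate_chunk():

#include <errno.h>
#include <stdbool.h>

/* Stub standing in for the per-chunk relocation; always succeeds here. */
static int relocate_one(int chunk)
{
	(void)chunk;
	return 0;
}

/* Count -ENOSPC failures, retry the whole pass once, then give up. */
static int relocate_all(int nr_chunks)
{
	bool retried = false;
	int failed = 0;
	int i, ret;

again:
	for (i = 0; i < nr_chunks; i++) {
		ret = relocate_one(i);
		if (ret == -ENOSPC)
			failed++;	/* remember the failure, keep scanning */
		else if (ret)
			return ret;	/* the kernel code BUG()s on any other error */
	}
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;		/* space may have been freed; try one more pass */
	}
	return failed ? -ENOSPC : 0;
}
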
@@ -1894,6 +1913,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
 			continue;
 
 		ret = btrfs_shrink_device(device, old_size - size_to_free);
+		if (ret == -ENOSPC)
+			break;
 		BUG_ON(ret);
 
 		trans = btrfs_start_transaction(dev_root, 1);
@@ -1938,9 +1959,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
 		chunk = btrfs_item_ptr(path->nodes[0],
 				       path->slots[0],
 				       struct btrfs_chunk);
-		key.offset = found_key.offset;
 		/* chunk zero is special */
-		if (key.offset == 0)
+		if (found_key.offset == 0)
 			break;
 
 		btrfs_release_path(chunk_root, path);
@@ -1948,7 +1968,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
 					   chunk_root->root_key.objectid,
 					   found_key.objectid,
 					   found_key.offset);
-		BUG_ON(ret);
+		BUG_ON(ret && ret != -ENOSPC);
+		key.offset = found_key.offset - 1;
 	}
 	ret = 0;
 error:
@@ -1974,10 +1995,13 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	u64 chunk_offset;
 	int ret;
 	int slot;
+	int failed = 0;
+	bool retried = false;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
 	u64 old_total = btrfs_super_total_bytes(super_copy);
+	u64 old_size = device->total_bytes;
 	u64 diff = device->total_bytes - new_size;
 
 	if (new_size >= device->total_bytes)
@@ -1987,12 +2011,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	if (!path)
 		return -ENOMEM;
 
-	trans = btrfs_start_transaction(root, 1);
-	if (!trans) {
-		ret = -ENOMEM;
-		goto done;
-	}
-
 	path->reada = 2;
 
 	lock_chunks(root);
@@ -2001,8 +2019,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	if (device->writeable)
 		device->fs_devices->total_rw_bytes -= diff;
 	unlock_chunks(root);
-	btrfs_end_transaction(trans, root);
 
+again:
 	key.objectid = device->devid;
 	key.offset = (u64)-1;
 	key.type = BTRFS_DEV_EXTENT_KEY;
@@ -2017,6 +2035,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 			goto done;
 		if (ret) {
 			ret = 0;
+			btrfs_release_path(root, path);
 			break;
 		}
 
@@ -2024,14 +2043,18 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		slot = path->slots[0];
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
-		if (key.objectid != device->devid)
+		if (key.objectid != device->devid) {
+			btrfs_release_path(root, path);
 			break;
+		}
 
 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
 		length = btrfs_dev_extent_length(l, dev_extent);
 
-		if (key.offset + length <= new_size)
+		if (key.offset + length <= new_size) {
+			btrfs_release_path(root, path);
 			break;
+		}
 
 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2040,8 +2063,26 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
 					   chunk_offset);
-		if (ret)
+		if (ret && ret != -ENOSPC)
 			goto done;
+		if (ret == -ENOSPC)
+			failed++;
+		key.offset -= 1;
+	}
+
+	if (failed && !retried) {
+		failed = 0;
+		retried = true;
+		goto again;
+	} else if (failed && retried) {
+		ret = -ENOSPC;
+		lock_chunks(root);
+
+		device->total_bytes = old_size;
+		if (device->writeable)
+			device->fs_devices->total_rw_bytes += diff;
+		unlock_chunks(root);
+		goto done;
 	}
 
 	/* Shrinking succeeded, else we would be at "done". */
@@ -2294,9 +2335,9 @@ again:
 	em->block_len = em->len;
 
 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
-	spin_lock(&em_tree->lock);
+	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em);
-	spin_unlock(&em_tree->lock);
+	write_unlock(&em_tree->lock);
 	BUG_ON(ret);
 	free_extent_map(em);
 
@@ -2491,9 +2532,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
 	int readonly = 0;
 	int i;
 
-	spin_lock(&map_tree->map_tree.lock);
+	read_lock(&map_tree->map_tree.lock);
 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
-	spin_unlock(&map_tree->map_tree.lock);
+	read_unlock(&map_tree->map_tree.lock);
 	if (!em)
 		return 1;
 
@@ -2518,11 +2559,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 	struct extent_map *em;
 
 	while (1) {
-		spin_lock(&tree->map_tree.lock);
+		write_lock(&tree->map_tree.lock);
 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
 		if (em)
 			remove_extent_mapping(&tree->map_tree, em);
-		spin_unlock(&tree->map_tree.lock);
+		write_unlock(&tree->map_tree.lock);
 		if (!em)
 			break;
 		kfree(em->bdev);
@@ -2540,9 +2581,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
 	struct extent_map_tree *em_tree = &map_tree->map_tree;
 	int ret;
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, len);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 	BUG_ON(!em);
 
 	BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2604,9 +2645,9 @@ again:
 		atomic_set(&multi->error, 0);
 	}
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, *length);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	if (!em && unplug_page)
 		return 0;
@@ -2763,9 +2804,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	u64 stripe_nr;
 	int i, j, nr = 0;
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	BUG_ON(!em || em->start != chunk_start);
 	map = (struct map_lookup *)em->bdev;
@@ -2903,7 +2944,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	bio->bi_rw |= rw;
 
 	spin_lock(&device->io_lock);
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
@@ -3053,9 +3094,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 	logical = key->offset;
 	length = btrfs_chunk_length(leaf, chunk);
 
-	spin_lock(&map_tree->map_tree.lock);
+	read_lock(&map_tree->map_tree.lock);
 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-	spin_unlock(&map_tree->map_tree.lock);
+	read_unlock(&map_tree->map_tree.lock);
 
 	/* already mapped? */
 	if (em && em->start <= logical && em->start + em->len > logical) {
@@ -3114,9 +3155,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		map->stripes[i].dev->in_fs_metadata = 1;
 	}
 
-	spin_lock(&map_tree->map_tree.lock);
+	write_lock(&map_tree->map_tree.lock);
 	ret = add_extent_mapping(&map_tree->map_tree, em);
-	spin_unlock(&map_tree->map_tree.lock);
+	write_unlock(&map_tree->map_tree.lock);
 	BUG_ON(ret);
 	free_extent_map(em);
 