Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--  fs/btrfs/volumes.c  91
1 file changed, 53 insertions(+), 38 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 41ecbb2347f2..dd318ff280b2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -17,6 +17,7 @@
  */
 #include <linux/sched.h>
 #include <linux/bio.h>
+#include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/blkdev.h>
 #include <linux/random.h>
@@ -256,13 +257,13 @@ loop_lock:
 		wake_up(&fs_info->async_submit_wait);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
-		submit_bio(cur->bi_rw, cur);
-		num_run++;
-		batch_run++;
 
-		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+		if (cur->bi_rw & REQ_SYNC)
 			num_sync_run++;
 
+		submit_bio(cur->bi_rw, cur);
+		num_run++;
+		batch_run++;
 		if (need_resched()) {
 			if (num_sync_run) {
 				blk_run_backing_dev(bdi, NULL);
@@ -325,16 +326,6 @@ loop_lock:
 		num_sync_run = 0;
 		blk_run_backing_dev(bdi, NULL);
 	}
-
-	cond_resched();
-	if (again)
-		goto loop;
-
-	spin_lock(&device->io_lock);
-	if (device->pending_bios.head || device->pending_sync_bios.head)
-		goto loop_lock;
-	spin_unlock(&device->io_lock);
-
 	/*
 	 * IO has already been through a long path to get here. Checksumming,
 	 * async helper threads, perhaps compression. We've done a pretty
@@ -346,6 +337,16 @@ loop_lock:
 	 * cared about found its way down here.
 	 */
 	blk_run_backing_dev(bdi, NULL);
+
+	cond_resched();
+	if (again)
+		goto loop;
+
+	spin_lock(&device->io_lock);
+	if (device->pending_bios.head || device->pending_sync_bios.head)
+		goto loop_lock;
+	spin_unlock(&device->io_lock);
+
 done:
 	return 0;
 }
@@ -365,6 +366,7 @@ static noinline int device_list_add(const char *path,
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *fs_devices;
 	u64 found_transid = btrfs_super_generation(disk_super);
+	char *name;
 
 	fs_devices = find_fsid(disk_super->fsid);
 	if (!fs_devices) {
@@ -411,6 +413,12 @@ static noinline int device_list_add(const char *path,
 
 		device->fs_devices = fs_devices;
 		fs_devices->num_devices++;
+	} else if (strcmp(device->name, path)) {
+		name = kstrdup(path, GFP_NOFS);
+		if (!name)
+			return -ENOMEM;
+		kfree(device->name);
+		device->name = name;
 	}
 
 	if (found_transid > fs_devices->latest_trans) {
@@ -592,7 +600,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 			goto error_close;
 
 		disk_super = (struct btrfs_super_block *)bh->b_data;
-		devid = le64_to_cpu(disk_super->dev_item.devid);
+		devid = btrfs_stack_device_id(&disk_super->dev_item);
 		if (devid != device->devid)
 			goto error_brelse;
 
@@ -694,7 +702,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 		goto error_close;
 	}
 	disk_super = (struct btrfs_super_block *)bh->b_data;
-	devid = le64_to_cpu(disk_super->dev_item.devid);
+	devid = btrfs_stack_device_id(&disk_super->dev_item);
 	transid = btrfs_super_generation(disk_super);
 	if (disk_super->label[0])
 		printk(KERN_INFO "device label %s ", disk_super->label);
@@ -1089,7 +1097,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
 	if (!path)
 		return -ENOMEM;
 
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_start_transaction(root, 0);
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.type = BTRFS_DEV_ITEM_KEY;
 	key.offset = device->devid;
@@ -1187,7 +1195,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 			goto error_close;
 		}
 		disk_super = (struct btrfs_super_block *)bh->b_data;
-		devid = le64_to_cpu(disk_super->dev_item.devid);
+		devid = btrfs_stack_device_id(&disk_super->dev_item);
 		dev_uuid = disk_super->dev_item.uuid;
 		device = btrfs_find_device(root, devid, dev_uuid,
 					   disk_super->fsid);
@@ -1478,7 +1486,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		goto error;
 	}
 
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_start_transaction(root, 0);
 	lock_chunks(root);
 
 	device->barriers = 1;
@@ -1743,9 +1751,10 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 
 	/* step one, relocate all the extents inside this chunk */
 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_start_transaction(root, 0);
 	BUG_ON(!trans);
 
 	lock_chunks(root);
@@ -1917,7 +1926,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 			break;
 		BUG_ON(ret);
 
-		trans = btrfs_start_transaction(dev_root, 1);
+		trans = btrfs_start_transaction(dev_root, 0);
 		BUG_ON(!trans);
 
 		ret = btrfs_grow_device(trans, device, old_size);
@@ -2086,11 +2095,7 @@ again:
 	}
 
 	/* Shrinking succeeded, else we would be at "done". */
-	trans = btrfs_start_transaction(root, 1);
-	if (!trans) {
-		ret = -ENOMEM;
-		goto done;
-	}
+	trans = btrfs_start_transaction(root, 0);
 	lock_chunks(root);
 
 	device->disk_total_bytes = new_size;
@@ -2191,9 +2196,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		min_stripes = 2;
 	}
 	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
-		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
-		if (num_stripes < 2)
+		if (fs_devices->rw_devices < 2)
 			return -ENOSPC;
+		num_stripes = 2;
 		min_stripes = 2;
 	}
 	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
@@ -2237,8 +2242,16 @@ again:
 		do_div(calc_size, stripe_len);
 		calc_size *= stripe_len;
 	}
+
 	/* we don't want tiny stripes */
-	calc_size = max_t(u64, min_stripe_size, calc_size);
+	if (!looped)
+		calc_size = max_t(u64, min_stripe_size, calc_size);
+
+	/*
+	 * we're about to do_div by the stripe_len so lets make sure
+	 * we end up with something bigger than a stripe
+	 */
+	calc_size = max_t(u64, calc_size, stripe_len * 4);
 
 	do_div(calc_size, stripe_len);
 	calc_size *= stripe_len;
@@ -2638,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	int max_errors = 0;
 	struct btrfs_multi_bio *multi = NULL;
 
-	if (multi_ret && !(rw & (1 << BIO_RW)))
+	if (multi_ret && !(rw & REQ_WRITE))
 		stripes_allocated = 1;
 again:
 	if (multi_ret) {
@@ -2674,7 +2687,7 @@ again:
 		mirror_num = 0;
 
 	/* if our multi bio struct is too small, back off and try again */
-	if (rw & (1 << BIO_RW)) {
+	if (rw & REQ_WRITE) {
 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
 				 BTRFS_BLOCK_GROUP_DUP)) {
 			stripes_required = map->num_stripes;
@@ -2684,7 +2697,7 @@ again:
 			max_errors = 1;
 		}
 	}
-	if (multi_ret && (rw & (1 << BIO_RW)) &&
+	if (multi_ret && (rw & REQ_WRITE) &&
 	    stripes_allocated < stripes_required) {
 		stripes_allocated = map->num_stripes;
 		free_extent_map(em);
@@ -2720,7 +2733,7 @@ again:
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & (1 << BIO_RW)))
+		if (unplug_page || (rw & REQ_WRITE))
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -2731,7 +2744,7 @@ again:
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		if (rw & (1 << BIO_RW))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -2742,7 +2755,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & (1 << BIO_RW)))
+		if (unplug_page || (rw & REQ_WRITE))
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -2932,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	struct btrfs_pending_bios *pending_bios;
 
 	/* don't bother with additional async steps for reads, right now */
-	if (!(rw & (1 << BIO_RW))) {
+	if (!(rw & REQ_WRITE)) {
 		bio_get(bio);
 		submit_bio(rw, bio);
 		bio_put(bio);
@@ -2951,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	bio->bi_rw |= rw;
 
 	spin_lock(&device->io_lock);
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+	if (bio->bi_rw & REQ_SYNC)
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
@@ -3382,6 +3395,8 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	key.type = 0;
again:
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto error;
 	while (1) {
 		leaf = path->nodes[0];
 		slot = path->slots[0];