Diffstat (limited to 'fs/btrfs/volumes.c')
 -rw-r--r--  fs/btrfs/volumes.c | 91
 1 file changed, 11 insertions(+), 80 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca15276..6e0e82a1b188 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run;
-	unsigned long num_sync_run;
 	unsigned long batch_run = 0;
 	unsigned long limit;
 	unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
-	/* we want to make sure that every time we switch from the sync
-	 * list to the normal list, we unplug
-	 */
-	num_sync_run = 0;
-
 loop:
 	spin_lock(&device->io_lock);
 
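Context for this hunk and the next: run_scheduled_bios() drains two per-device bio lists, taking the high-priority sync list ahead of the regular one. A simplified sketch of that selection, reconstructed (and hedged accordingly) from the fields this patch references, device->pending_sync_bios and device->pending_bios:

	struct btrfs_pending_bios *pending_bios;

	/* prefer bios a caller is actively waiting on */
	if (device->pending_sync_bios.head)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

The deleted comment and the num_sync_run counter existed only to drive the hand-rolled unplugging removed below; the list selection itself is unchanged.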
@@ -223,15 +217,6 @@ loop_lock:
 
 	spin_unlock(&device->io_lock);
 
-	/*
-	 * if we're doing the regular priority list, make sure we unplug
-	 * for any high prio bios we've sent down
-	 */
-	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-
 	while (pending) {
 
 		rmb();
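The hunk above deletes the first of several hand-rolled unplug points. Under the old per-queue plugging model, submitted requests could sit in a plugged request_queue until someone kicked the device, and blk_run_backing_dev() was that kick. An annotated restatement of the removed block, as a sketch for orientation rather than new code:

	/* back on the regular list: kick the device so any high-prio
	 * sync bios we already submitted start dispatching now */
	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}

With the block layer's move to explicit on-stack plugging, submit_bio() no longer leaves requests waiting for such a kick, so the bookkeeping can go.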
@@ -259,19 +244,11 @@ loop_lock:
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (cur->bi_rw & REQ_SYNC)
-			num_sync_run++;
-
 		submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 
 		/*
 		 * we made progress, there is more work to do and the bdi
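After this hunk the submission step no longer special-cases sync bios before yielding. A minimal consolidated sketch of the resulting loop body (list handling elided; names as in the right-hand side of the hunk):

	submit_bio(cur->bi_rw, cur);
	num_run++;
	batch_run++;

	/* yielding is safe without a manual unplug: with per-queue
	 * plugging gone, the bios submitted above are already being
	 * dispatched and cannot be stranded while we are off the CPU */
	if (need_resched())
		cond_resched();

The old code had to unplug before cond_resched() precisely so sync IO would not sit idle in a plugged queue while the task was scheduled out; that hazard no longer exists.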
@@ -304,13 +281,8 @@ loop_lock:
 		 * against it before looping
 		 */
 		last_waited = ioc->last_waited;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 		continue;
 	}
 	spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
 		}
 	}
 
-	if (num_sync_run) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-	/*
-	 * IO has already been through a long path to get here. Checksumming,
-	 * async helper threads, perhaps compression. We've done a pretty
-	 * good job of collecting a batch of IO and should just unplug
-	 * the device right away.
-	 *
-	 * This will help anyone who is waiting on the IO, they might have
-	 * already unplugged, but managed to do so before the bio they
-	 * cared about found its way down here.
-	 */
-	blk_run_backing_dev(bdi, NULL);
-
 	cond_resched();
 	if (again)
 		goto loop;
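The long comment deleted above described forcing the queue to run once a batch of IO had been collected. The replacement in the new model is explicit, caller-owned plugging. A minimal sketch, assuming the blk_start_plug()/blk_finish_plug() API that superseded blk_run_backing_dev(); the iteration helper is hypothetical, condensed from the loop above:

	struct blk_plug plug;

	blk_start_plug(&plug);			/* batch on an on-stack plug */
	while ((cur = next_pending_bio(device)))	/* hypothetical helper */
		submit_bio(cur->bi_rw, cur);
	blk_finish_plug(&plug);			/* dispatch the whole batch */

Waiters no longer need the final blk_run_backing_dev() kick: once the submitter finishes or blocks, its plugged requests are flushed to the driver automatically.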
@@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_multi_bio **multi_ret,
-			     int mirror_num, struct page *unplug_page)
+			     int mirror_num)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -2980,11 +2936,6 @@ again:
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
 
-	if (!em && unplug_page) {
-		kfree(multi);
-		return 0;
-	}
-
 	if (!em) {
 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
 		       (unsigned long long)logical,
@@ -3040,13 +2991,13 @@ again:
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret && !unplug_page)
+	if (!multi_ret)
 		goto out;
 
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
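With unplug_page gone, the RAID1 policy above reads directly: writes fan out to every mirror, while reads go to a single copy, either the mirror the caller pinned via mirror_num or one chosen by find_live_mirror(). A worked example for a hypothetical two-device RAID1 chunk:

	/* write:               num_stripes = map->num_stripes = 2      */
	/* read, mirror_num 2:  num_stripes = 1, stripe_index = 1       */
	/* read, mirror_num 0:  find_live_mirror() picks the device     */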
@@ -3068,7 +3019,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
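Same simplification for RAID10, where the arithmetic is denser. do_div(n, base) divides the 64-bit n in place and returns the remainder, so the remainder selects a group of sub_stripes copies. A worked example with hypothetical numbers (4 devices, sub_stripes = 2, hence factor = num_stripes / sub_stripes = 2):

	u64 stripe_nr = 5;
	int stripe_index = do_div(stripe_nr, factor);	/* stripe_nr = 2, returns 1 */

	stripe_index *= map->sub_stripes;	/* group 1 -> devices 2 and 3 */

A write then sets num_stripes = map->sub_stripes and hits both devices of that group; a read picks one of them.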
@@ -3088,22 +3039,10 @@ again:
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	for (i = 0; i < num_stripes; i++) {
-		if (unplug_page) {
-			struct btrfs_device *device;
-			struct backing_dev_info *bdi;
-
-			device = map->stripes[stripe_index].dev;
-			if (device->bdev) {
-				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn)
-					bdi->unplug_io_fn(bdi, unplug_page);
-			}
-		} else {
-			multi->stripes[i].physical =
-				map->stripes[stripe_index].physical +
-				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
-		}
+		multi->stripes[i].physical =
+			map->stripes[stripe_index].physical +
+			stripe_offset + stripe_nr * map->stripe_len;
+		multi->stripes[i].dev = map->stripes[stripe_index].dev;
 		stripe_index++;
 	}
 	if (multi_ret) {
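The surviving branch is the plain address computation: each of the num_stripes results is the stripe's physical base, plus the offset into the current stripe, plus the whole stripes already consumed. A worked example with hypothetical values (stripe_len = 64 KiB, stripe_nr = 3, stripe_offset = 4 KiB, physical base 1 MiB):

	/* physical = 1 MiB + 4 KiB + 3 * 64 KiB = 1 MiB + 196 KiB */
	multi->stripes[i].physical = map->stripes[stripe_index].physical +
				     stripe_offset + stripe_nr * map->stripe_len;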
@@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		      struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
 	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-				 mirror_num, NULL);
+				 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
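The exported wrapper keeps its signature; only the internal NULL pass-through disappears, so existing callers compile unchanged. A hedged usage sketch (map_tree and logical assumed set up elsewhere; error handling trimmed):

	struct btrfs_multi_bio *multi = NULL;
	u64 length = 4096;
	int ret;

	ret = btrfs_map_block(map_tree, READ, logical, &length, &multi, 0);
	if (!ret) {
		/* multi->num_stripes entries map the logical range */
		kfree(multi);
	}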
@@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page)
-{
-	u64 length = PAGE_CACHE_SIZE;
-	return __btrfs_map_block(map_tree, READ, logical, &length,
-				 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;