author    Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:58:35 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:58:35 -0500
commit    4c63f5646e405b5010cc9499419060bf2e838f5b (patch)
tree      df91ba315032c8ec4aafeb3ab96fdfa7c6c656e1 /fs/btrfs/volumes.c
parent    cafb0bfca1a73efd6d8a4a6a6a716e6134b96c24 (diff)
parent    69d60eb96ae8a73cf9b79cf28051caf973006011 (diff)
Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core
Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--	fs/btrfs/volumes.c	91
1 file changed, 11 insertions(+), 80 deletions(-)
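The 'for-2.6.39/stack-plug' series merged here removes the old per-backing-device unplug machinery (blk_run_backing_dev(), bdi->unplug_io_fn) that this file relied on and replaces it with explicit on-stack plugging, which is why every unplug call site below can simply be deleted. A minimal sketch of the replacement pattern, assuming the 2.6.39 block-layer API; submit_batch() is a hypothetical helper, not part of this patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: submit a batch of bios under one on-stack plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* start batching I/O on this task */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);		/* flush the whole batch to the queue */
}

The scheduler also flushes a task's plug whenever the task blocks, so waiters no longer depend on an explicit unplug; that is what makes the num_sync_run bookkeeping in run_scheduled_bios() redundant.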
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb81ee40..9d554e8e6583 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run;
-	unsigned long num_sync_run;
 	unsigned long batch_run = 0;
 	unsigned long limit;
 	unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
-	/* we want to make sure that every time we switch from the sync
-	 * list to the normal list, we unplug
-	 */
-	num_sync_run = 0;
-
 loop:
 	spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
 	spin_unlock(&device->io_lock);
 
-	/*
-	 * if we're doing the regular priority list, make sure we unplug
-	 * for any high prio bios we've sent down
-	 */
-	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-
 	while (pending) {
 
 		rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (cur->bi_rw & REQ_SYNC)
-			num_sync_run++;
-
 		submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 
 		/*
 		 * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
 			 * against it before looping
 			 */
 			last_waited = ioc->last_waited;
-			if (need_resched()) {
-				if (num_sync_run) {
-					blk_run_backing_dev(bdi, NULL);
-					num_sync_run = 0;
-				}
+			if (need_resched())
 				cond_resched();
-			}
 			continue;
 		}
 		spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
 		}
 	}
 
-	if (num_sync_run) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-	/*
-	 * IO has already been through a long path to get here. Checksumming,
-	 * async helper threads, perhaps compression. We've done a pretty
-	 * good job of collecting a batch of IO and should just unplug
-	 * the device right away.
-	 *
-	 * This will help anyone who is waiting on the IO, they might have
-	 * already unplugged, but managed to do so before the bio they
-	 * cared about found its way down here.
-	 */
-	blk_run_backing_dev(bdi, NULL);
-
 	cond_resched();
 	if (again)
 		goto loop;
@@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_multi_bio **multi_ret,
-			     int mirror_num, struct page *unplug_page)
+			     int mirror_num)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -2987,11 +2943,6 @@ again:
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
 
-	if (!em && unplug_page) {
-		kfree(multi);
-		return 0;
-	}
-
 	if (!em) {
 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
 		       (unsigned long long)logical,
@@ -3047,13 +2998,13 @@ again:
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret && !unplug_page)
+	if (!multi_ret)
 		goto out;
 
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -3075,7 +3026,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -3095,22 +3046,10 @@ again:
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	for (i = 0; i < num_stripes; i++) {
-		if (unplug_page) {
-			struct btrfs_device *device;
-			struct backing_dev_info *bdi;
-
-			device = map->stripes[stripe_index].dev;
-			if (device->bdev) {
-				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn)
-					bdi->unplug_io_fn(bdi, unplug_page);
-			}
-		} else {
-			multi->stripes[i].physical =
-				map->stripes[stripe_index].physical +
-				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
-		}
+		multi->stripes[i].physical =
+			map->stripes[stripe_index].physical +
+			stripe_offset + stripe_nr * map->stripe_len;
+		multi->stripes[i].dev = map->stripes[stripe_index].dev;
 		stripe_index++;
 	}
 	if (multi_ret) {
@@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		    struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
 	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-				 mirror_num, NULL);
+				 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page)
-{
-	u64 length = PAGE_CACHE_SIZE;
-	return __btrfs_map_block(map_tree, READ, logical, &length,
-				 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;
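Aside: the surviving branch of the per-stripe loop in __btrfs_map_block() maps a logical offset within a chunk to a physical byte address on one device. A standalone sketch of that arithmetic with made-up geometry (two-device RAID0 with 64 KiB stripes; the kernel code uses do_div() for the 64-bit division):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical chunk: physical start of each device's slice. */
	uint64_t physical[2] = { 1ULL << 20, 8ULL << 20 };
	uint64_t stripe_len = 64 * 1024;	/* map->stripe_len */
	uint64_t offset = 200 * 1024;		/* logical offset into the chunk */

	uint64_t stripe_nr = offset / stripe_len;	/* full stripes before offset */
	uint64_t stripe_offset = offset - stripe_nr * stripe_len;
	int stripe_index = (int)(stripe_nr % 2);	/* do_div(stripe_nr, num_stripes) */
	stripe_nr /= 2;					/* stripe number within that device */

	/* Same formula as multi->stripes[i].physical in the hunk above. */
	printf("physical = %llu\n",
	       (unsigned long long)(physical[stripe_index] +
				    stripe_offset + stripe_nr * stripe_len));
	return 0;
}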