Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/disk-io.c | 79
-rw-r--r--  fs/btrfs/inode.c   |  1
-rw-r--r--  fs/btrfs/volumes.c | 91
3 files changed, 11 insertions, 160 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1aa8d607bc7..ada1f6bd0a57 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
 	.writepages = btree_writepages,
 	.releasepage = btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
-	.sync_page = block_sync_page,
 #ifdef CONFIG_MIGRATION
 	.migratepage = btree_migratepage,
 #endif
@@ -1331,82 +1330,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 }
 
 /*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct btrfs_device *device;
-	struct btrfs_fs_info *info;
-
-	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-		if (!device->bdev)
-			continue;
-
-		bdi = blk_get_backing_dev_info(device->bdev);
-		if (bdi->unplug_io_fn)
-			bdi->unplug_io_fn(bdi, page);
-	}
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-	struct inode *inode;
-	struct extent_map_tree *em_tree;
-	struct extent_map *em;
-	struct address_space *mapping;
-	u64 offset;
-
-	/* the generic O_DIRECT read code does this */
-	if (1 || !page) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	/*
-	 * page->mapping may change at any time. Get a consistent copy
-	 * and use that for everything below
-	 */
-	smp_mb();
-	mapping = page->mapping;
-	if (!mapping)
-		return;
-
-	inode = mapping->host;
-
-	/*
-	 * don't do the expensive searching for a small number of
-	 * devices
-	 */
-	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	offset = page_offset(page);
-
-	em_tree = &BTRFS_I(inode)->extent_tree;
-	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-	read_unlock(&em_tree->lock);
-	if (!em) {
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-
-	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-		free_extent_map(em);
-		__unplug_io_fn(bdi, page);
-		return;
-	}
-	offset = offset - em->start;
-	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-			  em->block_start + offset, page);
-	free_extent_map(em);
-}
-
-/*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
  */
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 		return err;
 
 	bdi->ra_pages = default_backing_dev_info.ra_pages;
-	bdi->unplug_io_fn = btrfs_unplug_io_fn;
-	bdi->unplug_io_data = info;
 	bdi->congested_fn = btrfs_congested_fn;
 	bdi->congested_data = info;
 	return 0;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb9bd7832b6d..462e08e724b0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7218,7 +7218,6 @@ static const struct address_space_operations btrfs_aops = {
 	.writepage = btrfs_writepage,
 	.writepages = btrfs_writepages,
 	.readpages = btrfs_readpages,
-	.sync_page = block_sync_page,
 	.direct_IO = btrfs_direct_IO,
 	.invalidatepage = btrfs_invalidatepage,
 	.releasepage = btrfs_releasepage,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index af7dbca15276..6e0e82a1b188 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run;
-	unsigned long num_sync_run;
 	unsigned long batch_run = 0;
 	unsigned long limit;
 	unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
-	/* we want to make sure that every time we switch from the sync
-	 * list to the normal list, we unplug
-	 */
-	num_sync_run = 0;
-
 loop:
 	spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
 	spin_unlock(&device->io_lock);
 
-	/*
-	 * if we're doing the regular priority list, make sure we unplug
-	 * for any high prio bios we've sent down
-	 */
-	if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-
 	while (pending) {
 
 		rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-		if (cur->bi_rw & REQ_SYNC)
-			num_sync_run++;
-
 		submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched()) {
-			if (num_sync_run) {
-				blk_run_backing_dev(bdi, NULL);
-				num_sync_run = 0;
-			}
+		if (need_resched())
 			cond_resched();
-		}
 
 		/*
 		 * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
 			 * against it before looping
 			 */
 			last_waited = ioc->last_waited;
-			if (need_resched()) {
-				if (num_sync_run) {
-					blk_run_backing_dev(bdi, NULL);
-					num_sync_run = 0;
-				}
+			if (need_resched())
 				cond_resched();
-			}
 			continue;
 		}
 		spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
 		}
 	}
 
-	if (num_sync_run) {
-		num_sync_run = 0;
-		blk_run_backing_dev(bdi, NULL);
-	}
-	/*
-	 * IO has already been through a long path to get here. Checksumming,
-	 * async helper threads, perhaps compression. We've done a pretty
-	 * good job of collecting a batch of IO and should just unplug
-	 * the device right away.
-	 *
-	 * This will help anyone who is waiting on the IO, they might have
-	 * already unplugged, but managed to do so before the bio they
-	 * cared about found its way down here.
-	 */
-	blk_run_backing_dev(bdi, NULL);
-
 	cond_resched();
 	if (again)
 		goto loop;
@@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
 			     struct btrfs_multi_bio **multi_ret,
-			     int mirror_num, struct page *unplug_page)
+			     int mirror_num)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
@@ -2980,11 +2936,6 @@ again:
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
 
-	if (!em && unplug_page) {
-		kfree(multi);
-		return 0;
-	}
-
 	if (!em) {
 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
 		       (unsigned long long)logical,
@@ -3040,13 +2991,13 @@ again:
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret && !unplug_page)
+	if (!multi_ret)
 		goto out;
 
 	num_stripes = 1;
 	stripe_index = 0;
 	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -3068,7 +3019,7 @@ again:
 		stripe_index = do_div(stripe_nr, factor);
 		stripe_index *= map->sub_stripes;
 
-		if (unplug_page || (rw & REQ_WRITE))
+		if (rw & REQ_WRITE)
 			num_stripes = map->sub_stripes;
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
@@ -3088,22 +3039,10 @@ again:
 		BUG_ON(stripe_index >= map->num_stripes);
 
 	for (i = 0; i < num_stripes; i++) {
-		if (unplug_page) {
-			struct btrfs_device *device;
-			struct backing_dev_info *bdi;
-
-			device = map->stripes[stripe_index].dev;
-			if (device->bdev) {
-				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn)
-					bdi->unplug_io_fn(bdi, unplug_page);
-			}
-		} else {
-			multi->stripes[i].physical =
-				map->stripes[stripe_index].physical +
-				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
-		}
+		multi->stripes[i].physical =
+			map->stripes[stripe_index].physical +
+			stripe_offset + stripe_nr * map->stripe_len;
+		multi->stripes[i].dev = map->stripes[stripe_index].dev;
 		stripe_index++;
 	}
 	if (multi_ret) {
@@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
 	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-				 mirror_num, NULL);
+				 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-		      u64 logical, struct page *page)
-{
-	u64 length = PAGE_CACHE_SIZE;
-	return __btrfs_map_block(map_tree, READ, logical, &length,
-				 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;
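
For context, a minimal sketch (not part of this diff) of the on-stack plugging interface the block layer provides in place of the bdi->unplug_io_fn callbacks removed above. blk_start_plug()/blk_finish_plug() are real kernel APIs; the helper below and its use of a bio_list are purely illustrative.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative only: submit a batch of bios behind an on-stack plug.
 * Requests queue up on the plug while it is held and are flushed when
 * the plug is finished (or when the task sleeps), so no explicit
 * per-device unplug call is needed by the submitter.
 */
static void submit_bio_batch(struct bio_list *bios)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(bios)))
		submit_bio(bio->bi_rw, bio);
	blk_finish_plug(&plug);
}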