author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 13:16:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-24 13:16:26 -0400
commit		6c5103890057b1bb781b26b7aae38d33e4c517d8 (patch)
tree		e6e57961dcddcb5841acb34956e70b9dc696a880 /fs/btrfs
parent		3dab04e6978e358ad2307bca563fabd6c5d2c58b (diff)
parent		9d2e157d970a73b3f270b631828e03eb452d525e (diff)
Merge branch 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block: (65 commits)
Documentation/iostats.txt: bit-size reference etc.
cfq-iosched: removing unnecessary think time checking
cfq-iosched: Don't clear queue stats when preempt.
blk-throttle: Reset group slice when limits are changed
blk-cgroup: Only give unaccounted_time under debug
cfq-iosched: Don't set active queue in preempt
block: fix non-atomic access to genhd inflight structures
block: attempt to merge with existing requests on plug flush
block: NULL dereference on error path in __blkdev_get()
cfq-iosched: Don't update group weights when on service tree
fs: assign sb->s_bdi to default_backing_dev_info if the bdi is going away
block: Require subsystems to explicitly allocate bio_set integrity mempool
jbd2: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
jbd: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
fs: make fsync_buffers_list() plug
mm: make generic_writepages() use plugging
blk-cgroup: Add unaccounted time to timeslice_used.
block: fixup plugging stubs for !CONFIG_BLOCK
block: remove obsolete comments for blkdev_issue_zeroout.
blktrace: Use rq->cmd_flags directly in blk_add_trace_rq.
...
Fix up conflicts in fs/{aio.c,super.c}
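
The common thread in this series is the removal of per-device unplug callbacks in favor of explicit, on-stack per-task plugging (blk_start_plug/blk_finish_plug). As a rough sketch of the replacement pattern (not code from this merge; submit_batch and its arguments are hypothetical), a submitter now brackets its bio loop like this:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hypothetical helper illustrating the explicit plugging model that
     * replaces bdi->unplug_io_fn: bios collect on a per-task plug list
     * and are dispatched as a batch when the plug is finished (or when
     * the task sleeps). */
    static void submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);  /* start on-stack request batching */
            for (i = 0; i < nr; i++)
                    submit_bio(bios[i]->bi_rw, bios[i]);
            blk_finish_plug(&plug); /* flush the batch to the driver */
    }

Since the block layer flushes a task's plug automatically when it schedules, filesystems have nothing left to unplug by hand; the btrfs changes below delete that machinery.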
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/disk-io.c    |  79
-rw-r--r--  fs/btrfs/extent_io.c  |   2
-rw-r--r--  fs/btrfs/inode.c      |   1
-rw-r--r--  fs/btrfs/volumes.c    |  91
4 files changed, 12 insertions(+), 161 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 100b07f021b4..830d261d0e6b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
         .writepages = btree_writepages,
         .releasepage = btree_releasepage,
         .invalidatepage = btree_invalidatepage,
-        .sync_page = block_sync_page,
 #ifdef CONFIG_MIGRATION
         .migratepage = btree_migratepage,
 #endif
@@ -1331,82 +1330,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 }
 
 /*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-        struct btrfs_device *device;
-        struct btrfs_fs_info *info;
-
-        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
-        list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
-                if (!device->bdev)
-                        continue;
-
-                bdi = blk_get_backing_dev_info(device->bdev);
-                if (bdi->unplug_io_fn)
-                        bdi->unplug_io_fn(bdi, page);
-        }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-        struct inode *inode;
-        struct extent_map_tree *em_tree;
-        struct extent_map *em;
-        struct address_space *mapping;
-        u64 offset;
-
-        /* the generic O_DIRECT read code does this */
-        if (1 || !page) {
-                __unplug_io_fn(bdi, page);
-                return;
-        }
-
-        /*
-         * page->mapping may change at any time. Get a consistent copy
-         * and use that for everything below
-         */
-        smp_mb();
-        mapping = page->mapping;
-        if (!mapping)
-                return;
-
-        inode = mapping->host;
-
-        /*
-         * don't do the expensive searching for a small number of
-         * devices
-         */
-        if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
-                __unplug_io_fn(bdi, page);
-                return;
-        }
-
-        offset = page_offset(page);
-
-        em_tree = &BTRFS_I(inode)->extent_tree;
-        read_lock(&em_tree->lock);
-        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-        read_unlock(&em_tree->lock);
-        if (!em) {
-                __unplug_io_fn(bdi, page);
-                return;
-        }
-
-        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
-                free_extent_map(em);
-                __unplug_io_fn(bdi, page);
-                return;
-        }
-        offset = offset - em->start;
-        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
-                          em->block_start + offset, page);
-        free_extent_map(em);
-}
-
-/*
  * If this fails, caller must call bdi_destroy() to get rid of the
  * bdi again.
  */
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
                 return err;
 
         bdi->ra_pages = default_backing_dev_info.ra_pages;
-        bdi->unplug_io_fn = btrfs_unplug_io_fn;
-        bdi->unplug_io_data = info;
         bdi->congested_fn = btrfs_congested_fn;
         bdi->congested_data = info;
         return 0;
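
Both btree_aops above and btrfs_aops in inode.c below also drop their .sync_page hook: block_sync_page() existed only to kick (unplug) the request queue when someone waited on a locked page, and with per-task plugs flushed on context switch the callback is dead weight. The bdi btrfs registers now carries only readahead and congestion state; for reference, the tail of setup_bdi() after this merge, taken from the right-hand side of the hunk above (the registration earlier in the function is unchanged and elided here):

    bdi->ra_pages = default_backing_dev_info.ra_pages;
    bdi->congested_fn = btrfs_congested_fn;
    bdi->congested_data = info;
    return 0;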
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 714adc4ac4c2..b5b92824a271 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2188,7 +2188,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         unsigned long nr_written = 0;
 
         if (wbc->sync_mode == WB_SYNC_ALL)
-                write_flags = WRITE_SYNC_PLUG;
+                write_flags = WRITE_SYNC;
         else
                 write_flags = WRITE;
 
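
WRITE_SYNC_PLUG was the variant of WRITE_SYNC that left unplugging to the caller; with explicit plugging there is nothing to defer, so WB_SYNC_ALL writeback simply tags its bios WRITE_SYNC. The new-side logic condenses to one line (a restatement, assuming the surrounding __extent_writepage context):

    write_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;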
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 512c3d1da083..119520bdb9a5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7340,7 +7340,6 @@ static const struct address_space_operations btrfs_aops = {
         .writepage = btrfs_writepage,
         .writepages = btrfs_writepages,
         .readpages = btrfs_readpages,
-        .sync_page = block_sync_page,
         .direct_IO = btrfs_direct_IO,
         .invalidatepage = btrfs_invalidatepage,
         .releasepage = btrfs_releasepage,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb81ee40..9d554e8e6583 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
         struct bio *cur;
         int again = 0;
         unsigned long num_run;
-        unsigned long num_sync_run;
         unsigned long batch_run = 0;
         unsigned long limit;
         unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
         limit = btrfs_async_submit_limit(fs_info);
         limit = limit * 2 / 3;
 
-        /* we want to make sure that every time we switch from the sync
-         * list to the normal list, we unplug
-         */
-        num_sync_run = 0;
-
 loop:
         spin_lock(&device->io_lock);
 
@@ -223,15 +217,6 @@ loop_lock:
 
         spin_unlock(&device->io_lock);
 
-        /*
-         * if we're doing the regular priority list, make sure we unplug
-         * for any high prio bios we've sent down
-         */
-        if (pending_bios == &device->pending_bios && num_sync_run > 0) {
-                num_sync_run = 0;
-                blk_run_backing_dev(bdi, NULL);
-        }
-
         while (pending) {
 
                 rmb();
@@ -259,19 +244,11 @@ loop_lock:
 
                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-                if (cur->bi_rw & REQ_SYNC)
-                        num_sync_run++;
-
                 submit_bio(cur->bi_rw, cur);
                 num_run++;
                 batch_run++;
-                if (need_resched()) {
-                        if (num_sync_run) {
-                                blk_run_backing_dev(bdi, NULL);
-                                num_sync_run = 0;
-                        }
+                if (need_resched())
                         cond_resched();
-                }
 
                 /*
                  * we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
                          * against it before looping
                          */
                         last_waited = ioc->last_waited;
-                        if (need_resched()) {
-                                if (num_sync_run) {
-                                        blk_run_backing_dev(bdi, NULL);
-                                        num_sync_run = 0;
-                                }
+                        if (need_resched())
                                 cond_resched();
-                        }
                         continue;
                 }
                 spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
                 }
         }
 
-        if (num_sync_run) {
-                num_sync_run = 0;
-                blk_run_backing_dev(bdi, NULL);
-        }
-        /*
-         * IO has already been through a long path to get here. Checksumming,
-         * async helper threads, perhaps compression. We've done a pretty
-         * good job of collecting a batch of IO and should just unplug
-         * the device right away.
-         *
-         * This will help anyone who is waiting on the IO, they might have
-         * already unplugged, but managed to do so before the bio they
-         * cared about found its way down here.
-         */
-        blk_run_backing_dev(bdi, NULL);
-
         cond_resched();
         if (again)
                 goto loop;
@@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                              u64 logical, u64 *length,
                              struct btrfs_multi_bio **multi_ret,
-                             int mirror_num, struct page *unplug_page)
+                             int mirror_num)
 {
         struct extent_map *em;
         struct map_lookup *map;
@@ -2987,11 +2943,6 @@ again:
         em = lookup_extent_mapping(em_tree, logical, *length);
         read_unlock(&em_tree->lock);
 
-        if (!em && unplug_page) {
-                kfree(multi);
-                return 0;
-        }
-
         if (!em) {
                 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
                        (unsigned long long)logical,
@@ -3047,13 +2998,13 @@ again:
                 *length = em->len - offset;
         }
 
-        if (!multi_ret && !unplug_page)
+        if (!multi_ret)
                 goto out;
 
         num_stripes = 1;
         stripe_index = 0;
         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-                if (unplug_page || (rw & REQ_WRITE))
+                if (rw & REQ_WRITE)
                         num_stripes = map->num_stripes;
                 else if (mirror_num)
                         stripe_index = mirror_num - 1;
@@ -3075,7 +3026,7 @@ again:
                 stripe_index = do_div(stripe_nr, factor);
                 stripe_index *= map->sub_stripes;
 
-                if (unplug_page || (rw & REQ_WRITE))
+                if (rw & REQ_WRITE)
                         num_stripes = map->sub_stripes;
                 else if (mirror_num)
                         stripe_index += mirror_num - 1;
@@ -3095,22 +3046,10 @@ again:
         BUG_ON(stripe_index >= map->num_stripes);
 
         for (i = 0; i < num_stripes; i++) {
-                if (unplug_page) {
-                        struct btrfs_device *device;
-                        struct backing_dev_info *bdi;
-
-                        device = map->stripes[stripe_index].dev;
-                        if (device->bdev) {
-                                bdi = blk_get_backing_dev_info(device->bdev);
-                                if (bdi->unplug_io_fn)
-                                        bdi->unplug_io_fn(bdi, unplug_page);
-                        }
-                } else {
-                        multi->stripes[i].physical =
-                                map->stripes[stripe_index].physical +
-                                stripe_offset + stripe_nr * map->stripe_len;
-                        multi->stripes[i].dev = map->stripes[stripe_index].dev;
-                }
+                multi->stripes[i].physical =
+                        map->stripes[stripe_index].physical +
+                        stripe_offset + stripe_nr * map->stripe_len;
+                multi->stripes[i].dev = map->stripes[stripe_index].dev;
                 stripe_index++;
         }
         if (multi_ret) {
@@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                       struct btrfs_multi_bio **multi_ret, int mirror_num)
 {
         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
-                                 mirror_num, NULL);
+                                 mirror_num);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
         return 0;
 }
 
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-                      u64 logical, struct page *page)
-{
-        u64 length = PAGE_CACHE_SIZE;
-        return __btrfs_map_block(map_tree, READ, logical, &length,
-                                 NULL, 0, page);
-}
-
 static void end_bio_multi_stripe(struct bio *bio, int err)
 {
         struct btrfs_multi_bio *multi = bio->bi_private;
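
With the unplug path deleted, __btrfs_map_block() is again purely a logical-to-physical mapping helper. A hedged sketch of a typical call after this merge (variables and error handling are illustrative, not lifted from the tree):

    struct btrfs_multi_bio *multi = NULL;
    u64 length = PAGE_CACHE_SIZE;
    int ret;

    /* Map one page of logical address space onto its stripes; the old
     * trailing struct page *unplug_page argument no longer exists. */
    ret = btrfs_map_block(map_tree, READ, logical, &length, &multi, 0);
    if (!ret && multi) {
            /* multi->num_stripes entries pair a device with a physical offset */
            kfree(multi);
    }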