aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-11-07 12:35:44 -0500
committerChris Mason <chris.mason@oracle.com>2008-11-07 12:35:44 -0500
commitaf09abfece59aa50bfbf16f6f1f85822554e061f (patch)
treeca419511219b433a2bf45626814777df8667a19f
parent4366211ccd050e00674b3276b5ed81981b2d7793 (diff)
Btrfs: make sure compressed bios don't complete too soon
When writing a compressed extent, a number of bios are created that point to a single struct compressed_bio. At end_io time an atomic counter in the compressed_bio struct makes sure that all of the bios have finished before final end_io processing is done. But when multiple bios are needed to write a compressed extent, the counter was being incremented after the first bio was sent to submit_bio. It is possible the bio will complete before the counter is incremented, making the end_io handler free the compressed_bio struct before processing is finished. The fix is to increment the atomic counter before bio submission, both for compressed reads and writes. Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--fs/btrfs/compression.c17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 284f21025bcc..7397c622fb6a 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -314,6 +314,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
314 PAGE_CACHE_SIZE) { 314 PAGE_CACHE_SIZE) {
315 bio_get(bio); 315 bio_get(bio);
316 316
317 /*
318 * inc the count before we submit the bio so
319 * we know the end IO handler won't happen before
320 * we inc the count. Otherwise, the cb might get
321 * freed before we're done setting it up
322 */
323 atomic_inc(&cb->pending_bios);
317 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 324 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
318 BUG_ON(ret); 325 BUG_ON(ret);
319 326
@@ -323,7 +330,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
323 bio_put(bio); 330 bio_put(bio);
324 331
325 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); 332 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
326 atomic_inc(&cb->pending_bios);
327 bio->bi_private = cb; 333 bio->bi_private = cb;
328 bio->bi_end_io = end_compressed_bio_write; 334 bio->bi_end_io = end_compressed_bio_write;
329 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 335 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -573,6 +579,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
573 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); 579 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
574 BUG_ON(ret); 580 BUG_ON(ret);
575 581
582 /*
583 * inc the count before we submit the bio so
584 * we know the end IO handler won't happen before
585 * we inc the count. Otherwise, the cb might get
586 * freed before we're done setting it up
587 */
588 atomic_inc(&cb->pending_bios);
589
576 ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); 590 ret = btrfs_map_bio(root, READ, comp_bio, 0, 0);
577 BUG_ON(ret); 591 BUG_ON(ret);
578 592
@@ -580,7 +594,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
580 594
581 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, 595 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
582 GFP_NOFS); 596 GFP_NOFS);
583 atomic_inc(&cb->pending_bios);
584 comp_bio->bi_private = cb; 597 comp_bio->bi_private = cb;
585 comp_bio->bi_end_io = end_compressed_bio_read; 598 comp_bio->bi_end_io = end_compressed_bio_read;
586 599