author    Christoph Hellwig <hch@lst.de>    2010-08-07 12:20:39 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2010-08-07 12:20:39 -0400
commit    7b6d91daee5cac6402186ff224c3af39d79f4a0e
tree      b1518cf0b6c301178e0a320f80610cd5b3aa7625  /drivers/md/dm.c
parent    33659ebbae262228eef4e0fe990f393d1f0ed941
block: unify flags for struct bio and struct request
Remove the current bio flags and reuse the request flags for the bio, too. This
allows us to more easily trace the type of I/O from the filesystem down to the
block driver. There were two flags in the bio that were missing in the
requests: BIO_RW_UNPLUG and BIO_RW_AHEAD. Also I've renamed two request flags
that had a superfluous RW in them.

Note that the flags are in bio.h despite having the REQ_ name - as blkdev.h
includes bio.h that is the only way to go for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
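The mechanical change visible in the hunks below is that the old BIO_RW_* names
were bit numbers, tested through bio_rw_flagged() or masked with an explicit
1 << flag, while the unified REQ_* names are ready-made bit masks usable
directly with & and ~. A minimal userspace sketch of the two idioms follows;
the bit position and the helper macro here are illustrative stand-ins, not the
kernel's actual flag layout or headers:

    #include <stdio.h>

    /* Old scheme (illustrative): BIO_RW_* identifiers were bit NUMBERS,
     * so every test or mask needed an explicit shift. */
    #define BIO_RW_BARRIER_BITNR    5
    #define bio_rw_flagged(rw, nr)  (((rw) & (1UL << (nr))) != 0)

    /* New scheme (illustrative): REQ_* identifiers are bit MASKS, so
     * callers apply them directly with & and ~, as the dm.c hunks do. */
    #define REQ_HARDBARRIER         (1UL << 5)

    int main(void)
    {
            unsigned long bi_rw = REQ_HARDBARRIER;

            /* Old-style test: the helper hides the shift. */
            printf("old test: %d\n",
                   bio_rw_flagged(bi_rw, BIO_RW_BARRIER_BITNR));

            /* New-style test: a plain mask check. */
            printf("new test: %d\n", (bi_rw & REQ_HARDBARRIER) != 0);

            /* Clearing: ~(1 << BIO_RW_BARRIER) becomes ~REQ_HARDBARRIER. */
            bi_rw &= ~REQ_HARDBARRIER;
            printf("cleared:  %d\n", (bi_rw & REQ_HARDBARRIER) != 0);

            return 0;
    }

Because the same masks now appear in both bio->bi_rw and request->cmd_flags,
tracing code can compare the two fields directly, which is the motivation the
commit message gives.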
Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd51501..d6f77baeafd6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
 		 */
 		spin_lock_irqsave(&md->deferred_lock, flags);
 		if (__noflush_suspending(md)) {
-			if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+			if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 				bio_list_add_head(&md->deferred,
 						  io->bio);
 		} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);