author		Christoph Hellwig <hch@lst.de>	2010-08-07 12:20:39 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-08-07 12:20:39 -0400
commit		7b6d91daee5cac6402186ff224c3af39d79f4a0e
tree		b1518cf0b6c301178e0a320f80610cd5b3aa7625	/block/blk-core.c
parent		33659ebbae262228eef4e0fe990f393d1f0ed941
block: unify flags for struct bio and struct request
Remove the current bio flags and reuse the request flags for the bio, too. This makes it easier to trace the type of I/O from the filesystem down to the block driver. There were two flags in the bio that were missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD. Also I've renamed two request flags that had a superfluous RW in them.

Note that the flags are in bio.h despite having the REQ_ name - as blkdev.h includes bio.h that is the only way to go for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
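As a standalone illustration of what the unification buys (a sketch with made-up bit values, not the kernel's actual definitions in bio.h), the same REQ_* test now applies equally to a bio's bi_rw and a request's cmd_flags:

#include <stdio.h>

/* Illustrative bit values only; the real ones live in bio.h. */
#define REQ_WRITE	(1UL << 0)
#define REQ_SYNC	(1UL << 1)

struct bio_sketch     { unsigned long bi_rw; };
struct request_sketch { unsigned long cmd_flags; };

int main(void)
{
	struct bio_sketch bio = { .bi_rw = REQ_WRITE | REQ_SYNC };
	struct request_sketch req = { .cmd_flags = bio.bi_rw };

	/* one flag namespace, so the same predicate works at both layers */
	printf("bio sync=%d req sync=%d\n",
	       !!(bio.bi_rw & REQ_SYNC), !!(req.cmd_flags & REQ_SYNC));
	return 0;
}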
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	37
1 file changed, 10 insertions(+), 27 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index dca43a31e725..66c3cfe94d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
-	/*
-	 * Inherit FAILFAST from bio (for read-ahead, and explicit
-	 * FAILFAST).  FAILFAST flags are identical for req and bio.
-	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
-	else
-		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
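The hunk above collapses the old per-flag translation chain into a single mask copy, with read-ahead as the one special case: it expands into the full failfast mask, since read-ahead I/O is best-effort and should fail fast rather than be retried. A minimal sketch of that inheritance rule, again with illustrative bit values and a hypothetical REQ_COMMON_MASK rather than the kernel's:

#include <assert.h>

/* Illustrative values; this REQ_COMMON_MASK is hypothetical. */
#define REQ_SYNC		(1U << 1)
#define REQ_RAHEAD		(1U << 3)
#define REQ_FAILFAST_DEV	(1U << 4)
#define REQ_FAILFAST_TRANSPORT	(1U << 5)
#define REQ_FAILFAST_DRIVER	(1U << 6)
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK		(REQ_SYNC | REQ_FAILFAST_MASK)

static unsigned int inherit_flags(unsigned long bi_rw)
{
	unsigned int cmd_flags = bi_rw & REQ_COMMON_MASK;

	/* read-ahead is best-effort: fail fast rather than retry */
	if (bi_rw & REQ_RAHEAD)
		cmd_flags |= REQ_FAILFAST_MASK;
	return cmd_flags;
}

int main(void)
{
	/* read-ahead alone still yields all failfast bits */
	assert(inherit_flags(REQ_RAHEAD) == REQ_FAILFAST_MASK);
	/* explicit flags pass straight through via the common mask */
	assert(inherit_flags(REQ_SYNC | REQ_FAILFAST_DEV) ==
	       (REQ_SYNC | REQ_FAILFAST_DEV));
	return 0;
}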
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+	if ((bio->bi_rw & REQ_HARDBARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-	    !blk_queue_discard(q)) {
+	if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
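The final hunk depends on the invariant its comment states: bit 0 of both flag words encodes the data direction, so REQ_WRITE (bit 0) can be masked straight across without any translation. A toy check of that convention, assuming nothing beyond it:

#include <assert.h>

#define REQ_WRITE (1UL << 0)	/* bit 0: 0 = read, 1 = write */

int main(void)
{
	unsigned long bi_rw = REQ_WRITE;	/* a write bio */
	unsigned long cmd_flags = 0;

	/* the masked OR from blk_rq_bio_prep() carries the direction */
	cmd_flags |= bi_rw & REQ_WRITE;
	assert((cmd_flags & REQ_WRITE) == (bi_rw & REQ_WRITE));
	return 0;
}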