Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c  |  2
-rw-r--r--  block/blk-core.c     | 37
-rw-r--r--  block/blk-map.c      |  2
-rw-r--r--  block/blk-merge.c    |  2
-rw-r--r--  block/cfq-iosched.c  | 14
-rw-r--r--  block/elevator.c     |  3
6 files changed, 21 insertions(+), 39 deletions(-)
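Every hunk below applies the same conversion: the bio-private flag namespace (BIO_RW_* bit numbers, tested through the bio_rw_flagged() helper) is replaced by the REQ_* masks that struct request already uses, tested directly against bio->bi_rw. As a minimal stand-alone sketch of the two idioms (the bit values here are placeholders, not the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

/* Old scheme: BIO_RW_* were bit numbers, private to bios. */
enum bio_rw_flags { BIO_RW_SYNCIO = 4 };        /* placeholder value */

/* New scheme: REQ_* are masks, shared by bios and requests. */
#define REQ_SYNC (1U << 4)                      /* placeholder value */

struct bio { unsigned long bi_rw; };

/* The helper this patch removes: shift the bit number, then test. */
static bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
        return (bio->bi_rw & (1UL << flag)) != 0;
}

int main(void)
{
        struct bio bio = { .bi_rw = REQ_SYNC };

        /* Old idiom. */
        printf("%d\n", bio_rw_flagged(&bio, BIO_RW_SYNCIO));
        /* New idiom: test the shared mask directly. */
        printf("%d\n", (bio.bi_rw & REQ_SYNC) != 0);
        return 0;
}

With a single namespace, flags can be copied between a bio and a request without per-flag translation, which is what shrinks init_request_from_bio() in the blk-core.c diff below.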
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 74e404393172..7c6f4a714687 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
         /* initialize proxy request and queue it */
         blk_rq_init(q, rq);
         if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                rq->cmd_flags |= REQ_RW;
+                rq->cmd_flags |= REQ_WRITE;
         if (q->ordered & QUEUE_ORDERED_DO_FUA)
                 rq->cmd_flags |= REQ_FUA;
         init_request_from_bio(rq, q->orig_bar_rq->bio);
diff --git a/block/blk-core.c b/block/blk-core.c
index dca43a31e725..66c3cfe94d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         req->cpu = bio->bi_comp_cpu;
         req->cmd_type = REQ_TYPE_FS;
 
-        /*
-         * Inherit FAILFAST from bio (for read-ahead, and explicit
-         * FAILFAST). FAILFAST flags are identical for req and bio.
-         */
-        if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+        if (bio->bi_rw & REQ_RAHEAD)
                 req->cmd_flags |= REQ_FAILFAST_MASK;
-        else
-                req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-        if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-                req->cmd_flags |= REQ_DISCARD;
-        if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-                req->cmd_flags |= REQ_HARDBARRIER;
-        if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-                req->cmd_flags |= REQ_RW_SYNC;
-        if (bio_rw_flagged(bio, BIO_RW_META))
-                req->cmd_flags |= REQ_RW_META;
-        if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-                req->cmd_flags |= REQ_NOIDLE;
 
         req->errors = 0;
         req->__sector = bio->bi_sector;
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         int el_ret;
         unsigned int bytes = bio->bi_size;
         const unsigned short prio = bio_prio(bio);
-        const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-        const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+        const bool sync = (bio->bi_rw & REQ_SYNC);
+        const bool unplug = (bio->bi_rw & REQ_UNPLUG);
         const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
         int rw_flags;
 
-        if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+        if ((bio->bi_rw & REQ_HARDBARRIER) &&
             (q->next_ordered == QUEUE_ORDERED_NONE)) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
         spin_lock_irq(q->queue_lock);
 
-        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+        if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
                 goto get_rq;
 
         el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
          */
         rw_flags = bio_data_dir(bio);
         if (sync)
-                rw_flags |= REQ_RW_SYNC;
+                rw_flags |= REQ_SYNC;
 
         /*
          * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
                 goto end_io;
         }
 
-        if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+        if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
                      nr_sectors > queue_max_hw_sectors(q))) {
                 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                        bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
         if (bio_check_eod(bio, nr_sectors))
                 goto end_io;
 
-        if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-            !blk_queue_discard(q)) {
+        if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
                 err = -EOPNOTSUPP;
                 goto end_io;
         }
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                    struct bio *bio)
 {
         /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-        rq->cmd_flags |= bio->bi_rw & REQ_RW;
+        rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
         if (bio_has_data(bio)) {
                 rq->nr_phys_segments = bio_phys_segments(q, bio);
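The init_request_from_bio() hunk above is the payoff of the shared namespace: the old chain of per-flag translations collapses into one AND with REQ_COMMON_MASK, plus the read-ahead special case. A compilable sketch of the pattern, assuming REQ_COMMON_MASK is simply the OR of the flag bits that bio and request interpret alike (the bit values are placeholders, not the kernel's):

#include <stdio.h>

#define REQ_WRITE         (1U << 0)     /* placeholder bit values */
#define REQ_SYNC          (1U << 4)
#define REQ_META          (1U << 5)
#define REQ_DISCARD       (1U << 7)
#define REQ_NOIDLE        (1U << 8)
#define REQ_RAHEAD        (1U << 9)     /* read-ahead: not in the common set here */
#define REQ_FAILFAST_MASK (1U << 10)    /* collapsed to one bit for brevity */

/* Assumed shape: the OR of every flag common to bio and request. */
#define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_SYNC | REQ_META | REQ_DISCARD | REQ_NOIDLE | \
         REQ_FAILFAST_MASK)

struct bio     { unsigned long bi_rw; };
struct request { unsigned int cmd_flags; };

static void init_request_from_bio(struct request *req, const struct bio *bio)
{
        /* One AND replaces the old chain of bio_rw_flagged() tests. */
        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;

        /* Read-ahead is best-effort, so it implies "fail fast". */
        if (bio->bi_rw & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
}

int main(void)
{
        struct bio bio = { .bi_rw = REQ_WRITE | REQ_SYNC | REQ_RAHEAD };
        struct request req = { 0 };

        init_request_from_bio(&req, &bio);
        printf("cmd_flags = %#x\n", req.cmd_flags);     /* 0x411 */
        return 0;
}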
diff --git a/block/blk-map.c b/block/blk-map.c
index 9083cf0180cc..c65d7593f7f1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                 return PTR_ERR(bio);
 
         if (rq_data_dir(rq) == WRITE)
-                bio->bi_rw |= (1 << BIO_RW);
+                bio->bi_rw |= (1 << REQ_WRITE);
 
         if (do_copy)
                 rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 87e4fb7d0e98..4852475521ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ new_segment:
         }
 
         if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-                if (rq->cmd_flags & REQ_RW)
+                if (rq->cmd_flags & REQ_WRITE)
                         memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                 sg->page_link &= ~0x02;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d4edeb8fceb8..eb4086f7dfef 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-        return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
                 return rq1;
         else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                 return rq2;
-        if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
+        if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
                 return rq1;
-        else if ((rq2->cmd_flags & REQ_RW_META) &&
-                 !(rq1->cmd_flags & REQ_RW_META))
+        else if ((rq2->cmd_flags & REQ_META) &&
+                 !(rq1->cmd_flags & REQ_META))
                 return rq2;
 
         s1 = blk_rq_pos(rq1);
@@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
         cfqq->cfqd->rq_queued--;
         cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                            rq_data_dir(rq), rq_is_sync(rq));
-        if (rq->cmd_flags & REQ_RW_META) {
+        if (rq->cmd_flags & REQ_META) {
                 WARN_ON(!cfqq->meta_pending);
                 cfqq->meta_pending--;
         }
@@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
          * So both queues are sync. Let the new request get disk time if
          * it's a metadata request and the current queue is doing regular IO.
          */
-        if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
+        if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
                 return true;
 
         /*
@@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         struct cfq_io_context *cic = RQ_CIC(rq);
 
         cfqd->rq_queued++;
-        if (rq->cmd_flags & REQ_RW_META)
+        if (rq->cmd_flags & REQ_META)
                 cfqq->meta_pending++;
 
         cfq_update_io_thinktime(cfqd, cic);
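For cfq-iosched the conversion is mechanical (REQ_RW_META becomes REQ_META, REQ_RW_SYNC becomes REQ_SYNC), but the first hunk's semantics are worth restating: a bio counts as synchronous if it is a read or is explicitly flagged REQ_SYNC. A toy model of cfq_bio_sync(), with placeholder flag values:

#include <stdbool.h>
#include <stdio.h>

#define REQ_WRITE (1U << 0)     /* placeholder bit values */
#define REQ_SYNC  (1U << 4)

struct bio { unsigned long bi_rw; };

/* Toy model: bio_data_dir() == READ means the write bit is clear. */
static bool cfq_bio_sync(const struct bio *bio)
{
        return !(bio->bi_rw & REQ_WRITE) || (bio->bi_rw & REQ_SYNC);
}

int main(void)
{
        struct bio read        = { .bi_rw = 0 };
        struct bio async_write = { .bi_rw = REQ_WRITE };
        struct bio sync_write  = { .bi_rw = REQ_WRITE | REQ_SYNC };

        printf("read:        %d\n", cfq_bio_sync(&read));        /* 1 */
        printf("async write: %d\n", cfq_bio_sync(&async_write)); /* 0 */
        printf("sync write:  %d\n", cfq_bio_sync(&sync_write));  /* 1 */
        return 0;
}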
diff --git a/block/elevator.c b/block/elevator.c
index aa99b59c03d6..816a7c8d6394 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
         /*
          * Don't merge file system requests and discard requests
          */
-        if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-            bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+        if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
                 return 0;
 
         /*
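The elevator hunk tightens the same rule into one line: a request and a bio may only merge if they agree on REQ_DISCARD, since discards and file-system data must never share a request. A stand-alone sketch of that predicate, with stand-in types and a placeholder flag value:

#include <stdbool.h>
#include <stdio.h>

#define REQ_DISCARD (1U << 7)   /* placeholder bit value */

struct bio     { unsigned long bi_rw; };
struct request { struct bio *bio; };

static bool discard_merge_ok(const struct request *rq, const struct bio *bio)
{
        /* Mixed discard/non-discard pairs must never be merged. */
        return (bio->bi_rw & REQ_DISCARD) == (rq->bio->bi_rw & REQ_DISCARD);
}

int main(void)
{
        struct bio fs   = { .bi_rw = 0 };
        struct bio trim = { .bi_rw = REQ_DISCARD };
        struct request rq = { .bio = &fs };

        printf("fs+fs:   %d\n", discard_merge_ok(&rq, &fs));   /* 1 */
        printf("fs+trim: %d\n", discard_merge_ok(&rq, &trim)); /* 0 */
        return 0;
}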