Diffstat (limited to 'block')
 block/as-iosched.c |   2 +-
 block/elevator.c   |  26 ++++++------
 block/ll_rw_blk.c  | 101 +++++++++++++-----------------------
 block/scsi_ioctl.c |   6 +++---
 4 files changed, 52 insertions(+), 83 deletions(-)
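
This patch splits the old catch-all rq->flags word in two: a command type (rq->cmd_type, exactly one value per request) and command flags (rq->cmd_flags, an OR of independent modifier bits), then converts every user mechanically. A minimal sketch of the resulting layout, assuming definitions along the lines of this patch's include/linux/blkdev.h changes (the header itself is outside the 'block' diffstat filter, and the field types are approximated; only the three REQ_TYPE_* values shown actually appear in the hunks below):

	/* Sketch, not the verbatim header: the split this diff relies on. */
	enum rq_cmd_type_bits {
		REQ_TYPE_FS = 1,	/* filesystem (read/write) request */
		REQ_TYPE_BLOCK_PC,	/* SCSI packet command (SG_IO and friends) */
		REQ_TYPE_SPECIAL,	/* driver-private request */
		/* further types elided */
	};

	struct request {
		/* ... */
		unsigned int cmd_type;		/* exactly one REQ_TYPE_* value */
		unsigned int cmd_flags;		/* OR of REQ_* modifier bits */
		/* ... */
	};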
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5da56d48fbd3..ad1cc4077819 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1335,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;

 	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->flags & REQ_RW_SYNC))
+			|| (arq->request->cmd_flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
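
The hunk above is purely mechanical: AS still classifies a request as synchronous when it is a READ or when the submitter marked it REQ_RW_SYNC; only the field name changes. As a hedged illustration (this helper is not part of the patch, just the same test factored out):

	static inline int rq_is_sync(struct request *rq)
	{
		return rq_data_dir(rq) == READ || (rq->cmd_flags & REQ_RW_SYNC);
	}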
diff --git a/block/elevator.c b/block/elevator.c
index 9b72dc7c8a5c..4ac97b642042 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);

-		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 		e->ops->elevator_deactivate_req_fn(q, rq);
 	}

-	rq->flags &= ~REQ_STARTED;
+	rq->cmd_flags &= ~REQ_STARTED;

 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)

 	switch (where) {
 	case ELEVATOR_INSERT_FRONT:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;

 		list_add(&rq->queuelist, &q->queue_head);
 		break;

 	case ELEVATOR_INSERT_BACK:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)

 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
-		rq->flags |= REQ_SORTED;
+		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
@@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 * insertion; otherwise, requests should be requeued
 		 * in ordseq order.
 		 */
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;

 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
@@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
+		rq->cmd_flags |= REQ_ORDERED_COLOR;

-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * toggle ordered color
 		 */
@@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;

 	if (plug)
@@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;

 	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->flags & REQ_STARTED)) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
 			elevator_t *e = q->elevator;

 			/*
@@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->flags |= REQ_STARTED;
+			rq->cmd_flags |= REQ_STARTED;
 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}

@@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->boundary_rq = NULL;
 		}

-		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;

 		ret = q->prep_rq_fn(q, rq);
@@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
 			nr_bytes = rq->data_len;

 			blkdev_dequeue_request(rq);
-			rq->flags |= REQ_QUIET;
+			rq->cmd_flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
 			end_that_request_last(rq, 0);
 		} else {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 51dc0edf76e0..9b91bb70c5ed 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;

-	if ((rq->flags & REQ_ORDERED_COLOR) ==
-	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
 	else
 		return QUEUE_ORDSEQ_DONE;
@@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
 		end_io = post_flush_end_io;
 	}

+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->flags = REQ_HARDBARRIER;
 	rq->elevator_private = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
@@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
 	rq_init(q, rq);
-	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
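
The start_ordered() hunk is the one place the conversion is not a plain rename. The old code relied on bio_data_dir() returning READ (0) or WRITE (1) and on the read/write flag being bit 0, so the direction could be assigned straight into the flags word; the new code clears cmd_flags before rq_init() and spells the direction out. A hedged before/after, with the numeric assumption in the comment:

	/* old: works only because WRITE == 1 and the RW flag is bit 0 */
	rq->flags = bio_data_dir(q->orig_bar_rq->bio);

	/* new: explicit, independent of the bit layout */
	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
		rq->cmd_flags |= REQ_RW;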
@@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	}

 	list_del_init(&rq->queuelist);
-	rq->flags &= ~REQ_QUEUED;
+	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;

 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;

-	if (unlikely((rq->flags & REQ_QUEUED))) {
+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
@@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)

 	__set_bit(tag, bqt->tag_map);

-	rq->flags |= REQ_QUEUED;
+	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
@@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			printk(KERN_ERR
 			       "%s: bad tag found on list\n", __FUNCTION__);
 			list_del_init(&rq->queuelist);
-			rq->flags &= ~REQ_QUEUED;
+			rq->cmd_flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);

-		rq->flags &= ~REQ_STARTED;
+		rq->cmd_flags &= ~REQ_STARTED;
 		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 	}
 }

 EXPORT_SYMBOL(blk_queue_invalidate_tags);

-static const char * const rq_flags[] = {
-	"REQ_RW",
-	"REQ_FAILFAST",
-	"REQ_SORTED",
-	"REQ_SOFTBARRIER",
-	"REQ_HARDBARRIER",
-	"REQ_FUA",
-	"REQ_CMD",
-	"REQ_NOMERGE",
-	"REQ_STARTED",
-	"REQ_DONTPREP",
-	"REQ_QUEUED",
-	"REQ_ELVPRIV",
-	"REQ_PC",
-	"REQ_BLOCK_PC",
-	"REQ_SENSE",
-	"REQ_FAILED",
-	"REQ_QUIET",
-	"REQ_SPECIAL",
-	"REQ_DRIVE_CMD",
-	"REQ_DRIVE_TASK",
-	"REQ_DRIVE_TASKFILE",
-	"REQ_PREEMPT",
-	"REQ_PM_SUSPEND",
-	"REQ_PM_RESUME",
-	"REQ_PM_SHUTDOWN",
-	"REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;

-	printk("%s: dev %s: flags = ", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?");
-	bit = 0;
-	do {
-		if (rq->flags & (1 << bit))
-			printk("%s ", rq_flags[bit]);
-		bit++;
-	} while (bit < __REQ_NR_BITS);
+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);

 	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 						       rq->nr_sectors,
 						       rq->current_nr_sectors);
 	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);

-	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+	if (blk_pc_request(rq)) {
 		printk("cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
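
blk_dump_rq_flags() can drop the rq_flags[] string table because the type/flags split makes the raw hex values meaningful on their own, and classification tests such as the one at the end of the hunk now go through predicates on cmd_type. A hedged sketch of those predicates as this patch presumably defines them in include/linux/blkdev.h (not shown in this diff):

	#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
	#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)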
@@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
 	int nr_phys_segs = bio_phys_segments(q, bio);

 	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,

 	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 		max_sectors = q->max_sectors;

 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,


 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);

 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
@@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 		return NULL;

 	/*
-	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
 	 */
-	rq->flags = rw;
+	rq->cmd_flags = rw;

 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
-		rq->flags |= REQ_ELVPRIV;
+		rq->cmd_flags |= REQ_ELVPRIV;
 	}

 	return rq;
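
blk_alloc_request() seeds cmd_flags directly from the rw argument, which works because the direction bit occupies bit 0 of both bio->bi_rw and the request flags (the "first three bits are identical" comment above). A hedged sketch of the accessor that depends on this layout, as assumed from blkdev.h:

	#define rq_data_dir(rq)	((rq)->cmd_flags & 1)	/* 0 = READ, 1 = WRITE */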
@@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
 	 * barrier
 	 */
-	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_SOFTBARRIER;

 	rq->special = data;

@@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

 	rq->rq_disk = bd_disk;
-	rq->flags |= REQ_NOMERGE;
+	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
@@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-		int priv = req->flags & REQ_ELVPRIV;
+		int priv = req->cmd_flags & REQ_ELVPRIV;

 		BUG_ON(!list_empty(&req->queuelist));

@@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)

 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->flags |= REQ_CMD;
+	req->cmd_type = REQ_TYPE_FS;

 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
 	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
+		req->cmd_flags |= REQ_FAILFAST;

 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
 	if (unlikely(bio_barrier(bio)))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

 	if (bio_sync(bio))
-		req->flags |= REQ_RW_SYNC;
+		req->cmd_flags |= REQ_RW_SYNC;

 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
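
Taken together, the ll_rw_blk.c conversions follow one rule: mutually exclusive request kinds become cmd_type assignments, while orthogonal modifiers stay as cmd_flags bits. Summarized from the hunks above (and the scsi_ioctl.c hunks below):

	rq->flags |= REQ_CMD;       becomes  rq->cmd_type = REQ_TYPE_FS;
	rq->flags |= REQ_SPECIAL;   becomes  rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->flags |= REQ_BLOCK_PC;  becomes  rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->flags |= REQ_NOMERGE;   becomes  rq->cmd_flags |= REQ_NOMERGE;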
@@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 		req->errors = 0;

 	if (!uptodate) {
-		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
@@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);

 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first two bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 3);
+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= (bio->bi_rw & 3);

 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b33eda26e205..2dc326421a24 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -294,7 +294,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense = sense;
 	rq->sense_len = 0;

-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	bio = rq->bio;

 	/*
@@ -470,7 +470,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;

 	blk_execute_rq(q, disk, rq, 0);

@@ -502,7 +502,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	int err;

 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_TIMEOUT;
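
After this patch, issuing a packet command means assigning the type rather than OR-ing a flag. A minimal sketch modelled on __blk_send_generic() above; the helper name and the TEST UNIT READY opcode are illustrative choices, and error handling is omitted:

	static int send_test_unit_ready(request_queue_t *q, struct gendisk *disk)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, WRITE, __GFP_WAIT);
		rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* was: rq->flags |= REQ_BLOCK_PC */
		rq->data = NULL;
		rq->data_len = 0;
		rq->timeout = BLK_DEFAULT_TIMEOUT;
		memset(rq->cmd, 0, sizeof(rq->cmd));	/* TEST UNIT READY: all-zero CDB */
		rq->cmd_len = 6;

		err = blk_execute_rq(q, disk, rq, 0);
		blk_put_request(rq);
		return err;
	}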