author		Jens Axboe <axboe@suse.de>	2006-08-10 02:44:47 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:23:37 -0400
commit		4aff5e2333c9a1609662f2091f55c3f6fffdad36 (patch)
tree		b73d8c2b7c1bdc03d3313c108da7dfc95ee95525
parent		77ed74da26f50fa28471571ee7a2251b77526d84 (diff)
[PATCH] Split struct request ->flags into two parts
Right now ->flags is a bit of a mess: some are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.
Signed-off-by: Jens Axboe <axboe@suse.de>
39 files changed, 295 insertions, 301 deletions
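The include/linux/blkdev.h hunk is not part of this excerpt, so as a hedged sketch only, the split described above amounts to roughly the following: request types become an enum with exactly one value per request, while ->cmd_flags keeps the modifier bits (REQ_RW, REQ_NOMERGE, REQ_QUIET, ...).

/*
 * Sketch only -- the blkdev.h change itself is not shown in this
 * excerpt.  Names inferred from their uses in the hunks below.
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_SENSE,		/* sense request */
	REQ_TYPE_SPECIAL,	/* driver defined type */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
};

/* and, inside struct request, ->flags is replaced by two fields: */
	unsigned int cmd_flags;			/* modifier bits only */
	enum rq_cmd_type_bits cmd_type;		/* exactly one type */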
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5da56d48fbd3..ad1cc4077819 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1335,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->flags & REQ_RW_SYNC))
+			|| (arq->request->cmd_flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
diff --git a/block/elevator.c b/block/elevator.c
index 9b72dc7c8a5c..4ac97b642042 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 			e->ops->elevator_deactivate_req_fn(q, rq);
 	}
 
-	rq->flags &= ~REQ_STARTED;
+	rq->cmd_flags &= ~REQ_STARTED;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	switch (where) {
 	case ELEVATOR_INSERT_FRONT:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
-		rq->flags |= REQ_SORTED;
+		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
@@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 * insertion; otherwise, requests should be requeued
 		 * in ordseq order.
 		 */
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
@@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
+		rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * toggle ordered color
 		 */
@@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
 	if (plug)
@@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->flags & REQ_STARTED)) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
 			elevator_t *e = q->elevator;
 
 			/*
@@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->flags |= REQ_STARTED;
+			rq->cmd_flags |= REQ_STARTED;
 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}
 
@@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
 				nr_bytes = rq->data_len;
 
 			blkdev_dequeue_request(rq);
-			rq->flags |= REQ_QUIET;
+			rq->cmd_flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
 			end_that_request_last(rq, 0);
 		} else {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 51dc0edf76e0..9b91bb70c5ed 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;
 
-	if ((rq->flags & REQ_ORDERED_COLOR) ==
-	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
 	else
 		return QUEUE_ORDSEQ_DONE;
@@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->flags = REQ_HARDBARRIER;
 	rq->elevator_private = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
@@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
 	rq_init(q, rq);
-	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	}
 
 	list_del_init(&rq->queuelist);
-	rq->flags &= ~REQ_QUEUED;
+	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;
 
-	if (unlikely((rq->flags & REQ_QUEUED))) {
+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
@@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 
 	__set_bit(tag, bqt->tag_map);
 
-	rq->flags |= REQ_QUEUED;
+	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
@@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			printk(KERN_ERR
 			       "%s: bad tag found on list\n", __FUNCTION__);
 			list_del_init(&rq->queuelist);
-			rq->flags &= ~REQ_QUEUED;
+			rq->cmd_flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);
 
-		rq->flags &= ~REQ_STARTED;
+		rq->cmd_flags &= ~REQ_STARTED;
 		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 	}
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static const char * const rq_flags[] = {
-	"REQ_RW",
-	"REQ_FAILFAST",
-	"REQ_SORTED",
-	"REQ_SOFTBARRIER",
-	"REQ_HARDBARRIER",
-	"REQ_FUA",
-	"REQ_CMD",
-	"REQ_NOMERGE",
-	"REQ_STARTED",
-	"REQ_DONTPREP",
-	"REQ_QUEUED",
-	"REQ_ELVPRIV",
-	"REQ_PC",
-	"REQ_BLOCK_PC",
-	"REQ_SENSE",
-	"REQ_FAILED",
-	"REQ_QUIET",
-	"REQ_SPECIAL",
-	"REQ_DRIVE_CMD",
-	"REQ_DRIVE_TASK",
-	"REQ_DRIVE_TASKFILE",
-	"REQ_PREEMPT",
-	"REQ_PM_SUSPEND",
-	"REQ_PM_RESUME",
-	"REQ_PM_SHUTDOWN",
-	"REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: flags = ", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?");
-	bit = 0;
-	do {
-		if (rq->flags & (1 << bit))
-			printk("%s ", rq_flags[bit]);
-		bit++;
-	} while (bit < __REQ_NR_BITS);
+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);
 
 	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 						       rq->nr_sectors,
 						       rq->current_nr_sectors);
 	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+	if (blk_pc_request(rq)) {
 		printk("cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
 	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 
 	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
@@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 		return NULL;
 
 	/*
-	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
 	 */
-	rq->flags = rw;
+	rq->cmd_flags = rw;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
-		rq->flags |= REQ_ELVPRIV;
+		rq->cmd_flags |= REQ_ELVPRIV;
 	}
 
 	return rq;
@@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
 	 */
-	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_SOFTBARRIER;
 
 	rq->special = data;
 
@@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->flags |= REQ_NOMERGE;
+	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
@@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-		int priv = req->flags & REQ_ELVPRIV;
+		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 
@@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->flags |= REQ_CMD;
+	req->cmd_type = REQ_TYPE_FS;
 
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
 	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
+		req->cmd_flags |= REQ_FAILFAST;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
 	if (unlikely(bio_barrier(bio)))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	if (bio_sync(bio))
-		req->flags |= REQ_RW_SYNC;
+		req->cmd_flags |= REQ_RW_SYNC;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 		req->errors = 0;
 
 	if (!uptodate) {
-		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
@@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first two bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 3);
+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= (bio->bi_rw & 3);
 
 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);
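The conversions above and below lean on type-test helpers (blk_fs_request(), blk_pc_request(), blk_special_request(), blk_sense_request(), blk_rq_started()) instead of open-coded flag checks. Their definitions live in the blkdev.h hunk that is not part of this excerpt; assuming they follow the obvious pattern, they reduce to:

/* Assumed definitions (blkdev.h not shown here): each type helper
 * compares the single ->cmd_type value instead of testing a flag bit. */
#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
/* state bits, by contrast, stay in ->cmd_flags: */
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)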
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b33eda26e205..2dc326421a24 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -294,7 +294,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense = sense;
 	rq->sense_len = 0;
 
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	bio = rq->bio;
 
 	/*
@@ -470,7 +470,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -502,7 +502,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	int err;
 
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_TIMEOUT;
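As a usage sketch of the new convention (mirroring the __blk_send_generic() conversion just above; the helper name and error handling are illustrative, not from the patch):

/* Illustrative only: issuing a packet command with the post-split
 * fields, using the 2.6.18-era block layer API shown in this diff. */
static int send_packet_command(request_queue_t *q, struct gendisk *disk,
			       unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
	int err;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* the one request type */
	rq->cmd_flags |= REQ_QUIET;		/* modifiers stay flags */
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->data = NULL;
	rq->data_len = 0;
	rq->timeout = BLK_DEFAULT_TIMEOUT;

	blk_execute_rq(q, disk, rq, 0);		/* waits for completion */
	err = rq->errors ? -EIO : 0;
	blk_put_request(rq);
	return err;
}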
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index ad1d7065a1b2..629c5769d994 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2991,8 +2991,8 @@ static void do_fd_request(request_queue_t * q)
 	if (usage_count == 0) {
 		printk("warning: usage count=0, current_req=%p exiting\n",
 		       current_req);
-		printk("sect=%ld flags=%lx\n", (long)current_req->sector,
-		       current_req->flags);
+		printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
+		       current_req->cmd_type, current_req->cmd_flags);
 		return;
 	}
 	if (test_bit(0, &fdc_busy)) {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index bdbade9a5cf5..9d1035e8d9d8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -407,10 +407,10 @@ static void do_nbd_request(request_queue_t * q)
 		struct nbd_device *lo;
 
 		blkdev_dequeue_request(req);
-		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
-				req->rq_disk->disk_name, req, req->flags);
+		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
+				req->rq_disk->disk_name, req, req->cmd_type);
 
-		if (!(req->flags & REQ_CMD))
+		if (!blk_fs_request(req))
 			goto error_out;
 
 		lo = req->rq_disk->private_data;
@@ -489,7 +489,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 	switch (cmd) {
 	case NBD_DISCONNECT:
 		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
-		sreq.flags = REQ_SPECIAL;
+		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
 		/*
 		 * Set these to sane values in case server implementation
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 2403721f9db1..12ff1a274d91 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -437,7 +437,7 @@ static char *pd_buf;	/* buffer for request in progress */
 
 static enum action do_pd_io_start(void)
 {
-	if (pd_req->flags & REQ_SPECIAL) {
+	if (blk_special_request(pd_req)) {
 		phase = pd_special;
 		return pd_special();
 	}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 451b996bba91..42891d2b054e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -365,16 +365,16 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	rq->sense = sense;
 	memset(sense, 0, sizeof(sense));
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_HARDBARRIER;
 	if (cgc->quiet)
-		rq->flags |= REQ_QUIET;
+		rq->cmd_flags |= REQ_QUIET;
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
 		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
 
 	rq->ref_count++;
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
 	rq->end_io = blk_end_sync_rq;
 	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index e828e4cbd3e1..ebf3025721d1 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -313,7 +313,7 @@ static void do_xd_request (request_queue_t * q)
 		int res = 0;
 		int retry;
 
-		if (!(req->flags & REQ_CMD)) {
+		if (!blk_fs_request(req)) {
 			end_request(req, 0);
 			continue;
 		}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d239cf8b20bd..b38c84a7a8e3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2129,7 +2129,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		rq->cmd[9] = 0xf8;
 
 		rq->cmd_len = 12;
-		rq->flags |= REQ_BLOCK_PC;
+		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 		rq->timeout = 60 * HZ;
 		bio = rq->bio;
 
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index 37bdb0163f0d..ccd91c1a84bd 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -1338,8 +1338,10 @@ static void do_cdu31a_request(request_queue_t * q)
 		}
 
 		/* WTF??? */
-		if (!(req->flags & REQ_CMD))
+		if (!blk_fs_request(req)) {
+			end_request(req, 0);
 			continue;
+		}
 		if (rq_data_dir(req) == WRITE) {
 			end_request(req, 0);
 			continue;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 654d4cd09847..69bbb6206a00 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -372,7 +372,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
 {
 	int log = 0;
 
-	if (!sense || !rq || (rq->flags & REQ_QUIET))
+	if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
 		return 0;
 
 	switch (sense->sense_key) {
@@ -597,7 +597,7 @@ static void cdrom_prepare_request(ide_drive_t *drive, struct request *rq)
 	struct cdrom_info *cd = drive->driver_data;
 
 	ide_init_drive_cmd(rq);
-	rq->flags = REQ_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->rq_disk = cd->disk;
 }
 
@@ -617,7 +617,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
 	rq->cmd[0] = GPCMD_REQUEST_SENSE;
 	rq->cmd[4] = rq->data_len = 18;
 
-	rq->flags = REQ_SENSE;
+	rq->cmd_type = REQ_TYPE_SENSE;
 
 	/* NOTE! Save the failed command in "rq->buffer" */
 	rq->buffer = (void *) failed_command;
@@ -630,10 +630,10 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 	struct request *rq = HWGROUP(drive)->rq;
 	int nsectors = rq->hard_cur_sectors;
 
-	if ((rq->flags & REQ_SENSE) && uptodate) {
+	if (blk_sense_request(rq) && uptodate) {
 		/*
-		 * For REQ_SENSE, "rq->buffer" points to the original failed
-		 * request
+		 * For REQ_TYPE_SENSE, "rq->buffer" points to the original
+		 * failed request
 		 */
 		struct request *failed = (struct request *) rq->buffer;
 		struct cdrom_info *info = drive->driver_data;
@@ -706,17 +706,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 		return 1;
 	}
 
-	if (rq->flags & REQ_SENSE) {
+	if (blk_sense_request(rq)) {
 		/* We got an error trying to get sense info
 		   from the drive (probably while trying
 		   to recover from a former error).  Just give up. */
 
-		rq->flags |= REQ_FAILED;
+		rq->cmd_flags |= REQ_FAILED;
 		cdrom_end_request(drive, 0);
 		ide_error(drive, "request sense failure", stat);
 		return 1;
 
-	} else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
+	} else if (blk_pc_request(rq)) {
 		/* All other functions, except for READ. */
 		unsigned long flags;
 
@@ -724,7 +724,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 		 * if we have an error, pass back CHECK_CONDITION as the
 		 * scsi status byte
 		 */
-		if ((rq->flags & REQ_BLOCK_PC) && !rq->errors)
+		if (!rq->errors)
 			rq->errors = SAM_STAT_CHECK_CONDITION;
 
 		/* Check for tray open. */
@@ -735,12 +735,12 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 			cdrom_saw_media_change (drive);
 			/*printk("%s: media changed\n",drive->name);*/
 			return 0;
-		} else if (!(rq->flags & REQ_QUIET)) {
+		} else if (!(rq->cmd_flags & REQ_QUIET)) {
 			/* Otherwise, print an error. */
 			ide_dump_status(drive, "packet command error", stat);
 		}
 
-		rq->flags |= REQ_FAILED;
+		rq->cmd_flags |= REQ_FAILED;
 
 		/*
 		 * instead of playing games with moving completions around,
@@ -881,7 +881,7 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
 			wait = ATAPI_WAIT_PC;
 			break;
 		default:
-			if (!(rq->flags & REQ_QUIET))
+			if (!(rq->cmd_flags & REQ_QUIET))
 				printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
 			wait = 0;
 			break;
@@ -1124,7 +1124,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
 		if (rq->current_nr_sectors > 0) {
 			printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n",
 				drive->name, rq->current_nr_sectors);
-			rq->flags |= REQ_FAILED;
+			rq->cmd_flags |= REQ_FAILED;
 			cdrom_end_request(drive, 0);
 		} else
 			cdrom_end_request(drive, 1);
@@ -1456,7 +1456,7 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
 			printk ("%s: cdrom_pc_intr: data underrun %d\n",
 				drive->name, pc->buflen);
 			*/
-			rq->flags |= REQ_FAILED;
+			rq->cmd_flags |= REQ_FAILED;
 			cdrom_end_request(drive, 0);
 		}
 		return ide_stopped;
@@ -1509,7 +1509,7 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
 		rq->data += thislen;
 		rq->data_len -= thislen;
 
-		if (rq->flags & REQ_SENSE)
+		if (blk_sense_request(rq))
 			rq->sense_len += thislen;
 	} else {
 confused:
@@ -1517,7 +1517,7 @@ confused:
 		       "appears confused (ireason = 0x%02x). "
 		       "Trying to recover by ending request.\n",
 		       drive->name, ireason);
-		rq->flags |= REQ_FAILED;
+		rq->cmd_flags |= REQ_FAILED;
 		cdrom_end_request(drive, 0);
 		return ide_stopped;
 	}
@@ -1546,7 +1546,7 @@ static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive)
 	struct cdrom_info *info = drive->driver_data;
 
 	info->dma = 0;
-	rq->flags &= ~REQ_FAILED;
+	rq->cmd_flags &= ~REQ_FAILED;
 	len = rq->data_len;
 
 	/* Start sending the command to the drive. */
@@ -1558,7 +1558,7 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
 {
 	struct request_sense sense;
 	int retries = 10;
-	unsigned int flags = rq->flags;
+	unsigned int flags = rq->cmd_flags;
 
 	if (rq->sense == NULL)
 		rq->sense = &sense;
@@ -1567,14 +1567,14 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
 	do {
 		int error;
 		unsigned long time = jiffies;
-		rq->flags = flags;
+		rq->cmd_flags = flags;
 
 		error = ide_do_drive_cmd(drive, rq, ide_wait);
 		time = jiffies - time;
 
 		/* FIXME: we should probably abort/retry or something
 		 * in case of failure */
-		if (rq->flags & REQ_FAILED) {
+		if (rq->cmd_flags & REQ_FAILED) {
 			/* The request failed.  Retry if it was due to a unit
 			   attention status
 			   (usually means media was changed). */
@@ -1596,10 +1596,10 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
 		}
 
 		/* End of retry loop. */
-	} while ((rq->flags & REQ_FAILED) && retries >= 0);
+	} while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
 
 	/* Return an error if the command failed. */
-	return (rq->flags & REQ_FAILED) ? -EIO : 0;
+	return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
 }
 
 /*
@@ -1963,7 +1963,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 {
 	struct cdrom_info *info = drive->driver_data;
 
-	rq->flags |= REQ_QUIET;
+	rq->cmd_flags |= REQ_QUIET;
 
 	info->dma = 0;
 
@@ -2023,11 +2023,11 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
 		}
 		info->last_block = block;
 		return action;
-	} else if (rq->flags & (REQ_PC | REQ_SENSE)) {
+	} else if (rq->cmd_type == REQ_TYPE_SENSE) {
 		return cdrom_do_packet_command(drive);
-	} else if (rq->flags & REQ_BLOCK_PC) {
+	} else if (blk_pc_request(rq)) {
 		return cdrom_do_block_pc(drive, rq);
-	} else if (rq->flags & REQ_SPECIAL) {
+	} else if (blk_special_request(rq)) {
 		/*
 		 * right now this can only be a reset...
 		 */
@@ -2105,7 +2105,7 @@ static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 
 	req.sense = sense;
 	req.cmd[0] = GPCMD_TEST_UNIT_READY;
-	req.flags |= REQ_QUIET;
+	req.cmd_flags |= REQ_QUIET;
 
 #if ! STANDARD_ATAPI
 	/* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
@@ -2207,7 +2207,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
 	req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
 	req.data = (char *)&capbuf;
 	req.data_len = sizeof(capbuf);
-	req.flags |= REQ_QUIET;
+	req.cmd_flags |= REQ_QUIET;
 
 	stat = cdrom_queue_packet_command(drive, &req);
 	if (stat == 0) {
@@ -2230,7 +2230,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
 	req.sense = sense;
 	req.data = buf;
 	req.data_len = buflen;
-	req.flags |= REQ_QUIET;
+	req.cmd_flags |= REQ_QUIET;
 	req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
 	req.cmd[6] = trackno;
 	req.cmd[7] = (buflen >> 8);
@@ -2531,7 +2531,7 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi,
 	req.timeout = cgc->timeout;
 
 	if (cgc->quiet)
-		req.flags |= REQ_QUIET;
+		req.cmd_flags |= REQ_QUIET;
 
 	req.sense = cgc->sense;
 	cgc->stat = cdrom_queue_packet_command(drive, &req);
@@ -2629,7 +2629,8 @@ int ide_cdrom_reset (struct cdrom_device_info *cdi)
 	int ret;
 
 	cdrom_prepare_request(drive, &req);
-	req.flags = REQ_SPECIAL | REQ_QUIET;
+	req.cmd_type = REQ_TYPE_SPECIAL;
+	req.cmd_flags = REQ_QUIET;
 	ret = ide_do_drive_cmd(drive, &req, ide_wait);
 
 	/*
@@ -3116,9 +3117,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
 
 static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_CMD)
+	if (blk_fs_request(rq))
 		return ide_cdrom_prep_fs(q, rq);
-	else if (rq->flags & REQ_BLOCK_PC)
+	else if (blk_pc_request(rq))
 		return ide_cdrom_prep_pc(rq);
 
 	return 0;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7cf3eb023521..0a05a377d66a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -699,7 +699,8 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 	rq->cmd[0] = WIN_FLUSH_CACHE;
 
 
-	rq->flags |= REQ_DRIVE_TASK;
+	rq->cmd_type = REQ_TYPE_ATA_TASK;
+	rq->cmd_flags |= REQ_SOFTBARRIER;
 	rq->buffer = rq->cmd;
 }
 
@@ -740,7 +741,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
 	if (drive->special.b.set_multmode)
 		return -EBUSY;
 	ide_init_drive_cmd (&rq);
-	rq.flags = REQ_DRIVE_CMD;
+	rq.cmd_type = REQ_TYPE_ATA_CMD;
 	drive->mult_req = arg;
 	drive->special.b.set_multmode = 1;
 	(void) ide_do_drive_cmd (drive, &rq, ide_wait);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7c3a13e1cf64..c3546fe9af63 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -205,7 +205,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 	ide_hwif_t *hwif = HWIF(drive);
 	struct scatterlist *sg = hwif->sg_table;
 
-	BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256);
+	BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256);
 
 	ide_map_sg(drive, rq);
 
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index adbe9f76a505..0edc32204915 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -588,7 +588,7 @@ static int idefloppy_do_end_request(ide_drive_t *drive, int uptodate, int nsecs)
 	/* Why does this happen? */
 	if (!rq)
 		return 0;
-	if (!(rq->flags & REQ_SPECIAL)) { //if (!IDEFLOPPY_RQ_CMD (rq->cmd)) {
+	if (!blk_special_request(rq)) {
 		/* our real local end request function */
 		ide_end_request(drive, uptodate, nsecs);
 		return 0;
@@ -689,7 +689,7 @@ static void idefloppy_queue_pc_head (ide_drive_t *drive,idefloppy_pc_t *pc,struc
 
 	ide_init_drive_cmd(rq);
 	rq->buffer = (char *) pc;
-	rq->flags = REQ_SPECIAL;	//rq->cmd = IDEFLOPPY_PC_RQ;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
 	rq->rq_disk = floppy->disk;
 	(void) ide_do_drive_cmd(drive, rq, ide_preempt);
 }
@@ -1250,7 +1250,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 	pc->callback = &idefloppy_rw_callback;
 	pc->rq = rq;
 	pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
-	if (rq->flags & REQ_RW)
+	if (rq->cmd_flags & REQ_RW)
 		set_bit(PC_WRITING, &pc->flags);
 	pc->buffer = NULL;
 	pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
@@ -1303,7 +1303,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		idefloppy_do_end_request(drive, 0, 0);
 		return ide_stopped;
 	}
-	if (rq->flags & REQ_CMD) {
+	if (blk_fs_request(rq)) {
 		if (((long)rq->sector % floppy->bs_factor) ||
 		    (rq->nr_sectors % floppy->bs_factor)) {
 			printk("%s: unsupported r/w request size\n",
@@ -1313,9 +1313,9 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		}
 		pc = idefloppy_next_pc_storage(drive);
 		idefloppy_create_rw_cmd(floppy, pc, rq, block);
-	} else if (rq->flags & REQ_SPECIAL) {
+	} else if (blk_special_request(rq)) {
 		pc = (idefloppy_pc_t *) rq->buffer;
-	} else if (rq->flags & REQ_BLOCK_PC) {
+	} else if (blk_pc_request(rq)) {
 		pc = idefloppy_next_pc_storage(drive);
 		if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
 			idefloppy_do_end_request(drive, 0, 0);
@@ -1343,7 +1343,7 @@ static int idefloppy_queue_pc_tail (ide_drive_t *drive,idefloppy_pc_t *pc)
 
 	ide_init_drive_cmd (&rq);
 	rq.buffer = (char *) pc;
-	rq.flags = REQ_SPECIAL;	// rq.cmd = IDEFLOPPY_PC_RQ;
+	rq.cmd_type = REQ_TYPE_SPECIAL;
 	rq.rq_disk = floppy->disk;
 
 	return ide_do_drive_cmd(drive, &rq, ide_wait);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index fb6795236e76..3436b1f104eb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -59,7 +59,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 {
 	int ret = 1;
 
-	BUG_ON(!(rq->flags & REQ_STARTED));
+	BUG_ON(!blk_rq_started(rq));
 
 	/*
 	 * if failfast is set on a request, override number of sectors and
@@ -244,7 +244,7 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 
 	spin_lock_irqsave(&ide_lock, flags);
 
-	BUG_ON(!(rq->flags & REQ_STARTED));
+	BUG_ON(!blk_rq_started(rq));
 
 	/*
 	 * if failfast is set on a request, override number of sectors and
@@ -366,7 +366,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 	rq = HWGROUP(drive)->rq;
 	spin_unlock_irqrestore(&ide_lock, flags);
 
-	if (rq->flags & REQ_DRIVE_CMD) {
+	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
 		u8 *args = (u8 *) rq->buffer;
 		if (rq->errors == 0)
 			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@@ -376,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 			args[1] = err;
 			args[2] = hwif->INB(IDE_NSECTOR_REG);
 		}
-	} else if (rq->flags & REQ_DRIVE_TASK) {
+	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
 		u8 *args = (u8 *) rq->buffer;
 		if (rq->errors == 0)
 			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@@ -390,7 +390,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 			args[5] = hwif->INB(IDE_HCYL_REG);
 			args[6] = hwif->INB(IDE_SELECT_REG);
 		}
-	} else if (rq->flags & REQ_DRIVE_TASKFILE) {
+	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *args = (ide_task_t *) rq->special;
 		if (rq->errors == 0)
 			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@@ -587,7 +587,7 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
 		return ide_stopped;
 
 	/* retry only "normal" I/O: */
-	if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+	if (!blk_fs_request(rq)) {
 		rq->errors = 1;
 		ide_end_drive_cmd(drive, stat, err);
 		return ide_stopped;
@@ -638,7 +638,7 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
 		return ide_stopped;
 
 	/* retry only "normal" I/O: */
-	if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+	if (!blk_fs_request(rq)) {
 		rq->errors = 1;
 		ide_end_drive_cmd(drive, BUSY_STAT, 0);
 		return ide_stopped;
@@ -808,7 +808,7 @@ void ide_map_sg(ide_drive_t *drive, struct request *rq)
 	if (hwif->sg_mapped)	/* needed by ide-scsi */
 		return;
 
-	if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) {
+	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
 		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 	} else {
 		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
@@ -844,7 +844,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 					  struct request *rq)
 {
 	ide_hwif_t *hwif = HWIF(drive);
-	if (rq->flags & REQ_DRIVE_TASKFILE) {
+	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *args = rq->special;
 
 		if (!args)
@@ -866,7 +866,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 		if (args->tf_out_flags.all != 0)
 			return flagged_taskfile(drive, args);
 		return do_rw_taskfile(drive, args);
-	} else if (rq->flags & REQ_DRIVE_TASK) {
+	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
 		u8 *args = rq->buffer;
 		u8 sel;
 
@@ -892,7 +892,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 		hwif->OUTB(sel, IDE_SELECT_REG);
 		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
 		return ide_started;
-	} else if (rq->flags & REQ_DRIVE_CMD) {
+	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
 		u8 *args = rq->buffer;
 
 		if (!args)
@@ -980,7 +980,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	ide_startstop_t startstop;
 	sector_t block;
 
-	BUG_ON(!(rq->flags & REQ_STARTED));
+	BUG_ON(!blk_rq_started(rq));
 
 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
@@ -1013,9 +1013,9 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	if (!drive->special.all) {
 		ide_driver_t *drv;
 
-		if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
-			return execute_drive_cmd(drive, rq);
-		else if (rq->flags & REQ_DRIVE_TASKFILE)
+		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
+		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
 			return execute_drive_cmd(drive, rq);
 		else if (blk_pm_request(rq)) {
 			struct request_pm_state *pm = rq->end_io_data;
@@ -1264,7 +1264,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		 * We count how many times we loop here to make sure we service
 		 * all drives in the hwgroup without looping for ever
 		 */
-		if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
+		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
 			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
 				goto again;
@@ -1670,7 +1670,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
 void ide_init_drive_cmd (struct request *rq)
 {
 	memset(rq, 0, sizeof(*rq));
1673 | rq->flags = REQ_DRIVE_CMD; | 1673 | rq->cmd_type = REQ_TYPE_ATA_CMD; |
1674 | rq->ref_count = 1; | 1674 | rq->ref_count = 1; |
1675 | } | 1675 | } |
1676 | 1676 | ||
@@ -1727,7 +1727,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio | |||
1727 | hwgroup->rq = NULL; | 1727 | hwgroup->rq = NULL; |
1728 | if (action == ide_preempt || action == ide_head_wait) { | 1728 | if (action == ide_preempt || action == ide_head_wait) { |
1729 | where = ELEVATOR_INSERT_FRONT; | 1729 | where = ELEVATOR_INSERT_FRONT; |
1730 | rq->flags |= REQ_PREEMPT; | 1730 | rq->cmd_flags |= REQ_PREEMPT; |
1731 | } | 1731 | } |
1732 | __elv_add_request(drive->queue, rq, where, 0); | 1732 | __elv_add_request(drive->queue, rq, where, 0); |
1733 | ide_do_request(hwgroup, IDE_NO_IRQ); | 1733 | ide_do_request(hwgroup, IDE_NO_IRQ); |
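The ide-io.c hunks above all apply one transformation: request types that used to live in the shared flags bitmask become a single cmd_type value tested with == (or, in the error paths, with the broader !blk_fs_request(rq), which also covers PM and packet requests and so still reads as "not normal I/O"), while modifier bits such as REQ_STARTED and REQ_PREEMPT stay in cmd_flags. A minimal before/after sketch of the idiom; handle_private_cmd() is a hypothetical stand-in for execute_drive_cmd():

    /* before: type bits and modifier bits shared one bitmask */
    if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE))
            return handle_private_cmd(drive, rq);   /* hypothetical handler */

    /* after: the type is a scalar, modifiers remain a bitmask */
    if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
        rq->cmd_type == REQ_TYPE_ATA_TASK ||
        rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
            return handle_private_cmd(drive, rq);   /* hypothetical handler */

    rq->cmd_flags &= ~REQ_STARTED;                  /* modifier handling is unchanged */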
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 1feff23487d4..850ef63cc986 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c | |||
@@ -456,13 +456,14 @@ static void ide_dump_opcode(ide_drive_t *drive) | |||
456 | spin_unlock(&ide_lock); | 456 | spin_unlock(&ide_lock); |
457 | if (!rq) | 457 | if (!rq) |
458 | return; | 458 | return; |
459 | if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) { | 459 | if (rq->cmd_type == REQ_TYPE_ATA_CMD || |
460 | rq->cmd_type == REQ_TYPE_ATA_TASK) { | ||
460 | char *args = rq->buffer; | 461 | char *args = rq->buffer; |
461 | if (args) { | 462 | if (args) { |
462 | opcode = args[0]; | 463 | opcode = args[0]; |
463 | found = 1; | 464 | found = 1; |
464 | } | 465 | } |
465 | } else if (rq->flags & REQ_DRIVE_TASKFILE) { | 466 | } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { |
466 | ide_task_t *args = rq->special; | 467 | ide_task_t *args = rq->special; |
467 | if (args) { | 468 | if (args) { |
468 | task_struct_t *tf = (task_struct_t *) args->tfRegister; | 469 | task_struct_t *tf = (task_struct_t *) args->tfRegister; |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 7067ab997927..643e4b9ac651 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -1776,7 +1776,7 @@ static void idetape_create_request_sense_cmd (idetape_pc_t *pc) | |||
1776 | static void idetape_init_rq(struct request *rq, u8 cmd) | 1776 | static void idetape_init_rq(struct request *rq, u8 cmd) |
1777 | { | 1777 | { |
1778 | memset(rq, 0, sizeof(*rq)); | 1778 | memset(rq, 0, sizeof(*rq)); |
1779 | rq->flags = REQ_SPECIAL; | 1779 | rq->cmd_type = REQ_TYPE_SPECIAL; |
1780 | rq->cmd[0] = cmd; | 1780 | rq->cmd[0] = cmd; |
1781 | } | 1781 | } |
1782 | 1782 | ||
@@ -2433,12 +2433,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
2433 | rq->sector, rq->nr_sectors, rq->current_nr_sectors); | 2433 | rq->sector, rq->nr_sectors, rq->current_nr_sectors); |
2434 | #endif /* IDETAPE_DEBUG_LOG */ | 2434 | #endif /* IDETAPE_DEBUG_LOG */ |
2435 | 2435 | ||
2436 | if ((rq->flags & REQ_SPECIAL) == 0) { | 2436 | if (!blk_special_request(rq)) { |
2437 | /* | 2437 | /* |
2438 | * We do not support buffer cache originated requests. | 2438 | * We do not support buffer cache originated requests. |
2439 | */ | 2439 | */ |
2440 | printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " | 2440 | printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " |
2441 | "request queue (%ld)\n", drive->name, rq->flags); | 2441 | "request queue (%d)\n", drive->name, rq->cmd_type); |
2442 | ide_end_request(drive, 0, 0); | 2442 | ide_end_request(drive, 0, 0); |
2443 | return ide_stopped; | 2443 | return ide_stopped; |
2444 | } | 2444 | } |
@@ -2768,7 +2768,7 @@ static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq) | |||
2768 | idetape_tape_t *tape = drive->driver_data; | 2768 | idetape_tape_t *tape = drive->driver_data; |
2769 | 2769 | ||
2770 | #if IDETAPE_DEBUG_BUGS | 2770 | #if IDETAPE_DEBUG_BUGS |
2771 | if (rq == NULL || (rq->flags & REQ_SPECIAL) == 0) { | 2771 | if (rq == NULL || !blk_special_request(rq)) { |
2772 | printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n"); | 2772 | printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n"); |
2773 | return; | 2773 | return; |
2774 | } | 2774 | } |
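ide-tape's driver-private requests now carry REQ_TYPE_SPECIAL and are recognized through the new blk_special_request() helper rather than a REQ_SPECIAL mask test. A hedged sketch of the idiom, modelled on idetape_init_rq() and idetape_do_request() above; IDETAPE_EXAMPLE_CMD is a hypothetical opcode:

    struct request rq;

    memset(&rq, 0, sizeof(rq));
    rq.cmd_type = REQ_TYPE_SPECIAL;         /* driver-defined request */
    rq.cmd[0]   = IDETAPE_EXAMPLE_CMD;      /* hypothetical driver opcode */

    /* in the request handler, anything else is rejected: */
    if (!blk_special_request(&rq))
            /* buffer-cache I/O is unsupported; end the request with an error */ ;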
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 97a9244312fc..1d0470c1f957 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -363,7 +363,7 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq, | |||
363 | 363 | ||
364 | static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) | 364 | static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) |
365 | { | 365 | { |
366 | if (rq->flags & REQ_DRIVE_TASKFILE) { | 366 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { |
367 | ide_task_t *task = rq->special; | 367 | ide_task_t *task = rq->special; |
368 | 368 | ||
369 | if (task->tf_out_flags.all) { | 369 | if (task->tf_out_flags.all) { |
@@ -474,7 +474,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long | |||
474 | struct request rq; | 474 | struct request rq; |
475 | 475 | ||
476 | memset(&rq, 0, sizeof(rq)); | 476 | memset(&rq, 0, sizeof(rq)); |
477 | rq.flags = REQ_DRIVE_TASKFILE; | 477 | rq.cmd_type = REQ_TYPE_ATA_TASKFILE; |
478 | rq.buffer = buf; | 478 | rq.buffer = buf; |
479 | 479 | ||
480 | /* | 480 | /* |
@@ -499,7 +499,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long | |||
499 | rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors; | 499 | rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors; |
500 | 500 | ||
501 | if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) | 501 | if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) |
502 | rq.flags |= REQ_RW; | 502 | rq.cmd_flags |= REQ_RW; |
503 | } | 503 | } |
504 | 504 | ||
505 | rq.special = args; | 505 | rq.special = args; |
@@ -737,7 +737,7 @@ static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf) | |||
737 | struct request rq; | 737 | struct request rq; |
738 | 738 | ||
739 | ide_init_drive_cmd(&rq); | 739 | ide_init_drive_cmd(&rq); |
740 | rq.flags = REQ_DRIVE_TASK; | 740 | rq.cmd_type = REQ_TYPE_ATA_TASK; |
741 | rq.buffer = buf; | 741 | rq.buffer = buf; |
742 | return ide_do_drive_cmd(drive, &rq, ide_wait); | 742 | return ide_do_drive_cmd(drive, &rq, ide_wait); |
743 | } | 743 | } |
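ide-taskfile.c shows the two fields working together on one request: cmd_type says what the request is, cmd_flags carries orthogonal modifiers such as the data direction. A sketch following ide_diag_taskfile() above; is_raw_write stands in for the IDE_DRIVE_TASK_RAW_WRITE check:

    struct request rq;

    memset(&rq, 0, sizeof(rq));
    rq.cmd_type = REQ_TYPE_ATA_TASKFILE;    /* what the request is */
    if (is_raw_write)                       /* hypothetical condition */
            rq.cmd_flags |= REQ_RW;         /* how it moves data: a write */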
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 9c8468de1a75..9384a3fdde6c 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -1217,7 +1217,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
1217 | memset(&rq, 0, sizeof(rq)); | 1217 | memset(&rq, 0, sizeof(rq)); |
1218 | memset(&rqpm, 0, sizeof(rqpm)); | 1218 | memset(&rqpm, 0, sizeof(rqpm)); |
1219 | memset(&args, 0, sizeof(args)); | 1219 | memset(&args, 0, sizeof(args)); |
1220 | rq.flags = REQ_PM_SUSPEND; | 1220 | rq.cmd_type = REQ_TYPE_PM_SUSPEND; |
1221 | rq.special = &args; | 1221 | rq.special = &args; |
1222 | rq.end_io_data = &rqpm; | 1222 | rq.end_io_data = &rqpm; |
1223 | rqpm.pm_step = ide_pm_state_start_suspend; | 1223 | rqpm.pm_step = ide_pm_state_start_suspend; |
@@ -1238,7 +1238,7 @@ static int generic_ide_resume(struct device *dev) | |||
1238 | memset(&rq, 0, sizeof(rq)); | 1238 | memset(&rq, 0, sizeof(rq)); |
1239 | memset(&rqpm, 0, sizeof(rqpm)); | 1239 | memset(&rqpm, 0, sizeof(rqpm)); |
1240 | memset(&args, 0, sizeof(args)); | 1240 | memset(&args, 0, sizeof(args)); |
1241 | rq.flags = REQ_PM_RESUME; | 1241 | rq.cmd_type = REQ_TYPE_PM_RESUME; |
1242 | rq.special = &args; | 1242 | rq.special = &args; |
1243 | rq.end_io_data = &rqpm; | 1243 | rq.end_io_data = &rqpm; |
1244 | rqpm.pm_step = ide_pm_state_start_resume; | 1244 | rqpm.pm_step = ide_pm_state_start_resume; |
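The power-management pseudo-requests convert the same way: the REQ_PM_SUSPEND and REQ_PM_RESUME flag bits become cmd_type values, and blk_pm_request() (redefined in the blkdev.h hunk below) tests the two types instead of a mask. A sketch of the suspend path, following generic_ide_suspend() above:

    struct request rq;
    struct request_pm_state rqpm;

    memset(&rq, 0, sizeof(rq));
    memset(&rqpm, 0, sizeof(rqpm));
    rq.cmd_type    = REQ_TYPE_PM_SUSPEND;   /* was: rq.flags = REQ_PM_SUSPEND */
    rq.end_io_data = &rqpm;

    if (blk_pm_request(&rq))                /* true for either PM type */
            /* hand the request to the queue */ ;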
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c index aebecd8f51cc..4ab931145673 100644 --- a/drivers/ide/legacy/hd.c +++ b/drivers/ide/legacy/hd.c | |||
@@ -626,7 +626,7 @@ repeat: | |||
626 | req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ", | 626 | req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ", |
627 | cyl, head, sec, nsect, req->buffer); | 627 | cyl, head, sec, nsect, req->buffer); |
628 | #endif | 628 | #endif |
629 | if (req->flags & REQ_CMD) { | 629 | if (blk_fs_request(req)) { |
630 | switch (rq_data_dir(req)) { | 630 | switch (rq_data_dir(req)) { |
631 | case READ: | 631 | case READ: |
632 | hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr); | 632 | hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr); |
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c index 2a374ccb30dd..2b2d45d7baaa 100644 --- a/drivers/md/dm-emc.c +++ b/drivers/md/dm-emc.c | |||
@@ -126,7 +126,8 @@ static struct request *get_failover_req(struct emc_handler *h, | |||
126 | memset(&rq->cmd, 0, BLK_MAX_CDB); | 126 | memset(&rq->cmd, 0, BLK_MAX_CDB); |
127 | 127 | ||
128 | rq->timeout = EMC_FAILOVER_TIMEOUT; | 128 | rq->timeout = EMC_FAILOVER_TIMEOUT; |
129 | rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE); | 129 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
130 | rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; | ||
130 | 131 | ||
131 | return rq; | 132 | return rq; |
132 | } | 133 | } |
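dm-emc.c is the clearest case of the mechanical split: a single OR that mixed one type bit with two modifier bits becomes an assignment plus an OR. The shape of the rewrite, taken directly from the hunk above:

    /* before: one bitmask carried both kinds of information */
    rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);

    /* after: the type is assigned, only the modifiers are OR'ed in */
    rq->cmd_type   = REQ_TYPE_BLOCK_PC;
    rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;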
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 1ddc2fb429d5..eaba81bf2eca 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -390,9 +390,9 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
390 | } | 390 | } |
391 | 391 | ||
392 | /* request is already processed by us, so return */ | 392 | /* request is already processed by us, so return */ |
393 | if (req->flags & REQ_SPECIAL) { | 393 | if (blk_special_request(req)) { |
394 | osm_debug("REQ_SPECIAL already set!\n"); | 394 | osm_debug("REQ_SPECIAL already set!\n"); |
395 | req->flags |= REQ_DONTPREP; | 395 | req->cmd_flags |= REQ_DONTPREP; |
396 | return BLKPREP_OK; | 396 | return BLKPREP_OK; |
397 | } | 397 | } |
398 | 398 | ||
@@ -411,7 +411,8 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
411 | ireq = req->special; | 411 | ireq = req->special; |
412 | 412 | ||
413 | /* do not come back here */ | 413 | /* do not come back here */ |
414 | req->flags |= REQ_DONTPREP | REQ_SPECIAL; | 414 | req->cmd_type = REQ_TYPE_SPECIAL; |
415 | req->cmd_flags |= REQ_DONTPREP; | ||
415 | 416 | ||
416 | return BLKPREP_OK; | 417 | return BLKPREP_OK; |
417 | }; | 418 | }; |
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c index 74f8cdeeff0f..4ccdd82b680f 100644 --- a/drivers/mmc/mmc_queue.c +++ b/drivers/mmc/mmc_queue.c | |||
@@ -28,7 +28,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
28 | struct mmc_queue *mq = q->queuedata; | 28 | struct mmc_queue *mq = q->queuedata; |
29 | int ret = BLKPREP_KILL; | 29 | int ret = BLKPREP_KILL; |
30 | 30 | ||
31 | if (req->flags & REQ_SPECIAL) { | 31 | if (blk_special_request(req)) { |
32 | /* | 32 | /* |
33 | * Special commands already have the command | 33 | * Special commands already have the command |
34 | * blocks already setup in req->special. | 34 | * blocks already setup in req->special. |
@@ -36,7 +36,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
36 | BUG_ON(!req->special); | 36 | BUG_ON(!req->special); |
37 | 37 | ||
38 | ret = BLKPREP_OK; | 38 | ret = BLKPREP_OK; |
39 | } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { | 39 | } else if (blk_fs_request(req) || blk_pc_request(req)) { |
40 | /* | 40 | /* |
41 | * Block I/O requests need translating according | 41 | * Block I/O requests need translating according |
42 | * to the protocol. | 42 | * to the protocol. |
@@ -50,7 +50,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | if (ret == BLKPREP_OK) | 52 | if (ret == BLKPREP_OK) |
53 | req->flags |= REQ_DONTPREP; | 53 | req->cmd_flags |= REQ_DONTPREP; |
54 | 54 | ||
55 | return ret; | 55 | return ret; |
56 | } | 56 | } |
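mmc_prep_request() shows the prep-function pattern after the split: classify the request with the new helpers, then record the result as a modifier bit. A self-contained sketch of that shape; example_prep_request() is hypothetical and the real translation work is elided:

    static int example_prep_request(struct request_queue *q, struct request *req)
    {
            int ret = BLKPREP_KILL;

            if (blk_special_request(req))
                    ret = BLKPREP_OK;       /* command block already in req->special */
            else if (blk_fs_request(req) || blk_pc_request(req))
                    ret = BLKPREP_OK;       /* block I/O: translate per protocol */

            if (ret == BLKPREP_OK)
                    req->cmd_flags |= REQ_DONTPREP; /* don't prep this one again */

            return ret;
    }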
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 458d3c8ae1ee..6baf5fe14230 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -46,7 +46,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
46 | nsect = req->current_nr_sectors; | 46 | nsect = req->current_nr_sectors; |
47 | buf = req->buffer; | 47 | buf = req->buffer; |
48 | 48 | ||
49 | if (!(req->flags & REQ_CMD)) | 49 | if (!blk_fs_request(req)) |
50 | return 0; | 50 | return 0; |
51 | 51 | ||
52 | if (block + nsect > get_capacity(req->rq_disk)) | 52 | if (block + nsect > get_capacity(req->rq_disk)) |
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 9d051e5687ea..222a8a71a5e8 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -529,7 +529,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) | |||
529 | } | 529 | } |
530 | cqr->retries = DIAG_MAX_RETRIES; | 530 | cqr->retries = DIAG_MAX_RETRIES; |
531 | cqr->buildclk = get_clock(); | 531 | cqr->buildclk = get_clock(); |
532 | if (req->flags & REQ_FAILFAST) | 532 | if (req->cmd_flags & REQ_FAILFAST) |
533 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 533 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
534 | cqr->device = device; | 534 | cqr->device = device; |
535 | cqr->expires = DIAG_TIMEOUT; | 535 | cqr->expires = DIAG_TIMEOUT; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index b7a7fac3f7c3..5ecea3e4fdef 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1266,7 +1266,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1266 | recid++; | 1266 | recid++; |
1267 | } | 1267 | } |
1268 | } | 1268 | } |
1269 | if (req->flags & REQ_FAILFAST) | 1269 | if (req->cmd_flags & REQ_FAILFAST) |
1270 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1270 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1271 | cqr->device = device; | 1271 | cqr->device = device; |
1272 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 1272 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index e85015be109b..80926c548228 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -344,7 +344,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
344 | recid++; | 344 | recid++; |
345 | } | 345 | } |
346 | } | 346 | } |
347 | if (req->flags & REQ_FAILFAST) | 347 | if (req->cmd_flags & REQ_FAILFAST) |
348 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 348 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
349 | cqr->device = device; | 349 | cqr->device = device; |
350 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 350 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ |
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index 5dcef48d414f..10353379a074 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c | |||
@@ -2862,7 +2862,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb) | |||
2862 | aic_dev->r_total++; | 2862 | aic_dev->r_total++; |
2863 | ptr = aic_dev->r_bins; | 2863 | ptr = aic_dev->r_bins; |
2864 | } | 2864 | } |
2865 | if(cmd->device->simple_tags && cmd->request->flags & REQ_HARDBARRIER) | 2865 | if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER) |
2866 | { | 2866 | { |
2867 | aic_dev->barrier_total++; | 2867 | aic_dev->barrier_total++; |
2868 | if(scb->tag_action == MSG_ORDERED_Q_TAG) | 2868 | if(scb->tag_action == MSG_ORDERED_Q_TAG) |
@@ -10158,7 +10158,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd, | |||
10158 | /* We always force TEST_UNIT_READY to untagged */ | 10158 | /* We always force TEST_UNIT_READY to untagged */ |
10159 | if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) | 10159 | if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) |
10160 | { | 10160 | { |
10161 | if (req->flags & REQ_HARDBARRIER) | 10161 | if (req->cmd_flags & REQ_HARDBARRIER) |
10162 | { | 10162 | { |
10163 | if(sdptr->ordered_tags) | 10163 | if(sdptr->ordered_tags) |
10164 | { | 10164 | { |
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 94d1de55607f..65b19695ebe2 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c | |||
@@ -344,7 +344,7 @@ static int idescsi_check_condition(ide_drive_t *drive, struct request *failed_co | |||
344 | pc->buffer = buf; | 344 | pc->buffer = buf; |
345 | pc->c[0] = REQUEST_SENSE; | 345 | pc->c[0] = REQUEST_SENSE; |
346 | pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE; | 346 | pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE; |
347 | rq->flags = REQ_SENSE; | 347 | rq->cmd_type = REQ_TYPE_SENSE; |
348 | pc->timeout = jiffies + WAIT_READY; | 348 | pc->timeout = jiffies + WAIT_READY; |
349 | /* NOTE! Save the failed packet command in "rq->buffer" */ | 349 | /* NOTE! Save the failed packet command in "rq->buffer" */ |
350 | rq->buffer = (void *) failed_command->special; | 350 | rq->buffer = (void *) failed_command->special; |
@@ -398,12 +398,12 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs) | |||
398 | int errors = rq->errors; | 398 | int errors = rq->errors; |
399 | unsigned long flags; | 399 | unsigned long flags; |
400 | 400 | ||
401 | if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) { | 401 | if (!blk_special_request(rq) && !blk_sense_request(rq)) { |
402 | ide_end_request(drive, uptodate, nrsecs); | 402 | ide_end_request(drive, uptodate, nrsecs); |
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | ide_end_drive_cmd (drive, 0, 0); | 405 | ide_end_drive_cmd (drive, 0, 0); |
406 | if (rq->flags & REQ_SENSE) { | 406 | if (blk_sense_request(rq)) { |
407 | idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer; | 407 | idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer; |
408 | if (log) { | 408 | if (log) { |
409 | printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number); | 409 | printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number); |
@@ -712,7 +712,7 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r | |||
712 | printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); | 712 | printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); |
713 | #endif /* IDESCSI_DEBUG_LOG */ | 713 | #endif /* IDESCSI_DEBUG_LOG */ |
714 | 714 | ||
715 | if (rq->flags & (REQ_SPECIAL|REQ_SENSE)) { | 715 | if (blk_sense_request(rq) || blk_special_request(rq)) { |
716 | return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special); | 716 | return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special); |
717 | } | 717 | } |
718 | blk_dump_rq_flags(rq, "ide-scsi: unsup command"); | 718 | blk_dump_rq_flags(rq, "ide-scsi: unsup command"); |
@@ -938,7 +938,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd, | |||
938 | 938 | ||
939 | ide_init_drive_cmd (rq); | 939 | ide_init_drive_cmd (rq); |
940 | rq->special = (char *) pc; | 940 | rq->special = (char *) pc; |
941 | rq->flags = REQ_SPECIAL; | 941 | rq->cmd_type = REQ_TYPE_SPECIAL; |
942 | spin_unlock_irq(host->host_lock); | 942 | spin_unlock_irq(host->host_lock); |
943 | rq->rq_disk = scsi->disk; | 943 | rq->rq_disk = scsi->disk; |
944 | (void) ide_do_drive_cmd (drive, rq, ide_end); | 944 | (void) ide_do_drive_cmd (drive, rq, ide_end); |
@@ -992,7 +992,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd) | |||
992 | */ | 992 | */ |
993 | printk (KERN_ERR "ide-scsi: cmd aborted!\n"); | 993 | printk (KERN_ERR "ide-scsi: cmd aborted!\n"); |
994 | 994 | ||
995 | if (scsi->pc->rq->flags & REQ_SENSE) | 995 | if (blk_sense_request(scsi->pc->rq)) |
996 | kfree(scsi->pc->buffer); | 996 | kfree(scsi->pc->buffer); |
997 | kfree(scsi->pc->rq); | 997 | kfree(scsi->pc->rq); |
998 | kfree(scsi->pc); | 998 | kfree(scsi->pc); |
@@ -1042,7 +1042,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) | |||
1042 | /* kill current request */ | 1042 | /* kill current request */ |
1043 | blkdev_dequeue_request(req); | 1043 | blkdev_dequeue_request(req); |
1044 | end_that_request_last(req, 0); | 1044 | end_that_request_last(req, 0); |
1045 | if (req->flags & REQ_SENSE) | 1045 | if (blk_sense_request(req)) |
1046 | kfree(scsi->pc->buffer); | 1046 | kfree(scsi->pc->buffer); |
1047 | kfree(scsi->pc); | 1047 | kfree(scsi->pc); |
1048 | scsi->pc = NULL; | 1048 | scsi->pc = NULL; |
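ide-scsi replaces its combined mask tests with the type helpers; note how negating the old REQ_SPECIAL|REQ_SENSE mask turns into two negated helper calls by De Morgan's law:

    /* before: one mask covered both request kinds */
    if (!(rq->flags & (REQ_SPECIAL | REQ_SENSE)))

    /* after: two type tests, De Morgan applied */
    if (!blk_special_request(rq) && !blk_sense_request(rq))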
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c index 0bd9c60e6455..aa60a5f1fbc3 100644 --- a/drivers/scsi/pluto.c +++ b/drivers/scsi/pluto.c | |||
@@ -67,7 +67,6 @@ static void __init pluto_detect_done(Scsi_Cmnd *SCpnt) | |||
67 | 67 | ||
68 | static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) | 68 | static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) |
69 | { | 69 | { |
70 | SCpnt->request->rq_status = RQ_SCSI_DONE; | ||
71 | PLND(("Detect done %08lx\n", (long)SCpnt)) | 70 | PLND(("Detect done %08lx\n", (long)SCpnt)) |
72 | if (atomic_dec_and_test (&fcss)) | 71 | if (atomic_dec_and_test (&fcss)) |
73 | up(&fc_sem); | 72 | up(&fc_sem); |
@@ -166,7 +165,7 @@ int __init pluto_detect(struct scsi_host_template *tpnt) | |||
166 | 165 | ||
167 | SCpnt->cmd_len = COMMAND_SIZE(INQUIRY); | 166 | SCpnt->cmd_len = COMMAND_SIZE(INQUIRY); |
168 | 167 | ||
169 | SCpnt->request->rq_status = RQ_SCSI_BUSY; | 168 | SCpnt->request->cmd_flags &= ~REQ_STARTED; |
170 | 169 | ||
171 | SCpnt->done = pluto_detect_done; | 170 | SCpnt->done = pluto_detect_done; |
172 | SCpnt->request_bufflen = 256; | 171 | SCpnt->request_bufflen = 256; |
@@ -178,7 +177,8 @@ int __init pluto_detect(struct scsi_host_template *tpnt) | |||
178 | for (retry = 0; retry < 5; retry++) { | 177 | for (retry = 0; retry < 5; retry++) { |
179 | for (i = 0; i < fcscount; i++) { | 178 | for (i = 0; i < fcscount; i++) { |
180 | if (!fcs[i].fc) break; | 179 | if (!fcs[i].fc) break; |
181 | if (fcs[i].cmd.request->rq_status != RQ_SCSI_DONE) { | 180 | if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) { |
181 | fcs[i].cmd.request->cmd_flags |= REQ_STARTED; | ||
182 | disable_irq(fcs[i].fc->irq); | 182 | disable_irq(fcs[i].fc->irq); |
183 | PLND(("queuecommand %d %d\n", retry, i)) | 183 | PLND(("queuecommand %d %d\n", retry, i)) |
184 | fcp_scsi_queuecommand (&(fcs[i].cmd), | 184 | fcp_scsi_queuecommand (&(fcs[i].cmd), |
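pluto.c is the one non-mechanical conversion in the patch: the request's rq_status field (RQ_SCSI_BUSY / RQ_SCSI_DONE) goes away entirely, so the driver repurposes the REQ_STARTED modifier bit as its per-command progress marker. A sketch of the resulting detect-loop idiom, using the names from the hunks above:

    SCpnt->request->cmd_flags &= ~REQ_STARTED;  /* was: rq_status = RQ_SCSI_BUSY */

    /* ... later, in the retry loop ... */
    if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) {
            fcs[i].cmd.request->cmd_flags |= REQ_STARTED;   /* claim before reissue */
            /* (re)queue the command, as in the hunk above */
    }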
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d6743b959a72..71084728eb42 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -82,7 +82,7 @@ static void scsi_unprep_request(struct request *req) | |||
82 | { | 82 | { |
83 | struct scsi_cmnd *cmd = req->special; | 83 | struct scsi_cmnd *cmd = req->special; |
84 | 84 | ||
85 | req->flags &= ~REQ_DONTPREP; | 85 | req->cmd_flags &= ~REQ_DONTPREP; |
86 | req->special = NULL; | 86 | req->special = NULL; |
87 | 87 | ||
88 | scsi_put_command(cmd); | 88 | scsi_put_command(cmd); |
@@ -196,7 +196,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, | |||
196 | req->sense_len = 0; | 196 | req->sense_len = 0; |
197 | req->retries = retries; | 197 | req->retries = retries; |
198 | req->timeout = timeout; | 198 | req->timeout = timeout; |
199 | req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET; | 199 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
200 | req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; | ||
200 | 201 | ||
201 | /* | 202 | /* |
202 | * head injection *required* here otherwise quiesce won't work | 203 | * head injection *required* here otherwise quiesce won't work |
@@ -397,7 +398,8 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd, | |||
397 | req = blk_get_request(sdev->request_queue, write, gfp); | 398 | req = blk_get_request(sdev->request_queue, write, gfp); |
398 | if (!req) | 399 | if (!req) |
399 | goto free_sense; | 400 | goto free_sense; |
400 | req->flags |= REQ_BLOCK_PC | REQ_QUIET; | 401 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
402 | req->cmd_flags |= REQ_QUIET; | ||
401 | 403 | ||
402 | if (use_sg) | 404 | if (use_sg) |
403 | err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); | 405 | err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); |
@@ -933,7 +935,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
933 | break; | 935 | break; |
934 | } | 936 | } |
935 | } | 937 | } |
936 | if (!(req->flags & REQ_QUIET)) { | 938 | if (!(req->cmd_flags & REQ_QUIET)) { |
937 | scmd_printk(KERN_INFO, cmd, | 939 | scmd_printk(KERN_INFO, cmd, |
938 | "Device not ready: "); | 940 | "Device not ready: "); |
939 | scsi_print_sense_hdr("", &sshdr); | 941 | scsi_print_sense_hdr("", &sshdr); |
@@ -941,7 +943,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
941 | scsi_end_request(cmd, 0, this_count, 1); | 943 | scsi_end_request(cmd, 0, this_count, 1); |
942 | return; | 944 | return; |
943 | case VOLUME_OVERFLOW: | 945 | case VOLUME_OVERFLOW: |
944 | if (!(req->flags & REQ_QUIET)) { | 946 | if (!(req->cmd_flags & REQ_QUIET)) { |
945 | scmd_printk(KERN_INFO, cmd, | 947 | scmd_printk(KERN_INFO, cmd, |
946 | "Volume overflow, CDB: "); | 948 | "Volume overflow, CDB: "); |
947 | __scsi_print_command(cmd->cmnd); | 949 | __scsi_print_command(cmd->cmnd); |
@@ -963,7 +965,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
963 | return; | 965 | return; |
964 | } | 966 | } |
965 | if (result) { | 967 | if (result) { |
966 | if (!(req->flags & REQ_QUIET)) { | 968 | if (!(req->cmd_flags & REQ_QUIET)) { |
967 | scmd_printk(KERN_INFO, cmd, | 969 | scmd_printk(KERN_INFO, cmd, |
968 | "SCSI error: return code = 0x%08x\n", | 970 | "SCSI error: return code = 0x%08x\n", |
969 | result); | 971 | result); |
@@ -995,7 +997,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd) | |||
995 | /* | 997 | /* |
996 | * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer | 998 | * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer |
997 | */ | 999 | */ |
998 | if ((req->flags & REQ_BLOCK_PC) && !req->bio) { | 1000 | if (blk_pc_request(req) && !req->bio) { |
999 | cmd->request_bufflen = req->data_len; | 1001 | cmd->request_bufflen = req->data_len; |
1000 | cmd->request_buffer = req->data; | 1002 | cmd->request_buffer = req->data; |
1001 | req->buffer = req->data; | 1003 | req->buffer = req->data; |
@@ -1139,13 +1141,12 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1139 | * these two cases differently. We differentiate by looking | 1141 | * these two cases differently. We differentiate by looking |
1140 | * at request->cmd, as this tells us the real story. | 1142 | * at request->cmd, as this tells us the real story. |
1141 | */ | 1143 | */ |
1142 | if (req->flags & REQ_SPECIAL && req->special) { | 1144 | if (blk_special_request(req) && req->special) |
1143 | cmd = req->special; | 1145 | cmd = req->special; |
1144 | } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { | 1146 | else if (blk_pc_request(req) || blk_fs_request(req)) { |
1145 | 1147 | if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){ | |
1146 | if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) { | 1148 | if (specials_only == SDEV_QUIESCE || |
1147 | if(specials_only == SDEV_QUIESCE || | 1149 | specials_only == SDEV_BLOCK) |
1148 | specials_only == SDEV_BLOCK) | ||
1149 | goto defer; | 1150 | goto defer; |
1150 | 1151 | ||
1151 | sdev_printk(KERN_ERR, sdev, | 1152 | sdev_printk(KERN_ERR, sdev, |
@@ -1153,7 +1154,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1153 | goto kill; | 1154 | goto kill; |
1154 | } | 1155 | } |
1155 | 1156 | ||
1156 | |||
1157 | /* | 1157 | /* |
1158 | * Now try and find a command block that we can use. | 1158 | * Now try and find a command block that we can use. |
1159 | */ | 1159 | */ |
@@ -1184,7 +1184,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1184 | * lock. We hope REQ_STARTED prevents anything untoward from | 1184 | * lock. We hope REQ_STARTED prevents anything untoward from |
1185 | * happening now. | 1185 | * happening now. |
1186 | */ | 1186 | */ |
1187 | if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { | 1187 | if (blk_fs_request(req) || blk_pc_request(req)) { |
1188 | int ret; | 1188 | int ret; |
1189 | 1189 | ||
1190 | /* | 1190 | /* |
@@ -1216,7 +1216,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1216 | /* | 1216 | /* |
1217 | * Initialize the actual SCSI command for this request. | 1217 | * Initialize the actual SCSI command for this request. |
1218 | */ | 1218 | */ |
1219 | if (req->flags & REQ_BLOCK_PC) { | 1219 | if (blk_pc_request(req)) { |
1220 | scsi_setup_blk_pc_cmnd(cmd); | 1220 | scsi_setup_blk_pc_cmnd(cmd); |
1221 | } else if (req->rq_disk) { | 1221 | } else if (req->rq_disk) { |
1222 | struct scsi_driver *drv; | 1222 | struct scsi_driver *drv; |
@@ -1233,7 +1233,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1233 | /* | 1233 | /* |
1234 | * The request is now prepped, no need to come back here | 1234 | * The request is now prepped, no need to come back here |
1235 | */ | 1235 | */ |
1236 | req->flags |= REQ_DONTPREP; | 1236 | req->cmd_flags |= REQ_DONTPREP; |
1237 | return BLKPREP_OK; | 1237 | return BLKPREP_OK; |
1238 | 1238 | ||
1239 | defer: | 1239 | defer: |
@@ -1454,8 +1454,9 @@ static void scsi_request_fn(struct request_queue *q) | |||
1454 | if (unlikely(cmd == NULL)) { | 1454 | if (unlikely(cmd == NULL)) { |
1455 | printk(KERN_CRIT "impossible request in %s.\n" | 1455 | printk(KERN_CRIT "impossible request in %s.\n" |
1456 | "please mail a stack trace to " | 1456 | "please mail a stack trace to " |
1457 | "linux-scsi@vger.kernel.org", | 1457 | "linux-scsi@vger.kernel.org\n", |
1458 | __FUNCTION__); | 1458 | __FUNCTION__); |
1459 | blk_dump_rq_flags(req, "foo"); | ||
1459 | BUG(); | 1460 | BUG(); |
1460 | } | 1461 | } |
1461 | spin_lock(shost->host_lock); | 1462 | spin_lock(shost->host_lock); |
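scsi_lib.c carries the only real semantic substitution: scsi_execute() used to set REQ_SPECIAL so its commands could pass a quiesced device, but REQ_SPECIAL is now a type (and these requests are REQ_TYPE_BLOCK_PC), so the REQ_PREEMPT modifier takes over that role and scsi_prep_fn() gates on it instead. A sketch of the pairing:

    /* issuing side, as in scsi_execute(): */
    req->cmd_type   = REQ_TYPE_BLOCK_PC;
    req->cmd_flags |= REQ_QUIET | REQ_PREEMPT;  /* may pass a quiesced device */

    /* prep side, as in scsi_prep_fn(): */
    if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT))
            /* defer (SDEV_QUIESCE/SDEV_BLOCK) or kill the request */ ;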
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 638cff41d436..10bc99c911fa 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -443,8 +443,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt) | |||
443 | SCpnt->cmnd[0] = READ_6; | 443 | SCpnt->cmnd[0] = READ_6; |
444 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; | 444 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; |
445 | } else { | 445 | } else { |
446 | printk(KERN_ERR "sd: Unknown command %lx\n", rq->flags); | 446 | printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags); |
447 | /* overkill panic("Unknown sd command %lx\n", rq->flags); */ | ||
448 | return 0; | 447 | return 0; |
449 | } | 448 | } |
450 | 449 | ||
@@ -840,7 +839,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector) | |||
840 | static void sd_prepare_flush(request_queue_t *q, struct request *rq) | 839 | static void sd_prepare_flush(request_queue_t *q, struct request *rq) |
841 | { | 840 | { |
842 | memset(rq->cmd, 0, sizeof(rq->cmd)); | 841 | memset(rq->cmd, 0, sizeof(rq->cmd)); |
843 | rq->flags |= REQ_BLOCK_PC; | 842 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
844 | rq->timeout = SD_TIMEOUT; | 843 | rq->timeout = SD_TIMEOUT; |
845 | rq->cmd[0] = SYNCHRONIZE_CACHE; | 844 | rq->cmd[0] = SYNCHRONIZE_CACHE; |
846 | rq->cmd_len = 10; | 845 | rq->cmd_len = 10; |
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 2f8073b73bf3..7f9bcef6adfa 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
@@ -2017,7 +2017,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
2017 | if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done | 2017 | if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done |
2018 | != cmd)) | 2018 | != cmd)) |
2019 | { | 2019 | { |
2020 | if(cmd->request->flags & REQ_CMD) { | 2020 | if(blk_fs_request(cmd->request)) { |
2021 | sun3scsi_dma_setup(d, count, | 2021 | sun3scsi_dma_setup(d, count, |
2022 | rq_data_dir(cmd->request)); | 2022 | rq_data_dir(cmd->request)); |
2023 | sun3_dma_setup_done = cmd; | 2023 | sun3_dma_setup_done = cmd; |
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 837173415d4c..44a99aeb8180 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c | |||
@@ -524,7 +524,7 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) | |||
524 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, | 524 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, |
525 | int write_flag) | 525 | int write_flag) |
526 | { | 526 | { |
527 | if(cmd->request->flags & REQ_CMD) | 527 | if(blk_fs_request(cmd->request)) |
528 | return wanted; | 528 | return wanted; |
529 | else | 529 | else |
530 | return 0; | 530 | return 0; |
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 008a82ab8521..f5742b84b27a 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c | |||
@@ -458,7 +458,7 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) | |||
458 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, | 458 | static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, |
459 | int write_flag) | 459 | int write_flag) |
460 | { | 460 | { |
461 | if(cmd->request->flags & REQ_CMD) | 461 | if(blk_fs_request(cmd->request)) |
462 | return wanted; | 462 | return wanted; |
463 | else | 463 | else |
464 | return 0; | 464 | return 0; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cfde8b3ee919..b2a412cf468f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -120,6 +120,86 @@ struct request_list { | |||
120 | wait_queue_head_t wait[2]; | 120 | wait_queue_head_t wait[2]; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | /* | ||
124 | * request command types | ||
125 | */ | ||
126 | enum rq_cmd_type_bits { | ||
127 | REQ_TYPE_FS = 1, /* fs request */ | ||
128 | REQ_TYPE_BLOCK_PC, /* scsi command */ | ||
129 | REQ_TYPE_SENSE, /* sense request */ | ||
130 | REQ_TYPE_PM_SUSPEND, /* suspend request */ | ||
131 | REQ_TYPE_PM_RESUME, /* resume request */ | ||
132 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ | ||
133 | REQ_TYPE_FLUSH, /* flush request */ | ||
134 | REQ_TYPE_SPECIAL, /* driver defined type */ | ||
135 | REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ | ||
136 | /* | ||
137 | * for ATA/ATAPI devices. this really doesn't belong here, ide should | ||
138 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver | ||
139 | * private REQ_LB opcodes to differentiate what type of request this is | ||
140 | */ | ||
141 | REQ_TYPE_ATA_CMD, | ||
142 | REQ_TYPE_ATA_TASK, | ||
143 | REQ_TYPE_ATA_TASKFILE, | ||
144 | }; | ||
145 | |||
146 | /* | ||
147 | * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being | ||
148 | * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a | ||
149 | * SCSI cdb). | ||
150 | * | ||
151 | * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, | ||
152 | * typically to differentiate REQ_TYPE_SPECIAL requests. | ||
153 | * | ||
154 | */ | ||
155 | enum { | ||
156 | /* | ||
157 | * just examples for now | ||
158 | */ | ||
159 | REQ_LB_OP_EJECT = 0x40, /* eject request */ | ||
160 | REQ_LB_OP_FLUSH = 0x41, /* flush device */ | ||
161 | }; | ||
162 | |||
163 | /* | ||
164 | * request type modified bits. first three bits match BIO_RW* bits, important | ||
165 | */ | ||
166 | enum rq_flag_bits { | ||
167 | __REQ_RW, /* not set, read. set, write */ | ||
168 | __REQ_FAILFAST, /* no low level driver retries */ | ||
169 | __REQ_SORTED, /* elevator knows about this request */ | ||
170 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
171 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
172 | __REQ_FUA, /* forced unit access */ | ||
173 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
174 | __REQ_STARTED, /* drive already may have started this one */ | ||
175 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
176 | __REQ_QUEUED, /* uses queueing */ | ||
177 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
178 | __REQ_FAILED, /* set if the request failed */ | ||
179 | __REQ_QUIET, /* don't worry about errors */ | ||
180 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
181 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
182 | __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ | ||
183 | __REQ_NR_BITS, /* stops here */ | ||
184 | }; | ||
185 | |||
186 | #define REQ_RW (1 << __REQ_RW) | ||
187 | #define REQ_FAILFAST (1 << __REQ_FAILFAST) | ||
188 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
189 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
190 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
191 | #define REQ_FUA (1 << __REQ_FUA) | ||
192 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
193 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
194 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
195 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
196 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
197 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
198 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
199 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
200 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
201 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) | ||
202 | |||
123 | #define BLK_MAX_CDB 16 | 203 | #define BLK_MAX_CDB 16 |
124 | 204 | ||
125 | /* | 205 | /* |
@@ -129,7 +209,8 @@ struct request { | |||
129 | struct list_head queuelist; | 209 | struct list_head queuelist; |
130 | struct list_head donelist; | 210 | struct list_head donelist; |
131 | 211 | ||
132 | unsigned long flags; /* see REQ_ bits below */ | 212 | unsigned int cmd_flags; |
213 | enum rq_cmd_type_bits cmd_type; | ||
133 | 214 | ||
134 | /* Maintain bio traversal state for part by part I/O submission. | 215 | /* Maintain bio traversal state for part by part I/O submission. |
135 | * hard_* are block layer internals, no driver should touch them! | 216 | * hard_* are block layer internals, no driver should touch them! |
@@ -202,73 +283,7 @@ struct request { | |||
202 | }; | 283 | }; |
203 | 284 | ||
204 | /* | 285 | /* |
205 | * first three bits match BIO_RW* bits, important | 286 | * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME |
206 | */ | ||
207 | enum rq_flag_bits { | ||
208 | __REQ_RW, /* not set, read. set, write */ | ||
209 | __REQ_FAILFAST, /* no low level driver retries */ | ||
210 | __REQ_SORTED, /* elevator knows about this request */ | ||
211 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
212 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
213 | __REQ_FUA, /* forced unit access */ | ||
214 | __REQ_CMD, /* is a regular fs rw request */ | ||
215 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
216 | __REQ_STARTED, /* drive already may have started this one */ | ||
217 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
218 | __REQ_QUEUED, /* uses queueing */ | ||
219 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
220 | /* | ||
221 | * for ATA/ATAPI devices | ||
222 | */ | ||
223 | __REQ_PC, /* packet command (special) */ | ||
224 | __REQ_BLOCK_PC, /* queued down pc from block layer */ | ||
225 | __REQ_SENSE, /* sense retrival */ | ||
226 | |||
227 | __REQ_FAILED, /* set if the request failed */ | ||
228 | __REQ_QUIET, /* don't worry about errors */ | ||
229 | __REQ_SPECIAL, /* driver suplied command */ | ||
230 | __REQ_DRIVE_CMD, | ||
231 | __REQ_DRIVE_TASK, | ||
232 | __REQ_DRIVE_TASKFILE, | ||
233 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
234 | __REQ_PM_SUSPEND, /* suspend request */ | ||
235 | __REQ_PM_RESUME, /* resume request */ | ||
236 | __REQ_PM_SHUTDOWN, /* shutdown request */ | ||
237 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
238 | __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ | ||
239 | __REQ_NR_BITS, /* stops here */ | ||
240 | }; | ||
241 | |||
242 | #define REQ_RW (1 << __REQ_RW) | ||
243 | #define REQ_FAILFAST (1 << __REQ_FAILFAST) | ||
244 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
245 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
246 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
247 | #define REQ_FUA (1 << __REQ_FUA) | ||
248 | #define REQ_CMD (1 << __REQ_CMD) | ||
249 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
250 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
251 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
252 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
253 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
254 | #define REQ_PC (1 << __REQ_PC) | ||
255 | #define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC) | ||
256 | #define REQ_SENSE (1 << __REQ_SENSE) | ||
257 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
258 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
259 | #define REQ_SPECIAL (1 << __REQ_SPECIAL) | ||
260 | #define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD) | ||
261 | #define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK) | ||
262 | #define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE) | ||
263 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
264 | #define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND) | ||
265 | #define REQ_PM_RESUME (1 << __REQ_PM_RESUME) | ||
266 | #define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) | ||
267 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
268 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) | ||
269 | |||
270 | /* | ||
271 | * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME | ||
272 | * requests. Some step values could eventually be made generic. | 287 | * requests. Some step values could eventually be made generic. |
273 | */ | 288 | */ |
274 | struct request_pm_state | 289 | struct request_pm_state |
@@ -490,25 +505,28 @@ enum { | |||
490 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 505 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
491 | #define blk_queue_flushing(q) ((q)->ordseq) | 506 | #define blk_queue_flushing(q) ((q)->ordseq) |
492 | 507 | ||
493 | #define blk_fs_request(rq) ((rq)->flags & REQ_CMD) | 508 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) |
494 | #define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) | 509 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) |
495 | #define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST) | 510 | #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) |
496 | #define blk_rq_started(rq) ((rq)->flags & REQ_STARTED) | 511 | #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) |
512 | |||
513 | #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) | ||
514 | #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) | ||
497 | 515 | ||
498 | #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) | 516 | #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) |
499 | 517 | ||
500 | #define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND) | 518 | #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) |
501 | #define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME) | 519 | #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) |
502 | #define blk_pm_request(rq) \ | 520 | #define blk_pm_request(rq) \ |
503 | ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME)) | 521 | (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) |
504 | 522 | ||
505 | #define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) | 523 | #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) |
506 | #define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) | 524 | #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) |
507 | #define blk_fua_rq(rq) ((rq)->flags & REQ_FUA) | 525 | #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) |
508 | 526 | ||
509 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) | 527 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
510 | 528 | ||
511 | #define rq_data_dir(rq) ((rq)->flags & 1) | 529 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) |
512 | 530 | ||
513 | static inline int blk_queue_full(struct request_queue *q, int rw) | 531 | static inline int blk_queue_full(struct request_queue *q, int rw) |
514 | { | 532 | { |
@@ -541,7 +559,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw) | |||
541 | #define RQ_NOMERGE_FLAGS \ | 559 | #define RQ_NOMERGE_FLAGS \ |
542 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) | 560 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) |
543 | #define rq_mergeable(rq) \ | 561 | #define rq_mergeable(rq) \ |
544 | (!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) | 562 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) |
545 | 563 | ||
546 | /* | 564 | /* |
547 | * noop, requests are automagically marked as active/inactive by I/O | 565 | * noop, requests are automagically marked as active/inactive by I/O |
@@ -737,7 +755,7 @@ extern void blk_put_queue(request_queue_t *); | |||
737 | */ | 755 | */ |
738 | #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) | 756 | #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) |
739 | #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) | 757 | #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) |
740 | #define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED) | 758 | #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) |
741 | extern int blk_queue_start_tag(request_queue_t *, struct request *); | 759 | extern int blk_queue_start_tag(request_queue_t *, struct request *); |
742 | extern struct request *blk_queue_find_tag(request_queue_t *, int); | 760 | extern struct request *blk_queue_find_tag(request_queue_t *, int); |
743 | extern void blk_queue_end_tag(request_queue_t *, struct request *); | 761 | extern void blk_queue_end_tag(request_queue_t *, struct request *); |
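With the header hunk in place, struct request carries an unsigned int cmd_flags plus an enum rq_cmd_type_bits cmd_type, and every classification goes through a helper. A hedged driver-side sketch of reading the split fields; example_classify() is hypothetical:

    static void example_classify(struct request *rq)
    {
            if (blk_fs_request(rq))                 /* rq->cmd_type == REQ_TYPE_FS */
                    printk(KERN_DEBUG "fs %s request\n",
                           rq_data_dir(rq) == READ ? "read" : "write");
            else if (blk_pc_request(rq))            /* SCSI cdb lives in rq->cmd[] */
                    printk(KERN_DEBUG "packet command 0x%02x\n", rq->cmd[0]);

            if (rq->cmd_flags & REQ_NOMERGE)        /* a modifier, never a type */
                    printk(KERN_DEBUG "request is not mergeable\n");
    }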
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7520cc1ff9e2..ea48eb1b3fd3 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -148,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
148 | u32 what) | 148 | u32 what) |
149 | { | 149 | { |
150 | struct blk_trace *bt = q->blk_trace; | 150 | struct blk_trace *bt = q->blk_trace; |
151 | int rw = rq->flags & 0x03; | 151 | int rw = rq->cmd_flags & 0x03; |
152 | 152 | ||
153 | if (likely(!bt)) | 153 | if (likely(!bt)) |
154 | return; | 154 | return; |
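blktrace can keep masking the low bits of the flags word only because the new enum deliberately keeps __REQ_RW and __REQ_FAILFAST at the front, aligned with the BIO_RW* layout, per the comment in the blkdev.h hunk. In short:

    int rw = rq->cmd_flags & 0x03;  /* valid only while the leading rq_flag_bits
                                     * stay aligned with the BIO_RW* bits */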
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index d04d05adfa9b..bbf66219b769 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h | |||
@@ -100,7 +100,7 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) | |||
100 | struct scsi_device *sdev = cmd->device; | 100 | struct scsi_device *sdev = cmd->device; |
101 | 101 | ||
102 | if (blk_rq_tagged(req)) { | 102 | if (blk_rq_tagged(req)) { |
103 | if (sdev->ordered_tags && req->flags & REQ_HARDBARRIER) | 103 | if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER) |
104 | *msg++ = MSG_ORDERED_TAG; | 104 | *msg++ = MSG_ORDERED_TAG; |
105 | else | 105 | else |
106 | *msg++ = MSG_SIMPLE_TAG; | 106 | *msg++ = MSG_SIMPLE_TAG; |