author:    Jens Axboe <axboe@suse.de>  2006-08-10 02:44:47 -0400
committer: Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:23:37 -0400
commit:    4aff5e2333c9a1609662f2091f55c3f6fffdad36
tree:      b73d8c2b7c1bdc03d3313c108da7dfc95ee95525 /block/ll_rw_blk.c
parent:    77ed74da26f50fa28471571ee7a2251b77526d84
[PATCH] Split struct request ->flags into two parts
Right now ->flags is a bit of a mess: some are request types, and others are just modifiers. Clean this up by splitting it into ->cmd_type and ->cmd_flags. This allows introduction of generic Linux block message types, useful for sending generic Linux commands to block devices.

Signed-off-by: Jens Axboe <axboe@suse.de>
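To make the effect on callers concrete, here is a minimal sketch (not part of this patch, and example_dispatch() is hypothetical) of how a driver-side check changes under the split. It assumes the companion blkdev.h update introduces the REQ_TYPE_* values and retargets helpers such as blk_pc_request() at ->cmd_type, which the hunks below already rely on (REQ_TYPE_FS, REQ_TYPE_SPECIAL, blk_pc_request()).

#include <linux/blkdev.h>

/*
 * Hypothetical driver-side dispatch, before vs. after the split.
 * Before, one word held both the type and the modifier bits:
 *
 *	if (rq->flags & REQ_CMD)	-> filesystem request
 *	if (rq->flags & REQ_NOMERGE)	-> modifier
 *
 * After, the type lives in ->cmd_type and modifiers stay a bitmask
 * in ->cmd_flags.
 */
static void example_dispatch(struct request *rq)
{
	if (rq->cmd_type == REQ_TYPE_FS) {
		/* regular filesystem I/O; modifier bits keep their meaning */
		if (rq->cmd_flags & REQ_NOMERGE)
			return;
	} else if (blk_pc_request(rq)) {
		/* packet command, formerly the REQ_BLOCK_PC bit in ->flags */
	}
}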
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  101
1 file changed, 35 insertions, 66 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 51dc0edf76e..9b91bb70c5e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;
 
-	if ((rq->flags & REQ_ORDERED_COLOR) ==
-	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
 	else
 		return QUEUE_ORDSEQ_DONE;
@@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->flags = REQ_HARDBARRIER;
 	rq->elevator_private = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
@@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
 	rq_init(q, rq);
-	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	}
 
 	list_del_init(&rq->queuelist);
-	rq->flags &= ~REQ_QUEUED;
+	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;
 
-	if (unlikely((rq->flags & REQ_QUEUED))) {
+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
@@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 
 	__set_bit(tag, bqt->tag_map);
 
-	rq->flags |= REQ_QUEUED;
+	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
@@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			printk(KERN_ERR
 			       "%s: bad tag found on list\n", __FUNCTION__);
 			list_del_init(&rq->queuelist);
-			rq->flags &= ~REQ_QUEUED;
+			rq->cmd_flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);
 
-		rq->flags &= ~REQ_STARTED;
+		rq->cmd_flags &= ~REQ_STARTED;
 		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 	}
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static const char * const rq_flags[] = {
-	"REQ_RW",
-	"REQ_FAILFAST",
-	"REQ_SORTED",
-	"REQ_SOFTBARRIER",
-	"REQ_HARDBARRIER",
-	"REQ_FUA",
-	"REQ_CMD",
-	"REQ_NOMERGE",
-	"REQ_STARTED",
-	"REQ_DONTPREP",
-	"REQ_QUEUED",
-	"REQ_ELVPRIV",
-	"REQ_PC",
-	"REQ_BLOCK_PC",
-	"REQ_SENSE",
-	"REQ_FAILED",
-	"REQ_QUIET",
-	"REQ_SPECIAL",
-	"REQ_DRIVE_CMD",
-	"REQ_DRIVE_TASK",
-	"REQ_DRIVE_TASKFILE",
-	"REQ_PREEMPT",
-	"REQ_PM_SUSPEND",
-	"REQ_PM_RESUME",
-	"REQ_PM_SHUTDOWN",
-	"REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: flags = ", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?");
-	bit = 0;
-	do {
-		if (rq->flags & (1 << bit))
-			printk("%s ", rq_flags[bit]);
-		bit++;
-	} while (bit < __REQ_NR_BITS);
+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);
 
 	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 						rq->nr_sectors,
 						rq->current_nr_sectors);
 	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+	if (blk_pc_request(rq)) {
 		printk("cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
 	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 
 	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
@@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 		return NULL;
 
 	/*
-	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
 	 */
-	rq->flags = rw;
+	rq->cmd_flags = rw;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
-		rq->flags |= REQ_ELVPRIV;
+		rq->cmd_flags |= REQ_ELVPRIV;
 	}
 
 	return rq;
@@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
 	 * barrier
 	 */
-	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_SOFTBARRIER;
 
 	rq->special = data;
 
@@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->flags |= REQ_NOMERGE;
+	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
@@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-		int priv = req->flags & REQ_ELVPRIV;
+		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 
@@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->flags |= REQ_CMD;
+	req->cmd_type = REQ_TYPE_FS;
 
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
 	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
+		req->cmd_flags |= REQ_FAILFAST;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
 	if (unlikely(bio_barrier(bio)))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	if (bio_sync(bio))
-		req->flags |= REQ_RW_SYNC;
+		req->cmd_flags |= REQ_RW_SYNC;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 		req->errors = 0;
 
 	if (!uptodate) {
-		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
@@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first two bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 3);
+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= (bio->bi_rw & 3);
 
 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);