 Documentation/block/biodoc.txt |  4
 block/blk-core.c               | 60
 block/blk-flush.c              |  2
 block/blk-lib.c                |  2
 block/blk-map.c                |  2
 block/blk-mq.c                 | 28
 block/cfq-iosched.c            | 66
 block/elevator.c               |  4
 drivers/md/dm-crypt.c          |  2
 drivers/scsi/sd.c              |  3
 fs/btrfs/inode.c               |  5
 fs/buffer.c                    |  2
 fs/f2fs/f2fs.h                 |  2
 fs/gfs2/lops.c                 |  2
 include/linux/blk-cgroup.h     | 11
 include/linux/blk_types.h      | 83
 include/linux/blkdev.h         | 26
 include/linux/blktrace_api.h   |  2
 include/linux/dm-io.h          |  2
 include/linux/elevator.h       |  4
 include/trace/events/bcache.h  | 12
 include/trace/events/block.h   | 31
 kernel/trace/blktrace.c        | 14
 23 files changed, 148 insertions(+), 221 deletions(-)
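
(Editor's note: the short C sketches interspersed between the file sections below are illustrative additions, not part of the patch.) Taken together, the hunks replace the old split encoding -- the operation in the top bits of a 64-bit cmd_flags, rq_flag_bits below -- with a single 32-bit word whose low REQ_OP_BITS carry the operation and whose upper 24 bits carry the flags, shared verbatim between bio->bi_opf and request->cmd_flags. A minimal standalone model of the new layout (bit positions match the new enum in the blk_types.h hunks further down):

#include <stdio.h>

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)

enum { REQ_OP_READ, REQ_OP_WRITE };	/* ops live in the low 8 bits */
#define REQ_SYNC	(1u << 11)	/* flags live above them; __REQ_SYNC = 11 */

int main(void)
{
	unsigned int opf = REQ_OP_WRITE | REQ_SYNC;	/* one combined word */

	printf("op=%u sync=%u\n", opf & REQ_OP_MASK, !!(opf & REQ_SYNC));
	return 0;
}
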
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6acea160298c..01ddeaf64b0f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -553,8 +553,8 @@ struct request {
 	struct request_list *rl;
 }
 
-See the rq_flag_bits definitions for an explanation of the various flags
-available. Some bits are used by the block layer or i/o scheduler.
+See the req_ops and req_flag_bits definitions for an explanation of the various
+flags available. Some bits are used by the block layer or i/o scheduler.
 
 The behaviour of the various sector counts are almost the same as before,
 except that since we have multi-segment bios, current_nr_sectors refers
diff --git a/block/blk-core.c b/block/blk-core.c
index fd416651a676..0bfaa54d3e9f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1056,8 +1056,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1068,23 +1067,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int op,
-				     int op_flags, struct bio *bio,
-				     gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, unsigned int op,
+		struct bio *bio, gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(op, op_flags) != 0;
+	const bool is_sync = op_is_sync(op);
 	int may_queue;
 	req_flags_t rq_flags = RQF_ALLOCED;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, op, op_flags);
+	may_queue = elv_may_queue(q, op);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -1154,7 +1152,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	req_set_op_attrs(rq, op, op_flags);
+	rq->cmd_flags = op;
 	rq->rq_flags = rq_flags;
 
 	/* init elvpriv */
@@ -1232,8 +1230,7 @@ rq_starved:
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
 *
@@ -1244,18 +1241,17 @@ rq_starved:
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
-static struct request *get_request(struct request_queue *q, int op,
-				   int op_flags, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, unsigned int op,
+		struct bio *bio, gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(op, op_flags) != 0;
+	const bool is_sync = op_is_sync(op);
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1297,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, 0, NULL, gfp_mask);
+	rq = get_request(q, rw, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		return rq;
@@ -1446,7 +1442,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 */
 	if (rq_flags & RQF_ALLOCED) {
 		struct request_list *rl = blk_rq_rl(req);
-		bool sync = rw_is_sync(req_op(req), req->cmd_flags);
+		bool sync = op_is_sync(req->cmd_flags);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
@@ -1652,8 +1648,6 @@ out:
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
-
-	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
 	if (bio->bi_opf & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
@@ -1665,9 +1659,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-	const bool sync = !!(bio->bi_opf & REQ_SYNC);
 	struct blk_plug *plug;
-	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
+	int el_ret, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
 
@@ -1723,23 +1716,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 
 get_rq:
 	/*
-	 * This sync check and mask will be re-done in init_request_from_bio(),
-	 * but we need to set it earlier to expose the sync flag to the
-	 * rq allocator and io schedulers.
-	 */
-	if (sync)
-		rw_flags |= REQ_SYNC;
-
-	/*
-	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
-	 */
-	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
-
-	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);
@@ -2946,8 +2926,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
-	req_set_op(rq, bio_op(bio));
-
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 
@@ -3031,8 +3009,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	req_set_op_attrs(dst, req_op(src),
-			 (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
+	dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
@@ -3537,8 +3514,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);
 
 int __init blk_dev_init(void)
 {
-	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
 			FIELD_SIZEOF(struct request, cmd_flags));
+	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+			FIELD_SIZEOF(struct bio, bi_opf));
 
 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
 	kblockd_workqueue = alloc_workqueue("kblockd",
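
The deleted blk_queue_bio() hunk above shows the practical payoff: instead of re-deriving REQ_SYNC/REQ_META/REQ_PRIO into a scratch rw_flags word, the driver path simply forwards bio->bi_opf. A compilable toy model of that before/after (flag bit positions here are stand-ins, not the kernel's definitions):

#include <assert.h>

#define REQ_OP_MASK	0xff
#define REQ_SYNC	(1u << 11)	/* stand-in bit positions */
#define REQ_META	(1u << 12)
#define REQ_PRIO	(1u << 13)

int main(void)
{
	unsigned int bi_opf = 1 /* WRITE */ | REQ_SYNC | REQ_PRIO;

	/* old path: copy selected flags by hand into rw_flags */
	unsigned int rw_flags = 0;
	if (bi_opf & REQ_SYNC)
		rw_flags |= REQ_SYNC;
	rw_flags |= bi_opf & (REQ_META | REQ_PRIO);

	/* new path: pass the whole word through */
	unsigned int cmd_flags = bi_opf;

	assert((cmd_flags & (REQ_SYNC | REQ_META | REQ_PRIO)) ==
	       (rw_flags & (REQ_SYNC | REQ_META | REQ_PRIO)));
	assert((cmd_flags & REQ_OP_MASK) == 1);	/* and the op comes along */
	return 0;
}
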
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3990b9cfbda5..95f1d4d357df 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
+	flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 46fe9248410d..18abda862915 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
 	unsigned int granularity;
-	enum req_op op;
+	unsigned int op;
 	int alignment;
 	sector_t bs_mask;
 
diff --git a/block/blk-map.c b/block/blk-map.c
index 2c5ae5fef473..0173a72a8aa9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
 	if (!rq->bio) {
+		rq->cmd_flags &= REQ_OP_MASK;
+		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
 		if (!ll_back_merge_fn(rq->q, rq, bio))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b49c6658eb05..2da1a0ee3318 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -139,14 +139,13 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			       struct request *rq, int op,
-			       unsigned int op_flags)
+			       struct request *rq, unsigned int op)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
-	req_set_op_attrs(rq, op, op_flags);
+	rq->cmd_flags = op;
 	if (blk_queue_io_stat(q))
 		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
@@ -183,11 +182,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+	ctx->rq_dispatched[op_is_sync(op)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -202,7 +201,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 	}
 
 	rq->tag = tag;
-	blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+	blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
 	return rq;
 }
 
@@ -225,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	blk_mq_put_ctx(ctx);
 
 	if (!rq) {
@@ -277,7 +276,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (!rq) {
 		ret = -EWOULDBLOCK;
 		goto out_queue_exit;
@@ -1196,19 +1195,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	int op = bio_data_dir(bio);
-	int op_flags = 0;
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio_op(bio), bio->bi_opf))
-		op_flags |= REQ_SYNC;
-
-	trace_block_getrq(q, bio, op);
+	trace_block_getrq(q, bio, bio->bi_opf);
 	blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
-	rq = __blk_mq_alloc_request(data, op, op_flags);
+	rq = __blk_mq_alloc_request(data, bio->bi_opf);
 
 	data->hctx->queued++;
 	return rq;
@@ -1256,7 +1250,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_mq_alloc_data data;
 	struct request *rq;
@@ -1350,7 +1344,7 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
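
One detail worth calling out from the blk-mq hunks: op_is_sync() returns a bool that is used directly as an array index in ctx->rq_dispatched[op_is_sync(op)]++. A sketch of that idiom with simplified types and stand-in bit values:

#include <stdbool.h>
#include <stdio.h>

static unsigned long rq_dispatched[2];		/* [0] = async, [1] = sync */

static bool op_is_sync(unsigned int op)		/* simplified copy of the new helper */
{
	return (op & 0xff) == 0 /* READ */ || (op & (1u << 11) /* SYNC */);
}

int main(void)
{
	unsigned int ops[] = { 0 /* read */, 1 /* write */, 1 | (1u << 11) /* sync write */ };

	for (unsigned int i = 0; i < 3; i++)
		rq_dispatched[op_is_sync(ops[i])]++;	/* bool promotes to 0/1 */

	printf("async=%lu sync=%lu\n", rq_dispatched[0], rq_dispatched[1]);
	return 0;
}
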
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e24d880306c..c96186adaa66 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -667,10 +667,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
 } while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-					    struct cfq_group *curr_cfqg, int op,
-					    int op_flags)
+					    struct cfq_group *curr_cfqg,
+					    unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, 1);
 	cfqg_stats_end_empty_time(&cfqg->stats);
 	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 }
@@ -684,30 +684,29 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 #endif
 }
 
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
-					       int op_flags)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+					       unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, -1);
 }
 
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
-					       int op_flags)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+					       unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
+	blkg_rwstat_add(&cfqg->stats.merged, op, 1);
 }
 
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int op,
-			int op_flags)
+			uint64_t start_time, uint64_t io_start_time,
+			unsigned int op)
 {
 	struct cfqg_stats *stats = &cfqg->stats;
 	unsigned long long now = sched_clock();
 
 	if (time_after64(now, io_start_time))
-		blkg_rwstat_add(&stats->service_time, op, op_flags,
-				now - io_start_time);
+		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
 	if (time_after64(io_start_time, start_time))
-		blkg_rwstat_add(&stats->wait_time, op, op_flags,
-				io_start_time - start_time);
+		blkg_rwstat_add(&stats->wait_time, op,
+				io_start_time - start_time);
 }
 
@@ -786,16 +785,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-			struct cfq_group *curr_cfqg, int op, int op_flags) { }
+			struct cfq_group *curr_cfqg, unsigned int op) { }
 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 			uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
-			int op_flags) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
-			int op_flags) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+			unsigned int op) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+			unsigned int op) { }
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int op,
-			int op_flags) { }
+			uint64_t start_time, uint64_t io_start_time,
+			unsigned int op) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
@@ -2474,10 +2473,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	cfq_add_rq_rb(rq);
 	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
-				 req_op(rq), rq->cmd_flags);
+				 rq->cmd_flags);
 }
 
 static struct request *
@@ -2530,7 +2529,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -2565,7 +2564,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
 }
 
 static void
@@ -2588,7 +2587,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
+	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -4142,7 +4141,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
 				 rq->cmd_flags);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
@@ -4240,8 +4239,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
 	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
-				     rq_io_start_time_ns(rq), req_op(rq),
-				     rq->cmd_flags);
+				     rq_io_start_time_ns(rq), rq->cmd_flags);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -4319,14 +4317,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfq_schedule_dispatch(cfqd);
 }
 
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
 {
 	/*
 	 * If REQ_PRIO is set, boost class and prio level, if it's below
 	 * BE/NORM. If prio is not set, restore the potentially boosted
 	 * class/prio level.
 	 */
-	if (!(op_flags & REQ_PRIO)) {
+	if (!(op & REQ_PRIO)) {
 		cfqq->ioprio_class = cfqq->org_ioprio_class;
 		cfqq->ioprio = cfqq->org_ioprio;
 	} else {
@@ -4347,7 +4345,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 	return ELV_MQUEUE_MAY;
 }
 
-static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
+static int cfq_may_queue(struct request_queue *q, unsigned int op)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
@@ -4364,10 +4362,10 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
 	if (!cic)
 		return ELV_MQUEUE_MAY;
 
-	cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
+	cfqq = cic_to_cfqq(cic, op_is_sync(op));
 	if (cfqq) {
 		cfq_init_prio_data(cfqq, cic);
-		cfqq_boost_on_prio(cfqq, op_flags);
+		cfqq_boost_on_prio(cfqq, op);
 
 		return __cfq_may_queue(cfqq);
 	}
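
Across the CFQ hunks the pattern is uniform: every helper that used to take (int op, int op_flags) now takes a single unsigned int op, and flag tests such as op & REQ_PRIO keep working on the combined word. A reduced sketch of the cfqq_boost_on_prio() logic under that calling convention (types, bit values, and the boost rule are all simplified stand-ins):

#define REQ_PRIO	(1u << 13)	/* stand-in bit position */

struct prio_state {
	int ioprio_class, ioprio;	/* current (possibly boosted) level */
	int org_class, org_prio;	/* level the task originally had */
};

/* One combined argument instead of (op, op_flags). */
static void boost_on_prio(struct prio_state *s, unsigned int op)
{
	if (!(op & REQ_PRIO)) {		/* restore a potentially boosted level */
		s->ioprio_class = s->org_class;
		s->ioprio = s->org_prio;
	} else {			/* boost; simplified vs. the BE/NORM check */
		s->ioprio_class = 2;	/* stand-in for IOPRIO_CLASS_BE */
		s->ioprio = 0;
	}
}
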
diff --git a/block/elevator.c b/block/elevator.c
index ac80f89a0842..a18a5db274e4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -714,12 +714,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
 		e->type->ops.elevator_put_req_fn(rq);
 }
 
-int elv_may_queue(struct request_queue *q, int op, int op_flags)
+int elv_may_queue(struct request_queue *q, unsigned int op)
 {
 	struct elevator_queue *e = q->elevator;
 
 	if (e->type->ops.elevator_may_queue_fn)
-		return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
+		return e->type->ops.elevator_may_queue_fn(q, op);
 
 	return ELV_MQUEUE_MAY;
 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a2768835d394..68a9eb4f3f36 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1135,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_private = io;
 	clone->bi_end_io  = crypt_endio;
 	clone->bi_bdev    = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+	clone->bi_opf	  = io->base_bio->bi_opf;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cef1f78031d4..65738b0aad36 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1031,8 +1031,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
 	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
-			    req_op(rq), (unsigned long long) rq->cmd_flags);
+		scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
 		goto out;
 	}
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2b790bda7998..9a377079af26 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8427,7 +8427,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 	if (!bio)
 		return -ENOMEM;
 
-	bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio));
+	bio->bi_opf = orig_bio->bi_opf;
 	bio->bi_private = dip;
 	bio->bi_end_io = btrfs_end_dio_bio;
 	btrfs_io_bio(bio)->logical = file_offset;
@@ -8465,8 +8465,7 @@ next_block:
 					  start_sector, GFP_NOFS);
 			if (!bio)
 				goto out_err;
-			bio_set_op_attrs(bio, bio_op(orig_bio),
-					 bio_flags(orig_bio));
+			bio->bi_opf = orig_bio->bi_opf;
 			bio->bi_private = dip;
 			bio->bi_end_io = btrfs_end_dio_bio;
 			btrfs_io_bio(bio)->logical = file_offset;
diff --git a/fs/buffer.c b/fs/buffer.c
index b205a629001d..a29335867e30 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3118,7 +3118,7 @@ EXPORT_SYMBOL(submit_bh);
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
  * @op: whether to %READ or %WRITE
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9e8de18a168a..2cf4f7f09e32 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -688,7 +688,7 @@ struct f2fs_io_info {
 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
 	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
 	int op;			/* contains REQ_OP_ */
-	int op_flags;		/* rq_flag_bits */
+	int op_flags;		/* req_flag_bits */
 	block_t new_blkaddr;	/* new block address to be written */
 	block_t old_blkaddr;	/* old block address before Cow */
 	struct page *page;	/* page to be written */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 49d5a1b61b06..b1f9144b42c7 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -231,7 +231,7 @@ static void gfs2_end_log_write(struct bio *bio)
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3bf5d33800ab..ddaf28d0988f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
- * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @rw. The
 * caller is responsible for synchronizing calls to this function.
 */
 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
-				   int op, int op_flags, uint64_t val)
+				   unsigned int op, uint64_t val)
 {
 	struct percpu_counter *cnt;
 
@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
 
 	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
 
-	if (op_flags & REQ_SYNC)
+	if (op & REQ_SYNC)
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	if (!throtl) {
 		blkg = blkg ?: q->root_blkg;
-		blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
+		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
 				bio->bi_iter.bi_size);
-		blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
+		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 	}
 
 	rcu_read_unlock();
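
blkg_rwstat_add() shows the same API narrowing on the cgroup side: the one op word now carries everything the function needs to pick the sync vs. async bucket. A toy model of that bucketing (counter layout and bit values are stand-ins):

#include <stdint.h>
#include <stdio.h>

enum { RWSTAT_SYNC, RWSTAT_ASYNC, RWSTAT_NR };

#define REQ_SYNC	(1u << 11)	/* stand-in bit position */

static uint64_t cpu_cnt[RWSTAT_NR];

static void rwstat_add(unsigned int op, uint64_t val)
{
	/* same test as the patched blkg_rwstat_add(): op & REQ_SYNC */
	cpu_cnt[(op & REQ_SYNC) ? RWSTAT_SYNC : RWSTAT_ASYNC] += val;
}

int main(void)
{
	rwstat_add(1 | REQ_SYNC, 4096);	/* sync write, e.g. a bio->bi_opf */
	rwstat_add(1, 512);		/* async write */
	printf("sync=%llu async=%llu\n",
	       (unsigned long long)cpu_cnt[RWSTAT_SYNC],
	       (unsigned long long)cpu_cnt[RWSTAT_ASYNC]);
	return 0;
}
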
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ec69a8fe3b29..dca972d67548 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -88,24 +88,6 @@ struct bio {
 	struct bio_vec		bi_inline_vecs[0];
 };
 
-#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
-#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
-#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)
-
-#define bio_set_op_attrs(bio, op, op_flags) do {			\
-	if (__builtin_constant_p(op))					\
-		BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
-	else								\
-		WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
-	if (__builtin_constant_p(op_flags))				\
-		BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
-	else								\
-		WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
-	(bio)->bi_opf = bio_flags(bio);					\
-	(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
-	(bio)->bi_opf |= (op_flags);					\
-} while (0)
-
 #define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
 
 /*
@@ -147,26 +129,40 @@ struct bio {
 #endif /* CONFIG_BLOCK */
 
 /*
- * Request flags.  For use in the cmd_flags field of struct request, and in
- * bi_opf of struct bio.  Note that some flags are only valid in either one.
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 */
-enum rq_flag_bits {
-	/* common flags */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+#define REQ_OP_BITS	8
+#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS	24
+
+enum req_opf {
+	REQ_OP_READ,
+	REQ_OP_WRITE,
+	REQ_OP_DISCARD,		/* request to discard sectors */
+	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
+	REQ_OP_WRITE_SAME,	/* write same block many times */
+	REQ_OP_FLUSH,		/* request for cache flush */
+	REQ_OP_ZONE_REPORT,	/* Get zone information */
+	REQ_OP_ZONE_RESET,	/* Reset a zone write pointer */
+
+	REQ_OP_LAST,
+};
+
+enum req_flag_bits {
+	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
+		REQ_OP_BITS,
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_PRIO,		/* boost priority in cfq */
-
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
 	__REQ_PREFLUSH,		/* request for cache flush */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
-
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -176,37 +172,32 @@ enum rq_flag_bits {
 #define REQ_SYNC		(1ULL << __REQ_SYNC)
 #define REQ_META		(1ULL << __REQ_META)
 #define REQ_PRIO		(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
 #define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
 #define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA			(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
-#define REQ_COMMON_MASK \
-	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
-	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_RAHEAD)
-#define REQ_CLONE_MASK		REQ_COMMON_MASK
 
-/* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
 	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
 
-#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
-#define REQ_FUA			(1ULL << __REQ_FUA)
-#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
-#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
+#define bio_op(bio) \
+	((bio)->bi_opf & REQ_OP_MASK)
+#define req_op(req) \
+	((req)->cmd_flags & REQ_OP_MASK)
 
-enum req_op {
-	REQ_OP_READ,
-	REQ_OP_WRITE,
-	REQ_OP_DISCARD,		/* request to discard sectors */
-	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
-	REQ_OP_WRITE_SAME,	/* write same block many times */
-	REQ_OP_FLUSH,		/* request for cache flush */
-	REQ_OP_ZONE_REPORT,	/* Get zone information */
-	REQ_OP_ZONE_RESET,	/* Reset a zone write pointer */
-};
+/* obsolete, don't use in new code */
+#define bio_set_op_attrs(bio, op, op_flags) \
+	((bio)->bi_opf |= (op | op_flags))
 
-#define REQ_OP_BITS 3
+static inline bool op_is_sync(unsigned int op)
+{
+	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
+}
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
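
Since blk_types.h is the heart of the change, here is a standalone check of the new invariants -- all ops must fit in REQ_OP_BITS, and ops plus flags must fit the 32-bit field -- mirroring the BUILD_BUG_ON()s added to blk_dev_init() (scaffolding only; the names are copied from the hunks above):

#include <assert.h>
#include <stdbool.h>

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD, REQ_OP_SECURE_ERASE,
	REQ_OP_WRITE_SAME, REQ_OP_FLUSH, REQ_OP_ZONE_REPORT,
	REQ_OP_ZONE_RESET, REQ_OP_LAST,
};

#define REQ_SYNC	(1u << 11)	/* __REQ_SYNC = REQ_OP_BITS + 3 */

static bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}

int main(void)
{
	unsigned int opf = REQ_OP_FLUSH | REQ_SYNC;

	/* the two invariants blk_dev_init() now asserts at build time */
	assert(REQ_OP_LAST < (1 << REQ_OP_BITS));
	assert(REQ_OP_BITS + REQ_FLAG_BITS <= 8 * sizeof(unsigned int));

	assert((opf & REQ_OP_MASK) == REQ_OP_FLUSH);	/* bio_op()/req_op() */
	assert(op_is_sync(REQ_OP_READ));		/* reads are always sync */
	assert(!op_is_sync(REQ_OP_WRITE));		/* plain write is async */
	assert(op_is_sync(opf));			/* flagged write is sync */
	return 0;
}
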
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4415feac679..8396da2bb698 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -142,7 +142,7 @@ struct request {
 
 	int cpu;
 	unsigned cmd_type;
-	u64 cmd_flags;
+	unsigned int cmd_flags;		/* op and common flags */
 	req_flags_t rq_flags;
 	unsigned long atomic_flags;
 
@@ -244,20 +244,6 @@ struct request {
 	struct request *next_rq;
 };
 
-#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
-#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
-
-#define req_set_op(req, op) do {				\
-	WARN_ON(op >= (1 << REQ_OP_BITS));			\
-	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
-	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
-} while (0)
-
-#define req_set_op_attrs(req, op, flags) do {	\
-	req_set_op(req, op);			\
-	(req)->cmd_flags |= flags;		\
-} while (0)
-
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
@@ -741,17 +727,9 @@ static inline unsigned int blk_queue_zone_size(struct request_queue *q)
 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
 
-/*
- * We regard a request as sync, if either a read or a sync write
- */
-static inline bool rw_is_sync(int op, unsigned int rw_flags)
-{
-	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
-}
-
 static inline bool rq_is_sync(struct request *rq)
 {
-	return rw_is_sync(req_op(rq), rq->cmd_flags);
+	return op_is_sync(rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
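
A side effect visible in the blkdev.h hunks: with the op folded into the low bits, req->cmd_flags no longer needs 64 bits; 8 op bits plus 24 flag bits fit exactly in an unsigned int, matching bio->bi_opf, and the shift-based accessors that justified the u64 go away with it. For contrast, a sketch of the removed encoding (constants as in the deleted macros above):

/* Old scheme: the op was stored in the TOP bits of a 64-bit word. */
#define REQ_OP_BITS_OLD	3
#define REQ_OP_SHIFT	(8 * sizeof(unsigned long long) - REQ_OP_BITS_OLD)

static unsigned int old_req_op(unsigned long long cmd_flags)
{
	return cmd_flags >> REQ_OP_SHIFT;	/* the deleted req_op() */
}
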
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cceb72f9e29f..e417f080219a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
 }
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index b91b023deffb..a52c6580cc9a 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -58,7 +58,7 @@ struct dm_io_notify {
 struct dm_io_client;
 struct dm_io_request {
 	int bi_op;			/* REQ_OP */
-	int bi_op_flags;		/* rq_flag_bits */
+	int bi_op_flags;		/* req_flag_bits */
 	struct dm_io_memory mem;	/* Memory to use for io */
 	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
 	struct dm_io_client *client;	/* Client memory handler */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e7f358d2e5fc..f219c9aed360 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -139,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int, int);
+extern int elv_may_queue(struct request_queue *, unsigned int);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
 			   struct bio *bio, gfp_t gfp_mask);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index d336b890e31f..df3e9ae5ad8d 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h | |||
@@ -27,8 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request, | |||
27 | __entry->sector = bio->bi_iter.bi_sector; | 27 | __entry->sector = bio->bi_iter.bi_sector; |
28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; | 28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; |
29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
30 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 30 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
31 | bio->bi_iter.bi_size); | ||
32 | ), | 31 | ), |
33 | 32 | ||
34 | TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", | 33 | TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", |
@@ -102,8 +101,7 @@ DECLARE_EVENT_CLASS(bcache_bio, | |||
102 | __entry->dev = bio->bi_bdev->bd_dev; | 101 | __entry->dev = bio->bi_bdev->bd_dev; |
103 | __entry->sector = bio->bi_iter.bi_sector; | 102 | __entry->sector = bio->bi_iter.bi_sector; |
104 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 103 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
105 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 104 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
106 | bio->bi_iter.bi_size); | ||
107 | ), | 105 | ), |
108 | 106 | ||
109 | TP_printk("%d,%d %s %llu + %u", | 107 | TP_printk("%d,%d %s %llu + %u", |
@@ -138,8 +136,7 @@ TRACE_EVENT(bcache_read, | |||
138 | __entry->dev = bio->bi_bdev->bd_dev; | 136 | __entry->dev = bio->bi_bdev->bd_dev; |
139 | __entry->sector = bio->bi_iter.bi_sector; | 137 | __entry->sector = bio->bi_iter.bi_sector; |
140 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 138 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
141 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 139 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
142 | bio->bi_iter.bi_size); | ||
143 | __entry->cache_hit = hit; | 140 | __entry->cache_hit = hit; |
144 | __entry->bypass = bypass; | 141 | __entry->bypass = bypass; |
145 | ), | 142 | ), |
@@ -170,8 +167,7 @@ TRACE_EVENT(bcache_write, | |||
170 | __entry->inode = inode; | 167 | __entry->inode = inode; |
171 | __entry->sector = bio->bi_iter.bi_sector; | 168 | __entry->sector = bio->bi_iter.bi_sector; |
172 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 169 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
173 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 170 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
174 | bio->bi_iter.bi_size); | ||
175 | __entry->writeback = writeback; | 171 | __entry->writeback = writeback; |
176 | __entry->bypass = bypass; | 172 | __entry->bypass = bypass; |
177 | ), | 173 | ), |
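
All four bcache events above make the same substitution: the separate bio_op(bio) argument goes away because bi_opf already encodes the operation in its low bits. The identity the new calls rely on, assuming the REQ_OP_BITS/REQ_OP_MASK layout this series introduces (8 low bits for the op):

	/* Sketch of the encoding; constants assumed from this series. */
	#define REQ_OP_BITS	8
	#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)

	/* bio_op() reduces to a mask, so passing bi_opf alone is lossless:
	 * bio_op(bio) == (bio->bi_opf & REQ_OP_MASK)
	 */
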
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 8f3a163b8166..3e02e3a25413 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -84,8 +84,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
84 | 0 : blk_rq_sectors(rq); | 84 | 0 : blk_rq_sectors(rq); |
85 | __entry->errors = rq->errors; | 85 | __entry->errors = rq->errors; |
86 | 86 | ||
87 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, | 87 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
88 | blk_rq_bytes(rq)); | ||
89 | blk_dump_cmd(__get_str(cmd), rq); | 88 | blk_dump_cmd(__get_str(cmd), rq); |
90 | ), | 89 | ), |
91 | 90 | ||
@@ -163,7 +162,7 @@ TRACE_EVENT(block_rq_complete, | |||
163 | __entry->nr_sector = nr_bytes >> 9; | 162 | __entry->nr_sector = nr_bytes >> 9; |
164 | __entry->errors = rq->errors; | 163 | __entry->errors = rq->errors; |
165 | 164 | ||
166 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes); | 165 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); |
167 | blk_dump_cmd(__get_str(cmd), rq); | 166 | blk_dump_cmd(__get_str(cmd), rq); |
168 | ), | 167 | ), |
169 | 168 | ||
@@ -199,8 +198,7 @@ DECLARE_EVENT_CLASS(block_rq, | |||
199 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | 198 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
200 | blk_rq_bytes(rq) : 0; | 199 | blk_rq_bytes(rq) : 0; |
201 | 200 | ||
202 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, | 201 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
203 | blk_rq_bytes(rq)); | ||
204 | blk_dump_cmd(__get_str(cmd), rq); | 202 | blk_dump_cmd(__get_str(cmd), rq); |
205 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 203 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
206 | ), | 204 | ), |
@@ -274,8 +272,7 @@ TRACE_EVENT(block_bio_bounce, | |||
274 | bio->bi_bdev->bd_dev : 0; | 272 | bio->bi_bdev->bd_dev : 0; |
275 | __entry->sector = bio->bi_iter.bi_sector; | 273 | __entry->sector = bio->bi_iter.bi_sector; |
276 | __entry->nr_sector = bio_sectors(bio); | 274 | __entry->nr_sector = bio_sectors(bio); |
277 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 275 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
278 | bio->bi_iter.bi_size); | ||
279 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 276 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
280 | ), | 277 | ), |
281 | 278 | ||
@@ -313,8 +310,7 @@ TRACE_EVENT(block_bio_complete, | |||
313 | __entry->sector = bio->bi_iter.bi_sector; | 310 | __entry->sector = bio->bi_iter.bi_sector; |
314 | __entry->nr_sector = bio_sectors(bio); | 311 | __entry->nr_sector = bio_sectors(bio); |
315 | __entry->error = error; | 312 | __entry->error = error; |
316 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 313 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
317 | bio->bi_iter.bi_size); | ||
318 | ), | 314 | ), |
319 | 315 | ||
320 | TP_printk("%d,%d %s %llu + %u [%d]", | 316 | TP_printk("%d,%d %s %llu + %u [%d]", |
@@ -341,8 +337,7 @@ DECLARE_EVENT_CLASS(block_bio_merge, | |||
341 | __entry->dev = bio->bi_bdev->bd_dev; | 337 | __entry->dev = bio->bi_bdev->bd_dev; |
342 | __entry->sector = bio->bi_iter.bi_sector; | 338 | __entry->sector = bio->bi_iter.bi_sector; |
343 | __entry->nr_sector = bio_sectors(bio); | 339 | __entry->nr_sector = bio_sectors(bio); |
344 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 340 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
345 | bio->bi_iter.bi_size); | ||
346 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 341 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
347 | ), | 342 | ), |
348 | 343 | ||
@@ -409,8 +404,7 @@ TRACE_EVENT(block_bio_queue, | |||
409 | __entry->dev = bio->bi_bdev->bd_dev; | 404 | __entry->dev = bio->bi_bdev->bd_dev; |
410 | __entry->sector = bio->bi_iter.bi_sector; | 405 | __entry->sector = bio->bi_iter.bi_sector; |
411 | __entry->nr_sector = bio_sectors(bio); | 406 | __entry->nr_sector = bio_sectors(bio); |
412 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 407 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
413 | bio->bi_iter.bi_size); | ||
414 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 408 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
415 | ), | 409 | ), |
416 | 410 | ||
@@ -438,7 +432,7 @@ DECLARE_EVENT_CLASS(block_get_rq, | |||
438 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | 432 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; |
439 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; | 433 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; |
440 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; | 434 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; |
441 | blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, | 435 | blk_fill_rwbs(__entry->rwbs, |
442 | bio ? bio->bi_opf : 0, __entry->nr_sector); | 436 | bio ? bio->bi_opf : 0, __entry->nr_sector); |
443 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 437 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
444 | ), | 438 | ), |
@@ -573,8 +567,7 @@ TRACE_EVENT(block_split, | |||
573 | __entry->dev = bio->bi_bdev->bd_dev; | 567 | __entry->dev = bio->bi_bdev->bd_dev; |
574 | __entry->sector = bio->bi_iter.bi_sector; | 568 | __entry->sector = bio->bi_iter.bi_sector; |
575 | __entry->new_sector = new_sector; | 569 | __entry->new_sector = new_sector; |
576 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 570 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
577 | bio->bi_iter.bi_size); | ||
578 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 571 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
579 | ), | 572 | ), |
580 | 573 | ||
@@ -617,8 +610,7 @@ TRACE_EVENT(block_bio_remap, | |||
617 | __entry->nr_sector = bio_sectors(bio); | 610 | __entry->nr_sector = bio_sectors(bio); |
618 | __entry->old_dev = dev; | 611 | __entry->old_dev = dev; |
619 | __entry->old_sector = from; | 612 | __entry->old_sector = from; |
620 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, | 613 | blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); |
621 | bio->bi_iter.bi_size); | ||
622 | ), | 614 | ), |
623 | 615 | ||
624 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | 616 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", |
@@ -664,8 +656,7 @@ TRACE_EVENT(block_rq_remap, | |||
664 | __entry->old_dev = dev; | 656 | __entry->old_dev = dev; |
665 | __entry->old_sector = from; | 657 | __entry->old_sector = from; |
666 | __entry->nr_bios = blk_rq_count_bios(rq); | 658 | __entry->nr_bios = blk_rq_count_bios(rq); |
667 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, | 659 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
668 | blk_rq_bytes(rq)); | ||
669 | ), | 660 | ), |
670 | 661 | ||
671 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", | 662 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", |
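
One subtlety in the block_get_rq class above: with no bio attached, the event passes 0 for the combined word. Assuming blk_fill_rwbs() maps op 0 to REQ_OP_READ (per the switch in blktrace.c below), the decode stays well defined:

	/* Hedged sketch: a NULL bio yields op 0, i.e. REQ_OP_READ with
	 * no flag bits set, which blk_fill_rwbs() renders as "R". */
	char rwbs[8];

	blk_fill_rwbs(rwbs, 0, 0);	/* rwbs == "R" */
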
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index dbafc5df03f3..95cecbf67f5c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1777,14 +1777,14 @@ void blk_dump_cmd(char *buf, struct request *rq) | |||
1777 | } | 1777 | } |
1778 | } | 1778 | } |
1779 | 1779 | ||
1780 | void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes) | 1780 | void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes) |
1781 | { | 1781 | { |
1782 | int i = 0; | 1782 | int i = 0; |
1783 | 1783 | ||
1784 | if (rw & REQ_PREFLUSH) | 1784 | if (op & REQ_PREFLUSH) |
1785 | rwbs[i++] = 'F'; | 1785 | rwbs[i++] = 'F'; |
1786 | 1786 | ||
1787 | switch (op) { | 1787 | switch (op & REQ_OP_MASK) { |
1788 | case REQ_OP_WRITE: | 1788 | case REQ_OP_WRITE: |
1789 | case REQ_OP_WRITE_SAME: | 1789 | case REQ_OP_WRITE_SAME: |
1790 | rwbs[i++] = 'W'; | 1790 | rwbs[i++] = 'W'; |
@@ -1806,13 +1806,13 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes) | |||
1806 | rwbs[i++] = 'N'; | 1806 | rwbs[i++] = 'N'; |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | if (rw & REQ_FUA) | 1809 | if (op & REQ_FUA) |
1810 | rwbs[i++] = 'F'; | 1810 | rwbs[i++] = 'F'; |
1811 | if (rw & REQ_RAHEAD) | 1811 | if (op & REQ_RAHEAD) |
1812 | rwbs[i++] = 'A'; | 1812 | rwbs[i++] = 'A'; |
1813 | if (rw & REQ_SYNC) | 1813 | if (op & REQ_SYNC) |
1814 | rwbs[i++] = 'S'; | 1814 | rwbs[i++] = 'S'; |
1815 | if (rw & REQ_META) | 1815 | if (op & REQ_META) |
1816 | rwbs[i++] = 'M'; | 1816 | rwbs[i++] = 'M'; |
1817 | 1817 | ||
1818 | rwbs[i] = '\0'; | 1818 | rwbs[i] = '\0'; |
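
With the merged word, blk_fill_rwbs() decodes everything from one argument: REQ_PREFLUSH is tested first, the low REQ_OP_MASK bits select the operation letter, and FUA/RAHEAD/SYNC/META append their modifiers. A short usage sketch; the buffer size follows the tracing code's RWBS_LEN, assumed here to be 8:

	char rwbs[8];

	/* A sync FUA write: 'W' from the op, then 'F' (FUA), 'S' (sync). */
	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096);
	/* rwbs == "WFS" */
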