Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	 5
-rw-r--r--	block/blk-flush.c	 4
-rw-r--r--	block/blk-merge.c	28
-rw-r--r--	block/blk-mq.c		41
-rw-r--r--	block/scsi_ioctl.c	 4
5 files changed, 45 insertions(+), 37 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d87be5b4e554..40d654861c33 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2957,8 +2957,6 @@ int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
 
-#define PLUG_MAGIC	0x91827364
-
 /**
  * blk_start_plug - initialize blk_plug and track it inside the task_struct
  * @plug:	The &struct blk_plug that needs to be initialized
@@ -2977,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
 
-	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->mq_list);
 	INIT_LIST_HEAD(&plug->cb_list);
@@ -3074,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	LIST_HEAD(list);
 	unsigned int depth;
 
-	BUG_ON(plug->magic != PLUG_MAGIC);
-
 	flush_plug_callbacks(plug, from_schedule);
 
 	if (!list_empty(&plug->mq_list))
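The PLUG_MAGIC poison removed above only ever caught a blk_flush_plug_list() call on a plug that had never been through blk_start_plug(); for the normal on-stack usage pattern the check adds nothing. A minimal sketch of that pattern (the submit helper is hypothetical, the plug API is the one touched here):

/* Typical on-stack plugging; blk_start_plug() initializes the three
 * lists that blk_flush_plug_list() walks, which is why the magic-number
 * sanity check could go. */
struct blk_plug plug;

blk_start_plug(&plug);
submit_batched_bios();		/* hypothetical helper issuing several bios */
blk_finish_plug(&plug);		/* flushes anything still plugged */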
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ef608b35d9be..ff87c664b7df 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops)
+	if (q->mq_ops) {
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
+		q->flush_rq->cmd_flags = 0;
+	}
 
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
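The q->flush_rq->cmd_flags = 0 added above pairs with the blk_mq_tag_to_rq() change later in this diff: while a flush sequence is in progress the flush request borrows the tag of the request that triggered it, and REQ_FLUSH_SEQ in its cmd_flags is what marks that tag as borrowed. A hedged sketch of the lookup this clear keeps honest (same logic as the blk-mq.c hunk below):

/* Sketch of the tag-to-request lookup the clear protects: once the flush
 * completes and cmd_flags is zeroed, an idle flush_rq can no longer shadow
 * a normal request that later reuses the same tag. */
static struct request *tag_to_rq_sketch(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct request_queue *q = hctx->queue;

	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && q->flush_rq->tag == tag)
		return q->flush_rq;		/* tag currently owned by the flush */

	return hctx->tags->rqs[tag];		/* normal per-hctx lookup */
}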
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6c583f9c5b65..b3bf0df0f4c2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, high, highprv = 1, no_sg_merge;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
+	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
+			 * If SG merging is disabled, each bio vector is
+			 * a segment
+			 */
+			if (no_sg_merge)
+				goto new_segment;
+
+			/*
 			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since that
-			 * might change with the bounce page.
+			 * never considered part of another segment, since
+			 * that might change with the bounce page.
 			 */
 			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
 			if (!high && !highprv && cluster) {
@@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct bio *nxt = bio->bi_next;
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+		bio->bi_phys_segments = bio->bi_vcnt;
+	else {
+		struct bio *nxt = bio->bi_next;
+
+		bio->bi_next = NULL;
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_next = nxt;
+	}
 
-	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-	bio->bi_next = nxt;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
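With QUEUE_FLAG_NO_SG_MERGE set, every bio_vec counts as its own physical segment, which is why blk_recount_segments() can now short-circuit to bi_vcnt. A hedged sketch of what that means for a caller (bdev, page_a and page_b are assumed to exist; the flag and helpers are the ones used above):

struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = bio_alloc(GFP_KERNEL, 2);

bio->bi_bdev = bdev;			/* bio_add_page() consults the queue limits */
bio_add_page(bio, page_a, PAGE_SIZE, 0);
bio_add_page(bio, page_b, PAGE_SIZE, 0);

blk_recount_segments(q, bio);
/* with QUEUE_FLAG_NO_SG_MERGE: bio->bi_phys_segments == bio->bi_vcnt == 2,
 * even if the two pages happen to be physically contiguous */

bio_put(bio);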
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f27fe44230c2..21f952ab3581 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,19 +199,12 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->q = q;
 	rq->mq_ctx = ctx;
 	rq->cmd_flags |= rw_flags;
-	rq->cmd_type = 0;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
-	rq->__data_len = 0;
-	rq->__sector = (sector_t) -1;
-	rq->bio = NULL;
-	rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
 	rq->rq_disk = NULL;
 	rq->part = NULL;
-	rq->start_time = jiffies;
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
@@ -221,23 +214,16 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
 #endif
-	rq->ioprio = 0;
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
-	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
-	rq->cmd_len = BLK_MAX_CDB;
 
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
 	rq->sense = NULL;
 
-	rq->deadline = 0;
 	INIT_LIST_HEAD(&rq->timeout_list);
-	rq->timeout = 0;
-	rq->retries = 0;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
@@ -449,8 +435,10 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * complete. So be sure to clear complete again when we start
 	 * the request, otherwise we'll ignore the completion event.
 	 */
-	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -553,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 {
-	return tags->rqs[tag];
+	struct request_queue *q = hctx->queue;
+
+	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
+	    q->flush_rq->tag == tag)
+		return q->flush_rq;
+
+	return hctx->tags->rqs[tag];
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -584,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
 		if (tag >= hctx->tags->nr_tags)
 			break;
 
-		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
+		rq = blk_mq_tag_to_rq(hctx, tag++);
 		if (rq->q != hctx->queue)
 			continue;
 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
@@ -1112,7 +1106,11 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
-	blk_account_io_start(rq, 1);
+
+	if (blk_do_io_stat(rq)) {
+		rq->start_time = jiffies;
+		blk_account_io_start(rq, 1);
+	}
 }
 
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
@@ -1829,6 +1827,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->mq_ops = set->ops;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+	if (!(set->flags & BLK_MQ_F_SG_MERGE))
+		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+
 	q->sg_reserved_size = INT_MAX;
 
 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
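Whether a queue ends up with QUEUE_FLAG_NO_SG_MERGE is decided once, in blk_mq_init_queue() above, from the tag set's flags, so the choice belongs to the driver registering the queue. An illustrative tag-set setup (not from this diff; ops, sizes and the per-command struct are hypothetical driver details):

/* Keeping BLK_MQ_F_SG_MERGE preserves the old segment-merging behaviour;
 * leaving it out makes blk_mq_init_queue() set QUEUE_FLAG_NO_SG_MERGE and
 * take the cheaper accounting paths added in blk-merge.c. */
static struct blk_mq_tag_set my_tag_set = {
	.ops		= &my_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.cmd_size	= sizeof(struct my_cmd),
	.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE,
};

/* in probe(): blk_mq_alloc_tag_set(&my_tag_set), then */
struct request_queue *q = blk_mq_init_queue(&my_tag_set);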
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 26487972ac54..9c28a5b38042 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -205,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
 	if (capable(CAP_SYS_RAWIO))
 		return 0;
 
-	/* if there's no filter set, assume we're filtering everything out */
-	if (!filter)
-		return -EPERM;
-
 	/* Anybody who can open the device can do a read-safe command */
 	if (test_bit(cmd[0], filter->read_ok))
 		return 0;
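The dropped NULL test was dead code in this file: filter points at the file-local default command filter, so blk_verify_command() always has read_ok/write_ok bitmaps to consult. A hedged sketch of a caller, the way the SG_IO pass-through path uses it (mode is assumed to come from the opened file):

/* Verify a READ(10) CDB before pass-through; without CAP_SYS_RAWIO this
 * returns 0 only if the opcode is marked read-safe in the default filter,
 * or if the caller has write permission and it is marked write-safe. */
unsigned char cdb[BLK_MAX_CDB] = { READ_10 };
int err = blk_verify_command(cdb, mode & FMODE_WRITE);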