author		Jens Axboe <axboe@fb.com>	2014-05-30 10:11:50 -0400
committer	Jens Axboe <axboe@fb.com>	2014-05-30 10:11:50 -0400
commit		f89ca166460e84620db73d4542f28d34c40a8917 (patch)
tree		6f7ef4b566879674d3dfc6e79b71561a49ac877e
parent		879466e6a53ce063fa779476cf648abafcd703a9 (diff)
parent		2230237500821aedfcf2bba2a79d9cbca389233c (diff)
Merge branch 'for-3.16/core' into for-3.16/drivers
Pulled in for the blk_mq_tag_to_rq() change, which impacts mtip32xx.

Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat:
 block/blk-core.c                  |  5
 block/blk-flush.c                 |  4
 block/blk-merge.c                 | 28
 block/blk-mq.c                    | 41
 block/scsi_ioctl.c                |  4
 drivers/block/mtip32xx/mtip32xx.c |  4
 include/linux/blk-mq.h            |  3
 include/linux/blkdev.h            |  2

 8 files changed, 49 insertions(+), 42 deletions(-)
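The interface change that motivated this merge is the blk_mq_tag_to_rq() signature switch, visible in the blk-mq.c and blk-mq.h hunks below. As a before/after sketch of the prototype:

	/* before: callers passed the tag map and could only see tagged requests */
	struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

	/* after: callers pass the hw queue, so the lookup can also resolve the
	 * queue's internal flush request while it borrows a tag */
	struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);

Since mtip32xx calls this helper directly, the driver branch needed the core change merged in to keep building.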
diff --git a/block/blk-core.c b/block/blk-core.c
index d87be5b4e554..40d654861c33 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2957,8 +2957,6 @@ int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
 
-#define PLUG_MAGIC	0x91827364
-
 /**
  * blk_start_plug - initialize blk_plug and track it inside the task_struct
  * @plug:	The &struct blk_plug that needs to be initialized
@@ -2977,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
 
-	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->mq_list);
 	INIT_LIST_HEAD(&plug->cb_list);
@@ -3074,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	LIST_HEAD(list);
 	unsigned int depth;
 
-	BUG_ON(plug->magic != PLUG_MAGIC);
-
 	flush_plug_callbacks(plug, from_schedule);
 
 	if (!list_empty(&plug->mq_list))
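For context on the deletions above: plug->magic only ever caught plugs used without going through blk_start_plug(). The canonical on-stack usage pattern (a sketch of the existing convention, not code from this diff) leaves no such window, since the plug is a stack variable initialized immediately before use:

	struct blk_plug plug;

	blk_start_plug(&plug);	/* initializes list, mq_list and cb_list */
	/* ... submit a batch of bios ... */
	blk_finish_plug(&plug);	/* flushes everything queued on the plug */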
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ef608b35d9be..ff87c664b7df 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops)
+	if (q->mq_ops) {
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
+		q->flush_rq->cmd_flags = 0;
+	}
 
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6c583f9c5b65..b3bf0df0f4c2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, high, highprv = 1, no_sg_merge;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
+	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
+			 * If SG merging is disabled, each bio vector is
+			 * a segment
+			 */
+			if (no_sg_merge)
+				goto new_segment;
+
+			/*
 			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since that
-			 * might change with the bounce page.
+			 * never considered part of another segment, since
+			 * that might change with the bounce page.
 			 */
 			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
 			if (!high && !highprv && cluster) {
@@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct bio *nxt = bio->bi_next;
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+		bio->bi_phys_segments = bio->bi_vcnt;
+	else {
+		struct bio *nxt = bio->bi_next;
+
+		bio->bi_next = NULL;
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_next = nxt;
+	}
 
-	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-	bio->bi_next = nxt;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
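The effect of the new QUEUE_FLAG_NO_SG_MERGE path can be illustrated outside the kernel. Below is a user-space toy model (toy_bvec and count_segments are made-up names; only the merge-vs-no-merge decision mirrors the patched __blk_recalc_rq_segments()):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct toy_bvec {
		unsigned int len;	/* bytes in this vector */
		bool contig_with_prev;	/* stand-in for the page/offset checks */
	};

	static unsigned int count_segments(const struct toy_bvec *bv, size_t n,
					   bool no_sg_merge)
	{
		unsigned int segs = 0;

		for (size_t i = 0; i < n; i++)
			if (no_sg_merge || i == 0 || !bv[i].contig_with_prev)
				segs++;	/* "goto new_segment" in the real code */

		return segs;
	}

	int main(void)
	{
		const struct toy_bvec vecs[] = {
			{ 4096, false }, { 4096, true },
			{ 4096, true },  { 4096, false },
		};
		size_t n = sizeof(vecs) / sizeof(vecs[0]);

		printf("merged:   %u segments\n", count_segments(vecs, n, false));
		printf("no merge: %u segments\n", count_segments(vecs, n, true));
		return 0;
	}

With merging enabled the three contiguous vectors collapse into one segment (2 total); with NO_SG_MERGE set, each of the four vectors is its own segment, which is exactly why blk_recount_segments() can just use bi_vcnt.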
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f27fe44230c2..21f952ab3581 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,19 +199,12 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->q = q;
 	rq->mq_ctx = ctx;
 	rq->cmd_flags |= rw_flags;
-	rq->cmd_type = 0;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
-	rq->__data_len = 0;
-	rq->__sector = (sector_t) -1;
-	rq->bio = NULL;
-	rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
 	rq->rq_disk = NULL;
 	rq->part = NULL;
-	rq->start_time = jiffies;
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
@@ -221,23 +214,16 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
 #endif
-	rq->ioprio = 0;
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
-	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
-	rq->cmd_len = BLK_MAX_CDB;
 
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
 	rq->sense = NULL;
 
-	rq->deadline = 0;
 	INIT_LIST_HEAD(&rq->timeout_list);
-	rq->timeout = 0;
-	rq->retries = 0;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
@@ -449,8 +435,10 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * complete. So be sure to clear complete again when we start
 	 * the request, otherwise we'll ignore the completion event.
 	 */
-	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -553,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 {
-	return tags->rqs[tag];
+	struct request_queue *q = hctx->queue;
+
+	if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
+	    q->flush_rq->tag == tag)
+		return q->flush_rq;
+
+	return hctx->tags->rqs[tag];
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -584,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
 		if (tag >= hctx->tags->nr_tags)
 			break;
 
-		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
+		rq = blk_mq_tag_to_rq(hctx, tag++);
 		if (rq->q != hctx->queue)
 			continue;
 		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
@@ -1112,7 +1106,11 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
-	blk_account_io_start(rq, 1);
+
+	if (blk_do_io_stat(rq)) {
+		rq->start_time = jiffies;
+		blk_account_io_start(rq, 1);
+	}
 }
 
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
@@ -1829,6 +1827,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->mq_ops = set->ops;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+	if (!(set->flags & BLK_MQ_F_SG_MERGE))
+		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+
 	q->sg_reserved_size = INT_MAX;
 
 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
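From a driver's perspective, the new contract is: pass the hw queue, and the lookup transparently covers the tag borrowed by the queue's internal flush request (which is why flush_end_io above clears flush_rq->cmd_flags under the mq lock). A hedged completion-path sketch, where my_complete_tag is hypothetical and blk_mq_end_io() is assumed as the completion helper of this kernel era:

	static void my_complete_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	{
		/* resolves either hctx->tags->rqs[tag] or, while a flush
		 * sequence borrows this tag, q->flush_rq */
		struct request *rq = blk_mq_tag_to_rq(hctx, tag);

		blk_mq_end_io(rq, 0);
	}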
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 26487972ac54..9c28a5b38042 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -205,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
 	if (capable(CAP_SYS_RAWIO))
 		return 0;
 
-	/* if there's no filter set, assume we're filtering everything out */
-	if (!filter)
-		return -EPERM;
-
 	/* Anybody who can open the device can do a read-safe command */
 	if (test_bit(cmd[0], filter->read_ok))
 		return 0;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 74abd49fabdc..abc858b3528b 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -193,9 +193,7 @@ static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
 static struct request *mtip_rq_from_tag(struct driver_data *dd,
 					unsigned int tag)
 {
-	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
-
-	return blk_mq_tag_to_rq(hctx->tags, tag);
+	return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag);
 }
 
 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 91dfb75ce39f..ad3adb73cc70 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -129,6 +129,7 @@ enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
 	BLK_MQ_F_TAG_SHARED	= 1 << 2,
+	BLK_MQ_F_SG_MERGE	= 1 << 3,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
@@ -153,7 +154,7 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e90e1692e052..8aba35f46f87 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -510,6 +510,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1069,7 +1070,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
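Putting the SG-merge plumbing together: a blk-mq driver now opts in or out of segment merging at tag-set setup time. A minimal sketch, assuming a hypothetical my_mq_ops and eliding error handling (blk_mq_alloc_tag_set()/blk_mq_init_queue() are the real setup calls):

	struct blk_mq_tag_set set = {
		.ops		= &my_mq_ops,	/* hypothetical driver ops */
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE,
	};
	struct request_queue *q;

	blk_mq_alloc_tag_set(&set);
	q = blk_mq_init_queue(&set);	/* omitting BLK_MQ_F_SG_MERGE here would
					 * set QUEUE_FLAG_NO_SG_MERGE on the queue,
					 * making each bio_vec count as a segment */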