author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-08-28 04:19:01 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-08-28 04:19:01 -0400
commit    9749c37275cb1f72d309e676642f45eb92724190 (patch)
tree      7ecfdc655eebb56ddfee430f7f05e641ec4b49bb /block
parent    4f9adc8f91ba996374cd9487ecd1180fa99b9438 (diff)
parent    cc4a41fe5541a73019a864883297bd5043aa6d98 (diff)
Merge 4.13-rc7 into char-misc-next
We want the binder fix in here as well for testing and merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-debugfs.c    3
-rw-r--r--  block/blk-mq-pci.c        8
-rw-r--r--  block/blk-mq.c            5
-rw-r--r--  block/blk-throttle.c     18
-rw-r--r--  block/bsg-lib.c          74
5 files changed, 70 insertions(+), 38 deletions(-)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9ebc2945f991..4f927a58dff8 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(STATS),
 	QUEUE_FLAG_NAME(POLL_STATS),
 	QUEUE_FLAG_NAME(REGISTERED),
+	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+	QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(RAHEAD),
 	CMD_FLAG_NAME(BACKGROUND),
 	CMD_FLAG_NAME(NOUNMAP),
+	CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME
 
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 0c3354cf3552..76944e3271bf 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue);
 		if (!mask)
-			return -EINVAL;
+			goto fallback;
 
 		for_each_cpu(cpu, mask)
 			set->mq_map[cpu] = queue;
 	}
 
 	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
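
For context, blk_mq_pci_map_queues() is normally invoked from a driver's ->map_queues callback; with this hunk a missing IRQ affinity mask no longer fails the mapping but degrades to mapping every possible CPU onto hardware queue 0. A minimal caller sketch, with the driver name and its private structure purely illustrative (not part of this patch):

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>

/* Illustrative driver-private data; not part of this patch. */
struct mydrv_ctrl {
	struct pci_dev *pdev;
};

/*
 * Typical ->map_queues hook: hand the tag set and PCI device to the helper.
 * After this patch a NULL affinity mask no longer aborts the whole mapping;
 * the helper instead falls back to a single-queue map.
 */
static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct mydrv_ctrl *ctrl = set->driver_data;

	return blk_mq_pci_map_queues(set, ctrl->pdev);
}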
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 535cbdf32aab..4603b115e234 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -360,12 +360,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		return ERR_PTR(ret);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
 	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
@@ -411,12 +411,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
-	blk_queue_exit(q);
-
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a7285bf2831c..80f5481fe9f6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	}							\
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+	/* assume it's one sector */
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+		return 512;
+	return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->bytes_disp[rw] += bio_size;
 	tg->io_disp[rw]++;
-	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->last_bytes_disp[rw] += bio_size;
 	tg->last_io_disp[rw]++;
 
 	/*
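
These blk-throttle hunks change how discards are charged against a throttle group's bps budget. As a rough illustration (the numbers are hypothetical, not from this patch): with a 1 MiB/s write bps limit, an 8 GiB REQ_OP_DISCARD bio used to be charged its full bi_size, so jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)) could throttle the group for on the order of 8192 seconds; after this change the same bio is charged throtl_bio_data_size() = 512 bytes, so large discards no longer starve ordinary writes in the same cgroup.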
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index c4513b23f57a..dd56d7460cb9 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -29,26 +29,25 @@
 #include <scsi/scsi_cmnd.h>
 
 /**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
 {
 	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
 	struct request *rq = job->req;
 
-	blk_end_request_all(rq, BLK_STS_OK);
-
 	put_device(job->dev);	/* release reference for the request */
 
 	kfree(job->request_payload.sg_list);
 	kfree(job->reply_payload.sg_list);
-	kfree(job);
+
+	blk_end_request_all(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
 {
-	kref_put(&job->kref, bsg_destroy_job);
+	kref_put(&job->kref, bsg_teardown_job);
 }
 EXPORT_SYMBOL_GPL(bsg_job_put);
 
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
  */
 static void bsg_softirq_done(struct request *rq)
 {
-	struct bsg_job *job = rq->special;
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
 	bsg_job_put(job);
 }
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 }
 
 /**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct request_queue *q = req->q;
 	struct scsi_request *rq = scsi_req(req);
-	struct bsg_job *job;
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
-	BUG_ON(req->special);
-
-	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-
-	req->special = job;
-	job->req = req;
-	if (q->bsg_job_size)
-		job->dd_data = (void *)&job[1];
 	job->request = rq->cmd;
 	job->request_len = rq->cmd_len;
-	job->reply = rq->sense;
-	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
-						 * allocated */
+
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
 		if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
 {
 	struct device *dev = q->queuedata;
 	struct request *req;
-	struct bsg_job *job;
 	int ret;
 
 	if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_create_job(dev, req);
+		ret = bsg_prepare_job(dev, req);
 		if (ret) {
 			scsi_req(req)->result = ret;
 			blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		job = req->special;
-		ret = q->bsg_job_fn(job);
+		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
 		spin_lock_irq(q->queue_lock);
 		if (ret)
 			break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	memset(job, 0, sizeof(*job));
+
+	scsi_req_init(sreq);
+	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+	sreq->sense = kzalloc(sreq->sense_len, gfp);
+	if (!sreq->sense)
+		return -ENOMEM;
+
+	job->req = req;
+	job->reply = sreq->sense;
+	job->reply_len = sreq->sense_len;
+	job->dd_data = job + 1;
+
+	return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	kfree(sreq->sense);
+}
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return ERR_PTR(-ENOMEM);
-	q->cmd_size = sizeof(struct scsi_request);
+	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+	q->init_rq_fn = bsg_init_rq;
+	q->exit_rq_fn = bsg_exit_rq;
 	q->request_fn = bsg_request_fn;
 
 	ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 		goto out_cleanup_queue;
 
 	q->queuedata = dev;
-	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
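
With these bsg-lib hunks the bsg_job is no longer kzalloc'd in bsg_prepare_job(); it lives in the request PDU sized by q->cmd_size, and the LLD's per-job data still sits directly behind struct bsg_job, reachable via job->dd_data. A minimal caller sketch under that model, assuming the four-argument bsg_setup_queue() form implied by this diff; the transport names and structures are illustrative, not part of this patch:

#include <linux/bsg-lib.h>

/* Illustrative transport-private per-job data; its size is passed as dd_job_size. */
struct my_transport_job {
	u16 opcode;
};

static int my_transport_bsg_job_fn(struct bsg_job *job)
{
	/* dd_data points just past struct bsg_job inside the request PDU */
	struct my_transport_job *tj = job->dd_data;

	tj->opcode = *(const u8 *)job->request;
	/* ... hand the command to the HBA; complete later via bsg_job_done() ... */
	return 0;
}

static struct request_queue *my_transport_add_bsg(struct device *dev)
{
	return bsg_setup_queue(dev, "my_transport_bsg", my_transport_bsg_job_fn,
			       sizeof(struct my_transport_job));
}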