author	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-13 19:23:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-13 19:23:16 -0400
commit	4443f8e6ac7755cd775c70d08be8042dc2f936cb (patch)
tree	7adb7c79800f27cd327cbafa792d9a847b804937 /drivers
parent	b60bc0665e6af8c55b946b67ea8cb235823bb74e (diff)
parent	a89afe58f1a74aac768a5eb77af95ef4ee15beaa (diff)
Merge tag 'for-linus-20190412' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Set of fixes that should go into this round. This pull is larger than
  I'd like at this time, but there's really no specific reason for that.
  Some are fixes for issues that went into this merge window, others are
  not. Anyway, this contains:

   - Hardware queue limiting for virtio-blk/scsi (Dongli)

   - Multi-page bvec fixes for lightnvm pblk

   - Multi-bio dio error fix (Jason)

   - Remove the cache hint from the io_uring tool side, since we didn't
     move forward with that (me)

   - Make io_uring SETUP_SQPOLL root restricted (me)

   - Fix leak of page in error handling for pc requests (Jérôme)

   - Fix BFQ regression introduced in this merge window (Paolo)

   - Fix break logic for bio segment iteration (Ming)

   - Fix NVMe cancel request error handling (Ming)

   - NVMe pull request with two fixes (Christoph):
      - fix the initial CSN for nvme-fc (James)
      - handle log page offsets properly in the target (Keith)"

* tag 'for-linus-20190412' of git://git.kernel.dk/linux-block:
  block: fix the return errno for direct IO
  nvmet: fix discover log page when offsets are used
  nvme-fc: correct csn initialization and increments on error
  block: do not leak memory in bio_copy_user_iov()
  lightnvm: pblk: fix crash in pblk_end_partial_read due to multipage bvecs
  nvme: cancel request synchronously
  blk-mq: introduce blk_mq_complete_request_sync()
  scsi: virtio_scsi: limit number of hw queues by nr_cpu_ids
  virtio-blk: limit number of hw queues by nr_cpu_ids
  block, bfq: fix use after free in bfq_bfqq_expire
  io_uring: restrict IORING_SETUP_SQPOLL to root
  tools/io_uring: remove IOCQE_FLAG_CACHEHIT
  block: don't use for-inside-for in bio_for_each_segment_all
Diffstat (limited to 'drivers')
 drivers/block/virtio_blk.c      |  2
 drivers/lightnvm/pblk-read.c    | 50
 drivers/nvme/host/core.c        |  2
 drivers/nvme/host/fc.c          | 20
 drivers/nvme/target/admin-cmd.c |  5
 drivers/nvme/target/discovery.c | 68
 drivers/nvme/target/nvmet.h     |  1
 drivers/scsi/virtio_scsi.c      |  1
 8 files changed, 98 insertions(+), 51 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4bc083b7c9b5..2a7ca4a1e6f7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
+	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
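The new clamp matters because blk-mq maps each hardware queue to a CPU: a virtio device can report more virtqueues than the host has possible CPUs, and queues beyond nr_cpu_ids would never be usable while still costing the per-queue allocations that follow. A hedged sketch of the pattern in isolation (dev_reported_vqs is a hypothetical stand-in for the count read from virtio config space):

	/* Sketch: clamp a device-reported queue count before sizing
	 * per-queue allocations. nr_cpu_ids is the number of possible
	 * CPU ids in the system.
	 */
	unsigned int num_vqs = dev_reported_vqs;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);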
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 3789185144da..0b7d5fb4548d 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	struct pblk_sec_meta *meta;
 	struct bio *new_bio = rqd->bio;
 	struct bio *bio = pr_ctx->orig_bio;
-	struct bio_vec src_bv, dst_bv;
 	void *meta_list = rqd->meta_list;
-	int bio_init_idx = pr_ctx->bio_init_idx;
 	unsigned long *read_bitmap = pr_ctx->bitmap;
+	struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+	struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
 	int nr_secs = pr_ctx->orig_nr_secs;
 	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
 	void *src_p, *dst_p;
-	int hole, i;
+	int bit, i;
 
 	if (unlikely(nr_holes == 1)) {
 		struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
 	/* Fill the holes in the original bio */
 	i = 0;
-	hole = find_first_zero_bit(read_bitmap, nr_secs);
-	do {
-		struct pblk_line *line;
+	for (bit = 0; bit < nr_secs; bit++) {
+		if (!test_bit(bit, read_bitmap)) {
+			struct bio_vec dst_bv, src_bv;
+			struct pblk_line *line;
 
-		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-		kref_put(&line->ref, pblk_line_put);
+			line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+			kref_put(&line->ref, pblk_line_put);
 
-		meta = pblk_get_meta(pblk, meta_list, hole);
-		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+			meta = pblk_get_meta(pblk, meta_list, bit);
+			meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-		src_bv = new_bio->bi_io_vec[i++];
-		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+			dst_bv = bio_iter_iovec(bio, orig_iter);
+			src_bv = bio_iter_iovec(new_bio, new_iter);
 
-		src_p = kmap_atomic(src_bv.bv_page);
-		dst_p = kmap_atomic(dst_bv.bv_page);
+			src_p = kmap_atomic(src_bv.bv_page);
+			dst_p = kmap_atomic(dst_bv.bv_page);
 
-		memcpy(dst_p + dst_bv.bv_offset,
-		       src_p + src_bv.bv_offset,
-		       PBLK_EXPOSED_PAGE_SIZE);
+			memcpy(dst_p + dst_bv.bv_offset,
+			       src_p + src_bv.bv_offset,
+			       PBLK_EXPOSED_PAGE_SIZE);
 
-		kunmap_atomic(src_p);
-		kunmap_atomic(dst_p);
+			kunmap_atomic(src_p);
+			kunmap_atomic(dst_p);
 
-		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+			flush_dcache_page(dst_bv.bv_page);
+			mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-	} while (hole < nr_secs);
+			bio_advance_iter(new_bio, &new_iter,
+					PBLK_EXPOSED_PAGE_SIZE);
+			i++;
+		}
+		bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+	}
 
 	bio_put(new_bio);
 	kfree(pr_ctx);
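A note on why the rewrite above is needed: with multi-page bvecs, a single entry in bio->bi_io_vec[] may describe more than one page, so the old assumption that sector "hole" lives at bi_io_vec[bio_init_idx + hole] no longer holds, and indexing the vector directly can land on the wrong page (the crash this patch fixes). Iterating with a bvec_iter instead always yields single-page segments. A hedged, pblk-independent sketch of that pattern (bio and nr_pages are assumed supplied by the caller):

	/* Sketch: walk a bio in fixed page-sized steps via a bvec_iter.
	 * bio_iter_iovec() returns the single-page segment at the
	 * iterator's current position, regardless of how many pages
	 * each underlying bio_vec covers.
	 */
	struct bvec_iter iter = BVEC_ITER_ALL_INIT;
	struct bio_vec bv;
	int i;

	for (i = 0; i < nr_pages; i++) {
		bv = bio_iter_iovec(bio, iter);
		/* ... operate on bv.bv_page at bv.bv_offset ... */
		bio_advance_iter(bio, &iter, PAGE_SIZE);
	}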
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 470601980794..2c43e12b70af 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 				"Cancelling I/O %d", req->tag);
 
 	nvme_req(req)->status = NVME_SC_ABORT_REQ;
-	blk_mq_complete_request(req);
+	blk_mq_complete_request_sync(req);
 	return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
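The switch to the synchronous variant matters because blk_mq_complete_request() may bounce the completion to another CPU via IPI/softirq; nvme_cancel_request() runs while a controller is being torn down, and returning before the ->complete handler has actually executed lets teardown race with the in-flight completion. blk_mq_complete_request_sync(), introduced earlier in this same series, completes the request in the caller's context. A hedged sketch of the call site that drives this path during teardown (field names are illustrative; the real callers live in the transport drivers):

	/* Sketch: cancel every in-flight request on a controller's
	 * tagset. Because nvme_cancel_request() now completes each
	 * request synchronously, no completion handler can still be
	 * running once this iteration returns.
	 */
	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request,
				&dev->ctrl);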
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f3b9d91ba0df..6d8451356eac 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
 	memset(queue, 0, sizeof(*queue));
 	queue->ctrl = ctrl;
 	queue->qnum = idx;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 	queue->dev = ctrl->dev;
 
 	if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
-	u32 csn;
 	int ret, opstate;
 
 	/*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-	csn = atomic_inc_return(&queue->csn);
-	cmdiu->csn = cpu_to_be32(csn);
 	cmdiu->data_len = cpu_to_be32(data_len);
 	switch (io_dir) {
 	case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (!(op->flags & FCOP_FLAGS_AEN))
 		blk_mq_start_request(op->rq);
 
+	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
+		/*
+		 * If the lld fails to send the command is there an issue with
+		 * the csn value? If the command that fails is the Connect,
+		 * no - as the connection won't be live. If it is a command
+		 * post-connect, it's possible a gap in csn may be created.
+		 * Does this matter? As Linux initiators don't send fused
+		 * commands, no. The gap would exist, but as there's nothing
+		 * that depends on csn order to be delivered on the target
+		 * side, it shouldn't hurt. It would be difficult for a
+		 * target to even detect the csn gap as it has no idea when the
+		 * cmd with the csn was supposed to arrive.
+		 */
 		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
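Two things change in concert here. First, the CSN now starts at 0 instead of 1: atomic_inc_return() hands back the post-increment value, so the first command on a fresh queue (the fabric Connect) carries CSN 1 as FC-NVME requires, where it previously carried 2. Second, the increment moves from the IU formatting step down to just before the fcp_io() hand-off, so CSN assignment order tracks transmit order as closely as software can manage. A small sketch of the first point (plain atomics, no driver context):

	/* Sketch: why the initial value is now 0. atomic_inc_return()
	 * returns the value *after* incrementing.
	 */
	atomic_t csn = ATOMIC_INIT(0);

	u32 connect_csn = atomic_inc_return(&csn);	/* 1: Connect cmd */
	u32 next_csn    = atomic_inc_return(&csn);	/* 2: first I/O */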
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76250181fee0..9f72d515fc4b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+	return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
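The new helper gives the target a counterpart to nvmet_get_log_page_len(): LPO (Log Page Offset) is a 64-bit little-endian field in the Get Log Page command, so callers can now serve an arbitrary window of a log page. A hedged usage sketch (log_buf is a hypothetical fully-built log page; real callers must also validate offset alignment and bounds):

	/* Sketch: serve the (offset, length) window the host asked for
	 * out of a fully formatted log page.
	 */
	size_t len = nvmet_get_log_page_len(req->cmd);
	u64 off = nvmet_get_log_page_offset(req->cmd);
	u16 status;

	status = nvmet_copy_to_sgl(req, 0, log_buf + off, len);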
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c872b47a88f3..33ed95e72d6b 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
 	memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	size_t entries = 0;
+
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+		entries++;
+	}
+	list_for_each_entry(r, &req->port->referrals, entry)
+		entries++;
+	return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_disc_rsp_page_hdr *hdr;
+	u64 offset = nvmet_get_log_page_offset(req->cmd);
 	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	size_t alloc_len = max(data_len, sizeof(*hdr));
-	int residual_len = data_len - sizeof(*hdr);
+	size_t alloc_len;
 	struct nvmet_subsys_link *p;
 	struct nvmet_port *r;
 	u32 numrec = 0;
 	u16 status = 0;
+	void *buffer;
+
+	/* Spec requires dword aligned offsets */
+	if (offset & 0x3) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	/*
 	 * Make sure we're passing at least a buffer of response header size.
 	 * If host provided data len is less than the header size, only the
 	 * number of bytes requested by host will be sent to host.
 	 */
-	hdr = kzalloc(alloc_len, GFP_KERNEL);
-	if (!hdr) {
+	down_read(&nvmet_config_sem);
+	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+	buffer = kzalloc(alloc_len, GFP_KERNEL);
+	if (!buffer) {
+		up_read(&nvmet_config_sem);
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
 
-	down_read(&nvmet_config_sem);
+	hdr = buffer;
 	list_for_each_entry(p, &req->port->subsystems, entry) {
+		char traddr[NVMF_TRADDR_SIZE];
+
 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
 			continue;
-		if (residual_len >= entry_size) {
-			char traddr[NVMF_TRADDR_SIZE];
-
-			nvmet_set_disc_traddr(req, req->port, traddr);
-			nvmet_format_discovery_entry(hdr, req->port,
-					p->subsys->subsysnqn, traddr,
-					NVME_NQN_NVME, numrec);
-			residual_len -= entry_size;
-		}
+
+		nvmet_set_disc_traddr(req, req->port, traddr);
+		nvmet_format_discovery_entry(hdr, req->port,
+				p->subsys->subsysnqn, traddr,
+				NVME_NQN_NVME, numrec);
 		numrec++;
 	}
 
 	list_for_each_entry(r, &req->port->referrals, entry) {
-		if (residual_len >= entry_size) {
-			nvmet_format_discovery_entry(hdr, r,
-					NVME_DISC_SUBSYS_NAME,
-					r->disc_addr.traddr,
-					NVME_NQN_DISC, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_format_discovery_entry(hdr, r,
+				NVME_DISC_SUBSYS_NAME,
+				r->disc_addr.traddr,
+				NVME_NQN_DISC, numrec);
 		numrec++;
 	}
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
 	up_read(&nvmet_config_sem);
 
-	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-	kfree(hdr);
+	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+	kfree(buffer);
 out:
 	nvmet_req_complete(req, status);
 }
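The rewrite fixes two problems at once. The old code sized the buffer to the host's data_len and used residual_len to decide which entries still fit, so a host reading the log page in chunks with nonzero offsets got truncated or shifted records. The new code builds the complete page (header plus one entry per visible subsystem and referral, counted under nvmet_config_sem so the count cannot change before formatting) and then copies whatever window the host requested; unaligned offsets are rejected up front per the spec. A hedged sketch of the resulting arithmetic (nr_entries stands in for discovery_log_entries()):

	/* Sketch: build the whole page once, then serve any window.
	 * Assumes offset/data_len have been validated against alloc_len.
	 */
	size_t alloc_len = sizeof(*hdr) + entry_size * nr_entries;
	void *buffer = kzalloc(alloc_len, GFP_KERNEL);

	/* ... format header and nr_entries records into buffer ... */
	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);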
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51e49efd7849..1653d19b187f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8af01777d09c..f8cb7c23305b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
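Same clamp as virtio-blk above: the device-reported queue count flows into the SCSI host's blk-mq hardware-queue count later in virtscsi_probe(), so it must not exceed nr_cpu_ids. A hedged sketch of that flow (abbreviated from the probe path; the assignment below is assumed to follow later in the function):

	/* Sketch: the clamped count becomes the host's hw queue count. */
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
	shost->nr_hw_queues = num_queues;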