author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-01 14:36:32 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-01 14:36:32 -0500
commit    880584176ed7875117a5ba76cf316cb60f7ad30b
tree      7d77cf9e4782047c2a6508a1218fa7b42d28b048
parent    c734b42583bc391d86ed64e3be25fd5f2c464124
parent    1c9b357ced0b2fd2c173c058c2de88af513bc064
Merge tag 'for-linus-20181201' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
- Single range elevator discard merge fix, that caused crashes (Ming)
- Fix for a regression in O_DIRECT, where we could potentially lose the
error value (Maximilian Heyne)
- NVMe pull request from Christoph, with little fixes all over the map
for NVMe.
* tag 'for-linus-20181201' of git://git.kernel.dk/linux-block:
block: fix single range discard merge
nvme-rdma: fix double freeing of async event data
nvme: flush namespace scanning work just before removing namespaces
nvme: warn when finding multi-port subsystems without multipathing enabled
fs: fix lost error code in dio_complete
nvme-pci: fix surprise removal
nvme-fc: initialize nvme_req(rq)->ctrl after calling __nvme_fc_init_request()
nvme: Free ctrl device name on init failure
 block/blk-merge.c        | 2 +-
 drivers/nvme/host/core.c | 8 +++++---
 drivers/nvme/host/fc.c   | 2 +-
 drivers/nvme/host/nvme.h | 3 +++
 drivers/nvme/host/rdma.c | 2 ++
 fs/direct-io.c           | 4 ++--
 6 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7696c47489a..7695034f4b87 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -820,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,
 
         req->__data_len += blk_rq_bytes(next);
 
-        if (req_op(req) != REQ_OP_DISCARD)
+        if (!blk_discard_mergable(req))
                 elv_merge_requests(q, req, next);
 
         /*
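For context, here is a minimal sketch of the blk_discard_mergable() helper the fix switches to, as it looked in the kernel around this release (check include/linux/blkdev.h in the matching tree for the authoritative version). The idea is that a request only counts as a discard-style merge when the queue supports more than one discard segment; single-range discards therefore fall back to the normal merge path and get the elv_merge_requests() bookkeeping that was previously skipped, which is what caused the crashes mentioned above.

static inline bool blk_discard_mergable(struct request *req)
{
        /*
         * Only treat this as a multi-range discard merge when the queue
         * can actually accept more than one discard segment; otherwise
         * the request should be merged like any other request.
         */
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}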
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 559d567693b8..3cf1b773158e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3314,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
         struct nvme_ns *ns, *next;
         LIST_HEAD(ns_list);
 
+        /* prevent racing with ns scanning */
+        flush_work(&ctrl->scan_work);
+
         /*
          * The dead states indicates the controller was not gracefully
          * disconnected. In that case, we won't be able to flush any data while
@@ -3476,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
         nvme_mpath_stop(ctrl);
         nvme_stop_keep_alive(ctrl);
         flush_work(&ctrl->async_event_work);
-        flush_work(&ctrl->scan_work);
         cancel_work_sync(&ctrl->fw_act_work);
         if (ctrl->ops->stop_ctrl)
                 ctrl->ops->stop_ctrl(ctrl);
@@ -3585,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
         return 0;
 out_free_name:
-        kfree_const(dev->kobj.name);
+        kfree_const(ctrl->device->kobj.name);
 out_release_instance:
         ida_simple_remove(&nvme_instance_ida, ctrl->instance);
 out:
@@ -3607,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
         down_read(&ctrl->namespaces_rwsem);
 
         /* Forcibly unquiesce queues to avoid blocking dispatch */
-        if (ctrl->admin_q)
+        if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
                 blk_mq_unquiesce_queue(ctrl->admin_q);
 
         list_for_each_entry(ns, &ctrl->namespaces, list)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 54032c466636..feb86b59170e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1752,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
         struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
         int res;
 
-        nvme_req(rq)->ctrl = &ctrl->ctrl;
         res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
         if (res)
                 return res;
         op->op.fcp_req.first_sgl = &op->sgl[0];
         op->op.fcp_req.private = &op->priv[0];
+        nvme_req(rq)->ctrl = &ctrl->ctrl;
         return res;
 }
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cee79cb388af..081cbdcce880 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
                 struct nvme_id_ctrl *id)
 {
+        if (ctrl->subsys->cmic & (1 << 3))
+                dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
         return 0;
 }
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
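The change above touches the stub variant of nvme_mpath_init() that is compiled when CONFIG_NVME_MULTIPATH is off. As a self-contained illustration of that Kconfig stub pattern (user-space C with hypothetical names: fake_ctrl, fake_mpath_init, CONFIG_FAKE_MULTIPATH are stand-ins, not kernel identifiers), the header provides an inline no-op when the feature is disabled, and the fix upgrades that no-op to at least warn the user instead of failing silently:

#include <stdio.h>

struct fake_ctrl {
        unsigned int cmic;      /* capability bits reported by the device */
};

#ifdef CONFIG_FAKE_MULTIPATH
int fake_mpath_init(struct fake_ctrl *ctrl);    /* real implementation elsewhere */
#else
static inline int fake_mpath_init(struct fake_ctrl *ctrl)
{
        /* Feature compiled out: warn if the device would benefit from it. */
        if (ctrl->cmic & (1 << 3))
                fprintf(stderr, "multipath support not compiled in; continuing without it\n");
        return 0;       /* still succeed, just with reduced functionality */
}
#endif

int main(void)
{
        struct fake_ctrl c = { .cmic = 1 << 3 };

        return fake_mpath_init(&c);
}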
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d181cafedc58..ab6ec7295bf9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
         qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
         if (ib_dma_mapping_error(ibdev, qe->dma)) {
                 kfree(qe->data);
+                qe->data = NULL;
                 return -ENOMEM;
         }
 
@@ -823,6 +824,7 @@ out_free_tagset:
 out_free_async_qe:
         nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
                         sizeof(struct nvme_command), DMA_TO_DEVICE);
+        ctrl->async_event_sqe.data = NULL;
 out_free_queue:
         nvme_rdma_free_queue(&ctrl->queues[0]);
         return error;
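Both hunks apply the same defensive idiom: clear the pointer right after freeing it so a later, duplicate free becomes a harmless no-op (kfree(NULL), like free(NULL), does nothing). A minimal user-space sketch of the idiom, with hypothetical names (async_buf, async_buf_release), not the driver code itself:

#include <stdlib.h>

struct async_buf {
        void *data;
};

/* Free the buffer and clear the pointer so a second call is a no-op. */
static void async_buf_release(struct async_buf *buf)
{
        free(buf->data);        /* free(NULL) is defined to do nothing */
        buf->data = NULL;
}

int main(void)
{
        struct async_buf buf = { .data = malloc(64) };

        async_buf_release(&buf);
        async_buf_release(&buf);        /* harmless thanks to the NULL reset */
        return 0;
}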
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 722d17c88edb..41a0e97252ae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
                  */
                 dio->iocb->ki_pos += transferred;
 
-                if (dio->op == REQ_OP_WRITE)
-                        ret = generic_write_sync(dio->iocb, transferred);
+                if (ret > 0 && dio->op == REQ_OP_WRITE)
+                        ret = generic_write_sync(dio->iocb, ret);
                 dio->iocb->ki_complete(dio->iocb, ret, 0);
         }
 
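The pattern behind this fix: only run the post-write sync when the I/O itself succeeded, and thread the running status through, so a negative error code is never replaced by the transferred byte count. A hypothetical user-space sketch of that pattern (finish_async_write() and the sync_fn callback are stand-ins for illustration, not kernel APIs):

/* Hypothetical stand-in for the sync step (e.g. generic_write_sync()). */
typedef long (*sync_fn)(long bytes);

static long finish_async_write(long status, long transferred, int is_write,
                               sync_fn write_sync)
{
        long ret = (status < 0) ? status : transferred;

        /* Only sync on success, and keep any error the sync step returns. */
        if (ret > 0 && is_write)
                ret = write_sync(ret);
        return ret;
}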
