diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-10-26 11:08:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-10-26 11:08:48 -0400 |
commit | 3b5a9a8e654c3695e7b499b0784341f299adef48 (patch) | |
tree | 16422d52d414d18a169902477d693630e1bc9e27 | |
parent | 832c6b18f904b96f494d43d6023db68c9f330cf0 (diff) | |
parent | 32e67a3a06b88904155170560b7a63d372b320bd (diff) |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"A few select fixes that should go into this series. Mainly for NVMe,
but also a single stable fix for nbd from Josef"
* 'for-linus' of git://git.kernel.dk/linux-block:
nbd: handle interrupted sendmsg with a sndtimeo set
nvme-rdma: Fix error status return in tagset allocation failure
nvme-rdma: Fix possible double free in reconnect flow
nvmet: synchronize sqhd update
nvme-fc: retry initial controller connections 3 times
nvme-fc: fix iowait hang
-rw-r--r-- | drivers/block/nbd.c | 13 | ||||
-rw-r--r-- | drivers/nvme/host/fc.c | 37 | ||||
-rw-r--r-- | drivers/nvme/host/rdma.c | 16 | ||||
-rw-r--r-- | drivers/nvme/target/core.c | 15 | ||||
-rw-r--r-- | drivers/nvme/target/nvmet.h | 2 |
5 files changed, 69 insertions(+), 14 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index baebbdfd74d5..9adfb5445f8d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, | |||
386 | return result; | 386 | return result; |
387 | } | 387 | } |
388 | 388 | ||
389 | /* | ||
390 | * Different settings for sk->sk_sndtimeo can result in different return values | ||
391 | * if there is a signal pending when we enter sendmsg, because reasons? | ||
392 | */ | ||
393 | static inline int was_interrupted(int result) | ||
394 | { | ||
395 | return result == -ERESTARTSYS || result == -EINTR; | ||
396 | } | ||
397 | |||
389 | /* always call with the tx_lock held */ | 398 | /* always call with the tx_lock held */ |
390 | static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) | 399 | static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) |
391 | { | 400 | { |
@@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) | |||
458 | result = sock_xmit(nbd, index, 1, &from, | 467 | result = sock_xmit(nbd, index, 1, &from, |
459 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); | 468 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); |
460 | if (result <= 0) { | 469 | if (result <= 0) { |
461 | if (result == -ERESTARTSYS) { | 470 | if (was_interrupted(result)) { |
462 | /* If we havne't sent anything we can just return BUSY, | 471 | /* If we havne't sent anything we can just return BUSY, |
463 | * however if we have sent something we need to make | 472 | * however if we have sent something we need to make |
464 | * sure we only allow this req to be sent until we are | 473 | * sure we only allow this req to be sent until we are |
@@ -502,7 +511,7 @@ send_pages: | |||
502 | } | 511 | } |
503 | result = sock_xmit(nbd, index, 1, &from, flags, &sent); | 512 | result = sock_xmit(nbd, index, 1, &from, flags, &sent); |
504 | if (result <= 0) { | 513 | if (result <= 0) { |
505 | if (result == -ERESTARTSYS) { | 514 | if (was_interrupted(result)) { |
506 | /* We've already sent the header, we | 515 | /* We've already sent the header, we |
507 | * have no choice but to set pending and | 516 | * have no choice but to set pending and |
508 | * return BUSY. | 517 | * return BUSY. |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index af075e998944..be49d0f79381 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |||
2545 | nvme_fc_abort_aen_ops(ctrl); | 2545 | nvme_fc_abort_aen_ops(ctrl); |
2546 | 2546 | ||
2547 | /* wait for all io that had to be aborted */ | 2547 | /* wait for all io that had to be aborted */ |
2548 | spin_lock_irqsave(&ctrl->lock, flags); | 2548 | spin_lock_irq(&ctrl->lock); |
2549 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); | 2549 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); |
2550 | ctrl->flags &= ~FCCTRL_TERMIO; | 2550 | ctrl->flags &= ~FCCTRL_TERMIO; |
2551 | spin_unlock_irqrestore(&ctrl->lock, flags); | 2551 | spin_unlock_irq(&ctrl->lock); |
2552 | 2552 | ||
2553 | nvme_fc_term_aen_ops(ctrl); | 2553 | nvme_fc_term_aen_ops(ctrl); |
2554 | 2554 | ||
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2734 | { | 2734 | { |
2735 | struct nvme_fc_ctrl *ctrl; | 2735 | struct nvme_fc_ctrl *ctrl; |
2736 | unsigned long flags; | 2736 | unsigned long flags; |
2737 | int ret, idx; | 2737 | int ret, idx, retry; |
2738 | 2738 | ||
2739 | if (!(rport->remoteport.port_role & | 2739 | if (!(rport->remoteport.port_role & |
2740 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | 2740 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { |
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2760 | ctrl->rport = rport; | 2760 | ctrl->rport = rport; |
2761 | ctrl->dev = lport->dev; | 2761 | ctrl->dev = lport->dev; |
2762 | ctrl->cnum = idx; | 2762 | ctrl->cnum = idx; |
2763 | init_waitqueue_head(&ctrl->ioabort_wait); | ||
2763 | 2764 | ||
2764 | get_device(ctrl->dev); | 2765 | get_device(ctrl->dev); |
2765 | kref_init(&ctrl->ref); | 2766 | kref_init(&ctrl->ref); |
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2825 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | 2826 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); |
2826 | spin_unlock_irqrestore(&rport->lock, flags); | 2827 | spin_unlock_irqrestore(&rport->lock, flags); |
2827 | 2828 | ||
2828 | ret = nvme_fc_create_association(ctrl); | 2829 | /* |
2830 | * It's possible that transactions used to create the association | ||
2831 | * may fail. Examples: CreateAssociation LS or CreateIOConnection | ||
2832 | * LS gets dropped/corrupted/fails; or a frame gets dropped or a | ||
2833 | * command times out for one of the actions to init the controller | ||
2834 | * (Connect, Get/Set_Property, Set_Features, etc). Many of these | ||
2835 | * transport errors (frame drop, LS failure) inherently must kill | ||
2836 | * the association. The transport is coded so that any command used | ||
2837 | * to create the association (prior to a LIVE state transition | ||
2838 | * while NEW or RECONNECTING) will fail if it completes in error or | ||
2839 | * times out. | ||
2840 | * | ||
2841 | * As such: as the connect request was mostly likely due to a | ||
2842 | * udev event that discovered the remote port, meaning there is | ||
2843 | * not an admin or script there to restart if the connect | ||
2844 | * request fails, retry the initial connection creation up to | ||
2845 | * three times before giving up and declaring failure. | ||
2846 | */ | ||
2847 | for (retry = 0; retry < 3; retry++) { | ||
2848 | ret = nvme_fc_create_association(ctrl); | ||
2849 | if (!ret) | ||
2850 | break; | ||
2851 | } | ||
2852 | |||
2829 | if (ret) { | 2853 | if (ret) { |
2854 | /* couldn't schedule retry - fail out */ | ||
2855 | dev_err(ctrl->ctrl.device, | ||
2856 | "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum); | ||
2857 | |||
2830 | ctrl->ctrl.opts = NULL; | 2858 | ctrl->ctrl.opts = NULL; |
2859 | |||
2831 | /* initiate nvme ctrl ref counting teardown */ | 2860 | /* initiate nvme ctrl ref counting teardown */ |
2832 | nvme_uninit_ctrl(&ctrl->ctrl); | 2861 | nvme_uninit_ctrl(&ctrl->ctrl); |
2833 | nvme_put_ctrl(&ctrl->ctrl); | 2862 | nvme_put_ctrl(&ctrl->ctrl); |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 92a03ff5fb4d..87bac27ec64b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) | |||
571 | if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) | 571 | if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) |
572 | return; | 572 | return; |
573 | 573 | ||
574 | if (nvme_rdma_queue_idx(queue) == 0) { | ||
575 | nvme_rdma_free_qe(queue->device->dev, | ||
576 | &queue->ctrl->async_event_sqe, | ||
577 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
578 | } | ||
579 | |||
574 | nvme_rdma_destroy_queue_ib(queue); | 580 | nvme_rdma_destroy_queue_ib(queue); |
575 | rdma_destroy_id(queue->cm_id); | 581 | rdma_destroy_id(queue->cm_id); |
576 | } | 582 | } |
@@ -739,8 +745,6 @@ out: | |||
739 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, | 745 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, |
740 | bool remove) | 746 | bool remove) |
741 | { | 747 | { |
742 | nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, | ||
743 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
744 | nvme_rdma_stop_queue(&ctrl->queues[0]); | 748 | nvme_rdma_stop_queue(&ctrl->queues[0]); |
745 | if (remove) { | 749 | if (remove) { |
746 | blk_cleanup_queue(ctrl->ctrl.admin_q); | 750 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, | |||
765 | 769 | ||
766 | if (new) { | 770 | if (new) { |
767 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); | 771 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); |
768 | if (IS_ERR(ctrl->ctrl.admin_tagset)) | 772 | if (IS_ERR(ctrl->ctrl.admin_tagset)) { |
773 | error = PTR_ERR(ctrl->ctrl.admin_tagset); | ||
769 | goto out_free_queue; | 774 | goto out_free_queue; |
775 | } | ||
770 | 776 | ||
771 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); | 777 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
772 | if (IS_ERR(ctrl->ctrl.admin_q)) { | 778 | if (IS_ERR(ctrl->ctrl.admin_q)) { |
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) | |||
846 | 852 | ||
847 | if (new) { | 853 | if (new) { |
848 | ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); | 854 | ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); |
849 | if (IS_ERR(ctrl->ctrl.tagset)) | 855 | if (IS_ERR(ctrl->ctrl.tagset)) { |
856 | ret = PTR_ERR(ctrl->ctrl.tagset); | ||
850 | goto out_free_io_queues; | 857 | goto out_free_io_queues; |
858 | } | ||
851 | 859 | ||
852 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); | 860 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); |
853 | if (IS_ERR(ctrl->ctrl.connect_q)) { | 861 | if (IS_ERR(ctrl->ctrl.connect_q)) { |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 1b208beeef50..645ba7eee35d 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) | |||
387 | 387 | ||
388 | static void __nvmet_req_complete(struct nvmet_req *req, u16 status) | 388 | static void __nvmet_req_complete(struct nvmet_req *req, u16 status) |
389 | { | 389 | { |
390 | u32 old_sqhd, new_sqhd; | ||
391 | u16 sqhd; | ||
392 | |||
390 | if (status) | 393 | if (status) |
391 | nvmet_set_status(req, status); | 394 | nvmet_set_status(req, status); |
392 | 395 | ||
393 | if (req->sq->size) | 396 | if (req->sq->size) { |
394 | req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; | 397 | do { |
395 | req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); | 398 | old_sqhd = req->sq->sqhd; |
399 | new_sqhd = (old_sqhd + 1) % req->sq->size; | ||
400 | } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != | ||
401 | old_sqhd); | ||
402 | } | ||
403 | sqhd = req->sq->sqhd & 0x0000FFFF; | ||
404 | req->rsp->sq_head = cpu_to_le16(sqhd); | ||
396 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); | 405 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); |
397 | req->rsp->command_id = req->cmd->common.command_id; | 406 | req->rsp->command_id = req->cmd->common.command_id; |
398 | 407 | ||
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7b8e20adf760..87e429bfcd8a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -74,7 +74,7 @@ struct nvmet_sq { | |||
74 | struct percpu_ref ref; | 74 | struct percpu_ref ref; |
75 | u16 qid; | 75 | u16 qid; |
76 | u16 size; | 76 | u16 size; |
77 | u16 sqhd; | 77 | u32 sqhd; |
78 | struct completion free_done; | 78 | struct completion free_done; |
79 | struct completion confirm_done; | 79 | struct completion confirm_done; |
80 | }; | 80 | }; |