author		Christoph Hellwig <hch@lst.de>	2015-11-16 04:28:47 -0500
committer	Jens Axboe <axboe@fb.com>	2015-12-22 11:38:34 -0500
commit		d8f32166a9c587e87a3a86f654c73d40b6b5df00
tree		1b5757bbc76be359d7b8def1acdd72f66247e5e1
parent		7688faa6dd2c99ce5d66571d9ad65535ec39e8cb
nvme: switch delete SQ/CQ to blk_execute_rq_nowait
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	drivers/nvme/host/pci.c	49
1 file changed, 15 insertions(+), 34 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 808fb7355603..d6d92b022f97 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,8 +86,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev);
 struct async_cmd_info {
 	struct kthread_work work;
 	struct kthread_worker *worker;
-	struct request *req;
-	u32 result;
 	int status;
 	void *ctx;
 };
@@ -391,16 +389,6 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 }
 
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct async_cmd_info *cmdinfo = ctx;
-	cmdinfo->result = le32_to_cpup(&cqe->result);
-	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-	blk_mq_free_request(cmdinfo->req);
-}
-
 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
 						unsigned int tag)
 {
@@ -985,28 +973,13 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
 	return 0;
 }
 
-static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
-			struct nvme_command *cmd,
-			struct async_cmd_info *cmdinfo, unsigned timeout)
+static void async_cmd_info_endio(struct request *req, int error)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
-	struct request *req;
-	struct nvme_cmd_info *cmd_rq;
-
-	req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, 0);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	struct async_cmd_info *cmdinfo = req->end_io_data;
 
-	req->timeout = timeout;
-	cmd_rq = blk_mq_rq_to_pdu(req);
-	cmdinfo->req = req;
-	nvme_set_info(cmd_rq, cmdinfo, async_completion);
-	cmdinfo->status = -EINTR;
-
-	cmd->common.command_id = req->tag;
-
-	nvme_submit_cmd(nvmeq, cmd);
-	return 0;
+	cmdinfo->status = req->errors;
+	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+	blk_mq_free_request(req);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1920,6 +1893,7 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
 						kthread_work_func_t fn)
 {
+	struct request *req;
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1927,8 +1901,15 @@ static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
 	c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
 	init_kthread_work(&nvmeq->cmdinfo.work, fn);
-	return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
-						ADMIN_TIMEOUT);
+
+	req = nvme_alloc_request(nvmeq->dev->ctrl.admin_q, &c, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	req->timeout = ADMIN_TIMEOUT;
+	req->end_io_data = &nvmeq->cmdinfo;
+	blk_execute_rq_nowait(req->q, NULL, req, 0, async_cmd_info_endio);
+	return 0;
 }
 
 static void nvme_del_cq_work_handler(struct kthread_work *work)
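Editor's note (not part of the patch): the heart of the change is that the driver no longer hand-rolls admin command submission for queue deletion -- allocating a request, registering a driver-private completion callback via nvme_set_info(), and ringing the doorbell itself -- but instead hands the request to the block layer and lets it invoke an end_io callback. Below is a minimal sketch of that pattern as it looks inside drivers/nvme/host/pci.c after this patch, reusing the types and helpers visible in the hunks above (struct async_cmd_info, nvme_alloc_request(), ADMIN_TIMEOUT); the names sketch_endio and submit_async_sketch are hypothetical and exist only for illustration.

/*
 * Illustrative sketch only: mirrors the asynchronous submission pattern
 * introduced by this patch; relies on the types and headers already
 * present in drivers/nvme/host/pci.c.
 */
static void sketch_endio(struct request *req, int error)
{
	struct async_cmd_info *cmdinfo = req->end_io_data;

	/* Block layer has completed the request; record its status ... */
	cmdinfo->status = req->errors;
	/* ... kick the kthread worker that finishes the queue deletion ... */
	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
	/* ... and drop the request, which the end_io callback now owns. */
	blk_mq_free_request(req);
}

static int submit_async_sketch(struct nvme_dev *dev, struct nvme_command *cmd,
		struct async_cmd_info *cmdinfo)
{
	struct request *req;

	/* Allocate an admin request carrying the NVMe command. */
	req = nvme_alloc_request(dev->ctrl.admin_q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = ADMIN_TIMEOUT;
	req->end_io_data = cmdinfo;	/* handed back to sketch_endio() */

	/* Queue the request and return immediately; no blocking wait. */
	blk_execute_rq_nowait(req->q, NULL, req, 0, sketch_endio);
	return 0;
}

Compared with the removed nvme_submit_admin_async_cmd(), this drops the driver-private bookkeeping (cmdinfo->req, cmdinfo->result, async_completion()) and lets blk_execute_rq_nowait() handle tagging, timeout and completion routing.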