author      Jens Axboe <axboe@kernel.dk>    2018-01-26 13:13:01 -0500
committer   Jens Axboe <axboe@kernel.dk>    2018-01-26 13:13:01 -0500
commit      b16791de6daf641703c85db32415e7fde2202f54
tree        4e838cbfa69d0a9bf6fbfe0c994a6cf555351d25
parent      3124b65dad946c20feaaf08959ee38ec27361da9
parent      ca5554a696dce37852f6d6721520b4f13fc295c3
Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block
Pull NVMe fixes from Christoph:
"The additional week before the 4.15 release gave us time for a few more
nvme fixes, as well as the nifty trace points from Johannes"
* 'nvme-4.16' of git://git.infradead.org/nvme:
nvme: add tracepoint for nvme_complete_rq
nvme: add tracepoint for nvme_setup_cmd
nvme-pci: introduce RECONNECTING state to mark initializing procedure
nvme-rdma: remove redundant boolean for inline_data
nvme: don't free uuid pointer before printing it
nvme-pci: Suspend queues after deleting them
nvme-pci: Fix queue double allocations
-rw-r--r--   drivers/nvme/host/Makefile  |   4
-rw-r--r--   drivers/nvme/host/core.c    |  11
-rw-r--r--   drivers/nvme/host/fabrics.c |   3
-rw-r--r--   drivers/nvme/host/pci.c     |  49
-rw-r--r--   drivers/nvme/host/rdma.c    |   3
-rw-r--r--   drivers/nvme/host/trace.c   | 130
-rw-r--r--   drivers/nvme/host/trace.h   | 165
7 files changed, 339 insertions, 26 deletions
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43650ad..441e67e3a9d7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
 obj-$(CONFIG_NVME_CORE) += nvme-core.o
 obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
 obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
 obj-$(CONFIG_NVME_FC) += nvme-fc.o
 
 nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
 nvme-core-$(CONFIG_NVM) += lightnvm.o
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fde6fd2e7eef..b3af8e914570 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
 #include <linux/pm_qos.h>
 #include <asm/unaligned.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 #include "nvme.h"
 #include "fabrics.h"
 
@@ -217,6 +220,8 @@ void nvme_complete_rq(struct request *req)
 {
 	blk_status_t status = nvme_error_status(req);
 
+	trace_nvme_complete_rq(req);
+
 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
 		if (nvme_req_needs_failover(req, status)) {
 			nvme_failover_req(req);
@@ -260,7 +265,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	switch (new_state) {
 	case NVME_CTRL_ADMIN_ONLY:
 		switch (old_state) {
-		case NVME_CTRL_RESETTING:
+		case NVME_CTRL_RECONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -628,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	}
 
 	cmd->common.command_id = req->tag;
+	if (ns)
+		trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+	else
+		trace_nvme_setup_admin_cmd(cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
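The core.c hunks follow the standard Linux tracepoint split: the TRACE_EVENT() definitions live in a local trace.h, exactly one translation unit defines CREATE_TRACE_POINTS before including it, and the call sites simply invoke trace_<event>(). A condensed, hypothetical sketch of that pattern (illustrative names, two files shown in one block; this is not the driver's actual code):

```c
/* hypo_trace.h -- minimal TRACE_EVENT pattern; "hypo_request_done" is made up */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hypo

#if !defined(_HYPO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _HYPO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(hypo_request_done,
        TP_PROTO(int tag, u16 status),
        TP_ARGS(tag, status),
        TP_STRUCT__entry(
                __field(int, tag)
                __field(u16, status)
        ),
        TP_fast_assign(
                __entry->tag = tag;
                __entry->status = status;
        ),
        TP_printk("tag=%d, status=%u", __entry->tag, __entry->status)
);

#endif /* _HYPO_TRACE_H */

/* TRACE_INCLUDE_PATH "." is why the Makefile hunk adds ccflags-y += -I$(src) */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE hypo_trace
#include <trace/define_trace.h>

/* hypo_core.c -- exactly one .c file expands the event bodies */
#define CREATE_TRACE_POINTS
#include "hypo_trace.h"

static void hypo_complete_request(int tag, u16 status)
{
        /* cheap static-branch no-op unless the event is enabled */
        trace_hypo_request_done(tag, status);
}
```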
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index eb46967bb0d5..9cee72a80472 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -739,12 +739,13 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				goto out;
 			}
 			ret = uuid_parse(p, &hostid);
-			kfree(p);
 			if (ret) {
 				pr_err("Invalid hostid %s\n", p);
 				ret = -EINVAL;
+				kfree(p);
 				goto out;
 			}
+			kfree(p);
 			break;
 		case NVMF_OPT_DUP_CONNECT:
 			opts->duplicate_connect = true;
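The fabrics change is a straightforward use-after-free fix: kfree(p) used to run before the error path handed p to pr_err(). A standalone userspace illustration of the corrected ordering, using a hypothetical parse_hostid() stand-in rather than the driver code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the option parser: the bug class is "free the buffer,
 * then pass it to the error print". The fix is to report first and
 * free afterwards, on both the error and the success path. */
static int parse_hostid(const char *opt)
{
        char *p = strdup(opt);

        if (!p)
                return -1;

        if (strlen(p) != 36) {                          /* pretend this is uuid_parse() failing */
                fprintf(stderr, "Invalid hostid %s\n", p);  /* p is still valid here */
                free(p);
                return -1;
        }
        free(p);
        return 0;
}

int main(void)
{
        return parse_hostid("not-a-uuid") ? 1 : 0;
}
```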
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a2ffb557b616..0bc6a9e48c8e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1140,9 +1140,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	 */
 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
-	/* If there is a reset ongoing, we shouldn't reset again. */
-	if (dev->ctrl.state == NVME_CTRL_RESETTING)
+	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
+	switch (dev->ctrl.state) {
+	case NVME_CTRL_RESETTING:
+	case NVME_CTRL_RECONNECTING:
 		return false;
+	default:
+		break;
+	}
 
 	/* We shouldn't reset unless the controller is on fatal error state
 	 * _or_ if we lost the communication with it.
@@ -1324,9 +1329,6 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (nvme_suspend_queue(nvmeq))
-		return;
-
 	if (shutdown)
 		nvme_shutdown_ctrl(&dev->ctrl);
 	else
@@ -1384,6 +1386,9 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
 {
 	struct nvme_queue *nvmeq = &dev->queues[qid];
 
+	if (dev->ctrl.queue_count > qid)
+		return 0;
+
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
@@ -2008,9 +2013,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
 {
-	int pass;
+	int pass, queues = dev->online_queues - 1;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
@@ -2161,7 +2166,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i, queues;
+	int i;
 	bool dead = true;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
@@ -2196,21 +2201,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_stop_queues(&dev->ctrl);
 
-	queues = dev->online_queues - 1;
-	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(&dev->queues[i]);
-
-	if (dead) {
-		/* A device might become IO incapable very soon during
-		 * probe, before the admin queue is configured. Thus,
-		 * queue_count can be 0 here.
-		 */
-		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(&dev->queues[0]);
-	} else {
-		nvme_disable_io_queues(dev, queues);
+	if (!dead) {
+		nvme_disable_io_queues(dev);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
+	for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+		nvme_suspend_queue(&dev->queues[i]);
+
 	nvme_pci_disable(dev);
 
 	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2292,6 +2289,16 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
+	/*
+	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+	 * initializing procedure here.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller RECONNECTING\n");
+		goto out;
+	}
+
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out;
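Among the pci.c changes, the "Fix queue double allocations" hunk makes nvme_alloc_queue() return early when a previous initialization pass already set the queue up, so a re-run of the init path cannot leak or clobber live queue memory. A minimal userspace sketch of the same idempotency guard, with hypothetical types and sizes rather than the driver's allocator:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 32
#define CQE_SIZE   64           /* hypothetical completion entry size */

struct queue { void *cqes; };

struct dev {
        struct queue queues[MAX_QUEUES];
        int queue_count;        /* queues set up by earlier passes */
};

/*
 * Idempotent allocation in the spirit of the nvme-pci fix: a second
 * initialization pass (e.g. after a reset) must not allocate a queue
 * that already exists, otherwise the first allocation leaks and the
 * queue state is clobbered.
 */
static int alloc_queue(struct dev *dev, int qid, size_t depth)
{
        struct queue *q = &dev->queues[qid];

        if (dev->queue_count > qid)     /* already allocated, nothing to do */
                return 0;

        q->cqes = calloc(depth, CQE_SIZE);
        if (!q->cqes)
                return -1;

        dev->queue_count = qid + 1;
        return 0;
}

int main(void)
{
        struct dev dev = { .queue_count = 0 };

        alloc_queue(&dev, 0, 32);       /* first init pass */
        alloc_queue(&dev, 0, 32);       /* re-init: returns early, no double allocation */
        printf("queue_count=%d\n", dev.queue_count);
        return 0;
}
```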
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 38e183461d9d..6c2fdfa4c86a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
 	struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
 	u32 num_sge;
 	int nents;
-	bool inline_data;
 	struct ib_reg_wr reg_wr;
 	struct ib_cqe reg_cqe;
 	struct nvme_rdma_queue *queue;
@@ -1086,7 +1085,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
 	sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
 
-	req->inline_data = true;
 	req->num_sge++;
 	return 0;
 }
@@ -1158,7 +1156,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	int count, ret;
 
 	req->num_sge = 1;
-	req->inline_data = false;
 	refcount_set(&req->ref, 2); /* send and recv completions */
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 000000000000..41944bbef835
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,130 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u16 sqid = get_unaligned_le16(cdw10);
+	u16 qsize = get_unaligned_le16(cdw10 + 2);
+	u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+	u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+
+	trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+			 sqid, qsize, sq_flags, cqid);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u16 cqid = get_unaligned_le16(cdw10);
+	u16 qsize = get_unaligned_le16(cdw10 + 2);
+	u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+	u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+	trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+			 cqid, qsize, cq_flags, irq_vector);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 cns = cdw10[0];
+	u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u64 slba = get_unaligned_le64(cdw10);
+	u16 length = get_unaligned_le16(cdw10 + 8);
+	u16 control = get_unaligned_le16(cdw10 + 10);
+	u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+	u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+	trace_seq_printf(p,
+			 "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+			 slba, length, control, dsmgmt, reftag);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "nr=%u, attributes=%u",
+			 get_unaligned_le32(cdw10),
+			 get_unaligned_le32(cdw10 + 4));
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+				       u8 opcode, u8 *cdw10)
+{
+	switch (opcode) {
+	case nvme_admin_create_sq:
+		return nvme_trace_create_sq(p, cdw10);
+	case nvme_admin_create_cq:
+		return nvme_trace_create_cq(p, cdw10);
+	case nvme_admin_identify:
+		return nvme_trace_admin_identify(p, cdw10);
+	default:
+		return nvme_trace_common(p, cdw10);
+	}
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+				     u8 opcode, u8 *cdw10)
+{
+	switch (opcode) {
+	case nvme_cmd_read:
+	case nvme_cmd_write:
+	case nvme_cmd_write_zeroes:
+		return nvme_trace_read_write(p, cdw10);
+	case nvme_cmd_dsm:
+		return nvme_trace_dsm(p, cdw10);
+	default:
+		return nvme_trace_common(p, cdw10);
+	}
+}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 000000000000..ea91fccd1bc0
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,165 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val)				\
+	__print_symbolic(val,					\
+		nvme_admin_opcode_name(nvme_admin_delete_sq),	\
+		nvme_admin_opcode_name(nvme_admin_create_sq),	\
+		nvme_admin_opcode_name(nvme_admin_get_log_page),	\
+		nvme_admin_opcode_name(nvme_admin_delete_cq),	\
+		nvme_admin_opcode_name(nvme_admin_create_cq),	\
+		nvme_admin_opcode_name(nvme_admin_identify),	\
+		nvme_admin_opcode_name(nvme_admin_abort_cmd),	\
+		nvme_admin_opcode_name(nvme_admin_set_features),	\
+		nvme_admin_opcode_name(nvme_admin_get_features),	\
+		nvme_admin_opcode_name(nvme_admin_async_event),	\
+		nvme_admin_opcode_name(nvme_admin_ns_mgmt),	\
+		nvme_admin_opcode_name(nvme_admin_activate_fw),	\
+		nvme_admin_opcode_name(nvme_admin_download_fw),	\
+		nvme_admin_opcode_name(nvme_admin_ns_attach),	\
+		nvme_admin_opcode_name(nvme_admin_keep_alive),	\
+		nvme_admin_opcode_name(nvme_admin_directive_send),	\
+		nvme_admin_opcode_name(nvme_admin_directive_recv),	\
+		nvme_admin_opcode_name(nvme_admin_dbbuf),	\
+		nvme_admin_opcode_name(nvme_admin_format_nvm),	\
+		nvme_admin_opcode_name(nvme_admin_security_send),	\
+		nvme_admin_opcode_name(nvme_admin_security_recv),	\
+		nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+				       u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+	nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val)					\
+	__print_symbolic(val,					\
+		nvme_opcode_name(nvme_cmd_flush),		\
+		nvme_opcode_name(nvme_cmd_write),		\
+		nvme_opcode_name(nvme_cmd_read),		\
+		nvme_opcode_name(nvme_cmd_write_uncor),		\
+		nvme_opcode_name(nvme_cmd_compare),		\
+		nvme_opcode_name(nvme_cmd_write_zeroes),	\
+		nvme_opcode_name(nvme_cmd_dsm),			\
+		nvme_opcode_name(nvme_cmd_resv_register),	\
+		nvme_opcode_name(nvme_cmd_resv_report),		\
+		nvme_opcode_name(nvme_cmd_resv_acquire),	\
+		nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+				     u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+	nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+	    TP_PROTO(struct nvme_command *cmd),
+	    TP_ARGS(cmd),
+	    TP_STRUCT__entry(
+		    __field(u8, opcode)
+		    __field(u8, flags)
+		    __field(u16, cid)
+		    __field(u64, metadata)
+		    __array(u8, cdw10, 24)
+	    ),
+	    TP_fast_assign(
+		    __entry->opcode = cmd->common.opcode;
+		    __entry->flags = cmd->common.flags;
+		    __entry->cid = cmd->common.command_id;
+		    __entry->metadata = le64_to_cpu(cmd->common.metadata);
+		    memcpy(__entry->cdw10, cmd->common.cdw10,
+			   sizeof(__entry->cdw10));
+	    ),
+	    TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->cid, __entry->flags, __entry->metadata,
+		      show_admin_opcode_name(__entry->opcode),
+		      __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
+
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+	    TP_PROTO(int qid, struct nvme_command *cmd),
+	    TP_ARGS(qid, cmd),
+	    TP_STRUCT__entry(
+		    __field(int, qid)
+		    __field(u8, opcode)
+		    __field(u8, flags)
+		    __field(u16, cid)
+		    __field(u32, nsid)
+		    __field(u64, metadata)
+		    __array(u8, cdw10, 24)
+	    ),
+	    TP_fast_assign(
+		    __entry->qid = qid;
+		    __entry->opcode = cmd->common.opcode;
+		    __entry->flags = cmd->common.flags;
+		    __entry->cid = cmd->common.command_id;
+		    __entry->nsid = le32_to_cpu(cmd->common.nsid);
+		    __entry->metadata = le64_to_cpu(cmd->common.metadata);
+		    memcpy(__entry->cdw10, cmd->common.cdw10,
+			   sizeof(__entry->cdw10));
+	    ),
+	    TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+		      __entry->qid, __entry->nsid, __entry->cid,
+		      __entry->flags, __entry->metadata,
+		      show_opcode_name(__entry->opcode),
+		      __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+	    TP_PROTO(struct request *req),
+	    TP_ARGS(req),
+	    TP_STRUCT__entry(
+		    __field(int, qid)
+		    __field(int, cid)
+		    __field(u64, result)
+		    __field(u8, retries)
+		    __field(u8, flags)
+		    __field(u16, status)
+	    ),
+	    TP_fast_assign(
+		    __entry->qid = req->q->id;
+		    __entry->cid = req->tag;
+		    __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+		    __entry->retries = nvme_req(req)->retries;
+		    __entry->flags = nvme_req(req)->flags;
+		    __entry->status = nvme_req(req)->status;
+	    ),
+	    TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+		      __entry->cid, __entry->qid, __entry->result,
+		      __entry->retries, __entry->flags, __entry->status)
+
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
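The trace.c decoders treat cdw10 as a 24-byte little-endian blob covering cdw10..cdw15 and pull the command-specific fields out at fixed offsets; nvme_trace_read_write(), for instance, reads the starting LBA at offset 0, the 0-based block count at offset 8, and the control flags at offset 10. A small userspace illustration of that decoding, with a hand-built cdw10 for a hypothetical read command (not the kernel helpers themselves):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Userspace stand-ins for get_unaligned_le16/32/64(). */
static uint16_t le16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t le32(const uint8_t *p) { return le16(p) | (uint32_t)le16(p + 2) << 16; }
static uint64_t le64(const uint8_t *p) { return le32(p) | (uint64_t)le32(p + 4) << 32; }

int main(void)
{
        uint8_t cdw10[24] = { 0 };
        uint64_t slba = 4096;   /* hypothetical read: SLBA 4096, 8 blocks (NLB=7, 0-based) */
        uint16_t nlb = 7;

        memcpy(cdw10, &slba, sizeof(slba));     /* assumes a little-endian host */
        memcpy(cdw10 + 8, &nlb, sizeof(nlb));

        /* Same fields, same offsets, same format as nvme_trace_read_write(). */
        printf("slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u\n",
               (unsigned long long)le64(cdw10), le16(cdw10 + 8), le16(cdw10 + 10),
               le32(cdw10 + 12), le32(cdw10 + 16));
        return 0;
}
```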