author     Christoph Hellwig <hch@lst.de>  2016-06-21 12:04:20 -0400
committer  Jens Axboe <axboe@fb.com>       2016-07-05 13:30:33 -0400
commit     a07b4970f464f13640e28e16dad6cfa33647cc99 (patch)
tree       a7b810e87e8eb8cb650288ea3a169ef5b443849e /drivers/nvme/target/io-cmd.c
parent     9645c1a2336bb92751a04454e7565c09c9a06f3c (diff)
nvmet: add a generic NVMe target
This patch introduces an implementation of NVMe subsystems, controllers and
a discovery service, which allows exporting NVMe namespaces across fabrics
such as Ethernet or Fibre Channel. The implementation conforms to the
NVMe 1.2.1 specification and interoperates with NVMe over Fabrics host
implementations.

Configuration works using configfs, and is best performed using the
nvmetcli tool from http://git.infradead.org/users/hch/nvmetcli.git, which
also has a detailed explanation of the required steps in its README file.

Signed-off-by: Armen Baloyan <armenx.baloyan@intel.com>
Signed-off-by: Anthony Knapp <anthony.j.knapp@intel.com>
Signed-off-by: Jay Freyensee <james.p.freyensee@intel.com>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
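For orientation, the following user-space sketch shows the raw configfs
writes that nvmetcli automates for a single-namespace setup. The mount
point /sys/kernel/config, the subsystem name "testnqn", the backing device
/dev/nvme0n1, and the helper write_attr() are assumptions made for this
example; the nvmetcli README remains the authoritative reference, including
the port setup that this sketch omits.

	/* Minimal sketch, assuming the standard nvmet configfs layout. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/stat.h>

	static void write_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f || fputs(val, f) == EOF) {
			perror(path);
			exit(EXIT_FAILURE);
		}
		fclose(f);
	}

	int main(void)
	{
		const char *subsys =
			"/sys/kernel/config/nvmet/subsystems/testnqn";
		char path[256];

		/* Create the subsystem and allow any host NQN to connect. */
		mkdir(subsys, 0755);
		snprintf(path, sizeof(path), "%s/attr_allow_any_host", subsys);
		write_attr(path, "1");

		/* Create namespace 1, point it at a block device, enable it. */
		snprintf(path, sizeof(path), "%s/namespaces/1", subsys);
		mkdir(path, 0755);
		snprintf(path, sizeof(path), "%s/namespaces/1/device_path",
			 subsys);
		write_attr(path, "/dev/nvme0n1");
		snprintf(path, sizeof(path), "%s/namespaces/1/enable", subsys);
		write_attr(path, "1");

		/*
		 * A transport port must still be created under ../ports and
		 * the subsystem linked into it before hosts can connect.
		 */
		return 0;
	}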
Diffstat (limited to 'drivers/nvme/target/io-cmd.c')
-rw-r--r--  drivers/nvme/target/io-cmd.c  | 215
1 file changed, 215 insertions(+), 0 deletions(-)
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
new file mode 100644
index 000000000000..2cd069b691ae
--- /dev/null
+++ b/drivers/nvme/target/io-cmd.c
@@ -0,0 +1,215 @@
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

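	/*
	 * Only put bios that were dynamically allocated for chaining; the
	 * inline bio is embedded in struct nvmet_req itself.
	 */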
	if (bio != &req->inline_bio)
		bio_put(bio);
}

static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
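	/*
	 * The NVMe length field is a 0's based count of logical blocks,
	 * hence the +1; shifting by blksize_shift converts it to bytes.
	 */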
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

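/*
 * Set up the bio and bio_vec array embedded in the request, so that small
 * transfers avoid a bio allocation in the I/O path.
 */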
static void nvmet_inline_bio_init(struct nvmet_req *req)
{
	struct bio *bio = &req->inline_bio;

	bio_init(bio);
	bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
	bio->bi_io_vec = req->inline_bvec;
}

static void nvmet_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	struct bio *bio;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

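	/* The NVMe FUA bit maps directly to the block layer REQ_FUA flag. */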
	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

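	/*
	 * The SLBA is in units of the namespace block size; convert it to
	 * the 512-byte sectors the block layer works in.
	 */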
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;
	bio->bi_bdev = req->ns->bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

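	/*
	 * Map the scatterlist into the bio.  Whenever the current bio is
	 * full, allocate and chain a new one so that the request only
	 * completes once every chained bio has finished.
	 */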
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio->bi_bdev = req->ns->bdev;
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			cookie = submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

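	/* Submit the final bio and opportunistically poll for completion. */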
	cookie = submit_bio(bio);

	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

static void nvmet_execute_flush(struct nvmet_req *req)
{
	struct bio *bio;

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;

	bio->bi_bdev = req->ns->bdev;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
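	/* An empty bio with WRITE_FLUSH set just flushes the device cache. */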
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);

	submit_bio(bio);
}

static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
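	/*
	 * Convert slba/nlb from namespace blocks to 512-byte sectors and
	 * let __blkdev_issue_discard() accumulate the work in *bio.
	 */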
	if (__blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

static void nvmet_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

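	/*
	 * dsm.nr is a 0's based count, so nr + 1 ranges are copied from
	 * the SGL and discarded one at a time.
	 */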
	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_error = -EIO;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

static void nvmet_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

int nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

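	/*
	 * I/O commands are only valid once the controller has been enabled
	 * (CC.EN) and reports itself ready (CSTS.RDY).
	 */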
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("nvmet: got io cmd %d while CC.EN == 0\n",
				cmd->common.opcode);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got io cmd %d while CSTS.RDY == 0\n",
				cmd->common.opcode);
		req->ns = NULL;
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_execute_dsm;
		/* dsm.nr is 0's based, so nr + 1 ranges are transferred */
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	default:
		pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}