author	Christoph Hellwig <hch@lst.de>	2016-06-21 12:04:20 -0400
committer	Jens Axboe <axboe@fb.com>	2016-07-05 13:30:33 -0400
commit	a07b4970f464f13640e28e16dad6cfa33647cc99 (patch)
tree	a7b810e87e8eb8cb650288ea3a169ef5b443849e
parent	9645c1a2336bb92751a04454e7565c09c9a06f3c (diff)
nvmet: add a generic NVMe target
This patch introduces an implementation of NVMe subsystems, controllers and
a discovery service which allows exporting NVMe namespaces across fabrics
such as Ethernet, FC etc. The implementation conforms to the NVMe 1.2.1
specification and interoperates with NVMe over Fabrics host implementations.

Configuration works using configfs, and is best performed using the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git, which also has a
detailed explanation of the required steps in the README file.

Signed-off-by: Armen Baloyan <armenx.baloyan@intel.com>
Signed-off-by: Anthony Knapp <anthony.j.knapp@intel.com>
Signed-off-by: Jay Freyensee <james.p.freyensee@intel.com>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
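The target core added below is transport agnostic: a fabric driver (RDMA, loopback, FC, ...) plugs in by filling out a struct nvmet_fabrics_ops (declared in the new nvmet.h) and registering it. The following is only an illustrative sketch -- the nvmet_demo_* callbacks are hypothetical placeholders, and the field layout is inferred from how core.c and admin-cmd.c use the ops in this patch -- of what a minimal transport module would roughly look like:

	#include <linux/module.h>
	#include "nvmet.h"

	static struct nvmet_fabrics_ops nvmet_demo_ops = {
		.owner		= THIS_MODULE,
		.type		= NVMF_TRTYPE_LOOP,	/* slot in the nvmet_transports[] array */
		.msdbd		= 1,			/* reported in Identify Controller */
		.add_port	= nvmet_demo_add_port,		/* hypothetical: start accepting connections */
		.remove_port	= nvmet_demo_remove_port,	/* hypothetical: stop accepting connections */
		.queue_response	= nvmet_demo_queue_response,	/* hypothetical: send the completion capsule */
		.delete_ctrl	= nvmet_demo_delete_ctrl,	/* hypothetical: teardown on keep-alive timeout */
	};

	static int __init nvmet_demo_init(void)
	{
		return nvmet_register_transport(&nvmet_demo_ops);
	}
	module_init(nvmet_demo_init);

	static void __exit nvmet_demo_exit(void)
	{
		nvmet_unregister_transport(&nvmet_demo_ops);
	}
	module_exit(nvmet_demo_exit);

When a configfs port is enabled, the core looks up the registered ops by port->disc_addr.trtype (loading "nvmet-transport-<type>" via request_module() if nothing is registered yet), calls ->add_port(), and later completes every request through nvmet_req_complete(), which ends in the transport's ->queue_response().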
-rw-r--r--	MAINTAINERS	7
-rw-r--r--	drivers/nvme/Kconfig	1
-rw-r--r--	drivers/nvme/Makefile	1
-rw-r--r--	drivers/nvme/target/Kconfig	16
-rw-r--r--	drivers/nvme/target/Makefile	5
-rw-r--r--	drivers/nvme/target/admin-cmd.c	465
-rw-r--r--	drivers/nvme/target/configfs.c	917
-rw-r--r--	drivers/nvme/target/core.c	964
-rw-r--r--	drivers/nvme/target/discovery.c	221
-rw-r--r--	drivers/nvme/target/fabrics-cmd.c	240
-rw-r--r--	drivers/nvme/target/io-cmd.c	215
-rw-r--r--	drivers/nvme/target/nvmet.h	331
12 files changed, 3383 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ed42cb65a19b..b2190b166e4b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8153,6 +8153,13 @@ S: Supported
8153F: drivers/nvme/host/
8154F: include/linux/nvme.h
8155
8156NVM EXPRESS TARGET DRIVER
8157M: Christoph Hellwig <hch@lst.de>
8158M: Sagi Grimberg <sagi@grimberg.me>
8159L: linux-nvme@lists.infradead.org
8160S: Supported
8161F: drivers/nvme/target/
8162
8163NVMEM FRAMEWORK
8164M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
8165M: Maxime Ripard <maxime.ripard@free-electrons.com>
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index a39d9431eaec..b7c78a5b1f7a 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1 +1,2 @@
1source "drivers/nvme/host/Kconfig" 1source "drivers/nvme/host/Kconfig"
2source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index 9421e829d2a9..0096a7fd1431 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,2 +1,3 @@
1
2obj-y += host/
3obj-y += target/
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
new file mode 100644
index 000000000000..acf0c070e50d
--- /dev/null
+++ b/drivers/nvme/target/Kconfig
@@ -0,0 +1,16 @@
1
2config NVME_TARGET
3 tristate "NVMe Target support"
4 depends on BLOCK
5 depends on CONFIGFS_FS
6 help
7	  This enables target side support for the NVMe protocol, that is,
8 it allows the Linux kernel to implement NVMe subsystems and
9 controllers and export Linux block devices as NVMe namespaces.
10 You need to select at least one of the transports below to make this
11 functionality useful.
12
13 To configure the NVMe target you probably want to use the nvmetcli
14 tool from http://git.infradead.org/users/hch/nvmetcli.git.
15
16 If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
new file mode 100644
index 000000000000..b4600b6f5724
--- /dev/null
+++ b/drivers/nvme/target/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_NVME_TARGET) += nvmet.o
3
4nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
5 discovery.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
new file mode 100644
index 000000000000..2fac17a5ad53
--- /dev/null
+++ b/drivers/nvme/target/admin-cmd.c
@@ -0,0 +1,465 @@
1/*
2 * NVMe admin command implementation.
3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/module.h>
16#include <linux/random.h>
17#include <generated/utsrelease.h>
18#include "nvmet.h"
19
20u32 nvmet_get_log_page_len(struct nvme_command *cmd)
21{
22 u32 len = le16_to_cpu(cmd->get_log_page.numdu);
23
24 len <<= 16;
25 len += le16_to_cpu(cmd->get_log_page.numdl);
26 /* NUMD is a 0's based value */
27 len += 1;
28 len *= sizeof(u32);
29
30 return len;
31}
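/*
 * Worked example (illustrative only): a Get Log Page command with
 * numdu == 0 and numdl == cpu_to_le16(0x3ff) describes 0x3ff + 1 = 0x400
 * dwords (NUMD is a 0's based value), so nvmet_get_log_page_len() returns
 * 0x400 * sizeof(u32) = 4096 bytes, which is what
 * nvmet_execute_get_log_page() below then allocates and zero-fills.
 */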
32
33static void nvmet_execute_get_log_page(struct nvmet_req *req)
34{
35 size_t data_len = nvmet_get_log_page_len(req->cmd);
36 void *buf;
37 u16 status = 0;
38
39 buf = kzalloc(data_len, GFP_KERNEL);
40 if (!buf) {
41 status = NVME_SC_INTERNAL;
42 goto out;
43 }
44
45 switch (req->cmd->get_log_page.lid) {
46 case 0x01:
47 /*
48 * We currently never set the More bit in the status field,
49 * so all error log entries are invalid and can be zeroed out.
50 * This is called a minimum viable implementation (TM) of this
51 * mandatory log page.
52 */
53 break;
54 case 0x02:
55 /*
56 * XXX: fill out actual smart log
57 *
58 * We might have a hard time coming up with useful values for
59 * many of the fields, and even when we have useful data
60 * available (e.g. units or commands read/written) those aren't
61 * persistent over power loss.
62 */
63 break;
64 case 0x03:
65 /*
66 * We only support a single firmware slot which always is
67 * active, so we can zero out the whole firmware slot log and
68 * still claim to fully implement this mandatory log page.
69 */
70 break;
71 default:
72 BUG();
73 }
74
75 status = nvmet_copy_to_sgl(req, 0, buf, data_len);
76
77 kfree(buf);
78out:
79 nvmet_req_complete(req, status);
80}
81
82static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
83{
84 struct nvmet_ctrl *ctrl = req->sq->ctrl;
85 struct nvme_id_ctrl *id;
86 u64 serial;
87 u16 status = 0;
88
89 id = kzalloc(sizeof(*id), GFP_KERNEL);
90 if (!id) {
91 status = NVME_SC_INTERNAL;
92 goto out;
93 }
94
95 /* XXX: figure out how to assign real vendor IDs. */
96 id->vid = 0;
97 id->ssvid = 0;
98
99 /* generate a random serial number as our controllers are ephemeral: */
100 get_random_bytes(&serial, sizeof(serial));
101 memset(id->sn, ' ', sizeof(id->sn));
102 snprintf(id->sn, sizeof(id->sn), "%llx", serial);
103
104 memset(id->mn, ' ', sizeof(id->mn));
105 strncpy((char *)id->mn, "Linux", sizeof(id->mn));
106
107 memset(id->fr, ' ', sizeof(id->fr));
108 strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
109
110 id->rab = 6;
111
112 /*
113 * XXX: figure out how we can assign an IEEE OUI, but until then
114 * the safest is to leave it as zeroes.
115 */
116
117 /* we support multiple ports and multiple hosts: */
118 id->mic = (1 << 0) | (1 << 1);
119
120 /* no limit on data transfer sizes for now */
121 id->mdts = 0;
122 id->cntlid = cpu_to_le16(ctrl->cntlid);
123 id->ver = cpu_to_le32(ctrl->subsys->ver);
124
125 /* XXX: figure out what to do about RTD3R/RTD3 */
126 id->oaes = cpu_to_le32(1 << 8);
127 id->ctratt = cpu_to_le32(1 << 0);
128
129 id->oacs = 0;
130
131 /*
132 * We don't really have a practical limit on the number of abort
133 * commands. But we don't do anything useful for abort either, so
134 * no point in allowing more abort commands than the spec requires.
135 */
136 id->acl = 3;
137
138 id->aerl = NVMET_ASYNC_EVENTS - 1;
139
140 /* first slot is read-only, only one slot supported */
141 id->frmw = (1 << 0) | (1 << 1);
142 id->lpa = (1 << 0) | (1 << 2);
143 id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
144 id->npss = 0;
145
146 /* We support keep-alive timeout in granularity of seconds */
147 id->kas = cpu_to_le16(NVMET_KAS);
148
149 id->sqes = (0x6 << 4) | 0x6;
150 id->cqes = (0x4 << 4) | 0x4;
151
152 /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
153 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
154
155 id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
156 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
157
158 /* XXX: don't report vwc if the underlying device is write through */
159 id->vwc = NVME_CTRL_VWC_PRESENT;
160
161 /*
162 * We can't support atomic writes bigger than a LBA without support
163 * from the backend device.
164 */
165 id->awun = 0;
166 id->awupf = 0;
167
168 id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
169 if (ctrl->ops->has_keyed_sgls)
170 id->sgls |= cpu_to_le32(1 << 2);
171 if (ctrl->ops->sqe_inline_size)
172 id->sgls |= cpu_to_le32(1 << 20);
173
174 strcpy(id->subnqn, ctrl->subsys->subsysnqn);
175
176 /* Max command capsule size is sqe + single page of in-capsule data */
177 id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
178 ctrl->ops->sqe_inline_size) / 16);
179 /* Max response capsule size is cqe */
180 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
181
182 id->msdbd = ctrl->ops->msdbd;
183
184 /*
185 * Meh, we don't really support any power state. Fake up the same
186 * values that qemu does.
187 */
188 id->psd[0].max_power = cpu_to_le16(0x9c4);
189 id->psd[0].entry_lat = cpu_to_le32(0x10);
190 id->psd[0].exit_lat = cpu_to_le32(0x4);
191
192 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
193
194 kfree(id);
195out:
196 nvmet_req_complete(req, status);
197}
198
199static void nvmet_execute_identify_ns(struct nvmet_req *req)
200{
201 struct nvmet_ns *ns;
202 struct nvme_id_ns *id;
203 u16 status = 0;
204
205 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
206 if (!ns) {
207 status = NVME_SC_INVALID_NS | NVME_SC_DNR;
208 goto out;
209 }
210
211 id = kzalloc(sizeof(*id), GFP_KERNEL);
212 if (!id) {
213 status = NVME_SC_INTERNAL;
214 goto out_put_ns;
215 }
216
217 /*
218 * nuse = ncap = nsze isn't always true, but we have no way to find
219 * that out from the underlying device.
220 */
221 id->ncap = id->nuse = id->nsze =
222 cpu_to_le64(ns->size >> ns->blksize_shift);
223
224 /*
225 * We just provide a single LBA format that matches what the
226 * underlying device reports.
227 */
228 id->nlbaf = 0;
229 id->flbas = 0;
230
231 /*
232 * Our namespace might always be shared. Not just with other
233 * controllers, but also with any other user of the block device.
234 */
235 id->nmic = (1 << 0);
236
237 memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
238
239 id->lbaf[0].ds = ns->blksize_shift;
240
241 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
242
243 kfree(id);
244out_put_ns:
245 nvmet_put_namespace(ns);
246out:
247 nvmet_req_complete(req, status);
248}
249
250static void nvmet_execute_identify_nslist(struct nvmet_req *req)
251{
252 static const int buf_size = 4096;
253 struct nvmet_ctrl *ctrl = req->sq->ctrl;
254 struct nvmet_ns *ns;
255 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
256 __le32 *list;
257 u16 status = 0;
258 int i = 0;
259
260 list = kzalloc(buf_size, GFP_KERNEL);
261 if (!list) {
262 status = NVME_SC_INTERNAL;
263 goto out;
264 }
265
266 rcu_read_lock();
267 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
268 if (ns->nsid <= min_nsid)
269 continue;
270 list[i++] = cpu_to_le32(ns->nsid);
271 if (i == buf_size / sizeof(__le32))
272 break;
273 }
274 rcu_read_unlock();
275
276 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
277
278 kfree(list);
279out:
280 nvmet_req_complete(req, status);
281}
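/*
 * Example (illustrative only): with active namespaces 1, 2 and 5 and an
 * Identify command NSID of 1, the 4096-byte buffer built above starts
 * with { cpu_to_le32(2), cpu_to_le32(5), 0, ... }: only NSIDs strictly
 * greater than the one supplied are reported, in ascending order because
 * the subsystem keeps its namespace list sorted (see nvmet_ns_enable()
 * in core.c), and the rest of the buffer stays zeroed.
 */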
282
283/*
284 * A "mimimum viable" abort implementation: the command is mandatory in the
285 * spec, but we are not required to do any useful work. We couldn't really
286 * do a useful abort, so don't bother even with waiting for the command
287 * to be exectuted and return immediately telling the command to abort
288 * wasn't found.
289 */
290static void nvmet_execute_abort(struct nvmet_req *req)
291{
292 nvmet_set_result(req, 1);
293 nvmet_req_complete(req, 0);
294}
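/*
 * Illustrative note: setting the result to 1 sets bit 0 of completion
 * queue entry Dword 0, which per the NVMe spec indicates that the command
 * specified was not aborted. A host could check it along the lines of:
 *
 *	if (le32_to_cpu(cqe->result) & 0x1)
 *		;	/* nothing was aborted */
 */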
295
296static void nvmet_execute_set_features(struct nvmet_req *req)
297{
298 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
299 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
300 u64 val;
301 u32 val32;
302 u16 status = 0;
303
304 switch (cdw10 & 0xf) {
305 case NVME_FEAT_NUM_QUEUES:
306 nvmet_set_result(req,
307 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
308 break;
309 case NVME_FEAT_KATO:
310 val = le64_to_cpu(req->cmd->prop_set.value);
311 val32 = val & 0xffff;
312 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
313 nvmet_set_result(req, req->sq->ctrl->kato);
314 break;
315 default:
316 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
317 break;
318 }
319
320 nvmet_req_complete(req, status);
321}
322
323static void nvmet_execute_get_features(struct nvmet_req *req)
324{
325 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
326 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
327 u16 status = 0;
328
329 switch (cdw10 & 0xf) {
330 /*
331 * These features are mandatory in the spec, but we don't
332 * have a useful way to implement them. We'll eventually
333 * need to come up with some fake values for these.
334 */
335#if 0
336 case NVME_FEAT_ARBITRATION:
337 break;
338 case NVME_FEAT_POWER_MGMT:
339 break;
340 case NVME_FEAT_TEMP_THRESH:
341 break;
342 case NVME_FEAT_ERR_RECOVERY:
343 break;
344 case NVME_FEAT_IRQ_COALESCE:
345 break;
346 case NVME_FEAT_IRQ_CONFIG:
347 break;
348 case NVME_FEAT_WRITE_ATOMIC:
349 break;
350 case NVME_FEAT_ASYNC_EVENT:
351 break;
352#endif
353 case NVME_FEAT_VOLATILE_WC:
354 nvmet_set_result(req, 1);
355 break;
356 case NVME_FEAT_NUM_QUEUES:
357 nvmet_set_result(req,
358 (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
359 break;
360 case NVME_FEAT_KATO:
361 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
362 break;
363 default:
364 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
365 break;
366 }
367
368 nvmet_req_complete(req, status);
369}
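/*
 * Worked example (illustrative only): a Set Features / KATO value of
 * 15000 (milliseconds) is rounded up above to ctrl->kato = 15 seconds,
 * so a later Get Features reports 15 * 1000 = 15000 back to the host,
 * and the keep-alive timer armed in core.c expires after
 * ctrl->kato * HZ jiffies without a Keep Alive command.
 */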
370
371static void nvmet_execute_async_event(struct nvmet_req *req)
372{
373 struct nvmet_ctrl *ctrl = req->sq->ctrl;
374
375 mutex_lock(&ctrl->lock);
376 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
377 mutex_unlock(&ctrl->lock);
378 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
379 return;
380 }
381 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
382 mutex_unlock(&ctrl->lock);
383
384 schedule_work(&ctrl->async_event_work);
385}
386
387static void nvmet_execute_keep_alive(struct nvmet_req *req)
388{
389 struct nvmet_ctrl *ctrl = req->sq->ctrl;
390
391 pr_debug("ctrl %d update keep-alive timer for %d secs\n",
392 ctrl->cntlid, ctrl->kato);
393
394 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
395 nvmet_req_complete(req, 0);
396}
397
398int nvmet_parse_admin_cmd(struct nvmet_req *req)
399{
400 struct nvme_command *cmd = req->cmd;
401
402 req->ns = NULL;
403
404 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
405 pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
406 cmd->common.opcode);
407 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
408 }
409 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
410 pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
411 cmd->common.opcode);
412 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
413 }
414
415 switch (cmd->common.opcode) {
416 case nvme_admin_get_log_page:
417 req->data_len = nvmet_get_log_page_len(cmd);
418
419 switch (cmd->get_log_page.lid) {
420 case 0x01:
421 case 0x02:
422 case 0x03:
423 req->execute = nvmet_execute_get_log_page;
424 return 0;
425 }
426 break;
427 case nvme_admin_identify:
428 req->data_len = 4096;
429 switch (le32_to_cpu(cmd->identify.cns)) {
430 case 0x00:
431 req->execute = nvmet_execute_identify_ns;
432 return 0;
433 case 0x01:
434 req->execute = nvmet_execute_identify_ctrl;
435 return 0;
436 case 0x02:
437 req->execute = nvmet_execute_identify_nslist;
438 return 0;
439 }
440 break;
441 case nvme_admin_abort_cmd:
442 req->execute = nvmet_execute_abort;
443 req->data_len = 0;
444 return 0;
445 case nvme_admin_set_features:
446 req->execute = nvmet_execute_set_features;
447 req->data_len = 0;
448 return 0;
449 case nvme_admin_get_features:
450 req->execute = nvmet_execute_get_features;
451 req->data_len = 0;
452 return 0;
453 case nvme_admin_async_event:
454 req->execute = nvmet_execute_async_event;
455 req->data_len = 0;
456 return 0;
457 case nvme_admin_keep_alive:
458 req->execute = nvmet_execute_keep_alive;
459 req->data_len = 0;
460 return 0;
461 }
462
463 pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
464 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
465}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
new file mode 100644
index 000000000000..9bed302789a2
--- /dev/null
+++ b/drivers/nvme/target/configfs.c
@@ -0,0 +1,917 @@
1/*
2 * Configfs interface for the NVMe target.
3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/stat.h>
19#include <linux/ctype.h>
20
21#include "nvmet.h"
22
23static struct config_item_type nvmet_host_type;
24static struct config_item_type nvmet_subsys_type;
25
26/*
27 * nvmet_port Generic ConfigFS definitions.
28 * Used in any place in the ConfigFS tree that refers to an address.
29 */
30static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
31 char *page)
32{
33 switch (to_nvmet_port(item)->disc_addr.adrfam) {
34 case NVMF_ADDR_FAMILY_IP4:
35 return sprintf(page, "ipv4\n");
36 case NVMF_ADDR_FAMILY_IP6:
37 return sprintf(page, "ipv6\n");
38 case NVMF_ADDR_FAMILY_IB:
39 return sprintf(page, "ib\n");
40 default:
41 return sprintf(page, "\n");
42 }
43}
44
45static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
46 const char *page, size_t count)
47{
48 struct nvmet_port *port = to_nvmet_port(item);
49
50 if (port->enabled) {
51 pr_err("Cannot modify address while enabled\n");
52 pr_err("Disable the address before modifying\n");
53 return -EACCES;
54 }
55
56 if (sysfs_streq(page, "ipv4")) {
57 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
58 } else if (sysfs_streq(page, "ipv6")) {
59 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
60 } else if (sysfs_streq(page, "ib")) {
61 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
62 } else {
63 pr_err("Invalid value '%s' for adrfam\n", page);
64 return -EINVAL;
65 }
66
67 return count;
68}
69
70CONFIGFS_ATTR(nvmet_, addr_adrfam);
71
72static ssize_t nvmet_addr_portid_show(struct config_item *item,
73 char *page)
74{
75 struct nvmet_port *port = to_nvmet_port(item);
76
77 return snprintf(page, PAGE_SIZE, "%d\n",
78 le16_to_cpu(port->disc_addr.portid));
79}
80
81static ssize_t nvmet_addr_portid_store(struct config_item *item,
82 const char *page, size_t count)
83{
84 struct nvmet_port *port = to_nvmet_port(item);
85 u16 portid = 0;
86
87 if (kstrtou16(page, 0, &portid)) {
88 pr_err("Invalid value '%s' for portid\n", page);
89 return -EINVAL;
90 }
91
92 if (port->enabled) {
93 pr_err("Cannot modify address while enabled\n");
94 pr_err("Disable the address before modifying\n");
95 return -EACCES;
96 }
97 port->disc_addr.portid = cpu_to_le16(portid);
98 return count;
99}
100
101CONFIGFS_ATTR(nvmet_, addr_portid);
102
103static ssize_t nvmet_addr_traddr_show(struct config_item *item,
104 char *page)
105{
106 struct nvmet_port *port = to_nvmet_port(item);
107
108 return snprintf(page, PAGE_SIZE, "%s\n",
109 port->disc_addr.traddr);
110}
111
112static ssize_t nvmet_addr_traddr_store(struct config_item *item,
113 const char *page, size_t count)
114{
115 struct nvmet_port *port = to_nvmet_port(item);
116
117 if (count > NVMF_TRADDR_SIZE) {
118 pr_err("Invalid value '%s' for traddr\n", page);
119 return -EINVAL;
120 }
121
122 if (port->enabled) {
123 pr_err("Cannot modify address while enabled\n");
124 pr_err("Disable the address before modifying\n");
125 return -EACCES;
126 }
127 return snprintf(port->disc_addr.traddr,
128 sizeof(port->disc_addr.traddr), "%s", page);
129}
130
131CONFIGFS_ATTR(nvmet_, addr_traddr);
132
133static ssize_t nvmet_addr_treq_show(struct config_item *item,
134 char *page)
135{
136 switch (to_nvmet_port(item)->disc_addr.treq) {
137 case NVMF_TREQ_NOT_SPECIFIED:
138 return sprintf(page, "not specified\n");
139 case NVMF_TREQ_REQUIRED:
140 return sprintf(page, "required\n");
141 case NVMF_TREQ_NOT_REQUIRED:
142 return sprintf(page, "not required\n");
143 default:
144 return sprintf(page, "\n");
145 }
146}
147
148static ssize_t nvmet_addr_treq_store(struct config_item *item,
149 const char *page, size_t count)
150{
151 struct nvmet_port *port = to_nvmet_port(item);
152
153 if (port->enabled) {
154 pr_err("Cannot modify address while enabled\n");
155 pr_err("Disable the address before modifying\n");
156 return -EACCES;
157 }
158
159 if (sysfs_streq(page, "not specified")) {
160 port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
161 } else if (sysfs_streq(page, "required")) {
162 port->disc_addr.treq = NVMF_TREQ_REQUIRED;
163 } else if (sysfs_streq(page, "not required")) {
164 port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
165 } else {
166 pr_err("Invalid value '%s' for treq\n", page);
167 return -EINVAL;
168 }
169
170 return count;
171}
172
173CONFIGFS_ATTR(nvmet_, addr_treq);
174
175static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
176 char *page)
177{
178 struct nvmet_port *port = to_nvmet_port(item);
179
180 return snprintf(page, PAGE_SIZE, "%s\n",
181 port->disc_addr.trsvcid);
182}
183
184static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
185 const char *page, size_t count)
186{
187 struct nvmet_port *port = to_nvmet_port(item);
188
189 if (count > NVMF_TRSVCID_SIZE) {
190 pr_err("Invalid value '%s' for trsvcid\n", page);
191 return -EINVAL;
192 }
193 if (port->enabled) {
194 pr_err("Cannot modify address while enabled\n");
195 pr_err("Disable the address before modifying\n");
196 return -EACCES;
197 }
198 return snprintf(port->disc_addr.trsvcid,
199 sizeof(port->disc_addr.trsvcid), "%s", page);
200}
201
202CONFIGFS_ATTR(nvmet_, addr_trsvcid);
203
204static ssize_t nvmet_addr_trtype_show(struct config_item *item,
205 char *page)
206{
207 switch (to_nvmet_port(item)->disc_addr.trtype) {
208 case NVMF_TRTYPE_RDMA:
209 return sprintf(page, "rdma\n");
210 case NVMF_TRTYPE_LOOP:
211 return sprintf(page, "loop\n");
212 default:
213 return sprintf(page, "\n");
214 }
215}
216
217static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
218{
219 port->disc_addr.trtype = NVMF_TRTYPE_RDMA;
220 memset(&port->disc_addr.tsas.rdma, 0, NVMF_TSAS_SIZE);
221 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
222 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
223 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
224}
225
226static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
227{
228 port->disc_addr.trtype = NVMF_TRTYPE_LOOP;
229 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
230}
231
232static ssize_t nvmet_addr_trtype_store(struct config_item *item,
233 const char *page, size_t count)
234{
235 struct nvmet_port *port = to_nvmet_port(item);
236
237 if (port->enabled) {
238 pr_err("Cannot modify address while enabled\n");
239 pr_err("Disable the address before modifying\n");
240 return -EACCES;
241 }
242
243 if (sysfs_streq(page, "rdma")) {
244 nvmet_port_init_tsas_rdma(port);
245 } else if (sysfs_streq(page, "loop")) {
246 nvmet_port_init_tsas_loop(port);
247 } else {
248 pr_err("Invalid value '%s' for trtype\n", page);
249 return -EINVAL;
250 }
251
252 return count;
253}
254
255CONFIGFS_ATTR(nvmet_, addr_trtype);
256
257/*
258 * Namespace structures & file operation functions below
259 */
260static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
261{
262 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
263}
264
265static ssize_t nvmet_ns_device_path_store(struct config_item *item,
266 const char *page, size_t count)
267{
268 struct nvmet_ns *ns = to_nvmet_ns(item);
269 struct nvmet_subsys *subsys = ns->subsys;
270 int ret;
271
272 mutex_lock(&subsys->lock);
273 ret = -EBUSY;
274 if (nvmet_ns_enabled(ns))
275 goto out_unlock;
276
277 kfree(ns->device_path);
278
279 ret = -ENOMEM;
280 ns->device_path = kstrdup(page, GFP_KERNEL);
281 if (!ns->device_path)
282 goto out_unlock;
283
284 mutex_unlock(&subsys->lock);
285 return count;
286
287out_unlock:
288 mutex_unlock(&subsys->lock);
289 return ret;
290}
291
292CONFIGFS_ATTR(nvmet_ns_, device_path);
293
294static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
295{
296 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
297}
298
299static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
300 const char *page, size_t count)
301{
302 struct nvmet_ns *ns = to_nvmet_ns(item);
303 struct nvmet_subsys *subsys = ns->subsys;
304 u8 nguid[16];
305 const char *p = page;
306 int i;
307 int ret = 0;
308
309 mutex_lock(&subsys->lock);
310 if (nvmet_ns_enabled(ns)) {
311 ret = -EBUSY;
312 goto out_unlock;
313 }
314
315 for (i = 0; i < 16; i++) {
316 if (p + 2 > page + count) {
317 ret = -EINVAL;
318 goto out_unlock;
319 }
320 if (!isxdigit(p[0]) || !isxdigit(p[1])) {
321 ret = -EINVAL;
322 goto out_unlock;
323 }
324
325 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
326 p += 2;
327
328 if (*p == '-' || *p == ':')
329 p++;
330 }
331
332 memcpy(&ns->nguid, nguid, sizeof(nguid));
333out_unlock:
334 mutex_unlock(&subsys->lock);
335 return ret ? ret : count;
336}
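/*
 * Example (illustrative only): both "112233445566778899aabbccddeeff00"
 * and "11223344-5566-7788-99aa-bbccddeeff00" are accepted by the parser
 * above -- two hex digits per byte, with an optional '-' or ':' allowed
 * after any byte.
 */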
337
338CONFIGFS_ATTR(nvmet_ns_, device_nguid);
339
340static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
341{
342 return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
343}
344
345static ssize_t nvmet_ns_enable_store(struct config_item *item,
346 const char *page, size_t count)
347{
348 struct nvmet_ns *ns = to_nvmet_ns(item);
349 bool enable;
350 int ret = 0;
351
352 if (strtobool(page, &enable))
353 return -EINVAL;
354
355 if (enable)
356 ret = nvmet_ns_enable(ns);
357 else
358 nvmet_ns_disable(ns);
359
360 return ret ? ret : count;
361}
362
363CONFIGFS_ATTR(nvmet_ns_, enable);
364
365static struct configfs_attribute *nvmet_ns_attrs[] = {
366 &nvmet_ns_attr_device_path,
367 &nvmet_ns_attr_device_nguid,
368 &nvmet_ns_attr_enable,
369 NULL,
370};
371
372static void nvmet_ns_release(struct config_item *item)
373{
374 struct nvmet_ns *ns = to_nvmet_ns(item);
375
376 nvmet_ns_free(ns);
377}
378
379static struct configfs_item_operations nvmet_ns_item_ops = {
380 .release = nvmet_ns_release,
381};
382
383static struct config_item_type nvmet_ns_type = {
384 .ct_item_ops = &nvmet_ns_item_ops,
385 .ct_attrs = nvmet_ns_attrs,
386 .ct_owner = THIS_MODULE,
387};
388
389static struct config_group *nvmet_ns_make(struct config_group *group,
390 const char *name)
391{
392 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
393 struct nvmet_ns *ns;
394 int ret;
395 u32 nsid;
396
397 ret = kstrtou32(name, 0, &nsid);
398 if (ret)
399 goto out;
400
401 ret = -EINVAL;
402 if (nsid == 0 || nsid == 0xffffffff)
403 goto out;
404
405 ret = -ENOMEM;
406 ns = nvmet_ns_alloc(subsys, nsid);
407 if (!ns)
408 goto out;
409 config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
410
411 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
412
413 return &ns->group;
414out:
415 return ERR_PTR(ret);
416}
417
418static struct configfs_group_operations nvmet_namespaces_group_ops = {
419 .make_group = nvmet_ns_make,
420};
421
422static struct config_item_type nvmet_namespaces_type = {
423 .ct_group_ops = &nvmet_namespaces_group_ops,
424 .ct_owner = THIS_MODULE,
425};
426
427static int nvmet_port_subsys_allow_link(struct config_item *parent,
428 struct config_item *target)
429{
430 struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
431 struct nvmet_subsys *subsys;
432 struct nvmet_subsys_link *link, *p;
433 int ret;
434
435 if (target->ci_type != &nvmet_subsys_type) {
436 pr_err("can only link subsystems into the subsystems dir.!\n");
437 return -EINVAL;
438 }
439 subsys = to_subsys(target);
440 link = kmalloc(sizeof(*link), GFP_KERNEL);
441 if (!link)
442 return -ENOMEM;
443 link->subsys = subsys;
444
445 down_write(&nvmet_config_sem);
446 ret = -EEXIST;
447 list_for_each_entry(p, &port->subsystems, entry) {
448 if (p->subsys == subsys)
449 goto out_free_link;
450 }
451
452 if (list_empty(&port->subsystems)) {
453 ret = nvmet_enable_port(port);
454 if (ret)
455 goto out_free_link;
456 }
457
458 list_add_tail(&link->entry, &port->subsystems);
459 nvmet_genctr++;
460 up_write(&nvmet_config_sem);
461 return 0;
462
463out_free_link:
464 up_write(&nvmet_config_sem);
465 kfree(link);
466 return ret;
467}
468
469static int nvmet_port_subsys_drop_link(struct config_item *parent,
470 struct config_item *target)
471{
472 struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
473 struct nvmet_subsys *subsys = to_subsys(target);
474 struct nvmet_subsys_link *p;
475
476 down_write(&nvmet_config_sem);
477 list_for_each_entry(p, &port->subsystems, entry) {
478 if (p->subsys == subsys)
479 goto found;
480 }
481 up_write(&nvmet_config_sem);
482 return -EINVAL;
483
484found:
485 list_del(&p->entry);
486 nvmet_genctr++;
487 if (list_empty(&port->subsystems))
488 nvmet_disable_port(port);
489 up_write(&nvmet_config_sem);
490 kfree(p);
491 return 0;
492}
493
494static struct configfs_item_operations nvmet_port_subsys_item_ops = {
495 .allow_link = nvmet_port_subsys_allow_link,
496 .drop_link = nvmet_port_subsys_drop_link,
497};
498
499static struct config_item_type nvmet_port_subsys_type = {
500 .ct_item_ops = &nvmet_port_subsys_item_ops,
501 .ct_owner = THIS_MODULE,
502};
503
504static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
505 struct config_item *target)
506{
507 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
508 struct nvmet_host *host;
509 struct nvmet_host_link *link, *p;
510 int ret;
511
512 if (target->ci_type != &nvmet_host_type) {
513 pr_err("can only link hosts into the allowed_hosts directory!\n");
514 return -EINVAL;
515 }
516
517 host = to_host(target);
518 link = kmalloc(sizeof(*link), GFP_KERNEL);
519 if (!link)
520 return -ENOMEM;
521 link->host = host;
522
523 down_write(&nvmet_config_sem);
524 ret = -EINVAL;
525 if (subsys->allow_any_host) {
526 pr_err("can't add hosts when allow_any_host is set!\n");
527 goto out_free_link;
528 }
529
530 ret = -EEXIST;
531 list_for_each_entry(p, &subsys->hosts, entry) {
532 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
533 goto out_free_link;
534 }
535 list_add_tail(&link->entry, &subsys->hosts);
536 nvmet_genctr++;
537 up_write(&nvmet_config_sem);
538 return 0;
539out_free_link:
540 up_write(&nvmet_config_sem);
541 kfree(link);
542 return ret;
543}
544
545static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
546 struct config_item *target)
547{
548 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
549 struct nvmet_host *host = to_host(target);
550 struct nvmet_host_link *p;
551
552 down_write(&nvmet_config_sem);
553 list_for_each_entry(p, &subsys->hosts, entry) {
554 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
555 goto found;
556 }
557 up_write(&nvmet_config_sem);
558 return -EINVAL;
559
560found:
561 list_del(&p->entry);
562 nvmet_genctr++;
563 up_write(&nvmet_config_sem);
564 kfree(p);
565 return 0;
566}
567
568static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
569 .allow_link = nvmet_allowed_hosts_allow_link,
570 .drop_link = nvmet_allowed_hosts_drop_link,
571};
572
573static struct config_item_type nvmet_allowed_hosts_type = {
574 .ct_item_ops = &nvmet_allowed_hosts_item_ops,
575 .ct_owner = THIS_MODULE,
576};
577
578static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
579 char *page)
580{
581 return snprintf(page, PAGE_SIZE, "%d\n",
582 to_subsys(item)->allow_any_host);
583}
584
585static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
586 const char *page, size_t count)
587{
588 struct nvmet_subsys *subsys = to_subsys(item);
589 bool allow_any_host;
590 int ret = 0;
591
592 if (strtobool(page, &allow_any_host))
593 return -EINVAL;
594
595 down_write(&nvmet_config_sem);
596 if (allow_any_host && !list_empty(&subsys->hosts)) {
597 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
598 ret = -EINVAL;
599 goto out_unlock;
600 }
601
602 subsys->allow_any_host = allow_any_host;
603out_unlock:
604 up_write(&nvmet_config_sem);
605 return ret ? ret : count;
606}
607
608CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
609
610static struct configfs_attribute *nvmet_subsys_attrs[] = {
611 &nvmet_subsys_attr_attr_allow_any_host,
612 NULL,
613};
614
615/*
616 * Subsystem structures & folder operation functions below
617 */
618static void nvmet_subsys_release(struct config_item *item)
619{
620 struct nvmet_subsys *subsys = to_subsys(item);
621
622 nvmet_subsys_put(subsys);
623}
624
625static struct configfs_item_operations nvmet_subsys_item_ops = {
626 .release = nvmet_subsys_release,
627};
628
629static struct config_item_type nvmet_subsys_type = {
630 .ct_item_ops = &nvmet_subsys_item_ops,
631 .ct_attrs = nvmet_subsys_attrs,
632 .ct_owner = THIS_MODULE,
633};
634
635static struct config_group *nvmet_subsys_make(struct config_group *group,
636 const char *name)
637{
638 struct nvmet_subsys *subsys;
639
640 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
641 pr_err("can't create discovery subsystem through configfs\n");
642 return ERR_PTR(-EINVAL);
643 }
644
645 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
646 if (!subsys)
647 return ERR_PTR(-ENOMEM);
648
649 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
650
651 config_group_init_type_name(&subsys->namespaces_group,
652 "namespaces", &nvmet_namespaces_type);
653 configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
654
655 config_group_init_type_name(&subsys->allowed_hosts_group,
656 "allowed_hosts", &nvmet_allowed_hosts_type);
657 configfs_add_default_group(&subsys->allowed_hosts_group,
658 &subsys->group);
659
660 return &subsys->group;
661}
662
663static struct configfs_group_operations nvmet_subsystems_group_ops = {
664 .make_group = nvmet_subsys_make,
665};
666
667static struct config_item_type nvmet_subsystems_type = {
668 .ct_group_ops = &nvmet_subsystems_group_ops,
669 .ct_owner = THIS_MODULE,
670};
671
672static ssize_t nvmet_referral_enable_show(struct config_item *item,
673 char *page)
674{
675 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
676}
677
678static ssize_t nvmet_referral_enable_store(struct config_item *item,
679 const char *page, size_t count)
680{
681 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
682 struct nvmet_port *port = to_nvmet_port(item);
683 bool enable;
684
685 if (strtobool(page, &enable))
686 goto inval;
687
688 if (enable)
689 nvmet_referral_enable(parent, port);
690 else
691 nvmet_referral_disable(port);
692
693 return count;
694inval:
695 pr_err("Invalid value '%s' for enable\n", page);
696 return -EINVAL;
697}
698
699CONFIGFS_ATTR(nvmet_referral_, enable);
700
701/*
702 * Discovery Service subsystem definitions
703 */
704static struct configfs_attribute *nvmet_referral_attrs[] = {
705 &nvmet_attr_addr_adrfam,
706 &nvmet_attr_addr_portid,
707 &nvmet_attr_addr_treq,
708 &nvmet_attr_addr_traddr,
709 &nvmet_attr_addr_trsvcid,
710 &nvmet_attr_addr_trtype,
711 &nvmet_referral_attr_enable,
712 NULL,
713};
714
715static void nvmet_referral_release(struct config_item *item)
716{
717 struct nvmet_port *port = to_nvmet_port(item);
718
719 nvmet_referral_disable(port);
720 kfree(port);
721}
722
723static struct configfs_item_operations nvmet_referral_item_ops = {
724 .release = nvmet_referral_release,
725};
726
727static struct config_item_type nvmet_referral_type = {
728 .ct_owner = THIS_MODULE,
729 .ct_attrs = nvmet_referral_attrs,
730 .ct_item_ops = &nvmet_referral_item_ops,
731};
732
733static struct config_group *nvmet_referral_make(
734 struct config_group *group, const char *name)
735{
736 struct nvmet_port *port;
737
738 port = kzalloc(sizeof(*port), GFP_KERNEL);
739 if (!port)
740 return ERR_CAST(port);
741
742 INIT_LIST_HEAD(&port->entry);
743 config_group_init_type_name(&port->group, name, &nvmet_referral_type);
744
745 return &port->group;
746}
747
748static struct configfs_group_operations nvmet_referral_group_ops = {
749 .make_group = nvmet_referral_make,
750};
751
752static struct config_item_type nvmet_referrals_type = {
753 .ct_owner = THIS_MODULE,
754 .ct_group_ops = &nvmet_referral_group_ops,
755};
756
757/*
758 * Ports definitions.
759 */
760static void nvmet_port_release(struct config_item *item)
761{
762 struct nvmet_port *port = to_nvmet_port(item);
763
764 kfree(port);
765}
766
767static struct configfs_attribute *nvmet_port_attrs[] = {
768 &nvmet_attr_addr_adrfam,
769 &nvmet_attr_addr_treq,
770 &nvmet_attr_addr_traddr,
771 &nvmet_attr_addr_trsvcid,
772 &nvmet_attr_addr_trtype,
773 NULL,
774};
775
776static struct configfs_item_operations nvmet_port_item_ops = {
777 .release = nvmet_port_release,
778};
779
780static struct config_item_type nvmet_port_type = {
781 .ct_attrs = nvmet_port_attrs,
782 .ct_item_ops = &nvmet_port_item_ops,
783 .ct_owner = THIS_MODULE,
784};
785
786static struct config_group *nvmet_ports_make(struct config_group *group,
787 const char *name)
788{
789 struct nvmet_port *port;
790 u16 portid;
791
792 if (kstrtou16(name, 0, &portid))
793 return ERR_PTR(-EINVAL);
794
795 port = kzalloc(sizeof(*port), GFP_KERNEL);
796 if (!port)
797 return ERR_CAST(port);
798
799 INIT_LIST_HEAD(&port->entry);
800 INIT_LIST_HEAD(&port->subsystems);
801 INIT_LIST_HEAD(&port->referrals);
802
803 port->disc_addr.portid = cpu_to_le16(portid);
804 config_group_init_type_name(&port->group, name, &nvmet_port_type);
805
806 config_group_init_type_name(&port->subsys_group,
807 "subsystems", &nvmet_port_subsys_type);
808 configfs_add_default_group(&port->subsys_group, &port->group);
809
810 config_group_init_type_name(&port->referrals_group,
811 "referrals", &nvmet_referrals_type);
812 configfs_add_default_group(&port->referrals_group, &port->group);
813
814 return &port->group;
815}
816
817static struct configfs_group_operations nvmet_ports_group_ops = {
818 .make_group = nvmet_ports_make,
819};
820
821static struct config_item_type nvmet_ports_type = {
822 .ct_group_ops = &nvmet_ports_group_ops,
823 .ct_owner = THIS_MODULE,
824};
825
826static struct config_group nvmet_subsystems_group;
827static struct config_group nvmet_ports_group;
828
829static void nvmet_host_release(struct config_item *item)
830{
831 struct nvmet_host *host = to_host(item);
832
833 kfree(host);
834}
835
836static struct configfs_item_operations nvmet_host_item_ops = {
837 .release = nvmet_host_release,
838};
839
840static struct config_item_type nvmet_host_type = {
841 .ct_item_ops = &nvmet_host_item_ops,
842 .ct_owner = THIS_MODULE,
843};
844
845static struct config_group *nvmet_hosts_make_group(struct config_group *group,
846 const char *name)
847{
848 struct nvmet_host *host;
849
850 host = kzalloc(sizeof(*host), GFP_KERNEL);
851 if (!host)
852 return ERR_PTR(-ENOMEM);
853
854 config_group_init_type_name(&host->group, name, &nvmet_host_type);
855
856 return &host->group;
857}
858
859static struct configfs_group_operations nvmet_hosts_group_ops = {
860 .make_group = nvmet_hosts_make_group,
861};
862
863static struct config_item_type nvmet_hosts_type = {
864 .ct_group_ops = &nvmet_hosts_group_ops,
865 .ct_owner = THIS_MODULE,
866};
867
868static struct config_group nvmet_hosts_group;
869
870static struct config_item_type nvmet_root_type = {
871 .ct_owner = THIS_MODULE,
872};
873
874static struct configfs_subsystem nvmet_configfs_subsystem = {
875 .su_group = {
876 .cg_item = {
877 .ci_namebuf = "nvmet",
878 .ci_type = &nvmet_root_type,
879 },
880 },
881};
882
883int __init nvmet_init_configfs(void)
884{
885 int ret;
886
887 config_group_init(&nvmet_configfs_subsystem.su_group);
888 mutex_init(&nvmet_configfs_subsystem.su_mutex);
889
890 config_group_init_type_name(&nvmet_subsystems_group,
891 "subsystems", &nvmet_subsystems_type);
892 configfs_add_default_group(&nvmet_subsystems_group,
893 &nvmet_configfs_subsystem.su_group);
894
895 config_group_init_type_name(&nvmet_ports_group,
896 "ports", &nvmet_ports_type);
897 configfs_add_default_group(&nvmet_ports_group,
898 &nvmet_configfs_subsystem.su_group);
899
900 config_group_init_type_name(&nvmet_hosts_group,
901 "hosts", &nvmet_hosts_type);
902 configfs_add_default_group(&nvmet_hosts_group,
903 &nvmet_configfs_subsystem.su_group);
904
905 ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
906 if (ret) {
907 pr_err("configfs_register_subsystem: %d\n", ret);
908 return ret;
909 }
910
911 return 0;
912}
913
914void __exit nvmet_exit_configfs(void)
915{
916 configfs_unregister_subsystem(&nvmet_configfs_subsystem);
917}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
new file mode 100644
index 000000000000..e0b3f0166722
--- /dev/null
+++ b/drivers/nvme/target/core.c
@@ -0,0 +1,964 @@
1/*
2 * Common code for the NVMe target.
3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/module.h>
16#include "nvmet.h"
17
18static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
19
20/*
21 * This read/write semaphore is used to synchronize access to configuration
22 * information on a target system that will result in discovery log page
23 * information change for at least one host.
25 * The full list of resources to be protected by this semaphore is:
25 *
26 * - subsystems list
27 * - per-subsystem allowed hosts list
28 * - allow_any_host subsystem attribute
29 * - nvmet_genctr
30 * - the nvmet_transports array
31 *
32 * When updating any of those lists/structures the write lock should be
33 * taken, while readers (populating the discovery log page or checking a
34 * host-subsystem link) take the read lock to allow concurrent access.
35 */
36DECLARE_RWSEM(nvmet_config_sem);
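/*
 * Illustrative sketch of the read side: code that only consumes the
 * configuration, e.g. when building a discovery log page or checking a
 * host-subsystem link, would bracket its traversal like
 *
 *	down_read(&nvmet_config_sem);
 *	list_for_each_entry(p, &port->subsystems, entry)
 *		...
 *	up_read(&nvmet_config_sem);
 *
 * while the configfs store/link handlers above take the semaphore for
 * writing before changing any of the protected lists.
 */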
37
38static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
39 const char *subsysnqn);
40
41u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
42 size_t len)
43{
44 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
45 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
46 return 0;
47}
48
49u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
50{
51 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
52 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
53 return 0;
54}
55
56static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
57{
58 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
59}
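/*
 * Example (illustrative only): the namespace change notification queued
 * by nvmet_ns_enable()/nvmet_ns_disable() below passes
 * (NVME_AER_TYPE_NOTICE, 0, 0), so the result the host sees in the AER
 * completion is just the notice type in the low byte, with the event
 * information and log page identifier bytes left at zero.
 */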
60
61static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
62{
63 struct nvmet_req *req;
64
65 while (1) {
66 mutex_lock(&ctrl->lock);
67 if (!ctrl->nr_async_event_cmds) {
68 mutex_unlock(&ctrl->lock);
69 return;
70 }
71
72 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
73 mutex_unlock(&ctrl->lock);
74 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
75 }
76}
77
78static void nvmet_async_event_work(struct work_struct *work)
79{
80 struct nvmet_ctrl *ctrl =
81 container_of(work, struct nvmet_ctrl, async_event_work);
82 struct nvmet_async_event *aen;
83 struct nvmet_req *req;
84
85 while (1) {
86 mutex_lock(&ctrl->lock);
87 aen = list_first_entry_or_null(&ctrl->async_events,
88 struct nvmet_async_event, entry);
89 if (!aen || !ctrl->nr_async_event_cmds) {
90 mutex_unlock(&ctrl->lock);
91 return;
92 }
93
94 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
95 nvmet_set_result(req, nvmet_async_event_result(aen));
96
97 list_del(&aen->entry);
98 kfree(aen);
99
100 mutex_unlock(&ctrl->lock);
101 nvmet_req_complete(req, 0);
102 }
103}
104
105static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
106 u8 event_info, u8 log_page)
107{
108 struct nvmet_async_event *aen;
109
110 aen = kmalloc(sizeof(*aen), GFP_KERNEL);
111 if (!aen)
112 return;
113
114 aen->event_type = event_type;
115 aen->event_info = event_info;
116 aen->log_page = log_page;
117
118 mutex_lock(&ctrl->lock);
119 list_add_tail(&aen->entry, &ctrl->async_events);
120 mutex_unlock(&ctrl->lock);
121
122 schedule_work(&ctrl->async_event_work);
123}
124
125int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
126{
127 int ret = 0;
128
129 down_write(&nvmet_config_sem);
130 if (nvmet_transports[ops->type])
131 ret = -EINVAL;
132 else
133 nvmet_transports[ops->type] = ops;
134 up_write(&nvmet_config_sem);
135
136 return ret;
137}
138EXPORT_SYMBOL_GPL(nvmet_register_transport);
139
140void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
141{
142 down_write(&nvmet_config_sem);
143 nvmet_transports[ops->type] = NULL;
144 up_write(&nvmet_config_sem);
145}
146EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
147
148int nvmet_enable_port(struct nvmet_port *port)
149{
150 struct nvmet_fabrics_ops *ops;
151 int ret;
152
153 lockdep_assert_held(&nvmet_config_sem);
154
155 ops = nvmet_transports[port->disc_addr.trtype];
156 if (!ops) {
157 up_write(&nvmet_config_sem);
158 request_module("nvmet-transport-%d", port->disc_addr.trtype);
159 down_write(&nvmet_config_sem);
160 ops = nvmet_transports[port->disc_addr.trtype];
161 if (!ops) {
162 pr_err("transport type %d not supported\n",
163 port->disc_addr.trtype);
164 return -EINVAL;
165 }
166 }
167
168 if (!try_module_get(ops->owner))
169 return -EINVAL;
170
171 ret = ops->add_port(port);
172 if (ret) {
173 module_put(ops->owner);
174 return ret;
175 }
176
177 port->enabled = true;
178 return 0;
179}
180
181void nvmet_disable_port(struct nvmet_port *port)
182{
183 struct nvmet_fabrics_ops *ops;
184
185 lockdep_assert_held(&nvmet_config_sem);
186
187 port->enabled = false;
188
189 ops = nvmet_transports[port->disc_addr.trtype];
190 ops->remove_port(port);
191 module_put(ops->owner);
192}
193
194static void nvmet_keep_alive_timer(struct work_struct *work)
195{
196 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
197 struct nvmet_ctrl, ka_work);
198
199 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
200 ctrl->cntlid, ctrl->kato);
201
202 ctrl->ops->delete_ctrl(ctrl);
203}
204
205static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
206{
207 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
208 ctrl->cntlid, ctrl->kato);
209
210 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
211 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
212}
213
214static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
215{
216 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
217
218 cancel_delayed_work_sync(&ctrl->ka_work);
219}
220
221static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
222 __le32 nsid)
223{
224 struct nvmet_ns *ns;
225
226 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
227 if (ns->nsid == le32_to_cpu(nsid))
228 return ns;
229 }
230
231 return NULL;
232}
233
234struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
235{
236 struct nvmet_ns *ns;
237
238 rcu_read_lock();
239 ns = __nvmet_find_namespace(ctrl, nsid);
240 if (ns)
241 percpu_ref_get(&ns->ref);
242 rcu_read_unlock();
243
244 return ns;
245}
246
247static void nvmet_destroy_namespace(struct percpu_ref *ref)
248{
249 struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
250
251 complete(&ns->disable_done);
252}
253
254void nvmet_put_namespace(struct nvmet_ns *ns)
255{
256 percpu_ref_put(&ns->ref);
257}
258
259int nvmet_ns_enable(struct nvmet_ns *ns)
260{
261 struct nvmet_subsys *subsys = ns->subsys;
262 struct nvmet_ctrl *ctrl;
263 int ret = 0;
264
265 mutex_lock(&subsys->lock);
266 if (!list_empty(&ns->dev_link))
267 goto out_unlock;
268
269 ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
270 NULL);
271 if (IS_ERR(ns->bdev)) {
272 pr_err("nvmet: failed to open block device %s: (%ld)\n",
273 ns->device_path, PTR_ERR(ns->bdev));
274 ret = PTR_ERR(ns->bdev);
275 ns->bdev = NULL;
276 goto out_unlock;
277 }
278
279 ns->size = i_size_read(ns->bdev->bd_inode);
280 ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
281
282 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
283 0, GFP_KERNEL);
284 if (ret)
285 goto out_blkdev_put;
286
287 if (ns->nsid > subsys->max_nsid)
288 subsys->max_nsid = ns->nsid;
289
290 /*
291 * The namespaces list needs to be sorted to simplify the implementation
292 * of the Identify Namespace List subcommand.
293 */
294 if (list_empty(&subsys->namespaces)) {
295 list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
296 } else {
297 struct nvmet_ns *old;
298
299 list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
300 BUG_ON(ns->nsid == old->nsid);
301 if (ns->nsid < old->nsid)
302 break;
303 }
304
305 list_add_tail_rcu(&ns->dev_link, &old->dev_link);
306 }
307
308 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
309 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
310
311 ret = 0;
312out_unlock:
313 mutex_unlock(&subsys->lock);
314 return ret;
315out_blkdev_put:
316 blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
317 ns->bdev = NULL;
318 goto out_unlock;
319}
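/*
 * Example (illustrative only): with NSIDs 1, 3 and 7 already enabled,
 * enabling NSID 5 breaks out of the loop above at the "7" entry and
 * list_add_tail_rcu() places the new entry in front of it, keeping the
 * list sorted as 1, 3, 5, 7 -- which is what the Identify Namespace List
 * handler in admin-cmd.c relies on.
 */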
320
321void nvmet_ns_disable(struct nvmet_ns *ns)
322{
323 struct nvmet_subsys *subsys = ns->subsys;
324 struct nvmet_ctrl *ctrl;
325
326 mutex_lock(&subsys->lock);
327 if (list_empty(&ns->dev_link)) {
328 mutex_unlock(&subsys->lock);
329 return;
330 }
331 list_del_init(&ns->dev_link);
332 mutex_unlock(&subsys->lock);
333
334 /*
335 * Now that we removed the namespace from the lookup list, we
336 * can kill the percpu ref and wait for any remaining references
337 * to be dropped, as well as an RCU grace period for anyone only
338 * using the namespace under rcu_read_lock(). Note that we can't
339 * use call_rcu here as we need to ensure the namespaces have
340 * been fully destroyed before unloading the module.
341 */
342 percpu_ref_kill(&ns->ref);
343 synchronize_rcu();
344 wait_for_completion(&ns->disable_done);
345 percpu_ref_exit(&ns->ref);
346
347 mutex_lock(&subsys->lock);
348 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
349 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
350
351 if (ns->bdev)
352 blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
353 mutex_unlock(&subsys->lock);
354}
355
356void nvmet_ns_free(struct nvmet_ns *ns)
357{
358 nvmet_ns_disable(ns);
359
360 kfree(ns->device_path);
361 kfree(ns);
362}
363
364struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
365{
366 struct nvmet_ns *ns;
367
368 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
369 if (!ns)
370 return NULL;
371
372 INIT_LIST_HEAD(&ns->dev_link);
373 init_completion(&ns->disable_done);
374
375 ns->nsid = nsid;
376 ns->subsys = subsys;
377
378 return ns;
379}
380
381static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
382{
383 if (status)
384 nvmet_set_status(req, status);
385
386 /* XXX: need to fill in something useful for sq_head */
387 req->rsp->sq_head = 0;
388 if (likely(req->sq)) /* may happen during early failure */
389 req->rsp->sq_id = cpu_to_le16(req->sq->qid);
390 req->rsp->command_id = req->cmd->common.command_id;
391
392 if (req->ns)
393 nvmet_put_namespace(req->ns);
394 req->ops->queue_response(req);
395}
396
397void nvmet_req_complete(struct nvmet_req *req, u16 status)
398{
399 __nvmet_req_complete(req, status);
400 percpu_ref_put(&req->sq->ref);
401}
402EXPORT_SYMBOL_GPL(nvmet_req_complete);
403
404void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
405 u16 qid, u16 size)
406{
407 cq->qid = qid;
408 cq->size = size;
409
410 ctrl->cqs[qid] = cq;
411}
412
413void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
414 u16 qid, u16 size)
415{
416 sq->qid = qid;
417 sq->size = size;
418
419 ctrl->sqs[qid] = sq;
420}
421
422void nvmet_sq_destroy(struct nvmet_sq *sq)
423{
424 /*
425 * If this is the admin queue, complete all AERs so that our
426 * queue doesn't have outstanding requests on it.
427 */
428 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
429 nvmet_async_events_free(sq->ctrl);
430 percpu_ref_kill(&sq->ref);
431 wait_for_completion(&sq->free_done);
432 percpu_ref_exit(&sq->ref);
433
434 if (sq->ctrl) {
435 nvmet_ctrl_put(sq->ctrl);
436 sq->ctrl = NULL; /* allows reusing the queue later */
437 }
438}
439EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
440
441static void nvmet_sq_free(struct percpu_ref *ref)
442{
443 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
444
445 complete(&sq->free_done);
446}
447
448int nvmet_sq_init(struct nvmet_sq *sq)
449{
450 int ret;
451
452 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
453 if (ret) {
454 pr_err("percpu_ref init failed!\n");
455 return ret;
456 }
457 init_completion(&sq->free_done);
458
459 return 0;
460}
461EXPORT_SYMBOL_GPL(nvmet_sq_init);
462
463bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
464 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
465{
466 u8 flags = req->cmd->common.flags;
467 u16 status;
468
469 req->cq = cq;
470 req->sq = sq;
471 req->ops = ops;
472 req->sg = NULL;
473 req->sg_cnt = 0;
474 req->rsp->status = 0;
475
476 /* no support for fused commands yet */
477 if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
478 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
479 goto fail;
480 }
481
482 /* either variant of SGLs is fine, as we don't support metadata */
483 if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
484 (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
485 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
486 goto fail;
487 }
488
489 if (unlikely(!req->sq->ctrl))
490 /* will return an error for any Non-connect command: */
491 status = nvmet_parse_connect_cmd(req);
492 else if (likely(req->sq->qid != 0))
493 status = nvmet_parse_io_cmd(req);
494 else if (req->cmd->common.opcode == nvme_fabrics_command)
495 status = nvmet_parse_fabrics_cmd(req);
496 else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
497 status = nvmet_parse_discovery_cmd(req);
498 else
499 status = nvmet_parse_admin_cmd(req);
500
501 if (status)
502 goto fail;
503
504 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
505 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
506 goto fail;
507 }
508
509 return true;
510
511fail:
512 __nvmet_req_complete(req, status);
513 return false;
514}
515EXPORT_SYMBOL_GPL(nvmet_req_init);
516
517static inline bool nvmet_cc_en(u32 cc)
518{
519 return cc & 0x1;
520}
521
522static inline u8 nvmet_cc_css(u32 cc)
523{
524 return (cc >> 4) & 0x7;
525}
526
527static inline u8 nvmet_cc_mps(u32 cc)
528{
529 return (cc >> 7) & 0xf;
530}
531
532static inline u8 nvmet_cc_ams(u32 cc)
533{
534 return (cc >> 11) & 0x7;
535}
536
537static inline u8 nvmet_cc_shn(u32 cc)
538{
539 return (cc >> 14) & 0x3;
540}
541
542static inline u8 nvmet_cc_iosqes(u32 cc)
543{
544 return (cc >> 16) & 0xf;
545}
546
547static inline u8 nvmet_cc_iocqes(u32 cc)
548{
549 return (cc >> 20) & 0xf;
550}
551
552static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
553{
554 lockdep_assert_held(&ctrl->lock);
555
556 if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
557 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
558 nvmet_cc_mps(ctrl->cc) != 0 ||
559 nvmet_cc_ams(ctrl->cc) != 0 ||
560 nvmet_cc_css(ctrl->cc) != 0) {
561 ctrl->csts = NVME_CSTS_CFS;
562 return;
563 }
564
565 ctrl->csts = NVME_CSTS_RDY;
566}
567
568static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
569{
570 lockdep_assert_held(&ctrl->lock);
571
572 /* XXX: tear down queues? */
573 ctrl->csts &= ~NVME_CSTS_RDY;
574 ctrl->cc = 0;
575}
576
577void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
578{
579 u32 old;
580
581 mutex_lock(&ctrl->lock);
582 old = ctrl->cc;
583 ctrl->cc = new;
584
585 if (nvmet_cc_en(new) && !nvmet_cc_en(old))
586 nvmet_start_ctrl(ctrl);
587 if (!nvmet_cc_en(new) && nvmet_cc_en(old))
588 nvmet_clear_ctrl(ctrl);
589 if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
590 nvmet_clear_ctrl(ctrl);
591 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
592 }
593 if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
594 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
595 mutex_unlock(&ctrl->lock);
596}
597
598static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
599{
600 /* command sets supported: NVMe command set: */
601 ctrl->cap = (1ULL << 37);
602 /* CC.EN timeout in 500msec units: */
603 ctrl->cap |= (15ULL << 24);
604 /* maximum queue entries supported: */
605 ctrl->cap |= NVMET_QUEUE_SIZE - 1;
606}
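[Editor's note] Spelled out, with NVMET_QUEUE_SIZE defined as 1024 later in nvmet.h, the CAP value assembled here is:

/*
 * CAP.CSS  = bit 37           -> NVM command set supported
 * CAP.TO   = 15 (bits 31:24)  -> 7.5 s CC.EN timeout, in 500 ms units
 * CAP.MQES = 1023 (bits 15:0) -> 0's based maximum queue entries
 * => cap == 0x000000200f0003ff
 */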
607
608u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
609 struct nvmet_req *req, struct nvmet_ctrl **ret)
610{
611 struct nvmet_subsys *subsys;
612 struct nvmet_ctrl *ctrl;
613 u16 status = 0;
614
615 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
616 if (!subsys) {
617 pr_warn("connect request for invalid subsystem %s!\n",
618 subsysnqn);
619 req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
620 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
621 }
622
623 mutex_lock(&subsys->lock);
624 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
625 if (ctrl->cntlid == cntlid) {
626 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
627 pr_warn("hostnqn mismatch.\n");
628 continue;
629 }
630 if (!kref_get_unless_zero(&ctrl->ref))
631 continue;
632
633 *ret = ctrl;
634 goto out;
635 }
636 }
637
638 pr_warn("could not find controller %d for subsys %s / host %s\n",
639 cntlid, subsysnqn, hostnqn);
640 req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
641 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
642
643out:
644 mutex_unlock(&subsys->lock);
645 nvmet_subsys_put(subsys);
646 return status;
647}
648
649static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
650 const char *hostnqn)
651{
652 struct nvmet_host_link *p;
653
654 if (subsys->allow_any_host)
655 return true;
656
657 list_for_each_entry(p, &subsys->hosts, entry) {
658 if (!strcmp(nvmet_host_name(p->host), hostnqn))
659 return true;
660 }
661
662 return false;
663}
664
665static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
666 const char *hostnqn)
667{
668 struct nvmet_subsys_link *s;
669
670 list_for_each_entry(s, &req->port->subsystems, entry) {
671 if (__nvmet_host_allowed(s->subsys, hostnqn))
672 return true;
673 }
674
675 return false;
676}
677
678bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
679 const char *hostnqn)
680{
681 lockdep_assert_held(&nvmet_config_sem);
682
683 if (subsys->type == NVME_NQN_DISC)
684 return nvmet_host_discovery_allowed(req, hostnqn);
685 else
686 return __nvmet_host_allowed(subsys, hostnqn);
687}
688
689u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
690 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
691{
692 struct nvmet_subsys *subsys;
693 struct nvmet_ctrl *ctrl;
694 int ret;
695 u16 status;
696
697 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
698 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
699 if (!subsys) {
700 pr_warn("connect request for invalid subsystem %s!\n",
701 subsysnqn);
702 req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
703 goto out;
704 }
705
706 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
707 down_read(&nvmet_config_sem);
708 if (!nvmet_host_allowed(req, subsys, hostnqn)) {
709 pr_info("connect by host %s for subsystem %s not allowed\n",
710 hostnqn, subsysnqn);
711 req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
712 up_read(&nvmet_config_sem);
713 goto out_put_subsystem;
714 }
715 up_read(&nvmet_config_sem);
716
717 status = NVME_SC_INTERNAL;
718 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
719 if (!ctrl)
720 goto out_put_subsystem;
721 mutex_init(&ctrl->lock);
722
723 nvmet_init_cap(ctrl);
724
725 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
726 INIT_LIST_HEAD(&ctrl->async_events);
727
728 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
729 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
730
731 kref_init(&ctrl->ref);
732 ctrl->subsys = subsys;
733
734 ctrl->cqs = kcalloc(subsys->max_qid + 1,
735 sizeof(struct nvmet_cq *),
736 GFP_KERNEL);
737 if (!ctrl->cqs)
738 goto out_free_ctrl;
739
740 ctrl->sqs = kcalloc(subsys->max_qid + 1,
741 sizeof(struct nvmet_sq *),
742 GFP_KERNEL);
743 if (!ctrl->sqs)
744 goto out_free_cqs;
745
746 ret = ida_simple_get(&subsys->cntlid_ida,
747 NVME_CNTLID_MIN, NVME_CNTLID_MAX,
748 GFP_KERNEL);
749 if (ret < 0) {
750 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
751 goto out_free_sqs;
752 }
753 ctrl->cntlid = ret;
754
755 ctrl->ops = req->ops;
756 if (ctrl->subsys->type == NVME_NQN_DISC) {
757 /* Don't accept keep-alive timeout for discovery controllers */
758 if (kato) {
759 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
760 goto out_free_sqs;
761 }
762
763 /*
764 * Discovery controllers use an arbitrarily high value in order
765 * to clean up stale discovery sessions.
766 *
767 * From the latest base diff RC:
768 * "The Keep Alive command is not supported by
769 * Discovery controllers. A transport may specify a
770 * fixed Discovery controller activity timeout value
771 * (e.g., 2 minutes). If no commands are received
772 * by a Discovery controller within that time
773 * period, the controller may perform the
774 * actions for Keep Alive Timer expiration".
775 */
776 ctrl->kato = NVMET_DISC_KATO;
777 } else {
778 /* keep-alive timeout in seconds */
779 ctrl->kato = DIV_ROUND_UP(kato, 1000);
780 }
781 nvmet_start_keep_alive_timer(ctrl);
782
783 mutex_lock(&subsys->lock);
784 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
785 mutex_unlock(&subsys->lock);
786
787 *ctrlp = ctrl;
788 return 0;
789
790out_free_sqs:
791 kfree(ctrl->sqs);
792out_free_cqs:
793 kfree(ctrl->cqs);
794out_free_ctrl:
795 kfree(ctrl);
796out_put_subsystem:
797 nvmet_subsys_put(subsys);
798out:
799 return status;
800}
801
802static void nvmet_ctrl_free(struct kref *ref)
803{
804 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
805 struct nvmet_subsys *subsys = ctrl->subsys;
806
807 nvmet_stop_keep_alive_timer(ctrl);
808
809 mutex_lock(&subsys->lock);
810 list_del(&ctrl->subsys_entry);
811 mutex_unlock(&subsys->lock);
812
813 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
814 nvmet_subsys_put(subsys);
815
816 kfree(ctrl->sqs);
817 kfree(ctrl->cqs);
818 kfree(ctrl);
819}
820
821void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
822{
823 kref_put(&ctrl->ref, nvmet_ctrl_free);
824}
825
826static void nvmet_fatal_error_handler(struct work_struct *work)
827{
828 struct nvmet_ctrl *ctrl =
829 container_of(work, struct nvmet_ctrl, fatal_err_work);
830
831 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
832 ctrl->ops->delete_ctrl(ctrl);
833}
834
835void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
836{
837 ctrl->csts |= NVME_CSTS_CFS;
838 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
839 schedule_work(&ctrl->fatal_err_work);
840}
841EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
842
843static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
844 const char *subsysnqn)
845{
846 struct nvmet_subsys_link *p;
847
848 if (!port)
849 return NULL;
850
851 if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
852 NVMF_NQN_SIZE)) {
853 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
854 return NULL;
855 return nvmet_disc_subsys;
856 }
857
858 down_read(&nvmet_config_sem);
859 list_for_each_entry(p, &port->subsystems, entry) {
860 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
861 NVMF_NQN_SIZE)) {
862 if (!kref_get_unless_zero(&p->subsys->ref))
863 break;
864 up_read(&nvmet_config_sem);
865 return p->subsys;
866 }
867 }
868 up_read(&nvmet_config_sem);
869 return NULL;
870}
871
872struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
873 enum nvme_subsys_type type)
874{
875 struct nvmet_subsys *subsys;
876
877 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
878 if (!subsys)
879 return NULL;
880
881 subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
882
883 switch (type) {
884 case NVME_NQN_NVME:
885 subsys->max_qid = NVMET_NR_QUEUES;
886 break;
887 case NVME_NQN_DISC:
888 subsys->max_qid = 0;
889 break;
890 default:
891 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
892 kfree(subsys);
893 return NULL;
894 }
895 subsys->type = type;
896 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
897 GFP_KERNEL);
898 if (IS_ERR(subsys->subsysnqn)) {
899 kfree(subsys);
900 return NULL;
901 }
902
903 kref_init(&subsys->ref);
904
905 mutex_init(&subsys->lock);
906 INIT_LIST_HEAD(&subsys->namespaces);
907 INIT_LIST_HEAD(&subsys->ctrls);
908
909 ida_init(&subsys->cntlid_ida);
910
911 INIT_LIST_HEAD(&subsys->hosts);
912
913 return subsys;
914}
915
916static void nvmet_subsys_free(struct kref *ref)
917{
918 struct nvmet_subsys *subsys =
919 container_of(ref, struct nvmet_subsys, ref);
920
921 WARN_ON_ONCE(!list_empty(&subsys->namespaces));
922
923 ida_destroy(&subsys->cntlid_ida);
924 kfree(subsys->subsysnqn);
925 kfree(subsys);
926}
927
928void nvmet_subsys_put(struct nvmet_subsys *subsys)
929{
930 kref_put(&subsys->ref, nvmet_subsys_free);
931}
932
933static int __init nvmet_init(void)
934{
935 int error;
936
937 error = nvmet_init_discovery();
938 if (error)
939 goto out;
940
941 error = nvmet_init_configfs();
942 if (error)
943 goto out_exit_discovery;
944 return 0;
945
946out_exit_discovery:
947 nvmet_exit_discovery();
948out:
949 return error;
950}
951
952static void __exit nvmet_exit(void)
953{
954 nvmet_exit_configfs();
955 nvmet_exit_discovery();
956
957 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
958 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
959}
960
961module_init(nvmet_init);
962module_exit(nvmet_exit);
963
964MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
new file mode 100644
index 000000000000..6f65646e89cf
--- /dev/null
+++ b/drivers/nvme/target/discovery.c
@@ -0,0 +1,221 @@
1/*
2 * Discovery service for the NVMe over Fabrics target.
3 * Copyright (C) 2016 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/slab.h>
16#include <generated/utsrelease.h>
17#include "nvmet.h"
18
19struct nvmet_subsys *nvmet_disc_subsys;
20
21u64 nvmet_genctr;
22
23void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
24{
25 down_write(&nvmet_config_sem);
26 if (list_empty(&port->entry)) {
27 list_add_tail(&port->entry, &parent->referrals);
28 port->enabled = true;
29 nvmet_genctr++;
30 }
31 up_write(&nvmet_config_sem);
32}
33
34void nvmet_referral_disable(struct nvmet_port *port)
35{
36 down_write(&nvmet_config_sem);
37 if (!list_empty(&port->entry)) {
38 port->enabled = false;
39 list_del_init(&port->entry);
40 nvmet_genctr++;
41 }
42 up_write(&nvmet_config_sem);
43}
44
45static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
46 struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec)
47{
48 struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
49
50 e->trtype = port->disc_addr.trtype;
51 e->adrfam = port->disc_addr.adrfam;
52 e->treq = port->disc_addr.treq;
53 e->portid = port->disc_addr.portid;
54 /* we support only dynamic controllers */
55 e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
56 e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
57 e->nqntype = type;
58 memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
59 memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
60 memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
61 memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
62}
63
64static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
65{
66 const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
67 struct nvmet_ctrl *ctrl = req->sq->ctrl;
68 struct nvmf_disc_rsp_page_hdr *hdr;
69 size_t data_len = nvmet_get_log_page_len(req->cmd);
70 size_t alloc_len = max(data_len, sizeof(*hdr));
71 int residual_len = data_len - sizeof(*hdr);
72 struct nvmet_subsys_link *p;
73 struct nvmet_port *r;
74 u32 numrec = 0;
75 u16 status = 0;
76
77 /*
78 * Make sure we allocate at least a buffer the size of the response
79 * header.  If the host-provided data length is less than the header
80 * size, only the number of bytes requested by the host are sent back.
81 */
82 hdr = kzalloc(alloc_len, GFP_KERNEL);
83 if (!hdr) {
84 status = NVME_SC_INTERNAL;
85 goto out;
86 }
87
88 down_read(&nvmet_config_sem);
89 list_for_each_entry(p, &req->port->subsystems, entry) {
90 if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
91 continue;
92 if (residual_len >= entry_size) {
93 nvmet_format_discovery_entry(hdr, req->port,
94 p->subsys->subsysnqn,
95 NVME_NQN_NVME, numrec);
96 residual_len -= entry_size;
97 }
98 numrec++;
99 }
100
101 list_for_each_entry(r, &req->port->referrals, entry) {
102 if (residual_len >= entry_size) {
103 nvmet_format_discovery_entry(hdr, r,
104 NVME_DISC_SUBSYS_NAME,
105 NVME_NQN_DISC, numrec);
106 residual_len -= entry_size;
107 }
108 numrec++;
109 }
110
111 hdr->genctr = cpu_to_le64(nvmet_genctr);
112 hdr->numrec = cpu_to_le64(numrec);
113 hdr->recfmt = cpu_to_le16(0);
114
115 up_read(&nvmet_config_sem);
116
117 status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
118 kfree(hdr);
119out:
120 nvmet_req_complete(req, status);
121}
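[Editor's note] A sizing note that may help when reading the truncation logic above: the response header and each log page entry are both 1024 bytes (the BUILD_BUG_ONs in nvmet_exit() enforce this), so a host that wants n discovery records needs a data length of at least (n + 1) * 1024 bytes.

/*
 * Example: two exported subsystems plus one referral (numrec == 3) need
 * 1024 + 3 * 1024 = 4096 bytes; a 2048-byte Get Log Page only carries the
 * header plus the first entry, while hdr->numrec still reports 3 so the
 * host can retry with a larger buffer.
 */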
122
123static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
124{
125 struct nvmet_ctrl *ctrl = req->sq->ctrl;
126 struct nvme_id_ctrl *id;
127 u16 status = 0;
128
129 id = kzalloc(sizeof(*id), GFP_KERNEL);
130 if (!id) {
131 status = NVME_SC_INTERNAL;
132 goto out;
133 }
134
135 memset(id->fr, ' ', sizeof(id->fr));
136 strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
137
138 /* no limit on data transfer sizes for now */
139 id->mdts = 0;
140 id->cntlid = cpu_to_le16(ctrl->cntlid);
141 id->ver = cpu_to_le32(ctrl->subsys->ver);
142 id->lpa = (1 << 2);
143
144 /* no enforced soft limit for maxcmd - pick an arbitrarily high value */
145 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
146
147 id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
148 if (ctrl->ops->has_keyed_sgls)
149 id->sgls |= cpu_to_le32(1 << 2);
150 if (ctrl->ops->sqe_inline_size)
151 id->sgls |= cpu_to_le32(1 << 20);
152
153 strcpy(id->subnqn, ctrl->subsys->subsysnqn);
154
155 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
156
157 kfree(id);
158out:
159 nvmet_req_complete(req, status);
160}
161
162int nvmet_parse_discovery_cmd(struct nvmet_req *req)
163{
164 struct nvme_command *cmd = req->cmd;
165
166 req->ns = NULL;
167
168 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
169 pr_err("nvmet: got cmd %d while not ready\n",
170 cmd->common.opcode);
171 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
172 }
173
174 switch (cmd->common.opcode) {
175 case nvme_admin_get_log_page:
176 req->data_len = nvmet_get_log_page_len(cmd);
177
178 switch (cmd->get_log_page.lid) {
179 case NVME_LOG_DISC:
180 req->execute = nvmet_execute_get_disc_log_page;
181 return 0;
182 default:
183 pr_err("nvmet: unsupported get_log_page lid %d\n",
184 cmd->get_log_page.lid);
185 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
186 }
187 case nvme_admin_identify:
188 req->data_len = 4096;
189 switch (le32_to_cpu(cmd->identify.cns)) {
190 case 0x01:
191 req->execute =
192 nvmet_execute_identify_disc_ctrl;
193 return 0;
194 default:
195 pr_err("nvmet: unsupported identify cns %d\n",
196 le32_to_cpu(cmd->identify.cns));
197 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
198 }
199 default:
200 pr_err("nvmet: unsupported cmd %d\n",
201 cmd->common.opcode);
202 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
203 }
204
205 pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
206 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
207}
208
209int __init nvmet_init_discovery(void)
210{
211 nvmet_disc_subsys =
212 nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
213 if (!nvmet_disc_subsys)
214 return -ENOMEM;
215 return 0;
216}
217
218void nvmet_exit_discovery(void)
219{
220 nvmet_subsys_put(nvmet_disc_subsys);
221}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
new file mode 100644
index 000000000000..9a97ae67e656
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -0,0 +1,240 @@
1/*
2 * NVMe Fabrics command implementation.
3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/blkdev.h>
16#include "nvmet.h"
17
18static void nvmet_execute_prop_set(struct nvmet_req *req)
19{
20 u16 status = 0;
21
22 if (!(req->cmd->prop_set.attrib & 1)) {
23 u64 val = le64_to_cpu(req->cmd->prop_set.value);
24
25 switch (le32_to_cpu(req->cmd->prop_set.offset)) {
26 case NVME_REG_CC:
27 nvmet_update_cc(req->sq->ctrl, val);
28 break;
29 default:
30 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
31 break;
32 }
33 } else {
34 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
35 }
36
37 nvmet_req_complete(req, status);
38}
39
40static void nvmet_execute_prop_get(struct nvmet_req *req)
41{
42 struct nvmet_ctrl *ctrl = req->sq->ctrl;
43 u16 status = 0;
44 u64 val = 0;
45
46 if (req->cmd->prop_get.attrib & 1) {
47 switch (le32_to_cpu(req->cmd->prop_get.offset)) {
48 case NVME_REG_CAP:
49 val = ctrl->cap;
50 break;
51 default:
52 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
53 break;
54 }
55 } else {
56 switch (le32_to_cpu(req->cmd->prop_get.offset)) {
57 case NVME_REG_VS:
58 val = ctrl->subsys->ver;
59 break;
60 case NVME_REG_CC:
61 val = ctrl->cc;
62 break;
63 case NVME_REG_CSTS:
64 val = ctrl->csts;
65 break;
66 default:
67 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
68 break;
69 }
70 }
71
72 req->rsp->result64 = cpu_to_le64(val);
73 nvmet_req_complete(req, status);
74}
75
76int nvmet_parse_fabrics_cmd(struct nvmet_req *req)
77{
78 struct nvme_command *cmd = req->cmd;
79
80 req->ns = NULL;
81
82 switch (cmd->fabrics.fctype) {
83 case nvme_fabrics_type_property_set:
84 req->data_len = 0;
85 req->execute = nvmet_execute_prop_set;
86 break;
87 case nvme_fabrics_type_property_get:
88 req->data_len = 0;
89 req->execute = nvmet_execute_prop_get;
90 break;
91 default:
92 pr_err("received unknown capsule type 0x%x\n",
93 cmd->fabrics.fctype);
94 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
95 }
96
97 return 0;
98}
99
100static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
101{
102 struct nvmf_connect_command *c = &req->cmd->connect;
103 u16 qid = le16_to_cpu(c->qid);
104 u16 sqsize = le16_to_cpu(c->sqsize);
105 struct nvmet_ctrl *old;
106
107 old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
108 if (old) {
109 pr_warn("queue already connected!\n");
110 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
111 }
112
113 nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
114 nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
115 return 0;
116}
117
118static void nvmet_execute_admin_connect(struct nvmet_req *req)
119{
120 struct nvmf_connect_command *c = &req->cmd->connect;
121 struct nvmf_connect_data *d;
122 struct nvmet_ctrl *ctrl = NULL;
123 u16 status = 0;
124
125 d = kmap(sg_page(req->sg)) + req->sg->offset;
126
127 /* zero out initial completion result, assign values as needed */
128 req->rsp->result = 0;
129
130 if (c->recfmt != 0) {
131 pr_warn("invalid connect version (%d).\n",
132 le16_to_cpu(c->recfmt));
133 status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
134 goto out;
135 }
136
137 if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
138 pr_warn("connect attempt for invalid controller ID %#x\n",
139 d->cntlid);
140 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
141 req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
142 goto out;
143 }
144
145 status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
146 le32_to_cpu(c->kato), &ctrl);
147 if (status)
148 goto out;
149
150 status = nvmet_install_queue(ctrl, req);
151 if (status) {
152 nvmet_ctrl_put(ctrl);
153 goto out;
154 }
155
156 pr_info("creating controller %d for NQN %s.\n",
157 ctrl->cntlid, ctrl->hostnqn);
158 req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
159
160out:
161 kunmap(sg_page(req->sg));
162 nvmet_req_complete(req, status);
163}
164
165static void nvmet_execute_io_connect(struct nvmet_req *req)
166{
167 struct nvmf_connect_command *c = &req->cmd->connect;
168 struct nvmf_connect_data *d;
169 struct nvmet_ctrl *ctrl = NULL;
170 u16 qid = le16_to_cpu(c->qid);
171 u16 status = 0;
172
173 d = kmap(sg_page(req->sg)) + req->sg->offset;
174
175 /* zero out initial completion result, assign values as needed */
176 req->rsp->result = 0;
177
178 if (c->recfmt != 0) {
179 pr_warn("invalid connect version (%d).\n",
180 le16_to_cpu(c->recfmt));
181 status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
182 goto out;
183 }
184
185 status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
186 le16_to_cpu(d->cntlid),
187 req, &ctrl);
188 if (status)
189 goto out;
190
191 if (unlikely(qid > ctrl->subsys->max_qid)) {
192 pr_warn("invalid queue id (%d)\n", qid);
193 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
194 req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
195 goto out_ctrl_put;
196 }
197
198 status = nvmet_install_queue(ctrl, req);
199 if (status) {
200 /* pass back the cntlid of the controller that failed to install the queue */
201 req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
202 goto out_ctrl_put;
203 }
204
205 pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
206
207out:
208 kunmap(sg_page(req->sg));
209 nvmet_req_complete(req, status);
210 return;
211
212out_ctrl_put:
213 nvmet_ctrl_put(ctrl);
214 goto out;
215}
216
217int nvmet_parse_connect_cmd(struct nvmet_req *req)
218{
219 struct nvme_command *cmd = req->cmd;
220
221 req->ns = NULL;
222
223 if (req->cmd->common.opcode != nvme_fabrics_command) {
224 pr_err("invalid command 0x%x on unconnected queue.\n",
225 cmd->fabrics.opcode);
226 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
227 }
228 if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
229 pr_err("invalid capsule type 0x%x on unconnected queue.\n",
230 cmd->fabrics.fctype);
231 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
232 }
233
234 req->data_len = sizeof(struct nvmf_connect_data);
235 if (cmd->connect.qid == 0)
236 req->execute = nvmet_execute_admin_connect;
237 else
238 req->execute = nvmet_execute_io_connect;
239 return 0;
240}
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
new file mode 100644
index 000000000000..2cd069b691ae
--- /dev/null
+++ b/drivers/nvme/target/io-cmd.c
@@ -0,0 +1,215 @@
1/*
2 * NVMe I/O command implementation.
3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/blkdev.h>
16#include <linux/module.h>
17#include "nvmet.h"
18
19static void nvmet_bio_done(struct bio *bio)
20{
21 struct nvmet_req *req = bio->bi_private;
22
23 nvmet_req_complete(req,
24 bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
25
26 if (bio != &req->inline_bio)
27 bio_put(bio);
28}
29
30static inline u32 nvmet_rw_len(struct nvmet_req *req)
31{
32 return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
33 req->ns->blksize_shift;
34}
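[Editor's note] nvmet_rw_len() converts the 0's based NLB field into a byte count using the namespace block size; a quick worked example:

/*
 * Example: rw.length == 7 (i.e. eight logical blocks) on a namespace with
 * blksize_shift == 12 (4 KiB blocks) gives (7 + 1) << 12 = 32768 bytes.
 */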
35
36static void nvmet_inline_bio_init(struct nvmet_req *req)
37{
38 struct bio *bio = &req->inline_bio;
39
40 bio_init(bio);
41 bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
42 bio->bi_io_vec = req->inline_bvec;
43}
44
45static void nvmet_execute_rw(struct nvmet_req *req)
46{
47 int sg_cnt = req->sg_cnt;
48 struct scatterlist *sg;
49 struct bio *bio;
50 sector_t sector;
51 blk_qc_t cookie;
52 int op, op_flags = 0, i;
53
54 if (!req->sg_cnt) {
55 nvmet_req_complete(req, 0);
56 return;
57 }
58
59 if (req->cmd->rw.opcode == nvme_cmd_write) {
60 op = REQ_OP_WRITE;
61 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
62 op_flags |= REQ_FUA;
63 } else {
64 op = REQ_OP_READ;
65 }
66
67 sector = le64_to_cpu(req->cmd->rw.slba);
68 sector <<= (req->ns->blksize_shift - 9);
69
70 nvmet_inline_bio_init(req);
71 bio = &req->inline_bio;
72 bio->bi_bdev = req->ns->bdev;
73 bio->bi_iter.bi_sector = sector;
74 bio->bi_private = req;
75 bio->bi_end_io = nvmet_bio_done;
76 bio_set_op_attrs(bio, op, op_flags);
77
78 for_each_sg(req->sg, sg, req->sg_cnt, i) {
79 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
80 != sg->length) {
81 struct bio *prev = bio;
82
83 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
84 bio->bi_bdev = req->ns->bdev;
85 bio->bi_iter.bi_sector = sector;
86 bio_set_op_attrs(bio, op, op_flags);
87
88 bio_chain(bio, prev);
89 cookie = submit_bio(prev);
90 }
91
92 sector += sg->length >> 9;
93 sg_cnt--;
94 }
95
96 cookie = submit_bio(bio);
97
98 blk_poll(bdev_get_queue(req->ns->bdev), cookie);
99}
100
101static void nvmet_execute_flush(struct nvmet_req *req)
102{
103 struct bio *bio;
104
105 nvmet_inline_bio_init(req);
106 bio = &req->inline_bio;
107
108 bio->bi_bdev = req->ns->bdev;
109 bio->bi_private = req;
110 bio->bi_end_io = nvmet_bio_done;
111 bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
112
113 submit_bio(bio);
114}
115
116static u16 nvmet_discard_range(struct nvmet_ns *ns,
117 struct nvme_dsm_range *range, struct bio **bio)
118{
119 if (__blkdev_issue_discard(ns->bdev,
120 le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
121 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
122 GFP_KERNEL, 0, bio))
123 return NVME_SC_INTERNAL | NVME_SC_DNR;
124 return 0;
125}
126
127static void nvmet_execute_discard(struct nvmet_req *req)
128{
129 struct nvme_dsm_range range;
130 struct bio *bio = NULL;
131 int i;
132 u16 status;
133
134 for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
135 status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
136 sizeof(range));
137 if (status)
138 break;
139
140 status = nvmet_discard_range(req->ns, &range, &bio);
141 if (status)
142 break;
143 }
144
145 if (bio) {
146 bio->bi_private = req;
147 bio->bi_end_io = nvmet_bio_done;
148 if (status) {
149 bio->bi_error = -EIO;
150 bio_endio(bio);
151 } else {
152 submit_bio(bio);
153 }
154 } else {
155 nvmet_req_complete(req, status);
156 }
157}
158
159static void nvmet_execute_dsm(struct nvmet_req *req)
160{
161 switch (le32_to_cpu(req->cmd->dsm.attributes)) {
162 case NVME_DSMGMT_AD:
163 nvmet_execute_discard(req);
164 return;
165 case NVME_DSMGMT_IDR:
166 case NVME_DSMGMT_IDW:
167 default:
168 /* Not supported yet */
169 nvmet_req_complete(req, 0);
170 return;
171 }
172}
173
174int nvmet_parse_io_cmd(struct nvmet_req *req)
175{
176 struct nvme_command *cmd = req->cmd;
177
178 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
179 pr_err("nvmet: got io cmd %d while CC.EN == 0\n",
180 cmd->common.opcode);
181 req->ns = NULL;
182 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
183 }
184
185 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
186 pr_err("nvmet: got io cmd %d while CSTS.RDY == 0\n",
187 cmd->common.opcode);
188 req->ns = NULL;
189 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
190 }
191
192 req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
193 if (!req->ns)
194 return NVME_SC_INVALID_NS | NVME_SC_DNR;
195
196 switch (cmd->common.opcode) {
197 case nvme_cmd_read:
198 case nvme_cmd_write:
199 req->execute = nvmet_execute_rw;
200 req->data_len = nvmet_rw_len(req);
201 return 0;
202 case nvme_cmd_flush:
203 req->execute = nvmet_execute_flush;
204 req->data_len = 0;
205 return 0;
206 case nvme_cmd_dsm:
207 req->execute = nvmet_execute_dsm;
208 req->data_len = le32_to_cpu(cmd->dsm.nr) *
209 sizeof(struct nvme_dsm_range);
210 return 0;
211 default:
212 pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
213 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
214 }
215}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
new file mode 100644
index 000000000000..57dd6d834c28
--- /dev/null
+++ b/drivers/nvme/target/nvmet.h
@@ -0,0 +1,331 @@
1/*
2 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef _NVMET_H
15#define _NVMET_H
16
17#include <linux/dma-mapping.h>
18#include <linux/types.h>
19#include <linux/device.h>
20#include <linux/kref.h>
21#include <linux/percpu-refcount.h>
22#include <linux/list.h>
23#include <linux/mutex.h>
24#include <linux/nvme.h>
25#include <linux/configfs.h>
26#include <linux/rcupdate.h>
27#include <linux/blkdev.h>
28
29#define NVMET_ASYNC_EVENTS 4
30#define NVMET_ERROR_LOG_SLOTS 128
31
32/* Helper macros used when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
33 * The 16-bit shift sets the IATTR bit to 1, meaning the offending
34 * offset is in the data portion of the Connect command.
35 */
36#define IPO_IATTR_CONNECT_DATA(x) \
37 (cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
38#define IPO_IATTR_CONNECT_SQE(x) \
39 (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
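[Editor's note] To make the encoding concrete, the expansion below is just the macro written out for one field of the Connect data block:

/*
 * IPO_IATTR_CONNECT_DATA(cntlid) expands to
 *   cpu_to_le32((1 << 16) | offsetof(struct nvmf_connect_data, cntlid))
 * i.e. "invalid parameter, offset of cntlid within the Connect data",
 * while IPO_IATTR_CONNECT_SQE(qid) reports an offset within the Connect
 * SQE itself (IATTR == 0).
 */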
40
41struct nvmet_ns {
42 struct list_head dev_link;
43 struct percpu_ref ref;
44 struct block_device *bdev;
45 u32 nsid;
46 u32 blksize_shift;
47 loff_t size;
48 u8 nguid[16];
49
50 struct nvmet_subsys *subsys;
51 const char *device_path;
52
53 struct config_group device_group;
54 struct config_group group;
55
56 struct completion disable_done;
57};
58
59static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
60{
61 return container_of(to_config_group(item), struct nvmet_ns, group);
62}
63
64static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
65{
66 return !list_empty_careful(&ns->dev_link);
67}
68
69struct nvmet_cq {
70 u16 qid;
71 u16 size;
72};
73
74struct nvmet_sq {
75 struct nvmet_ctrl *ctrl;
76 struct percpu_ref ref;
77 u16 qid;
78 u16 size;
79 struct completion free_done;
80};
81
82/**
83 * struct nvmet_port - Common structure to keep port
84 * information for the target.
85 * @entry: List head for holding a list of these elements.
86 * @disc_addr: Address information is stored in a format defined
87 * for a discovery log page entry.
88 * @group: ConfigFS group for this element's folder.
89 * @priv: Private data for the transport.
90 */
91struct nvmet_port {
92 struct list_head entry;
93 struct nvmf_disc_rsp_page_entry disc_addr;
94 struct config_group group;
95 struct config_group subsys_group;
96 struct list_head subsystems;
97 struct config_group referrals_group;
98 struct list_head referrals;
99 void *priv;
100 bool enabled;
101};
102
103static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
104{
105 return container_of(to_config_group(item), struct nvmet_port,
106 group);
107}
108
109struct nvmet_ctrl {
110 struct nvmet_subsys *subsys;
111 struct nvmet_cq **cqs;
112 struct nvmet_sq **sqs;
113
114 struct mutex lock;
115 u64 cap;
116 u32 cc;
117 u32 csts;
118
119 u16 cntlid;
120 u32 kato;
121
122 struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
123 unsigned int nr_async_event_cmds;
124 struct list_head async_events;
125 struct work_struct async_event_work;
126
127 struct list_head subsys_entry;
128 struct kref ref;
129 struct delayed_work ka_work;
130 struct work_struct fatal_err_work;
131
132 struct nvmet_fabrics_ops *ops;
133
134 char subsysnqn[NVMF_NQN_FIELD_LEN];
135 char hostnqn[NVMF_NQN_FIELD_LEN];
136};
137
138struct nvmet_subsys {
139 enum nvme_subsys_type type;
140
141 struct mutex lock;
142 struct kref ref;
143
144 struct list_head namespaces;
145 unsigned int max_nsid;
146
147 struct list_head ctrls;
148 struct ida cntlid_ida;
149
150 struct list_head hosts;
151 bool allow_any_host;
152
153 u16 max_qid;
154
155 u64 ver;
156 char *subsysnqn;
157
158 struct config_group group;
159
160 struct config_group namespaces_group;
161 struct config_group allowed_hosts_group;
162};
163
164static inline struct nvmet_subsys *to_subsys(struct config_item *item)
165{
166 return container_of(to_config_group(item), struct nvmet_subsys, group);
167}
168
169static inline struct nvmet_subsys *namespaces_to_subsys(
170 struct config_item *item)
171{
172 return container_of(to_config_group(item), struct nvmet_subsys,
173 namespaces_group);
174}
175
176struct nvmet_host {
177 struct config_group group;
178};
179
180static inline struct nvmet_host *to_host(struct config_item *item)
181{
182 return container_of(to_config_group(item), struct nvmet_host, group);
183}
184
185static inline char *nvmet_host_name(struct nvmet_host *host)
186{
187 return config_item_name(&host->group.cg_item);
188}
189
190struct nvmet_host_link {
191 struct list_head entry;
192 struct nvmet_host *host;
193};
194
195struct nvmet_subsys_link {
196 struct list_head entry;
197 struct nvmet_subsys *subsys;
198};
199
200struct nvmet_req;
201struct nvmet_fabrics_ops {
202 struct module *owner;
203 unsigned int type;
204 unsigned int sqe_inline_size;
205 unsigned int msdbd;
206 bool has_keyed_sgls : 1;
207 void (*queue_response)(struct nvmet_req *req);
208 int (*add_port)(struct nvmet_port *port);
209 void (*remove_port)(struct nvmet_port *port);
210 void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
211};
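[Editor's note] struct nvmet_fabrics_ops is the whole contract between the core and a transport. A hedged sketch of how a transport module might register itself; every my_xport_* symbol and NVMF_TRTYPE_MY_XPORT are hypothetical placeholders, only nvmet_register_transport()/nvmet_unregister_transport() (declared further down in this header) come from this patch.

static struct nvmet_fabrics_ops my_xport_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_MY_XPORT,
	.sqe_inline_size	= 4096,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	/* callbacks implemented elsewhere in the hypothetical module: */
	.queue_response		= my_xport_queue_response,
	.add_port		= my_xport_add_port,
	.remove_port		= my_xport_remove_port,
	.delete_ctrl		= my_xport_delete_ctrl,
};

static int __init my_xport_init(void)
{
	return nvmet_register_transport(&my_xport_ops);
}

static void __exit my_xport_exit(void)
{
	nvmet_unregister_transport(&my_xport_ops);
}

module_init(my_xport_init);
module_exit(my_xport_exit);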
212
213#define NVMET_MAX_INLINE_BIOVEC 8
214
215struct nvmet_req {
216 struct nvme_command *cmd;
217 struct nvme_completion *rsp;
218 struct nvmet_sq *sq;
219 struct nvmet_cq *cq;
220 struct nvmet_ns *ns;
221 struct scatterlist *sg;
222 struct bio inline_bio;
223 struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
224 int sg_cnt;
225 size_t data_len;
226
227 struct nvmet_port *port;
228
229 void (*execute)(struct nvmet_req *req);
230 struct nvmet_fabrics_ops *ops;
231};
232
233static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
234{
235 req->rsp->status = cpu_to_le16(status << 1);
236}
237
238static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
239{
240 req->rsp->result = cpu_to_le32(result);
241}
242
243/*
244 * NVMe command writes actually are DMA reads for us on the target side.
245 */
246static inline enum dma_data_direction
247nvmet_data_dir(struct nvmet_req *req)
248{
249 return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
250}
251
252struct nvmet_async_event {
253 struct list_head entry;
254 u8 event_type;
255 u8 event_info;
256 u8 log_page;
257};
258
259int nvmet_parse_connect_cmd(struct nvmet_req *req);
260int nvmet_parse_io_cmd(struct nvmet_req *req);
261int nvmet_parse_admin_cmd(struct nvmet_req *req);
262int nvmet_parse_discovery_cmd(struct nvmet_req *req);
263int nvmet_parse_fabrics_cmd(struct nvmet_req *req);
264
265bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
266 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
267void nvmet_req_complete(struct nvmet_req *req, u16 status);
268
269void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
270 u16 size);
271void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
272 u16 size);
273void nvmet_sq_destroy(struct nvmet_sq *sq);
274int nvmet_sq_init(struct nvmet_sq *sq);
275
276void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
277
278void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
279u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
280 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
281u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
282 struct nvmet_req *req, struct nvmet_ctrl **ret);
283void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
284
285struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
286 enum nvme_subsys_type type);
287void nvmet_subsys_put(struct nvmet_subsys *subsys);
288
289struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
290void nvmet_put_namespace(struct nvmet_ns *ns);
291int nvmet_ns_enable(struct nvmet_ns *ns);
292void nvmet_ns_disable(struct nvmet_ns *ns);
293struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
294void nvmet_ns_free(struct nvmet_ns *ns);
295
296int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
297void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);
298
299int nvmet_enable_port(struct nvmet_port *port);
300void nvmet_disable_port(struct nvmet_port *port);
301
302void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
303void nvmet_referral_disable(struct nvmet_port *port);
304
305u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
306 size_t len);
307u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
308 size_t len);
309
310u32 nvmet_get_log_page_len(struct nvme_command *cmd);
311
312#define NVMET_QUEUE_SIZE 1024
313#define NVMET_NR_QUEUES 64
314#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
315#define NVMET_KAS 10
316#define NVMET_DISC_KATO 120
317
318int __init nvmet_init_configfs(void);
319void __exit nvmet_exit_configfs(void);
320
321int __init nvmet_init_discovery(void);
322void nvmet_exit_discovery(void);
323
324extern struct nvmet_subsys *nvmet_disc_subsys;
325extern u64 nvmet_genctr;
326extern struct rw_semaphore nvmet_config_sem;
327
328bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
329 const char *hostnqn);
330
331#endif /* _NVMET_H */