path: root/drivers/nvme/target
author     Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
commit     0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree       41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /drivers/nvme/target
parent     aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent     3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c |   6
-rw-r--r--  drivers/nvme/target/core.c      |   4
-rw-r--r--  drivers/nvme/target/loop.c      |   4
-rw-r--r--  drivers/nvme/target/nvmet.h     |   1
-rw-r--r--  drivers/nvme/target/rdma.c      | 100
5 files changed, 81 insertions(+), 34 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2fac17a5ad53..47c564b5a289 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -13,7 +13,6 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
-#include <linux/random.h>
 #include <generated/utsrelease.h>
 #include "nvmet.h"
 
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
         struct nvmet_ctrl *ctrl = req->sq->ctrl;
         struct nvme_id_ctrl *id;
-        u64 serial;
         u16 status = 0;
 
         id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
         id->vid = 0;
         id->ssvid = 0;
 
-        /* generate a random serial number as our controllers are ephemeral: */
-        get_random_bytes(&serial, sizeof(serial));
         memset(id->sn, ' ', sizeof(id->sn));
-        snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+        snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
 
         memset(id->mn, ' ', sizeof(id->mn));
         strncpy((char *)id->mn, "Linux", sizeof(id->mn));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8a891ca53367..6559d5afa7bf 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -13,6 +13,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/random.h>
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
         memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
         memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
+        /* generate a random serial number as our controllers are ephemeral: */
+        get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
         kref_init(&ctrl->ref);
         ctrl->subsys = subsys;
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 94e782987cc9..7affd40a6b33 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
         struct nvme_loop_ctrl *ctrl = container_of(work,
                                 struct nvme_loop_ctrl, delete_work);
 
-        nvme_remove_namespaces(&ctrl->ctrl);
-        nvme_loop_shutdown_ctrl(ctrl);
         nvme_uninit_ctrl(&ctrl->ctrl);
+        nvme_loop_shutdown_ctrl(ctrl);
         nvme_put_ctrl(&ctrl->ctrl);
 }
 
@@ -501,7 +500,6 @@ out_free_queues:
         nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
         dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-        nvme_remove_namespaces(&ctrl->ctrl);
         nvme_uninit_ctrl(&ctrl->ctrl);
         nvme_put_ctrl(&ctrl->ctrl);
 }
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 57dd6d834c28..76b6eedccaf9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
 
         struct mutex            lock;
         u64                     cap;
+        u64                     serial;
         u32                     cc;
         u32                     csts;
 
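
Taken together, the admin-cmd.c, core.c, and nvmet.h hunks move serial-number generation from Identify time into nvmet_alloc_ctrl(), so a controller now reports one stable serial for its whole lifetime instead of a fresh random value on every Identify. A minimal userspace sketch of that pattern; struct nvmet_ctrl and struct nvme_id_ctrl here are cut-down stand-ins for the kernel structs, and getrandom() stands in for get_random_bytes():

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/random.h>

struct nvmet_ctrl { uint64_t serial; };
struct nvme_id_ctrl { char sn[20]; };

static void ctrl_alloc(struct nvmet_ctrl *ctrl)
{
        /* done once per controller, as in nvmet_alloc_ctrl() above */
        if (getrandom(&ctrl->serial, sizeof(ctrl->serial), 0) != sizeof(ctrl->serial))
                ctrl->serial = 0;       /* demo fallback; get_random_bytes() cannot fail */
}

static void identify(const struct nvmet_ctrl *ctrl, struct nvme_id_ctrl *id)
{
        /* space-pad, then print the hex serial; a u64 needs at most 16 of
         * the 20 bytes, so snprintf() never truncates it */
        memset(id->sn, ' ', sizeof(id->sn));
        snprintf(id->sn, sizeof(id->sn), "%llx",
                 (unsigned long long)ctrl->serial);
}

int main(void)
{
        struct nvmet_ctrl ctrl;
        struct nvme_id_ctrl id1, id2;

        ctrl_alloc(&ctrl);
        identify(&ctrl, &id1);
        identify(&ctrl, &id2);
        /* the same controller now reports the same serial on every Identify */
        printf("%.20s\n%.20s\n", id1.sn, id2.sn);
        return 0;
}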
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e06d504bdf0c..b4d648536c3e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
         NVMET_RDMA_Q_CONNECTING,
         NVMET_RDMA_Q_LIVE,
         NVMET_RDMA_Q_DISCONNECTING,
+        NVMET_RDMA_IN_DEVICE_REMOVAL,
 };
 
 struct nvmet_rdma_queue {
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
         if (!len)
                 return 0;
 
-        /* use the already allocated data buffer if possible */
-        if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-                nvmet_rdma_use_inline_sg(rsp, len, 0);
-        } else {
-                status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                                len);
-                if (status)
-                        return status;
-        }
+        status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+                        len);
+        if (status)
+                return status;
 
         ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                         rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
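
For context, the branch removed above was a small-transfer fast path: payloads that fit in the preallocated inline data buffer skipped allocation, and everything else went through nvmet_rdma_alloc_sgl(); after this hunk the keyed-SGL path always allocates. A plain-C sketch of the dropped pattern, with illustrative names (INLINE_SIZE, buf_for_len) rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define INLINE_SIZE 4096        /* stands in for NVMET_RDMA_INLINE_DATA_SIZE */

static char inline_buf[INLINE_SIZE];

/* small requests reuse the preallocated buffer, large ones hit the allocator */
static void *buf_for_len(size_t len)
{
        return len <= INLINE_SIZE ? (void *)inline_buf : malloc(len);
}

int main(void)
{
        void *small = buf_for_len(512);
        void *large = buf_for_len(64 * 1024);

        printf("small: %s\n", small == inline_buf ? "inline" : "allocated");
        printf("large: %s\n", large == inline_buf ? "inline" : "allocated");
        if (large != inline_buf)
                free(large);
        return 0;
}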
@@ -984,7 +980,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
         struct nvmet_rdma_device *dev = queue->dev;
 
         nvmet_rdma_free_queue(queue);
-        rdma_destroy_id(cm_id);
+
+        if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+                rdma_destroy_id(cm_id);
+
         kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
 
@@ -1233,8 +1232,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
         switch (queue->state) {
         case NVMET_RDMA_Q_CONNECTING:
         case NVMET_RDMA_Q_LIVE:
-                disconnect = true;
                 queue->state = NVMET_RDMA_Q_DISCONNECTING;
+        case NVMET_RDMA_IN_DEVICE_REMOVAL:
+                disconnect = true;
                 break;
         case NVMET_RDMA_Q_DISCONNECTING:
                 break;
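
Note the deliberate fallthrough introduced here: CONNECTING and LIVE first move to DISCONNECTING and then fall into the new IN_DEVICE_REMOVAL case, so all three states request a disconnect, but a queue already marked IN_DEVICE_REMOVAL keeps that state for the release work above to inspect. A standalone model of the state machine, with enum names abbreviated from the diff:

#include <stdbool.h>
#include <stdio.h>

enum q_state {
        Q_CONNECTING,
        Q_LIVE,
        Q_DISCONNECTING,
        Q_IN_DEVICE_REMOVAL,
};

static bool queue_disconnect(enum q_state *state)
{
        bool disconnect = false;

        switch (*state) {
        case Q_CONNECTING:
        case Q_LIVE:
                *state = Q_DISCONNECTING;
                /* fall through */
        case Q_IN_DEVICE_REMOVAL:
                disconnect = true;
                break;
        case Q_DISCONNECTING:
                break;          /* already being torn down: nothing to do */
        }
        return disconnect;
}

int main(void)
{
        enum q_state s = Q_LIVE;
        bool d = queue_disconnect(&s);

        printf("live:    disconnect=%d new_state=%d\n", d, (int)s);

        s = Q_IN_DEVICE_REMOVAL;
        d = queue_disconnect(&s);
        printf("removal: disconnect=%d new_state=%d\n", d, (int)s);
        return 0;
}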
@@ -1272,6 +1272,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
         schedule_work(&queue->release_work);
 }
 
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @queue:        nvmet rdma queue (cm_id qp_context)
+ * @cm_id:        rdma_cm id (its context is the nvmet port)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug, so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device-bound listener cm_id (in which case
+ * queue will be NULL).
+ *
+ * We claim ownership on destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for the port
+ * we nullify the priv to prevent double cm_id destruction and destroying
+ * the cm_id implicitly by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+                struct nvmet_rdma_queue *queue)
+{
+        unsigned long flags;
+
+        if (!queue) {
+                struct nvmet_port *port = cm_id->context;
+
+                /*
+                 * This is a listener cm_id. Make sure that
+                 * future remove_port won't invoke a double
+                 * cm_id destroy. Use atomic xchg to make sure
+                 * we don't compete with remove_port.
+                 */
+                if (xchg(&port->priv, NULL) != cm_id)
+                        return 0;
+        } else {
+                /*
+                 * This is a queue cm_id. Make sure that
+                 * release queue will not destroy the cm_id
+                 * and schedule all ctrl queues removal (only
+                 * if the queue is not disconnecting already).
+                 */
+                spin_lock_irqsave(&queue->state_lock, flags);
+                if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+                        queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+                spin_unlock_irqrestore(&queue->state_lock, flags);
+                nvmet_rdma_queue_disconnect(queue);
+                flush_scheduled_work();
+        }
+
+        /*
+         * We need to return 1 so that the core will destroy
+         * its own ID. What a great API design..
+         */
+        return 1;
+}
+
 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                 struct rdma_cm_event *event)
 {
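
The xchg() in the listener branch above pairs with the xchg() added to nvmet_rdma_remove_port() at the bottom of this patch: whichever path swaps the listener cm_id out of port->priv first owns its destruction, and the loser sees a mismatch (or NULL) and backs off, so the id is destroyed exactly once. A standalone C11 sketch of that handshake; the toy structs and atomic_exchange() stand in for the kernel types and xchg():

#include <stdatomic.h>
#include <stdio.h>

struct cm_id { int unused; };
struct port { _Atomic(struct cm_id *) priv; };

static void destroy_id(struct cm_id *id)
{
        printf("destroying cm_id %p\n", (void *)id);
}

/* DEVICE_REMOVAL path: claim ownership only if we still hold this cm_id */
static int device_removal(struct port *port, struct cm_id *cm_id)
{
        if (atomic_exchange(&port->priv, NULL) != cm_id)
                return 0;       /* remove_port already claimed it */
        return 1;               /* tell the CM core to destroy its own id */
}

/* remove_port path: claim whatever is left, if anything */
static void remove_port(struct port *port)
{
        struct cm_id *cm_id = atomic_exchange(&port->priv, NULL);

        if (cm_id)
                destroy_id(cm_id);
}

int main(void)
{
        struct cm_id id;
        struct port port = { &id };

        printf("device_removal claimed: %d\n", device_removal(&port, &id));
        remove_port(&port);     /* loses the race: sees NULL and does nothing */
        return 0;
}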
@@ -1294,20 +1350,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                 break;
         case RDMA_CM_EVENT_ADDR_CHANGE:
         case RDMA_CM_EVENT_DISCONNECTED:
-        case RDMA_CM_EVENT_DEVICE_REMOVAL:
         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-                /*
-                 * We can get the device removal callback even for a
-                 * CM ID that we aren't actually using. In that case
-                 * the context pointer is NULL, so we shouldn't try
-                 * to disconnect a non-existing queue. But we also
-                 * need to return 1 so that the core will destroy
-                 * it's own ID. What a great API design..
-                 */
-                if (queue)
-                        nvmet_rdma_queue_disconnect(queue);
-                else
-                        ret = 1;
+                nvmet_rdma_queue_disconnect(queue);
+                break;
+        case RDMA_CM_EVENT_DEVICE_REMOVAL:
+                ret = nvmet_rdma_device_removal(cm_id, queue);
                 break;
         case RDMA_CM_EVENT_REJECTED:
         case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,9 +1443,10 @@ out_destroy_id:
 
 static void nvmet_rdma_remove_port(struct nvmet_port *port)
 {
-        struct rdma_cm_id *cm_id = port->priv;
+        struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
 
-        rdma_destroy_id(cm_id);
+        if (cm_id)
+                rdma_destroy_id(cm_id);
 }
 
 static struct nvmet_fabrics_ops nvmet_rdma_ops = {