-rw-r--r--  drivers/infiniband/Kconfig               |    1
-rw-r--r--  drivers/infiniband/Makefile              |    1
-rw-r--r--  drivers/infiniband/ulp/srpt/Kconfig      |   12
-rw-r--r--  drivers/infiniband/ulp/srpt/Makefile     |    2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_dm_mad.h  |  139
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c    | 4073
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h    |  444
7 files changed, 4672 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c1046a..eb0add311dc8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
55source "drivers/infiniband/ulp/ipoib/Kconfig" 55source "drivers/infiniband/ulp/ipoib/Kconfig"
56 56
57source "drivers/infiniband/ulp/srp/Kconfig" 57source "drivers/infiniband/ulp/srp/Kconfig"
58source "drivers/infiniband/ulp/srpt/Kconfig"
58 59
59source "drivers/infiniband/ulp/iser/Kconfig" 60source "drivers/infiniband/ulp/iser/Kconfig"
60 61
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47d3e67..a3b2d8eac86e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT)		+= ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 000000000000..31ee83d528d9
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
1config INFINIBAND_SRPT
2 tristate "InfiniBand SCSI RDMA Protocol target support"
3 depends on INFINIBAND && TARGET_CORE
4 ---help---
5
6	  Support for the SCSI RDMA Protocol (SRP) target driver. SRP
7	  allows an initiator to access a block storage device on another
8	  host (the target) over a network that supports the RDMA
9	  protocol. Currently the RDMA protocol is supported by InfiniBand
10	  and by iWARP network hardware. More information about the SRP
11	  protocol can be found on the website of the INCITS T10 technical
12	  committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 000000000000..e3ee4bdfffa5
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
1ccflags-y := -Idrivers/target
2obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 000000000000..fb1de1f6f297
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef IB_DM_MAD_H
35#define IB_DM_MAD_H
36
37#include <linux/types.h>
38
39#include <rdma/ib_mad.h>
40
41enum {
42 /*
43 * See also section 13.4.7 Status Field, table 115 MAD Common Status
44 * Field Bit Values and also section 16.3.1.1 Status Field in the
45 * InfiniBand Architecture Specification.
46 */
47 DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
48 DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
49 DM_MAD_STATUS_INVALID_FIELD = 0x001c,
50 DM_MAD_STATUS_NO_IOC = 0x0100,
51
52 /*
53 * See also the Device Management chapter, section 16.3.3 Attributes,
54 * table 279 Device Management Attributes in the InfiniBand
55 * Architecture Specification.
56 */
57 DM_ATTR_CLASS_PORT_INFO = 0x01,
58 DM_ATTR_IOU_INFO = 0x10,
59 DM_ATTR_IOC_PROFILE = 0x11,
60 DM_ATTR_SVC_ENTRIES = 0x12
61};
62
63struct ib_dm_hdr {
64 u8 reserved[28];
65};
66
67/*
68 * Structure of management datagram sent by the SRP target implementation.
69 * Contains a management datagram header, reliable multi-packet transaction
70 * protocol (RMPP) header and ib_dm_hdr. Notes:
71 * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
72 * management datagrams.
73 * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
74 * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
75 * - The maximum supported size for a management datagram when not using RMPP
76 * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
77 */
78struct ib_dm_mad {
79 struct ib_mad_hdr mad_hdr;
80 struct ib_rmpp_hdr rmpp_hdr;
81 struct ib_dm_hdr dm_hdr;
82 u8 data[IB_MGMT_DEVICE_DATA];
83};
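/*
 * Illustrative note (not part of the original patch): the 64-byte header in
 * front of the data[] member breaks down as 24 bytes of struct ib_mad_hdr,
 * 12 bytes of struct ib_rmpp_hdr and 28 bytes of struct ib_dm_hdr, which
 * together equal IB_MGMT_DEVICE_HDR. ib_srpt.c below verifies this at
 * compile time:
 *
 *	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
 */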
84
85/*
86 * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
87 * Architecture Specification.
88 */
89struct ib_dm_iou_info {
90 __be16 change_id;
91 u8 max_controllers;
92 u8 op_rom;
93 u8 controller_list[128];
94};
95
96/*
 97 * IOControllerProfile as defined in section 16.3.3.4 IOControllerProfile of
98 * the InfiniBand Architecture Specification.
99 */
100struct ib_dm_ioc_profile {
101 __be64 guid;
102 __be32 vendor_id;
103 __be32 device_id;
104 __be16 device_version;
105 __be16 reserved1;
106 __be32 subsys_vendor_id;
107 __be32 subsys_device_id;
108 __be16 io_class;
109 __be16 io_subclass;
110 __be16 protocol;
111 __be16 protocol_version;
112 __be16 service_conn;
113 __be16 initiators_supported;
114 __be16 send_queue_depth;
115 u8 reserved2;
116 u8 rdma_read_depth;
117 __be32 send_size;
118 __be32 rdma_size;
119 u8 op_cap_mask;
120 u8 svc_cap_mask;
121 u8 num_svc_entries;
122 u8 reserved3[9];
123 u8 id_string[64];
124};
125
126struct ib_dm_svc_entry {
127 u8 name[40];
128 __be64 id;
129};
130
131/*
132 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
133 * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
134 */
135struct ib_dm_svc_entries {
136 struct ib_dm_svc_entry service_entries[4];
137};
138
139#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 000000000000..cd5d05e22a77
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/atomic.h>
44#include <scsi/scsi_tcq.h>
45#include <target/configfs_macros.h>
46#include <target/target_core_base.h>
47#include <target/target_core_fabric_configfs.h>
48#include <target/target_core_fabric.h>
49#include <target/target_core_configfs.h>
50#include "ib_srpt.h"
51
52/* Name of this kernel module. */
53#define DRV_NAME "ib_srpt"
54#define DRV_VERSION "2.0.0"
55#define DRV_RELDATE "2011-02-14"
56
57#define SRPT_ID_STRING "Linux SRP target"
58
59#undef pr_fmt
60#define pr_fmt(fmt) DRV_NAME " " fmt
61
62MODULE_AUTHOR("Vu Pham and Bart Van Assche");
63MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
64 "v" DRV_VERSION " (" DRV_RELDATE ")");
65MODULE_LICENSE("Dual BSD/GPL");
66
67/*
68 * Global Variables
69 */
70
71static u64 srpt_service_guid;
72static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
73static struct list_head srpt_dev_list; /* List of srpt_device structures. */
74
75static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
76module_param(srp_max_req_size, int, 0444);
77MODULE_PARM_DESC(srp_max_req_size,
78 "Maximum size of SRP request messages in bytes.");
79
80static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
81module_param(srpt_srq_size, int, 0444);
82MODULE_PARM_DESC(srpt_srq_size,
83 "Shared receive queue (SRQ) size.");
84
85static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
86{
87 return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
88}
89module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
90 0444);
91MODULE_PARM_DESC(srpt_service_guid,
92 "Using this value for ioc_guid, id_ext, and cm_listen_id"
93 " instead of using the node_guid of the first HCA.");
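/*
 * Illustrative usage (not part of the original patch; the values shown are
 * arbitrary examples, not the defaults): the parameters declared above are
 * read-only in sysfs (mode 0444) but can be overridden at module load time,
 * e.g.
 *
 *	modprobe ib_srpt srp_max_req_size=2116 srpt_srq_size=2048
 */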
94
95static struct ib_client srpt_client;
96static struct target_fabric_configfs *srpt_target;
97static void srpt_release_channel(struct srpt_rdma_ch *ch);
98static int srpt_queue_status(struct se_cmd *cmd);
99
100/**
101 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
102 */
103static inline
104enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
105{
106 switch (dir) {
107 case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
108 case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
109 default: return dir;
110 }
111}
112
113/**
114 * srpt_sdev_name() - Return the name associated with the HCA.
115 *
116 * Examples are ib0, ib1, ...
117 */
118static inline const char *srpt_sdev_name(struct srpt_device *sdev)
119{
120 return sdev->device->name;
121}
122
123static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
124{
125 unsigned long flags;
126 enum rdma_ch_state state;
127
128 spin_lock_irqsave(&ch->spinlock, flags);
129 state = ch->state;
130 spin_unlock_irqrestore(&ch->spinlock, flags);
131 return state;
132}
133
134static enum rdma_ch_state
135srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
136{
137 unsigned long flags;
138 enum rdma_ch_state prev;
139
140 spin_lock_irqsave(&ch->spinlock, flags);
141 prev = ch->state;
142 ch->state = new_state;
143 spin_unlock_irqrestore(&ch->spinlock, flags);
144 return prev;
145}
146
147/**
148 * srpt_test_and_set_ch_state() - Test and set the channel state.
149 *
150 * Returns true if and only if the channel state has been set to the new state.
151 */
152static bool
153srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
154 enum rdma_ch_state new)
155{
156 unsigned long flags;
157 enum rdma_ch_state prev;
158
159 spin_lock_irqsave(&ch->spinlock, flags);
160 prev = ch->state;
161 if (prev == old)
162 ch->state = new;
163 spin_unlock_irqrestore(&ch->spinlock, flags);
164 return prev == old;
165}
166
167/**
168 * srpt_event_handler() - Asynchronous IB event callback function.
169 *
170 * Callback function called by the InfiniBand core when an asynchronous IB
171 * event occurs. This callback may occur in interrupt context. See also
172 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
173 * Architecture Specification.
174 */
175static void srpt_event_handler(struct ib_event_handler *handler,
176 struct ib_event *event)
177{
178 struct srpt_device *sdev;
179 struct srpt_port *sport;
180
181 sdev = ib_get_client_data(event->device, &srpt_client);
182 if (!sdev || sdev->device != event->device)
183 return;
184
185 pr_debug("ASYNC event= %d on device= %s\n", event->event,
186 srpt_sdev_name(sdev));
187
188 switch (event->event) {
189 case IB_EVENT_PORT_ERR:
190 if (event->element.port_num <= sdev->device->phys_port_cnt) {
191 sport = &sdev->port[event->element.port_num - 1];
192 sport->lid = 0;
193 sport->sm_lid = 0;
194 }
195 break;
196 case IB_EVENT_PORT_ACTIVE:
197 case IB_EVENT_LID_CHANGE:
198 case IB_EVENT_PKEY_CHANGE:
199 case IB_EVENT_SM_CHANGE:
200 case IB_EVENT_CLIENT_REREGISTER:
201 /* Refresh port data asynchronously. */
202 if (event->element.port_num <= sdev->device->phys_port_cnt) {
203 sport = &sdev->port[event->element.port_num - 1];
204 if (!sport->lid && !sport->sm_lid)
205 schedule_work(&sport->work);
206 }
207 break;
208 default:
209 printk(KERN_ERR "received unrecognized IB event %d\n",
210 event->event);
211 break;
212 }
213}
214
215/**
216 * srpt_srq_event() - SRQ event callback function.
217 */
218static void srpt_srq_event(struct ib_event *event, void *ctx)
219{
220 printk(KERN_INFO "SRQ event %d\n", event->event);
221}
222
223/**
224 * srpt_qp_event() - QP event callback function.
225 */
226static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
227{
228 pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
229 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
230
231 switch (event->event) {
232 case IB_EVENT_COMM_EST:
233 ib_cm_notify(ch->cm_id, event->event);
234 break;
235 case IB_EVENT_QP_LAST_WQE_REACHED:
236 if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
237 CH_RELEASING))
238 srpt_release_channel(ch);
239 else
240 pr_debug("%s: state %d - ignored LAST_WQE.\n",
241 ch->sess_name, srpt_get_ch_state(ch));
242 break;
243 default:
244 printk(KERN_ERR "received unrecognized IB QP event %d\n",
245 event->event);
246 break;
247 }
248}
249
250/**
251 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
252 *
253 * @slot: one-based slot number.
254 * @value: four-bit value.
255 *
 256 * Copies the lowest four bits of value into element slot of the array of
 257 * four-bit elements called c_list (controller list). The slot index is one-based.
258 */
259static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
260{
261 u16 id;
262 u8 tmp;
263
264 id = (slot - 1) / 2;
265 if (slot & 0x1) {
266 tmp = c_list[id] & 0xf;
267 c_list[id] = (value << 4) | tmp;
268 } else {
269 tmp = c_list[id] & 0xf0;
270 c_list[id] = (value & 0xf) | tmp;
271 }
272}
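/*
 * Worked example for the nibble packing above (illustrative; not part of the
 * original patch). Assuming c_list[] starts out zeroed:
 *
 *	srpt_set_ioc(c_list, 1, 1);	=> c_list[0] == 0x10 (slot 1 = high nibble)
 *	srpt_set_ioc(c_list, 2, 1);	=> c_list[0] == 0x11 (slot 2 = low nibble)
 *
 * Two consecutive one-based slots hence share one byte of the controller
 * list, which is how srpt_get_iou() below marks slot 1 as present and slots
 * 2..16 as empty.
 */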
273
274/**
275 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
276 *
277 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
278 * Specification.
279 */
280static void srpt_get_class_port_info(struct ib_dm_mad *mad)
281{
282 struct ib_class_port_info *cif;
283
284 cif = (struct ib_class_port_info *)mad->data;
285 memset(cif, 0, sizeof *cif);
286 cif->base_version = 1;
287 cif->class_version = 1;
288 cif->resp_time_value = 20;
289
290 mad->mad_hdr.status = 0;
291}
292
293/**
294 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
295 *
296 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
297 * Specification. See also section B.7, table B.6 in the SRP r16a document.
298 */
299static void srpt_get_iou(struct ib_dm_mad *mad)
300{
301 struct ib_dm_iou_info *ioui;
302 u8 slot;
303 int i;
304
305 ioui = (struct ib_dm_iou_info *)mad->data;
306 ioui->change_id = __constant_cpu_to_be16(1);
307 ioui->max_controllers = 16;
308
309 /* set present for slot 1 and empty for the rest */
310 srpt_set_ioc(ioui->controller_list, 1, 1);
311 for (i = 1, slot = 2; i < 16; i++, slot++)
312 srpt_set_ioc(ioui->controller_list, slot, 0);
313
314 mad->mad_hdr.status = 0;
315}
316
317/**
 318 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
319 *
320 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
321 * Architecture Specification. See also section B.7, table B.7 in the SRP
322 * r16a document.
323 */
324static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
325 struct ib_dm_mad *mad)
326{
327 struct srpt_device *sdev = sport->sdev;
328 struct ib_dm_ioc_profile *iocp;
329
330 iocp = (struct ib_dm_ioc_profile *)mad->data;
331
332 if (!slot || slot > 16) {
333 mad->mad_hdr.status
334 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
335 return;
336 }
337
338 if (slot > 2) {
339 mad->mad_hdr.status
340 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
341 return;
342 }
343
344 memset(iocp, 0, sizeof *iocp);
345 strcpy(iocp->id_string, SRPT_ID_STRING);
346 iocp->guid = cpu_to_be64(srpt_service_guid);
347 iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
348 iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
349 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
350 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
351 iocp->subsys_device_id = 0x0;
352 iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
353 iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
354 iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
355 iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
356 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
357 iocp->rdma_read_depth = 4;
358 iocp->send_size = cpu_to_be32(srp_max_req_size);
359 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
360 1U << 24));
361 iocp->num_svc_entries = 1;
362 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
363 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
364
365 mad->mad_hdr.status = 0;
366}
367
368/**
369 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
370 *
371 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
372 * Specification. See also section B.7, table B.8 in the SRP r16a document.
373 */
374static void srpt_get_svc_entries(u64 ioc_guid,
375 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
376{
377 struct ib_dm_svc_entries *svc_entries;
378
379 WARN_ON(!ioc_guid);
380
381 if (!slot || slot > 16) {
382 mad->mad_hdr.status
383 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
384 return;
385 }
386
387 if (slot > 2 || lo > hi || hi > 1) {
388 mad->mad_hdr.status
389 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
390 return;
391 }
392
393 svc_entries = (struct ib_dm_svc_entries *)mad->data;
394 memset(svc_entries, 0, sizeof *svc_entries);
395 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
396 snprintf(svc_entries->service_entries[0].name,
397 sizeof(svc_entries->service_entries[0].name),
398 "%s%016llx",
399 SRP_SERVICE_NAME_PREFIX,
400 ioc_guid);
401
402 mad->mad_hdr.status = 0;
403}
404
405/**
406 * srpt_mgmt_method_get() - Process a received management datagram.
407 * @sp: source port through which the MAD has been received.
408 * @rq_mad: received MAD.
409 * @rsp_mad: response MAD.
410 */
411static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
412 struct ib_dm_mad *rsp_mad)
413{
414 u16 attr_id;
415 u32 slot;
416 u8 hi, lo;
417
418 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
419 switch (attr_id) {
420 case DM_ATTR_CLASS_PORT_INFO:
421 srpt_get_class_port_info(rsp_mad);
422 break;
423 case DM_ATTR_IOU_INFO:
424 srpt_get_iou(rsp_mad);
425 break;
426 case DM_ATTR_IOC_PROFILE:
427 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
428 srpt_get_ioc(sp, slot, rsp_mad);
429 break;
430 case DM_ATTR_SVC_ENTRIES:
431 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
432 hi = (u8) ((slot >> 8) & 0xff);
433 lo = (u8) (slot & 0xff);
434 slot = (u16) ((slot >> 16) & 0xffff);
435 srpt_get_svc_entries(srpt_service_guid,
436 slot, hi, lo, rsp_mad);
437 break;
438 default:
439 rsp_mad->mad_hdr.status =
440 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
441 break;
442 }
443}
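/*
 * Illustrative decoding of the DM_ATTR_SVC_ENTRIES attribute modifier handled
 * above (not part of the original patch): for attr_mod == 0x00010000 the code
 * yields slot = 1, hi = 0 and lo = 0, i.e. a request for service entry 0 of
 * the I/O controller in slot 1.
 */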
444
445/**
446 * srpt_mad_send_handler() - Post MAD-send callback function.
447 */
448static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
449 struct ib_mad_send_wc *mad_wc)
450{
451 ib_destroy_ah(mad_wc->send_buf->ah);
452 ib_free_send_mad(mad_wc->send_buf);
453}
454
455/**
456 * srpt_mad_recv_handler() - MAD reception callback function.
457 */
458static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
459 struct ib_mad_recv_wc *mad_wc)
460{
461 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
462 struct ib_ah *ah;
463 struct ib_mad_send_buf *rsp;
464 struct ib_dm_mad *dm_mad;
465
466 if (!mad_wc || !mad_wc->recv_buf.mad)
467 return;
468
469 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
470 mad_wc->recv_buf.grh, mad_agent->port_num);
471 if (IS_ERR(ah))
472 goto err;
473
474 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
475
476 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
477 mad_wc->wc->pkey_index, 0,
478 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
479 GFP_KERNEL);
480 if (IS_ERR(rsp))
481 goto err_rsp;
482
483 rsp->ah = ah;
484
485 dm_mad = rsp->mad;
486 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
487 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
488 dm_mad->mad_hdr.status = 0;
489
490 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
491 case IB_MGMT_METHOD_GET:
492 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
493 break;
494 case IB_MGMT_METHOD_SET:
495 dm_mad->mad_hdr.status =
496 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 break;
498 default:
499 dm_mad->mad_hdr.status =
500 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 break;
502 }
503
504 if (!ib_post_send_mad(rsp, NULL)) {
505 ib_free_recv_mad(mad_wc);
506 /* will destroy_ah & free_send_mad in send completion */
507 return;
508 }
509
510 ib_free_send_mad(rsp);
511
512err_rsp:
513 ib_destroy_ah(ah);
514err:
515 ib_free_recv_mad(mad_wc);
516}
517
518/**
519 * srpt_refresh_port() - Configure a HCA port.
520 *
521 * Enable InfiniBand management datagram processing, update the cached sm_lid,
522 * lid and gid values, and register a callback function for processing MADs
523 * on the specified port.
524 *
525 * Note: It is safe to call this function more than once for the same port.
526 */
527static int srpt_refresh_port(struct srpt_port *sport)
528{
529 struct ib_mad_reg_req reg_req;
530 struct ib_port_modify port_modify;
531 struct ib_port_attr port_attr;
532 int ret;
533
534 memset(&port_modify, 0, sizeof port_modify);
535 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
536 port_modify.clr_port_cap_mask = 0;
537
538 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
539 if (ret)
540 goto err_mod_port;
541
542 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
543 if (ret)
544 goto err_query_port;
545
546 sport->sm_lid = port_attr.sm_lid;
547 sport->lid = port_attr.lid;
548
549 ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
550 if (ret)
551 goto err_query_port;
552
553 if (!sport->mad_agent) {
554 memset(&reg_req, 0, sizeof reg_req);
555 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
556 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
557 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
558 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
559
560 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
561 sport->port,
562 IB_QPT_GSI,
563 &reg_req, 0,
564 srpt_mad_send_handler,
565 srpt_mad_recv_handler,
566 sport);
567 if (IS_ERR(sport->mad_agent)) {
568 ret = PTR_ERR(sport->mad_agent);
569 sport->mad_agent = NULL;
570 goto err_query_port;
571 }
572 }
573
574 return 0;
575
576err_query_port:
577
578 port_modify.set_port_cap_mask = 0;
579 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
580 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
581
582err_mod_port:
583
584 return ret;
585}
586
587/**
588 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
589 *
590 * Note: It is safe to call this function more than once for the same device.
591 */
592static void srpt_unregister_mad_agent(struct srpt_device *sdev)
593{
594 struct ib_port_modify port_modify = {
595 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
596 };
597 struct srpt_port *sport;
598 int i;
599
600 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
601 sport = &sdev->port[i - 1];
602 WARN_ON(sport->port != i);
603 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
604 printk(KERN_ERR "disabling MAD processing failed.\n");
605 if (sport->mad_agent) {
606 ib_unregister_mad_agent(sport->mad_agent);
607 sport->mad_agent = NULL;
608 }
609 }
610}
611
612/**
613 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
614 */
615static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
616 int ioctx_size, int dma_size,
617 enum dma_data_direction dir)
618{
619 struct srpt_ioctx *ioctx;
620
621 ioctx = kmalloc(ioctx_size, GFP_KERNEL);
622 if (!ioctx)
623 goto err;
624
625 ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
626 if (!ioctx->buf)
627 goto err_free_ioctx;
628
629 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
630 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
631 goto err_free_buf;
632
633 return ioctx;
634
635err_free_buf:
636 kfree(ioctx->buf);
637err_free_ioctx:
638 kfree(ioctx);
639err:
640 return NULL;
641}
642
643/**
644 * srpt_free_ioctx() - Free an SRPT I/O context structure.
645 */
646static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
647 int dma_size, enum dma_data_direction dir)
648{
649 if (!ioctx)
650 return;
651
652 ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
653 kfree(ioctx->buf);
654 kfree(ioctx);
655}
656
657/**
658 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
659 * @sdev: Device to allocate the I/O context ring for.
660 * @ring_size: Number of elements in the I/O context ring.
661 * @ioctx_size: I/O context size.
662 * @dma_size: DMA buffer size.
663 * @dir: DMA data direction.
664 */
665static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
666 int ring_size, int ioctx_size,
667 int dma_size, enum dma_data_direction dir)
668{
669 struct srpt_ioctx **ring;
670 int i;
671
672 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
673 && ioctx_size != sizeof(struct srpt_send_ioctx));
674
675 ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
676 if (!ring)
677 goto out;
678 for (i = 0; i < ring_size; ++i) {
679 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
680 if (!ring[i])
681 goto err;
682 ring[i]->index = i;
683 }
684 goto out;
685
686err:
687 while (--i >= 0)
688 srpt_free_ioctx(sdev, ring[i], dma_size, dir);
689 kfree(ring);
690out:
691 return ring;
692}
693
694/**
695 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
696 */
697static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
698 struct srpt_device *sdev, int ring_size,
699 int dma_size, enum dma_data_direction dir)
700{
701 int i;
702
703 for (i = 0; i < ring_size; ++i)
704 srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
705 kfree(ioctx_ring);
706}
707
708/**
709 * srpt_get_cmd_state() - Get the state of a SCSI command.
710 */
711static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
712{
713 enum srpt_command_state state;
714 unsigned long flags;
715
716 BUG_ON(!ioctx);
717
718 spin_lock_irqsave(&ioctx->spinlock, flags);
719 state = ioctx->state;
720 spin_unlock_irqrestore(&ioctx->spinlock, flags);
721 return state;
722}
723
724/**
725 * srpt_set_cmd_state() - Set the state of a SCSI command.
726 *
727 * Does not modify the state of aborted commands. Returns the previous command
728 * state.
729 */
730static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
731 enum srpt_command_state new)
732{
733 enum srpt_command_state previous;
734 unsigned long flags;
735
736 BUG_ON(!ioctx);
737
738 spin_lock_irqsave(&ioctx->spinlock, flags);
739 previous = ioctx->state;
740 if (previous != SRPT_STATE_DONE)
741 ioctx->state = new;
742 spin_unlock_irqrestore(&ioctx->spinlock, flags);
743
744 return previous;
745}
746
747/**
748 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
749 *
750 * Returns true if and only if the previous command state was equal to 'old'.
751 */
752static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
753 enum srpt_command_state old,
754 enum srpt_command_state new)
755{
756 enum srpt_command_state previous;
757 unsigned long flags;
758
759 WARN_ON(!ioctx);
760 WARN_ON(old == SRPT_STATE_DONE);
761 WARN_ON(new == SRPT_STATE_NEW);
762
763 spin_lock_irqsave(&ioctx->spinlock, flags);
764 previous = ioctx->state;
765 if (previous == old)
766 ioctx->state = new;
767 spin_unlock_irqrestore(&ioctx->spinlock, flags);
768 return previous == old;
769}
770
771/**
772 * srpt_post_recv() - Post an IB receive request.
773 */
774static int srpt_post_recv(struct srpt_device *sdev,
775 struct srpt_recv_ioctx *ioctx)
776{
777 struct ib_sge list;
778 struct ib_recv_wr wr, *bad_wr;
779
780 BUG_ON(!sdev);
781 wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
782
783 list.addr = ioctx->ioctx.dma;
784 list.length = srp_max_req_size;
785 list.lkey = sdev->mr->lkey;
786
787 wr.next = NULL;
788 wr.sg_list = &list;
789 wr.num_sge = 1;
790
791 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
792}
793
794/**
795 * srpt_post_send() - Post an IB send request.
796 *
797 * Returns zero upon success and a non-zero value upon failure.
798 */
799static int srpt_post_send(struct srpt_rdma_ch *ch,
800 struct srpt_send_ioctx *ioctx, int len)
801{
802 struct ib_sge list;
803 struct ib_send_wr wr, *bad_wr;
804 struct srpt_device *sdev = ch->sport->sdev;
805 int ret;
806
807 atomic_inc(&ch->req_lim);
808
809 ret = -ENOMEM;
810 if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
811 printk(KERN_WARNING "IB send queue full (needed 1)\n");
812 goto out;
813 }
814
815 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
816 DMA_TO_DEVICE);
817
818 list.addr = ioctx->ioctx.dma;
819 list.length = len;
820 list.lkey = sdev->mr->lkey;
821
822 wr.next = NULL;
823 wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
824 wr.sg_list = &list;
825 wr.num_sge = 1;
826 wr.opcode = IB_WR_SEND;
827 wr.send_flags = IB_SEND_SIGNALED;
828
829 ret = ib_post_send(ch->qp, &wr, &bad_wr);
830
831out:
832 if (ret < 0) {
833 atomic_inc(&ch->sq_wr_avail);
834 atomic_dec(&ch->req_lim);
835 }
836 return ret;
837}
838
839/**
840 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
841 * @ioctx: Pointer to the I/O context associated with the request.
842 * @srp_cmd: Pointer to the SRP_CMD request data.
843 * @dir: Pointer to the variable to which the transfer direction will be
844 * written.
845 * @data_len: Pointer to the variable to which the total data length of all
846 * descriptors in the SRP_CMD request will be written.
847 *
 848 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
849 *
850 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
851 * -ENOMEM when memory allocation fails and zero upon success.
852 */
853static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
854 struct srp_cmd *srp_cmd,
855 enum dma_data_direction *dir, u64 *data_len)
856{
857 struct srp_indirect_buf *idb;
858 struct srp_direct_buf *db;
859 unsigned add_cdb_offset;
860 int ret;
861
862 /*
863 * The pointer computations below will only be compiled correctly
864 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
865 * whether srp_cmd::add_data has been declared as a byte pointer.
866 */
867 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
868 && !__same_type(srp_cmd->add_data[0], (u8)0));
869
870 BUG_ON(!dir);
871 BUG_ON(!data_len);
872
873 ret = 0;
874 *data_len = 0;
875
876 /*
877 * The lower four bits of the buffer format field contain the DATA-IN
878 * buffer descriptor format, and the highest four bits contain the
879 * DATA-OUT buffer descriptor format.
880 */
881 *dir = DMA_NONE;
882 if (srp_cmd->buf_fmt & 0xf)
883 /* DATA-IN: transfer data from target to initiator (read). */
884 *dir = DMA_FROM_DEVICE;
885 else if (srp_cmd->buf_fmt >> 4)
886 /* DATA-OUT: transfer data from initiator to target (write). */
887 *dir = DMA_TO_DEVICE;
888
889 /*
890 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
891 * CDB LENGTH' field are reserved and the size in bytes of this field
892 * is four times the value specified in bits 3..7. Hence the "& ~3".
893 */
894 add_cdb_offset = srp_cmd->add_cdb_len & ~3;
895 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
896 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
897 ioctx->n_rbuf = 1;
898 ioctx->rbufs = &ioctx->single_rbuf;
899
900 db = (struct srp_direct_buf *)(srp_cmd->add_data
901 + add_cdb_offset);
902 memcpy(ioctx->rbufs, db, sizeof *db);
903 *data_len = be32_to_cpu(db->len);
904 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
905 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
906 idb = (struct srp_indirect_buf *)(srp_cmd->add_data
907 + add_cdb_offset);
908
909 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
910
911 if (ioctx->n_rbuf >
912 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
913 printk(KERN_ERR "received unsupported SRP_CMD request"
914 " type (%u out + %u in != %u / %zu)\n",
915 srp_cmd->data_out_desc_cnt,
916 srp_cmd->data_in_desc_cnt,
917 be32_to_cpu(idb->table_desc.len),
918 sizeof(*db));
919 ioctx->n_rbuf = 0;
920 ret = -EINVAL;
921 goto out;
922 }
923
924 if (ioctx->n_rbuf == 1)
925 ioctx->rbufs = &ioctx->single_rbuf;
926 else {
927 ioctx->rbufs =
928 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
929 if (!ioctx->rbufs) {
930 ioctx->n_rbuf = 0;
931 ret = -ENOMEM;
932 goto out;
933 }
934 }
935
936 db = idb->desc_list;
937 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
938 *data_len = be32_to_cpu(idb->len);
939 }
940out:
941 return ret;
942}
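/*
 * Illustrative buf_fmt examples for srpt_get_desc_tbl() above (not part of
 * the original patch):
 * - buf_fmt == SRP_DATA_DESC_DIRECT << 4: one direct DATA-OUT descriptor, so
 *   *dir becomes DMA_TO_DEVICE (a write) and *data_len is taken from the
 *   direct descriptor's len field.
 * - buf_fmt == SRP_DATA_DESC_INDIRECT: an indirect DATA-IN table, so *dir
 *   becomes DMA_FROM_DEVICE (a read) and n_rbuf follows from
 *   idb->table_desc.len / sizeof(struct srp_direct_buf).
 */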
943
944/**
945 * srpt_init_ch_qp() - Initialize queue pair attributes.
946 *
 947 * Initializes the attributes of queue pair 'qp' by allowing local write,
948 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
949 */
950static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
951{
952 struct ib_qp_attr *attr;
953 int ret;
954
955 attr = kzalloc(sizeof *attr, GFP_KERNEL);
956 if (!attr)
957 return -ENOMEM;
958
959 attr->qp_state = IB_QPS_INIT;
960 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
961 IB_ACCESS_REMOTE_WRITE;
962 attr->port_num = ch->sport->port;
963 attr->pkey_index = 0;
964
965 ret = ib_modify_qp(qp, attr,
966 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
967 IB_QP_PKEY_INDEX);
968
969 kfree(attr);
970 return ret;
971}
972
973/**
974 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
975 * @ch: channel of the queue pair.
976 * @qp: queue pair to change the state of.
977 *
978 * Returns zero upon success and a negative value upon failure.
979 *
980 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
981 * If this structure ever becomes larger, it might be necessary to allocate
982 * it dynamically instead of on the stack.
983 */
984static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
985{
986 struct ib_qp_attr qp_attr;
987 int attr_mask;
988 int ret;
989
990 qp_attr.qp_state = IB_QPS_RTR;
991 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
992 if (ret)
993 goto out;
994
995 qp_attr.max_dest_rd_atomic = 4;
996
997 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
998
999out:
1000 return ret;
1001}
1002
1003/**
1004 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
1005 * @ch: channel of the queue pair.
1006 * @qp: queue pair to change the state of.
1007 *
1008 * Returns zero upon success and a negative value upon failure.
1009 *
1010 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1011 * If this structure ever becomes larger, it might be necessary to allocate
1012 * it dynamically instead of on the stack.
1013 */
1014static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1015{
1016 struct ib_qp_attr qp_attr;
1017 int attr_mask;
1018 int ret;
1019
1020 qp_attr.qp_state = IB_QPS_RTS;
1021 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1022 if (ret)
1023 goto out;
1024
1025 qp_attr.max_rd_atomic = 4;
1026
1027 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1028
1029out:
1030 return ret;
1031}
1032
1033/**
1034 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
1035 */
1036static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1037{
1038 struct ib_qp_attr qp_attr;
1039
1040 qp_attr.qp_state = IB_QPS_ERR;
1041 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1042}
1043
1044/**
1045 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
1046 */
1047static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1048 struct srpt_send_ioctx *ioctx)
1049{
1050 struct scatterlist *sg;
1051 enum dma_data_direction dir;
1052
1053 BUG_ON(!ch);
1054 BUG_ON(!ioctx);
1055 BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
1056
1057 while (ioctx->n_rdma)
1058 kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
1059
1060 kfree(ioctx->rdma_ius);
1061 ioctx->rdma_ius = NULL;
1062
1063 if (ioctx->mapped_sg_count) {
1064 sg = ioctx->sg;
1065 WARN_ON(!sg);
1066 dir = ioctx->cmd.data_direction;
1067 BUG_ON(dir == DMA_NONE);
1068 ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1069 opposite_dma_dir(dir));
1070 ioctx->mapped_sg_count = 0;
1071 }
1072}
1073
1074/**
1075 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
1076 */
1077static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1078 struct srpt_send_ioctx *ioctx)
1079{
1080 struct se_cmd *cmd;
1081 struct scatterlist *sg, *sg_orig;
1082 int sg_cnt;
1083 enum dma_data_direction dir;
1084 struct rdma_iu *riu;
1085 struct srp_direct_buf *db;
1086 dma_addr_t dma_addr;
1087 struct ib_sge *sge;
1088 u64 raddr;
1089 u32 rsize;
1090 u32 tsize;
1091 u32 dma_len;
1092 int count, nrdma;
1093 int i, j, k;
1094
1095 BUG_ON(!ch);
1096 BUG_ON(!ioctx);
1097 cmd = &ioctx->cmd;
1098 dir = cmd->data_direction;
1099 BUG_ON(dir == DMA_NONE);
1100
1101 transport_do_task_sg_chain(cmd);
1102 ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
1103 ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
1104
1105 count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1106 opposite_dma_dir(dir));
1107 if (unlikely(!count))
1108 return -EAGAIN;
1109
1110 ioctx->mapped_sg_count = count;
1111
1112 if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1113 nrdma = ioctx->n_rdma_ius;
1114 else {
1115 nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
1116 + ioctx->n_rbuf;
1117
1118 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
1119 if (!ioctx->rdma_ius)
1120 goto free_mem;
1121
1122 ioctx->n_rdma_ius = nrdma;
1123 }
1124
1125 db = ioctx->rbufs;
1126 tsize = cmd->data_length;
1127 dma_len = sg_dma_len(&sg[0]);
1128 riu = ioctx->rdma_ius;
1129
 1130	/*
 1131	 * For each remote descriptor, calculate the number of ib_sge entries
 1132	 * needed. If at most SRPT_DEF_SG_PER_WQE ib_sge entries are needed per
 1133	 * RDMA operation, a single rdma_iu (one RDMA work request) per remote
 1134	 * descriptor suffices; otherwise additional rdma_iu structures are
 1135	 * allocated to carry the extra ib_sge entries in further RDMA work
 1136	 * requests.
 1137	 */
1138 for (i = 0, j = 0;
1139 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1140 rsize = be32_to_cpu(db->len);
1141 raddr = be64_to_cpu(db->va);
1142 riu->raddr = raddr;
1143 riu->rkey = be32_to_cpu(db->key);
1144 riu->sge_cnt = 0;
1145
1146 /* calculate how many sge required for this remote_buf */
1147 while (rsize > 0 && tsize > 0) {
1148
1149 if (rsize >= dma_len) {
1150 tsize -= dma_len;
1151 rsize -= dma_len;
1152 raddr += dma_len;
1153
1154 if (tsize > 0) {
1155 ++j;
1156 if (j < count) {
1157 sg = sg_next(sg);
1158 dma_len = sg_dma_len(sg);
1159 }
1160 }
1161 } else {
1162 tsize -= rsize;
1163 dma_len -= rsize;
1164 rsize = 0;
1165 }
1166
1167 ++riu->sge_cnt;
1168
1169 if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
1170 ++ioctx->n_rdma;
1171 riu->sge =
1172 kmalloc(riu->sge_cnt * sizeof *riu->sge,
1173 GFP_KERNEL);
1174 if (!riu->sge)
1175 goto free_mem;
1176
1177 ++riu;
1178 riu->sge_cnt = 0;
1179 riu->raddr = raddr;
1180 riu->rkey = be32_to_cpu(db->key);
1181 }
1182 }
1183
1184 ++ioctx->n_rdma;
1185 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
1186 GFP_KERNEL);
1187 if (!riu->sge)
1188 goto free_mem;
1189 }
1190
1191 db = ioctx->rbufs;
1192 tsize = cmd->data_length;
1193 riu = ioctx->rdma_ius;
1194 sg = sg_orig;
1195 dma_len = sg_dma_len(&sg[0]);
1196 dma_addr = sg_dma_address(&sg[0]);
1197
 1198	/* This second loop maps the scatterlist addresses onto rdma_iu->ib_sge. */
1199 for (i = 0, j = 0;
1200 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1201 rsize = be32_to_cpu(db->len);
1202 sge = riu->sge;
1203 k = 0;
1204
1205 while (rsize > 0 && tsize > 0) {
1206 sge->addr = dma_addr;
1207 sge->lkey = ch->sport->sdev->mr->lkey;
1208
1209 if (rsize >= dma_len) {
1210 sge->length =
1211 (tsize < dma_len) ? tsize : dma_len;
1212 tsize -= dma_len;
1213 rsize -= dma_len;
1214
1215 if (tsize > 0) {
1216 ++j;
1217 if (j < count) {
1218 sg = sg_next(sg);
1219 dma_len = sg_dma_len(sg);
1220 dma_addr = sg_dma_address(sg);
1221 }
1222 }
1223 } else {
1224 sge->length = (tsize < rsize) ? tsize : rsize;
1225 tsize -= rsize;
1226 dma_len -= rsize;
1227 dma_addr += rsize;
1228 rsize = 0;
1229 }
1230
1231 ++k;
1232 if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
1233 ++riu;
1234 sge = riu->sge;
1235 k = 0;
1236 } else if (rsize > 0 && tsize > 0)
1237 ++sge;
1238 }
1239 }
1240
1241 return 0;
1242
1243free_mem:
1244 srpt_unmap_sg_to_ib_sge(ch, ioctx);
1245
1246 return -ENOMEM;
1247}
1248
1249/**
1250 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
1251 */
1252static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1253{
1254 struct srpt_send_ioctx *ioctx;
1255 unsigned long flags;
1256
1257 BUG_ON(!ch);
1258
1259 ioctx = NULL;
1260 spin_lock_irqsave(&ch->spinlock, flags);
1261 if (!list_empty(&ch->free_list)) {
1262 ioctx = list_first_entry(&ch->free_list,
1263 struct srpt_send_ioctx, free_list);
1264 list_del(&ioctx->free_list);
1265 }
1266 spin_unlock_irqrestore(&ch->spinlock, flags);
1267
1268 if (!ioctx)
1269 return ioctx;
1270
1271 BUG_ON(ioctx->ch != ch);
1272 kref_init(&ioctx->kref);
1273 spin_lock_init(&ioctx->spinlock);
1274 ioctx->state = SRPT_STATE_NEW;
1275 ioctx->n_rbuf = 0;
1276 ioctx->rbufs = NULL;
1277 ioctx->n_rdma = 0;
1278 ioctx->n_rdma_ius = 0;
1279 ioctx->rdma_ius = NULL;
1280 ioctx->mapped_sg_count = 0;
1281 init_completion(&ioctx->tx_done);
1282 ioctx->queue_status_only = false;
1283 /*
1284 * transport_init_se_cmd() does not initialize all fields, so do it
1285 * here.
1286 */
1287 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1288 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1289
1290 return ioctx;
1291}
1292
1293/**
1294 * srpt_put_send_ioctx() - Free up resources.
1295 */
1296static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
1297{
1298 struct srpt_rdma_ch *ch;
1299 unsigned long flags;
1300
1301 BUG_ON(!ioctx);
1302 ch = ioctx->ch;
1303 BUG_ON(!ch);
1304
1305 WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
1306
1307 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1308 transport_generic_free_cmd(&ioctx->cmd, 0);
1309
1310 if (ioctx->n_rbuf > 1) {
1311 kfree(ioctx->rbufs);
1312 ioctx->rbufs = NULL;
1313 ioctx->n_rbuf = 0;
1314 }
1315
1316 spin_lock_irqsave(&ch->spinlock, flags);
1317 list_add(&ioctx->free_list, &ch->free_list);
1318 spin_unlock_irqrestore(&ch->spinlock, flags);
1319}
1320
1321static void srpt_put_send_ioctx_kref(struct kref *kref)
1322{
1323 srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
1324}
1325
1326/**
1327 * srpt_abort_cmd() - Abort a SCSI command.
 1328 * @ioctx: I/O context associated with the SCSI command.
1330 */
1331static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1332{
1333 enum srpt_command_state state;
1334 unsigned long flags;
1335
1336 BUG_ON(!ioctx);
1337
1338 /*
1339 * If the command is in a state where the target core is waiting for
1340 * the ib_srpt driver, change the state to the next state. Changing
1341 * the state of the command from SRPT_STATE_NEED_DATA to
1342 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
1343 * function a second time.
1344 */
1345
1346 spin_lock_irqsave(&ioctx->spinlock, flags);
1347 state = ioctx->state;
1348 switch (state) {
1349 case SRPT_STATE_NEED_DATA:
1350 ioctx->state = SRPT_STATE_DATA_IN;
1351 break;
1352 case SRPT_STATE_DATA_IN:
1353 case SRPT_STATE_CMD_RSP_SENT:
1354 case SRPT_STATE_MGMT_RSP_SENT:
1355 ioctx->state = SRPT_STATE_DONE;
1356 break;
1357 default:
1358 break;
1359 }
1360 spin_unlock_irqrestore(&ioctx->spinlock, flags);
1361
1362 if (state == SRPT_STATE_DONE)
1363 goto out;
1364
1365 pr_debug("Aborting cmd with state %d and tag %lld\n", state,
1366 ioctx->tag);
1367
1368 switch (state) {
1369 case SRPT_STATE_NEW:
1370 case SRPT_STATE_DATA_IN:
1371 case SRPT_STATE_MGMT:
1372 /*
1373 * Do nothing - defer abort processing until
1374 * srpt_queue_response() is invoked.
1375 */
1376 WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
1377 break;
1378 case SRPT_STATE_NEED_DATA:
1379 /* DMA_TO_DEVICE (write) - RDMA read error. */
1380 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1381 transport_generic_handle_data(&ioctx->cmd);
1382 break;
1383 case SRPT_STATE_CMD_RSP_SENT:
1384 /*
1385 * SRP_RSP sending failed or the SRP_RSP send completion has
1386 * not been received in time.
1387 */
1388 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1389 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1390 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1391 break;
1392 case SRPT_STATE_MGMT_RSP_SENT:
1393 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1394 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1395 break;
1396 default:
 1397		WARN(1, "Unexpected command state %d\n", state);
1398 break;
1399 }
1400
1401out:
1402 return state;
1403}
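/*
 * Descriptive summary of srpt_abort_cmd() above (not part of the original
 * patch):
 *
 *	SRPT_STATE_NEED_DATA	 -> SRPT_STATE_DATA_IN (abort is completed
 *				    later by srpt_xmit_response() /
 *				    srpt_queue_response())
 *	SRPT_STATE_DATA_IN,
 *	SRPT_STATE_CMD_RSP_SENT,
 *	SRPT_STATE_MGMT_RSP_SENT -> SRPT_STATE_DONE
 *
 * Commands that already reached SRPT_STATE_DONE are left untouched.
 */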
1404
1405/**
1406 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
1407 */
1408static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
1409{
1410 struct srpt_send_ioctx *ioctx;
1411 enum srpt_command_state state;
1412 struct se_cmd *cmd;
1413 u32 index;
1414
1415 atomic_inc(&ch->sq_wr_avail);
1416
1417 index = idx_from_wr_id(wr_id);
1418 ioctx = ch->ioctx_ring[index];
1419 state = srpt_get_cmd_state(ioctx);
1420 cmd = &ioctx->cmd;
1421
1422 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1423 && state != SRPT_STATE_MGMT_RSP_SENT
1424 && state != SRPT_STATE_NEED_DATA
1425 && state != SRPT_STATE_DONE);
1426
1427 /* If SRP_RSP sending failed, undo the ch->req_lim change. */
1428 if (state == SRPT_STATE_CMD_RSP_SENT
1429 || state == SRPT_STATE_MGMT_RSP_SENT)
1430 atomic_dec(&ch->req_lim);
1431
1432 srpt_abort_cmd(ioctx);
1433}
1434
1435/**
1436 * srpt_handle_send_comp() - Process an IB send completion notification.
1437 */
1438static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
1439 struct srpt_send_ioctx *ioctx)
1440{
1441 enum srpt_command_state state;
1442
1443 atomic_inc(&ch->sq_wr_avail);
1444
1445 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1446
1447 if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1448 && state != SRPT_STATE_MGMT_RSP_SENT
1449 && state != SRPT_STATE_DONE))
1450 pr_debug("state = %d\n", state);
1451
1452 if (state != SRPT_STATE_DONE)
1453 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1454 else
1455 printk(KERN_ERR "IB completion has been received too late for"
1456 " wr_id = %u.\n", ioctx->ioctx.index);
1457}
1458
1459/**
1460 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
1461 *
1462 * Note: transport_generic_handle_data() is asynchronous so unmapping the
1463 * data that has been transferred via IB RDMA must be postponed until the
1464 * check_stop_free() callback.
1465 */
1466static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
1467 struct srpt_send_ioctx *ioctx,
1468 enum srpt_opcode opcode)
1469{
1470 WARN_ON(ioctx->n_rdma <= 0);
1471 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1472
1473 if (opcode == SRPT_RDMA_READ_LAST) {
1474 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1475 SRPT_STATE_DATA_IN))
1476 transport_generic_handle_data(&ioctx->cmd);
1477 else
1478 printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
1479 __LINE__, srpt_get_cmd_state(ioctx));
1480 } else if (opcode == SRPT_RDMA_ABORT) {
1481 ioctx->rdma_aborted = true;
1482 } else {
1483 WARN(true, "unexpected opcode %d\n", opcode);
1484 }
1485}
1486
1487/**
1488 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
1489 */
1490static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1491 struct srpt_send_ioctx *ioctx,
1492 enum srpt_opcode opcode)
1493{
1494 struct se_cmd *cmd;
1495 enum srpt_command_state state;
1496
1497 cmd = &ioctx->cmd;
1498 state = srpt_get_cmd_state(ioctx);
1499 switch (opcode) {
1500 case SRPT_RDMA_READ_LAST:
1501 if (ioctx->n_rdma <= 0) {
1502 printk(KERN_ERR "Received invalid RDMA read"
1503 " error completion with idx %d\n",
1504 ioctx->ioctx.index);
1505 break;
1506 }
1507 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1508 if (state == SRPT_STATE_NEED_DATA)
1509 srpt_abort_cmd(ioctx);
1510 else
1511 printk(KERN_ERR "%s[%d]: wrong state = %d\n",
1512 __func__, __LINE__, state);
1513 break;
1514 case SRPT_RDMA_WRITE_LAST:
1515 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1516 break;
1517 default:
1518 printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
1519 __LINE__, opcode);
1520 break;
1521 }
1522}
1523
1524/**
1525 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
1526 * @ch: RDMA channel through which the request has been received.
1527 * @ioctx: I/O context associated with the SRP_CMD request. The response will
1528 * be built in the buffer ioctx->buf points at and hence this function will
1529 * overwrite the request data.
1530 * @tag: tag of the request for which this response is being generated.
1531 * @status: value for the STATUS field of the SRP_RSP information unit.
1532 *
1533 * Returns the size in bytes of the SRP_RSP response.
1534 *
1535 * An SRP_RSP response contains a SCSI status or service response. See also
1536 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1537 * response. See also SPC-2 for more information about sense data.
1538 */
1539static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1540 struct srpt_send_ioctx *ioctx, u64 tag,
1541 int status)
1542{
1543 struct srp_rsp *srp_rsp;
1544 const u8 *sense_data;
1545 int sense_data_len, max_sense_len;
1546
1547 /*
1548 * The lowest bit of all SAM-3 status codes is zero (see also
1549 * paragraph 5.3 in SAM-3).
1550 */
1551 WARN_ON(status & 1);
1552
1553 srp_rsp = ioctx->ioctx.buf;
1554 BUG_ON(!srp_rsp);
1555
1556 sense_data = ioctx->sense_data;
1557 sense_data_len = ioctx->cmd.scsi_sense_length;
1558 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1559
1560 memset(srp_rsp, 0, sizeof *srp_rsp);
1561 srp_rsp->opcode = SRP_RSP;
1562 srp_rsp->req_lim_delta =
1563 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1564 srp_rsp->tag = tag;
1565 srp_rsp->status = status;
1566
1567 if (sense_data_len) {
1568 BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1569 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1570 if (sense_data_len > max_sense_len) {
1571 printk(KERN_WARNING "truncated sense data from %d to %d"
1572 " bytes\n", sense_data_len, max_sense_len);
1573 sense_data_len = max_sense_len;
1574 }
1575
1576 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1577 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1578 memcpy(srp_rsp + 1, sense_data, sense_data_len);
1579 }
1580
1581 return sizeof(*srp_rsp) + sense_data_len;
1582}
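/*
 * Illustrative example for srpt_build_cmd_rsp() above (not part of the
 * original patch): for a CHECK CONDITION SCSI status with 18 bytes of sense
 * data the function sets SRP_RSP_FLAG_SNSVALID, copies the sense bytes
 * directly behind the header (memcpy(srp_rsp + 1, ...)) and returns
 * sizeof(struct srp_rsp) + 18 as the SRP_RSP length.
 */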
1583
1584/**
1585 * srpt_build_tskmgmt_rsp() - Build a task management response.
1586 * @ch: RDMA channel through which the request has been received.
1587 * @ioctx: I/O context in which the SRP_RSP response will be built.
1588 * @rsp_code: RSP_CODE that will be stored in the response.
1589 * @tag: Tag of the request for which this response is being generated.
1590 *
1591 * Returns the size in bytes of the SRP_RSP response.
1592 *
1593 * An SRP_RSP response contains a SCSI status or service response. See also
1594 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1595 * response.
1596 */
1597static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1598 struct srpt_send_ioctx *ioctx,
1599 u8 rsp_code, u64 tag)
1600{
1601 struct srp_rsp *srp_rsp;
1602 int resp_data_len;
1603 int resp_len;
1604
1605 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
1606 resp_len = sizeof(*srp_rsp) + resp_data_len;
1607
1608 srp_rsp = ioctx->ioctx.buf;
1609 BUG_ON(!srp_rsp);
1610 memset(srp_rsp, 0, sizeof *srp_rsp);
1611
1612 srp_rsp->opcode = SRP_RSP;
1613 srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
1614 + atomic_xchg(&ch->req_lim_delta, 0));
1615 srp_rsp->tag = tag;
1616
1617 if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1618 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1619 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1620 srp_rsp->data[3] = rsp_code;
1621 }
1622
1623 return resp_len;
1624}
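/*
 * Illustrative example for srpt_build_tskmgmt_rsp() above (not part of the
 * original patch): for rsp_code != SRP_TSK_MGMT_SUCCESS the response carries
 * four bytes of response data with the RSP_CODE stored in data[3], the
 * SRP_RSP_FLAG_RSPVALID flag is set and sizeof(struct srp_rsp) + 4 is
 * returned; for a successful task management function only the bare header
 * length is returned.
 */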
1625
1626#define NO_SUCH_LUN ((uint64_t)-1LL)
1627
1628/*
1629 * SCSI LUN addressing method. See also SAM-2 and the section about
1630 * eight byte LUNs.
1631 */
1632enum scsi_lun_addr_method {
1633 SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
1634 SCSI_LUN_ADDR_METHOD_FLAT = 1,
1635 SCSI_LUN_ADDR_METHOD_LUN = 2,
1636 SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
1637};
1638
1639/*
1640 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
1641 *
 1642 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
1643 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
1644 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
1645 */
1646static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1647{
1648 uint64_t res = NO_SUCH_LUN;
1649 int addressing_method;
1650
1651 if (unlikely(len < 2)) {
1652 printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
1653 "more", len);
1654 goto out;
1655 }
1656
1657 switch (len) {
1658 case 8:
1659 if ((*((__be64 *)lun) &
1660 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1661 goto out_err;
1662 break;
1663 case 4:
1664 if (*((__be16 *)&lun[2]) != 0)
1665 goto out_err;
1666 break;
1667 case 6:
1668 if (*((__be32 *)&lun[2]) != 0)
1669 goto out_err;
1670 break;
1671 case 2:
1672 break;
1673 default:
1674 goto out_err;
1675 }
1676
1677 addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
1678 switch (addressing_method) {
1679 case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
1680 case SCSI_LUN_ADDR_METHOD_FLAT:
1681 case SCSI_LUN_ADDR_METHOD_LUN:
1682 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1683 break;
1684
1685 case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
1686 default:
1687 printk(KERN_ERR "Unimplemented LUN addressing method %u",
1688 addressing_method);
1689 break;
1690 }
1691
1692out:
1693 return res;
1694
1695out_err:
1696 printk(KERN_ERR "Support for multi-level LUNs has not yet been"
1697 " implemented");
1698 goto out;
1699}
1700
1701static int srpt_check_stop_free(struct se_cmd *cmd)
1702{
1703 struct srpt_send_ioctx *ioctx;
1704
1705 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
1706 return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1707}
1708
1709/**
1710 * srpt_handle_cmd() - Process SRP_CMD.
1711 */
1712static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1713 struct srpt_recv_ioctx *recv_ioctx,
1714 struct srpt_send_ioctx *send_ioctx)
1715{
1716 struct se_cmd *cmd;
1717 struct srp_cmd *srp_cmd;
1718 uint64_t unpacked_lun;
1719 u64 data_len;
1720 enum dma_data_direction dir;
1721 int ret;
1722
1723 BUG_ON(!send_ioctx);
1724
1725 srp_cmd = recv_ioctx->ioctx.buf;
1726 kref_get(&send_ioctx->kref);
1727 cmd = &send_ioctx->cmd;
1728 send_ioctx->tag = srp_cmd->tag;
1729
1730 switch (srp_cmd->task_attr) {
1731 case SRP_CMD_SIMPLE_Q:
1732 cmd->sam_task_attr = MSG_SIMPLE_TAG;
1733 break;
1734 case SRP_CMD_ORDERED_Q:
1735 default:
1736 cmd->sam_task_attr = MSG_ORDERED_TAG;
1737 break;
1738 case SRP_CMD_HEAD_OF_Q:
1739 cmd->sam_task_attr = MSG_HEAD_TAG;
1740 break;
1741 case SRP_CMD_ACA:
1742 cmd->sam_task_attr = MSG_ACA_TAG;
1743 break;
1744 }
1745
1746 ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
1747 if (ret) {
1748 printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
1749 srp_cmd->tag);
1750 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1751 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1752 goto send_sense;
1753 }
1754
1755 cmd->data_length = data_len;
1756 cmd->data_direction = dir;
1757 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
1758 sizeof(srp_cmd->lun));
1759 if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
1760 goto send_sense;
1761 ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
1762 if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
1763 srpt_queue_status(cmd);
1764 else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
1765 goto send_sense;
1766 else
1767 WARN_ON_ONCE(ret);
1768
1769 transport_handle_cdb_direct(cmd);
1770 return 0;
1771
1772send_sense:
1773 transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
1774 0);
1775 return -1;
1776}
1777
1778/**
1779 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
1780 * @ch: RDMA channel of the task management request.
1781 * @fn: Task management function to perform.
1782 * @req_tag: Tag of the SRP task management request.
1783 * @mgmt_ioctx: I/O context of the task management request.
1784 *
1785 * Returns zero if the target core will process the task management
1786 * request asynchronously.
1787 *
1788 * Note: It is assumed that the initiator serializes tag-based task management
1789 * requests.
1790 */
1791static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
1792{
1793 struct srpt_device *sdev;
1794 struct srpt_rdma_ch *ch;
1795 struct srpt_send_ioctx *target;
1796 int ret, i;
1797
1798 ret = -EINVAL;
1799 ch = ioctx->ch;
1800 BUG_ON(!ch);
1801 BUG_ON(!ch->sport);
1802 sdev = ch->sport->sdev;
1803 BUG_ON(!sdev);
1804 spin_lock_irq(&sdev->spinlock);
1805 for (i = 0; i < ch->rq_size; ++i) {
1806 target = ch->ioctx_ring[i];
1807 if (target->cmd.se_lun == ioctx->cmd.se_lun &&
1808 target->tag == tag &&
1809 srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
1810 ret = 0;
1811 /* now let the target core abort &target->cmd; */
1812 break;
1813 }
1814 }
1815 spin_unlock_irq(&sdev->spinlock);
1816 return ret;
1817}
1818
1819static int srp_tmr_to_tcm(int fn)
1820{
1821 switch (fn) {
1822 case SRP_TSK_ABORT_TASK:
1823 return TMR_ABORT_TASK;
1824 case SRP_TSK_ABORT_TASK_SET:
1825 return TMR_ABORT_TASK_SET;
1826 case SRP_TSK_CLEAR_TASK_SET:
1827 return TMR_CLEAR_TASK_SET;
1828 case SRP_TSK_LUN_RESET:
1829 return TMR_LUN_RESET;
1830 case SRP_TSK_CLEAR_ACA:
1831 return TMR_CLEAR_ACA;
1832 default:
1833 return -1;
1834 }
1835}
1836
1837/**
1838 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
1839 *
1840 * Either passes the request to the target core or sends back a failure response.
1841 *
1842 * For more information about SRP_TSK_MGMT information units, see also section
1843 * 6.7 in the SRP r16a document.
1844 */
1845static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1846 struct srpt_recv_ioctx *recv_ioctx,
1847 struct srpt_send_ioctx *send_ioctx)
1848{
1849 struct srp_tsk_mgmt *srp_tsk;
1850 struct se_cmd *cmd;
1851 uint64_t unpacked_lun;
1852 int tcm_tmr;
1853 int res;
1854
1855 BUG_ON(!send_ioctx);
1856
1857 srp_tsk = recv_ioctx->ioctx.buf;
1858 cmd = &send_ioctx->cmd;
1859
1860 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1861 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1862 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1863
1864 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1865 send_ioctx->tag = srp_tsk->tag;
1866 tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1867 if (tcm_tmr < 0) {
1868 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1869		send_ioctx->cmd.scsi_sense_reason =
1870					TCM_INVALID_CDB_FIELD;
1871 goto process_tmr;
1872 }
1873 cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
1874 if (!cmd->se_tmr_req) {
1875 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1876		send_ioctx->cmd.scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1877 goto process_tmr;
1878 }
1879
1880 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
1881 sizeof(srp_tsk->lun));
1882 res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
1883 if (res) {
1884 pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
1885 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1886 send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1887 goto process_tmr;
1888 }
1889
1890 if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
1891 srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
1892
1893process_tmr:
1894 kref_get(&send_ioctx->kref);
1895 if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
1896 transport_generic_handle_tmr(&send_ioctx->cmd);
1897 else
1898 transport_send_check_condition_and_sense(cmd,
1899 cmd->scsi_sense_reason, 0);
1900
1901}
1902
1903/**
1904 * srpt_handle_new_iu() - Process a newly received information unit.
1905 * @ch: RDMA channel through which the information unit has been received.
1906 * @ioctx: SRPT I/O context associated with the information unit.
1907 */
1908static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1909 struct srpt_recv_ioctx *recv_ioctx,
1910 struct srpt_send_ioctx *send_ioctx)
1911{
1912 struct srp_cmd *srp_cmd;
1913 enum rdma_ch_state ch_state;
1914
1915 BUG_ON(!ch);
1916 BUG_ON(!recv_ioctx);
1917
1918 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1919 recv_ioctx->ioctx.dma, srp_max_req_size,
1920 DMA_FROM_DEVICE);
1921
1922 ch_state = srpt_get_ch_state(ch);
1923 if (unlikely(ch_state == CH_CONNECTING)) {
1924 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1925 goto out;
1926 }
1927
1928 if (unlikely(ch_state != CH_LIVE))
1929 goto out;
1930
1931 srp_cmd = recv_ioctx->ioctx.buf;
1932 if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1933 if (!send_ioctx)
1934 send_ioctx = srpt_get_send_ioctx(ch);
1935 if (unlikely(!send_ioctx)) {
1936 list_add_tail(&recv_ioctx->wait_list,
1937 &ch->cmd_wait_list);
1938 goto out;
1939 }
1940 }
1941
1942	if (send_ioctx)
1943		transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
1944				      0, DMA_NONE, MSG_SIMPLE_TAG, send_ioctx->sense_data);
1945
1946 switch (srp_cmd->opcode) {
1947 case SRP_CMD:
1948 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1949 break;
1950 case SRP_TSK_MGMT:
1951 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1952 break;
1953 case SRP_I_LOGOUT:
1954 printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
1955 break;
1956 case SRP_CRED_RSP:
1957 pr_debug("received SRP_CRED_RSP\n");
1958 break;
1959 case SRP_AER_RSP:
1960 pr_debug("received SRP_AER_RSP\n");
1961 break;
1962 case SRP_RSP:
1963 printk(KERN_ERR "Received SRP_RSP\n");
1964 break;
1965 default:
1966 printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
1967 srp_cmd->opcode);
1968 break;
1969 }
1970
1971 srpt_post_recv(ch->sport->sdev, recv_ioctx);
1972out:
1973 return;
1974}
1975
1976static void srpt_process_rcv_completion(struct ib_cq *cq,
1977 struct srpt_rdma_ch *ch,
1978 struct ib_wc *wc)
1979{
1980 struct srpt_device *sdev = ch->sport->sdev;
1981 struct srpt_recv_ioctx *ioctx;
1982 u32 index;
1983
1984 index = idx_from_wr_id(wc->wr_id);
1985 if (wc->status == IB_WC_SUCCESS) {
1986 int req_lim;
1987
1988 req_lim = atomic_dec_return(&ch->req_lim);
1989 if (unlikely(req_lim < 0))
1990 printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
1991 ioctx = sdev->ioctx_ring[index];
1992 srpt_handle_new_iu(ch, ioctx, NULL);
1993 } else {
1994 printk(KERN_INFO "receiving failed for idx %u with status %d\n",
1995 index, wc->status);
1996 }
1997}
1998
1999/**
2000 * srpt_process_send_completion() - Process an IB send completion.
2001 *
2002 * Note: Although this has not yet been observed during tests, at least in
2003 * theory it is possible that the srpt_get_send_ioctx() call invoked by
2004 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
2005 * value in each response is set to one, and it is possible that this response
2006 * makes the initiator send a new request before the send completion for that
2007 * response has been processed. This could e.g. happen if the call to
2008 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
2009 * if IB retransmission causes generation of the send completion to be
2010 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
2011 * are queued on cmd_wait_list. The code below processes these delayed
2012 * requests one at a time.
2013 */
2014static void srpt_process_send_completion(struct ib_cq *cq,
2015 struct srpt_rdma_ch *ch,
2016 struct ib_wc *wc)
2017{
2018 struct srpt_send_ioctx *send_ioctx;
2019 uint32_t index;
2020 enum srpt_opcode opcode;
2021
2022 index = idx_from_wr_id(wc->wr_id);
2023 opcode = opcode_from_wr_id(wc->wr_id);
2024 send_ioctx = ch->ioctx_ring[index];
2025 if (wc->status == IB_WC_SUCCESS) {
2026 if (opcode == SRPT_SEND)
2027 srpt_handle_send_comp(ch, send_ioctx);
2028 else {
2029 WARN_ON(opcode != SRPT_RDMA_ABORT &&
2030 wc->opcode != IB_WC_RDMA_READ);
2031 srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2032 }
2033 } else {
2034 if (opcode == SRPT_SEND) {
2035 printk(KERN_INFO "sending response for idx %u failed"
2036 " with status %d\n", index, wc->status);
2037 srpt_handle_send_err_comp(ch, wc->wr_id);
2038 } else if (opcode != SRPT_RDMA_MID) {
2039			printk(KERN_INFO "RDMA with type %d for idx %u failed"
2040				" with status %d\n", opcode, index, wc->status);
2041 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2042 }
2043 }
2044
2045 while (unlikely(opcode == SRPT_SEND
2046 && !list_empty(&ch->cmd_wait_list)
2047 && srpt_get_ch_state(ch) == CH_LIVE
2048 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2049 struct srpt_recv_ioctx *recv_ioctx;
2050
2051 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2052 struct srpt_recv_ioctx,
2053 wait_list);
2054 list_del(&recv_ioctx->wait_list);
2055 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
2056 }
2057}
2058
2059static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2060{
2061 struct ib_wc *const wc = ch->wc;
2062 int i, n;
2063
2064 WARN_ON(cq != ch->cq);
2065
2066 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2067 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2068 for (i = 0; i < n; i++) {
2069 if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
2070 srpt_process_rcv_completion(cq, ch, &wc[i]);
2071 else
2072 srpt_process_send_completion(cq, ch, &wc[i]);
2073 }
2074 }
2075}
2076
2077/**
2078 * srpt_completion() - IB completion queue callback function.
2079 *
2080 * Notes:
2081 * - It is guaranteed that a completion handler will never be invoked
2082 * concurrently on two different CPUs for the same completion queue. See also
2083 * Documentation/infiniband/core_locking.txt and the implementation of
2084 * handle_edge_irq() in kernel/irq/chip.c.
2085 * - When threaded IRQs are enabled, completion handlers are invoked in thread
2086 * context instead of interrupt context.
2087 */
2088static void srpt_completion(struct ib_cq *cq, void *ctx)
2089{
2090 struct srpt_rdma_ch *ch = ctx;
2091
2092 wake_up_interruptible(&ch->wait_queue);
2093}
2094
2095static int srpt_compl_thread(void *arg)
2096{
2097 struct srpt_rdma_ch *ch;
2098
2099 /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2100 current->flags |= PF_NOFREEZE;
2101
2102 ch = arg;
2103 BUG_ON(!ch);
2104 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
2105	       ch->sess_name, current->comm, current->pid);
2106 while (!kthread_should_stop()) {
2107 wait_event_interruptible(ch->wait_queue,
2108 (srpt_process_completion(ch->cq, ch),
2109 kthread_should_stop()));
2110 }
2111 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
2112	       ch->sess_name, current->comm, current->pid);
2113 return 0;
2114}
2115
2116/**
2117 * srpt_create_ch_ib() - Create the completion queue, queue pair and completion thread.
2118 */
2119static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2120{
2121 struct ib_qp_init_attr *qp_init;
2122 struct srpt_port *sport = ch->sport;
2123 struct srpt_device *sdev = sport->sdev;
2124 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2125 int ret;
2126
2127 WARN_ON(ch->rq_size < 1);
2128
2129 ret = -ENOMEM;
2130 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
2131 if (!qp_init)
2132 goto out;
2133
2134 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2135 ch->rq_size + srp_sq_size, 0);
2136 if (IS_ERR(ch->cq)) {
2137 ret = PTR_ERR(ch->cq);
2138 printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
2139 ch->rq_size + srp_sq_size, ret);
2140 goto out;
2141 }
2142
2143 qp_init->qp_context = (void *)ch;
2144 qp_init->event_handler
2145 = (void(*)(struct ib_event *, void*))srpt_qp_event;
2146 qp_init->send_cq = ch->cq;
2147 qp_init->recv_cq = ch->cq;
2148 qp_init->srq = sdev->srq;
2149 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
2150 qp_init->qp_type = IB_QPT_RC;
2151 qp_init->cap.max_send_wr = srp_sq_size;
2152 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
2153
2154 ch->qp = ib_create_qp(sdev->pd, qp_init);
2155 if (IS_ERR(ch->qp)) {
2156 ret = PTR_ERR(ch->qp);
2157 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2158 goto err_destroy_cq;
2159 }
2160
2161 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2162
2163 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
2164 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2165 qp_init->cap.max_send_wr, ch->cm_id);
2166
2167 ret = srpt_init_ch_qp(ch, ch->qp);
2168 if (ret)
2169 goto err_destroy_qp;
2170
2171 init_waitqueue_head(&ch->wait_queue);
2172
2173 pr_debug("creating thread for session %s\n", ch->sess_name);
2174
2175 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2176 if (IS_ERR(ch->thread)) {
2177		ret = PTR_ERR(ch->thread);
2178		printk(KERN_ERR "failed to create kernel thread %d\n", ret);
2179		ch->thread = NULL;
2180 goto err_destroy_qp;
2181 }
2182
2183out:
2184 kfree(qp_init);
2185 return ret;
2186
2187err_destroy_qp:
2188 ib_destroy_qp(ch->qp);
2189err_destroy_cq:
2190 ib_destroy_cq(ch->cq);
2191 goto out;
2192}
2193
2194static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2195{
2196 if (ch->thread)
2197 kthread_stop(ch->thread);
2198
2199 ib_destroy_qp(ch->qp);
2200 ib_destroy_cq(ch->cq);
2201}
2202
2203/**
2204 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
2205 *
2206 * Reset the QP and make sure all resources associated with the channel will
2207 * be deallocated at an appropriate time.
2208 *
2209 * Note: The caller must hold ch->sport->sdev->spinlock.
2210 */
2211static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2212{
2213 struct srpt_device *sdev;
2214 enum rdma_ch_state prev_state;
2215 unsigned long flags;
2216
2217 sdev = ch->sport->sdev;
2218
2219 spin_lock_irqsave(&ch->spinlock, flags);
2220 prev_state = ch->state;
2221 switch (prev_state) {
2222 case CH_CONNECTING:
2223 case CH_LIVE:
2224 ch->state = CH_DISCONNECTING;
2225 break;
2226 default:
2227 break;
2228 }
2229 spin_unlock_irqrestore(&ch->spinlock, flags);
2230
2231 switch (prev_state) {
2232 case CH_CONNECTING:
2233 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2234 NULL, 0);
2235 /* fall through */
2236 case CH_LIVE:
2237 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2238 printk(KERN_ERR "sending CM DREQ failed.\n");
2239 break;
2240 case CH_DISCONNECTING:
2241 break;
2242 case CH_DRAINING:
2243 case CH_RELEASING:
2244 break;
2245 }
2246}
2247
2248/**
2249 * srpt_close_ch() - Close an RDMA channel.
2250 */
2251static void srpt_close_ch(struct srpt_rdma_ch *ch)
2252{
2253 struct srpt_device *sdev;
2254
2255 sdev = ch->sport->sdev;
2256 spin_lock_irq(&sdev->spinlock);
2257 __srpt_close_ch(ch);
2258 spin_unlock_irq(&sdev->spinlock);
2259}
2260
2261/**
2262 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2263 * @cm_id: Pointer to the CM ID of the channel to be drained.
2264 *
2265 * Note: Must be called from inside srpt_cm_handler to avoid a race between
2266 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
2267 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
2268 * waits until all target sessions for the associated IB device have been
2269 * unregistered and target session registration involves a call to
2270 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
2271 * this function has finished).
2272 */
2273static void srpt_drain_channel(struct ib_cm_id *cm_id)
2274{
2275 struct srpt_device *sdev;
2276 struct srpt_rdma_ch *ch;
2277 int ret;
2278 bool do_reset = false;
2279
2280 WARN_ON_ONCE(irqs_disabled());
2281
2282 sdev = cm_id->context;
2283 BUG_ON(!sdev);
2284 spin_lock_irq(&sdev->spinlock);
2285 list_for_each_entry(ch, &sdev->rch_list, list) {
2286 if (ch->cm_id == cm_id) {
2287 do_reset = srpt_test_and_set_ch_state(ch,
2288 CH_CONNECTING, CH_DRAINING) ||
2289 srpt_test_and_set_ch_state(ch,
2290 CH_LIVE, CH_DRAINING) ||
2291 srpt_test_and_set_ch_state(ch,
2292 CH_DISCONNECTING, CH_DRAINING);
2293 break;
2294 }
2295 }
2296 spin_unlock_irq(&sdev->spinlock);
2297
2298 if (do_reset) {
2299 ret = srpt_ch_qp_err(ch);
2300 if (ret < 0)
2301 printk(KERN_ERR "Setting queue pair in error state"
2302 " failed: %d\n", ret);
2303 }
2304}
2305
2306/**
2307 * srpt_find_channel() - Look up an RDMA channel.
2308 * @cm_id: Pointer to the CM ID of the channel to be looked up.
2309 *
2310 * Return NULL if no matching RDMA channel has been found.
2311 */
2312static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
2313 struct ib_cm_id *cm_id)
2314{
2315 struct srpt_rdma_ch *ch;
2316 bool found;
2317
2318 WARN_ON_ONCE(irqs_disabled());
2319 BUG_ON(!sdev);
2320
2321 found = false;
2322 spin_lock_irq(&sdev->spinlock);
2323 list_for_each_entry(ch, &sdev->rch_list, list) {
2324 if (ch->cm_id == cm_id) {
2325 found = true;
2326 break;
2327 }
2328 }
2329 spin_unlock_irq(&sdev->spinlock);
2330
2331 return found ? ch : NULL;
2332}
2333
2334/**
2335 * srpt_release_channel() - Release channel resources.
2336 *
2337 * Schedules the actual release because:
2338 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
2339 * trigger a deadlock.
2340 * - It is not safe to call TCM transport_* functions from interrupt context.
2341 */
2342static void srpt_release_channel(struct srpt_rdma_ch *ch)
2343{
2344 schedule_work(&ch->release_work);
2345}
2346
2347static void srpt_release_channel_work(struct work_struct *w)
2348{
2349 struct srpt_rdma_ch *ch;
2350 struct srpt_device *sdev;
2351
2352 ch = container_of(w, struct srpt_rdma_ch, release_work);
2353 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2354 ch->release_done);
2355
2356 sdev = ch->sport->sdev;
2357 BUG_ON(!sdev);
2358
2359 transport_deregister_session_configfs(ch->sess);
2360 transport_deregister_session(ch->sess);
2361 ch->sess = NULL;
2362
2363 srpt_destroy_ch_ib(ch);
2364
2365 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2366 ch->sport->sdev, ch->rq_size,
2367 ch->rsp_size, DMA_TO_DEVICE);
2368
2369 spin_lock_irq(&sdev->spinlock);
2370 list_del(&ch->list);
2371 spin_unlock_irq(&sdev->spinlock);
2372
2373 ib_destroy_cm_id(ch->cm_id);
2374
2375 if (ch->release_done)
2376 complete(ch->release_done);
2377
2378 wake_up(&sdev->ch_releaseQ);
2379
2380 kfree(ch);
2381}
2382
2383static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
2384 u8 i_port_id[16])
2385{
2386 struct srpt_node_acl *nacl;
2387
2388 list_for_each_entry(nacl, &sport->port_acl_list, list)
2389 if (memcmp(nacl->i_port_id, i_port_id,
2390 sizeof(nacl->i_port_id)) == 0)
2391 return nacl;
2392
2393 return NULL;
2394}
2395
2396static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
2397 u8 i_port_id[16])
2398{
2399 struct srpt_node_acl *nacl;
2400
2401 spin_lock_irq(&sport->port_acl_lock);
2402 nacl = __srpt_lookup_acl(sport, i_port_id);
2403 spin_unlock_irq(&sport->port_acl_lock);
2404
2405 return nacl;
2406}
2407
2408/**
2409 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2410 *
2411 * Ownership of the cm_id is transferred to the target session if this
2412 * function returns zero. Otherwise the caller remains the owner of the cm_id.
2413 */
2414static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2415 struct ib_cm_req_event_param *param,
2416 void *private_data)
2417{
2418 struct srpt_device *sdev = cm_id->context;
2419 struct srpt_port *sport = &sdev->port[param->port - 1];
2420 struct srp_login_req *req;
2421 struct srp_login_rsp *rsp;
2422 struct srp_login_rej *rej;
2423 struct ib_cm_rep_param *rep_param;
2424 struct srpt_rdma_ch *ch, *tmp_ch;
2425 struct srpt_node_acl *nacl;
2426 u32 it_iu_len;
2427 int i;
2428 int ret = 0;
2429
2430 WARN_ON_ONCE(irqs_disabled());
2431
2432 if (WARN_ON(!sdev || !private_data))
2433 return -EINVAL;
2434
2435 req = (struct srp_login_req *)private_data;
2436
2437 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2438
2439 printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2440 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2441 " (guid=0x%llx:0x%llx)\n",
2442 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2443 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2444 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2445 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2446 it_iu_len,
2447 param->port,
2448 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2449 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2450
2451 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2452 rej = kzalloc(sizeof *rej, GFP_KERNEL);
2453 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2454
2455 if (!rsp || !rej || !rep_param) {
2456 ret = -ENOMEM;
2457 goto out;
2458 }
2459
2460 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2461 rej->reason = __constant_cpu_to_be32(
2462 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2463 ret = -EINVAL;
2464 printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
2465 " length (%d bytes) is out of range (%d .. %d)\n",
2466 it_iu_len, 64, srp_max_req_size);
2467 goto reject;
2468 }
2469
2470 if (!sport->enabled) {
2471 rej->reason = __constant_cpu_to_be32(
2472 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2473 ret = -EINVAL;
2474 printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
2475 " has not yet been enabled\n");
2476 goto reject;
2477 }
2478
2479 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2480 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2481
2482 spin_lock_irq(&sdev->spinlock);
2483
2484 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2485 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2486 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2487 && param->port == ch->sport->port
2488 && param->listen_id == ch->sport->sdev->cm_id
2489 && ch->cm_id) {
2490 enum rdma_ch_state ch_state;
2491
2492 ch_state = srpt_get_ch_state(ch);
2493 if (ch_state != CH_CONNECTING
2494 && ch_state != CH_LIVE)
2495 continue;
2496
2497 /* found an existing channel */
2498 pr_debug("Found existing channel %s"
2499 " cm_id= %p state= %d\n",
2500 ch->sess_name, ch->cm_id, ch_state);
2501
2502 __srpt_close_ch(ch);
2503
2504 rsp->rsp_flags =
2505 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2506 }
2507 }
2508
2509 spin_unlock_irq(&sdev->spinlock);
2510
2511 } else
2512 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2513
2514 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2515 || *(__be64 *)(req->target_port_id + 8) !=
2516 cpu_to_be64(srpt_service_guid)) {
2517 rej->reason = __constant_cpu_to_be32(
2518 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2519 ret = -ENOMEM;
2520 printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
2521 " has an invalid target port identifier.\n");
2522 goto reject;
2523 }
2524
2525 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2526 if (!ch) {
2527 rej->reason = __constant_cpu_to_be32(
2528 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2529		printk(KERN_ERR "rejected SRP_LOGIN_REQ because out of memory.\n");
2530 ret = -ENOMEM;
2531 goto reject;
2532 }
2533
2534 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2535 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2536 memcpy(ch->t_port_id, req->target_port_id, 16);
2537 ch->sport = &sdev->port[param->port - 1];
2538 ch->cm_id = cm_id;
2539 /*
2540 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2541 * for the SRP protocol to the command queue size.
2542 */
2543 ch->rq_size = SRPT_RQ_SIZE;
2544 spin_lock_init(&ch->spinlock);
2545 ch->state = CH_CONNECTING;
2546 INIT_LIST_HEAD(&ch->cmd_wait_list);
2547 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2548
2549 ch->ioctx_ring = (struct srpt_send_ioctx **)
2550 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2551 sizeof(*ch->ioctx_ring[0]),
2552 ch->rsp_size, DMA_TO_DEVICE);
2553 if (!ch->ioctx_ring)
2554 goto free_ch;
2555
2556 INIT_LIST_HEAD(&ch->free_list);
2557 for (i = 0; i < ch->rq_size; i++) {
2558 ch->ioctx_ring[i]->ch = ch;
2559 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2560 }
2561
2562 ret = srpt_create_ch_ib(ch);
2563 if (ret) {
2564 rej->reason = __constant_cpu_to_be32(
2565 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2566 printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
2567 " a new RDMA channel failed.\n");
2568 goto free_ring;
2569 }
2570
2571 ret = srpt_ch_qp_rtr(ch, ch->qp);
2572 if (ret) {
2573 rej->reason = __constant_cpu_to_be32(
2574 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2575 printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
2576 " RTR failed (error code = %d)\n", ret);
2577 goto destroy_ib;
2578 }
2579 /*
2580	 * Use the initiator port identifier as the session name.
2581 */
2582 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2583 be64_to_cpu(*(__be64 *)ch->i_port_id),
2584 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2585
2586 pr_debug("registering session %s\n", ch->sess_name);
2587
2588 nacl = srpt_lookup_acl(sport, ch->i_port_id);
2589 if (!nacl) {
2590 printk(KERN_INFO "Rejected login because no ACL has been"
2591 " configured yet for initiator %s.\n", ch->sess_name);
2592 rej->reason = __constant_cpu_to_be32(
2593 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2594 goto destroy_ib;
2595 }
2596
2597 ch->sess = transport_init_session();
2598 if (!ch->sess) {
2599 rej->reason = __constant_cpu_to_be32(
2600 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2601 pr_debug("Failed to create session\n");
2602 goto deregister_session;
2603 }
2604 ch->sess->se_node_acl = &nacl->nacl;
2605 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2606
2607 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2608 ch->sess_name, ch->cm_id);
2609
2610 /* create srp_login_response */
2611 rsp->opcode = SRP_LOGIN_RSP;
2612 rsp->tag = req->tag;
2613 rsp->max_it_iu_len = req->req_it_iu_len;
2614 rsp->max_ti_iu_len = req->req_it_iu_len;
2615 ch->max_ti_iu_len = it_iu_len;
2616 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2617 | SRP_BUF_FORMAT_INDIRECT);
2618 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2619 atomic_set(&ch->req_lim, ch->rq_size);
2620 atomic_set(&ch->req_lim_delta, 0);
2621
2622 /* create cm reply */
2623 rep_param->qp_num = ch->qp->qp_num;
2624 rep_param->private_data = (void *)rsp;
2625 rep_param->private_data_len = sizeof *rsp;
2626 rep_param->rnr_retry_count = 7;
2627 rep_param->flow_control = 1;
2628 rep_param->failover_accepted = 0;
2629 rep_param->srq = 1;
2630 rep_param->responder_resources = 4;
2631 rep_param->initiator_depth = 4;
2632
2633 ret = ib_send_cm_rep(cm_id, rep_param);
2634 if (ret) {
2635 printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
2636 " (error code = %d)\n", ret);
2637 goto release_channel;
2638 }
2639
2640 spin_lock_irq(&sdev->spinlock);
2641 list_add_tail(&ch->list, &sdev->rch_list);
2642 spin_unlock_irq(&sdev->spinlock);
2643
2644 goto out;
2645
2646release_channel:
2647 srpt_set_ch_state(ch, CH_RELEASING);
2648 transport_deregister_session_configfs(ch->sess);
2649
2650deregister_session:
2651 transport_deregister_session(ch->sess);
2652 ch->sess = NULL;
2653
2654destroy_ib:
2655 srpt_destroy_ch_ib(ch);
2656
2657free_ring:
2658 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2659 ch->sport->sdev, ch->rq_size,
2660 ch->rsp_size, DMA_TO_DEVICE);
2661free_ch:
2662 kfree(ch);
2663
2664reject:
2665 rej->opcode = SRP_LOGIN_REJ;
2666 rej->tag = req->tag;
2667 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2668 | SRP_BUF_FORMAT_INDIRECT);
2669
2670 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2671 (void *)rej, sizeof *rej);
2672
2673out:
2674 kfree(rep_param);
2675 kfree(rsp);
2676 kfree(rej);
2677
2678 return ret;
2679}
2680
2681static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2682{
2683 printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
2684 srpt_drain_channel(cm_id);
2685}
2686
2687/**
2688 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2689 *
2690 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2691 * and that the recipient may begin transmitting (RTU = ready to use).
2692 */
2693static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2694{
2695 struct srpt_rdma_ch *ch;
2696 int ret;
2697
2698 ch = srpt_find_channel(cm_id->context, cm_id);
2699 BUG_ON(!ch);
2700
2701 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2702 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2703
2704 ret = srpt_ch_qp_rts(ch, ch->qp);
2705
2706 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2707 wait_list) {
2708 list_del(&ioctx->wait_list);
2709 srpt_handle_new_iu(ch, ioctx, NULL);
2710 }
2711 if (ret)
2712 srpt_close_ch(ch);
2713 }
2714}
2715
2716static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2717{
2718 printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
2719 srpt_drain_channel(cm_id);
2720}
2721
2722static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2723{
2724 printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
2725 srpt_drain_channel(cm_id);
2726}
2727
2728/**
2729 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2730 */
2731static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2732{
2733 struct srpt_rdma_ch *ch;
2734 unsigned long flags;
2735 bool send_drep = false;
2736
2737 ch = srpt_find_channel(cm_id->context, cm_id);
2738 BUG_ON(!ch);
2739
2740 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2741
2742 spin_lock_irqsave(&ch->spinlock, flags);
2743 switch (ch->state) {
2744 case CH_CONNECTING:
2745 case CH_LIVE:
2746 send_drep = true;
2747 ch->state = CH_DISCONNECTING;
2748 break;
2749 case CH_DISCONNECTING:
2750 case CH_DRAINING:
2751 case CH_RELEASING:
2752 WARN(true, "unexpected channel state %d\n", ch->state);
2753 break;
2754 }
2755 spin_unlock_irqrestore(&ch->spinlock, flags);
2756
2757 if (send_drep) {
2758 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2759 printk(KERN_ERR "Sending IB DREP failed.\n");
2760 printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
2761 ch->sess_name);
2762 }
2763}
2764
2765/**
2766 * srpt_cm_drep_recv() - Process reception of a DREP message.
2767 */
2768static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2769{
2770 printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
2771 cm_id);
2772 srpt_drain_channel(cm_id);
2773}
2774
2775/**
2776 * srpt_cm_handler() - IB connection manager callback function.
2777 *
2778 * A non-zero return value will cause the caller to destroy the CM ID.
2779 *
2780 * Note: srpt_cm_handler() must only return a non-zero value when transferring
2781 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2782 * a non-zero value in any other case will trigger a race with the
2783 * ib_destroy_cm_id() call in srpt_release_channel().
2784 */
2785static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2786{
2787 int ret;
2788
2789 ret = 0;
2790 switch (event->event) {
2791 case IB_CM_REQ_RECEIVED:
2792 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2793 event->private_data);
2794 break;
2795 case IB_CM_REJ_RECEIVED:
2796 srpt_cm_rej_recv(cm_id);
2797 break;
2798 case IB_CM_RTU_RECEIVED:
2799 case IB_CM_USER_ESTABLISHED:
2800 srpt_cm_rtu_recv(cm_id);
2801 break;
2802 case IB_CM_DREQ_RECEIVED:
2803 srpt_cm_dreq_recv(cm_id);
2804 break;
2805 case IB_CM_DREP_RECEIVED:
2806 srpt_cm_drep_recv(cm_id);
2807 break;
2808 case IB_CM_TIMEWAIT_EXIT:
2809 srpt_cm_timewait_exit(cm_id);
2810 break;
2811 case IB_CM_REP_ERROR:
2812 srpt_cm_rep_error(cm_id);
2813 break;
2814 case IB_CM_DREQ_ERROR:
2815 printk(KERN_INFO "Received IB DREQ ERROR event.\n");
2816 break;
2817 case IB_CM_MRA_RECEIVED:
2818 printk(KERN_INFO "Received IB MRA event\n");
2819 break;
2820 default:
2821 printk(KERN_ERR "received unrecognized IB CM event %d\n",
2822 event->event);
2823 break;
2824 }
2825
2826 return ret;
2827}
2828
2829/**
2830 * srpt_perform_rdmas() - Perform IB RDMA.
2831 *
2832 * Returns zero upon success or a negative number upon failure.
2833 */
2834static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2835 struct srpt_send_ioctx *ioctx)
2836{
2837 struct ib_send_wr wr;
2838 struct ib_send_wr *bad_wr;
2839 struct rdma_iu *riu;
2840 int i;
2841 int ret;
2842 int sq_wr_avail;
2843 enum dma_data_direction dir;
2844 const int n_rdma = ioctx->n_rdma;
2845
2846 dir = ioctx->cmd.data_direction;
2847 if (dir == DMA_TO_DEVICE) {
2848 /* write */
2849 ret = -ENOMEM;
2850 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2851 if (sq_wr_avail < 0) {
2852 printk(KERN_WARNING "IB send queue full (needed %d)\n",
2853 n_rdma);
2854 goto out;
2855 }
2856 }
2857
2858 ioctx->rdma_aborted = false;
2859 ret = 0;
2860 riu = ioctx->rdma_ius;
2861 memset(&wr, 0, sizeof wr);
2862
2863 for (i = 0; i < n_rdma; ++i, ++riu) {
2864 if (dir == DMA_FROM_DEVICE) {
2865 wr.opcode = IB_WR_RDMA_WRITE;
2866 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2867 SRPT_RDMA_WRITE_LAST :
2868 SRPT_RDMA_MID,
2869 ioctx->ioctx.index);
2870 } else {
2871 wr.opcode = IB_WR_RDMA_READ;
2872 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2873 SRPT_RDMA_READ_LAST :
2874 SRPT_RDMA_MID,
2875 ioctx->ioctx.index);
2876 }
2877 wr.next = NULL;
2878 wr.wr.rdma.remote_addr = riu->raddr;
2879 wr.wr.rdma.rkey = riu->rkey;
2880 wr.num_sge = riu->sge_cnt;
2881 wr.sg_list = riu->sge;
2882
2883 /* only get completion event for the last rdma write */
2884 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2885 wr.send_flags = IB_SEND_SIGNALED;
2886
2887 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2888 if (ret)
2889 break;
2890 }
2891
2892 if (ret)
2893		printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d\n",
2894 __func__, __LINE__, ret, i, n_rdma);
2895 if (ret && i > 0) {
2896 wr.num_sge = 0;
2897 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2898 wr.send_flags = IB_SEND_SIGNALED;
2899 while (ch->state == CH_LIVE &&
2900 ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
2901			printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]\n",
2902 ioctx->ioctx.index);
2903 msleep(1000);
2904 }
2905 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
2906			printk(KERN_INFO "Waiting until RDMA abort finished [%d]\n",
2907 ioctx->ioctx.index);
2908 msleep(1000);
2909 }
2910 }
2911out:
2912 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2913 atomic_add(n_rdma, &ch->sq_wr_avail);
2914 return ret;
2915}
2916
2917/**
2918 * srpt_xfer_data() - Start data transfer between initiator and target.
2919 */
2920static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2921 struct srpt_send_ioctx *ioctx)
2922{
2923 int ret;
2924
2925 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2926 if (ret) {
2927 printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2928 goto out;
2929 }
2930
2931 ret = srpt_perform_rdmas(ch, ioctx);
2932 if (ret) {
2933 if (ret == -EAGAIN || ret == -ENOMEM)
2934 printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
2935 __func__, __LINE__, ret);
2936 else
2937 printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
2938 __func__, __LINE__, ret);
2939 goto out_unmap;
2940 }
2941
2942out:
2943 return ret;
2944out_unmap:
2945 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2946 goto out;
2947}
2948
2949static int srpt_write_pending_status(struct se_cmd *se_cmd)
2950{
2951 struct srpt_send_ioctx *ioctx;
2952
2953 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2954 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2955}
2956
2957/*
2958 * srpt_write_pending() - Start data transfer from initiator to target (write).
2959 */
2960static int srpt_write_pending(struct se_cmd *se_cmd)
2961{
2962 struct srpt_rdma_ch *ch;
2963 struct srpt_send_ioctx *ioctx;
2964 enum srpt_command_state new_state;
2965 enum rdma_ch_state ch_state;
2966 int ret;
2967
2968 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2969
2970 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2971 WARN_ON(new_state == SRPT_STATE_DONE);
2972
2973 ch = ioctx->ch;
2974 BUG_ON(!ch);
2975
2976 ch_state = srpt_get_ch_state(ch);
2977 switch (ch_state) {
2978 case CH_CONNECTING:
2979 WARN(true, "unexpected channel state %d\n", ch_state);
2980 ret = -EINVAL;
2981 goto out;
2982 case CH_LIVE:
2983 break;
2984 case CH_DISCONNECTING:
2985 case CH_DRAINING:
2986 case CH_RELEASING:
2987 pr_debug("cmd with tag %lld: channel disconnecting\n",
2988 ioctx->tag);
2989 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2990 ret = -EINVAL;
2991 goto out;
2992 }
2993 ret = srpt_xfer_data(ch, ioctx);
2994
2995out:
2996 return ret;
2997}
2998
2999static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
3000{
3001 switch (tcm_mgmt_status) {
3002 case TMR_FUNCTION_COMPLETE:
3003 return SRP_TSK_MGMT_SUCCESS;
3004 case TMR_FUNCTION_REJECTED:
3005 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
3006 }
3007 return SRP_TSK_MGMT_FAILED;
3008}
3009
3010/**
3011 * srpt_queue_response() - Transmits the response to a SCSI command.
3012 *
3013 * Callback function called by the TCM core. Must not block since it can be
3014 * invoked in the context of the IB completion handler.
3015 */
3016static int srpt_queue_response(struct se_cmd *cmd)
3017{
3018 struct srpt_rdma_ch *ch;
3019 struct srpt_send_ioctx *ioctx;
3020 enum srpt_command_state state;
3021 unsigned long flags;
3022 int ret;
3023 enum dma_data_direction dir;
3024 int resp_len;
3025 u8 srp_tm_status;
3026
3027 ret = 0;
3028
3029 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3030 ch = ioctx->ch;
3031 BUG_ON(!ch);
3032
3033 spin_lock_irqsave(&ioctx->spinlock, flags);
3034 state = ioctx->state;
3035 switch (state) {
3036 case SRPT_STATE_NEW:
3037 case SRPT_STATE_DATA_IN:
3038 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
3039 break;
3040 case SRPT_STATE_MGMT:
3041 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
3042 break;
3043 default:
3044 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3045 ch, ioctx->ioctx.index, ioctx->state);
3046 break;
3047 }
3048 spin_unlock_irqrestore(&ioctx->spinlock, flags);
3049
3050 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
3051 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
3052 atomic_inc(&ch->req_lim_delta);
3053 srpt_abort_cmd(ioctx);
3054 goto out;
3055 }
3056
3057 dir = ioctx->cmd.data_direction;
3058
3059 /* For read commands, transfer the data to the initiator. */
3060 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
3061 !ioctx->queue_status_only) {
3062 ret = srpt_xfer_data(ch, ioctx);
3063 if (ret) {
3064 printk(KERN_ERR "xfer_data failed for tag %llu\n",
3065 ioctx->tag);
3066 goto out;
3067 }
3068 }
3069
3070 if (state != SRPT_STATE_MGMT)
3071 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3072 cmd->scsi_status);
3073 else {
3074 srp_tm_status
3075 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
3076 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3077 ioctx->tag);
3078 }
3079 ret = srpt_post_send(ch, ioctx, resp_len);
3080 if (ret) {
3081 printk(KERN_ERR "sending cmd response failed for tag %llu\n",
3082 ioctx->tag);
3083 srpt_unmap_sg_to_ib_sge(ch, ioctx);
3084 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
3085 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
3086 }
3087
3088out:
3089 return ret;
3090}
3091
3092static int srpt_queue_status(struct se_cmd *cmd)
3093{
3094 struct srpt_send_ioctx *ioctx;
3095
3096 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3097 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
3098 if (cmd->se_cmd_flags &
3099 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
3100 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
3101 ioctx->queue_status_only = true;
3102 return srpt_queue_response(cmd);
3103}
3104
3105static void srpt_refresh_port_work(struct work_struct *work)
3106{
3107 struct srpt_port *sport = container_of(work, struct srpt_port, work);
3108
3109 srpt_refresh_port(sport);
3110}
3111
3112static int srpt_ch_list_empty(struct srpt_device *sdev)
3113{
3114 int res;
3115
3116 spin_lock_irq(&sdev->spinlock);
3117 res = list_empty(&sdev->rch_list);
3118 spin_unlock_irq(&sdev->spinlock);
3119
3120 return res;
3121}
3122
3123/**
3124 * srpt_release_sdev() - Free the channel resources associated with a target.
3125 */
3126static int srpt_release_sdev(struct srpt_device *sdev)
3127{
3128 struct srpt_rdma_ch *ch, *tmp_ch;
3129 int res;
3130
3131 WARN_ON_ONCE(irqs_disabled());
3132
3133 BUG_ON(!sdev);
3134
3135 spin_lock_irq(&sdev->spinlock);
3136 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3137 __srpt_close_ch(ch);
3138 spin_unlock_irq(&sdev->spinlock);
3139
3140 res = wait_event_interruptible(sdev->ch_releaseQ,
3141 srpt_ch_list_empty(sdev));
3142 if (res)
3143 printk(KERN_ERR "%s: interrupted.\n", __func__);
3144
3145 return 0;
3146}
3147
3148static struct srpt_port *__srpt_lookup_port(const char *name)
3149{
3150 struct ib_device *dev;
3151 struct srpt_device *sdev;
3152 struct srpt_port *sport;
3153 int i;
3154
3155 list_for_each_entry(sdev, &srpt_dev_list, list) {
3156 dev = sdev->device;
3157 if (!dev)
3158 continue;
3159
3160 for (i = 0; i < dev->phys_port_cnt; i++) {
3161 sport = &sdev->port[i];
3162
3163 if (!strcmp(sport->port_guid, name))
3164 return sport;
3165 }
3166 }
3167
3168 return NULL;
3169}
3170
3171static struct srpt_port *srpt_lookup_port(const char *name)
3172{
3173 struct srpt_port *sport;
3174
3175 spin_lock(&srpt_dev_lock);
3176 sport = __srpt_lookup_port(name);
3177 spin_unlock(&srpt_dev_lock);
3178
3179 return sport;
3180}
3181
3182/**
3183 * srpt_add_one() - InfiniBand device addition callback function.
3184 */
3185static void srpt_add_one(struct ib_device *device)
3186{
3187 struct srpt_device *sdev;
3188 struct srpt_port *sport;
3189 struct ib_srq_init_attr srq_attr;
3190 int i;
3191
3192 pr_debug("device = %p, device->dma_ops = %p\n", device,
3193 device->dma_ops);
3194
3195 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3196 if (!sdev)
3197 goto err;
3198
3199 sdev->device = device;
3200 INIT_LIST_HEAD(&sdev->rch_list);
3201 init_waitqueue_head(&sdev->ch_releaseQ);
3202 spin_lock_init(&sdev->spinlock);
3203
3204 if (ib_query_device(device, &sdev->dev_attr))
3205 goto free_dev;
3206
3207 sdev->pd = ib_alloc_pd(device);
3208 if (IS_ERR(sdev->pd))
3209 goto free_dev;
3210
3211 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
3212 if (IS_ERR(sdev->mr))
3213 goto err_pd;
3214
3215 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
3216
3217 srq_attr.event_handler = srpt_srq_event;
3218 srq_attr.srq_context = (void *)sdev;
3219 srq_attr.attr.max_wr = sdev->srq_size;
3220 srq_attr.attr.max_sge = 1;
3221 srq_attr.attr.srq_limit = 0;
3222
3223 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3224 if (IS_ERR(sdev->srq))
3225 goto err_mr;
3226
3227 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3228 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
3229 device->name);
3230
3231 if (!srpt_service_guid)
3232 srpt_service_guid = be64_to_cpu(device->node_guid);
3233
3234 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3235 if (IS_ERR(sdev->cm_id))
3236 goto err_srq;
3237
3238 /* print out target login information */
3239 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3240 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3241 srpt_service_guid, srpt_service_guid);
3242
3243 /*
3244	 * We do not have a consistent service_id (i.e. also the id_ext of the
3245	 * target_id) to identify this target. We currently use the GUID of the
3246	 * first HCA in the system as the service_id; therefore, the target_id
3247	 * will change if this HCA goes bad and is replaced by a different HCA.
3248 */
3249 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
3250 goto err_cm;
3251
3252 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3253 srpt_event_handler);
3254 if (ib_register_event_handler(&sdev->event_handler))
3255 goto err_cm;
3256
3257 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3258 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3259 sizeof(*sdev->ioctx_ring[0]),
3260 srp_max_req_size, DMA_FROM_DEVICE);
3261 if (!sdev->ioctx_ring)
3262 goto err_event;
3263
3264 for (i = 0; i < sdev->srq_size; ++i)
3265 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3266
3267 WARN_ON(sdev->device->phys_port_cnt
3268		> ARRAY_SIZE(sdev->port));
3269
3270 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3271 sport = &sdev->port[i - 1];
3272 sport->sdev = sdev;
3273 sport->port = i;
3274 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3275 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3276 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3277 INIT_WORK(&sport->work, srpt_refresh_port_work);
3278 INIT_LIST_HEAD(&sport->port_acl_list);
3279 spin_lock_init(&sport->port_acl_lock);
3280
3281 if (srpt_refresh_port(sport)) {
3282 printk(KERN_ERR "MAD registration failed for %s-%d.\n",
3283 srpt_sdev_name(sdev), i);
3284 goto err_ring;
3285 }
3286 snprintf(sport->port_guid, sizeof(sport->port_guid),
3287 "0x%016llx%016llx",
3288 be64_to_cpu(sport->gid.global.subnet_prefix),
3289 be64_to_cpu(sport->gid.global.interface_id));
3290 }
3291
3292 spin_lock(&srpt_dev_lock);
3293 list_add_tail(&sdev->list, &srpt_dev_list);
3294 spin_unlock(&srpt_dev_lock);
3295
3296out:
3297 ib_set_client_data(device, &srpt_client, sdev);
3298 pr_debug("added %s.\n", device->name);
3299 return;
3300
3301err_ring:
3302 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3303 sdev->srq_size, srp_max_req_size,
3304 DMA_FROM_DEVICE);
3305err_event:
3306 ib_unregister_event_handler(&sdev->event_handler);
3307err_cm:
3308 ib_destroy_cm_id(sdev->cm_id);
3309err_srq:
3310 ib_destroy_srq(sdev->srq);
3311err_mr:
3312 ib_dereg_mr(sdev->mr);
3313err_pd:
3314 ib_dealloc_pd(sdev->pd);
3315free_dev:
3316 kfree(sdev);
3317err:
3318 sdev = NULL;
3319 printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
3320 goto out;
3321}
3322
3323/**
3324 * srpt_remove_one() - InfiniBand device removal callback function.
3325 */
3326static void srpt_remove_one(struct ib_device *device)
3327{
3328 struct srpt_device *sdev;
3329 int i;
3330
3331 sdev = ib_get_client_data(device, &srpt_client);
3332 if (!sdev) {
3333 printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
3334 device->name);
3335 return;
3336 }
3337
3338 srpt_unregister_mad_agent(sdev);
3339
3340 ib_unregister_event_handler(&sdev->event_handler);
3341
3342 /* Cancel any work queued by the just unregistered IB event handler. */
3343 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3344 cancel_work_sync(&sdev->port[i].work);
3345
3346 ib_destroy_cm_id(sdev->cm_id);
3347
3348 /*
3349 * Unregistering a target must happen after destroying sdev->cm_id
3350 * such that no new SRP_LOGIN_REQ information units can arrive while
3351 * destroying the target.
3352 */
3353 spin_lock(&srpt_dev_lock);
3354 list_del(&sdev->list);
3355 spin_unlock(&srpt_dev_lock);
3356 srpt_release_sdev(sdev);
3357
3358 ib_destroy_srq(sdev->srq);
3359 ib_dereg_mr(sdev->mr);
3360 ib_dealloc_pd(sdev->pd);
3361
3362 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3363 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3364 sdev->ioctx_ring = NULL;
3365 kfree(sdev);
3366}
3367
3368static struct ib_client srpt_client = {
3369 .name = DRV_NAME,
3370 .add = srpt_add_one,
3371 .remove = srpt_remove_one
3372};
3373
3374static int srpt_check_true(struct se_portal_group *se_tpg)
3375{
3376 return 1;
3377}
3378
3379static int srpt_check_false(struct se_portal_group *se_tpg)
3380{
3381 return 0;
3382}
3383
3384static char *srpt_get_fabric_name(void)
3385{
3386 return "srpt";
3387}
3388
3389static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
3390{
3391 return SCSI_TRANSPORTID_PROTOCOLID_SRP;
3392}
3393
3394static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3395{
3396 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3397
3398 return sport->port_guid;
3399}
3400
3401static u16 srpt_get_tag(struct se_portal_group *tpg)
3402{
3403 return 1;
3404}
3405
3406static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
3407{
3408 return 1;
3409}
3410
3411static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
3412 struct se_node_acl *se_nacl,
3413 struct t10_pr_registration *pr_reg,
3414 int *format_code, unsigned char *buf)
3415{
3416 struct srpt_node_acl *nacl;
3417 struct spc_rdma_transport_id *tr_id;
3418
3419 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3420 tr_id = (void *)buf;
3421 tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
3422 memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
3423 return sizeof(*tr_id);
3424}
3425
3426static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
3427 struct se_node_acl *se_nacl,
3428 struct t10_pr_registration *pr_reg,
3429 int *format_code)
3430{
3431 *format_code = 0;
3432 return sizeof(struct spc_rdma_transport_id);
3433}
3434
3435static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
3436 const char *buf, u32 *out_tid_len,
3437 char **port_nexus_ptr)
3438{
3439 struct spc_rdma_transport_id *tr_id;
3440
3441 *port_nexus_ptr = NULL;
3442 *out_tid_len = sizeof(struct spc_rdma_transport_id);
3443 tr_id = (void *)buf;
3444 return (char *)tr_id->i_port_id;
3445}
3446
3447static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
3448{
3449 struct srpt_node_acl *nacl;
3450
3451 nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
3452 if (!nacl) {
3453		printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
3454 return NULL;
3455 }
3456
3457 return &nacl->nacl;
3458}
3459
3460static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
3461 struct se_node_acl *se_nacl)
3462{
3463 struct srpt_node_acl *nacl;
3464
3465 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3466 kfree(nacl);
3467}
3468
3469static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3470{
3471 return 1;
3472}
3473
3474static void srpt_release_cmd(struct se_cmd *se_cmd)
3475{
3476}
3477
3478/**
3479 * srpt_shutdown_session() - Whether or not a session may be shut down.
3480 */
3481static int srpt_shutdown_session(struct se_session *se_sess)
3482{
3483 return true;
3484}
3485
3486/**
3487 * srpt_close_session() - Forcibly close a session.
3488 *
3489 * Callback function invoked by the TCM core to clean up sessions associated
3490 * with a node ACL when the user invokes
3491 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3492 */
3493static void srpt_close_session(struct se_session *se_sess)
3494{
3495 DECLARE_COMPLETION_ONSTACK(release_done);
3496 struct srpt_rdma_ch *ch;
3497 struct srpt_device *sdev;
3498 int res;
3499
3500 ch = se_sess->fabric_sess_ptr;
3501 WARN_ON(ch->sess != se_sess);
3502
3503 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3504
3505 sdev = ch->sport->sdev;
3506 spin_lock_irq(&sdev->spinlock);
3507 BUG_ON(ch->release_done);
3508 ch->release_done = &release_done;
3509 __srpt_close_ch(ch);
3510 spin_unlock_irq(&sdev->spinlock);
3511
3512 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3513 WARN_ON(res <= 0);
3514}
3515
3516/**
3517 * To do: Find out whether stop_session() has a meaning for transports
3518 * other than iSCSI.
3519 */
3520static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
3521 int conn_sleep)
3522{
3523}
3524
3525static void srpt_reset_nexus(struct se_session *sess)
3526{
3527 printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
3528}
3529
3530static int srpt_sess_logged_in(struct se_session *se_sess)
3531{
3532 return true;
3533}
3534
3535/**
3536 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3537 *
3538 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3539 * This object represents an arbitrary integer used to uniquely identify a
3540 * particular attached remote initiator port to a particular SCSI target port
3541 * within a particular SCSI target device within a particular SCSI instance.
3542 */
3543static u32 srpt_sess_get_index(struct se_session *se_sess)
3544{
3545 return 0;
3546}
3547
3548static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3549{
3550}
3551
3552static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
3553{
3554 struct srpt_send_ioctx *ioctx;
3555
3556 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3557 return ioctx->tag;
3558}
3559
3560/* Note: only used from inside debug printk's by the TCM core. */
3561static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3562{
3563 struct srpt_send_ioctx *ioctx;
3564
3565 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3566 return srpt_get_cmd_state(ioctx);
3567}
3568
3569static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
3570{
3571 return 0;
3572}
3573
3574static u16 srpt_get_fabric_sense_len(void)
3575{
3576 return 0;
3577}
3578
3579static int srpt_is_state_remove(struct se_cmd *se_cmd)
3580{
3581 return 0;
3582}
3583
3584/**
3585 * srpt_parse_i_port_id() - Parse an initiator port ID.
3586 * @name: ASCII representation of a 128-bit initiator port ID.
3587 * @i_port_id: Binary 128-bit port ID.
3588 */
3589static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3590{
3591 const char *p;
3592 unsigned len, count, leading_zero_bytes;
3593 int ret, rc;
3594
3595 p = name;
3596 if (strnicmp(p, "0x", 2) == 0)
3597 p += 2;
3598 ret = -EINVAL;
3599 len = strlen(p);
3600 if (len % 2)
3601 goto out;
3602 count = min(len / 2, 16U);
3603 leading_zero_bytes = 16 - count;
3604 memset(i_port_id, 0, leading_zero_bytes);
3605 rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3606 if (rc < 0)
3607 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
3608	ret = rc < 0 ? -EINVAL : 0;
3609out:
3610 return ret;
3611}
3612
3613/*
3614 * configfs callback function invoked for
3615 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3616 */
3617static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
3618 struct config_group *group,
3619 const char *name)
3620{
3621 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3622 struct se_node_acl *se_nacl, *se_nacl_new;
3623 struct srpt_node_acl *nacl;
3624 int ret = 0;
3625 u32 nexus_depth = 1;
3626 u8 i_port_id[16];
3627
3628 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3629 printk(KERN_ERR "invalid initiator port ID %s\n", name);
3630 ret = -EINVAL;
3631 goto err;
3632 }
3633
3634 se_nacl_new = srpt_alloc_fabric_acl(tpg);
3635 if (!se_nacl_new) {
3636 ret = -ENOMEM;
3637 goto err;
3638 }
3639 /*
3640 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
3641 * when converting a node ACL from demo mode to explicit mode.
3642 */
3643 se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
3644 nexus_depth);
3645 if (IS_ERR(se_nacl)) {
3646 ret = PTR_ERR(se_nacl);
3647 goto err;
3648 }
3649 /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
3650 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3651 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
3652 nacl->sport = sport;
3653
3654 spin_lock_irq(&sport->port_acl_lock);
3655 list_add_tail(&nacl->list, &sport->port_acl_list);
3656 spin_unlock_irq(&sport->port_acl_lock);
3657
3658 return se_nacl;
3659err:
3660 return ERR_PTR(ret);
3661}
3662
3663/*
3664 * configfs callback function invoked for
3665 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3666 */
3667static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
3668{
3669 struct srpt_node_acl *nacl;
3670 struct srpt_device *sdev;
3671 struct srpt_port *sport;
3672
3673 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3674 sport = nacl->sport;
3675 sdev = sport->sdev;
3676 spin_lock_irq(&sport->port_acl_lock);
3677 list_del(&nacl->list);
3678 spin_unlock_irq(&sport->port_acl_lock);
3679 core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
3680 srpt_release_fabric_acl(NULL, se_nacl);
3681}
3682
3683static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
3684 struct se_portal_group *se_tpg,
3685 char *page)
3686{
3687 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3688
3689 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3690}
3691
3692static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3693 struct se_portal_group *se_tpg,
3694 const char *page,
3695 size_t count)
3696{
3697 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3698 unsigned long val;
3699 int ret;
3700
3701 ret = strict_strtoul(page, 0, &val);
3702 if (ret < 0) {
3703 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3704 return -EINVAL;
3705 }
3706 if (val > MAX_SRPT_RDMA_SIZE) {
3707 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3708 MAX_SRPT_RDMA_SIZE);
3709 return -EINVAL;
3710 }
3711 if (val < DEFAULT_MAX_RDMA_SIZE) {
3712 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3713 val, DEFAULT_MAX_RDMA_SIZE);
3714 return -EINVAL;
3715 }
3716 sport->port_attrib.srp_max_rdma_size = val;
3717
3718 return count;
3719}
3720
3721TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
3722
3723static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
3724 struct se_portal_group *se_tpg,
3725 char *page)
3726{
3727 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3728
3729 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3730}
3731
3732static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3733 struct se_portal_group *se_tpg,
3734 const char *page,
3735 size_t count)
3736{
3737 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3738 unsigned long val;
3739 int ret;
3740
3741 ret = strict_strtoul(page, 0, &val);
3742 if (ret < 0) {
3743 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3744 return -EINVAL;
3745 }
3746 if (val > MAX_SRPT_RSP_SIZE) {
3747 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3748 MAX_SRPT_RSP_SIZE);
3749 return -EINVAL;
3750 }
3751 if (val < MIN_MAX_RSP_SIZE) {
3752 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3753 MIN_MAX_RSP_SIZE);
3754 return -EINVAL;
3755 }
3756 sport->port_attrib.srp_max_rsp_size = val;
3757
3758 return count;
3759}
3760
3761TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
3762
3763static ssize_t srpt_tpg_attrib_show_srp_sq_size(
3764 struct se_portal_group *se_tpg,
3765 char *page)
3766{
3767 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3768
3769 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3770}
3771
3772static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3773 struct se_portal_group *se_tpg,
3774 const char *page,
3775 size_t count)
3776{
3777 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3778 unsigned long val;
3779 int ret;
3780
3781 ret = strict_strtoul(page, 0, &val);
3782 if (ret < 0) {
3783 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3784 return -EINVAL;
3785 }
3786 if (val > MAX_SRPT_SRQ_SIZE) {
3787 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3788 MAX_SRPT_SRQ_SIZE);
3789 return -EINVAL;
3790 }
3791 if (val < MIN_SRPT_SRQ_SIZE) {
3792 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3793 MIN_SRPT_SRQ_SIZE);
3794 return -EINVAL;
3795 }
3796 sport->port_attrib.srp_sq_size = val;
3797
3798 return count;
3799}
3800
3801TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
3802
3803static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3804 &srpt_tpg_attrib_srp_max_rdma_size.attr,
3805 &srpt_tpg_attrib_srp_max_rsp_size.attr,
3806 &srpt_tpg_attrib_srp_sq_size.attr,
3807 NULL,
3808};
3809
3810static ssize_t srpt_tpg_show_enable(
3811 struct se_portal_group *se_tpg,
3812 char *page)
3813{
3814 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3815
3816 return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
3817}
3818
3819static ssize_t srpt_tpg_store_enable(
3820 struct se_portal_group *se_tpg,
3821 const char *page,
3822 size_t count)
3823{
3824 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3825 unsigned long tmp;
3826 int ret;
3827
3828 ret = strict_strtoul(page, 0, &tmp);
3829 if (ret < 0) {
3830 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3831 return -EINVAL;
3832 }
3833
3834 if ((tmp != 0) && (tmp != 1)) {
3835 printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3836 return -EINVAL;
3837 }
3838 if (tmp == 1)
3839 sport->enabled = true;
3840 else
3841 sport->enabled = false;
3842
3843 return count;
3844}
3845
3846TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
3847
3848static struct configfs_attribute *srpt_tpg_attrs[] = {
3849 &srpt_tpg_enable.attr,
3850 NULL,
3851};
3852
3853/*
3854 * configfs callback invoked for
3855 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3856 */
3857static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3858 struct config_group *group,
3859 const char *name)
3860{
3861 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3862 int res;
3863
3864 /* Initialize sport->port_wwn and sport->port_tpg_1 */
3865 res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
3866 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
3867 if (res)
3868 return ERR_PTR(res);
3869
3870 return &sport->port_tpg_1;
3871}
3872
3873/*
3874 * configfs callback invoked for
3875 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3876 */
3877static void srpt_drop_tpg(struct se_portal_group *tpg)
3878{
3879 struct srpt_port *sport = container_of(tpg,
3880 struct srpt_port, port_tpg_1);
3881
3882 sport->enabled = false;
3883 core_tpg_deregister(&sport->port_tpg_1);
3884}
3885
3886/*
3887 * configfs callback invoked for
3888 * mkdir /sys/kernel/config/target/$driver/$port
3889 */
3890static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3891 struct config_group *group,
3892 const char *name)
3893{
3894 struct srpt_port *sport;
3895 int ret;
3896
3897 sport = srpt_lookup_port(name);
3898 pr_debug("make_tport(%s)\n", name);
3899 ret = -EINVAL;
3900 if (!sport)
3901 goto err;
3902
3903 return &sport->port_wwn;
3904
3905err:
3906 return ERR_PTR(ret);
3907}
3908
3909/*
3910 * configfs callback invoked for
3911 * rmdir /sys/kernel/config/target/$driver/$port
3912 */
3913static void srpt_drop_tport(struct se_wwn *wwn)
3914{
3915 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3916
3917 pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3918}
3919
3920static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
3921 char *buf)
3922{
3923 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3924}
3925
3926TF_WWN_ATTR_RO(srpt, version);
3927
3928static struct configfs_attribute *srpt_wwn_attrs[] = {
3929 &srpt_wwn_version.attr,
3930 NULL,
3931};
3932
3933static struct target_core_fabric_ops srpt_template = {
3934 .get_fabric_name = srpt_get_fabric_name,
3935 .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
3936 .tpg_get_wwn = srpt_get_fabric_wwn,
3937 .tpg_get_tag = srpt_get_tag,
3938 .tpg_get_default_depth = srpt_get_default_depth,
3939 .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
3940 .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
3941 .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
3942 .tpg_check_demo_mode = srpt_check_false,
3943 .tpg_check_demo_mode_cache = srpt_check_true,
3944 .tpg_check_demo_mode_write_protect = srpt_check_true,
3945 .tpg_check_prod_mode_write_protect = srpt_check_false,
3946 .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
3947 .tpg_release_fabric_acl = srpt_release_fabric_acl,
3948 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3949 .release_cmd = srpt_release_cmd,
3950 .check_stop_free = srpt_check_stop_free,
3951 .shutdown_session = srpt_shutdown_session,
3952 .close_session = srpt_close_session,
3953 .stop_session = srpt_stop_session,
3954 .fall_back_to_erl0 = srpt_reset_nexus,
3955 .sess_logged_in = srpt_sess_logged_in,
3956 .sess_get_index = srpt_sess_get_index,
3957 .sess_get_initiator_sid = NULL,
3958 .write_pending = srpt_write_pending,
3959 .write_pending_status = srpt_write_pending_status,
3960 .set_default_node_attributes = srpt_set_default_node_attrs,
3961 .get_task_tag = srpt_get_task_tag,
3962 .get_cmd_state = srpt_get_tcm_cmd_state,
3963 .queue_data_in = srpt_queue_response,
3964 .queue_status = srpt_queue_status,
3965 .queue_tm_rsp = srpt_queue_response,
3966 .get_fabric_sense_len = srpt_get_fabric_sense_len,
3967 .set_fabric_sense_len = srpt_set_fabric_sense_len,
3968 .is_state_remove = srpt_is_state_remove,
3969 /*
3970 * Setup function pointers for generic logic in
3971 * target_core_fabric_configfs.c
3972 */
3973 .fabric_make_wwn = srpt_make_tport,
3974 .fabric_drop_wwn = srpt_drop_tport,
3975 .fabric_make_tpg = srpt_make_tpg,
3976 .fabric_drop_tpg = srpt_drop_tpg,
3977 .fabric_post_link = NULL,
3978 .fabric_pre_unlink = NULL,
3979 .fabric_make_np = NULL,
3980 .fabric_drop_np = NULL,
3981 .fabric_make_nodeacl = srpt_make_nodeacl,
3982 .fabric_drop_nodeacl = srpt_drop_nodeacl,
3983};
3984
3985/**
3986 * srpt_init_module() - Kernel module initialization.
3987 *
3988 * Note: Since ib_register_client() registers callback functions, and since at
3989 * least one of these callback functions (srpt_add_one()) calls target core
3990 * functions, this driver must be registered with the target core before
3991 * ib_register_client() is called.
3992 */
3993static int __init srpt_init_module(void)
3994{
3995 int ret;
3996
3997 ret = -EINVAL;
3998 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3999 printk(KERN_ERR "invalid value %d for kernel module parameter"
4000 " srp_max_req_size -- must be at least %d.\n",
4001 srp_max_req_size, MIN_MAX_REQ_SIZE);
4002 goto out;
4003 }
4004
4005 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
4006 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
4007 printk(KERN_ERR "invalid value %d for kernel module parameter"
4008 " srpt_srq_size -- must be in the range [%d..%d].\n",
4009 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
4010 goto out;
4011 }
4012
4013 spin_lock_init(&srpt_dev_lock);
4014 INIT_LIST_HEAD(&srpt_dev_list);
4015
4016 ret = -ENODEV;
4017 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
4018 if (!srpt_target) {
4019 printk(KERN_ERR "target_fabric_configfs_init() failed\n");
4020 goto out;
4021 }
4022
4023 srpt_target->tf_ops = srpt_template;
4024
4025 /* Enable SG chaining */
4026 srpt_target->tf_ops.task_sg_chaining = true;
4027
4028 /*
4029 * Set up default attribute lists.
4030 */
4031 srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
4032 srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
4033 srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
4034 srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
4035 srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
4036 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
4037 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
4038 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
4039 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
4040
4041 ret = target_fabric_configfs_register(srpt_target);
4042 if (ret < 0) {
4043 printk(KERN_ERR "target_fabric_configfs_register() failed: %d\n", ret);
4044 goto out_free_target;
4045 }
4046
4047 ret = ib_register_client(&srpt_client);
4048 if (ret) {
4049 printk(KERN_ERR "couldn't register IB client\n");
4050 goto out_unregister_target;
4051 }
4052
4053 return 0;
4054
4055out_unregister_target:
4056 target_fabric_configfs_deregister(srpt_target);
4057 srpt_target = NULL;
4058out_free_target:
4059 if (srpt_target)
4060 target_fabric_configfs_free(srpt_target);
4061out:
4062 return ret;
4063}
4064
4065static void __exit srpt_cleanup_module(void)
4066{
4067 ib_unregister_client(&srpt_client);
4068 target_fabric_configfs_deregister(srpt_target);
4069 srpt_target = NULL;
4070}
4071
4072module_init(srpt_init_module);
4073module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 000000000000..b4b4bbcd7f16
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef IB_SRPT_H
36#define IB_SRPT_H
37
38#include <linux/version.h>
39#include <linux/types.h>
40#include <linux/list.h>
41#include <linux/wait.h>
42
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_sa.h>
45#include <rdma/ib_cm.h>
46
47#include <scsi/srp.h>
48
49#include "ib_dm_mad.h"
50
51/*
52 * The prefix the ServiceName field must start with in the device management
53 * ServiceEntries attribute pair. See also the SRP specification.
54 */
55#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
56
57enum {
58 /*
59 * SRP IOControllerProfile attributes for SRP target ports that have
60 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
61 * in the SRP specification.
62 */
63 SRP_PROTOCOL = 0x0108,
64 SRP_PROTOCOL_VERSION = 0x0001,
65 SRP_IO_SUBCLASS = 0x609e,
66 SRP_SEND_TO_IOC = 0x01,
67 SRP_SEND_FROM_IOC = 0x02,
68 SRP_RDMA_READ_FROM_IOC = 0x08,
69 SRP_RDMA_WRITE_FROM_IOC = 0x20,
70
71 /*
72 * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
73 * specification.
74 */
75 SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
76 SRP_LOSOLNT = 0x10, /* logout solicited notification */
77 SRP_CRSOLNT = 0x20, /* credit request solicited notification */
78 SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
79
80 /*
81 * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
82 * 18 and 20 in the SRP specification.
83 */
84 SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
85 SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
86
87 /*
88 * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
89 * 16 and 22 in the SRP specification.
90 */
91 SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
92
93 /* See also table 24 in the SRP specification. */
94 SRP_TSK_MGMT_SUCCESS = 0x00,
95 SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
96 SRP_TSK_MGMT_FAILED = 0x05,
97
98 /* See also table 21 in the SRP specification. */
99 SRP_CMD_SIMPLE_Q = 0x0,
100 SRP_CMD_HEAD_OF_Q = 0x1,
101 SRP_CMD_ORDERED_Q = 0x2,
102 SRP_CMD_ACA = 0x4,
103
104 SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
105 SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
106 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
107
108 SRPT_DEF_SG_TABLESIZE = 128,
109 SRPT_DEF_SG_PER_WQE = 16,
110
111 MIN_SRPT_SQ_SIZE = 16,
112 DEF_SRPT_SQ_SIZE = 4096,
113 SRPT_RQ_SIZE = 128,
114 MIN_SRPT_SRQ_SIZE = 4,
115 DEFAULT_SRPT_SRQ_SIZE = 4095,
116 MAX_SRPT_SRQ_SIZE = 65535,
117 MAX_SRPT_RDMA_SIZE = 1U << 24,
118 MAX_SRPT_RSP_SIZE = 1024,
119
120 MIN_MAX_REQ_SIZE = 996,
121 DEFAULT_MAX_REQ_SIZE
122 = sizeof(struct srp_cmd)/*48*/
123 + sizeof(struct srp_indirect_buf)/*20*/
124 + 128 * sizeof(struct srp_direct_buf)/*16*/,
125
126 MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
127 DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
128
129 DEFAULT_MAX_RDMA_SIZE = 65536,
130};
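For reference, taking the element sizes noted in the inline comments at face value (48, 20 and 16 bytes), DEFAULT_MAX_REQ_SIZE evaluates to 48 + 20 + 128 * 16 = 2116 bytes, comfortably above MIN_MAX_REQ_SIZE (996), and MIN_MAX_RSP_SIZE to 36 + 4 = 40 bytes; these figures are derived from the comments above rather than stated elsewhere in the patch.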
131
132enum srpt_opcode {
133 SRPT_RECV,
134 SRPT_SEND,
135 SRPT_RDMA_MID,
136 SRPT_RDMA_ABORT,
137 SRPT_RDMA_READ_LAST,
138 SRPT_RDMA_WRITE_LAST,
139};
140
141static inline u64 encode_wr_id(u8 opcode, u32 idx)
142{
143 return ((u64)opcode << 32) | idx;
144}
145static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
146{
147 return wr_id >> 32;
148}
149static inline u32 idx_from_wr_id(u64 wr_id)
150{
151 return (u32)wr_id;
152}
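A small illustration (not part of the patch; the function name is hypothetical) of the wr_id packing used by the three helpers above: the opcode lives in the upper 32 bits and the ring index in the lower 32 bits, so both fields can be recovered from a completed work request's wr_id.

    /* Illustration only: round-trip a work request ID. */
    static inline void srpt_wr_id_example(void)
    {
            u64 wr_id = encode_wr_id(SRPT_RDMA_READ_LAST, 42);

            WARN_ON(opcode_from_wr_id(wr_id) != SRPT_RDMA_READ_LAST);
            WARN_ON(idx_from_wr_id(wr_id) != 42);
    }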
153
154struct rdma_iu {
155 u64 raddr;
156 u32 rkey;
157 struct ib_sge *sge;
158 u32 sge_cnt;
159 int mem_id;
160};
161
162/**
163 * enum srpt_command_state - SCSI command state managed by SRPT.
164 * @SRPT_STATE_NEW: New command arrived and is being processed.
165 * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
166 * for data arrival.
167 * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
168 * being processed.
169 * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
170 * @SRPT_STATE_MGMT: Processing a SCSI task management command.
171 * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
172 * @SRPT_STATE_DONE: Command processing finished successfully, command
173 * processing has been aborted or command processing
174 * failed.
175 */
176enum srpt_command_state {
177 SRPT_STATE_NEW = 0,
178 SRPT_STATE_NEED_DATA = 1,
179 SRPT_STATE_DATA_IN = 2,
180 SRPT_STATE_CMD_RSP_SENT = 3,
181 SRPT_STATE_MGMT = 4,
182 SRPT_STATE_MGMT_RSP_SENT = 5,
183 SRPT_STATE_DONE = 6,
184};
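Reading the states together with the kernel-doc above (an inference, not spelled out in the patch): a write command typically moves SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA -> SRPT_STATE_DATA_IN -> SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE, a read command goes straight from SRPT_STATE_NEW to SRPT_STATE_CMD_RSP_SENT, and a task management function uses the SRPT_STATE_MGMT / SRPT_STATE_MGMT_RSP_SENT pair instead.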
185
186/**
187 * struct srpt_ioctx - Shared SRPT I/O context information.
188 * @buf: Pointer to the buffer.
189 * @dma: DMA address of the buffer.
190 * @index: Index of the I/O context in its ioctx_ring array.
191 */
192struct srpt_ioctx {
193 void *buf;
194 dma_addr_t dma;
195 uint32_t index;
196};
197
198/**
199 * struct srpt_recv_ioctx - SRPT receive I/O context.
200 * @ioctx: See above.
201 * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
202 */
203struct srpt_recv_ioctx {
204 struct srpt_ioctx ioctx;
205 struct list_head wait_list;
206};
207
208/**
209 * struct srpt_send_ioctx - SRPT send I/O context.
210 * @ioctx: See above.
211 * @ch: Channel pointer.
212 * @free_list: Node in srpt_rdma_ch.free_list.
213 * @n_rbuf: Number of data buffers in the received SRP command.
214 * @rbufs: Pointer to SRP data buffer array.
215 * @single_rbuf: SRP data buffer if the command has only a single buffer.
216 * @sg: Pointer to sg-list associated with this I/O context.
217 * @sg_cnt: SG-list size.
218 * @mapped_sg_count: ib_dma_map_sg() return value.
219 * @n_rdma_ius: Number of elements in the rdma_ius array.
220 * @rdma_ius: Array with information about the RDMA mapping.
221 * @tag: Tag of the received SRP information unit.
222 * @spinlock: Protects 'state'.
223 * @state: I/O context state.
224 * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
225 * the already initiated transfers have finished.
226 * @cmd: Target core command data structure.
227 * @sense_data: SCSI sense data.
228 */
229struct srpt_send_ioctx {
230 struct srpt_ioctx ioctx;
231 struct srpt_rdma_ch *ch;
232 struct kref kref;
233 struct rdma_iu *rdma_ius;
234 struct srp_direct_buf *rbufs;
235 struct srp_direct_buf single_rbuf;
236 struct scatterlist *sg;
237 struct list_head free_list;
238 spinlock_t spinlock;
239 enum srpt_command_state state;
240 bool rdma_aborted;
241 struct se_cmd cmd;
242 struct completion tx_done;
243 u64 tag;
244 int sg_cnt;
245 int mapped_sg_count;
246 u16 n_rdma_ius;
247 u8 n_rdma;
248 u8 n_rbuf;
249 bool queue_status_only;
250 u8 sense_data[SCSI_SENSE_BUFFERSIZE];
251};
252
253/**
254 * enum rdma_ch_state - SRP channel state.
255 * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
256 * @CH_LIVE: QP is in RTS state.
257 * @CH_DISCONNECTING: DREQ has been received and the driver is waiting
258 * for DREP, or DREQ has been sent and the driver is
259 * waiting for DREP.
260 * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
261 * @CH_RELEASING: Last WQE event has been received; releasing resources.
262 */
263enum rdma_ch_state {
264 CH_CONNECTING,
265 CH_LIVE,
266 CH_DISCONNECTING,
267 CH_DRAINING,
268 CH_RELEASING
269};
270
271/**
272 * struct srpt_rdma_ch - RDMA channel.
273 * @wait_queue: Allows the kernel thread to wait for more work.
274 * @thread: Kernel thread that processes the IB queues associated with
275 * the channel.
276 * @cm_id: IB CM ID associated with the channel.
277 * @qp: IB queue pair used for communicating over this channel.
278 * @cq: IB completion queue for this channel.
279 * @rq_size: IB receive queue size.
280 * @rsp_size: IB response message size in bytes.
281 * @sq_wr_avail: number of work requests available in the send queue.
282 * @sport: pointer to the information of the HCA port used by this
283 * channel.
284 * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
285 * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
286 * @max_ti_iu_len: maximum target-to-initiator information unit length.
287 * @req_lim: request limit: maximum number of requests that may be sent
288 * by the initiator without having received a response.
289 * @req_lim_delta: Number of credits not yet sent back to the initiator.
290 * @spinlock: Protects free_list and state.
291 * @free_list: Head of list with free send I/O contexts.
292 * @state: channel state. See also enum rdma_ch_state.
293 * @ioctx_ring: Send ring.
294 * @wc: IB work completion array for srpt_process_completion().
295 * @list: Node for insertion in the srpt_device.rch_list list.
296 * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
297 * list contains struct srpt_recv_ioctx elements and is protected
298 * against concurrent modification by the cm_id spinlock.
299 * @sess: Session information associated with this SRP channel.
300 * @sess_name: Session name.
301 * @release_work: Allows scheduling of srpt_release_channel().
302 * @release_done: Enables waiting for srpt_release_channel() completion.
303 */
304struct srpt_rdma_ch {
305 wait_queue_head_t wait_queue;
306 struct task_struct *thread;
307 struct ib_cm_id *cm_id;
308 struct ib_qp *qp;
309 struct ib_cq *cq;
310 int rq_size;
311 u32 rsp_size;
312 atomic_t sq_wr_avail;
313 struct srpt_port *sport;
314 u8 i_port_id[16];
315 u8 t_port_id[16];
316 int max_ti_iu_len;
317 atomic_t req_lim;
318 atomic_t req_lim_delta;
319 spinlock_t spinlock;
320 struct list_head free_list;
321 enum rdma_ch_state state;
322 struct srpt_send_ioctx **ioctx_ring;
323 struct ib_wc wc[16];
324 struct list_head list;
325 struct list_head cmd_wait_list;
326 struct se_session *sess;
327 u8 sess_name[36];
328 struct work_struct release_work;
329 struct completion *release_done;
330};
331
332/**
333 * struct srpt_port_attrib - Attributes for an SRPT port.
334 * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
335 * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
336 * @srp_sq_size: Per-channel send queue (SQ) size for new connections.
337 */
338struct srpt_port_attrib {
339 u32 srp_max_rdma_size;
340 u32 srp_max_rsp_size;
341 u32 srp_sq_size;
342};
343
344/**
345 * struct srpt_port - Information associated by SRPT with a single IB port.
346 * @sdev: backpointer to the HCA information.
347 * @mad_agent: per-port management datagram processing information.
348 * @enabled: Whether or not this target port is enabled.
349 * @port_guid: ASCII representation of Port GUID
350 * @port: one-based port number.
351 * @sm_lid: cached value of the port's sm_lid.
352 * @lid: cached value of the port's lid.
353 * @gid: cached value of the port's gid.
354 * @port_acl_lock: Spinlock that protects port_acl_list.
355 * @work: work structure for refreshing the aforementioned cached values.
356 * @port_tpg_1: Target portal group = 1 data.
357 * @port_wwn: Target core WWN data.
358 * @port_acl_list: Head of the list with all node ACLs for this port.
359 */
360struct srpt_port {
361 struct srpt_device *sdev;
362 struct ib_mad_agent *mad_agent;
363 bool enabled;
364 u8 port_guid[64];
365 u8 port;
366 u16 sm_lid;
367 u16 lid;
368 union ib_gid gid;
369 spinlock_t port_acl_lock;
370 struct work_struct work;
371 struct se_portal_group port_tpg_1;
372 struct se_wwn port_wwn;
373 struct list_head port_acl_list;
374 struct srpt_port_attrib port_attrib;
375};
376
377/**
378 * struct srpt_device - Information associated by SRPT with a single HCA.
379 * @device: Backpointer to the struct ib_device managed by the IB core.
380 * @pd: IB protection domain.
381 * @mr: L_Key (local key) with write access to all local memory.
382 * @srq: Per-HCA SRQ (shared receive queue).
383 * @cm_id: Connection identifier.
384 * @dev_attr: Attributes of the InfiniBand device as obtained during the
385 * ib_client.add() callback.
386 * @srq_size: SRQ size.
387 * @ioctx_ring: Per-HCA ring of receive I/O contexts posted to the SRQ.
388 * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
389 * @ch_releaseQ: Enables waiting for removal from rch_list.
390 * @spinlock: Protects rch_list and tpg.
391 * @port: Information about the ports owned by this HCA.
392 * @event_handler: Per-HCA asynchronous IB event handler.
393 * @list: Node in srpt_dev_list.
394 */
395struct srpt_device {
396 struct ib_device *device;
397 struct ib_pd *pd;
398 struct ib_mr *mr;
399 struct ib_srq *srq;
400 struct ib_cm_id *cm_id;
401 struct ib_device_attr dev_attr;
402 int srq_size;
403 struct srpt_recv_ioctx **ioctx_ring;
404 struct list_head rch_list;
405 wait_queue_head_t ch_releaseQ;
406 spinlock_t spinlock;
407 struct srpt_port port[2];
408 struct ib_event_handler event_handler;
409 struct list_head list;
410};
411
412/**
413 * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
414 * @i_port_id: 128-bit SRP initiator port ID.
415 * @sport: port information.
416 * @nacl: Target core node ACL information.
417 * @list: Element of the per-HCA ACL list.
418 */
419struct srpt_node_acl {
420 u8 i_port_id[16];
421 struct srpt_port *sport;
422 struct se_node_acl nacl;
423 struct list_head list;
424};
425
426/*
427 * SRP-related SCSI persistent reservation definitions.
428 *
429 * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
430 * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
431 * SCSI over an RDMA interface).
432 */
433
434enum {
435 SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
436};
437
438struct spc_rdma_transport_id {
439 uint8_t protocol_identifier;
440 uint8_t reserved[7];
441 uint8_t i_port_id[16];
442};
443
444#endif /* IB_SRPT_H */
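A closing note on struct spc_rdma_transport_id: SPC-4 defines the TransportID for SRP initiator ports as 24 bytes (one protocol-identifier byte, seven reserved bytes and the 16-byte initiator port identifier), which is exactly the layout above. A build-time assertion along the following lines could verify this, although the patch itself does not carry one.

    /* Sketch only: could be placed in srpt_init_module(). */
    BUILD_BUG_ON(sizeof(struct spc_rdma_transport_id) != 24);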