author    Linus Torvalds <torvalds@linux-foundation.org>    2013-04-30 16:14:57 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-04-30 16:14:57 -0400
commit    6da6dc2380c3cfe8d6b59d7c3c55fdd7a521fe6c (patch)
tree      152566bea1fc5593ef58deec450e0a499776d8c4
parent    8c55f1463c1fd318d5e785f02b80bcc32176d342 (diff)
parent    b8d26b3be8b33682cf163274ed07479a70554633 (diff)
Merge branch 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target update from Nicholas Bellinger:
 "The highlights this round include:

   - Add fileio support for WRITE_SAME w/ UNMAP=1 discard (asias)
   - Add fileio support for UNMAP discard (asias)
   - Add tcm_vhost hotplug support to work with upstream QEMU
     vhost-scsi-pci code (asias + mst)
   - Check for aborted sequence in tcm_fc response path (mdr)
   - Add initial iscsit_transport support into iscsi-target code (nab)
   - Refactor iscsi-target RX PDU logic + export request PDU handling (nab)
   - Refactor iscsi-target TX queue logic + export response PDU creation (nab)
   - Add new iSCSI Extensions for RDMA (iSER) target driver (Or + nab)

  The biggest changes revolve around iscsi-target refactoring in order
  to support the iser-target driver.  This includes the conversion of
  the iscsi-target data-path to use modern se_cmd->cmd_kref counting,
  and allowing the transport-independent aspects of RX/TX PDU
  request/response handling to be shared across the existing traditional
  iscsi-target code and the new iser-target code.

  Thanks to Or Gerlitz + Mellanox for supporting the iser-target
  development effort!"

* 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (25 commits)
  iser-target: Add iSCSI Extensions for RDMA (iSER) target driver
  tcm_vhost: Enable VIRTIO_SCSI_F_HOTPLUG
  tcm_vhost: Add ioctl to get and set events missed flag
  tcm_vhost: Add hotplug/hotunplug support
  tcm_vhost: Refactor the lock nesting rule
  tcm_fc: Check for aborted sequence
  iscsi-target: Add iser network portal attribute
  iscsi-target: Refactor TX queue logic + export response PDU creation
  iscsi-target: Refactor RX PDU logic + export request PDU handling
  iscsi-target: Add per transport iscsi_cmd alloc/free
  iscsi-target: Add iser-target parameter keys + setup during login
  iscsi-target: Initial traditional TCP conversion to iscsit_transport
  iscsi-target: Add iscsit_transport API template
  target: Add export of target_get_sess_cmd symbol
  target: Change default sense key of NOT_READY
  target/file: Set is_nonrot attribute
  target: Add sbc_execute_unmap() helper
  target/iblock: Add iblock_do_unmap() helper
  target/file: Add fd_do_unmap() helper
  target/file: Add UNMAP emulation support
  ...
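For context, a minimal sketch of how a fabric driver such as iser-target plugs into the new iscsit_transport API mentioned above. This is not part of the commit: the struct layout, the ISCSI_INFINIBAND transport type, and the iscsit_register_transport()/iscsit_unregister_transport() helpers are assumptions based on the new include/target/iscsi/iscsi_transport.h and drivers/target/iscsi/iscsi_target_transport.c files listed below, while the isert_* callbacks are functions that appear in the ib_isert.c diff further down.

/*
 * Illustrative sketch only -- not part of this commit.  Shows the shape
 * of an iscsit_transport registration; field names and the transport
 * type constant are assumptions, not a verbatim copy of the isert code.
 */
#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,	/* assumed constant */
	.owner			= THIS_MODULE,
	/* per-transport iscsi_cmd allocation (see isert_alloc_cmd below) */
	.iscsit_alloc_cmd	= isert_alloc_cmd,
	/* login response TX path (see isert_put_login_tx below) */
	.iscsit_put_login_tx	= isert_put_login_tx,
	/* Data-In via RDMA_WRITE (see isert_put_datain below) */
	.iscsit_queue_data_in	= isert_put_datain,
	/* SCSI response PDU (see isert_put_response below) */
	.iscsit_queue_status	= isert_put_response,
};

static int __init isert_sketch_init(void)
{
	/* exported by the new drivers/target/iscsi/iscsi_target_transport.c */
	return iscsit_register_transport(&iser_target_transport);
}

static void __exit isert_sketch_exit(void)
{
	iscsit_unregister_transport(&iser_target_transport);
}

module_init(isert_sketch_init);
module_exit(isert_sketch_exit);
MODULE_LICENSE("GPL");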
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/ulp/isert/Kconfig | 5
-rw-r--r--  drivers/infiniband/ulp/isert/Makefile | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 2281
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 138
-rw-r--r--  drivers/infiniband/ulp/isert/isert_proto.h | 47
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 1
-rw-r--r--  drivers/target/iscsi/Makefile | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 1182
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 28
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 98
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 472
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 194
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.h | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 87
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 16
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c | 55
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 53
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r--  drivers/target/target_core_file.c | 122
-rw-r--r--  drivers/target/target_core_iblock.c | 108
-rw-r--r--  drivers/target/target_core_sbc.c | 85
-rw-r--r--  drivers/target/target_core_transport.c | 13
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 9
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 9
-rw-r--r--  drivers/vhost/tcm_vhost.c | 262
-rw-r--r--  drivers/vhost/tcm_vhost.h | 13
-rw-r--r--  include/target/iscsi/iscsi_transport.h | 83
-rw-r--r--  include/target/target_core_backend.h | 4
-rw-r--r--  include/target/target_core_fabric.h | 2
39 files changed, 4469 insertions, 1001 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a0f29c1d03bc..c85b56c28099 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -59,5 +59,6 @@ source "drivers/infiniband/ulp/srp/Kconfig"
 source "drivers/infiniband/ulp/srpt/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
+source "drivers/infiniband/ulp/isert/Kconfig"
 
 endif # INFINIBAND
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index bf846a14b9d3..b126fefe0b1c 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
+obj-$(CONFIG_INFINIBAND_ISERT) += ulp/isert/
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
new file mode 100644
index 000000000000..ce3fd32167dc
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -0,0 +1,5 @@
1config INFINIBAND_ISERT
2 tristate "iSCSI Extentions for RDMA (iSER) target support"
3 depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
4 ---help---
5 Support for iSCSI Extentions for RDMA (iSER) Target on Infiniband fabrics.
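For reference, a hypothetical kernel .config fragment that would satisfy the dependencies declared above and build the new driver as a module; this is a sketch derived from the "depends on" line, not something contained in the commit:

CONFIG_INET=y
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_ADDR_TRANS=y
CONFIG_TARGET_CORE=m
CONFIG_ISCSI_TARGET=m
CONFIG_INFINIBAND_ISERT=m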
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
new file mode 100644
index 000000000000..c8bf2421f5bc
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -0,0 +1,2 @@
1ccflags-y := -Idrivers/target -Idrivers/target/iscsi
2obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
new file mode 100644
index 000000000000..41712f096515
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -0,0 +1,2281 @@
1/*******************************************************************************
2 * This file contains iSCSI extentions for RDMA (iSER) Verbs
3 *
4 * (c) Copyright 2013 RisingTide Systems LLC.
5 *
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
18
19#include <linux/string.h>
20#include <linux/module.h>
21#include <linux/scatterlist.h>
22#include <linux/socket.h>
23#include <linux/in.h>
24#include <linux/in6.h>
25#include <rdma/ib_verbs.h>
26#include <rdma/rdma_cm.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/iscsi/iscsi_transport.h>
30
31#include "isert_proto.h"
32#include "ib_isert.h"
33
34#define ISERT_MAX_CONN 8
35#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
37
38static DEFINE_MUTEX(device_list_mutex);
39static LIST_HEAD(device_list);
40static struct workqueue_struct *isert_rx_wq;
41static struct workqueue_struct *isert_comp_wq;
42static struct kmem_cache *isert_cmd_cache;
43
44static void
45isert_qp_event_callback(struct ib_event *e, void *context)
46{
47 struct isert_conn *isert_conn = (struct isert_conn *)context;
48
49 pr_err("isert_qp_event_callback event: %d\n", e->event);
50 switch (e->event) {
51 case IB_EVENT_COMM_EST:
52 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
53 break;
54 case IB_EVENT_QP_LAST_WQE_REACHED:
55 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
56 break;
57 default:
58 break;
59 }
60}
61
62static int
63isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
64{
65 int ret;
66
67 ret = ib_query_device(ib_dev, devattr);
68 if (ret) {
69 pr_err("ib_query_device() failed: %d\n", ret);
70 return ret;
71 }
72 pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
73 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
74
75 return 0;
76}
77
78static int
79isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
80{
81 struct isert_device *device = isert_conn->conn_device;
82 struct ib_qp_init_attr attr;
83 struct ib_device_attr devattr;
84 int ret, index, min_index = 0;
85
86 memset(&devattr, 0, sizeof(struct ib_device_attr));
87 ret = isert_query_device(cma_id->device, &devattr);
88 if (ret)
89 return ret;
90
91 mutex_lock(&device_list_mutex);
92 for (index = 0; index < device->cqs_used; index++)
93 if (device->cq_active_qps[index] <
94 device->cq_active_qps[min_index])
95 min_index = index;
96 device->cq_active_qps[min_index]++;
97 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
98 mutex_unlock(&device_list_mutex);
99
100 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
101 attr.event_handler = isert_qp_event_callback;
102 attr.qp_context = isert_conn;
103 attr.send_cq = device->dev_tx_cq[min_index];
104 attr.recv_cq = device->dev_rx_cq[min_index];
105 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
106 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
107 /*
108 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
109 * work-around for RDMA_READ..
110 */
111 attr.cap.max_send_sge = devattr.max_sge - 2;
112 isert_conn->max_sge = attr.cap.max_send_sge;
113
114 attr.cap.max_recv_sge = 1;
115 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
116 attr.qp_type = IB_QPT_RC;
117
118 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
119 cma_id->device);
120 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
121 isert_conn->conn_pd->device);
122
123 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
124 if (ret) {
125 pr_err("rdma_create_qp failed for cma_id %d\n", ret);
126 return ret;
127 }
128 isert_conn->conn_qp = cma_id->qp;
129 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
130
131 return 0;
132}
133
134static void
135isert_cq_event_callback(struct ib_event *e, void *context)
136{
137 pr_debug("isert_cq_event_callback event: %d\n", e->event);
138}
139
140static int
141isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
142{
143 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
144 struct iser_rx_desc *rx_desc;
145 struct ib_sge *rx_sg;
146 u64 dma_addr;
147 int i, j;
148
149 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
150 sizeof(struct iser_rx_desc), GFP_KERNEL);
151 if (!isert_conn->conn_rx_descs)
152 goto fail;
153
154 rx_desc = isert_conn->conn_rx_descs;
155
156 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
157 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
158 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
159 if (ib_dma_mapping_error(ib_dev, dma_addr))
160 goto dma_map_fail;
161
162 rx_desc->dma_addr = dma_addr;
163
164 rx_sg = &rx_desc->rx_sg;
165 rx_sg->addr = rx_desc->dma_addr;
166 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
167 rx_sg->lkey = isert_conn->conn_mr->lkey;
168 }
169
170 isert_conn->conn_rx_desc_head = 0;
171 return 0;
172
173dma_map_fail:
174 rx_desc = isert_conn->conn_rx_descs;
175 for (j = 0; j < i; j++, rx_desc++) {
176 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
177 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
178 }
179 kfree(isert_conn->conn_rx_descs);
180 isert_conn->conn_rx_descs = NULL;
181fail:
182 return -ENOMEM;
183}
184
185static void
186isert_free_rx_descriptors(struct isert_conn *isert_conn)
187{
188 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
189 struct iser_rx_desc *rx_desc;
190 int i;
191
192 if (!isert_conn->conn_rx_descs)
193 return;
194
195 rx_desc = isert_conn->conn_rx_descs;
196 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
197 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
198 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
199 }
200
201 kfree(isert_conn->conn_rx_descs);
202 isert_conn->conn_rx_descs = NULL;
203}
204
205static void isert_cq_tx_callback(struct ib_cq *, void *);
206static void isert_cq_rx_callback(struct ib_cq *, void *);
207
208static int
209isert_create_device_ib_res(struct isert_device *device)
210{
211 struct ib_device *ib_dev = device->ib_device;
212 struct isert_cq_desc *cq_desc;
213 int ret = 0, i, j;
214
215 device->cqs_used = min_t(int, num_online_cpus(),
216 device->ib_device->num_comp_vectors);
217 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
218 pr_debug("Using %d CQs, device %s supports %d vectors\n",
219 device->cqs_used, device->ib_device->name,
220 device->ib_device->num_comp_vectors);
221 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
222 device->cqs_used, GFP_KERNEL);
223 if (!device->cq_desc) {
224 pr_err("Unable to allocate device->cq_desc\n");
225 return -ENOMEM;
226 }
227 cq_desc = device->cq_desc;
228
229 device->dev_pd = ib_alloc_pd(ib_dev);
230 if (IS_ERR(device->dev_pd)) {
231 ret = PTR_ERR(device->dev_pd);
232 pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
233 goto out_cq_desc;
234 }
235
236 for (i = 0; i < device->cqs_used; i++) {
237 cq_desc[i].device = device;
238 cq_desc[i].cq_index = i;
239
240 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
241 isert_cq_rx_callback,
242 isert_cq_event_callback,
243 (void *)&cq_desc[i],
244 ISER_MAX_RX_CQ_LEN, i);
245 if (IS_ERR(device->dev_rx_cq[i]))
246 goto out_cq;
247
248 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
249 isert_cq_tx_callback,
250 isert_cq_event_callback,
251 (void *)&cq_desc[i],
252 ISER_MAX_TX_CQ_LEN, i);
253 if (IS_ERR(device->dev_tx_cq[i]))
254 goto out_cq;
255
256 if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
257 goto out_cq;
258
259 if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
260 goto out_cq;
261 }
262
263 device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
264 if (IS_ERR(device->dev_mr)) {
265 ret = PTR_ERR(device->dev_mr);
266 pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
267 goto out_cq;
268 }
269
270 return 0;
271
272out_cq:
273 for (j = 0; j < i; j++) {
274 cq_desc = &device->cq_desc[j];
275
276 if (device->dev_rx_cq[j]) {
277 cancel_work_sync(&cq_desc->cq_rx_work);
278 ib_destroy_cq(device->dev_rx_cq[j]);
279 }
280 if (device->dev_tx_cq[j]) {
281 cancel_work_sync(&cq_desc->cq_tx_work);
282 ib_destroy_cq(device->dev_tx_cq[j]);
283 }
284 }
285 ib_dealloc_pd(device->dev_pd);
286
287out_cq_desc:
288 kfree(device->cq_desc);
289
290 return ret;
291}
292
293static void
294isert_free_device_ib_res(struct isert_device *device)
295{
296 struct isert_cq_desc *cq_desc;
297 int i;
298
299 for (i = 0; i < device->cqs_used; i++) {
300 cq_desc = &device->cq_desc[i];
301
302 cancel_work_sync(&cq_desc->cq_rx_work);
303 cancel_work_sync(&cq_desc->cq_tx_work);
304 ib_destroy_cq(device->dev_rx_cq[i]);
305 ib_destroy_cq(device->dev_tx_cq[i]);
306 device->dev_rx_cq[i] = NULL;
307 device->dev_tx_cq[i] = NULL;
308 }
309
310 ib_dereg_mr(device->dev_mr);
311 ib_dealloc_pd(device->dev_pd);
312 kfree(device->cq_desc);
313}
314
315static void
316isert_device_try_release(struct isert_device *device)
317{
318 mutex_lock(&device_list_mutex);
319 device->refcount--;
320 if (!device->refcount) {
321 isert_free_device_ib_res(device);
322 list_del(&device->dev_node);
323 kfree(device);
324 }
325 mutex_unlock(&device_list_mutex);
326}
327
328static struct isert_device *
329isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
330{
331 struct isert_device *device;
332 int ret;
333
334 mutex_lock(&device_list_mutex);
335 list_for_each_entry(device, &device_list, dev_node) {
336 if (device->ib_device->node_guid == cma_id->device->node_guid) {
337 device->refcount++;
338 mutex_unlock(&device_list_mutex);
339 return device;
340 }
341 }
342
343 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
344 if (!device) {
345 mutex_unlock(&device_list_mutex);
346 return ERR_PTR(-ENOMEM);
347 }
348
349 INIT_LIST_HEAD(&device->dev_node);
350
351 device->ib_device = cma_id->device;
352 ret = isert_create_device_ib_res(device);
353 if (ret) {
354 kfree(device);
355 mutex_unlock(&device_list_mutex);
356 return ERR_PTR(ret);
357 }
358
359 device->refcount++;
360 list_add_tail(&device->dev_node, &device_list);
361 mutex_unlock(&device_list_mutex);
362
363 return device;
364}
365
366static int
367isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
368{
369 struct iscsi_np *np = cma_id->context;
370 struct isert_np *isert_np = np->np_context;
371 struct isert_conn *isert_conn;
372 struct isert_device *device;
373 struct ib_device *ib_dev = cma_id->device;
374 int ret = 0;
375
376 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
377 cma_id, cma_id->context);
378
379 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
380 if (!isert_conn) {
381 pr_err("Unable to allocate isert_conn\n");
382 return -ENOMEM;
383 }
384 isert_conn->state = ISER_CONN_INIT;
385 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
386 init_completion(&isert_conn->conn_login_comp);
387 init_waitqueue_head(&isert_conn->conn_wait);
388 init_waitqueue_head(&isert_conn->conn_wait_comp_err);
389 kref_init(&isert_conn->conn_kref);
390 kref_get(&isert_conn->conn_kref);
391
392 cma_id->context = isert_conn;
393 isert_conn->conn_cm_id = cma_id;
394 isert_conn->responder_resources = event->param.conn.responder_resources;
395 isert_conn->initiator_depth = event->param.conn.initiator_depth;
396 pr_debug("Using responder_resources: %u initiator_depth: %u\n",
397 isert_conn->responder_resources, isert_conn->initiator_depth);
398
399 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
400 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
401 if (!isert_conn->login_buf) {
402 pr_err("Unable to allocate isert_conn->login_buf\n");
403 ret = -ENOMEM;
404 goto out;
405 }
406
407 isert_conn->login_req_buf = isert_conn->login_buf;
408 isert_conn->login_rsp_buf = isert_conn->login_buf +
409 ISCSI_DEF_MAX_RECV_SEG_LEN;
410 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
411 isert_conn->login_buf, isert_conn->login_req_buf,
412 isert_conn->login_rsp_buf);
413
414 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
415 (void *)isert_conn->login_req_buf,
416 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
417
418 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
419 if (ret) {
420 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
421 ret);
422 isert_conn->login_req_dma = 0;
423 goto out_login_buf;
424 }
425
426 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
427 (void *)isert_conn->login_rsp_buf,
428 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
429
430 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
431 if (ret) {
432 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
433 ret);
434 isert_conn->login_rsp_dma = 0;
435 goto out_req_dma_map;
436 }
437
438 device = isert_device_find_by_ib_dev(cma_id);
439 if (IS_ERR(device)) {
440 ret = PTR_ERR(device);
441 goto out_rsp_dma_map;
442 }
443
444 isert_conn->conn_device = device;
445 isert_conn->conn_pd = device->dev_pd;
446 isert_conn->conn_mr = device->dev_mr;
447
448 ret = isert_conn_setup_qp(isert_conn, cma_id);
449 if (ret)
450 goto out_conn_dev;
451
452 mutex_lock(&isert_np->np_accept_mutex);
453 list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
454 mutex_unlock(&isert_np->np_accept_mutex);
455
456 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
457 wake_up(&isert_np->np_accept_wq);
458 return 0;
459
460out_conn_dev:
461 isert_device_try_release(device);
462out_rsp_dma_map:
463 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
464 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
465out_req_dma_map:
466 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
467 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
468out_login_buf:
469 kfree(isert_conn->login_buf);
470out:
471 kfree(isert_conn);
472 return ret;
473}
474
475static void
476isert_connect_release(struct isert_conn *isert_conn)
477{
478 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
479 struct isert_device *device = isert_conn->conn_device;
480 int cq_index;
481
482 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
483
484 if (isert_conn->conn_qp) {
485 cq_index = ((struct isert_cq_desc *)
486 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
487 pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
488 isert_conn->conn_device->cq_active_qps[cq_index]--;
489
490 rdma_destroy_qp(isert_conn->conn_cm_id);
491 }
492
493 isert_free_rx_descriptors(isert_conn);
494 rdma_destroy_id(isert_conn->conn_cm_id);
495
496 if (isert_conn->login_buf) {
497 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
498 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
499 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
500 ISCSI_DEF_MAX_RECV_SEG_LEN,
501 DMA_FROM_DEVICE);
502 kfree(isert_conn->login_buf);
503 }
504 kfree(isert_conn);
505
506 if (device)
507 isert_device_try_release(device);
508
509 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
510}
511
512static void
513isert_connected_handler(struct rdma_cm_id *cma_id)
514{
515 return;
516}
517
518static void
519isert_release_conn_kref(struct kref *kref)
520{
521 struct isert_conn *isert_conn = container_of(kref,
522 struct isert_conn, conn_kref);
523
524 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
525 current->comm, current->pid);
526
527 isert_connect_release(isert_conn);
528}
529
530static void
531isert_put_conn(struct isert_conn *isert_conn)
532{
533 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
534}
535
536static void
537isert_disconnect_work(struct work_struct *work)
538{
539 struct isert_conn *isert_conn = container_of(work,
540 struct isert_conn, conn_logout_work);
541
542 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
543
544 isert_conn->state = ISER_CONN_DOWN;
545
546 if (isert_conn->post_recv_buf_count == 0 &&
547 atomic_read(&isert_conn->post_send_buf_count) == 0) {
548 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
549 wake_up(&isert_conn->conn_wait);
550 }
551
552 isert_put_conn(isert_conn);
553}
554
555static void
556isert_disconnected_handler(struct rdma_cm_id *cma_id)
557{
558 struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
559
560 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
561 schedule_work(&isert_conn->conn_logout_work);
562}
563
564static int
565isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
566{
567 int ret = 0;
568
569 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
570 event->event, event->status, cma_id->context, cma_id);
571
572 switch (event->event) {
573 case RDMA_CM_EVENT_CONNECT_REQUEST:
574 pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
575 ret = isert_connect_request(cma_id, event);
576 break;
577 case RDMA_CM_EVENT_ESTABLISHED:
578 pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
579 isert_connected_handler(cma_id);
580 break;
581 case RDMA_CM_EVENT_DISCONNECTED:
582 pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
583 isert_disconnected_handler(cma_id);
584 break;
585 case RDMA_CM_EVENT_DEVICE_REMOVAL:
586 case RDMA_CM_EVENT_ADDR_CHANGE:
587 break;
588 case RDMA_CM_EVENT_CONNECT_ERROR:
589 default:
590 pr_err("Unknown RDMA CMA event: %d\n", event->event);
591 break;
592 }
593
594 if (ret != 0) {
595 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
596 event->event, ret);
597 dump_stack();
598 }
599
600 return ret;
601}
602
603static int
604isert_post_recv(struct isert_conn *isert_conn, u32 count)
605{
606 struct ib_recv_wr *rx_wr, *rx_wr_failed;
607 int i, ret;
608 unsigned int rx_head = isert_conn->conn_rx_desc_head;
609 struct iser_rx_desc *rx_desc;
610
611 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
612 rx_desc = &isert_conn->conn_rx_descs[rx_head];
613 rx_wr->wr_id = (unsigned long)rx_desc;
614 rx_wr->sg_list = &rx_desc->rx_sg;
615 rx_wr->num_sge = 1;
616 rx_wr->next = rx_wr + 1;
617 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
618 }
619
620 rx_wr--;
621 rx_wr->next = NULL; /* mark end of work requests list */
622
623 isert_conn->post_recv_buf_count += count;
624 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
625 &rx_wr_failed);
626 if (ret) {
627 pr_err("ib_post_recv() failed with ret: %d\n", ret);
628 isert_conn->post_recv_buf_count -= count;
629 } else {
630 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
631 isert_conn->conn_rx_desc_head = rx_head;
632 }
633 return ret;
634}
635
636static int
637isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
638{
639 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
640 struct ib_send_wr send_wr, *send_wr_failed;
641 int ret;
642
643 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
644 ISER_HEADERS_LEN, DMA_TO_DEVICE);
645
646 send_wr.next = NULL;
647 send_wr.wr_id = (unsigned long)tx_desc;
648 send_wr.sg_list = tx_desc->tx_sg;
649 send_wr.num_sge = tx_desc->num_sge;
650 send_wr.opcode = IB_WR_SEND;
651 send_wr.send_flags = IB_SEND_SIGNALED;
652
653 atomic_inc(&isert_conn->post_send_buf_count);
654
655 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
656 if (ret) {
657 pr_err("ib_post_send() failed, ret: %d\n", ret);
658 atomic_dec(&isert_conn->post_send_buf_count);
659 }
660
661 return ret;
662}
663
664static void
665isert_create_send_desc(struct isert_conn *isert_conn,
666 struct isert_cmd *isert_cmd,
667 struct iser_tx_desc *tx_desc)
668{
669 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
670
671 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
672 ISER_HEADERS_LEN, DMA_TO_DEVICE);
673
674 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
675 tx_desc->iser_header.flags = ISER_VER;
676
677 tx_desc->num_sge = 1;
678 tx_desc->isert_cmd = isert_cmd;
679
680 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
681 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
682 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
683 }
684}
685
686static int
687isert_init_tx_hdrs(struct isert_conn *isert_conn,
688 struct iser_tx_desc *tx_desc)
689{
690 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
691 u64 dma_addr;
692
693 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
694 ISER_HEADERS_LEN, DMA_TO_DEVICE);
695 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
696 pr_err("ib_dma_mapping_error() failed\n");
697 return -ENOMEM;
698 }
699
700 tx_desc->dma_addr = dma_addr;
701 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
702 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
703 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
704
705 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
706 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
707 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
708
709 return 0;
710}
711
712static void
713isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
714{
715 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
716 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
717 send_wr->opcode = IB_WR_SEND;
718 send_wr->send_flags = IB_SEND_SIGNALED;
719 send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
720 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
721}
722
723static int
724isert_rdma_post_recvl(struct isert_conn *isert_conn)
725{
726 struct ib_recv_wr rx_wr, *rx_wr_fail;
727 struct ib_sge sge;
728 int ret;
729
730 memset(&sge, 0, sizeof(struct ib_sge));
731 sge.addr = isert_conn->login_req_dma;
732 sge.length = ISER_RX_LOGIN_SIZE;
733 sge.lkey = isert_conn->conn_mr->lkey;
734
735 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
736 sge.addr, sge.length, sge.lkey);
737
738 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
739 rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
740 rx_wr.sg_list = &sge;
741 rx_wr.num_sge = 1;
742
743 isert_conn->post_recv_buf_count++;
744 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
745 if (ret) {
746 pr_err("ib_post_recv() failed: %d\n", ret);
747 isert_conn->post_recv_buf_count--;
748 }
749
750 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
751 return ret;
752}
753
754static int
755isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
756 u32 length)
757{
758 struct isert_conn *isert_conn = conn->context;
759 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
760 struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
761 int ret;
762
763 isert_create_send_desc(isert_conn, NULL, tx_desc);
764
765 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
766 sizeof(struct iscsi_hdr));
767
768 isert_init_tx_hdrs(isert_conn, tx_desc);
769
770 if (length > 0) {
771 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
772
773 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
774 length, DMA_TO_DEVICE);
775
776 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
777
778 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
779 length, DMA_TO_DEVICE);
780
781 tx_dsg->addr = isert_conn->login_rsp_dma;
782 tx_dsg->length = length;
783 tx_dsg->lkey = isert_conn->conn_mr->lkey;
784 tx_desc->num_sge = 2;
785 }
786 if (!login->login_failed) {
787 if (login->login_complete) {
788 ret = isert_alloc_rx_descriptors(isert_conn);
789 if (ret)
790 return ret;
791
792 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
793 if (ret)
794 return ret;
795
796 isert_conn->state = ISER_CONN_UP;
797 goto post_send;
798 }
799
800 ret = isert_rdma_post_recvl(isert_conn);
801 if (ret)
802 return ret;
803 }
804post_send:
805 ret = isert_post_send(isert_conn, tx_desc);
806 if (ret)
807 return ret;
808
809 return 0;
810}
811
812static void
813isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
814 struct isert_conn *isert_conn)
815{
816 struct iscsi_conn *conn = isert_conn->conn;
817 struct iscsi_login *login = conn->conn_login;
818 int size;
819
820 if (!login) {
821 pr_err("conn->conn_login is NULL\n");
822 dump_stack();
823 return;
824 }
825
826 if (login->first_request) {
827 struct iscsi_login_req *login_req =
828 (struct iscsi_login_req *)&rx_desc->iscsi_header;
829 /*
830 * Setup the initial iscsi_login values from the leading
831 * login request PDU.
832 */
833 login->leading_connection = (!login_req->tsih) ? 1 : 0;
834 login->current_stage =
835 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
836 >> 2;
837 login->version_min = login_req->min_version;
838 login->version_max = login_req->max_version;
839 memcpy(login->isid, login_req->isid, 6);
840 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
841 login->init_task_tag = login_req->itt;
842 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
843 login->cid = be16_to_cpu(login_req->cid);
844 login->tsih = be16_to_cpu(login_req->tsih);
845 }
846
847 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
848
849 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
850 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
851 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
852 memcpy(login->req_buf, &rx_desc->data[0], size);
853
854 complete(&isert_conn->conn_login_comp);
855}
856
857static void
858isert_release_cmd(struct iscsi_cmd *cmd)
859{
860 struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
861 iscsi_cmd);
862
863 pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
864
865 kfree(cmd->buf_ptr);
866 kfree(cmd->tmr_req);
867
868 kmem_cache_free(isert_cmd_cache, isert_cmd);
869}
870
871static struct iscsi_cmd
872*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
873{
874 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
875 struct isert_cmd *isert_cmd;
876
877 isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
878 if (!isert_cmd) {
879 pr_err("Unable to allocate isert_cmd\n");
880 return NULL;
881 }
882 isert_cmd->conn = isert_conn;
883 isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;
884
885 return &isert_cmd->iscsi_cmd;
886}
887
888static int
889isert_handle_scsi_cmd(struct isert_conn *isert_conn,
890 struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
891 unsigned char *buf)
892{
893 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
894 struct iscsi_conn *conn = isert_conn->conn;
895 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
896 struct scatterlist *sg;
897 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
898 bool dump_payload = false;
899
900 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
901 if (rc < 0)
902 return rc;
903
904 imm_data = cmd->immediate_data;
905 imm_data_len = cmd->first_burst_len;
906 unsol_data = cmd->unsolicited_data;
907
908 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
909 if (rc < 0) {
910 return 0;
911 } else if (rc > 0) {
912 dump_payload = true;
913 goto sequence_cmd;
914 }
915
916 if (!imm_data)
917 return 0;
918
919 sg = &cmd->se_cmd.t_data_sg[0];
920 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
921
922 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
923 sg, sg_nents, &rx_desc->data[0], imm_data_len);
924
925 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
926
927 cmd->write_data_done += imm_data_len;
928
929 if (cmd->write_data_done == cmd->se_cmd.data_length) {
930 spin_lock_bh(&cmd->istate_lock);
931 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
932 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
933 spin_unlock_bh(&cmd->istate_lock);
934 }
935
936sequence_cmd:
937 rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
938
939 if (!rc && dump_payload == false && unsol_data)
940 iscsit_set_unsoliticed_dataout(cmd);
941
942 if (rc == CMDSN_ERROR_CANNOT_RECOVER)
943 return iscsit_add_reject_from_cmd(
944 ISCSI_REASON_PROTOCOL_ERROR,
945 1, 0, (unsigned char *)hdr, cmd);
946
947 return 0;
948}
949
950static int
951isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
952 struct iser_rx_desc *rx_desc, unsigned char *buf)
953{
954 struct scatterlist *sg_start;
955 struct iscsi_conn *conn = isert_conn->conn;
956 struct iscsi_cmd *cmd = NULL;
957 struct iscsi_data *hdr = (struct iscsi_data *)buf;
958 u32 unsol_data_len = ntoh24(hdr->dlength);
959 int rc, sg_nents, sg_off, page_off;
960
961 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
962 if (rc < 0)
963 return rc;
964 else if (!cmd)
965 return 0;
966 /*
967 * FIXME: Unexpected unsolicited_data out
968 */
969 if (!cmd->unsolicited_data) {
970 pr_err("Received unexpected solicited data payload\n");
971 dump_stack();
972 return -1;
973 }
974
975 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
976 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
977
978 sg_off = cmd->write_data_done / PAGE_SIZE;
979 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
980 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
981 page_off = cmd->write_data_done % PAGE_SIZE;
982 /*
983 * FIXME: Non page-aligned unsolicited_data out
984 */
985 if (page_off) {
986 pr_err("Received unexpected non-page aligned data payload\n");
987 dump_stack();
988 return -1;
989 }
990 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
991 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
992
993 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
994 unsol_data_len);
995
996 rc = iscsit_check_dataout_payload(cmd, hdr, false);
997 if (rc < 0)
998 return rc;
999
1000 return 0;
1001}
1002
1003static int
1004isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1005 uint32_t read_stag, uint64_t read_va,
1006 uint32_t write_stag, uint64_t write_va)
1007{
1008 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1009 struct iscsi_conn *conn = isert_conn->conn;
1010 struct iscsi_cmd *cmd;
1011 struct isert_cmd *isert_cmd;
1012 int ret = -EINVAL;
1013 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1014
1015 switch (opcode) {
1016 case ISCSI_OP_SCSI_CMD:
1017 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1018 if (!cmd)
1019 break;
1020
1021 isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
1022 isert_cmd->read_stag = read_stag;
1023 isert_cmd->read_va = read_va;
1024 isert_cmd->write_stag = write_stag;
1025 isert_cmd->write_va = write_va;
1026
1027 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
1028 rx_desc, (unsigned char *)hdr);
1029 break;
1030 case ISCSI_OP_NOOP_OUT:
1031 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1032 if (!cmd)
1033 break;
1034
1035 ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
1036 break;
1037 case ISCSI_OP_SCSI_DATA_OUT:
1038 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1039 (unsigned char *)hdr);
1040 break;
1041 case ISCSI_OP_SCSI_TMFUNC:
1042 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1043 if (!cmd)
1044 break;
1045
1046 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1047 (unsigned char *)hdr);
1048 break;
1049 case ISCSI_OP_LOGOUT:
1050 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1051 if (!cmd)
1052 break;
1053
1054 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1055 if (ret > 0)
1056 wait_for_completion_timeout(&conn->conn_logout_comp,
1057 SECONDS_FOR_LOGOUT_COMP *
1058 HZ);
1059 break;
1060 default:
1061 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1062 dump_stack();
1063 break;
1064 }
1065
1066 return ret;
1067}
1068
1069static void
1070isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1071{
1072 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1073 uint64_t read_va = 0, write_va = 0;
1074 uint32_t read_stag = 0, write_stag = 0;
1075 int rc;
1076
1077 switch (iser_hdr->flags & 0xF0) {
1078 case ISCSI_CTRL:
1079 if (iser_hdr->flags & ISER_RSV) {
1080 read_stag = be32_to_cpu(iser_hdr->read_stag);
1081 read_va = be64_to_cpu(iser_hdr->read_va);
1082 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1083 read_stag, (unsigned long long)read_va);
1084 }
1085 if (iser_hdr->flags & ISER_WSV) {
1086 write_stag = be32_to_cpu(iser_hdr->write_stag);
1087 write_va = be64_to_cpu(iser_hdr->write_va);
1088 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1089 write_stag, (unsigned long long)write_va);
1090 }
1091
1092 pr_debug("ISER ISCSI_CTRL PDU\n");
1093 break;
1094 case ISER_HELLO:
1095 pr_err("iSER Hello message\n");
1096 break;
1097 default:
1098 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1099 break;
1100 }
1101
1102 rc = isert_rx_opcode(isert_conn, rx_desc,
1103 read_stag, read_va, write_stag, write_va);
1104}
1105
1106static void
1107isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1108 unsigned long xfer_len)
1109{
1110 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1111 struct iscsi_hdr *hdr;
1112 u64 rx_dma;
1113 int rx_buflen, outstanding;
1114
1115 if ((char *)desc == isert_conn->login_req_buf) {
1116 rx_dma = isert_conn->login_req_dma;
1117 rx_buflen = ISER_RX_LOGIN_SIZE;
1118 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1119 rx_dma, rx_buflen);
1120 } else {
1121 rx_dma = desc->dma_addr;
1122 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1123 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1124 rx_dma, rx_buflen);
1125 }
1126
1127 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1128
1129 hdr = &desc->iscsi_header;
1130 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1131 hdr->opcode, hdr->itt, hdr->flags,
1132 (int)(xfer_len - ISER_HEADERS_LEN));
1133
1134 if ((char *)desc == isert_conn->login_req_buf)
1135 isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
1136 isert_conn);
1137 else
1138 isert_rx_do_work(desc, isert_conn);
1139
1140 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1141 DMA_FROM_DEVICE);
1142
1143 isert_conn->post_recv_buf_count--;
1144 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1145 isert_conn->post_recv_buf_count);
1146
1147 if ((char *)desc == isert_conn->login_req_buf)
1148 return;
1149
1150 outstanding = isert_conn->post_recv_buf_count;
1151 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1152 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1153 ISERT_MIN_POSTED_RX);
1154 err = isert_post_recv(isert_conn, count);
1155 if (err) {
1156 pr_err("isert_post_recv() count: %d failed, %d\n",
1157 count, err);
1158 }
1159 }
1160}
1161
1162static void
1163isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1164{
1165 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1166 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1167
1168 pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
1169
1170 if (wr->sge) {
1171 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
1172 wr->sge = NULL;
1173 }
1174
1175 kfree(wr->send_wr);
1176 wr->send_wr = NULL;
1177
1178 kfree(isert_cmd->ib_sge);
1179 isert_cmd->ib_sge = NULL;
1180}
1181
1182static void
1183isert_put_cmd(struct isert_cmd *isert_cmd)
1184{
1185 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1186 struct isert_conn *isert_conn = isert_cmd->conn;
1187 struct iscsi_conn *conn;
1188
1189 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1190
1191 switch (cmd->iscsi_opcode) {
1192 case ISCSI_OP_SCSI_CMD:
1193 conn = isert_conn->conn;
1194
1195 spin_lock_bh(&conn->cmd_lock);
1196 if (!list_empty(&cmd->i_conn_node))
1197 list_del(&cmd->i_conn_node);
1198 spin_unlock_bh(&conn->cmd_lock);
1199
1200 if (cmd->data_direction == DMA_TO_DEVICE)
1201 iscsit_stop_dataout_timer(cmd);
1202
1203 isert_unmap_cmd(isert_cmd, isert_conn);
1204 /*
1205 * Fall-through
1206 */
1207 case ISCSI_OP_SCSI_TMFUNC:
1208 transport_generic_free_cmd(&cmd->se_cmd, 0);
1209 break;
1210 case ISCSI_OP_REJECT:
1211 case ISCSI_OP_NOOP_OUT:
1212 conn = isert_conn->conn;
1213
1214 spin_lock_bh(&conn->cmd_lock);
1215 if (!list_empty(&cmd->i_conn_node))
1216 list_del(&cmd->i_conn_node);
1217 spin_unlock_bh(&conn->cmd_lock);
1218
1219 /*
1220 * Handle special case for REJECT when iscsi_add_reject*() has
1221 * overwritten the original iscsi_opcode assignment, and the
1222 * associated cmd->se_cmd needs to be released.
1223 */
1224 if (cmd->se_cmd.se_tfo != NULL) {
1225 transport_generic_free_cmd(&cmd->se_cmd, 0);
1226 break;
1227 }
1228 /*
1229 * Fall-through
1230 */
1231 default:
1232 isert_release_cmd(cmd);
1233 break;
1234 }
1235}
1236
1237static void
1238isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1239{
1240 if (tx_desc->dma_addr != 0) {
1241 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1242 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1243 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1244 tx_desc->dma_addr = 0;
1245 }
1246}
1247
1248static void
1249isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1250 struct ib_device *ib_dev)
1251{
1252 if (isert_cmd->sense_buf_dma != 0) {
1253 pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
1254 ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
1255 isert_cmd->sense_buf_len, DMA_TO_DEVICE);
1256 isert_cmd->sense_buf_dma = 0;
1257 }
1258
1259 isert_unmap_tx_desc(tx_desc, ib_dev);
1260 isert_put_cmd(isert_cmd);
1261}
1262
1263static void
1264isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1265 struct isert_cmd *isert_cmd)
1266{
1267 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1268 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1269 struct se_cmd *se_cmd = &cmd->se_cmd;
1270 struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
1271
1272 iscsit_stop_dataout_timer(cmd);
1273
1274 if (wr->sge) {
1275 pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
1276 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
1277 wr->sge = NULL;
1278 }
1279
1280 if (isert_cmd->ib_sge) {
1281 pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
1282 kfree(isert_cmd->ib_sge);
1283 isert_cmd->ib_sge = NULL;
1284 }
1285
1286 cmd->write_data_done = se_cmd->data_length;
1287
1288 pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
1289 spin_lock_bh(&cmd->istate_lock);
1290 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1291 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1292 spin_unlock_bh(&cmd->istate_lock);
1293
1294 target_execute_cmd(se_cmd);
1295}
1296
1297static void
1298isert_do_control_comp(struct work_struct *work)
1299{
1300 struct isert_cmd *isert_cmd = container_of(work,
1301 struct isert_cmd, comp_work);
1302 struct isert_conn *isert_conn = isert_cmd->conn;
1303 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1304 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1305
1306 switch (cmd->i_state) {
1307 case ISTATE_SEND_TASKMGTRSP:
1308 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1309
1310 atomic_dec(&isert_conn->post_send_buf_count);
1311 iscsit_tmr_post_handler(cmd, cmd->conn);
1312
1313 cmd->i_state = ISTATE_SENT_STATUS;
1314 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1315 break;
1316 case ISTATE_SEND_REJECT:
1317 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1318 atomic_dec(&isert_conn->post_send_buf_count);
1319
1320 cmd->i_state = ISTATE_SENT_STATUS;
1321 complete(&cmd->reject_comp);
1322 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1323 case ISTATE_SEND_LOGOUTRSP:
1324 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1325 /*
1326 * Call atomic_dec(&isert_conn->post_send_buf_count)
1327 * from isert_free_conn()
1328 */
1329 isert_conn->logout_posted = true;
1330 iscsit_logout_post_handler(cmd, cmd->conn);
1331 break;
1332 default:
1333 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1334 dump_stack();
1335 break;
1336 }
1337}
1338
1339static void
1340isert_response_completion(struct iser_tx_desc *tx_desc,
1341 struct isert_cmd *isert_cmd,
1342 struct isert_conn *isert_conn,
1343 struct ib_device *ib_dev)
1344{
1345 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1346
1347 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1348 cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
1349 isert_unmap_tx_desc(tx_desc, ib_dev);
1350
1351 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1352 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1353 return;
1354 }
1355 atomic_dec(&isert_conn->post_send_buf_count);
1356
1357 cmd->i_state = ISTATE_SENT_STATUS;
1358 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1359}
1360
1361static void
1362isert_send_completion(struct iser_tx_desc *tx_desc,
1363 struct isert_conn *isert_conn)
1364{
1365 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1366 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1367 struct isert_rdma_wr *wr;
1368
1369 if (!isert_cmd) {
1370 atomic_dec(&isert_conn->post_send_buf_count);
1371 isert_unmap_tx_desc(tx_desc, ib_dev);
1372 return;
1373 }
1374 wr = &isert_cmd->rdma_wr;
1375
1376 switch (wr->iser_ib_op) {
1377 case ISER_IB_RECV:
1378 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1379 dump_stack();
1380 break;
1381 case ISER_IB_SEND:
1382 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1383 isert_response_completion(tx_desc, isert_cmd,
1384 isert_conn, ib_dev);
1385 break;
1386 case ISER_IB_RDMA_WRITE:
1387 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1388 dump_stack();
1389 break;
1390 case ISER_IB_RDMA_READ:
1391 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1392
1393 atomic_dec(&isert_conn->post_send_buf_count);
1394 isert_completion_rdma_read(tx_desc, isert_cmd);
1395 break;
1396 default:
1397 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1398 dump_stack();
1399 break;
1400 }
1401}
1402
1403static void
1404isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1405{
1406 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1407
1408 if (tx_desc) {
1409 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1410
1411 if (!isert_cmd)
1412 isert_unmap_tx_desc(tx_desc, ib_dev);
1413 else
1414 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1415 }
1416
1417 if (isert_conn->post_recv_buf_count == 0 &&
1418 atomic_read(&isert_conn->post_send_buf_count) == 0) {
1419 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1420 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1421
1422 isert_conn->state = ISER_CONN_TERMINATING;
1423 wake_up(&isert_conn->conn_wait_comp_err);
1424 }
1425}
1426
1427static void
1428isert_cq_tx_work(struct work_struct *work)
1429{
1430 struct isert_cq_desc *cq_desc = container_of(work,
1431 struct isert_cq_desc, cq_tx_work);
1432 struct isert_device *device = cq_desc->device;
1433 int cq_index = cq_desc->cq_index;
1434 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1435 struct isert_conn *isert_conn;
1436 struct iser_tx_desc *tx_desc;
1437 struct ib_wc wc;
1438
1439 while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1440 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1441 isert_conn = wc.qp->qp_context;
1442
1443 if (wc.status == IB_WC_SUCCESS) {
1444 isert_send_completion(tx_desc, isert_conn);
1445 } else {
1446 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1447 pr_debug("TX wc.status: 0x%08x\n", wc.status);
1448 atomic_dec(&isert_conn->post_send_buf_count);
1449 isert_cq_comp_err(tx_desc, isert_conn);
1450 }
1451 }
1452
1453 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1454}
1455
1456static void
1457isert_cq_tx_callback(struct ib_cq *cq, void *context)
1458{
1459 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1460
1461 INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1462 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1463}
1464
1465static void
1466isert_cq_rx_work(struct work_struct *work)
1467{
1468 struct isert_cq_desc *cq_desc = container_of(work,
1469 struct isert_cq_desc, cq_rx_work);
1470 struct isert_device *device = cq_desc->device;
1471 int cq_index = cq_desc->cq_index;
1472 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1473 struct isert_conn *isert_conn;
1474 struct iser_rx_desc *rx_desc;
1475 struct ib_wc wc;
1476 unsigned long xfer_len;
1477
1478 while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1479 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1480 isert_conn = wc.qp->qp_context;
1481
1482 if (wc.status == IB_WC_SUCCESS) {
1483 xfer_len = (unsigned long)wc.byte_len;
1484 isert_rx_completion(rx_desc, isert_conn, xfer_len);
1485 } else {
1486 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1487 if (wc.status != IB_WC_WR_FLUSH_ERR)
1488 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1489
1490 isert_conn->post_recv_buf_count--;
1491 isert_cq_comp_err(NULL, isert_conn);
1492 }
1493 }
1494
1495 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1496}
1497
1498static void
1499isert_cq_rx_callback(struct ib_cq *cq, void *context)
1500{
1501 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1502
1503 INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1504 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1505}
1506
1507static int
1508isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1509{
1510 struct ib_send_wr *wr_failed;
1511 int ret;
1512
1513 atomic_inc(&isert_conn->post_send_buf_count);
1514
1515 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1516 &wr_failed);
1517 if (ret) {
1518 pr_err("ib_post_send failed with %d\n", ret);
1519 atomic_dec(&isert_conn->post_send_buf_count);
1520 return ret;
1521 }
1522 return ret;
1523}
1524
1525static int
1526isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1527{
1528 struct isert_cmd *isert_cmd = container_of(cmd,
1529 struct isert_cmd, iscsi_cmd);
1530 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1531 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1532 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1533 &isert_cmd->tx_desc.iscsi_header;
1534
1535 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1536 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1537 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1538 /*
1539 * Attach SENSE DATA payload to iSCSI Response PDU
1540 */
1541 if (cmd->se_cmd.sense_buffer &&
1542 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1543 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1544 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1545 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1546 u32 padding, sense_len;
1547
1548 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1549 cmd->sense_buffer);
1550 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1551
1552 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1553 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1554 sense_len = cmd->se_cmd.scsi_sense_length + padding;
1555
1556 isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
1557 (void *)cmd->sense_buffer, sense_len,
1558 DMA_TO_DEVICE);
1559
1560 isert_cmd->sense_buf_len = sense_len;
1561 tx_dsg->addr = isert_cmd->sense_buf_dma;
1562 tx_dsg->length = sense_len;
1563 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1564 isert_cmd->tx_desc.num_sge = 2;
1565 }
1566
1567 isert_init_send_wr(isert_cmd, send_wr);
1568
1569 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1570
1571 return isert_post_response(isert_conn, isert_cmd);
1572}
1573
1574static int
1575isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1576 bool nopout_response)
1577{
1578 struct isert_cmd *isert_cmd = container_of(cmd,
1579 struct isert_cmd, iscsi_cmd);
1580 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1581 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1582
1583 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1584 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1585 &isert_cmd->tx_desc.iscsi_header,
1586 nopout_response);
1587 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1588 isert_init_send_wr(isert_cmd, send_wr);
1589
1590 pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1591
1592 return isert_post_response(isert_conn, isert_cmd);
1593}
1594
1595static int
1596isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1597{
1598 struct isert_cmd *isert_cmd = container_of(cmd,
1599 struct isert_cmd, iscsi_cmd);
1600 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1601 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1602
1603 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1604 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1605 &isert_cmd->tx_desc.iscsi_header);
1606 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1607 isert_init_send_wr(isert_cmd, send_wr);
1608
1609 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1610
1611 return isert_post_response(isert_conn, isert_cmd);
1612}
1613
1614static int
1615isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1616{
1617 struct isert_cmd *isert_cmd = container_of(cmd,
1618 struct isert_cmd, iscsi_cmd);
1619 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1620 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1621
1622 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1623 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1624 &isert_cmd->tx_desc.iscsi_header);
1625 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1626 isert_init_send_wr(isert_cmd, send_wr);
1627
1628 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1629
1630 return isert_post_response(isert_conn, isert_cmd);
1631}
1632
1633static int
1634isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1635{
1636 struct isert_cmd *isert_cmd = container_of(cmd,
1637 struct isert_cmd, iscsi_cmd);
1638 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1639 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1640
1641 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1642 iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
1643 &isert_cmd->tx_desc.iscsi_header);
1644 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1645 isert_init_send_wr(isert_cmd, send_wr);
1646
1647 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1648
1649 return isert_post_response(isert_conn, isert_cmd);
1650}
1651
1652static int
1653isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1654 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1655 u32 data_left, u32 offset)
1656{
1657 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1658 struct scatterlist *sg_start, *tmp_sg;
1659 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1660 u32 sg_off, page_off;
1661 int i = 0, sg_nents;
1662
1663 sg_off = offset / PAGE_SIZE;
1664 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1665 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
1666 page_off = offset % PAGE_SIZE;
1667
1668 send_wr->sg_list = ib_sge;
1669 send_wr->num_sge = sg_nents;
1670 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1671 /*
1672	 * Map TCM scatterlist memory into ib_sge dma_addr entries.
1673 */
1674 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
1675 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
1676 (unsigned long long)tmp_sg->dma_address,
1677 tmp_sg->length, page_off);
1678
1679 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
1680 ib_sge->length = min_t(u32, data_left,
1681 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
1682 ib_sge->lkey = isert_conn->conn_mr->lkey;
1683
1684 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
1685 ib_sge->addr, ib_sge->length);
1686 page_off = 0;
1687 data_left -= ib_sge->length;
1688 ib_sge++;
1689 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
1690 }
1691
1692 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
1693 send_wr->sg_list, send_wr->num_sge);
1694
1695 return sg_nents;
1696}
1697
1698static int
1699isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1700{
1701 struct se_cmd *se_cmd = &cmd->se_cmd;
1702 struct isert_cmd *isert_cmd = container_of(cmd,
1703 struct isert_cmd, iscsi_cmd);
1704 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1705 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1706 struct ib_send_wr *wr_failed, *send_wr;
1707 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1708 struct ib_sge *ib_sge;
1709 struct scatterlist *sg;
1710 u32 offset = 0, data_len, data_left, rdma_write_max;
1711 int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
1712
1713 pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
1714
1715 sg = &se_cmd->t_data_sg[0];
1716 sg_nents = se_cmd->t_data_nents;
1717
1718 count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
1719 if (unlikely(!count)) {
1720 pr_err("Unable to map put_datain SGs\n");
1721 return -EINVAL;
1722 }
1723 wr->sge = sg;
1724 wr->num_sge = sg_nents;
1725 pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
1726 count, sg, sg_nents);
1727
1728 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
1729 if (!ib_sge) {
1730 pr_warn("Unable to allocate datain ib_sge\n");
1731 ret = -ENOMEM;
1732 goto unmap_sg;
1733 }
1734 isert_cmd->ib_sge = ib_sge;
1735
1736	pr_debug("Allocated ib_sge: %p from t_data_nents: %d for RDMA_WRITE\n",
1737 ib_sge, se_cmd->t_data_nents);
1738
1739 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
1740 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
1741 GFP_KERNEL);
1742 if (!wr->send_wr) {
1743 pr_err("Unable to allocate wr->send_wr\n");
1744 ret = -ENOMEM;
1745 goto unmap_sg;
1746 }
1747 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1748 wr->send_wr, wr->send_wr_num);
1749
1750 iscsit_increment_maxcmdsn(cmd, conn->sess);
1751 cmd->stat_sn = conn->stat_sn++;
1752
1753 wr->isert_cmd = isert_cmd;
1754 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
1755 data_left = se_cmd->data_length;
1756
1757 for (i = 0; i < wr->send_wr_num; i++) {
1758 send_wr = &isert_cmd->rdma_wr.send_wr[i];
1759 data_len = min(data_left, rdma_write_max);
1760
1761 send_wr->opcode = IB_WR_RDMA_WRITE;
1762 send_wr->send_flags = 0;
1763 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
1764 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
1765
1766 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
1767 send_wr, data_len, offset);
1768 ib_sge += ib_sge_cnt;
1769
1770 if (i + 1 == wr->send_wr_num)
1771 send_wr->next = &isert_cmd->tx_desc.send_wr;
1772 else
1773 send_wr->next = &wr->send_wr[i + 1];
1774
1775 offset += data_len;
1776 data_left -= data_len;
1777 }
1778 /*
1779 * Build isert_conn->tx_desc for iSCSI response PDU and attach
1780 */
1781 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1782 iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
1783 &isert_cmd->tx_desc.iscsi_header);
1784 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1785 isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
1786
1787 atomic_inc(&isert_conn->post_send_buf_count);
1788
1789 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1790 if (rc) {
1791 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
1792 atomic_dec(&isert_conn->post_send_buf_count);
1793 }
1794 pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
1795 return 1;
1796
1797unmap_sg:
1798 ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
1799 return ret;
1800}
1801
1802static int
1803isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1804{
1805 struct se_cmd *se_cmd = &cmd->se_cmd;
1806 struct isert_cmd *isert_cmd = container_of(cmd,
1807 struct isert_cmd, iscsi_cmd);
1808 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1809 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1810 struct ib_send_wr *wr_failed, *send_wr;
1811 struct ib_sge *ib_sge;
1812 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1813 struct scatterlist *sg_start;
1814 u32 sg_off, sg_nents, page_off, va_offset = 0;
1815 u32 offset = 0, data_len, data_left, rdma_write_max;
1816 int rc, ret = 0, count, i, ib_sge_cnt;
1817
1818 pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
1819 se_cmd->data_length, cmd->write_data_done);
1820
1821 sg_off = cmd->write_data_done / PAGE_SIZE;
1822 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1823 page_off = cmd->write_data_done % PAGE_SIZE;
1824
1825 pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
1826 sg_off, sg_start, page_off);
1827
1828 data_left = se_cmd->data_length - cmd->write_data_done;
1829 sg_nents = se_cmd->t_data_nents - sg_off;
1830
1831 pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
1832 data_left, sg_nents);
1833
1834 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
1835 if (unlikely(!count)) {
1836 pr_err("Unable to map get_dataout SGs\n");
1837 return -EINVAL;
1838 }
1839 wr->sge = sg_start;
1840 wr->num_sge = sg_nents;
1841 pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
1842 count, sg_start, sg_nents);
1843
1844 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
1845 if (!ib_sge) {
1846 pr_warn("Unable to allocate dataout ib_sge\n");
1847 ret = -ENOMEM;
1848 goto unmap_sg;
1849 }
1850 isert_cmd->ib_sge = ib_sge;
1851
1852	pr_debug("Using ib_sge: %p from sg_nents: %d for RDMA_READ\n",
1853 ib_sge, sg_nents);
1854
1855 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
1856 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
1857 GFP_KERNEL);
1858 if (!wr->send_wr) {
1859		pr_err("Unable to allocate wr->send_wr\n");
1860 ret = -ENOMEM;
1861 goto unmap_sg;
1862 }
1863 pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
1864 wr->send_wr, wr->send_wr_num);
1865
1866 isert_cmd->tx_desc.isert_cmd = isert_cmd;
1867
1868 wr->iser_ib_op = ISER_IB_RDMA_READ;
1869 wr->isert_cmd = isert_cmd;
1870 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
1871 offset = cmd->write_data_done;
1872
1873 for (i = 0; i < wr->send_wr_num; i++) {
1874 send_wr = &isert_cmd->rdma_wr.send_wr[i];
1875 data_len = min(data_left, rdma_write_max);
1876
1877 send_wr->opcode = IB_WR_RDMA_READ;
1878 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
1879 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
1880
1881 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
1882 send_wr, data_len, offset);
1883 ib_sge += ib_sge_cnt;
1884
1885 if (i + 1 == wr->send_wr_num)
1886 send_wr->send_flags = IB_SEND_SIGNALED;
1887 else
1888 send_wr->next = &wr->send_wr[i + 1];
1889
1890 offset += data_len;
1891 va_offset += data_len;
1892 data_left -= data_len;
1893 }
1894
1895 atomic_inc(&isert_conn->post_send_buf_count);
1896
1897 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1898 if (rc) {
1899 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
1900 atomic_dec(&isert_conn->post_send_buf_count);
1901 }
1902	pr_debug("Posted RDMA_READ memory for iSER Data WRITE\n");
1903 return 0;
1904
1905unmap_sg:
1906 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
1907 return ret;
1908}
1909
1910static int
1911isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1912{
1913 int ret;
1914
1915 switch (state) {
1916 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
1917 ret = isert_put_nopin(cmd, conn, false);
1918 break;
1919 default:
1920 pr_err("Unknown immediate state: 0x%02x\n", state);
1921 ret = -EINVAL;
1922 break;
1923 }
1924
1925 return ret;
1926}
1927
1928static int
1929isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1930{
1931 int ret;
1932
1933 switch (state) {
1934 case ISTATE_SEND_LOGOUTRSP:
1935 ret = isert_put_logout_rsp(cmd, conn);
1936 if (!ret) {
1937 pr_debug("Returning iSER Logout -EAGAIN\n");
1938 ret = -EAGAIN;
1939 }
1940 break;
1941 case ISTATE_SEND_NOPIN:
1942 ret = isert_put_nopin(cmd, conn, true);
1943 break;
1944 case ISTATE_SEND_TASKMGTRSP:
1945 ret = isert_put_tm_rsp(cmd, conn);
1946 break;
1947 case ISTATE_SEND_REJECT:
1948 ret = isert_put_reject(cmd, conn);
1949 break;
1950 case ISTATE_SEND_STATUS:
1951 /*
1952		 * Special case for sending non-GOOD SCSI status from TX thread
1953		 * context during pre se_cmd execution failure.
1954 */
1955 ret = isert_put_response(conn, cmd);
1956 break;
1957 default:
1958 pr_err("Unknown response state: 0x%02x\n", state);
1959 ret = -EINVAL;
1960 break;
1961 }
1962
1963 return ret;
1964}
1965
1966static int
1967isert_setup_np(struct iscsi_np *np,
1968 struct __kernel_sockaddr_storage *ksockaddr)
1969{
1970 struct isert_np *isert_np;
1971 struct rdma_cm_id *isert_lid;
1972 struct sockaddr *sa;
1973 int ret;
1974
1975 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
1976 if (!isert_np) {
1977 pr_err("Unable to allocate struct isert_np\n");
1978 return -ENOMEM;
1979 }
1980 init_waitqueue_head(&isert_np->np_accept_wq);
1981 mutex_init(&isert_np->np_accept_mutex);
1982 INIT_LIST_HEAD(&isert_np->np_accept_list);
1983 init_completion(&isert_np->np_login_comp);
1984
1985 sa = (struct sockaddr *)ksockaddr;
1986 pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
1987 /*
1988	 * Set up np->np_sockaddr from the sockaddr passed in from the
1989	 * iscsi_target_configfs.c code.
1990 */
1991 memcpy(&np->np_sockaddr, ksockaddr,
1992 sizeof(struct __kernel_sockaddr_storage));
1993
1994 isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
1995 IB_QPT_RC);
1996 if (IS_ERR(isert_lid)) {
1997		pr_err("rdma_create_id() for isert_lid failed: %ld\n",
1998 PTR_ERR(isert_lid));
1999 ret = PTR_ERR(isert_lid);
2000 goto out;
2001 }
2002
2003 ret = rdma_bind_addr(isert_lid, sa);
2004 if (ret) {
2005 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2006 goto out_lid;
2007 }
2008
2009 ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2010 if (ret) {
2011 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2012 goto out_lid;
2013 }
2014
2015 isert_np->np_cm_id = isert_lid;
2016 np->np_context = isert_np;
2017 pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2018
2019 return 0;
2020
2021out_lid:
2022 rdma_destroy_id(isert_lid);
2023out:
2024 kfree(isert_np);
2025 return ret;
2026}
2027
2028static int
2029isert_check_accept_queue(struct isert_np *isert_np)
2030{
2031 int empty;
2032
2033 mutex_lock(&isert_np->np_accept_mutex);
2034 empty = list_empty(&isert_np->np_accept_list);
2035 mutex_unlock(&isert_np->np_accept_mutex);
2036
2037 return empty;
2038}
2039
2040static int
2041isert_rdma_accept(struct isert_conn *isert_conn)
2042{
2043 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2044 struct rdma_conn_param cp;
2045 int ret;
2046
2047 memset(&cp, 0, sizeof(struct rdma_conn_param));
2048 cp.responder_resources = isert_conn->responder_resources;
2049 cp.initiator_depth = isert_conn->initiator_depth;
2050 cp.retry_count = 7;
2051 cp.rnr_retry_count = 7;
2052
2053 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2054
2055 ret = rdma_accept(cm_id, &cp);
2056 if (ret) {
2057 pr_err("rdma_accept() failed with: %d\n", ret);
2058 return ret;
2059 }
2060
2061 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2062
2063 return 0;
2064}
2065
2066static int
2067isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2068{
2069 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2070 int ret;
2071
2072 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2073
2074 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2075 if (ret)
2076 return ret;
2077
2078 pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2079 return 0;
2080}
2081
2082static void
2083isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2084 struct isert_conn *isert_conn)
2085{
2086 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2087 struct rdma_route *cm_route = &cm_id->route;
2088 struct sockaddr_in *sock_in;
2089 struct sockaddr_in6 *sock_in6;
2090
2091 conn->login_family = np->np_sockaddr.ss_family;
2092
2093 if (np->np_sockaddr.ss_family == AF_INET6) {
2094 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2095 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2096 &sock_in6->sin6_addr.in6_u);
2097 conn->login_port = ntohs(sock_in6->sin6_port);
2098
2099 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2100 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2101 &sock_in6->sin6_addr.in6_u);
2102 conn->local_port = ntohs(sock_in6->sin6_port);
2103 } else {
2104 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2105		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
2106			 &sock_in->sin_addr.s_addr);
2107 conn->login_port = ntohs(sock_in->sin_port);
2108
2109 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2110		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
2111			 &sock_in->sin_addr.s_addr);
2112 conn->local_port = ntohs(sock_in->sin_port);
2113 }
2114}
2115
2116static int
2117isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2118{
2119 struct isert_np *isert_np = (struct isert_np *)np->np_context;
2120 struct isert_conn *isert_conn;
2121 int max_accept = 0, ret;
2122
2123accept_wait:
2124 ret = wait_event_interruptible(isert_np->np_accept_wq,
2125 !isert_check_accept_queue(isert_np) ||
2126 np->np_thread_state == ISCSI_NP_THREAD_RESET);
2127 if (max_accept > 5)
2128 return -ENODEV;
2129
2130 spin_lock_bh(&np->np_thread_lock);
2131 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2132 spin_unlock_bh(&np->np_thread_lock);
2133 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2134 return -ENODEV;
2135 }
2136 spin_unlock_bh(&np->np_thread_lock);
2137
2138 mutex_lock(&isert_np->np_accept_mutex);
2139 if (list_empty(&isert_np->np_accept_list)) {
2140 mutex_unlock(&isert_np->np_accept_mutex);
2141 max_accept++;
2142 goto accept_wait;
2143 }
2144 isert_conn = list_first_entry(&isert_np->np_accept_list,
2145 struct isert_conn, conn_accept_node);
2146 list_del_init(&isert_conn->conn_accept_node);
2147 mutex_unlock(&isert_np->np_accept_mutex);
2148
2149 conn->context = isert_conn;
2150 isert_conn->conn = conn;
2151 max_accept = 0;
2152
2153 ret = isert_rdma_post_recvl(isert_conn);
2154 if (ret)
2155 return ret;
2156
2157 ret = isert_rdma_accept(isert_conn);
2158 if (ret)
2159 return ret;
2160
2161 isert_set_conn_info(np, conn, isert_conn);
2162
2163 pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2164 return 0;
2165}
2166
2167static void
2168isert_free_np(struct iscsi_np *np)
2169{
2170 struct isert_np *isert_np = (struct isert_np *)np->np_context;
2171
2172 rdma_destroy_id(isert_np->np_cm_id);
2173
2174 np->np_context = NULL;
2175 kfree(isert_np);
2176}
2177
2178static void isert_free_conn(struct iscsi_conn *conn)
2179{
2180 struct isert_conn *isert_conn = conn->context;
2181
2182	pr_debug("isert_free_conn: Starting\n");
2183 /*
2184 * Decrement post_send_buf_count for special case when called
2185 * from isert_do_control_comp() -> iscsit_logout_post_handler()
2186 */
2187 if (isert_conn->logout_posted)
2188 atomic_dec(&isert_conn->post_send_buf_count);
2189
2190 if (isert_conn->conn_cm_id)
2191 rdma_disconnect(isert_conn->conn_cm_id);
2192 /*
2193 * Only wait for conn_wait_comp_err if the isert_conn made it
2194	 * into full feature phase.
2195 */
2196 if (isert_conn->state > ISER_CONN_INIT) {
2197 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2198 isert_conn->state);
2199 wait_event(isert_conn->conn_wait_comp_err,
2200 isert_conn->state == ISER_CONN_TERMINATING);
2201 pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
2202 }
2203
2204 pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
2205 wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
2206 pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
2207
2208 isert_put_conn(isert_conn);
2209}
2210
2211static struct iscsit_transport iser_target_transport = {
2212 .name = "IB/iSER",
2213 .transport_type = ISCSI_INFINIBAND,
2214 .owner = THIS_MODULE,
2215 .iscsit_setup_np = isert_setup_np,
2216 .iscsit_accept_np = isert_accept_np,
2217 .iscsit_free_np = isert_free_np,
2218 .iscsit_free_conn = isert_free_conn,
2219 .iscsit_alloc_cmd = isert_alloc_cmd,
2220 .iscsit_get_login_rx = isert_get_login_rx,
2221 .iscsit_put_login_tx = isert_put_login_tx,
2222 .iscsit_immediate_queue = isert_immediate_queue,
2223 .iscsit_response_queue = isert_response_queue,
2224 .iscsit_get_dataout = isert_get_dataout,
2225 .iscsit_queue_data_in = isert_put_datain,
2226 .iscsit_queue_status = isert_put_response,
2227};
2228
2229static int __init isert_init(void)
2230{
2231 int ret;
2232
2233 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2234 if (!isert_rx_wq) {
2235 pr_err("Unable to allocate isert_rx_wq\n");
2236 return -ENOMEM;
2237 }
2238
2239 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2240 if (!isert_comp_wq) {
2241 pr_err("Unable to allocate isert_comp_wq\n");
2242 ret = -ENOMEM;
2243 goto destroy_rx_wq;
2244 }
2245
2246 isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
2247 sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
2248 0, NULL);
2249 if (!isert_cmd_cache) {
2250 pr_err("Unable to create isert_cmd_cache\n");
2251 ret = -ENOMEM;
2252		goto destroy_comp_wq;
2253 }
2254
2255 iscsit_register_transport(&iser_target_transport);
2256 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2257 return 0;
2258
2259destroy_comp_wq:
2260 destroy_workqueue(isert_comp_wq);
2261destroy_rx_wq:
2262 destroy_workqueue(isert_rx_wq);
2263 return ret;
2264}
2265
2266static void __exit isert_exit(void)
2267{
2268 kmem_cache_destroy(isert_cmd_cache);
2269 destroy_workqueue(isert_comp_wq);
2270 destroy_workqueue(isert_rx_wq);
2271 iscsit_unregister_transport(&iser_target_transport);
2272 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2273}
2274
2275MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2276MODULE_VERSION("0.1");
2277MODULE_AUTHOR("nab@Linux-iSCSI.org");
2278MODULE_LICENSE("GPL");
2279
2280module_init(isert_init);
2281module_exit(isert_exit);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
new file mode 100644
index 000000000000..b104f4c2cd38
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -0,0 +1,138 @@
1#include <linux/socket.h>
2#include <linux/in.h>
3#include <linux/in6.h>
4#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h>
6
7#define ISERT_RDMA_LISTEN_BACKLOG 10
8
9enum isert_desc_type {
10 ISCSI_TX_CONTROL,
11 ISCSI_TX_DATAIN
12};
13
14enum iser_ib_op_code {
15 ISER_IB_RECV,
16 ISER_IB_SEND,
17 ISER_IB_RDMA_WRITE,
18 ISER_IB_RDMA_READ,
19};
20
21enum iser_conn_state {
22 ISER_CONN_INIT,
23 ISER_CONN_UP,
24 ISER_CONN_TERMINATING,
25 ISER_CONN_DOWN,
26};
27
28struct iser_rx_desc {
29 struct iser_hdr iser_header;
30 struct iscsi_hdr iscsi_header;
31 char data[ISER_RECV_DATA_SEG_LEN];
32 u64 dma_addr;
33 struct ib_sge rx_sg;
34 char pad[ISER_RX_PAD_SIZE];
35} __packed;
36
37struct iser_tx_desc {
38 struct iser_hdr iser_header;
39 struct iscsi_hdr iscsi_header;
40 enum isert_desc_type type;
41 u64 dma_addr;
42 struct ib_sge tx_sg[2];
43 int num_sge;
44 struct isert_cmd *isert_cmd;
45 struct ib_send_wr send_wr;
46} __packed;
47
48struct isert_rdma_wr {
49 struct list_head wr_list;
50 struct isert_cmd *isert_cmd;
51 enum iser_ib_op_code iser_ib_op;
52 struct ib_sge *ib_sge;
53 int num_sge;
54 struct scatterlist *sge;
55 int send_wr_num;
56 struct ib_send_wr *send_wr;
57};
58
59struct isert_cmd {
60 uint32_t read_stag;
61 uint32_t write_stag;
62 uint64_t read_va;
63 uint64_t write_va;
64 u64 sense_buf_dma;
65 u32 sense_buf_len;
66 u32 read_va_off;
67 u32 write_va_off;
68 u32 rdma_wr_num;
69 struct isert_conn *conn;
70 struct iscsi_cmd iscsi_cmd;
71 struct ib_sge *ib_sge;
72 struct iser_tx_desc tx_desc;
73 struct isert_rdma_wr rdma_wr;
74 struct work_struct comp_work;
75};
76
77struct isert_device;
78
79struct isert_conn {
80 enum iser_conn_state state;
81 bool logout_posted;
82 int post_recv_buf_count;
83 atomic_t post_send_buf_count;
84 u32 responder_resources;
85 u32 initiator_depth;
86 u32 max_sge;
87 char *login_buf;
88 char *login_req_buf;
89 char *login_rsp_buf;
90 u64 login_req_dma;
91 u64 login_rsp_dma;
92 unsigned int conn_rx_desc_head;
93 struct iser_rx_desc *conn_rx_descs;
94 struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX];
95 struct iscsi_conn *conn;
96 struct list_head conn_accept_node;
97 struct completion conn_login_comp;
98 struct iser_tx_desc conn_login_tx_desc;
99 struct rdma_cm_id *conn_cm_id;
100 struct ib_pd *conn_pd;
101 struct ib_mr *conn_mr;
102 struct ib_qp *conn_qp;
103 struct isert_device *conn_device;
104 struct work_struct conn_logout_work;
105 wait_queue_head_t conn_wait;
106 wait_queue_head_t conn_wait_comp_err;
107 struct kref conn_kref;
108};
109
110#define ISERT_MAX_CQ 64
111
112struct isert_cq_desc {
113 struct isert_device *device;
114 int cq_index;
115 struct work_struct cq_rx_work;
116 struct work_struct cq_tx_work;
117};
118
119struct isert_device {
120 int cqs_used;
121 int refcount;
122 int cq_active_qps[ISERT_MAX_CQ];
123 struct ib_device *ib_device;
124 struct ib_pd *dev_pd;
125 struct ib_mr *dev_mr;
126 struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
127 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
128 struct isert_cq_desc *cq_desc;
129 struct list_head dev_node;
130};
131
132struct isert_np {
133 wait_queue_head_t np_accept_wq;
134 struct rdma_cm_id *np_cm_id;
135 struct mutex np_accept_mutex;
136 struct list_head np_accept_list;
137 struct completion np_login_comp;
138};
diff --git a/drivers/infiniband/ulp/isert/isert_proto.h b/drivers/infiniband/ulp/isert/isert_proto.h
new file mode 100644
index 000000000000..4dccd313b777
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/isert_proto.h
@@ -0,0 +1,47 @@
1/* From iscsi_iser.h */
2
3struct iser_hdr {
4 u8 flags;
5 u8 rsvd[3];
6 __be32 write_stag; /* write rkey */
7 __be64 write_va;
8 __be32 read_stag; /* read rkey */
9 __be64 read_va;
10} __packed;
11
12/* Constant PDU length calculations */
13#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
14
15#define ISER_RECV_DATA_SEG_LEN 8192
16#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
17#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
18
19/* QP settings */
20/* Maximal bounds on received asynchronous PDUs */
21#define ISERT_MAX_TX_MISC_PDUS	4	/* NOOP_IN(2), ASYNC_EVENT(2) */
22
23#define ISERT_MAX_RX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
24 * SCSI_TMFUNC(2), LOGOUT(1) */
25
26#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
27
28#define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
29
30#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
31
32#define ISERT_INFLIGHT_DATAOUTS 8
33
34#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
35 (1 + ISERT_INFLIGHT_DATAOUTS) + \
36 ISERT_MAX_TX_MISC_PDUS + \
37 ISERT_MAX_RX_MISC_PDUS)
38
39#define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \
40 (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
41
42#define ISER_VER 0x10
43#define ISER_WSV 0x08
44#define ISER_RSV 0x04
45#define ISCSI_CTRL 0x10
46#define ISER_HELLO 0x20
47#define ISER_HELLORPLY 0x30
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 61b5d8c2b5da..fcdc22306cab 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2585,25 +2585,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2585 ha->tgt.tgt_ops->free_cmd(cmd); 2585 ha->tgt.tgt_ops->free_cmd(cmd);
2586} 2586}
2587 2587
2588/* ha->hardware_lock supposed to be held on entry */
2589/* called via callback from qla2xxx */
2590void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
2591{
2592 struct qla_hw_data *ha = vha->hw;
2593 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2594
2595 if (likely(tgt == NULL)) {
2596 ql_dbg(ql_dbg_tgt, vha, 0xe021,
2597 "CTIO, but target mode not enabled"
2598 " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
2599 return;
2600 }
2601
2602 tgt->irq_cmd_count++;
2603 qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
2604 tgt->irq_cmd_count--;
2605}
2606
2607static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 2588static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2608 uint8_t task_codes) 2589 uint8_t task_codes)
2609{ 2590{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index ff9ccb9fd036..b33e411f28a0 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -980,7 +980,6 @@ extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
980extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 980extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
981extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 981extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
982extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 982extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
983extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
984extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); 983extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
985extern void qlt_enable_vha(struct scsi_qla_host *); 984extern void qlt_enable_vha(struct scsi_qla_host *);
986extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); 985extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 5b9a2cf7f0a9..13a92403fe3e 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -15,6 +15,7 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
15 iscsi_target_util.o \ 15 iscsi_target_util.o \
16 iscsi_target.o \ 16 iscsi_target.o \
17 iscsi_target_configfs.o \ 17 iscsi_target_configfs.o \
18 iscsi_target_stat.o 18 iscsi_target_stat.o \
19 iscsi_target_transport.o
19 20
20obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o 21obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7ea246a07731..ffbc6a94be52 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -49,6 +49,8 @@
49#include "iscsi_target_device.h" 49#include "iscsi_target_device.h"
50#include "iscsi_target_stat.h" 50#include "iscsi_target_stat.h"
51 51
52#include <target/iscsi/iscsi_transport.h>
53
52static LIST_HEAD(g_tiqn_list); 54static LIST_HEAD(g_tiqn_list);
53static LIST_HEAD(g_np_list); 55static LIST_HEAD(g_np_list);
54static DEFINE_SPINLOCK(tiqn_lock); 56static DEFINE_SPINLOCK(tiqn_lock);
@@ -68,8 +70,7 @@ struct kmem_cache *lio_ooo_cache;
68struct kmem_cache *lio_r2t_cache; 70struct kmem_cache *lio_r2t_cache;
69 71
70static int iscsit_handle_immediate_data(struct iscsi_cmd *, 72static int iscsit_handle_immediate_data(struct iscsi_cmd *,
71 unsigned char *buf, u32); 73 struct iscsi_scsi_req *, u32);
72static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
73 74
74struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf) 75struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
75{ 76{
@@ -401,8 +402,7 @@ struct iscsi_np *iscsit_add_np(
401 spin_unlock_bh(&np_lock); 402 spin_unlock_bh(&np_lock);
402 403
403 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 404 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
404 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 405 np->np_ip, np->np_port, np->np_transport->name);
405 "TCP" : "SCTP");
406 406
407 return np; 407 return np;
408} 408}
@@ -441,11 +441,10 @@ int iscsit_reset_np_thread(
441 return 0; 441 return 0;
442} 442}
443 443
444static int iscsit_del_np_comm(struct iscsi_np *np) 444static void iscsit_free_np(struct iscsi_np *np)
445{ 445{
446 if (np->np_socket) 446 if (np->np_socket)
447 sock_release(np->np_socket); 447 sock_release(np->np_socket);
448 return 0;
449} 448}
450 449
451int iscsit_del_np(struct iscsi_np *np) 450int iscsit_del_np(struct iscsi_np *np)
@@ -467,20 +466,47 @@ int iscsit_del_np(struct iscsi_np *np)
467 send_sig(SIGINT, np->np_thread, 1); 466 send_sig(SIGINT, np->np_thread, 1);
468 kthread_stop(np->np_thread); 467 kthread_stop(np->np_thread);
469 } 468 }
470 iscsit_del_np_comm(np); 469
470 np->np_transport->iscsit_free_np(np);
471 471
472 spin_lock_bh(&np_lock); 472 spin_lock_bh(&np_lock);
473 list_del(&np->np_list); 473 list_del(&np->np_list);
474 spin_unlock_bh(&np_lock); 474 spin_unlock_bh(&np_lock);
475 475
476 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 476 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
477 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 477 np->np_ip, np->np_port, np->np_transport->name);
478 "TCP" : "SCTP");
479 478
479 iscsit_put_transport(np->np_transport);
480 kfree(np); 480 kfree(np);
481 return 0; 481 return 0;
482} 482}
483 483
484static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
485static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
486
487static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
488{
489 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
490 return 0;
491}
492
493static struct iscsit_transport iscsi_target_transport = {
494 .name = "iSCSI/TCP",
495 .transport_type = ISCSI_TCP,
496 .owner = NULL,
497 .iscsit_setup_np = iscsit_setup_np,
498 .iscsit_accept_np = iscsit_accept_np,
499 .iscsit_free_np = iscsit_free_np,
500 .iscsit_alloc_cmd = iscsit_alloc_cmd,
501 .iscsit_get_login_rx = iscsit_get_login_rx,
502 .iscsit_put_login_tx = iscsit_put_login_tx,
503 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
504 .iscsit_immediate_queue = iscsit_immediate_queue,
505 .iscsit_response_queue = iscsit_response_queue,
506 .iscsit_queue_data_in = iscsit_queue_rsp,
507 .iscsit_queue_status = iscsit_queue_rsp,
508};
509
484static int __init iscsi_target_init_module(void) 510static int __init iscsi_target_init_module(void)
485{ 511{
486 int ret = 0; 512 int ret = 0;
@@ -557,6 +583,8 @@ static int __init iscsi_target_init_module(void)
557 goto ooo_out; 583 goto ooo_out;
558 } 584 }
559 585
586 iscsit_register_transport(&iscsi_target_transport);
587
560 if (iscsit_load_discovery_tpg() < 0) 588 if (iscsit_load_discovery_tpg() < 0)
561 goto r2t_out; 589 goto r2t_out;
562 590
@@ -587,6 +615,7 @@ static void __exit iscsi_target_cleanup_module(void)
587 iscsi_deallocate_thread_sets(); 615 iscsi_deallocate_thread_sets();
588 iscsi_thread_set_free(); 616 iscsi_thread_set_free();
589 iscsit_release_discovery_tpg(); 617 iscsit_release_discovery_tpg();
618 iscsit_unregister_transport(&iscsi_target_transport);
590 kmem_cache_destroy(lio_cmd_cache); 619 kmem_cache_destroy(lio_cmd_cache);
591 kmem_cache_destroy(lio_qr_cache); 620 kmem_cache_destroy(lio_qr_cache);
592 kmem_cache_destroy(lio_dr_cache); 621 kmem_cache_destroy(lio_dr_cache);
@@ -682,11 +711,20 @@ int iscsit_add_reject_from_cmd(
682 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 711 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
683 712
684 ret = wait_for_completion_interruptible(&cmd->reject_comp); 713 ret = wait_for_completion_interruptible(&cmd->reject_comp);
714 /*
715	 * Perform the kref_put now if se_cmd has already been set up by
716	 * iscsit_setup_scsi_cmd()
717 */
718 if (cmd->se_cmd.se_tfo != NULL) {
719 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
720 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
721 }
685 if (ret != 0) 722 if (ret != 0)
686 return -1; 723 return -1;
687 724
688 return (!fail_conn) ? 0 : -1; 725 return (!fail_conn) ? 0 : -1;
689} 726}
727EXPORT_SYMBOL(iscsit_add_reject_from_cmd);
690 728
691/* 729/*
692 * Map some portion of the allocated scatterlist to an iovec, suitable for 730 * Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -745,6 +783,9 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
745 783
746 conn->exp_statsn = exp_statsn; 784 conn->exp_statsn = exp_statsn;
747 785
786 if (conn->sess->sess_ops->RDMAExtensions)
787 return;
788
748 spin_lock_bh(&conn->cmd_lock); 789 spin_lock_bh(&conn->cmd_lock);
749 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 790 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
750 spin_lock(&cmd->istate_lock); 791 spin_lock(&cmd->istate_lock);
@@ -777,12 +818,10 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
777 return 0; 818 return 0;
778} 819}
779 820
780static int iscsit_handle_scsi_cmd( 821int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
781 struct iscsi_conn *conn, 822 unsigned char *buf)
782 unsigned char *buf)
783{ 823{
784 int data_direction, payload_length, cmdsn_ret = 0, immed_ret; 824 int data_direction, payload_length;
785 struct iscsi_cmd *cmd = NULL;
786 struct iscsi_scsi_req *hdr; 825 struct iscsi_scsi_req *hdr;
787 int iscsi_task_attr; 826 int iscsi_task_attr;
788 int sam_task_attr; 827 int sam_task_attr;
@@ -805,8 +844,8 @@ static int iscsit_handle_scsi_cmd(
805 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 844 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
806 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 845 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
807 " not set. Bad iSCSI Initiator.\n"); 846 " not set. Bad iSCSI Initiator.\n");
808 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 847 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
809 buf, conn); 848 1, 1, buf, cmd);
810 } 849 }
811 850
812 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 851 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
@@ -826,8 +865,8 @@ static int iscsit_handle_scsi_cmd(
826 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 865 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
827 " set when Expected Data Transfer Length is 0 for" 866 " set when Expected Data Transfer Length is 0 for"
828 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 867 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
829 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 868 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
830 buf, conn); 869 1, 1, buf, cmd);
831 } 870 }
832done: 871done:
833 872
@@ -836,29 +875,29 @@ done:
836 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" 875 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
837 " MUST be set if Expected Data Transfer Length is not 0." 876 " MUST be set if Expected Data Transfer Length is not 0."
838 " Bad iSCSI Initiator\n"); 877 " Bad iSCSI Initiator\n");
839 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 878 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
840 buf, conn); 879 1, 1, buf, cmd);
841 } 880 }
842 881
843 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 882 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
844 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 883 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
845 pr_err("Bidirectional operations not supported!\n"); 884 pr_err("Bidirectional operations not supported!\n");
846 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 885 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
847 buf, conn); 886 1, 1, buf, cmd);
848 } 887 }
849 888
850 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 889 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
851 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 890 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
852 " Scsi Command PDU.\n"); 891 " Scsi Command PDU.\n");
853 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 892 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
854 buf, conn); 893 1, 1, buf, cmd);
855 } 894 }
856 895
857 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 896 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
858 pr_err("ImmediateData=No but DataSegmentLength=%u," 897 pr_err("ImmediateData=No but DataSegmentLength=%u,"
859 " protocol error.\n", payload_length); 898 " protocol error.\n", payload_length);
860 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 899 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
861 buf, conn); 900 1, 1, buf, cmd);
862 } 901 }
863 902
864 if ((be32_to_cpu(hdr->data_length )== payload_length) && 903 if ((be32_to_cpu(hdr->data_length )== payload_length) &&
@@ -866,43 +905,38 @@ done:
866 pr_err("Expected Data Transfer Length and Length of" 905 pr_err("Expected Data Transfer Length and Length of"
867 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 906 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
868 " bit is not set protocol error\n"); 907 " bit is not set protocol error\n");
869 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 908 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
870 buf, conn); 909 1, 1, buf, cmd);
871 } 910 }
872 911
873 if (payload_length > be32_to_cpu(hdr->data_length)) { 912 if (payload_length > be32_to_cpu(hdr->data_length)) {
874 pr_err("DataSegmentLength: %u is greater than" 913 pr_err("DataSegmentLength: %u is greater than"
875 " EDTL: %u, protocol error.\n", payload_length, 914 " EDTL: %u, protocol error.\n", payload_length,
876 hdr->data_length); 915 hdr->data_length);
877 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 916 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
878 buf, conn); 917 1, 1, buf, cmd);
879 } 918 }
880 919
881 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 920 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
882 pr_err("DataSegmentLength: %u is greater than" 921 pr_err("DataSegmentLength: %u is greater than"
883 " MaxXmitDataSegmentLength: %u, protocol error.\n", 922 " MaxXmitDataSegmentLength: %u, protocol error.\n",
884 payload_length, conn->conn_ops->MaxXmitDataSegmentLength); 923 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
885 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 924 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
886 buf, conn); 925 1, 1, buf, cmd);
887 } 926 }
888 927
889 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 928 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
890 pr_err("DataSegmentLength: %u is greater than" 929 pr_err("DataSegmentLength: %u is greater than"
891 " FirstBurstLength: %u, protocol error.\n", 930 " FirstBurstLength: %u, protocol error.\n",
892 payload_length, conn->sess->sess_ops->FirstBurstLength); 931 payload_length, conn->sess->sess_ops->FirstBurstLength);
893 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 932 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
894 buf, conn); 933 1, 1, buf, cmd);
895 } 934 }
896 935
897 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 936 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
898 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE : 937 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
899 DMA_NONE; 938 DMA_NONE;
900 939
901 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
902 if (!cmd)
903 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
904 buf, conn);
905
906 cmd->data_direction = data_direction; 940 cmd->data_direction = data_direction;
907 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; 941 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
908 /* 942 /*
@@ -945,7 +979,8 @@ done:
945 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 979 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
946 cmd->first_burst_len = payload_length; 980 cmd->first_burst_len = payload_length;
947 981
948 if (cmd->data_direction == DMA_FROM_DEVICE) { 982 if (!conn->sess->sess_ops->RDMAExtensions &&
983 cmd->data_direction == DMA_FROM_DEVICE) {
949 struct iscsi_datain_req *dr; 984 struct iscsi_datain_req *dr;
950 985
951 dr = iscsit_allocate_datain_req(); 986 dr = iscsit_allocate_datain_req();
@@ -967,7 +1002,10 @@ done:
967 1002
968 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 1003 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
969 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 1004 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
970 hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 1005 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1006 conn->cid);
1007
1008 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
971 1009
972 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, 1010 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
973 scsilun_to_int(&hdr->lun)); 1011 scsilun_to_int(&hdr->lun));
@@ -1001,12 +1039,24 @@ attach_cmd:
1001 */ 1039 */
1002 core_alua_check_nonop_delay(&cmd->se_cmd); 1040 core_alua_check_nonop_delay(&cmd->se_cmd);
1003 1041
1004 if (iscsit_allocate_iovecs(cmd) < 0) { 1042 return 0;
1005 return iscsit_add_reject_from_cmd( 1043}
1006 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1044EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1007 1, 0, buf, cmd); 1045
1008 } 1046void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
1047{
1048 iscsit_set_dataout_sequence_values(cmd);
1049
1050 spin_lock_bh(&cmd->dataout_timeout_lock);
1051 iscsit_start_dataout_timer(cmd, cmd->conn);
1052 spin_unlock_bh(&cmd->dataout_timeout_lock);
1053}
1054EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
1009 1055
1056int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1057 struct iscsi_scsi_req *hdr)
1058{
1059 int cmdsn_ret = 0;
1010 /* 1060 /*
1011 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1061 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1012 * the Immediate Bit is not set, and no Immediate 1062 * the Immediate Bit is not set, and no Immediate
@@ -1019,12 +1069,17 @@ attach_cmd:
1019 */ 1069 */
1020 if (!cmd->immediate_data) { 1070 if (!cmd->immediate_data) {
1021 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1071 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1022 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1072 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1073 if (!cmd->sense_reason)
1074 return 0;
1075
1076 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1023 return 0; 1077 return 0;
1024 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1078 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
1025 return iscsit_add_reject_from_cmd( 1079 return iscsit_add_reject_from_cmd(
1026 ISCSI_REASON_PROTOCOL_ERROR, 1080 ISCSI_REASON_PROTOCOL_ERROR,
1027 1, 0, buf, cmd); 1081 1, 0, (unsigned char *)hdr, cmd);
1082 }
1028 } 1083 }
1029 1084
1030 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 1085 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -1033,25 +1088,23 @@ attach_cmd:
1033 * If no Immediate Data is attached, it's OK to return now. 1088 * If no Immediate Data is attached, it's OK to return now.
1034 */ 1089 */
1035 if (!cmd->immediate_data) { 1090 if (!cmd->immediate_data) {
1036 if (!cmd->sense_reason && cmd->unsolicited_data) { 1091 if (!cmd->sense_reason && cmd->unsolicited_data)
1037 iscsit_set_dataout_sequence_values(cmd); 1092 iscsit_set_unsoliticed_dataout(cmd);
1038 1093 if (!cmd->sense_reason)
1039 spin_lock_bh(&cmd->dataout_timeout_lock); 1094 return 0;
1040 iscsit_start_dataout_timer(cmd, cmd->conn);
1041 spin_unlock_bh(&cmd->dataout_timeout_lock);
1042 }
1043 1095
1096 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1044 return 0; 1097 return 0;
1045 } 1098 }
1046 1099
1047 /* 1100 /*
1048 * Early CHECK_CONDITIONs never make it to the transport processing 1101 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1049 * thread. They are processed in CmdSN order by 1102 * execution. These exceptions are processed in CmdSN order using
1050 * iscsit_check_received_cmdsn() below. 1103 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1051 */ 1104 */
1052 if (cmd->sense_reason) { 1105 if (cmd->sense_reason) {
1053 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1106 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1054 goto after_immediate_data; 1107 return 1;
1055 } 1108 }
1056 /* 1109 /*
1057 * Call directly into transport_generic_new_cmd() to perform 1110 * Call directly into transport_generic_new_cmd() to perform
@@ -1059,11 +1112,27 @@ attach_cmd:
1059 */ 1112 */
1060 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd); 1113 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1061 if (cmd->sense_reason) { 1114 if (cmd->sense_reason) {
1062 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1115 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1063 goto after_immediate_data; 1116 return 1;
1064 } 1117 }
1065 1118
1066 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1119 return 0;
1120}
1121EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1122
1123static int
1124iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
1125 bool dump_payload)
1126{
1127 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1128 /*
1129 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1130 */
1131	if (dump_payload)
1132 goto after_immediate_data;
1133
1134 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1135 cmd->first_burst_len);
1067after_immediate_data: 1136after_immediate_data:
1068 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1137 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1069 /* 1138 /*
@@ -1071,26 +1140,19 @@ after_immediate_data:
1071 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1140 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1072 * Immediate Bit is not set. 1141 * Immediate Bit is not set.
1073 */ 1142 */
1074 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1143 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn);
1075 /* 1144
1076 * Special case for Unsupported SAM WRITE Opcodes
1077 * and ImmediateData=Yes.
1078 */
1079 if (cmd->sense_reason) { 1145 if (cmd->sense_reason) {
1080 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1146 if (iscsit_dump_data_payload(cmd->conn,
1147 cmd->first_burst_len, 1) < 0)
1081 return -1; 1148 return -1;
1082 } else if (cmd->unsolicited_data) { 1149 } else if (cmd->unsolicited_data)
1083 iscsit_set_dataout_sequence_values(cmd); 1150 iscsit_set_unsoliticed_dataout(cmd);
1084
1085 spin_lock_bh(&cmd->dataout_timeout_lock);
1086 iscsit_start_dataout_timer(cmd, cmd->conn);
1087 spin_unlock_bh(&cmd->dataout_timeout_lock);
1088 }
1089 1151
1090 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1152 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1091 return iscsit_add_reject_from_cmd( 1153 return iscsit_add_reject_from_cmd(
1092 ISCSI_REASON_PROTOCOL_ERROR, 1154 ISCSI_REASON_PROTOCOL_ERROR,
1093 1, 0, buf, cmd); 1155 1, 0, (unsigned char *)hdr, cmd);
1094 1156
1095 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1157 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1096 /* 1158 /*
@@ -1105,13 +1167,47 @@ after_immediate_data:
1105 * CmdSN and issue a retry to plug the sequence. 1167 * CmdSN and issue a retry to plug the sequence.
1106 */ 1168 */
1107 cmd->i_state = ISTATE_REMOVE; 1169 cmd->i_state = ISTATE_REMOVE;
1108 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1170 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1109 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1171 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1110 return -1; 1172 return -1;
1111 1173
1112 return 0; 1174 return 0;
1113} 1175}
1114 1176
1177static int
1178iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1179 unsigned char *buf)
1180{
1181 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1182 int rc, immed_data;
1183 bool dump_payload = false;
1184
1185 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1186 if (rc < 0)
1187 return rc;
1188 /*
1189	 * Allocate iovecs needed for struct socket operations for
1190 * traditional iSCSI block I/O.
1191 */
1192 if (iscsit_allocate_iovecs(cmd) < 0) {
1193 return iscsit_add_reject_from_cmd(
1194 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1195 1, 0, buf, cmd);
1196 }
1197 immed_data = cmd->immediate_data;
1198
1199 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1200 if (rc < 0)
1201 return rc;
1202 else if (rc > 0)
1203 dump_payload = true;
1204
1205 if (!immed_data)
1206 return 0;
1207
1208 return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1209}
1210
1115static u32 iscsit_do_crypto_hash_sg( 1211static u32 iscsit_do_crypto_hash_sg(
1116 struct hash_desc *hash, 1212 struct hash_desc *hash,
1117 struct iscsi_cmd *cmd, 1213 struct iscsi_cmd *cmd,
@@ -1174,20 +1270,16 @@ static void iscsit_do_crypto_hash_buf(
1174 crypto_hash_final(hash, data_crc); 1270 crypto_hash_final(hash, data_crc);
1175} 1271}
1176 1272
1177static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1273int
1274iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1275 struct iscsi_cmd **out_cmd)
1178{ 1276{
1179 int iov_ret, ooo_cmdsn = 0, ret; 1277 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1180 u8 data_crc_failed = 0;
1181 u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
1182 u32 rx_size = 0, payload_length;
1183 struct iscsi_cmd *cmd = NULL; 1278 struct iscsi_cmd *cmd = NULL;
1184 struct se_cmd *se_cmd; 1279 struct se_cmd *se_cmd;
1185 struct iscsi_data *hdr;
1186 struct kvec *iov;
1187 unsigned long flags; 1280 unsigned long flags;
1188 1281 u32 payload_length = ntoh24(hdr->dlength);
1189 hdr = (struct iscsi_data *) buf; 1282 int rc;
1190 payload_length = ntoh24(hdr->dlength);
1191 1283
1192 if (!payload_length) { 1284 if (!payload_length) {
1193 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1285 pr_err("DataOUT payload is ZERO, protocol error.\n");
@@ -1220,7 +1312,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1220 1312
1221 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1313 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1222 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1314 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1223 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, 1315 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1224 payload_length, conn->cid); 1316 payload_length, conn->cid);
1225 1317
1226 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 1318 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
@@ -1312,12 +1404,26 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1312 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1404 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1313 * within-command recovery checks before receiving the payload. 1405 * within-command recovery checks before receiving the payload.
1314 */ 1406 */
1315 ret = iscsit_check_pre_dataout(cmd, buf); 1407 rc = iscsit_check_pre_dataout(cmd, buf);
1316 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1408 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1317 return 0; 1409 return 0;
1318 else if (ret == DATAOUT_CANNOT_RECOVER) 1410 else if (rc == DATAOUT_CANNOT_RECOVER)
1319 return -1; 1411 return -1;
1320 1412
1413 *out_cmd = cmd;
1414 return 0;
1415}
1416EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1417
1418static int
1419iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1420 struct iscsi_data *hdr)
1421{
1422 struct kvec *iov;
1423 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1424 u32 payload_length = ntoh24(hdr->dlength);
1425 int iov_ret, data_crc_failed = 0;
1426
1321 rx_size += payload_length; 1427 rx_size += payload_length;
1322 iov = &cmd->iov_data[0]; 1428 iov = &cmd->iov_data[0];
1323 1429
@@ -1370,17 +1476,27 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1370 payload_length); 1476 payload_length);
1371 } 1477 }
1372 } 1478 }
1479
1480 return data_crc_failed;
1481}
1482
1483int
1484iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
1485 bool data_crc_failed)
1486{
1487 struct iscsi_conn *conn = cmd->conn;
1488 int rc, ooo_cmdsn;
1373 /* 1489 /*
1374 * Increment post receive data and CRC values or perform 1490 * Increment post receive data and CRC values or perform
1375 * within-command recovery. 1491 * within-command recovery.
1376 */ 1492 */
1377 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1493 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1378 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1494 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1379 return 0; 1495 return 0;
1380 else if (ret == DATAOUT_SEND_R2T) { 1496 else if (rc == DATAOUT_SEND_R2T) {
1381 iscsit_set_dataout_sequence_values(cmd); 1497 iscsit_set_dataout_sequence_values(cmd);
1382 iscsit_build_r2ts_for_cmd(cmd, conn, false); 1498 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1383 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1499 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1384 /* 1500 /*
1385 * Handle extra special case for out of order 1501 * Handle extra special case for out of order
1386 * Unsolicited Data Out. 1502 * Unsolicited Data Out.
@@ -1401,15 +1517,37 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1401 1517
1402 return 0; 1518 return 0;
1403} 1519}
1520EXPORT_SYMBOL(iscsit_check_dataout_payload);
1404 1521
1405static int iscsit_handle_nop_out( 1522static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1406 struct iscsi_conn *conn, 1523{
1407 unsigned char *buf) 1524 struct iscsi_cmd *cmd;
1525 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1526 int rc;
1527 bool data_crc_failed = false;
1528
1529 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1530 if (rc < 0)
1531 return rc;
1532 else if (!cmd)
1533 return 0;
1534
1535 rc = iscsit_get_dataout(conn, cmd, hdr);
1536 if (rc < 0)
1537 return rc;
1538 else if (rc > 0)
1539 data_crc_failed = true;
1540
1541 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1542}
1543
1544int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1545 unsigned char *buf)
1408{ 1546{
1409 unsigned char *ping_data = NULL; 1547 unsigned char *ping_data = NULL;
1410 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size; 1548 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
1411 u32 checksum, data_crc, padding = 0, payload_length; 1549 u32 checksum, data_crc, padding = 0, payload_length;
1412 struct iscsi_cmd *cmd = NULL; 1550 struct iscsi_cmd *cmd_p = NULL;
1413 struct kvec *iov = NULL; 1551 struct kvec *iov = NULL;
1414 struct iscsi_nopout *hdr; 1552 struct iscsi_nopout *hdr;
1415 1553
@@ -1432,7 +1570,7 @@ static int iscsit_handle_nop_out(
1432 buf, conn); 1570 buf, conn);
1433 } 1571 }
1434 1572
1435 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," 1573 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1436 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1574 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1437 hdr->itt == RESERVED_ITT ? "Response" : "Request", 1575 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1438 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, 1576 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
@@ -1445,7 +1583,6 @@ static int iscsit_handle_nop_out(
1445 * can contain ping data. 1583 * can contain ping data.
1446 */ 1584 */
1447 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 1585 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1448 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1449 if (!cmd) 1586 if (!cmd)
1450 return iscsit_add_reject( 1587 return iscsit_add_reject(
1451 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1588 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1580,14 +1717,14 @@ static int iscsit_handle_nop_out(
1580 /* 1717 /*
1581	 * This was a response to an unsolicited NOPIN ping. 1718
1582 */ 1719 */
1583 cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); 1720 cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1584 if (!cmd) 1721 if (!cmd_p)
1585 return -1; 1722 return -1;
1586 1723
1587 iscsit_stop_nopin_response_timer(conn); 1724 iscsit_stop_nopin_response_timer(conn);
1588 1725
1589 cmd->i_state = ISTATE_REMOVE; 1726 cmd_p->i_state = ISTATE_REMOVE;
1590 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1727 iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1591 iscsit_start_nopin_timer(conn); 1728 iscsit_start_nopin_timer(conn);
1592 } else { 1729 } else {
1593 /* 1730 /*
@@ -1611,12 +1748,12 @@ ping_out:
1611 kfree(ping_data); 1748 kfree(ping_data);
1612 return ret; 1749 return ret;
1613} 1750}
1751EXPORT_SYMBOL(iscsit_handle_nop_out);
1614 1752
1615static int iscsit_handle_task_mgt_cmd( 1753int
1616 struct iscsi_conn *conn, 1754iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1617 unsigned char *buf) 1755 unsigned char *buf)
1618{ 1756{
1619 struct iscsi_cmd *cmd;
1620 struct se_tmr_req *se_tmr; 1757 struct se_tmr_req *se_tmr;
1621 struct iscsi_tmr_req *tmr_req; 1758 struct iscsi_tmr_req *tmr_req;
1622 struct iscsi_tm *hdr; 1759 struct iscsi_tm *hdr;
@@ -1645,18 +1782,13 @@ static int iscsit_handle_task_mgt_cmd(
1645 pr_err("Task Management Request TASK_REASSIGN not" 1782 pr_err("Task Management Request TASK_REASSIGN not"
1646 " issued as immediate command, bad iSCSI Initiator" 1783 " issued as immediate command, bad iSCSI Initiator"
1647 "implementation\n"); 1784 "implementation\n");
1648 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1785 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
1649 buf, conn); 1786 1, 1, buf, cmd);
1650 } 1787 }
1651 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1788 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1652 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) 1789 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
1653 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); 1790 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
1654 1791
1655 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1656 if (!cmd)
1657 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1658 1, buf, conn);
1659
1660 cmd->data_direction = DMA_NONE; 1792 cmd->data_direction = DMA_NONE;
1661 1793
1662 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); 1794 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
@@ -1827,6 +1959,7 @@ attach:
1827 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 1959 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1828 return 0; 1960 return 0;
1829} 1961}
1962EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
1830 1963
1831/* #warning FIXME: Support Text Command parameters besides SendTargets */ 1964/* #warning FIXME: Support Text Command parameters besides SendTargets */
1832static int iscsit_handle_text_cmd( 1965static int iscsit_handle_text_cmd(
@@ -2089,13 +2222,12 @@ int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn
2089 return 0; 2222 return 0;
2090} 2223}
2091 2224
2092static int iscsit_handle_logout_cmd( 2225int
2093 struct iscsi_conn *conn, 2226iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2094 unsigned char *buf) 2227 unsigned char *buf)
2095{ 2228{
2096 int cmdsn_ret, logout_remove = 0; 2229 int cmdsn_ret, logout_remove = 0;
2097 u8 reason_code = 0; 2230 u8 reason_code = 0;
2098 struct iscsi_cmd *cmd;
2099 struct iscsi_logout *hdr; 2231 struct iscsi_logout *hdr;
2100 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2232 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2101 2233
@@ -2119,14 +2251,10 @@ static int iscsit_handle_logout_cmd(
2119 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2251 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2120 pr_err("Received logout request on connection that" 2252 pr_err("Received logout request on connection that"
2121 " is not in logged in state, ignoring request.\n"); 2253 " is not in logged in state, ignoring request.\n");
2254 iscsit_release_cmd(cmd);
2122 return 0; 2255 return 0;
2123 } 2256 }
2124 2257
2125 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
2126 if (!cmd)
2127 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
2128 buf, conn);
2129
2130 cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2258 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2131 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2259 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2132 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2260 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
@@ -2176,6 +2304,7 @@ static int iscsit_handle_logout_cmd(
2176 2304
2177 return logout_remove; 2305 return logout_remove;
2178} 2306}
2307EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2179 2308
2180static int iscsit_handle_snack( 2309static int iscsit_handle_snack(
2181 struct iscsi_conn *conn, 2310 struct iscsi_conn *conn,
@@ -2243,7 +2372,7 @@ static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2243 2372
2244static int iscsit_handle_immediate_data( 2373static int iscsit_handle_immediate_data(
2245 struct iscsi_cmd *cmd, 2374 struct iscsi_cmd *cmd,
2246 unsigned char *buf, 2375 struct iscsi_scsi_req *hdr,
2247 u32 length) 2376 u32 length)
2248{ 2377{
2249 int iov_ret, rx_got = 0, rx_size = 0; 2378 int iov_ret, rx_got = 0, rx_size = 0;
@@ -2299,12 +2428,12 @@ static int iscsit_handle_immediate_data(
2299 " in ERL=0.\n"); 2428 " in ERL=0.\n");
2300 iscsit_add_reject_from_cmd( 2429 iscsit_add_reject_from_cmd(
2301 ISCSI_REASON_DATA_DIGEST_ERROR, 2430 ISCSI_REASON_DATA_DIGEST_ERROR,
2302 1, 0, buf, cmd); 2431 1, 0, (unsigned char *)hdr, cmd);
2303 return IMMEDIATE_DATA_CANNOT_RECOVER; 2432 return IMMEDIATE_DATA_CANNOT_RECOVER;
2304 } else { 2433 } else {
2305 iscsit_add_reject_from_cmd( 2434 iscsit_add_reject_from_cmd(
2306 ISCSI_REASON_DATA_DIGEST_ERROR, 2435 ISCSI_REASON_DATA_DIGEST_ERROR,
2307 0, 0, buf, cmd); 2436 0, 0, (unsigned char *)hdr, cmd);
2308 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2437 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2309 } 2438 }
2310 } else { 2439 } else {
@@ -2424,18 +2553,60 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
2424 } 2553 }
2425} 2554}
2426 2555
2427static int iscsit_send_data_in( 2556static void
2428 struct iscsi_cmd *cmd, 2557iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2429 struct iscsi_conn *conn) 2558 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2559 bool set_statsn)
2430{ 2560{
2431 int iov_ret = 0, set_statsn = 0; 2561 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2432 u32 iov_count = 0, tx_size = 0; 2562 hdr->flags = datain->flags;
2563 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2564 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2565 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2566 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2567 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2568 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2569 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2570 }
2571 }
2572 hton24(hdr->dlength, datain->length);
2573 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2574 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2575 (struct scsi_lun *)&hdr->lun);
2576 else
2577 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2578
2579 hdr->itt = cmd->init_task_tag;
2580
2581 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2582 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2583 else
2584 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2585 if (set_statsn)
2586 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2587 else
2588 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2589
2590 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2591 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2592 hdr->datasn = cpu_to_be32(datain->data_sn);
2593 hdr->offset = cpu_to_be32(datain->offset);
2594
2595 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2596 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2597 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2598 ntohl(hdr->offset), datain->length, conn->cid);
2599}
2600
2601static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2602{
2603 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2433 struct iscsi_datain datain; 2604 struct iscsi_datain datain;
2434 struct iscsi_datain_req *dr; 2605 struct iscsi_datain_req *dr;
2435 struct iscsi_data_rsp *hdr;
2436 struct kvec *iov; 2606 struct kvec *iov;
2437 int eodr = 0; 2607 u32 iov_count = 0, tx_size = 0;
2438 int ret; 2608 int eodr = 0, ret, iov_ret;
2609 bool set_statsn = false;
2439 2610
2440 memset(&datain, 0, sizeof(struct iscsi_datain)); 2611 memset(&datain, 0, sizeof(struct iscsi_datain));
2441 dr = iscsit_get_datain_values(cmd, &datain); 2612 dr = iscsit_get_datain_values(cmd, &datain);
@@ -2444,7 +2615,6 @@ static int iscsit_send_data_in(
2444 cmd->init_task_tag); 2615 cmd->init_task_tag);
2445 return -1; 2616 return -1;
2446 } 2617 }
2447
2448 /* 2618 /*
2449 * Be paranoid and double check the logic for now. 2619 * Be paranoid and double check the logic for now.
2450 */ 2620 */
@@ -2452,7 +2622,7 @@ static int iscsit_send_data_in(
2452 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2622 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2453 " datain.length: %u exceeds cmd->data_length: %u\n", 2623 " datain.length: %u exceeds cmd->data_length: %u\n",
2454 cmd->init_task_tag, datain.offset, datain.length, 2624 cmd->init_task_tag, datain.offset, datain.length,
2455 cmd->se_cmd.data_length); 2625 cmd->se_cmd.data_length);
2456 return -1; 2626 return -1;
2457 } 2627 }
2458 2628
@@ -2476,47 +2646,13 @@ static int iscsit_send_data_in(
2476 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2646 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2477 iscsit_increment_maxcmdsn(cmd, conn->sess); 2647 iscsit_increment_maxcmdsn(cmd, conn->sess);
2478 cmd->stat_sn = conn->stat_sn++; 2648 cmd->stat_sn = conn->stat_sn++;
2479 set_statsn = 1; 2649 set_statsn = true;
2480 } else if (dr->dr_complete == 2650 } else if (dr->dr_complete ==
2481 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2651 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2482 set_statsn = 1; 2652 set_statsn = true;
2483 } 2653 }
2484 2654
2485 hdr = (struct iscsi_data_rsp *) cmd->pdu; 2655 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2486 memset(hdr, 0, ISCSI_HDR_LEN);
2487 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2488 hdr->flags = datain.flags;
2489 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2490 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2491 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2492 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2493 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2494 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2495 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2496 }
2497 }
2498 hton24(hdr->dlength, datain.length);
2499 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2500 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2501 (struct scsi_lun *)&hdr->lun);
2502 else
2503 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2504
2505 hdr->itt = cmd->init_task_tag;
2506
2507 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2508 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2509 else
2510 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2511 if (set_statsn)
2512 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2513 else
2514 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2515
2516 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2517 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2518 hdr->datasn = cpu_to_be32(datain.data_sn);
2519 hdr->offset = cpu_to_be32(datain.offset);
2520 2656
2521 iov = &cmd->iov_data[0]; 2657 iov = &cmd->iov_data[0];
2522 iov[iov_count].iov_base = cmd->pdu; 2658 iov[iov_count].iov_base = cmd->pdu;
@@ -2527,7 +2663,7 @@ static int iscsit_send_data_in(
2527 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2663 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2528 2664
2529 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2665 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2530 (unsigned char *)hdr, ISCSI_HDR_LEN, 2666 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
2531 0, NULL, (u8 *)header_digest); 2667 0, NULL, (u8 *)header_digest);
2532 2668
2533 iov[0].iov_len += ISCSI_CRC_LEN; 2669 iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2537,7 +2673,8 @@ static int iscsit_send_data_in(
2537 " for DataIN PDU 0x%08x\n", *header_digest); 2673 " for DataIN PDU 0x%08x\n", *header_digest);
2538 } 2674 }
2539 2675
2540 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); 2676 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
2677 datain.offset, datain.length);
2541 if (iov_ret < 0) 2678 if (iov_ret < 0)
2542 return -1; 2679 return -1;
2543 2680
@@ -2568,11 +2705,6 @@ static int iscsit_send_data_in(
2568 cmd->iov_data_count = iov_count; 2705 cmd->iov_data_count = iov_count;
2569 cmd->tx_size = tx_size; 2706 cmd->tx_size = tx_size;
2570 2707
2571 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2572 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2573 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2574 ntohl(hdr->offset), datain.length, conn->cid);
2575
2576 /* sendpage is preferred but can't insert markers */ 2708 /* sendpage is preferred but can't insert markers */
2577 if (!conn->conn_ops->IFMarker) 2709 if (!conn->conn_ops->IFMarker)
2578 ret = iscsit_fe_sendpage_sg(cmd, conn); 2710 ret = iscsit_fe_sendpage_sg(cmd, conn);
@@ -2595,16 +2727,13 @@ static int iscsit_send_data_in(
2595 return eodr; 2727 return eodr;
2596} 2728}
2597 2729
2598static int iscsit_send_logout_response( 2730int
2599 struct iscsi_cmd *cmd, 2731iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2600 struct iscsi_conn *conn) 2732 struct iscsi_logout_rsp *hdr)
2601{ 2733{
2602 int niov = 0, tx_size;
2603 struct iscsi_conn *logout_conn = NULL; 2734 struct iscsi_conn *logout_conn = NULL;
2604 struct iscsi_conn_recovery *cr = NULL; 2735 struct iscsi_conn_recovery *cr = NULL;
2605 struct iscsi_session *sess = conn->sess; 2736 struct iscsi_session *sess = conn->sess;
2606 struct kvec *iov;
2607 struct iscsi_logout_rsp *hdr;
2608 /* 2737 /*
2609 * The actual shutting down of Sessions and/or Connections 2738 * The actual shutting down of Sessions and/or Connections
2610 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2739 * for CLOSESESSION and CLOSECONNECTION Logout Requests
@@ -2673,9 +2802,6 @@ static int iscsit_send_logout_response(
2673 return -1; 2802 return -1;
2674 } 2803 }
2675 2804
2676 tx_size = ISCSI_HDR_LEN;
2677 hdr = (struct iscsi_logout_rsp *)cmd->pdu;
2678 memset(hdr, 0, ISCSI_HDR_LEN);
2679 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2805 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2680 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2806 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2681 hdr->response = cmd->logout_response; 2807 hdr->response = cmd->logout_response;
@@ -2687,6 +2813,27 @@ static int iscsit_send_logout_response(
2687 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2813 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2688 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2814 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2689 2815
2816 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
2817 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2818 cmd->init_task_tag, cmd->stat_sn, hdr->response,
2819 cmd->logout_cid, conn->cid);
2820
2821 return 0;
2822}
2823EXPORT_SYMBOL(iscsit_build_logout_rsp);
2824
2825static int
2826iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2827{
2828 struct kvec *iov;
2829 int niov = 0, tx_size, rc;
2830
2831 rc = iscsit_build_logout_rsp(cmd, conn,
2832 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
2833 if (rc < 0)
2834 return rc;
2835
2836 tx_size = ISCSI_HDR_LEN;
2690 iov = &cmd->iov_misc[0]; 2837 iov = &cmd->iov_misc[0];
2691 iov[niov].iov_base = cmd->pdu; 2838 iov[niov].iov_base = cmd->pdu;
2692 iov[niov++].iov_len = ISCSI_HDR_LEN; 2839 iov[niov++].iov_len = ISCSI_HDR_LEN;
@@ -2695,7 +2842,7 @@ static int iscsit_send_logout_response(
2695 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2842 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2696 2843
2697 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2844 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2698 (unsigned char *)hdr, ISCSI_HDR_LEN, 2845 (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
2699 0, NULL, (u8 *)header_digest); 2846 0, NULL, (u8 *)header_digest);
2700 2847
2701 iov[0].iov_len += ISCSI_CRC_LEN; 2848 iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2706,14 +2853,37 @@ static int iscsit_send_logout_response(
2706 cmd->iov_misc_count = niov; 2853 cmd->iov_misc_count = niov;
2707 cmd->tx_size = tx_size; 2854 cmd->tx_size = tx_size;
2708 2855
2709 pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
2710 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2711 cmd->init_task_tag, cmd->stat_sn, hdr->response,
2712 cmd->logout_cid, conn->cid);
2713
2714 return 0; 2856 return 0;
2715} 2857}
2716 2858
2859void
2860iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2861 struct iscsi_nopin *hdr, bool nopout_response)
2862{
2863 hdr->opcode = ISCSI_OP_NOOP_IN;
2864 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2865 hton24(hdr->dlength, cmd->buf_ptr_size);
2866 if (nopout_response)
2867 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2868 hdr->itt = cmd->init_task_tag;
2869 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2870 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
2871 conn->stat_sn;
2872 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2873
2874 if (nopout_response)
2875 iscsit_increment_maxcmdsn(cmd, conn->sess);
2876
2877 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2878 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2879
2880 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
2881 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
2882 "Solicitied" : "Unsolicitied", cmd->init_task_tag,
2883 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2884}
2885EXPORT_SYMBOL(iscsit_build_nopin_rsp);
2886
2717/* 2887/*
2718 * Unsolicited NOPIN, either requesting a response or not. 2888 * Unsolicited NOPIN, either requesting a response or not.
2719 */ 2889 */
@@ -2722,20 +2892,10 @@ static int iscsit_send_unsolicited_nopin(
2722 struct iscsi_conn *conn, 2892 struct iscsi_conn *conn,
2723 int want_response) 2893 int want_response)
2724{ 2894{
2725 int tx_size = ISCSI_HDR_LEN; 2895 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2726 struct iscsi_nopin *hdr; 2896 int tx_size = ISCSI_HDR_LEN, ret;
2727 int ret;
2728 2897
2729 hdr = (struct iscsi_nopin *) cmd->pdu; 2898 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
2730 memset(hdr, 0, ISCSI_HDR_LEN);
2731 hdr->opcode = ISCSI_OP_NOOP_IN;
2732 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2733 hdr->itt = cmd->init_task_tag;
2734 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2735 cmd->stat_sn = conn->stat_sn;
2736 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2737 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2738 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2739 2899
2740 if (conn->conn_ops->HeaderDigest) { 2900 if (conn->conn_ops->HeaderDigest) {
2741 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2901 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
@@ -2771,31 +2931,17 @@ static int iscsit_send_unsolicited_nopin(
2771 return 0; 2931 return 0;
2772} 2932}
2773 2933
2774static int iscsit_send_nopin_response( 2934static int
2775 struct iscsi_cmd *cmd, 2935iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2776 struct iscsi_conn *conn)
2777{ 2936{
2778 int niov = 0, tx_size; 2937 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
2779 u32 padding = 0;
2780 struct kvec *iov; 2938 struct kvec *iov;
2781 struct iscsi_nopin *hdr; 2939 u32 padding = 0;
2782 2940 int niov = 0, tx_size;
2783 tx_size = ISCSI_HDR_LEN;
2784 hdr = (struct iscsi_nopin *) cmd->pdu;
2785 memset(hdr, 0, ISCSI_HDR_LEN);
2786 hdr->opcode = ISCSI_OP_NOOP_IN;
2787 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2788 hton24(hdr->dlength, cmd->buf_ptr_size);
2789 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2790 hdr->itt = cmd->init_task_tag;
2791 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2792 cmd->stat_sn = conn->stat_sn++;
2793 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2794 2941
2795 iscsit_increment_maxcmdsn(cmd, conn->sess); 2942 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
2796 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2797 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2798 2943
2944 tx_size = ISCSI_HDR_LEN;
2799 iov = &cmd->iov_misc[0]; 2945 iov = &cmd->iov_misc[0];
2800 iov[niov].iov_base = cmd->pdu; 2946 iov[niov].iov_base = cmd->pdu;
2801 iov[niov++].iov_len = ISCSI_HDR_LEN; 2947 iov[niov++].iov_len = ISCSI_HDR_LEN;
@@ -2851,10 +2997,6 @@ static int iscsit_send_nopin_response(
2851 cmd->iov_misc_count = niov; 2997 cmd->iov_misc_count = niov;
2852 cmd->tx_size = tx_size; 2998 cmd->tx_size = tx_size;
2853 2999
2854 pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
2855 " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
2856 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2857
2858 return 0; 3000 return 0;
2859} 3001}
2860 3002
@@ -2939,8 +3081,8 @@ static int iscsit_send_r2t(
2939 * connection recovery. 3081 * connection recovery.
2940 */ 3082 */
2941int iscsit_build_r2ts_for_cmd( 3083int iscsit_build_r2ts_for_cmd(
2942 struct iscsi_cmd *cmd,
2943 struct iscsi_conn *conn, 3084 struct iscsi_conn *conn,
3085 struct iscsi_cmd *cmd,
2944 bool recovery) 3086 bool recovery)
2945{ 3087{
2946 int first_r2t = 1; 3088 int first_r2t = 1;
@@ -3015,24 +3157,16 @@ int iscsit_build_r2ts_for_cmd(
3015 return 0; 3157 return 0;
3016} 3158}
3017 3159
3018static int iscsit_send_status( 3160void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3019 struct iscsi_cmd *cmd, 3161 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3020 struct iscsi_conn *conn)
3021{ 3162{
3022 u8 iov_count = 0, recovery; 3163 if (inc_stat_sn)
3023 u32 padding = 0, tx_size = 0;
3024 struct iscsi_scsi_rsp *hdr;
3025 struct kvec *iov;
3026
3027 recovery = (cmd->i_state != ISTATE_SEND_STATUS);
3028 if (!recovery)
3029 cmd->stat_sn = conn->stat_sn++; 3164 cmd->stat_sn = conn->stat_sn++;
3030 3165
3031 spin_lock_bh(&conn->sess->session_stats_lock); 3166 spin_lock_bh(&conn->sess->session_stats_lock);
3032 conn->sess->rsp_pdus++; 3167 conn->sess->rsp_pdus++;
3033 spin_unlock_bh(&conn->sess->session_stats_lock); 3168 spin_unlock_bh(&conn->sess->session_stats_lock);
3034 3169
3035 hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
3036 memset(hdr, 0, ISCSI_HDR_LEN); 3170 memset(hdr, 0, ISCSI_HDR_LEN);
3037 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3171 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3038 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3172 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -3052,6 +3186,23 @@ static int iscsit_send_status(
3052 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3186 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3053 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3187 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3054 3188
3189 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3190 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3191 cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
3192 cmd->se_cmd.scsi_status, conn->cid);
3193}
3194EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3195
3196static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3197{
3198 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3199 struct kvec *iov;
3200 u32 padding = 0, tx_size = 0;
3201 int iov_count = 0;
3202 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3203
3204 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3205
3055 iov = &cmd->iov_misc[0]; 3206 iov = &cmd->iov_misc[0];
3056 iov[iov_count].iov_base = cmd->pdu; 3207 iov[iov_count].iov_base = cmd->pdu;
3057 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3208 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
@@ -3106,7 +3257,7 @@ static int iscsit_send_status(
3106 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3257 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3107 3258
3108 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3259 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3109 (unsigned char *)hdr, ISCSI_HDR_LEN, 3260 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
3110 0, NULL, (u8 *)header_digest); 3261 0, NULL, (u8 *)header_digest);
3111 3262
3112 iov[0].iov_len += ISCSI_CRC_LEN; 3263 iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3118,11 +3269,6 @@ static int iscsit_send_status(
3118 cmd->iov_misc_count = iov_count; 3269 cmd->iov_misc_count = iov_count;
3119 cmd->tx_size = tx_size; 3270 cmd->tx_size = tx_size;
3120 3271
3121 pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3122 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3123 (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
3124 cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
3125
3126 return 0; 3272 return 0;
3127} 3273}
3128 3274
@@ -3145,16 +3291,12 @@ static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3145 } 3291 }
3146} 3292}
3147 3293
3148static int iscsit_send_task_mgt_rsp( 3294void
3149 struct iscsi_cmd *cmd, 3295iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3150 struct iscsi_conn *conn) 3296 struct iscsi_tm_rsp *hdr)
3151{ 3297{
3152 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3298 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3153 struct iscsi_tm_rsp *hdr;
3154 u32 tx_size = 0;
3155 3299
3156 hdr = (struct iscsi_tm_rsp *) cmd->pdu;
3157 memset(hdr, 0, ISCSI_HDR_LEN);
3158 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3300 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3159 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3301 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3160 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3302 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
@@ -3166,6 +3308,20 @@ static int iscsit_send_task_mgt_rsp(
3166 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3308 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3167 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3309 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3168 3310
3311 pr_debug("Built Task Management Response ITT: 0x%08x,"
3312 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3313 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3314}
3315EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3316
3317static int
3318iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3319{
3320 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3321 u32 tx_size = 0;
3322
3323 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3324
3169 cmd->iov_misc[0].iov_base = cmd->pdu; 3325 cmd->iov_misc[0].iov_base = cmd->pdu;
3170 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3326 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3171 tx_size += ISCSI_HDR_LEN; 3327 tx_size += ISCSI_HDR_LEN;
@@ -3186,10 +3342,6 @@ static int iscsit_send_task_mgt_rsp(
3186 cmd->iov_misc_count = 1; 3342 cmd->iov_misc_count = 1;
3187 cmd->tx_size = tx_size; 3343 cmd->tx_size = tx_size;
3188 3344
3189 pr_debug("Built Task Management Response ITT: 0x%08x,"
3190 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3191 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3192
3193 return 0; 3345 return 0;
3194} 3346}
3195 3347
@@ -3385,6 +3537,22 @@ static int iscsit_send_text_rsp(
3385 return 0; 3537 return 0;
3386} 3538}
3387 3539
3540void
3541iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3542 struct iscsi_reject *hdr)
3543{
3544 hdr->opcode = ISCSI_OP_REJECT;
3545 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3546 hton24(hdr->dlength, ISCSI_HDR_LEN);
3547 hdr->ffffffff = cpu_to_be32(0xffffffff);
3548 cmd->stat_sn = conn->stat_sn++;
3549 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3550 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3551 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3552
3553}
3554EXPORT_SYMBOL(iscsit_build_reject);
3555
3388static int iscsit_send_reject( 3556static int iscsit_send_reject(
3389 struct iscsi_cmd *cmd, 3557 struct iscsi_cmd *cmd,
3390 struct iscsi_conn *conn) 3558 struct iscsi_conn *conn)
@@ -3393,18 +3561,9 @@ static int iscsit_send_reject(
3393 struct iscsi_reject *hdr; 3561 struct iscsi_reject *hdr;
3394 struct kvec *iov; 3562 struct kvec *iov;
3395 3563
3396 hdr = (struct iscsi_reject *) cmd->pdu; 3564 iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
3397 hdr->opcode = ISCSI_OP_REJECT;
3398 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3399 hton24(hdr->dlength, ISCSI_HDR_LEN);
3400 hdr->ffffffff = cpu_to_be32(0xffffffff);
3401 cmd->stat_sn = conn->stat_sn++;
3402 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3403 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3404 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3405 3565
3406 iov = &cmd->iov_misc[0]; 3566 iov = &cmd->iov_misc[0];
3407
3408 iov[iov_count].iov_base = cmd->pdu; 3567 iov[iov_count].iov_base = cmd->pdu;
3409 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3568 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3410 iov[iov_count].iov_base = cmd->buf_ptr; 3569 iov[iov_count].iov_base = cmd->buf_ptr;
@@ -3501,55 +3660,41 @@ static inline void iscsit_thread_check_cpumask(
3501 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3660 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3502} 3661}
3503 3662
3504static int handle_immediate_queue(struct iscsi_conn *conn) 3663static int
3664iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3505{ 3665{
3506 struct iscsi_queue_req *qr;
3507 struct iscsi_cmd *cmd;
3508 u8 state;
3509 int ret; 3666 int ret;
3510 3667
3511 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { 3668 switch (state) {
3512 atomic_set(&conn->check_immediate_queue, 0); 3669 case ISTATE_SEND_R2T:
3513 cmd = qr->cmd; 3670 ret = iscsit_send_r2t(cmd, conn);
3514 state = qr->state; 3671 if (ret < 0)
3515 kmem_cache_free(lio_qr_cache, qr); 3672 goto err;
3516 3673 break;
3517 switch (state) { 3674 case ISTATE_REMOVE:
3518 case ISTATE_SEND_R2T: 3675 spin_lock_bh(&conn->cmd_lock);
3519 ret = iscsit_send_r2t(cmd, conn); 3676 list_del(&cmd->i_conn_node);
3520 if (ret < 0) 3677 spin_unlock_bh(&conn->cmd_lock);
3521 goto err;
3522 break;
3523 case ISTATE_REMOVE:
3524 if (cmd->data_direction == DMA_TO_DEVICE)
3525 iscsit_stop_dataout_timer(cmd);
3526
3527 spin_lock_bh(&conn->cmd_lock);
3528 list_del(&cmd->i_conn_node);
3529 spin_unlock_bh(&conn->cmd_lock);
3530 3678
3531 iscsit_free_cmd(cmd); 3679 iscsit_free_cmd(cmd);
3532 continue; 3680 break;
3533 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3681 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3534 iscsit_mod_nopin_response_timer(conn); 3682 iscsit_mod_nopin_response_timer(conn);
3535 ret = iscsit_send_unsolicited_nopin(cmd, 3683 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3536 conn, 1); 3684 if (ret < 0)
3537 if (ret < 0)
3538 goto err;
3539 break;
3540 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3541 ret = iscsit_send_unsolicited_nopin(cmd,
3542 conn, 0);
3543 if (ret < 0)
3544 goto err;
3545 break;
3546 default:
3547 pr_err("Unknown Opcode: 0x%02x ITT:"
3548 " 0x%08x, i_state: %d on CID: %hu\n",
3549 cmd->iscsi_opcode, cmd->init_task_tag, state,
3550 conn->cid);
3551 goto err; 3685 goto err;
3552 } 3686 break;
3687 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3688 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3689 if (ret < 0)
3690 goto err;
3691 break;
3692 default:
3693 pr_err("Unknown Opcode: 0x%02x ITT:"
3694 " 0x%08x, i_state: %d on CID: %hu\n",
3695 cmd->iscsi_opcode, cmd->init_task_tag, state,
3696 conn->cid);
3697 goto err;
3553 } 3698 }
3554 3699
3555 return 0; 3700 return 0;
@@ -3558,128 +3703,143 @@ err:
3558 return -1; 3703 return -1;
3559} 3704}
3560 3705
3561static int handle_response_queue(struct iscsi_conn *conn) 3706static int
3707iscsit_handle_immediate_queue(struct iscsi_conn *conn)
3562{ 3708{
3709 struct iscsit_transport *t = conn->conn_transport;
3563 struct iscsi_queue_req *qr; 3710 struct iscsi_queue_req *qr;
3564 struct iscsi_cmd *cmd; 3711 struct iscsi_cmd *cmd;
3565 u8 state; 3712 u8 state;
3566 int ret; 3713 int ret;
3567 3714
3568 while ((qr = iscsit_get_cmd_from_response_queue(conn))) { 3715 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3716 atomic_set(&conn->check_immediate_queue, 0);
3569 cmd = qr->cmd; 3717 cmd = qr->cmd;
3570 state = qr->state; 3718 state = qr->state;
3571 kmem_cache_free(lio_qr_cache, qr); 3719 kmem_cache_free(lio_qr_cache, qr);
3572 3720
3573check_rsp_state: 3721 ret = t->iscsit_immediate_queue(conn, cmd, state);
3574 switch (state) { 3722 if (ret < 0)
3575 case ISTATE_SEND_DATAIN: 3723 return ret;
3576 ret = iscsit_send_data_in(cmd, conn); 3724 }
3577 if (ret < 0)
3578 goto err;
3579 else if (!ret)
3580 /* more drs */
3581 goto check_rsp_state;
3582 else if (ret == 1) {
3583 /* all done */
3584 spin_lock_bh(&cmd->istate_lock);
3585 cmd->i_state = ISTATE_SENT_STATUS;
3586 spin_unlock_bh(&cmd->istate_lock);
3587
3588 if (atomic_read(&conn->check_immediate_queue))
3589 return 1;
3590 3725
3591 continue; 3726 return 0;
3592 } else if (ret == 2) { 3727}
3593 /* Still must send status,
3594 SCF_TRANSPORT_TASK_SENSE was set */
3595 spin_lock_bh(&cmd->istate_lock);
3596 cmd->i_state = ISTATE_SEND_STATUS;
3597 spin_unlock_bh(&cmd->istate_lock);
3598 state = ISTATE_SEND_STATUS;
3599 goto check_rsp_state;
3600 }
3601 3728
3602 break; 3729static int
3603 case ISTATE_SEND_STATUS: 3730iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3604 case ISTATE_SEND_STATUS_RECOVERY: 3731{
3605 ret = iscsit_send_status(cmd, conn); 3732 int ret;
3606 break; 3733
3607 case ISTATE_SEND_LOGOUTRSP: 3734check_rsp_state:
3608 ret = iscsit_send_logout_response(cmd, conn); 3735 switch (state) {
3609 break; 3736 case ISTATE_SEND_DATAIN:
3610 case ISTATE_SEND_ASYNCMSG: 3737 ret = iscsit_send_datain(cmd, conn);
3611 ret = iscsit_send_conn_drop_async_message(
3612 cmd, conn);
3613 break;
3614 case ISTATE_SEND_NOPIN:
3615 ret = iscsit_send_nopin_response(cmd, conn);
3616 break;
3617 case ISTATE_SEND_REJECT:
3618 ret = iscsit_send_reject(cmd, conn);
3619 break;
3620 case ISTATE_SEND_TASKMGTRSP:
3621 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3622 if (ret != 0)
3623 break;
3624 ret = iscsit_tmr_post_handler(cmd, conn);
3625 if (ret != 0)
3626 iscsit_fall_back_to_erl0(conn->sess);
3627 break;
3628 case ISTATE_SEND_TEXTRSP:
3629 ret = iscsit_send_text_rsp(cmd, conn);
3630 break;
3631 default:
3632 pr_err("Unknown Opcode: 0x%02x ITT:"
3633 " 0x%08x, i_state: %d on CID: %hu\n",
3634 cmd->iscsi_opcode, cmd->init_task_tag,
3635 state, conn->cid);
3636 goto err;
3637 }
3638 if (ret < 0) 3738 if (ret < 0)
3639 goto err; 3739 goto err;
3740 else if (!ret)
3741 /* more drs */
3742 goto check_rsp_state;
3743 else if (ret == 1) {
3744 /* all done */
3745 spin_lock_bh(&cmd->istate_lock);
3746 cmd->i_state = ISTATE_SENT_STATUS;
3747 spin_unlock_bh(&cmd->istate_lock);
3640 3748
3641 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3749 if (atomic_read(&conn->check_immediate_queue))
3642 iscsit_tx_thread_wait_for_tcp(conn); 3750 return 1;
3643 iscsit_unmap_iovec(cmd);
3644 goto err;
3645 }
3646 iscsit_unmap_iovec(cmd);
3647 3751
3648 switch (state) { 3752 return 0;
3649 case ISTATE_SEND_LOGOUTRSP: 3753 } else if (ret == 2) {
3650 if (!iscsit_logout_post_handler(cmd, conn)) 3754 /* Still must send status,
3651 goto restart; 3755 SCF_TRANSPORT_TASK_SENSE was set */
3652 /* fall through */
3653 case ISTATE_SEND_STATUS:
3654 case ISTATE_SEND_ASYNCMSG:
3655 case ISTATE_SEND_NOPIN:
3656 case ISTATE_SEND_STATUS_RECOVERY:
3657 case ISTATE_SEND_TEXTRSP:
3658 case ISTATE_SEND_TASKMGTRSP:
3659 spin_lock_bh(&cmd->istate_lock); 3756 spin_lock_bh(&cmd->istate_lock);
3660 cmd->i_state = ISTATE_SENT_STATUS; 3757 cmd->i_state = ISTATE_SEND_STATUS;
3661 spin_unlock_bh(&cmd->istate_lock); 3758 spin_unlock_bh(&cmd->istate_lock);
3759 state = ISTATE_SEND_STATUS;
3760 goto check_rsp_state;
3761 }
3762
3763 break;
3764 case ISTATE_SEND_STATUS:
3765 case ISTATE_SEND_STATUS_RECOVERY:
3766 ret = iscsit_send_response(cmd, conn);
3767 break;
3768 case ISTATE_SEND_LOGOUTRSP:
3769 ret = iscsit_send_logout(cmd, conn);
3770 break;
3771 case ISTATE_SEND_ASYNCMSG:
3772 ret = iscsit_send_conn_drop_async_message(
3773 cmd, conn);
3774 break;
3775 case ISTATE_SEND_NOPIN:
3776 ret = iscsit_send_nopin(cmd, conn);
3777 break;
3778 case ISTATE_SEND_REJECT:
3779 ret = iscsit_send_reject(cmd, conn);
3780 break;
3781 case ISTATE_SEND_TASKMGTRSP:
3782 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3783 if (ret != 0)
3662 break; 3784 break;
3663 case ISTATE_SEND_REJECT: 3785 ret = iscsit_tmr_post_handler(cmd, conn);
3664 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3786 if (ret != 0)
3665 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3787 iscsit_fall_back_to_erl0(conn->sess);
3666 complete(&cmd->reject_comp); 3788 break;
3667 goto err; 3789 case ISTATE_SEND_TEXTRSP:
3668 } 3790 ret = iscsit_send_text_rsp(cmd, conn);
3791 break;
3792 default:
3793 pr_err("Unknown Opcode: 0x%02x ITT:"
3794 " 0x%08x, i_state: %d on CID: %hu\n",
3795 cmd->iscsi_opcode, cmd->init_task_tag,
3796 state, conn->cid);
3797 goto err;
3798 }
3799 if (ret < 0)
3800 goto err;
3801
3802 if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3803 iscsit_tx_thread_wait_for_tcp(conn);
3804 iscsit_unmap_iovec(cmd);
3805 goto err;
3806 }
3807 iscsit_unmap_iovec(cmd);
3808
3809 switch (state) {
3810 case ISTATE_SEND_LOGOUTRSP:
3811 if (!iscsit_logout_post_handler(cmd, conn))
3812 goto restart;
3813 /* fall through */
3814 case ISTATE_SEND_STATUS:
3815 case ISTATE_SEND_ASYNCMSG:
3816 case ISTATE_SEND_NOPIN:
3817 case ISTATE_SEND_STATUS_RECOVERY:
3818 case ISTATE_SEND_TEXTRSP:
3819 case ISTATE_SEND_TASKMGTRSP:
3820 spin_lock_bh(&cmd->istate_lock);
3821 cmd->i_state = ISTATE_SENT_STATUS;
3822 spin_unlock_bh(&cmd->istate_lock);
3823 break;
3824 case ISTATE_SEND_REJECT:
3825 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
3826 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
3669 complete(&cmd->reject_comp); 3827 complete(&cmd->reject_comp);
3670 break;
3671 default:
3672 pr_err("Unknown Opcode: 0x%02x ITT:"
3673 " 0x%08x, i_state: %d on CID: %hu\n",
3674 cmd->iscsi_opcode, cmd->init_task_tag,
3675 cmd->i_state, conn->cid);
3676 goto err; 3828 goto err;
3677 } 3829 }
3678 3830 complete(&cmd->reject_comp);
3679 if (atomic_read(&conn->check_immediate_queue)) 3831 break;
3680 return 1; 3832 default:
3833 pr_err("Unknown Opcode: 0x%02x ITT:"
3834 " 0x%08x, i_state: %d on CID: %hu\n",
3835 cmd->iscsi_opcode, cmd->init_task_tag,
3836 cmd->i_state, conn->cid);
3837 goto err;
3681 } 3838 }
3682 3839
3840 if (atomic_read(&conn->check_immediate_queue))
3841 return 1;
3842
3683 return 0; 3843 return 0;
3684 3844
3685err: 3845err:
@@ -3688,6 +3848,27 @@ restart:
3688 return -EAGAIN; 3848 return -EAGAIN;
3689} 3849}
3690 3850
3851static int iscsit_handle_response_queue(struct iscsi_conn *conn)
3852{
3853 struct iscsit_transport *t = conn->conn_transport;
3854 struct iscsi_queue_req *qr;
3855 struct iscsi_cmd *cmd;
3856 u8 state;
3857 int ret;
3858
3859 while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3860 cmd = qr->cmd;
3861 state = qr->state;
3862 kmem_cache_free(lio_qr_cache, qr);
3863
3864 ret = t->iscsit_response_queue(conn, cmd, state);
3865 if (ret == 1 || ret < 0)
3866 return ret;
3867 }
3868
3869 return 0;
3870}
3871
3691int iscsi_target_tx_thread(void *arg) 3872int iscsi_target_tx_thread(void *arg)
3692{ 3873{
3693 int ret = 0; 3874 int ret = 0;
@@ -3722,11 +3903,11 @@ restart:
3722 goto transport_err; 3903 goto transport_err;
3723 3904
3724get_immediate: 3905get_immediate:
3725 ret = handle_immediate_queue(conn); 3906 ret = iscsit_handle_immediate_queue(conn);
3726 if (ret < 0) 3907 if (ret < 0)
3727 goto transport_err; 3908 goto transport_err;
3728 3909
3729 ret = handle_response_queue(conn); 3910 ret = iscsit_handle_response_queue(conn);
3730 if (ret == 1) 3911 if (ret == 1)
3731 goto get_immediate; 3912 goto get_immediate;
3732 else if (ret == -EAGAIN) 3913 else if (ret == -EAGAIN)
@@ -3742,6 +3923,83 @@ out:
3742 return 0; 3923 return 0;
3743} 3924}
3744 3925
3926static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3927{
3928 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3929 struct iscsi_cmd *cmd;
3930 int ret = 0;
3931
3932 switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3933 case ISCSI_OP_SCSI_CMD:
3934 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3935 if (!cmd)
3936 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3937 1, buf, conn);
3938
3939 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3940 break;
3941 case ISCSI_OP_SCSI_DATA_OUT:
3942 ret = iscsit_handle_data_out(conn, buf);
3943 break;
3944 case ISCSI_OP_NOOP_OUT:
3945 cmd = NULL;
3946 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3947 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3948 if (!cmd)
3949 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3950 1, buf, conn);
3951 }
3952 ret = iscsit_handle_nop_out(conn, cmd, buf);
3953 break;
3954 case ISCSI_OP_SCSI_TMFUNC:
3955 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3956 if (!cmd)
3957 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3958 1, buf, conn);
3959
3960 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
3961 break;
3962 case ISCSI_OP_TEXT:
3963 ret = iscsit_handle_text_cmd(conn, buf);
3964 break;
3965 case ISCSI_OP_LOGOUT:
3966 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3967 if (!cmd)
3968 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3969 1, buf, conn);
3970
3971 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
3972 if (ret > 0)
3973 wait_for_completion_timeout(&conn->conn_logout_comp,
3974 SECONDS_FOR_LOGOUT_COMP * HZ);
3975 break;
3976 case ISCSI_OP_SNACK:
3977 ret = iscsit_handle_snack(conn, buf);
3978 break;
3979 default:
3980 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
3981 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3982 pr_err("Cannot recover from unknown"
3983 " opcode while ERL=0, closing iSCSI connection.\n");
3984 return -1;
3985 }
3986 if (!conn->conn_ops->OFMarker) {
3987 pr_err("Unable to recover from unknown"
3988 " opcode while OFMarker=No, closing iSCSI"
3989 " connection.\n");
3990 return -1;
3991 }
3992 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3993 pr_err("Unable to recover from unknown"
3994 " opcode, closing iSCSI connection.\n");
3995 return -1;
3996 }
3997 break;
3998 }
3999
4000 return ret;
4001}
4002
3745int iscsi_target_rx_thread(void *arg) 4003int iscsi_target_rx_thread(void *arg)
3746{ 4004{
3747 int ret; 4005 int ret;
@@ -3761,6 +4019,18 @@ restart:
3761 if (!conn) 4019 if (!conn)
3762 goto out; 4020 goto out;
3763 4021
4022 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4023 struct completion comp;
4024 int rc;
4025
4026 init_completion(&comp);
4027 rc = wait_for_completion_interruptible(&comp);
4028 if (rc < 0)
4029 goto transport_err;
4030
4031 goto out;
4032 }
4033
3764 while (!kthread_should_stop()) { 4034 while (!kthread_should_stop()) {
3765 /* 4035 /*
3766 * Ensure that both TX and RX per connection kthreads 4036 * Ensure that both TX and RX per connection kthreads
@@ -3832,62 +4102,9 @@ restart:
3832 goto transport_err; 4102 goto transport_err;
3833 } 4103 }
3834 4104
3835 switch (opcode) { 4105 ret = iscsi_target_rx_opcode(conn, buffer);
3836 case ISCSI_OP_SCSI_CMD: 4106 if (ret < 0)
3837 if (iscsit_handle_scsi_cmd(conn, buffer) < 0) 4107 goto transport_err;
3838 goto transport_err;
3839 break;
3840 case ISCSI_OP_SCSI_DATA_OUT:
3841 if (iscsit_handle_data_out(conn, buffer) < 0)
3842 goto transport_err;
3843 break;
3844 case ISCSI_OP_NOOP_OUT:
3845 if (iscsit_handle_nop_out(conn, buffer) < 0)
3846 goto transport_err;
3847 break;
3848 case ISCSI_OP_SCSI_TMFUNC:
3849 if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
3850 goto transport_err;
3851 break;
3852 case ISCSI_OP_TEXT:
3853 if (iscsit_handle_text_cmd(conn, buffer) < 0)
3854 goto transport_err;
3855 break;
3856 case ISCSI_OP_LOGOUT:
3857 ret = iscsit_handle_logout_cmd(conn, buffer);
3858 if (ret > 0) {
3859 wait_for_completion_timeout(&conn->conn_logout_comp,
3860 SECONDS_FOR_LOGOUT_COMP * HZ);
3861 goto transport_err;
3862 } else if (ret < 0)
3863 goto transport_err;
3864 break;
3865 case ISCSI_OP_SNACK:
3866 if (iscsit_handle_snack(conn, buffer) < 0)
3867 goto transport_err;
3868 break;
3869 default:
3870 pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
3871 opcode);
3872 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3873 pr_err("Cannot recover from unknown"
3874 " opcode while ERL=0, closing iSCSI connection"
3875 ".\n");
3876 goto transport_err;
3877 }
3878 if (!conn->conn_ops->OFMarker) {
3879 pr_err("Unable to recover from unknown"
3880 " opcode while OFMarker=No, closing iSCSI"
3881 " connection.\n");
3882 goto transport_err;
3883 }
3884 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3885 pr_err("Unable to recover from unknown"
3886 " opcode, closing iSCSI connection.\n");
3887 goto transport_err;
3888 }
3889 break;
3890 }
3891 } 4108 }
3892 4109
3893transport_err: 4110transport_err:
@@ -4053,6 +4270,12 @@ int iscsit_close_connection(
4053 4270
4054 if (conn->sock) 4271 if (conn->sock)
4055 sock_release(conn->sock); 4272 sock_release(conn->sock);
4273
4274 if (conn->conn_transport->iscsit_free_conn)
4275 conn->conn_transport->iscsit_free_conn(conn);
4276
4277 iscsit_put_transport(conn->conn_transport);
4278
4056 conn->thread_set = NULL; 4279 conn->thread_set = NULL;
4057 4280
4058 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4281 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
@@ -4284,7 +4507,7 @@ static void iscsit_logout_post_handler_diffcid(
4284/* 4507/*
4285 * Return of 0 causes the TX thread to restart. 4508 * Return of 0 causes the TX thread to restart.
4286 */ 4509 */
4287static int iscsit_logout_post_handler( 4510int iscsit_logout_post_handler(
4288 struct iscsi_cmd *cmd, 4511 struct iscsi_cmd *cmd,
4289 struct iscsi_conn *conn) 4512 struct iscsi_conn *conn)
4290{ 4513{
@@ -4342,6 +4565,7 @@ static int iscsit_logout_post_handler(
4342 } 4565 }
4343 return ret; 4566 return ret;
4344} 4567}
4568EXPORT_SYMBOL(iscsit_logout_post_handler);
4345 4569
4346void iscsit_fail_session(struct iscsi_session *sess) 4570void iscsit_fail_session(struct iscsi_session *sess)
4347{ 4571{
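
The iscsi_target.c changes above converge on one idea: the generic RX/TX threads no longer call the TCP receive/send helpers directly, but dispatch through conn->conn_transport (iscsit_immediate_queue, iscsit_response_queue, iscsit_get_dataout, iscsit_queue_data_in, iscsit_queue_status), while the PDU handlers and header builders are exported so another transport can reuse them. As an editorial sketch only, not text from the patch, the traditional TCP template would plausibly be wired up as below; struct iscsit_transport itself is defined in the new include/target/iscsi/iscsi_transport.h, which is not shown in this section, so the field list and the iscsit_queue_rsp helper name are assumptions inferred from the call sites in these hunks.

	/* Hypothetical helper: the TCP path simply re-queues the command,
	 * mirroring what lio_queue_data_in()/lio_queue_status() did inline
	 * before this series. */
	static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
	{
		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
		return 0;
	}

	/* Sketch of a traditional-TCP transport template; only callbacks
	 * actually invoked in the hunks above are filled in, and the field
	 * names are assumed from those call sites. */
	static struct iscsit_transport iscsi_target_transport = {
		.name			= "iSCSI/TCP",
		.transport_type		= ISCSI_TCP,
		.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
		.iscsit_queue_data_in	= iscsit_queue_rsp,
		.iscsit_queue_status	= iscsit_queue_rsp,
		.iscsit_immediate_queue	= iscsit_immediate_queue,
		.iscsit_response_queue	= iscsit_response_queue,
	};

With this split, a non-TCP transport such as iser-target keeps the exported header and ordering logic (iscsit_check_dataout_hdr/iscsit_check_dataout_payload, iscsit_handle_nop_out, iscsit_handle_task_mgt_cmd, iscsit_handle_logout_cmd, and the iscsit_build_* PDU builders) and only replaces the wire-level receive and queueing steps.
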
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index b1a1e6350707..a0050b2f294e 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -16,11 +16,12 @@ extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
16 struct iscsi_portal_group *); 16 struct iscsi_portal_group *);
17extern int iscsit_del_np(struct iscsi_np *); 17extern int iscsit_del_np(struct iscsi_np *);
18extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *); 18extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
19extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
19extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *); 20extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
20extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *); 21extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
21extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *); 22extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
22extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8); 23extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
23extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery); 24extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
24extern void iscsit_thread_get_cpumask(struct iscsi_conn *); 25extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
25extern int iscsi_target_tx_thread(void *); 26extern int iscsi_target_tx_thread(void *);
26extern int iscsi_target_rx_thread(void *); 27extern int iscsi_target_rx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index a0fc7b9eea65..cee17543278c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -49,32 +49,6 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
49 } 49 }
50} 50}
51 51
52static void chap_set_random(char *data, int length)
53{
54 long r;
55 unsigned n;
56
57 while (length > 0) {
58 get_random_bytes(&r, sizeof(long));
59 r = r ^ (r >> 8);
60 r = r ^ (r >> 4);
61 n = r & 0x7;
62
63 get_random_bytes(&r, sizeof(long));
64 r = r ^ (r >> 8);
65 r = r ^ (r >> 5);
66 n = (n << 3) | (r & 0x7);
67
68 get_random_bytes(&r, sizeof(long));
69 r = r ^ (r >> 8);
70 r = r ^ (r >> 5);
71 n = (n << 2) | (r & 0x3);
72
73 *data++ = n;
74 length--;
75 }
76}
77
78static void chap_gen_challenge( 52static void chap_gen_challenge(
79 struct iscsi_conn *conn, 53 struct iscsi_conn *conn,
80 int caller, 54 int caller,
@@ -86,7 +60,7 @@ static void chap_gen_challenge(
86 60
87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); 61 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
88 62
89 chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH); 63 get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
90 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, 64 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
91 CHAP_CHALLENGE_LENGTH); 65 CHAP_CHALLENGE_LENGTH);
92 /* 66 /*
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 78d75c8567d0..13e9e715ad2e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -27,6 +27,7 @@
27#include <target/target_core_fabric_configfs.h> 27#include <target/target_core_fabric_configfs.h>
28#include <target/target_core_configfs.h> 28#include <target/target_core_configfs.h>
29#include <target/configfs_macros.h> 29#include <target/configfs_macros.h>
30#include <target/iscsi/iscsi_transport.h>
30 31
31#include "iscsi_target_core.h" 32#include "iscsi_target_core.h"
32#include "iscsi_target_parameters.h" 33#include "iscsi_target_parameters.h"
@@ -124,8 +125,87 @@ out:
124 125
125TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR); 126TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
126 127
128static ssize_t lio_target_np_show_iser(
129 struct se_tpg_np *se_tpg_np,
130 char *page)
131{
132 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
133 struct iscsi_tpg_np, se_tpg_np);
134 struct iscsi_tpg_np *tpg_np_iser;
135 ssize_t rb;
136
137 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
138 if (tpg_np_iser)
139 rb = sprintf(page, "1\n");
140 else
141 rb = sprintf(page, "0\n");
142
143 return rb;
144}
145
146static ssize_t lio_target_np_store_iser(
147 struct se_tpg_np *se_tpg_np,
148 const char *page,
149 size_t count)
150{
151 struct iscsi_np *np;
152 struct iscsi_portal_group *tpg;
153 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
154 struct iscsi_tpg_np, se_tpg_np);
155 struct iscsi_tpg_np *tpg_np_iser = NULL;
156 char *endptr;
157 u32 op;
158 int rc;
159
160 op = simple_strtoul(page, &endptr, 0);
161 if ((op != 1) && (op != 0)) {
162 pr_err("Illegal value for tpg_enable: %u\n", op);
163 return -EINVAL;
164 }
165 np = tpg_np->tpg_np;
166 if (!np) {
167 pr_err("Unable to locate struct iscsi_np from"
168 " struct iscsi_tpg_np\n");
169 return -EINVAL;
170 }
171
172 tpg = tpg_np->tpg;
173 if (iscsit_get_tpg(tpg) < 0)
174 return -EINVAL;
175
176 if (op) {
177 int rc = request_module("ib_isert");
178 if (rc != 0)
179 pr_warn("Unable to request_module for ib_isert\n");
180
181 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
182 np->np_ip, tpg_np, ISCSI_INFINIBAND);
183 if (!tpg_np_iser || IS_ERR(tpg_np_iser))
184 goto out;
185 } else {
186 tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
187 if (!tpg_np_iser)
188 goto out;
189
190 rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
191 if (rc < 0)
192 goto out;
193 }
194
195 printk("lio_target_np_store_iser() done, op: %d\n", op);
196
197 iscsit_put_tpg(tpg);
198 return count;
199out:
200 iscsit_put_tpg(tpg);
201 return -EINVAL;
202}
203
204TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
205
127static struct configfs_attribute *lio_target_portal_attrs[] = { 206static struct configfs_attribute *lio_target_portal_attrs[] = {
128 &lio_target_np_sctp.attr, 207 &lio_target_np_sctp.attr,
208 &lio_target_np_iser.attr,
129 NULL, 209 NULL,
130}; 210};
131 211
@@ -1536,16 +1616,18 @@ static int lio_queue_data_in(struct se_cmd *se_cmd)
1536 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1616 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1537 1617
1538 cmd->i_state = ISTATE_SEND_DATAIN; 1618 cmd->i_state = ISTATE_SEND_DATAIN;
1539 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1619 cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
1620
1540 return 0; 1621 return 0;
1541} 1622}
1542 1623
1543static int lio_write_pending(struct se_cmd *se_cmd) 1624static int lio_write_pending(struct se_cmd *se_cmd)
1544{ 1625{
1545 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1626 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1627 struct iscsi_conn *conn = cmd->conn;
1546 1628
1547 if (!cmd->immediate_data && !cmd->unsolicited_data) 1629 if (!cmd->immediate_data && !cmd->unsolicited_data)
1548 return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); 1630 return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1549 1631
1550 return 0; 1632 return 0;
1551} 1633}
@@ -1567,7 +1649,8 @@ static int lio_queue_status(struct se_cmd *se_cmd)
1567 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1649 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1568 1650
1569 cmd->i_state = ISTATE_SEND_STATUS; 1651 cmd->i_state = ISTATE_SEND_STATUS;
1570 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1652 cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
1653
1571 return 0; 1654 return 0;
1572} 1655}
1573 1656
@@ -1696,11 +1779,17 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
1696 iscsit_set_default_node_attribues(acl); 1779 iscsit_set_default_node_attribues(acl);
1697} 1780}
1698 1781
1782static int lio_check_stop_free(struct se_cmd *se_cmd)
1783{
1784 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1785}
1786
1699static void lio_release_cmd(struct se_cmd *se_cmd) 1787static void lio_release_cmd(struct se_cmd *se_cmd)
1700{ 1788{
1701 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1789 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1702 1790
1703 iscsit_release_cmd(cmd); 1791 pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
1792 cmd->release_cmd(cmd);
1704} 1793}
1705 1794
1706/* End functions for target_core_fabric_ops */ 1795/* End functions for target_core_fabric_ops */
@@ -1740,6 +1829,7 @@ int iscsi_target_register_configfs(void)
1740 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; 1829 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
1741 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; 1830 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
1742 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; 1831 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
1832 fabric->tf_ops.check_stop_free = &lio_check_stop_free,
1743 fabric->tf_ops.release_cmd = &lio_release_cmd; 1833 fabric->tf_ops.release_cmd = &lio_release_cmd;
1744 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; 1834 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
1745 fabric->tf_ops.close_session = &lio_tpg_close_session; 1835 fabric->tf_ops.close_session = &lio_tpg_close_session;
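
lio_check_stop_free() above just drops the command's session reference via target_put_sess_cmd(), and lio_release_cmd() now defers to a per-command cmd->release_cmd() hook so a transport can plug in its own teardown. A rough userspace analogue of that reference-counted release chain, with hypothetical names:

#include <stdio.h>

struct cmd {
	int refs;				/* stand-in for the se_cmd kref */
	void (*release_cmd)(struct cmd *);	/* per-transport teardown hook */
};

static void tcp_release(struct cmd *c)  { puts("tcp: free iscsi_cmd"); }
static void iser_release(struct cmd *c) { puts("iser: free isert_cmd + iscsi_cmd"); }

/* check_stop_free analogue: drop one reference, release on the last put. */
static int put_cmd(struct cmd *c)
{
	if (--c->refs)
		return 0;
	c->release_cmd(c);
	return 1;
}

int main(void)
{
	struct cmd a = { 2, tcp_release }, b = { 1, iser_release };

	put_cmd(&a);	/* still referenced elsewhere */
	put_cmd(&a);	/* last put -> tcp_release() */
	put_cmd(&b);	/* last put -> iser_release() */
	return 0;
}
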
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 7a333d28d9a2..60ec4b92be03 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -60,7 +60,7 @@
60 60
61#define ISCSI_IOV_DATA_BUFFER 5 61#define ISCSI_IOV_DATA_BUFFER 5
62 62
63enum tpg_np_network_transport_table { 63enum iscsit_transport_type {
64 ISCSI_TCP = 0, 64 ISCSI_TCP = 0,
65 ISCSI_SCTP_TCP = 1, 65 ISCSI_SCTP_TCP = 1,
66 ISCSI_SCTP_UDP = 2, 66 ISCSI_SCTP_UDP = 2,
@@ -244,6 +244,11 @@ struct iscsi_conn_ops {
244 u8 IFMarker; /* [0,1] == [No,Yes] */ 244 u8 IFMarker; /* [0,1] == [No,Yes] */
245 u32 OFMarkInt; /* [1..65535] */ 245 u32 OFMarkInt; /* [1..65535] */
246 u32 IFMarkInt; /* [1..65535] */ 246 u32 IFMarkInt; /* [1..65535] */
247 /*
248 * iSER specific connection parameters
249 */
250 u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
251 u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
247}; 252};
248 253
249struct iscsi_sess_ops { 254struct iscsi_sess_ops {
@@ -265,6 +270,10 @@ struct iscsi_sess_ops {
265 u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */ 270 u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
266 u8 ErrorRecoveryLevel; /* [0..2] */ 271 u8 ErrorRecoveryLevel; /* [0..2] */
267 u8 SessionType; /* [0,1] == [Normal,Discovery]*/ 272 u8 SessionType; /* [0,1] == [Normal,Discovery]*/
273 /*
274 * iSER specific session parameters
275 */
276 u8 RDMAExtensions; /* [0,1] == [No,Yes] */
268}; 277};
269 278
270struct iscsi_queue_req { 279struct iscsi_queue_req {
@@ -284,6 +293,7 @@ struct iscsi_data_count {
284}; 293};
285 294
286struct iscsi_param_list { 295struct iscsi_param_list {
296 bool iser;
287 struct list_head param_list; 297 struct list_head param_list;
288 struct list_head extra_response_list; 298 struct list_head extra_response_list;
289}; 299};
@@ -475,6 +485,7 @@ struct iscsi_cmd {
475 u32 first_data_sg_off; 485 u32 first_data_sg_off;
476 u32 kmapped_nents; 486 u32 kmapped_nents;
477 sense_reason_t sense_reason; 487 sense_reason_t sense_reason;
488 void (*release_cmd)(struct iscsi_cmd *);
478} ____cacheline_aligned; 489} ____cacheline_aligned;
479 490
480struct iscsi_tmr_req { 491struct iscsi_tmr_req {
@@ -503,6 +514,7 @@ struct iscsi_conn {
503 u16 login_port; 514 u16 login_port;
504 u16 local_port; 515 u16 local_port;
505 int net_size; 516 int net_size;
517 int login_family;
506 u32 auth_id; 518 u32 auth_id;
507 u32 conn_flags; 519 u32 conn_flags;
508 /* Used for iscsi_tx_login_rsp() */ 520 /* Used for iscsi_tx_login_rsp() */
@@ -562,9 +574,12 @@ struct iscsi_conn {
562 struct list_head immed_queue_list; 574 struct list_head immed_queue_list;
563 struct list_head response_queue_list; 575 struct list_head response_queue_list;
564 struct iscsi_conn_ops *conn_ops; 576 struct iscsi_conn_ops *conn_ops;
577 struct iscsi_login *conn_login;
578 struct iscsit_transport *conn_transport;
565 struct iscsi_param_list *param_list; 579 struct iscsi_param_list *param_list;
566 /* Used for per connection auth state machine */ 580 /* Used for per connection auth state machine */
567 void *auth_protocol; 581 void *auth_protocol;
582 void *context;
568 struct iscsi_login_thread_s *login_thread; 583 struct iscsi_login_thread_s *login_thread;
569 struct iscsi_portal_group *tpg; 584 struct iscsi_portal_group *tpg;
570 /* Pointer to parent session */ 585 /* Pointer to parent session */
@@ -663,6 +678,8 @@ struct iscsi_login {
663 u8 first_request; 678 u8 first_request;
664 u8 version_min; 679 u8 version_min;
665 u8 version_max; 680 u8 version_max;
681 u8 login_complete;
682 u8 login_failed;
666 char isid[6]; 683 char isid[6];
667 u32 cmd_sn; 684 u32 cmd_sn;
668 itt_t init_task_tag; 685 itt_t init_task_tag;
@@ -670,10 +687,11 @@ struct iscsi_login {
670 u32 rsp_length; 687 u32 rsp_length;
671 u16 cid; 688 u16 cid;
672 u16 tsih; 689 u16 tsih;
673 char *req; 690 char req[ISCSI_HDR_LEN];
674 char *rsp; 691 char rsp[ISCSI_HDR_LEN];
675 char *req_buf; 692 char *req_buf;
676 char *rsp_buf; 693 char *rsp_buf;
694 struct iscsi_conn *conn;
677} ____cacheline_aligned; 695} ____cacheline_aligned;
678 696
679struct iscsi_node_attrib { 697struct iscsi_node_attrib {
@@ -754,6 +772,8 @@ struct iscsi_np {
754 struct task_struct *np_thread; 772 struct task_struct *np_thread;
755 struct timer_list np_login_timer; 773 struct timer_list np_login_timer;
756 struct iscsi_portal_group *np_login_tpg; 774 struct iscsi_portal_group *np_login_tpg;
775 void *np_context;
776 struct iscsit_transport *np_transport;
757 struct list_head np_list; 777 struct list_head np_list;
758} ____cacheline_aligned; 778} ____cacheline_aligned;
759 779
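
The iscsi_target_core.h hunks add the iSER keys to the connection/session ops structs, hang conn_login/conn_transport pointers off struct iscsi_conn and np_transport off struct iscsi_np, and turn the login request/response headers in struct iscsi_login into fixed ISCSI_HDR_LEN arrays instead of separately allocated buffers. Embedding the 48-byte headers removes two allocations and two failure paths per login attempt; a minimal illustration of the difference (48 is the standard BHS length, the rest is invented for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HDR_LEN 48	/* ISCSI_HDR_LEN: one Basic Header Segment */

/* Old shape: header buffers allocated and freed separately. */
struct login_old { char *req, *rsp; };

/* New shape: headers live inside the struct itself. */
struct login_new { char req[HDR_LEN], rsp[HDR_LEN]; };

int main(void)
{
	struct login_old o = { malloc(HDR_LEN), malloc(HDR_LEN) };
	struct login_new n;

	if (!o.req || !o.rsp)		/* two extra failure paths */
		return 1;
	memset(n.req, 0, HDR_LEN);	/* embedded buffers: nothing to fail */
	memset(n.rsp, 0, HDR_LEN);

	printf("old: %zu bytes + 2 allocations, new: %zu bytes inline\n",
	       sizeof(o), sizeof(n));
	free(o.req);
	free(o.rsp);
	return 0;
}
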
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index bcc409853a67..1b74033510a0 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -60,8 +60,13 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess
60 60
61 cmd->maxcmdsn_inc = 1; 61 cmd->maxcmdsn_inc = 1;
62 62
63 mutex_lock(&sess->cmdsn_mutex); 63 if (!mutex_trylock(&sess->cmdsn_mutex)) {
64 sess->max_cmd_sn += 1;
65 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
66 return;
67 }
64 sess->max_cmd_sn += 1; 68 sess->max_cmd_sn += 1;
65 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); 69 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
66 mutex_unlock(&sess->cmdsn_mutex); 70 mutex_unlock(&sess->cmdsn_mutex);
67} 71}
72EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
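
iscsit_increment_maxcmdsn() is exported so code outside iscsi_target_mod can advance MaxCmdSN, and the unconditional mutex_lock() becomes mutex_trylock(); note that, as the hunk reads, a contended trylock still bumps sess->max_cmd_sn, just without holding cmdsn_mutex. The same trylock-or-fall-through shape in standalone form (a pthread mutex stands in for the kernel mutex, which has the opposite return convention):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmdsn_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int max_cmd_sn;

/* Mirrors the hunk: if the mutex is contended, bump the counter anyway
 * and return; otherwise bump it under the lock and unlock. */
static void increment_maxcmdsn(void)
{
	if (pthread_mutex_trylock(&cmdsn_mutex) != 0) {	/* contended */
		max_cmd_sn += 1;
		printf("updated MaxCmdSN (unlocked) to 0x%08x\n", max_cmd_sn);
		return;
	}
	max_cmd_sn += 1;
	printf("updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
	pthread_mutex_unlock(&cmdsn_mutex);
}

int main(void)
{
	increment_maxcmdsn();
	return 0;
}
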
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 0b52a2371305..7816af6cdd12 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -22,6 +22,7 @@
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_fabric.h> 24#include <target/target_core_fabric.h>
25#include <target/iscsi/iscsi_transport.h>
25 26
26#include "iscsi_target_core.h" 27#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 28#include "iscsi_target_seq_pdu_list.h"
@@ -53,6 +54,9 @@ int iscsit_dump_data_payload(
53 u32 length, padding, offset = 0, size; 54 u32 length, padding, offset = 0, size;
54 struct kvec iov; 55 struct kvec iov;
55 56
57 if (conn->sess->sess_ops->RDMAExtensions)
58 return 0;
59
56 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len; 60 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
57 61
58 buf = kzalloc(length, GFP_ATOMIC); 62 buf = kzalloc(length, GFP_ATOMIC);
@@ -919,6 +923,7 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
919int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) 923int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
920{ 924{
921 struct se_cmd *se_cmd = &cmd->se_cmd; 925 struct se_cmd *se_cmd = &cmd->se_cmd;
926 struct iscsi_conn *conn = cmd->conn;
922 int lr = 0; 927 int lr = 0;
923 928
924 spin_lock_bh(&cmd->istate_lock); 929 spin_lock_bh(&cmd->istate_lock);
@@ -981,7 +986,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
981 return 0; 986 return 0;
982 987
983 iscsit_set_dataout_sequence_values(cmd); 988 iscsit_set_dataout_sequence_values(cmd);
984 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); 989 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
985 } 990 }
986 return 0; 991 return 0;
987 } 992 }
@@ -999,10 +1004,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
999 if (transport_check_aborted_status(se_cmd, 1) != 0) 1004 if (transport_check_aborted_status(se_cmd, 1) != 0)
1000 return 0; 1005 return 0;
1001 1006
1002 iscsit_set_dataout_sequence_values(cmd); 1007 iscsit_set_unsoliticed_dataout(cmd);
1003 spin_lock_bh(&cmd->dataout_timeout_lock);
1004 iscsit_start_dataout_timer(cmd, cmd->conn);
1005 spin_unlock_bh(&cmd->dataout_timeout_lock);
1006 } 1008 }
1007 return transport_handle_cdb_direct(&cmd->se_cmd); 1009 return transport_handle_cdb_direct(&cmd->se_cmd);
1008 1010
@@ -1290,3 +1292,4 @@ void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
1290 cmd->init_task_tag); 1292 cmd->init_task_tag);
1291 spin_unlock_bh(&cmd->dataout_timeout_lock); 1293 spin_unlock_bh(&cmd->dataout_timeout_lock);
1292} 1294}
1295EXPORT_SYMBOL(iscsit_stop_dataout_timer);
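
In iscsi_target_erl1.c the payload-dump path returns immediately when the session negotiated RDMAExtensions (there is no inline byte stream to drain on an RDMA connection), R2T generation goes through conn->conn_transport->iscsit_get_dataout() rather than calling iscsit_build_r2ts_for_cmd() directly, and iscsit_stop_dataout_timer() is exported. The guard itself is trivial, shown here in isolation with the drain loop stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Stub for the real chunked recv-and-discard loop over the TCP socket. */
static int drain_socket_bytes(unsigned int buf_len)
{
	printf("draining %u bytes from the wire\n", buf_len);
	return 0;
}

/* dump_data_payload analogue: nothing to drain on an RDMA transport. */
static int dump_data_payload(bool rdma_extensions, unsigned int buf_len)
{
	if (rdma_extensions)
		return 0;
	return drain_socket_bytes(buf_len);
}

int main(void)
{
	dump_data_payload(false, 8192);	/* iSCSI/TCP: must consume the payload */
	dump_data_payload(true, 8192);	/* iSER: no-op */
	return 0;
}
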
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 2535d4d46c0e..bb5d5c5bce65 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -39,8 +39,39 @@
39#include "iscsi_target.h" 39#include "iscsi_target.h"
40#include "iscsi_target_parameters.h" 40#include "iscsi_target_parameters.h"
41 41
42static int iscsi_login_init_conn(struct iscsi_conn *conn) 42#include <target/iscsi/iscsi_transport.h>
43
44static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
43{ 45{
46 struct iscsi_login *login;
47
48 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
49 if (!login) {
50 pr_err("Unable to allocate memory for struct iscsi_login.\n");
51 return NULL;
52 }
53 login->conn = conn;
54 login->first_request = 1;
55
56 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
57 if (!login->req_buf) {
58 pr_err("Unable to allocate memory for response buffer.\n");
59 goto out_login;
60 }
61
62 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
63 if (!login->rsp_buf) {
64 pr_err("Unable to allocate memory for request buffer.\n");
65 goto out_req_buf;
66 }
67
68 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
69 if (!conn->conn_ops) {
70 pr_err("Unable to allocate memory for"
71 " struct iscsi_conn_ops.\n");
72 goto out_rsp_buf;
73 }
74
44 init_waitqueue_head(&conn->queues_wq); 75 init_waitqueue_head(&conn->queues_wq);
45 INIT_LIST_HEAD(&conn->conn_list); 76 INIT_LIST_HEAD(&conn->conn_list);
46 INIT_LIST_HEAD(&conn->conn_cmd_list); 77 INIT_LIST_HEAD(&conn->conn_cmd_list);
@@ -62,10 +93,21 @@ static int iscsi_login_init_conn(struct iscsi_conn *conn)
62 93
63 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { 94 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
64 pr_err("Unable to allocate conn->conn_cpumask\n"); 95 pr_err("Unable to allocate conn->conn_cpumask\n");
65 return -ENOMEM; 96 goto out_conn_ops;
66 } 97 }
98 conn->conn_login = login;
67 99
68 return 0; 100 return login;
101
102out_conn_ops:
103 kfree(conn->conn_ops);
104out_rsp_buf:
105 kfree(login->rsp_buf);
106out_req_buf:
107 kfree(login->req_buf);
108out_login:
109 kfree(login);
110 return NULL;
69} 111}
70 112
71/* 113/*
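
iscsi_login_init_conn() now owns all of the login-time allocations (struct iscsi_login, its request/response text buffers, conn->conn_ops and the CPU mask) and returns the login rather than an int, unwinding in reverse order through goto labels when any step fails. That reverse-order unwind is the usual kernel idiom; a compact standalone version of the same pattern, with generic names:

#include <stdlib.h>

struct login { char *req_buf, *rsp_buf; };

/* Allocate several resources, releasing the earlier ones if a later one fails. */
static struct login *login_init(size_t bufsz)
{
	struct login *login = calloc(1, sizeof(*login));

	if (!login)
		return NULL;
	login->req_buf = calloc(1, bufsz);
	if (!login->req_buf)
		goto out_login;
	login->rsp_buf = calloc(1, bufsz);
	if (!login->rsp_buf)
		goto out_req_buf;
	return login;

out_req_buf:
	free(login->req_buf);
out_login:
	free(login);
	return NULL;
}

int main(void)
{
	struct login *l = login_init(8192);

	if (!l)
		return 1;
	free(l->rsp_buf);
	free(l->req_buf);
	free(l);
	return 0;
}
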
@@ -298,6 +340,7 @@ static int iscsi_login_zero_tsih_s2(
298 struct iscsi_node_attrib *na; 340 struct iscsi_node_attrib *na;
299 struct iscsi_session *sess = conn->sess; 341 struct iscsi_session *sess = conn->sess;
300 unsigned char buf[32]; 342 unsigned char buf[32];
343 bool iser = false;
301 344
302 sess->tpg = conn->tpg; 345 sess->tpg = conn->tpg;
303 346
@@ -319,7 +362,10 @@ static int iscsi_login_zero_tsih_s2(
319 return -1; 362 return -1;
320 } 363 }
321 364
322 iscsi_set_keys_to_negotiate(0, conn->param_list); 365 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
366 iser = true;
367
368 iscsi_set_keys_to_negotiate(conn->param_list, iser);
323 369
324 if (sess->sess_ops->SessionType) 370 if (sess->sess_ops->SessionType)
325 return iscsi_set_keys_irrelevant_for_discovery( 371 return iscsi_set_keys_irrelevant_for_discovery(
@@ -357,6 +403,56 @@ static int iscsi_login_zero_tsih_s2(
357 403
358 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) 404 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
359 return -1; 405 return -1;
406 /*
407 * Set RDMAExtensions=Yes by default for iSER enabled network portals
408 */
409 if (iser) {
410 struct iscsi_param *param;
411 unsigned long mrdsl, off;
412 int rc;
413
414 sprintf(buf, "RDMAExtensions=Yes");
415 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
416 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
417 ISCSI_LOGIN_STATUS_NO_RESOURCES);
418 return -1;
419 }
420 /*
421 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
422 * Immediate Data + Unsolicitied Data-OUT if necessary..
423 */
424 param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
425 conn->param_list);
426 if (!param) {
427 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
428 ISCSI_LOGIN_STATUS_NO_RESOURCES);
429 return -1;
430 }
431 rc = strict_strtoul(param->value, 0, &mrdsl);
432 if (rc < 0) {
433 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
434 ISCSI_LOGIN_STATUS_NO_RESOURCES);
435 return -1;
436 }
437 off = mrdsl % PAGE_SIZE;
438 if (!off)
439 return 0;
440
441 if (mrdsl < PAGE_SIZE)
442 mrdsl = PAGE_SIZE;
443 else
444 mrdsl -= off;
445
446 pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
447 " to PAGE_SIZE\n", mrdsl);
448
449 sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
450 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
451 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
452 ISCSI_LOGIN_STATUS_NO_RESOURCES);
453 return -1;
454 }
455 }
360 456
361 return 0; 457 return 0;
362} 458}
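
For an iSER portal, iscsi_login_zero_tsih_s2() forces RDMAExtensions=Yes and then rounds the advertised MaxRecvDataSegmentLength down to a PAGE_SIZE multiple, bumping it up to one page if it was smaller than a page, so Immediate Data and unsolicited Data-Out stay page aligned. The arithmetic is easy to check in isolation (a 4 KiB page is assumed for the example; the kernel uses the architecture's value):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round mrdsl down to a PAGE_SIZE boundary, but never below one page. */
static unsigned long align_mrdsl(unsigned long mrdsl)
{
	unsigned long off = mrdsl % PAGE_SIZE;

	if (!off)
		return mrdsl;
	return (mrdsl < PAGE_SIZE) ? PAGE_SIZE : mrdsl - off;
}

int main(void)
{
	printf("%lu -> %lu\n", 262144UL, align_mrdsl(262144UL));	/* already aligned */
	printf("%lu -> %lu\n", 70000UL,  align_mrdsl(70000UL));		/* 70000 -> 69632 */
	printf("%lu -> %lu\n", 512UL,    align_mrdsl(512UL));		/* below a page -> 4096 */
	return 0;
}
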
@@ -436,6 +532,7 @@ static int iscsi_login_non_zero_tsih_s2(
436 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 532 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
437 struct se_session *se_sess, *se_sess_tmp; 533 struct se_session *se_sess, *se_sess_tmp;
438 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 534 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
535 bool iser = false;
439 536
440 spin_lock_bh(&se_tpg->session_lock); 537 spin_lock_bh(&se_tpg->session_lock);
441 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 538 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
@@ -485,7 +582,10 @@ static int iscsi_login_non_zero_tsih_s2(
485 return -1; 582 return -1;
486 } 583 }
487 584
488 iscsi_set_keys_to_negotiate(0, conn->param_list); 585 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
586 iser = true;
587
588 iscsi_set_keys_to_negotiate(conn->param_list, iser);
489 /* 589 /*
490 * Need to send TargetPortalGroupTag back in first login response 590 * Need to send TargetPortalGroupTag back in first login response
491 * on any iSCSI connection where the Initiator provides TargetName. 591 * on any iSCSI connection where the Initiator provides TargetName.
@@ -574,6 +674,11 @@ int iscsi_login_post_auth_non_zero_tsih(
574static void iscsi_post_login_start_timers(struct iscsi_conn *conn) 674static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
575{ 675{
576 struct iscsi_session *sess = conn->sess; 676 struct iscsi_session *sess = conn->sess;
677 /*
678 * FIXME: Unsolicitied NopIN support for ISER
679 */
680 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
681 return;
577 682
578 if (!sess->sess_ops->SessionType) 683 if (!sess->sess_ops->SessionType)
579 iscsit_start_nopin_timer(conn); 684 iscsit_start_nopin_timer(conn);
@@ -632,6 +737,7 @@ static int iscsi_post_login_handler(
632 spin_unlock_bh(&sess->conn_lock); 737 spin_unlock_bh(&sess->conn_lock);
633 738
634 iscsi_post_login_start_timers(conn); 739 iscsi_post_login_start_timers(conn);
740
635 iscsi_activate_thread_set(conn, ts); 741 iscsi_activate_thread_set(conn, ts);
636 /* 742 /*
637 * Determine CPU mask to ensure connection's RX and TX kthreads 743 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -761,11 +867,11 @@ static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
761 spin_unlock_bh(&np->np_thread_lock); 867 spin_unlock_bh(&np->np_thread_lock);
762} 868}
763 869
764int iscsi_target_setup_login_socket( 870int iscsit_setup_np(
765 struct iscsi_np *np, 871 struct iscsi_np *np,
766 struct __kernel_sockaddr_storage *sockaddr) 872 struct __kernel_sockaddr_storage *sockaddr)
767{ 873{
768 struct socket *sock; 874 struct socket *sock = NULL;
769 int backlog = 5, ret, opt = 0, len; 875 int backlog = 5, ret, opt = 0, len;
770 876
771 switch (np->np_network_transport) { 877 switch (np->np_network_transport) {
@@ -781,15 +887,15 @@ int iscsi_target_setup_login_socket(
781 np->np_ip_proto = IPPROTO_SCTP; 887 np->np_ip_proto = IPPROTO_SCTP;
782 np->np_sock_type = SOCK_SEQPACKET; 888 np->np_sock_type = SOCK_SEQPACKET;
783 break; 889 break;
784 case ISCSI_IWARP_TCP:
785 case ISCSI_IWARP_SCTP:
786 case ISCSI_INFINIBAND:
787 default: 890 default:
788 pr_err("Unsupported network_transport: %d\n", 891 pr_err("Unsupported network_transport: %d\n",
789 np->np_network_transport); 892 np->np_network_transport);
790 return -EINVAL; 893 return -EINVAL;
791 } 894 }
792 895
896 np->np_ip_proto = IPPROTO_TCP;
897 np->np_sock_type = SOCK_STREAM;
898
793 ret = sock_create(sockaddr->ss_family, np->np_sock_type, 899 ret = sock_create(sockaddr->ss_family, np->np_sock_type,
794 np->np_ip_proto, &sock); 900 np->np_ip_proto, &sock);
795 if (ret < 0) { 901 if (ret < 0) {
@@ -853,7 +959,6 @@ int iscsi_target_setup_login_socket(
853 } 959 }
854 960
855 return 0; 961 return 0;
856
857fail: 962fail:
858 np->np_socket = NULL; 963 np->np_socket = NULL;
859 if (sock) 964 if (sock)
@@ -861,21 +966,169 @@ fail:
861 return ret; 966 return ret;
862} 967}
863 968
969int iscsi_target_setup_login_socket(
970 struct iscsi_np *np,
971 struct __kernel_sockaddr_storage *sockaddr)
972{
973 struct iscsit_transport *t;
974 int rc;
975
976 t = iscsit_get_transport(np->np_network_transport);
977 if (!t)
978 return -EINVAL;
979
980 rc = t->iscsit_setup_np(np, sockaddr);
981 if (rc < 0) {
982 iscsit_put_transport(t);
983 return rc;
984 }
985
986 np->np_transport = t;
987 printk("Set np->np_transport to %p -> %s\n", np->np_transport,
988 np->np_transport->name);
989 return 0;
990}
991
992int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
993{
994 struct socket *new_sock, *sock = np->np_socket;
995 struct sockaddr_in sock_in;
996 struct sockaddr_in6 sock_in6;
997 int rc, err;
998
999 rc = kernel_accept(sock, &new_sock, 0);
1000 if (rc < 0)
1001 return rc;
1002
1003 conn->sock = new_sock;
1004 conn->login_family = np->np_sockaddr.ss_family;
1005 printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
1006
1007 if (np->np_sockaddr.ss_family == AF_INET6) {
1008 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
1009
1010 rc = conn->sock->ops->getname(conn->sock,
1011 (struct sockaddr *)&sock_in6, &err, 1);
1012 if (!rc) {
1013 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1014 &sock_in6.sin6_addr.in6_u);
1015 conn->login_port = ntohs(sock_in6.sin6_port);
1016 }
1017
1018 rc = conn->sock->ops->getname(conn->sock,
1019 (struct sockaddr *)&sock_in6, &err, 0);
1020 if (!rc) {
1021 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
1022 &sock_in6.sin6_addr.in6_u);
1023 conn->local_port = ntohs(sock_in6.sin6_port);
1024 }
1025 } else {
1026 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1027
1028 rc = conn->sock->ops->getname(conn->sock,
1029 (struct sockaddr *)&sock_in, &err, 1);
1030 if (!rc) {
1031 sprintf(conn->login_ip, "%pI4",
1032 &sock_in.sin_addr.s_addr);
1033 conn->login_port = ntohs(sock_in.sin_port);
1034 }
1035
1036 rc = conn->sock->ops->getname(conn->sock,
1037 (struct sockaddr *)&sock_in, &err, 0);
1038 if (!rc) {
1039 sprintf(conn->local_ip, "%pI4",
1040 &sock_in.sin_addr.s_addr);
1041 conn->local_port = ntohs(sock_in.sin_port);
1042 }
1043 }
1044
1045 return 0;
1046}
1047
1048int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
1049{
1050 struct iscsi_login_req *login_req;
1051 u32 padding = 0, payload_length;
1052
1053 if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
1054 return -1;
1055
1056 login_req = (struct iscsi_login_req *)login->req;
1057 payload_length = ntoh24(login_req->dlength);
1058 padding = ((-payload_length) & 3);
1059
1060 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
1061 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
1062 login_req->flags, login_req->itt, login_req->cmdsn,
1063 login_req->exp_statsn, login_req->cid, payload_length);
1064 /*
1065 * Setup the initial iscsi_login values from the leading
1066 * login request PDU.
1067 */
1068 if (login->first_request) {
1069 login_req = (struct iscsi_login_req *)login->req;
1070 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1071 login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
1072 login->version_min = login_req->min_version;
1073 login->version_max = login_req->max_version;
1074 memcpy(login->isid, login_req->isid, 6);
1075 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1076 login->init_task_tag = login_req->itt;
1077 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1078 login->cid = be16_to_cpu(login_req->cid);
1079 login->tsih = be16_to_cpu(login_req->tsih);
1080 }
1081
1082 if (iscsi_target_check_login_request(conn, login) < 0)
1083 return -1;
1084
1085 memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
1086 if (iscsi_login_rx_data(conn, login->req_buf,
1087 payload_length + padding) < 0)
1088 return -1;
1089
1090 return 0;
1091}
1092
1093int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1094 u32 length)
1095{
1096 if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
1097 return -1;
1098
1099 return 0;
1100}
1101
1102static int
1103iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
1104{
1105 int rc;
1106
1107 if (!t->owner) {
1108 conn->conn_transport = t;
1109 return 0;
1110 }
1111
1112 rc = try_module_get(t->owner);
1113 if (!rc) {
1114 pr_err("try_module_get() failed for %s\n", t->name);
1115 return -EINVAL;
1116 }
1117
1118 conn->conn_transport = t;
1119 return 0;
1120}
1121
864static int __iscsi_target_login_thread(struct iscsi_np *np) 1122static int __iscsi_target_login_thread(struct iscsi_np *np)
865{ 1123{
866 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; 1124 u8 *buffer, zero_tsih = 0;
867 int err, ret = 0, stop; 1125 int ret = 0, rc, stop;
868 struct iscsi_conn *conn = NULL; 1126 struct iscsi_conn *conn = NULL;
869 struct iscsi_login *login; 1127 struct iscsi_login *login;
870 struct iscsi_portal_group *tpg = NULL; 1128 struct iscsi_portal_group *tpg = NULL;
871 struct socket *new_sock, *sock;
872 struct kvec iov;
873 struct iscsi_login_req *pdu; 1129 struct iscsi_login_req *pdu;
874 struct sockaddr_in sock_in;
875 struct sockaddr_in6 sock_in6;
876 1130
877 flush_signals(current); 1131 flush_signals(current);
878 sock = np->np_socket;
879 1132
880 spin_lock_bh(&np->np_thread_lock); 1133 spin_lock_bh(&np->np_thread_lock);
881 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1134 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
@@ -886,75 +1139,76 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
886 } 1139 }
887 spin_unlock_bh(&np->np_thread_lock); 1140 spin_unlock_bh(&np->np_thread_lock);
888 1141
889 if (kernel_accept(sock, &new_sock, 0) < 0) {
890 spin_lock_bh(&np->np_thread_lock);
891 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
892 spin_unlock_bh(&np->np_thread_lock);
893 complete(&np->np_restart_comp);
894 /* Get another socket */
895 return 1;
896 }
897 spin_unlock_bh(&np->np_thread_lock);
898 goto out;
899 }
900 iscsi_start_login_thread_timer(np);
901
902 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1142 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
903 if (!conn) { 1143 if (!conn) {
904 pr_err("Could not allocate memory for" 1144 pr_err("Could not allocate memory for"
905 " new connection\n"); 1145 " new connection\n");
906 sock_release(new_sock);
907 /* Get another socket */ 1146 /* Get another socket */
908 return 1; 1147 return 1;
909 } 1148 }
910
911 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 1149 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
912 conn->conn_state = TARG_CONN_STATE_FREE; 1150 conn->conn_state = TARG_CONN_STATE_FREE;
913 conn->sock = new_sock;
914 1151
915 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); 1152 if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
916 conn->conn_state = TARG_CONN_STATE_XPT_UP; 1153 kfree(conn);
1154 return 1;
1155 }
917 1156
918 /* 1157 rc = np->np_transport->iscsit_accept_np(np, conn);
919 * Allocate conn->conn_ops early as a failure calling 1158 if (rc == -ENOSYS) {
920 * iscsit_tx_login_rsp() below will call tx_data(). 1159 complete(&np->np_restart_comp);
921 */ 1160 iscsit_put_transport(conn->conn_transport);
922 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 1161 kfree(conn);
923 if (!conn->conn_ops) { 1162 conn = NULL;
924 pr_err("Unable to allocate memory for" 1163 goto exit;
925 " struct iscsi_conn_ops.\n"); 1164 } else if (rc < 0) {
926 goto new_sess_out; 1165 spin_lock_bh(&np->np_thread_lock);
1166 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1167 spin_unlock_bh(&np->np_thread_lock);
1168 complete(&np->np_restart_comp);
1169 if (ret == -ENODEV) {
1170 iscsit_put_transport(conn->conn_transport);
1171 kfree(conn);
1172 conn = NULL;
1173 goto out;
1174 }
1175 /* Get another socket */
1176 return 1;
1177 }
1178 spin_unlock_bh(&np->np_thread_lock);
1179 iscsit_put_transport(conn->conn_transport);
1180 kfree(conn);
1181 conn = NULL;
1182 goto out;
927 } 1183 }
928 /* 1184 /*
929 * Perform the remaining iSCSI connection initialization items.. 1185 * Perform the remaining iSCSI connection initialization items..
930 */ 1186 */
931 if (iscsi_login_init_conn(conn) < 0) 1187 login = iscsi_login_init_conn(conn);
932 goto new_sess_out; 1188 if (!login) {
933
934 memset(buffer, 0, ISCSI_HDR_LEN);
935 memset(&iov, 0, sizeof(struct kvec));
936 iov.iov_base = buffer;
937 iov.iov_len = ISCSI_HDR_LEN;
938
939 if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
940 pr_err("rx_data() returned an error.\n");
941 goto new_sess_out; 1189 goto new_sess_out;
942 } 1190 }
943 1191
944 iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK); 1192 iscsi_start_login_thread_timer(np);
945 if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
946 pr_err("First opcode is not login request,"
947 " failing login request.\n");
948 goto new_sess_out;
949 }
950 1193
951 pdu = (struct iscsi_login_req *) buffer; 1194 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
1195 conn->conn_state = TARG_CONN_STATE_XPT_UP;
1196 /*
1197 * This will process the first login request + payload..
1198 */
1199 rc = np->np_transport->iscsit_get_login_rx(conn, login);
1200 if (rc == 1)
1201 return 1;
1202 else if (rc < 0)
1203 goto new_sess_out;
952 1204
1205 buffer = &login->req[0];
1206 pdu = (struct iscsi_login_req *)buffer;
953 /* 1207 /*
954 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs 1208 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs
955 * when Status-Class != 0. 1209 * when Status-Class != 0.
956 */ 1210 */
957 conn->login_itt = pdu->itt; 1211 conn->login_itt = pdu->itt;
958 1212
959 spin_lock_bh(&np->np_thread_lock); 1213 spin_lock_bh(&np->np_thread_lock);
960 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 1214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
@@ -967,61 +1221,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
967 } 1221 }
968 spin_unlock_bh(&np->np_thread_lock); 1222 spin_unlock_bh(&np->np_thread_lock);
969 1223
970 if (np->np_sockaddr.ss_family == AF_INET6) {
971 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
972
973 if (conn->sock->ops->getname(conn->sock,
974 (struct sockaddr *)&sock_in6, &err, 1) < 0) {
975 pr_err("sock_ops->getname() failed.\n");
976 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
977 ISCSI_LOGIN_STATUS_TARGET_ERROR);
978 goto new_sess_out;
979 }
980 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
981 &sock_in6.sin6_addr.in6_u);
982 conn->login_port = ntohs(sock_in6.sin6_port);
983
984 if (conn->sock->ops->getname(conn->sock,
985 (struct sockaddr *)&sock_in6, &err, 0) < 0) {
986 pr_err("sock_ops->getname() failed.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_TARGET_ERROR);
989 goto new_sess_out;
990 }
991 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
992 &sock_in6.sin6_addr.in6_u);
993 conn->local_port = ntohs(sock_in6.sin6_port);
994
995 } else {
996 memset(&sock_in, 0, sizeof(struct sockaddr_in));
997
998 if (conn->sock->ops->getname(conn->sock,
999 (struct sockaddr *)&sock_in, &err, 1) < 0) {
1000 pr_err("sock_ops->getname() failed.\n");
1001 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1002 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1003 goto new_sess_out;
1004 }
1005 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
1006 conn->login_port = ntohs(sock_in.sin_port);
1007
1008 if (conn->sock->ops->getname(conn->sock,
1009 (struct sockaddr *)&sock_in, &err, 0) < 0) {
1010 pr_err("sock_ops->getname() failed.\n");
1011 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1012 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1013 goto new_sess_out;
1014 }
1015 sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
1016 conn->local_port = ntohs(sock_in.sin_port);
1017 }
1018
1019 conn->network_transport = np->np_network_transport; 1224 conn->network_transport = np->np_network_transport;
1020 1225
1021 pr_debug("Received iSCSI login request from %s on %s Network" 1226 pr_debug("Received iSCSI login request from %s on %s Network"
1022 " Portal %s:%hu\n", conn->login_ip, 1227 " Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
1023 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", 1228 conn->local_ip, conn->local_port);
1024 conn->local_ip, conn->local_port);
1025 1229
1026 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1230 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1027 conn->conn_state = TARG_CONN_STATE_IN_LOGIN; 1231 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
@@ -1050,13 +1254,17 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1050 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0) 1254 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
1051 goto new_sess_out; 1255 goto new_sess_out;
1052 } 1256 }
1053
1054 /* 1257 /*
1055 * This will process the first login request, and call 1258 * SessionType: Discovery
1056 * iscsi_target_locate_portal(), and return a valid struct iscsi_login. 1259 *
1260 * Locates Default Portal
1261 *
1262 * SessionType: Normal
1263 *
1264 * Locates Target Portal from NP -> Target IQN
1057 */ 1265 */
1058 login = iscsi_target_init_negotiation(np, conn, buffer); 1266 rc = iscsi_target_locate_portal(np, conn, login);
1059 if (!login) { 1267 if (rc < 0) {
1060 tpg = conn->tpg; 1268 tpg = conn->tpg;
1061 goto new_sess_out; 1269 goto new_sess_out;
1062 } 1270 }
@@ -1068,15 +1276,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1068 } 1276 }
1069 1277
1070 if (zero_tsih) { 1278 if (zero_tsih) {
1071 if (iscsi_login_zero_tsih_s2(conn) < 0) { 1279 if (iscsi_login_zero_tsih_s2(conn) < 0)
1072 iscsi_target_nego_release(login, conn);
1073 goto new_sess_out; 1280 goto new_sess_out;
1074 }
1075 } else { 1281 } else {
1076 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) { 1282 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0)
1077 iscsi_target_nego_release(login, conn);
1078 goto old_sess_out; 1283 goto old_sess_out;
1079 }
1080 } 1284 }
1081 1285
1082 if (iscsi_target_start_negotiation(login, conn) < 0) 1286 if (iscsi_target_start_negotiation(login, conn) < 0)
@@ -1153,8 +1357,18 @@ old_sess_out:
1153 iscsi_release_param_list(conn->param_list); 1357 iscsi_release_param_list(conn->param_list);
1154 conn->param_list = NULL; 1358 conn->param_list = NULL;
1155 } 1359 }
1156 if (conn->sock) 1360 iscsi_target_nego_release(conn);
1361
1362 if (conn->sock) {
1157 sock_release(conn->sock); 1363 sock_release(conn->sock);
1364 conn->sock = NULL;
1365 }
1366
1367 if (conn->conn_transport->iscsit_free_conn)
1368 conn->conn_transport->iscsit_free_conn(conn);
1369
1370 iscsit_put_transport(conn->conn_transport);
1371
1158 kfree(conn); 1372 kfree(conn);
1159 1373
1160 if (tpg) { 1374 if (tpg) {
@@ -1172,11 +1386,13 @@ out:
1172 /* Wait for another socket.. */ 1386 /* Wait for another socket.. */
1173 if (!stop) 1387 if (!stop)
1174 return 1; 1388 return 1;
1175 1389exit:
1176 iscsi_stop_login_thread_timer(np); 1390 iscsi_stop_login_thread_timer(np);
1177 spin_lock_bh(&np->np_thread_lock); 1391 spin_lock_bh(&np->np_thread_lock);
1178 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1392 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1393 np->np_thread = NULL;
1179 spin_unlock_bh(&np->np_thread_lock); 1394 spin_unlock_bh(&np->np_thread_lock);
1395
1180 return 0; 1396 return 0;
1181} 1397}
1182 1398
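
With the hunks above, the login thread no longer does any TCP work itself: kernel_accept(), the getname() calls that record peer and local addresses, and the initial header read all live in iscsit_accept_np()/iscsit_get_login_rx(), which the traditional transport registers and the thread reaches only through np->np_transport. Capturing both endpoint addresses after accept() is the same dance in userspace, sketched with getpeername()/getsockname() and inet_ntop() (IPv4 only for brevity):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

/* After accept(), record the peer ("login") and local address the way
 * iscsit_accept_np() fills conn->login_ip/login_port and local_ip/local_port. */
static int record_endpoints(int connfd)
{
	struct sockaddr_in peer, local;
	socklen_t plen = sizeof(peer), llen = sizeof(local);
	char pbuf[INET_ADDRSTRLEN], lbuf[INET_ADDRSTRLEN];

	if (getpeername(connfd, (struct sockaddr *)&peer, &plen) < 0 ||
	    getsockname(connfd, (struct sockaddr *)&local, &llen) < 0)
		return -1;

	inet_ntop(AF_INET, &peer.sin_addr, pbuf, sizeof(pbuf));
	inet_ntop(AF_INET, &local.sin_addr, lbuf, sizeof(lbuf));
	printf("login from %s:%u on portal %s:%u\n",
	       pbuf, ntohs(peer.sin_port), lbuf, ntohs(local.sin_port));
	return 0;
}

int main(void)
{
	/* In the driver this runs on the socket returned by kernel_accept();
	 * call record_endpoints() with any accepted TCP connection's fd. */
	(void)record_endpoints;
	return 0;
}
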
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 091dcae2532b..63efd2878451 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -4,8 +4,14 @@
4extern int iscsi_login_setup_crypto(struct iscsi_conn *); 4extern int iscsi_login_setup_crypto(struct iscsi_conn *);
5extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *); 5extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
6extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32); 6extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
7extern int iscsit_setup_np(struct iscsi_np *,
8 struct __kernel_sockaddr_storage *);
7extern int iscsi_target_setup_login_socket(struct iscsi_np *, 9extern int iscsi_target_setup_login_socket(struct iscsi_np *,
8 struct __kernel_sockaddr_storage *); 10 struct __kernel_sockaddr_storage *);
11extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
9extern int iscsi_target_login_thread(void *); 15extern int iscsi_target_login_thread(void *);
10extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *); 16extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
11 17
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9d902aefe01a..7ad912060e21 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -22,6 +22,7 @@
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_fabric.h> 24#include <target/target_core_fabric.h>
25#include <target/iscsi/iscsi_transport.h>
25 26
26#include "iscsi_target_core.h" 27#include "iscsi_target_core.h"
27#include "iscsi_target_parameters.h" 28#include "iscsi_target_parameters.h"
@@ -169,7 +170,7 @@ static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
169 kfree(conn->auth_protocol); 170 kfree(conn->auth_protocol);
170} 171}
171 172
172static int iscsi_target_check_login_request( 173int iscsi_target_check_login_request(
173 struct iscsi_conn *conn, 174 struct iscsi_conn *conn,
174 struct iscsi_login *login) 175 struct iscsi_login *login)
175{ 176{
@@ -200,8 +201,8 @@ static int iscsi_target_check_login_request(
200 return -1; 201 return -1;
201 } 202 }
202 203
203 req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 204 req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
204 req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK); 205 req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags);
205 206
206 if (req_csg != login->current_stage) { 207 if (req_csg != login->current_stage) {
207 pr_err("Initiator unexpectedly changed login stage" 208 pr_err("Initiator unexpectedly changed login stage"
@@ -352,11 +353,8 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
352 353
353 padding = ((-login->rsp_length) & 3); 354 padding = ((-login->rsp_length) & 3);
354 355
355 if (iscsi_login_tx_data( 356 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
356 conn, 357 login->rsp_length + padding) < 0)
357 login->rsp,
358 login->rsp_buf,
359 login->rsp_length + padding) < 0)
360 return -1; 358 return -1;
361 359
362 login->rsp_length = 0; 360 login->rsp_length = 0;
@@ -368,72 +366,12 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
368 return 0; 366 return 0;
369} 367}
370 368
371static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
372{
373 u32 padding = 0, payload_length;
374 struct iscsi_login_req *login_req;
375
376 if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
377 return -1;
378
379 login_req = (struct iscsi_login_req *) login->req;
380 payload_length = ntoh24(login_req->dlength);
381
382 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
383 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
384 login_req->flags, login_req->itt, login_req->cmdsn,
385 login_req->exp_statsn, login_req->cid, payload_length);
386
387 if (iscsi_target_check_login_request(conn, login) < 0)
388 return -1;
389
390 padding = ((-payload_length) & 3);
391 memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
392
393 if (iscsi_login_rx_data(
394 conn,
395 login->req_buf,
396 payload_length + padding) < 0)
397 return -1;
398
399 return 0;
400}
401
402static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 369static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
403{ 370{
404 if (iscsi_target_do_tx_login_io(conn, login) < 0) 371 if (iscsi_target_do_tx_login_io(conn, login) < 0)
405 return -1; 372 return -1;
406 373
407 if (iscsi_target_do_rx_login_io(conn, login) < 0) 374 if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0)
408 return -1;
409
410 return 0;
411}
412
413static int iscsi_target_get_initial_payload(
414 struct iscsi_conn *conn,
415 struct iscsi_login *login)
416{
417 u32 padding = 0, payload_length;
418 struct iscsi_login_req *login_req;
419
420 login_req = (struct iscsi_login_req *) login->req;
421 payload_length = ntoh24(login_req->dlength);
422
423 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
424 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
425 login_req->flags, login_req->itt, login_req->cmdsn,
426 login_req->exp_statsn, payload_length);
427
428 if (iscsi_target_check_login_request(conn, login) < 0)
429 return -1;
430
431 padding = ((-payload_length) & 3);
432
433 if (iscsi_login_rx_data(
434 conn,
435 login->req_buf,
436 payload_length + padding) < 0)
437 return -1; 375 return -1;
438 376
439 return 0; 377 return 0;
@@ -681,9 +619,9 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
681 return -1; 619 return -1;
682 } 620 }
683 621
684 switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) { 622 switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) {
685 case 0: 623 case 0:
686 login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK); 624 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK;
687 if (iscsi_target_handle_csg_zero(conn, login) < 0) 625 if (iscsi_target_handle_csg_zero(conn, login) < 0)
688 return -1; 626 return -1;
689 break; 627 break;
@@ -693,6 +631,7 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
693 return -1; 631 return -1;
694 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 632 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
695 login->tsih = conn->sess->tsih; 633 login->tsih = conn->sess->tsih;
634 login->login_complete = 1;
696 if (iscsi_target_do_tx_login_io(conn, 635 if (iscsi_target_do_tx_login_io(conn,
697 login) < 0) 636 login) < 0)
698 return -1; 637 return -1;
@@ -702,8 +641,7 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
702 default: 641 default:
703 pr_err("Illegal CSG: %d received from" 642 pr_err("Illegal CSG: %d received from"
704 " Initiator, protocol error.\n", 643 " Initiator, protocol error.\n",
705 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 644 ISCSI_LOGIN_CURRENT_STAGE(login_req->flags));
706 >> 2);
707 break; 645 break;
708 } 646 }
709 647
@@ -737,7 +675,7 @@ static void iscsi_initiatorname_tolower(
737/* 675/*
738 * Processes the first Login Request.. 676 * Processes the first Login Request..
739 */ 677 */
740static int iscsi_target_locate_portal( 678int iscsi_target_locate_portal(
741 struct iscsi_np *np, 679 struct iscsi_np *np,
742 struct iscsi_conn *conn, 680 struct iscsi_conn *conn,
743 struct iscsi_login *login) 681 struct iscsi_login *login)
@@ -753,22 +691,6 @@ static int iscsi_target_locate_portal(
753 login_req = (struct iscsi_login_req *) login->req; 691 login_req = (struct iscsi_login_req *) login->req;
754 payload_length = ntoh24(login_req->dlength); 692 payload_length = ntoh24(login_req->dlength);
755 693
756 login->first_request = 1;
757 login->leading_connection = (!login_req->tsih) ? 1 : 0;
758 login->current_stage =
759 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
760 login->version_min = login_req->min_version;
761 login->version_max = login_req->max_version;
762 memcpy(login->isid, login_req->isid, 6);
763 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
764 login->init_task_tag = login_req->itt;
765 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
766 login->cid = be16_to_cpu(login_req->cid);
767 login->tsih = be16_to_cpu(login_req->tsih);
768
769 if (iscsi_target_get_initial_payload(conn, login) < 0)
770 return -1;
771
772 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL); 694 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
773 if (!tmpbuf) { 695 if (!tmpbuf) {
774 pr_err("Unable to allocate memory for tmpbuf.\n"); 696 pr_err("Unable to allocate memory for tmpbuf.\n");
@@ -800,6 +722,8 @@ static int iscsi_target_locate_portal(
800 start += strlen(key) + strlen(value) + 2; 722 start += strlen(key) + strlen(value) + 2;
801 } 723 }
802 724
725 printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
726
803 /* 727 /*
804 * See 5.3. Login Phase. 728 * See 5.3. Login Phase.
805 */ 729 */
@@ -958,100 +882,30 @@ out:
958 return ret; 882 return ret;
959} 883}
960 884
961struct iscsi_login *iscsi_target_init_negotiation(
962 struct iscsi_np *np,
963 struct iscsi_conn *conn,
964 char *login_pdu)
965{
966 struct iscsi_login *login;
967
968 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
969 if (!login) {
970 pr_err("Unable to allocate memory for struct iscsi_login.\n");
971 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
972 ISCSI_LOGIN_STATUS_NO_RESOURCES);
973 return NULL;
974 }
975
976 login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
977 if (!login->req) {
978 pr_err("Unable to allocate memory for Login Request.\n");
979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
980 ISCSI_LOGIN_STATUS_NO_RESOURCES);
981 goto out;
982 }
983
984 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
985 if (!login->req_buf) {
986 pr_err("Unable to allocate memory for response buffer.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_NO_RESOURCES);
989 goto out;
990 }
991 /*
992 * SessionType: Discovery
993 *
994 * Locates Default Portal
995 *
996 * SessionType: Normal
997 *
998 * Locates Target Portal from NP -> Target IQN
999 */
1000 if (iscsi_target_locate_portal(np, conn, login) < 0) {
1001 goto out;
1002 }
1003
1004 return login;
1005out:
1006 kfree(login->req);
1007 kfree(login->req_buf);
1008 kfree(login);
1009
1010 return NULL;
1011}
1012
1013int iscsi_target_start_negotiation( 885int iscsi_target_start_negotiation(
1014 struct iscsi_login *login, 886 struct iscsi_login *login,
1015 struct iscsi_conn *conn) 887 struct iscsi_conn *conn)
1016{ 888{
1017 int ret = -1; 889 int ret;
1018
1019 login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
1020 if (!login->rsp) {
1021 pr_err("Unable to allocate memory for"
1022 " Login Response.\n");
1023 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1024 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1025 ret = -1;
1026 goto out;
1027 }
1028
1029 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
1030 if (!login->rsp_buf) {
1031 pr_err("Unable to allocate memory for"
1032 " request buffer.\n");
1033 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1034 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1035 ret = -1;
1036 goto out;
1037 }
1038 890
1039 ret = iscsi_target_do_login(conn, login); 891 ret = iscsi_target_do_login(conn, login);
1040out:
1041 if (ret != 0) 892 if (ret != 0)
1042 iscsi_remove_failed_auth_entry(conn); 893 iscsi_remove_failed_auth_entry(conn);
1043 894
1044 iscsi_target_nego_release(login, conn); 895 iscsi_target_nego_release(conn);
1045 return ret; 896 return ret;
1046} 897}
1047 898
1048void iscsi_target_nego_release( 899void iscsi_target_nego_release(struct iscsi_conn *conn)
1049 struct iscsi_login *login,
1050 struct iscsi_conn *conn)
1051{ 900{
1052 kfree(login->req); 901 struct iscsi_login *login = conn->conn_login;
1053 kfree(login->rsp); 902
903 if (!login)
904 return;
905
1054 kfree(login->req_buf); 906 kfree(login->req_buf);
1055 kfree(login->rsp_buf); 907 kfree(login->rsp_buf);
1056 kfree(login); 908 kfree(login);
909
910 conn->conn_login = NULL;
1057} 911}
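
Negotiation I/O in iscsi_target_nego.c likewise flows through the conn_transport callbacks, and the open-coded '(flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2' expressions become ISCSI_LOGIN_CURRENT_STAGE()/ISCSI_LOGIN_NEXT_STAGE(). Two bits of arithmetic recur in these hunks and are worth sanity-checking standalone: the 4-byte PDU padding '(-length) & 3' and the CSG/NSG extraction (the usual 0x0C and 0x03 masks from iscsi_proto.h are assumed):

#include <stdio.h>

#define CSG_MASK 0x0C	/* ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK (assumed) */
#define NSG_MASK 0x03	/* ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK (assumed) */

/* Pad a data segment length up to the next 4-byte boundary. */
static unsigned int pdu_padding(unsigned int len)
{
	return (-len) & 3;
}

int main(void)
{
	unsigned int len;
	unsigned char flags = 0x87;	/* Transit bit set, CSG = 1, NSG = 3 */

	for (len = 5; len <= 8; len++)
		printf("len %u -> %u pad byte(s)\n", len, pdu_padding(len));

	printf("CSG %u NSG %u\n", (flags & CSG_MASK) >> 2, flags & NSG_MASK);
	return 0;
}
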
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 92e133a5158f..f021cbd330e5 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -7,11 +7,14 @@
7extern void convert_null_to_semi(char *, int); 7extern void convert_null_to_semi(char *, int);
8extern int extract_param(const char *, const char *, unsigned int, char *, 8extern int extract_param(const char *, const char *, unsigned int, char *,
9 unsigned char *); 9 unsigned char *);
10extern struct iscsi_login *iscsi_target_init_negotiation( 10extern int iscsi_target_check_login_request(struct iscsi_conn *,
11 struct iscsi_np *, struct iscsi_conn *, char *); 11 struct iscsi_login *);
12extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
13 struct iscsi_login *);
14extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
15 struct iscsi_login *);
12extern int iscsi_target_start_negotiation( 16extern int iscsi_target_start_negotiation(
13 struct iscsi_login *, struct iscsi_conn *); 17 struct iscsi_login *, struct iscsi_conn *);
14extern void iscsi_target_nego_release( 18extern void iscsi_target_nego_release(struct iscsi_conn *);
15 struct iscsi_login *, struct iscsi_conn *);
16 19
17#endif /* ISCSI_TARGET_NEGO_H */ 20#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ca2be406f141..f690be9e5293 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -59,7 +59,7 @@ int iscsi_login_tx_data(
59 char *text_buf, 59 char *text_buf,
60 int text_length) 60 int text_length)
61{ 61{
62 int length, tx_sent; 62 int length, tx_sent, iov_cnt = 1;
63 struct kvec iov[2]; 63 struct kvec iov[2];
64 64
65 length = (ISCSI_HDR_LEN + text_length); 65 length = (ISCSI_HDR_LEN + text_length);
@@ -67,8 +67,12 @@ int iscsi_login_tx_data(
67 memset(&iov[0], 0, 2 * sizeof(struct kvec)); 67 memset(&iov[0], 0, 2 * sizeof(struct kvec));
68 iov[0].iov_len = ISCSI_HDR_LEN; 68 iov[0].iov_len = ISCSI_HDR_LEN;
69 iov[0].iov_base = pdu_buf; 69 iov[0].iov_base = pdu_buf;
70 iov[1].iov_len = text_length; 70
71 iov[1].iov_base = text_buf; 71 if (text_buf && text_length) {
72 iov[1].iov_len = text_length;
73 iov[1].iov_base = text_buf;
74 iov_cnt++;
75 }
72 76
73 /* 77 /*
74 * Initial Marker-less Interval. 78 * Initial Marker-less Interval.
@@ -77,7 +81,7 @@ int iscsi_login_tx_data(
77 */ 81 */
78 conn->if_marker += length; 82 conn->if_marker += length;
79 83
80 tx_sent = tx_data(conn, &iov[0], 2, length); 84 tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
81 if (tx_sent != length) { 85 if (tx_sent != length) {
82 pr_err("tx_data returned %d, expecting %d.\n", 86 pr_err("tx_data returned %d, expecting %d.\n",
83 tx_sent, length); 87 tx_sent, length);
@@ -429,6 +433,28 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
429 TYPERANGE_MARKINT, USE_INITIAL_ONLY); 433 TYPERANGE_MARKINT, USE_INITIAL_ONLY);
430 if (!param) 434 if (!param)
431 goto out; 435 goto out;
436 /*
437 * Extra parameters for ISER from RFC-5046
438 */
439 param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
442 if (!param)
443 goto out;
444
445 param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH,
446 INITIAL_INITIATORRECVDATASEGMENTLENGTH,
447 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
448 TYPERANGE_512_TO_16777215, USE_ALL);
449 if (!param)
450 goto out;
451
452 param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH,
453 INITIAL_TARGETRECVDATASEGMENTLENGTH,
454 PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
455 TYPERANGE_512_TO_16777215, USE_ALL);
456 if (!param)
457 goto out;
432 458
433 *param_list_ptr = pl; 459 *param_list_ptr = pl;
434 return 0; 460 return 0;
@@ -438,19 +464,23 @@ out:
438} 464}
439 465
440int iscsi_set_keys_to_negotiate( 466int iscsi_set_keys_to_negotiate(
441 int sessiontype, 467 struct iscsi_param_list *param_list,
442 struct iscsi_param_list *param_list) 468 bool iser)
443{ 469{
444 struct iscsi_param *param; 470 struct iscsi_param *param;
445 471
472 param_list->iser = iser;
473
446 list_for_each_entry(param, &param_list->param_list, p_list) { 474 list_for_each_entry(param, &param_list->param_list, p_list) {
447 param->state = 0; 475 param->state = 0;
448 if (!strcmp(param->name, AUTHMETHOD)) { 476 if (!strcmp(param->name, AUTHMETHOD)) {
449 SET_PSTATE_NEGOTIATE(param); 477 SET_PSTATE_NEGOTIATE(param);
450 } else if (!strcmp(param->name, HEADERDIGEST)) { 478 } else if (!strcmp(param->name, HEADERDIGEST)) {
451 SET_PSTATE_NEGOTIATE(param); 479 if (iser == false)
480 SET_PSTATE_NEGOTIATE(param);
452 } else if (!strcmp(param->name, DATADIGEST)) { 481 } else if (!strcmp(param->name, DATADIGEST)) {
453 SET_PSTATE_NEGOTIATE(param); 482 if (iser == false)
483 SET_PSTATE_NEGOTIATE(param);
454 } else if (!strcmp(param->name, MAXCONNECTIONS)) { 484 } else if (!strcmp(param->name, MAXCONNECTIONS)) {
455 SET_PSTATE_NEGOTIATE(param); 485 SET_PSTATE_NEGOTIATE(param);
456 } else if (!strcmp(param->name, TARGETNAME)) { 486 } else if (!strcmp(param->name, TARGETNAME)) {
@@ -469,7 +499,8 @@ int iscsi_set_keys_to_negotiate(
469 } else if (!strcmp(param->name, IMMEDIATEDATA)) { 499 } else if (!strcmp(param->name, IMMEDIATEDATA)) {
470 SET_PSTATE_NEGOTIATE(param); 500 SET_PSTATE_NEGOTIATE(param);
471 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { 501 } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
472 SET_PSTATE_NEGOTIATE(param); 502 if (iser == false)
503 SET_PSTATE_NEGOTIATE(param);
473 } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { 504 } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
474 continue; 505 continue;
475 } else if (!strcmp(param->name, MAXBURSTLENGTH)) { 506 } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
@@ -498,6 +529,15 @@ int iscsi_set_keys_to_negotiate(
498 SET_PSTATE_NEGOTIATE(param); 529 SET_PSTATE_NEGOTIATE(param);
499 } else if (!strcmp(param->name, OFMARKINT)) { 530 } else if (!strcmp(param->name, OFMARKINT)) {
500 SET_PSTATE_NEGOTIATE(param); 531 SET_PSTATE_NEGOTIATE(param);
532 } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
533 if (iser == true)
534 SET_PSTATE_NEGOTIATE(param);
535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
536 if (iser == true)
537 SET_PSTATE_NEGOTIATE(param);
538 } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
539 if (iser == true)
540 SET_PSTATE_NEGOTIATE(param);
501 } 541 }
502 } 542 }
503 543
@@ -540,6 +580,12 @@ int iscsi_set_keys_irrelevant_for_discovery(
540 param->state &= ~PSTATE_NEGOTIATE; 580 param->state &= ~PSTATE_NEGOTIATE;
541 else if (!strcmp(param->name, OFMARKINT)) 581 else if (!strcmp(param->name, OFMARKINT))
542 param->state &= ~PSTATE_NEGOTIATE; 582 param->state &= ~PSTATE_NEGOTIATE;
583 else if (!strcmp(param->name, RDMAEXTENTIONS))
584 param->state &= ~PSTATE_NEGOTIATE;
585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
586 param->state &= ~PSTATE_NEGOTIATE;
587 else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH))
588 param->state &= ~PSTATE_NEGOTIATE;
543 } 589 }
544 590
545 return 0; 591 return 0;
@@ -1755,6 +1801,9 @@ void iscsi_set_connection_parameters(
1755 * this key is not sent over the wire. 1801 * this key is not sent over the wire.
1756 */ 1802 */
1757 if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { 1803 if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
1804 if (param_list->iser == true)
1805 continue;
1806
1758 ops->MaxXmitDataSegmentLength = 1807 ops->MaxXmitDataSegmentLength =
1759 simple_strtoul(param->value, &tmpptr, 0); 1808 simple_strtoul(param->value, &tmpptr, 0);
1760 pr_debug("MaxXmitDataSegmentLength: %s\n", 1809 pr_debug("MaxXmitDataSegmentLength: %s\n",
@@ -1800,6 +1849,22 @@ void iscsi_set_connection_parameters(
1800 simple_strtoul(param->value, &tmpptr, 0); 1849 simple_strtoul(param->value, &tmpptr, 0);
1801 pr_debug("IFMarkInt: %s\n", 1850 pr_debug("IFMarkInt: %s\n",
1802 param->value); 1851 param->value);
1852 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
1853 ops->InitiatorRecvDataSegmentLength =
1854 simple_strtoul(param->value, &tmpptr, 0);
1855 pr_debug("InitiatorRecvDataSegmentLength: %s\n",
1856 param->value);
1857 ops->MaxRecvDataSegmentLength =
1858 ops->InitiatorRecvDataSegmentLength;
1859 pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n");
1860 } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
1861 ops->TargetRecvDataSegmentLength =
1862 simple_strtoul(param->value, &tmpptr, 0);
1863 pr_debug("TargetRecvDataSegmentLength: %s\n",
1864 param->value);
1865 ops->MaxXmitDataSegmentLength =
1866 ops->TargetRecvDataSegmentLength;
1867 pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n");
1803 } 1868 }
1804 } 1869 }
1805 pr_debug("----------------------------------------------------" 1870 pr_debug("----------------------------------------------------"
@@ -1912,6 +1977,10 @@ void iscsi_set_session_parameters(
1912 ops->SessionType = !strcmp(param->value, DISCOVERY); 1977 ops->SessionType = !strcmp(param->value, DISCOVERY);
1913 pr_debug("SessionType: %s\n", 1978 pr_debug("SessionType: %s\n",
1914 param->value); 1979 param->value);
1980 } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
1981 ops->RDMAExtensions = !strcmp(param->value, YES);
1982 pr_debug("RDMAExtensions: %s\n",
1983 param->value);
1915 } 1984 }
1916 } 1985 }
1917 pr_debug("----------------------------------------------------" 1986 pr_debug("----------------------------------------------------"
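
iscsi_login_tx_data() used to hand tx_data() two kvecs even when there was no key=value text, which meant a zero-length trailing element; it now appends the second kvec only when text_buf and text_length are set. The same file also registers the three RFC 5046 iSER keys with defaults and flags them for negotiation only when param_list->iser is set, while skipping HeaderDigest, DataDigest and MaxRecvDataSegmentLength on iSER connections. The optional-iovec pattern, in userspace writev() terms:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define HDR_LEN 48

/* Send an iSCSI-style header plus optional text payload with one writev(). */
static ssize_t send_pdu(int fd, void *hdr, char *text, size_t text_len)
{
	struct iovec iov[2];
	int iov_cnt = 1;

	iov[0].iov_base = hdr;
	iov[0].iov_len  = HDR_LEN;
	if (text && text_len) {		/* only add the second element when needed */
		iov[1].iov_base = text;
		iov[1].iov_len  = text_len;
		iov_cnt++;
	}
	return writev(fd, iov, iov_cnt);
}

int main(void)
{
	char hdr[HDR_LEN];
	char text[] = "TargetName=iqn.2003-01.org.example:target0";

	memset(hdr, 0, sizeof(hdr));
	send_pdu(STDOUT_FILENO, hdr, text, strlen(text));	/* header + text */
	send_pdu(STDOUT_FILENO, hdr, NULL, 0);			/* header only */
	return 0;
}
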
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 1e1b7504a76b..f31b9c4b83f2 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -27,7 +27,7 @@ extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
27extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *); 27extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
28extern void iscsi_print_params(struct iscsi_param_list *); 28extern void iscsi_print_params(struct iscsi_param_list *);
29extern int iscsi_create_default_params(struct iscsi_param_list **); 29extern int iscsi_create_default_params(struct iscsi_param_list **);
30extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *); 30extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
31extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *); 31extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
32extern int iscsi_copy_param_list(struct iscsi_param_list **, 32extern int iscsi_copy_param_list(struct iscsi_param_list **,
33 struct iscsi_param_list *, int); 33 struct iscsi_param_list *, int);
@@ -89,6 +89,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
89#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft" 89#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
90 90
91/* 91/*
92 * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
93 */
94#define RDMAEXTENTIONS "RDMAExtensions"
95#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
96#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
97
98/*
92 * For AuthMethod. 99 * For AuthMethod.
93 */ 100 */
94#define KRB5 "KRB5" 101#define KRB5 "KRB5"
@@ -133,6 +140,13 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
133#define INITIAL_OFMARKINT "2048~65535" 140#define INITIAL_OFMARKINT "2048~65535"
134 141
135/* 142/*
143 * Initial values for iSER parameters following RFC-5046 Section 6
144 */
145#define INITIAL_RDMAEXTENTIONS NO
146#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
147#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
148
149/*
136 * For [Header,Data]Digests. 150 * For [Header,Data]Digests.
137 */ 151 */
138#define CRC32C "CRC32C" 152#define CRC32C "CRC32C"
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 9d4417aae921..b997e5da47d3 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -23,6 +23,7 @@
23#include <scsi/iscsi_proto.h> 23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_fabric.h> 25#include <target/target_core_fabric.h>
26#include <target/iscsi/iscsi_transport.h>
26 27
27#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
28#include "iscsi_target_seq_pdu_list.h" 29#include "iscsi_target_seq_pdu_list.h"
@@ -301,7 +302,7 @@ static int iscsit_task_reassign_complete_write(
301 /* 302 /*
302 * iscsit_build_r2ts_for_cmd() can handle the rest from here. 303 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
303 */ 304 */
304 return iscsit_build_r2ts_for_cmd(cmd, conn, true); 305 return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
305} 306}
306 307
307static int iscsit_task_reassign_complete_read( 308static int iscsit_task_reassign_complete_read(
@@ -471,6 +472,7 @@ int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
471 472
472 return 0; 473 return 0;
473} 474}
475EXPORT_SYMBOL(iscsit_tmr_post_handler);
474 476
475/* 477/*
476 * Nothing to do here, but leave it for good measure. :-) 478 * Nothing to do here, but leave it for good measure. :-)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index ee8f8c66248d..439260b7d87f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -31,6 +31,8 @@
31#include "iscsi_target.h" 31#include "iscsi_target.h"
32#include "iscsi_target_parameters.h" 32#include "iscsi_target_parameters.h"
33 33
34#include <target/iscsi/iscsi_transport.h>
35
34struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt) 36struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
35{ 37{
36 struct iscsi_portal_group *tpg; 38 struct iscsi_portal_group *tpg;
@@ -508,7 +510,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
508 510
509 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n", 511 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
510 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 512 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
511 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 513 np->np_transport->name);
512 514
513 return tpg_np; 515 return tpg_np;
514} 516}
@@ -522,7 +524,7 @@ static int iscsit_tpg_release_np(
522 524
523 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 525 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
524 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 526 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
525 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 527 np->np_transport->name);
526 528
527 tpg_np->tpg_np = NULL; 529 tpg_np->tpg_np = NULL;
528 tpg_np->tpg = NULL; 530 tpg_np->tpg = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
new file mode 100644
index 000000000000..882728fac30c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -0,0 +1,55 @@
1#include <linux/spinlock.h>
2#include <linux/list.h>
3#include <target/iscsi/iscsi_transport.h>
4
5static LIST_HEAD(g_transport_list);
6static DEFINE_MUTEX(transport_mutex);
7
8struct iscsit_transport *iscsit_get_transport(int type)
9{
10 struct iscsit_transport *t;
11
12 mutex_lock(&transport_mutex);
13 list_for_each_entry(t, &g_transport_list, t_node) {
14 if (t->transport_type == type) {
15 if (t->owner && !try_module_get(t->owner)) {
16 t = NULL;
17 }
18 mutex_unlock(&transport_mutex);
19 return t;
20 }
21 }
22 mutex_unlock(&transport_mutex);
23
24 return NULL;
25}
26
27void iscsit_put_transport(struct iscsit_transport *t)
28{
29 if (t->owner)
30 module_put(t->owner);
31}
32
33int iscsit_register_transport(struct iscsit_transport *t)
34{
35 INIT_LIST_HEAD(&t->t_node);
36
37 mutex_lock(&transport_mutex);
38 list_add_tail(&t->t_node, &g_transport_list);
39 mutex_unlock(&transport_mutex);
40
41 pr_debug("Registered iSCSI transport: %s\n", t->name);
42
43 return 0;
44}
45EXPORT_SYMBOL(iscsit_register_transport);
46
47void iscsit_unregister_transport(struct iscsit_transport *t)
48{
49 mutex_lock(&transport_mutex);
50 list_del(&t->t_node);
51 mutex_unlock(&transport_mutex);
52
53 pr_debug("Unregistered iSCSI transport: %s\n", t->name);
54}
55EXPORT_SYMBOL(iscsit_unregister_transport);
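The registry above is intentionally minimal: a mutex-protected list keyed by transport_type, with try_module_get()/module_put() pinning the owning module while the core holds a reference obtained from iscsit_get_transport(). A minimal sketch of how a transport module would use it (the example_* names are illustrative, not from the tree):

#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

static struct iscsit_transport example_transport = {
	.name		= "example",
	.transport_type	= ISCSI_TCP,	/* illustrative; a real driver uses its own type */
	.owner		= THIS_MODULE,
	/*
	 * Remaining callbacks omitted here; the full template is in
	 * include/target/iscsi/iscsi_transport.h further down.
	 */
};

static int __init example_init(void)
{
	/* adds the transport to g_transport_list under transport_mutex */
	return iscsit_register_transport(&example_transport);
}

static void __exit example_exit(void)
{
	iscsit_unregister_transport(&example_transport);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because iscsit_get_transport() takes the module reference, a registered transport cannot be unloaded while the iscsi-target core still holds a reference to it.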
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7ce350578c82..2cc6c9a3ffb8 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -24,6 +24,7 @@
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_fabric.h> 25#include <target/target_core_fabric.h>
26#include <target/target_core_configfs.h> 26#include <target/target_core_configfs.h>
27#include <target/iscsi/iscsi_transport.h>
27 28
28#include "iscsi_target_core.h" 29#include "iscsi_target_core.h"
29#include "iscsi_target_parameters.h" 30#include "iscsi_target_parameters.h"
@@ -148,6 +149,18 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
148 spin_unlock_bh(&cmd->r2t_lock); 149 spin_unlock_bh(&cmd->r2t_lock);
149} 150}
150 151
152struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
153{
154 struct iscsi_cmd *cmd;
155
156 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
157 if (!cmd)
158 return NULL;
159
160 cmd->release_cmd = &iscsit_release_cmd;
161 return cmd;
162}
163
151/* 164/*
152 * May be called from software interrupt (timer) context for allocating 165 * May be called from software interrupt (timer) context for allocating
153 * iSCSI NopINs. 166 * iSCSI NopINs.
@@ -156,13 +169,12 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
156{ 169{
157 struct iscsi_cmd *cmd; 170 struct iscsi_cmd *cmd;
158 171
159 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); 172 cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask);
160 if (!cmd) { 173 if (!cmd) {
161 pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); 174 pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
162 return NULL; 175 return NULL;
163 } 176 }
164 177 cmd->conn = conn;
165 cmd->conn = conn;
166 INIT_LIST_HEAD(&cmd->i_conn_node); 178 INIT_LIST_HEAD(&cmd->i_conn_node);
167 INIT_LIST_HEAD(&cmd->datain_list); 179 INIT_LIST_HEAD(&cmd->datain_list);
168 INIT_LIST_HEAD(&cmd->cmd_r2t_list); 180 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -175,6 +187,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
175 187
176 return cmd; 188 return cmd;
177} 189}
190EXPORT_SYMBOL(iscsit_allocate_cmd);
178 191
179struct iscsi_seq *iscsit_get_seq_holder_for_datain( 192struct iscsi_seq *iscsit_get_seq_holder_for_datain(
180 struct iscsi_cmd *cmd, 193 struct iscsi_cmd *cmd,
@@ -304,6 +317,7 @@ int iscsit_sequence_cmd(
304 317
305 return ret; 318 return ret;
306} 319}
320EXPORT_SYMBOL(iscsit_sequence_cmd);
307 321
308int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) 322int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
309{ 323{
@@ -689,6 +703,11 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
689 */ 703 */
690 switch (cmd->iscsi_opcode) { 704 switch (cmd->iscsi_opcode) {
691 case ISCSI_OP_SCSI_CMD: 705 case ISCSI_OP_SCSI_CMD:
706 if (cmd->data_direction == DMA_TO_DEVICE)
707 iscsit_stop_dataout_timer(cmd);
708 /*
709 * Fallthrough
710 */
692 case ISCSI_OP_SCSI_TMFUNC: 711 case ISCSI_OP_SCSI_TMFUNC:
693 transport_generic_free_cmd(&cmd->se_cmd, 1); 712 transport_generic_free_cmd(&cmd->se_cmd, 1);
694 break; 713 break;
@@ -704,7 +723,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
704 } 723 }
705 /* Fall-through */ 724 /* Fall-through */
706 default: 725 default:
707 iscsit_release_cmd(cmd); 726 cmd->release_cmd(cmd);
708 break; 727 break;
709 } 728 }
710} 729}
@@ -1226,34 +1245,19 @@ send_datacrc:
1226 */ 1245 */
1227int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail) 1246int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1228{ 1247{
1229 u8 iscsi_hdr[ISCSI_HDR_LEN];
1230 int err;
1231 struct kvec iov;
1232 struct iscsi_login_rsp *hdr; 1248 struct iscsi_login_rsp *hdr;
1249 struct iscsi_login *login = conn->conn_login;
1233 1250
1251 login->login_failed = 1;
1234 iscsit_collect_login_stats(conn, status_class, status_detail); 1252 iscsit_collect_login_stats(conn, status_class, status_detail);
1235 1253
1236 memset(&iov, 0, sizeof(struct kvec)); 1254 hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1237 memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
1238
1239 hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
1240 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1255 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1241 hdr->status_class = status_class; 1256 hdr->status_class = status_class;
1242 hdr->status_detail = status_detail; 1257 hdr->status_detail = status_detail;
1243 hdr->itt = conn->login_itt; 1258 hdr->itt = conn->login_itt;
1244 1259
1245 iov.iov_base = &iscsi_hdr; 1260 return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
1246 iov.iov_len = ISCSI_HDR_LEN;
1247
1248 PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
1249
1250 err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
1251 if (err != ISCSI_HDR_LEN) {
1252 pr_err("tx_data returned less than expected\n");
1253 return -1;
1254 }
1255
1256 return 0;
1257} 1261}
1258 1262
1259void iscsit_print_session_params(struct iscsi_session *sess) 1263void iscsit_print_session_params(struct iscsi_session *sess)
@@ -1432,7 +1436,8 @@ void iscsit_collect_login_stats(
1432 strcpy(ls->last_intr_fail_name, 1436 strcpy(ls->last_intr_fail_name,
1433 (intrname ? intrname->value : "Unknown")); 1437 (intrname ? intrname->value : "Unknown"));
1434 1438
1435 ls->last_intr_fail_ip_family = conn->sock->sk->sk_family; 1439 ls->last_intr_fail_ip_family = conn->login_family;
1440
1436 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE, 1441 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
1437 "%s", conn->login_ip); 1442 "%s", conn->login_ip);
1438 ls->last_fail_time = get_jiffies_64(); 1443 ls->last_fail_time = get_jiffies_64();
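The new iscsit_alloc_cmd() default plus the conn->conn_transport->iscsit_alloc_cmd() indirection let a transport embed struct iscsi_cmd inside a larger per-command descriptor while iscsit_free_cmd() keeps tearing commands down through cmd->release_cmd. A sketch of that pattern, assuming the release_cmd hook added to struct iscsi_cmd by this series and using hypothetical example_* names (broadly the approach the new iser-target driver takes):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <target/iscsi/iscsi_transport.h>

struct example_cmd {
	struct iscsi_cmd	iscsi_cmd;	/* still usable by the iscsi-target core */
	void			*transport_state; /* hypothetical per-command state */
};

static void example_release_cmd(struct iscsi_cmd *cmd)
{
	struct example_cmd *ecmd = container_of(cmd, struct example_cmd, iscsi_cmd);

	kfree(ecmd);
}

static struct iscsi_cmd *example_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
	struct example_cmd *ecmd;

	ecmd = kzalloc(sizeof(*ecmd), gfp_mask);
	if (!ecmd)
		return NULL;
	/* iscsit_free_cmd() releases the command through this hook */
	ecmd->iscsi_cmd.release_cmd = example_release_cmd;
	return &ecmd->iscsi_cmd;
}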
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 894d0f837924..4f8e01a47081 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -8,6 +8,7 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
8extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); 8extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); 9extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); 10extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
11extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
11extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); 12extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
12extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); 13extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
13extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); 14extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 17a6acbc3ab0..58ed683e04ae 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -30,8 +30,10 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/falloc.h>
33#include <scsi/scsi.h> 34#include <scsi/scsi.h>
34#include <scsi/scsi_host.h> 35#include <scsi/scsi_host.h>
36#include <asm/unaligned.h>
35 37
36#include <target/target_core_base.h> 38#include <target/target_core_base.h>
37#include <target/target_core_backend.h> 39#include <target/target_core_backend.h>
@@ -166,6 +168,33 @@ static int fd_configure_device(struct se_device *dev)
166 " block_device blocks: %llu logical_block_size: %d\n", 168 " block_device blocks: %llu logical_block_size: %d\n",
167 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 169 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
168 fd_dev->fd_block_size); 170 fd_dev->fd_block_size);
171 /*
172 * Check if the underlying struct block_device request_queue supports
173 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
174 * in ATA and we need to set TPE=1
175 */
176 if (blk_queue_discard(q)) {
177 dev->dev_attrib.max_unmap_lba_count =
178 q->limits.max_discard_sectors;
179 /*
180 * Currently hardcoded to 1 in Linux/SCSI code..
181 */
182 dev->dev_attrib.max_unmap_block_desc_count = 1;
183 dev->dev_attrib.unmap_granularity =
184 q->limits.discard_granularity >> 9;
185 dev->dev_attrib.unmap_granularity_alignment =
186 q->limits.discard_alignment;
187 pr_debug("IFILE: BLOCK Discard support available,"
188 " disabled by default\n");
189 }
190 /*
191 * Enable write same emulation for IBLOCK and use 0xFFFF as
192 * the smaller WRITE_SAME(10) only has a two-byte block count.
193 */
194 dev->dev_attrib.max_write_same_len = 0xFFFF;
195
196 if (blk_queue_nonrot(q))
197 dev->dev_attrib.is_nonrot = 1;
169 } else { 198 } else {
170 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 199 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
171 pr_err("FILEIO: Missing fd_dev_size=" 200 pr_err("FILEIO: Missing fd_dev_size="
@@ -176,6 +205,23 @@ static int fd_configure_device(struct se_device *dev)
176 205
177 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; 206 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
178 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 207 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
208
209 /*
210 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
211 */
212 dev->dev_attrib.max_unmap_lba_count = 0x2000;
213 /*
214 * Currently hardcoded to 1 in Linux/SCSI code..
215 */
216 dev->dev_attrib.max_unmap_block_desc_count = 1;
217 dev->dev_attrib.unmap_granularity = 1;
218 dev->dev_attrib.unmap_granularity_alignment = 0;
219
220 /*
221 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
222 * based upon struct iovec limit for vfs_writev()
223 */
224 dev->dev_attrib.max_write_same_len = 0x1000;
179 } 225 }
180 226
181 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; 227 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
@@ -190,11 +236,6 @@ static int fd_configure_device(struct se_device *dev)
190 236
191 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 237 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
192 fd_dev->fd_queue_depth = dev->queue_depth; 238 fd_dev->fd_queue_depth = dev->queue_depth;
193 /*
194 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
195 * based upon struct iovec limit for vfs_writev()
196 */
197 dev->dev_attrib.max_write_same_len = 0x1000;
198 239
199 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 240 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
200 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 241 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
@@ -442,6 +483,75 @@ fd_execute_write_same(struct se_cmd *cmd)
442} 483}
443 484
444static sense_reason_t 485static sense_reason_t
486fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
487{
488 struct file *file = priv;
489 struct inode *inode = file->f_mapping->host;
490 int ret;
491
492 if (S_ISBLK(inode->i_mode)) {
493 /* The backend is block device, use discard */
494 struct block_device *bdev = inode->i_bdev;
495
496 ret = blkdev_issue_discard(bdev, lba,
497 nolb, GFP_KERNEL, 0);
498 if (ret < 0) {
499 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
500 ret);
501 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
502 }
503 } else {
504 /* The backend is normal file, use fallocate */
505 struct se_device *se_dev = cmd->se_dev;
506 loff_t pos = lba * se_dev->dev_attrib.block_size;
507 unsigned int len = nolb * se_dev->dev_attrib.block_size;
508 int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
509
510 if (!file->f_op->fallocate)
511 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
512
513 ret = file->f_op->fallocate(file, mode, pos, len);
514 if (ret < 0) {
515 pr_warn("FILEIO: fallocate() failed: %d\n", ret);
516 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
517 }
518 }
519
520 return 0;
521}
522
523static sense_reason_t
524fd_execute_write_same_unmap(struct se_cmd *cmd)
525{
526 struct se_device *se_dev = cmd->se_dev;
527 struct fd_dev *fd_dev = FD_DEV(se_dev);
528 struct file *file = fd_dev->fd_file;
529 sector_t lba = cmd->t_task_lba;
530 sector_t nolb = sbc_get_write_same_sectors(cmd);
531 int ret;
532
533 if (!nolb) {
534 target_complete_cmd(cmd, SAM_STAT_GOOD);
535 return 0;
536 }
537
538 ret = fd_do_unmap(cmd, file, lba, nolb);
539 if (ret)
540 return ret;
541
542 target_complete_cmd(cmd, GOOD);
543 return 0;
544}
545
546static sense_reason_t
547fd_execute_unmap(struct se_cmd *cmd)
548{
549 struct file *file = FD_DEV(cmd->se_dev)->fd_file;
550
551 return sbc_execute_unmap(cmd, fd_do_unmap, file);
552}
553
554static sense_reason_t
445fd_execute_rw(struct se_cmd *cmd) 555fd_execute_rw(struct se_cmd *cmd)
446{ 556{
447 struct scatterlist *sgl = cmd->t_data_sg; 557 struct scatterlist *sgl = cmd->t_data_sg;
@@ -600,6 +710,8 @@ static struct sbc_ops fd_sbc_ops = {
600 .execute_rw = fd_execute_rw, 710 .execute_rw = fd_execute_rw,
601 .execute_sync_cache = fd_execute_sync_cache, 711 .execute_sync_cache = fd_execute_sync_cache,
602 .execute_write_same = fd_execute_write_same, 712 .execute_write_same = fd_execute_write_same,
713 .execute_write_same_unmap = fd_execute_write_same_unmap,
714 .execute_unmap = fd_execute_unmap,
603}; 715};
604 716
605static sense_reason_t 717static sense_reason_t
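For the regular-file case, fd_do_unmap() relies on FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, i.e. it deallocates the byte range backing the unmapped LBAs without changing the file size, and it bails out with TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE when the filesystem provides no ->fallocate hook. A rough userspace analogue of the same operation (illustrative only, not kernel code):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/*
 * Deallocate len bytes at byte offset pos while keeping the file size,
 * roughly what fd_do_unmap() asks the filesystem to do for a file backend.
 */
static int punch_hole(int fd, off_t pos, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, pos, len);
}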
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8bcc514ec8b6..07f5f94634bb 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -380,104 +380,40 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
380} 380}
381 381
382static sense_reason_t 382static sense_reason_t
383iblock_execute_unmap(struct se_cmd *cmd) 383iblock_do_unmap(struct se_cmd *cmd, void *priv,
384 sector_t lba, sector_t nolb)
384{ 385{
385 struct se_device *dev = cmd->se_dev; 386 struct block_device *bdev = priv;
386 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 387 int ret;
387 unsigned char *buf, *ptr = NULL;
388 sector_t lba;
389 int size;
390 u32 range;
391 sense_reason_t ret = 0;
392 int dl, bd_dl, err;
393
394 /* We never set ANC_SUP */
395 if (cmd->t_task_cdb[1])
396 return TCM_INVALID_CDB_FIELD;
397
398 if (cmd->data_length == 0) {
399 target_complete_cmd(cmd, SAM_STAT_GOOD);
400 return 0;
401 }
402 388
403 if (cmd->data_length < 8) { 389 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
404 pr_warn("UNMAP parameter list length %u too small\n", 390 if (ret < 0) {
405 cmd->data_length); 391 pr_err("blkdev_issue_discard() failed: %d\n", ret);
406 return TCM_PARAMETER_LIST_LENGTH_ERROR;
407 }
408
409 buf = transport_kmap_data_sg(cmd);
410 if (!buf)
411 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 392 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
412
413 dl = get_unaligned_be16(&buf[0]);
414 bd_dl = get_unaligned_be16(&buf[2]);
415
416 size = cmd->data_length - 8;
417 if (bd_dl > size)
418 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
419 cmd->data_length, bd_dl);
420 else
421 size = bd_dl;
422
423 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
424 ret = TCM_INVALID_PARAMETER_LIST;
425 goto err;
426 } 393 }
427 394
428 /* First UNMAP block descriptor starts at 8 byte offset */ 395 return 0;
429 ptr = &buf[8]; 396}
430 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
431 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
432
433 while (size >= 16) {
434 lba = get_unaligned_be64(&ptr[0]);
435 range = get_unaligned_be32(&ptr[8]);
436 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
437 (unsigned long long)lba, range);
438
439 if (range > dev->dev_attrib.max_unmap_lba_count) {
440 ret = TCM_INVALID_PARAMETER_LIST;
441 goto err;
442 }
443
444 if (lba + range > dev->transport->get_blocks(dev) + 1) {
445 ret = TCM_ADDRESS_OUT_OF_RANGE;
446 goto err;
447 }
448
449 err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
450 GFP_KERNEL, 0);
451 if (err < 0) {
452 pr_err("blkdev_issue_discard() failed: %d\n",
453 err);
454 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
455 goto err;
456 }
457 397
458 ptr += 16; 398static sense_reason_t
459 size -= 16; 399iblock_execute_unmap(struct se_cmd *cmd)
460 } 400{
401 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
461 402
462err: 403 return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
463 transport_kunmap_data_sg(cmd);
464 if (!ret)
465 target_complete_cmd(cmd, GOOD);
466 return ret;
467} 404}
468 405
469static sense_reason_t 406static sense_reason_t
470iblock_execute_write_same_unmap(struct se_cmd *cmd) 407iblock_execute_write_same_unmap(struct se_cmd *cmd)
471{ 408{
472 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 409 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
473 int rc; 410 sector_t lba = cmd->t_task_lba;
474 411 sector_t nolb = sbc_get_write_same_sectors(cmd);
475 rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba, 412 int ret;
476 sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0); 413
477 if (rc < 0) { 414 ret = iblock_do_unmap(cmd, bdev, lba, nolb);
478 pr_warn("blkdev_issue_discard() failed: %d\n", rc); 415 if (ret)
479 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 416 return ret;
480 }
481 417
482 target_complete_cmd(cmd, GOOD); 418 target_complete_cmd(cmd, GOOD);
483 return 0; 419 return 0;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 60d4b5185f32..bbc5b0ee2bdc 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -596,3 +596,88 @@ u32 sbc_get_device_type(struct se_device *dev)
596 return TYPE_DISK; 596 return TYPE_DISK;
597} 597}
598EXPORT_SYMBOL(sbc_get_device_type); 598EXPORT_SYMBOL(sbc_get_device_type);
599
600sense_reason_t
601sbc_execute_unmap(struct se_cmd *cmd,
602 sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
603 sector_t, sector_t),
604 void *priv)
605{
606 struct se_device *dev = cmd->se_dev;
607 unsigned char *buf, *ptr = NULL;
608 sector_t lba;
609 int size;
610 u32 range;
611 sense_reason_t ret = 0;
612 int dl, bd_dl;
613
614 /* We never set ANC_SUP */
615 if (cmd->t_task_cdb[1])
616 return TCM_INVALID_CDB_FIELD;
617
618 if (cmd->data_length == 0) {
619 target_complete_cmd(cmd, SAM_STAT_GOOD);
620 return 0;
621 }
622
623 if (cmd->data_length < 8) {
624 pr_warn("UNMAP parameter list length %u too small\n",
625 cmd->data_length);
626 return TCM_PARAMETER_LIST_LENGTH_ERROR;
627 }
628
629 buf = transport_kmap_data_sg(cmd);
630 if (!buf)
631 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
632
633 dl = get_unaligned_be16(&buf[0]);
634 bd_dl = get_unaligned_be16(&buf[2]);
635
636 size = cmd->data_length - 8;
637 if (bd_dl > size)
638 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
639 cmd->data_length, bd_dl);
640 else
641 size = bd_dl;
642
643 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
644 ret = TCM_INVALID_PARAMETER_LIST;
645 goto err;
646 }
647
648 /* First UNMAP block descriptor starts at 8 byte offset */
649 ptr = &buf[8];
650 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
651 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
652
653 while (size >= 16) {
654 lba = get_unaligned_be64(&ptr[0]);
655 range = get_unaligned_be32(&ptr[8]);
656 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
657 (unsigned long long)lba, range);
658
659 if (range > dev->dev_attrib.max_unmap_lba_count) {
660 ret = TCM_INVALID_PARAMETER_LIST;
661 goto err;
662 }
663
664 if (lba + range > dev->transport->get_blocks(dev) + 1) {
665 ret = TCM_ADDRESS_OUT_OF_RANGE;
666 goto err;
667 }
668
669 ret = do_unmap_fn(cmd, priv, lba, range);
670 if (ret)
671 goto err;
672
673 ptr += 16;
674 size -= 16;
675 }
676
677err:
678 transport_kunmap_data_sg(cmd);
679 if (!ret)
680 target_complete_cmd(cmd, GOOD);
681 return ret;
682}
683EXPORT_SYMBOL(sbc_execute_unmap);
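For reference, the buffer sbc_execute_unmap() walks is the standard SBC UNMAP parameter list: an 8-byte header followed by 16-byte block descriptors, which is exactly what the get_unaligned_be*() offsets above correspond to. A sketch of that layout (the code intentionally reads raw offsets instead of defining these structures):

/* UNMAP parameter list layout per SBC-3; sketch for reference only. */
struct unmap_param_list_header {
	__be16	data_length;		/* "dl":    buf[0..1] */
	__be16	block_desc_length;	/* "bd_dl": buf[2..3] */
	__u8	reserved[4];
};					/* block descriptors start at buf[8] */

struct unmap_block_descriptor {		/* 16 bytes each */
	__be64	lba;			/* first LBA to unmap */
	__be32	num_lbas;		/* number of LBAs ("range" above) */
	__u8	reserved[4];
};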
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3243ea790eab..f8388b4024aa 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -65,7 +65,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
65static void transport_handle_queue_full(struct se_cmd *cmd, 65static void transport_handle_queue_full(struct se_cmd *cmd,
66 struct se_device *dev); 66 struct se_device *dev);
67static int transport_generic_get_mem(struct se_cmd *cmd); 67static int transport_generic_get_mem(struct se_cmd *cmd);
68static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
69static void transport_put_cmd(struct se_cmd *cmd); 68static void transport_put_cmd(struct se_cmd *cmd);
70static void target_complete_ok_work(struct work_struct *work); 69static void target_complete_ok_work(struct work_struct *work);
71 70
@@ -2179,7 +2178,7 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
2179 * @se_cmd: command descriptor to add 2178 * @se_cmd: command descriptor to add
2180 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2179 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
2181 */ 2180 */
2182static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, 2181int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2183 bool ack_kref) 2182 bool ack_kref)
2184{ 2183{
2185 unsigned long flags; 2184 unsigned long flags;
@@ -2208,6 +2207,7 @@ out:
2208 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2207 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2209 return ret; 2208 return ret;
2210} 2209}
2210EXPORT_SYMBOL(target_get_sess_cmd);
2211 2211
2212static void target_release_cmd_kref(struct kref *kref) 2212static void target_release_cmd_kref(struct kref *kref)
2213{ 2213{
@@ -2765,8 +2765,13 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2765 /* CURRENT ERROR */ 2765 /* CURRENT ERROR */
2766 buffer[0] = 0x70; 2766 buffer[0] = 0x70;
2767 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2767 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2768 /* ILLEGAL REQUEST */ 2768 /*
2769 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2769 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2770 * Solaris initiators. Returning NOT READY instead means the
2771 * operations will be retried a finite number of times and we
2772 * can survive intermittent errors.
2773 */
2774 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2770 /* LOGICAL UNIT COMMUNICATION FAILURE */ 2775 /* LOGICAL UNIT COMMUNICATION FAILURE */
2771 buffer[SPC_ASC_KEY_OFFSET] = 0x08; 2776 buffer[SPC_ASC_KEY_OFFSET] = 0x08;
2772 break; 2777 break;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index b6fd4cf42840..e415af32115a 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -103,6 +103,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
103 use_sg = !(remaining % 4); 103 use_sg = !(remaining % 4);
104 104
105 while (remaining) { 105 while (remaining) {
106 struct fc_seq *seq = cmd->seq;
107
108 if (!seq) {
109 pr_debug("%s: Command aborted, xid 0x%x\n",
110 __func__, ep->xid);
111 break;
112 }
106 if (!mem_len) { 113 if (!mem_len) {
107 sg = sg_next(sg); 114 sg = sg_next(sg);
108 mem_len = min((size_t)sg->length, remaining); 115 mem_len = min((size_t)sg->length, remaining);
@@ -169,7 +176,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
169 f_ctl |= FC_FC_END_SEQ; 176 f_ctl |= FC_FC_END_SEQ;
170 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 177 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
171 FC_TYPE_FCP, f_ctl, fh_off); 178 FC_TYPE_FCP, f_ctl, fh_off);
172 error = lport->tt.seq_send(lport, cmd->seq, fp); 179 error = lport->tt.seq_send(lport, seq, fp);
173 if (error) { 180 if (error) {
174 /* XXX For now, initiator will retry */ 181 /* XXX For now, initiator will retry */
175 pr_err_ratelimited("%s: Failed to send frame %p, " 182 pr_err_ratelimited("%s: Failed to send frame %p, "
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 113f33598b9f..4859505ae2ed 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -428,19 +428,12 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
428 return ret; 428 return ret;
429} 429}
430 430
431static void ft_sess_rcu_free(struct rcu_head *rcu)
432{
433 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
434
435 kfree(sess);
436}
437
438static void ft_sess_free(struct kref *kref) 431static void ft_sess_free(struct kref *kref)
439{ 432{
440 struct ft_sess *sess = container_of(kref, struct ft_sess, kref); 433 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
441 434
442 transport_deregister_session(sess->se_sess); 435 transport_deregister_session(sess->se_sess);
443 call_rcu(&sess->rcu, ft_sess_rcu_free); 436 kfree_rcu(sess, rcu);
444} 437}
445 438
446void ft_sess_put(struct ft_sess *sess) 439void ft_sess_put(struct ft_sess *sess)
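The ft_sess change is a straight conversion from an open-coded call_rcu() callback to kfree_rcu(), which only requires that the object embed a struct rcu_head (here sess->rcu). The general pattern, as a minimal sketch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct rcu_head	rcu;
	int		data;
};

static void example_free(struct example_obj *obj)
{
	/*
	 * Frees obj after an RCU grace period; equivalent to call_rcu()
	 * with a callback that only does kfree(), minus the callback.
	 */
	kfree_rcu(obj, rcu);
}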
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 957a0b98a5d9..1677238d281f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -66,11 +66,13 @@ enum {
66 * TODO: debug and remove the workaround. 66 * TODO: debug and remove the workaround.
67 */ 67 */
68enum { 68enum {
69 VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX) 69 VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
70 (1ULL << VIRTIO_SCSI_F_HOTPLUG)
70}; 71};
71 72
72#define VHOST_SCSI_MAX_TARGET 256 73#define VHOST_SCSI_MAX_TARGET 256
73#define VHOST_SCSI_MAX_VQ 128 74#define VHOST_SCSI_MAX_VQ 128
75#define VHOST_SCSI_MAX_EVENT 128
74 76
75struct vhost_scsi { 77struct vhost_scsi {
76 /* Protected by vhost_scsi->dev.mutex */ 78 /* Protected by vhost_scsi->dev.mutex */
@@ -82,6 +84,12 @@ struct vhost_scsi {
82 84
83 struct vhost_work vs_completion_work; /* cmd completion work item */ 85 struct vhost_work vs_completion_work; /* cmd completion work item */
84 struct llist_head vs_completion_list; /* cmd completion queue */ 86 struct llist_head vs_completion_list; /* cmd completion queue */
87
88 struct vhost_work vs_event_work; /* evt injection work item */
89 struct llist_head vs_event_list; /* evt injection queue */
90
91 bool vs_events_missed; /* any missed events, protected by vq->mutex */
92 int vs_events_nr; /* num of pending events, protected by vq->mutex */
85}; 93};
86 94
87/* Local pointer to allocated TCM configfs fabric module */ 95/* Local pointer to allocated TCM configfs fabric module */
@@ -349,6 +357,37 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
349 return 0; 357 return 0;
350} 358}
351 359
360static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
361{
362 vs->vs_events_nr--;
363 kfree(evt);
364}
365
366static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
367 u32 event, u32 reason)
368{
369 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
370 struct tcm_vhost_evt *evt;
371
372 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
373 vs->vs_events_missed = true;
374 return NULL;
375 }
376
377 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
378 if (!evt) {
379 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
380 vs->vs_events_missed = true;
381 return NULL;
382 }
383
384 evt->event.event = event;
385 evt->event.reason = reason;
386 vs->vs_events_nr++;
387
388 return evt;
389}
390
352static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) 391static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
353{ 392{
354 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 393 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -367,6 +406,75 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
367 kfree(tv_cmd); 406 kfree(tv_cmd);
368} 407}
369 408
409static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
410 struct tcm_vhost_evt *evt)
411{
412 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
413 struct virtio_scsi_event *event = &evt->event;
414 struct virtio_scsi_event __user *eventp;
415 unsigned out, in;
416 int head, ret;
417
418 if (!vq->private_data) {
419 vs->vs_events_missed = true;
420 return;
421 }
422
423again:
424 vhost_disable_notify(&vs->dev, vq);
425 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
426 ARRAY_SIZE(vq->iov), &out, &in,
427 NULL, NULL);
428 if (head < 0) {
429 vs->vs_events_missed = true;
430 return;
431 }
432 if (head == vq->num) {
433 if (vhost_enable_notify(&vs->dev, vq))
434 goto again;
435 vs->vs_events_missed = true;
436 return;
437 }
438
439 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
440 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
441 vq->iov[out].iov_len);
442 vs->vs_events_missed = true;
443 return;
444 }
445
446 if (vs->vs_events_missed) {
447 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
448 vs->vs_events_missed = false;
449 }
450
451 eventp = vq->iov[out].iov_base;
452 ret = __copy_to_user(eventp, event, sizeof(*event));
453 if (!ret)
454 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
455 else
456 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
457}
458
459static void tcm_vhost_evt_work(struct vhost_work *work)
460{
461 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
462 vs_event_work);
463 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
464 struct tcm_vhost_evt *evt;
465 struct llist_node *llnode;
466
467 mutex_lock(&vq->mutex);
468 llnode = llist_del_all(&vs->vs_event_list);
469 while (llnode) {
470 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
471 llnode = llist_next(llnode);
472 tcm_vhost_do_evt_work(vs, evt);
473 tcm_vhost_free_evt(vs, evt);
474 }
475 mutex_unlock(&vq->mutex);
476}
477
370/* Fill in status and signal that we are done processing this command 478/* Fill in status and signal that we are done processing this command
371 * 479 *
372 * This is scheduled in the vhost work queue so we are called with the owner 480 * This is scheduled in the vhost work queue so we are called with the owner
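The two hunks above form a small producer/consumer pair around the event virtqueue; condensed (the producer side, tcm_vhost_send_evt(), is added further down in this diff):

/*
 * Producer, called with vq->mutex held:
 *	evt = tcm_vhost_allocate_evt(vs, event, reason);  // bounded by VHOST_SCSI_MAX_EVENT
 *	llist_add(&evt->list, &vs->vs_event_list);
 *	vhost_work_queue(&vs->dev, &vs->vs_event_work);
 *
 * Consumer, vhost worker thread (tcm_vhost_evt_work() above):
 *	llnode = llist_del_all(&vs->vs_event_list);
 *	for each evt: tcm_vhost_do_evt_work() copies evt->event into a guest
 *	buffer on the event vq, OR-ing in VIRTIO_SCSI_T_EVENTS_MISSED if any
 *	event had to be dropped, then tcm_vhost_free_evt() releases it.
 */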
@@ -777,9 +885,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
777 pr_debug("%s: The handling func for control queue.\n", __func__); 885 pr_debug("%s: The handling func for control queue.\n", __func__);
778} 886}
779 887
888static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
889 struct se_lun *lun, u32 event, u32 reason)
890{
891 struct tcm_vhost_evt *evt;
892
893 evt = tcm_vhost_allocate_evt(vs, event, reason);
894 if (!evt)
895 return;
896
897 if (tpg && lun) {
898 /* TODO: share lun setup code with virtio-scsi.ko */
899 /*
900 * Note: evt->event is zeroed when we allocate it and
901 * lun[4-7] need to be zero according to virtio-scsi spec.
902 */
903 evt->event.lun[0] = 0x01;
904 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
905 if (lun->unpacked_lun >= 256)
906 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
907 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
908 }
909
910 llist_add(&evt->list, &vs->vs_event_list);
911 vhost_work_queue(&vs->dev, &vs->vs_event_work);
912}
913
780static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 914static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
781{ 915{
782 pr_debug("%s: The handling func for event queue.\n", __func__); 916 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
917 poll.work);
918 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
919
920 mutex_lock(&vq->mutex);
921 if (!vq->private_data)
922 goto out;
923
924 if (vs->vs_events_missed)
925 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
926out:
927 mutex_unlock(&vq->mutex);
783} 928}
784 929
785static void vhost_scsi_handle_kick(struct vhost_work *work) 930static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -803,11 +948,15 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
803 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 948 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
804 vhost_scsi_flush_vq(vs, i); 949 vhost_scsi_flush_vq(vs, i);
805 vhost_work_flush(&vs->dev, &vs->vs_completion_work); 950 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
951 vhost_work_flush(&vs->dev, &vs->vs_event_work);
806} 952}
807 953
808/* 954/*
809 * Called from vhost_scsi_ioctl() context to walk the list of available 955 * Called from vhost_scsi_ioctl() context to walk the list of available
810 * tcm_vhost_tpg with an active struct tcm_vhost_nexus 956 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
957 *
958 * The lock nesting rule is:
959 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
811 */ 960 */
812static int vhost_scsi_set_endpoint( 961static int vhost_scsi_set_endpoint(
813 struct vhost_scsi *vs, 962 struct vhost_scsi *vs,
@@ -820,26 +969,27 @@ static int vhost_scsi_set_endpoint(
820 int index, ret, i, len; 969 int index, ret, i, len;
821 bool match = false; 970 bool match = false;
822 971
972 mutex_lock(&tcm_vhost_mutex);
823 mutex_lock(&vs->dev.mutex); 973 mutex_lock(&vs->dev.mutex);
974
824 /* Verify that ring has been setup correctly. */ 975 /* Verify that ring has been setup correctly. */
825 for (index = 0; index < vs->dev.nvqs; ++index) { 976 for (index = 0; index < vs->dev.nvqs; ++index) {
826 /* Verify that ring has been setup correctly. */ 977 /* Verify that ring has been setup correctly. */
827 if (!vhost_vq_access_ok(&vs->vqs[index])) { 978 if (!vhost_vq_access_ok(&vs->vqs[index])) {
828 mutex_unlock(&vs->dev.mutex); 979 ret = -EFAULT;
829 return -EFAULT; 980 goto out;
830 } 981 }
831 } 982 }
832 983
833 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; 984 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
834 vs_tpg = kzalloc(len, GFP_KERNEL); 985 vs_tpg = kzalloc(len, GFP_KERNEL);
835 if (!vs_tpg) { 986 if (!vs_tpg) {
836 mutex_unlock(&vs->dev.mutex); 987 ret = -ENOMEM;
837 return -ENOMEM; 988 goto out;
838 } 989 }
839 if (vs->vs_tpg) 990 if (vs->vs_tpg)
840 memcpy(vs_tpg, vs->vs_tpg, len); 991 memcpy(vs_tpg, vs->vs_tpg, len);
841 992
842 mutex_lock(&tcm_vhost_mutex);
843 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { 993 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
844 mutex_lock(&tv_tpg->tv_tpg_mutex); 994 mutex_lock(&tv_tpg->tv_tpg_mutex);
845 if (!tv_tpg->tpg_nexus) { 995 if (!tv_tpg->tpg_nexus) {
@@ -854,20 +1004,19 @@ static int vhost_scsi_set_endpoint(
854 1004
855 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1005 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
856 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { 1006 if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
857 mutex_unlock(&tv_tpg->tv_tpg_mutex);
858 mutex_unlock(&tcm_vhost_mutex);
859 mutex_unlock(&vs->dev.mutex);
860 kfree(vs_tpg); 1007 kfree(vs_tpg);
861 return -EEXIST; 1008 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1009 ret = -EEXIST;
1010 goto out;
862 } 1011 }
863 tv_tpg->tv_tpg_vhost_count++; 1012 tv_tpg->tv_tpg_vhost_count++;
1013 tv_tpg->vhost_scsi = vs;
864 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; 1014 vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
865 smp_mb__after_atomic_inc(); 1015 smp_mb__after_atomic_inc();
866 match = true; 1016 match = true;
867 } 1017 }
868 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1018 mutex_unlock(&tv_tpg->tv_tpg_mutex);
869 } 1019 }
870 mutex_unlock(&tcm_vhost_mutex);
871 1020
872 if (match) { 1021 if (match) {
873 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 1022 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +1042,9 @@ static int vhost_scsi_set_endpoint(
893 kfree(vs->vs_tpg); 1042 kfree(vs->vs_tpg);
894 vs->vs_tpg = vs_tpg; 1043 vs->vs_tpg = vs_tpg;
895 1044
1045out:
896 mutex_unlock(&vs->dev.mutex); 1046 mutex_unlock(&vs->dev.mutex);
1047 mutex_unlock(&tcm_vhost_mutex);
897 return ret; 1048 return ret;
898} 1049}
899 1050
@@ -908,6 +1059,7 @@ static int vhost_scsi_clear_endpoint(
908 int index, ret, i; 1059 int index, ret, i;
909 u8 target; 1060 u8 target;
910 1061
1062 mutex_lock(&tcm_vhost_mutex);
911 mutex_lock(&vs->dev.mutex); 1063 mutex_lock(&vs->dev.mutex);
912 /* Verify that ring has been setup correctly. */ 1064 /* Verify that ring has been setup correctly. */
913 for (index = 0; index < vs->dev.nvqs; ++index) { 1065 for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +1070,8 @@ static int vhost_scsi_clear_endpoint(
918 } 1070 }
919 1071
920 if (!vs->vs_tpg) { 1072 if (!vs->vs_tpg) {
921 mutex_unlock(&vs->dev.mutex); 1073 ret = 0;
922 return 0; 1074 goto err_dev;
923 } 1075 }
924 1076
925 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 1077 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -944,6 +1096,7 @@ static int vhost_scsi_clear_endpoint(
944 goto err_tpg; 1096 goto err_tpg;
945 } 1097 }
946 tv_tpg->tv_tpg_vhost_count--; 1098 tv_tpg->tv_tpg_vhost_count--;
1099 tv_tpg->vhost_scsi = NULL;
947 vs->vs_tpg[target] = NULL; 1100 vs->vs_tpg[target] = NULL;
948 match = true; 1101 match = true;
949 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1102 mutex_unlock(&tv_tpg->tv_tpg_mutex);
@@ -964,14 +1117,16 @@ static int vhost_scsi_clear_endpoint(
964 vhost_scsi_flush(vs); 1117 vhost_scsi_flush(vs);
965 kfree(vs->vs_tpg); 1118 kfree(vs->vs_tpg);
966 vs->vs_tpg = NULL; 1119 vs->vs_tpg = NULL;
1120 WARN_ON(vs->vs_events_nr);
967 mutex_unlock(&vs->dev.mutex); 1121 mutex_unlock(&vs->dev.mutex);
968 1122 mutex_unlock(&tcm_vhost_mutex);
969 return 0; 1123 return 0;
970 1124
971err_tpg: 1125err_tpg:
972 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1126 mutex_unlock(&tv_tpg->tv_tpg_mutex);
973err_dev: 1127err_dev:
974 mutex_unlock(&vs->dev.mutex); 1128 mutex_unlock(&vs->dev.mutex);
1129 mutex_unlock(&tcm_vhost_mutex);
975 return ret; 1130 return ret;
976} 1131}
977 1132
@@ -1003,6 +1158,10 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1003 return -ENOMEM; 1158 return -ENOMEM;
1004 1159
1005 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); 1160 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
1161 vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
1162
1163 s->vs_events_nr = 0;
1164 s->vs_events_missed = false;
1006 1165
1007 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; 1166 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
1008 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; 1167 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -1029,6 +1188,8 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1029 vhost_scsi_clear_endpoint(s, &t); 1188 vhost_scsi_clear_endpoint(s, &t);
1030 vhost_dev_stop(&s->dev); 1189 vhost_dev_stop(&s->dev);
1031 vhost_dev_cleanup(&s->dev, false); 1190 vhost_dev_cleanup(&s->dev, false);
1191 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1192 vhost_scsi_flush(s);
1032 kfree(s); 1193 kfree(s);
1033 return 0; 1194 return 0;
1034} 1195}
@@ -1040,8 +1201,11 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1040 struct vhost_scsi_target backend; 1201 struct vhost_scsi_target backend;
1041 void __user *argp = (void __user *)arg; 1202 void __user *argp = (void __user *)arg;
1042 u64 __user *featurep = argp; 1203 u64 __user *featurep = argp;
1204 u32 __user *eventsp = argp;
1205 u32 events_missed;
1043 u64 features; 1206 u64 features;
1044 int r, abi_version = VHOST_SCSI_ABI_VERSION; 1207 int r, abi_version = VHOST_SCSI_ABI_VERSION;
1208 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
1045 1209
1046 switch (ioctl) { 1210 switch (ioctl) {
1047 case VHOST_SCSI_SET_ENDPOINT: 1211 case VHOST_SCSI_SET_ENDPOINT:
@@ -1062,6 +1226,20 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1062 if (copy_to_user(argp, &abi_version, sizeof abi_version)) 1226 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1063 return -EFAULT; 1227 return -EFAULT;
1064 return 0; 1228 return 0;
1229 case VHOST_SCSI_SET_EVENTS_MISSED:
1230 if (get_user(events_missed, eventsp))
1231 return -EFAULT;
1232 mutex_lock(&vq->mutex);
1233 vs->vs_events_missed = events_missed;
1234 mutex_unlock(&vq->mutex);
1235 return 0;
1236 case VHOST_SCSI_GET_EVENTS_MISSED:
1237 mutex_lock(&vq->mutex);
1238 events_missed = vs->vs_events_missed;
1239 mutex_unlock(&vq->mutex);
1240 if (put_user(events_missed, eventsp))
1241 return -EFAULT;
1242 return 0;
1065 case VHOST_GET_FEATURES: 1243 case VHOST_GET_FEATURES:
1066 features = VHOST_SCSI_FEATURES; 1244 features = VHOST_SCSI_FEATURES;
1067 if (copy_to_user(featurep, &features, sizeof features)) 1245 if (copy_to_user(featurep, &features, sizeof features))
@@ -1133,28 +1311,80 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1133 return "Unknown"; 1311 return "Unknown";
1134} 1312}
1135 1313
1314static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1315 struct se_lun *lun, bool plug)
1316{
1317
1318 struct vhost_scsi *vs = tpg->vhost_scsi;
1319 struct vhost_virtqueue *vq;
1320 u32 reason;
1321
1322 if (!vs)
1323 return;
1324
1325 mutex_lock(&vs->dev.mutex);
1326 if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1327 mutex_unlock(&vs->dev.mutex);
1328 return;
1329 }
1330
1331 if (plug)
1332 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1333 else
1334 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1335
1336 vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
1337 mutex_lock(&vq->mutex);
1338 tcm_vhost_send_evt(vs, tpg, lun,
1339 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1340 mutex_unlock(&vq->mutex);
1341 mutex_unlock(&vs->dev.mutex);
1342}
1343
1344static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1345{
1346 tcm_vhost_do_plug(tpg, lun, true);
1347}
1348
1349static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1350{
1351 tcm_vhost_do_plug(tpg, lun, false);
1352}
1353
1136static int tcm_vhost_port_link(struct se_portal_group *se_tpg, 1354static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1137 struct se_lun *lun) 1355 struct se_lun *lun)
1138{ 1356{
1139 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1357 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1140 struct tcm_vhost_tpg, se_tpg); 1358 struct tcm_vhost_tpg, se_tpg);
1141 1359
1360 mutex_lock(&tcm_vhost_mutex);
1361
1142 mutex_lock(&tv_tpg->tv_tpg_mutex); 1362 mutex_lock(&tv_tpg->tv_tpg_mutex);
1143 tv_tpg->tv_tpg_port_count++; 1363 tv_tpg->tv_tpg_port_count++;
1144 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1364 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1145 1365
1366 tcm_vhost_hotplug(tv_tpg, lun);
1367
1368 mutex_unlock(&tcm_vhost_mutex);
1369
1146 return 0; 1370 return 0;
1147} 1371}
1148 1372
1149static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, 1373static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1150 struct se_lun *se_lun) 1374 struct se_lun *lun)
1151{ 1375{
1152 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1376 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1153 struct tcm_vhost_tpg, se_tpg); 1377 struct tcm_vhost_tpg, se_tpg);
1154 1378
1379 mutex_lock(&tcm_vhost_mutex);
1380
1155 mutex_lock(&tv_tpg->tv_tpg_mutex); 1381 mutex_lock(&tv_tpg->tv_tpg_mutex);
1156 tv_tpg->tv_tpg_port_count--; 1382 tv_tpg->tv_tpg_port_count--;
1157 mutex_unlock(&tv_tpg->tv_tpg_mutex); 1383 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1384
1385 tcm_vhost_hotunplug(tv_tpg, lun);
1386
1387 mutex_unlock(&tcm_vhost_mutex);
1158} 1388}
1159 1389
1160static struct se_node_acl *tcm_vhost_make_nodeacl( 1390static struct se_node_acl *tcm_vhost_make_nodeacl(
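The four lun[] bytes filled in by tcm_vhost_send_evt() follow the virtio-scsi single-level LUN addressing the guest driver expects; restated as a standalone helper (sketch only, the driver open-codes it as shown above and relies on the event having been zeroed at allocation):

/* Sketch of the LUN encoding used above; bytes 4-7 of the field stay zero. */
static void example_encode_virtio_scsi_lun(u8 lun[8], u16 tpgt, u32 unpacked_lun)
{
	lun[0] = 0x01;					/* single-level LUN format */
	lun[1] = tpgt & 0xFF;				/* target number */
	if (unpacked_lun >= 256)
		lun[2] = (unpacked_lun >> 8) | 0x40;	/* flat-space high bits */
	lun[3] = unpacked_lun & 0xFF;
}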
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 1d2ae7a60e11..514b9fda230e 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
53 struct se_node_acl se_node_acl; 53 struct se_node_acl se_node_acl;
54}; 54};
55 55
56struct vhost_scsi;
56struct tcm_vhost_tpg { 57struct tcm_vhost_tpg {
57 /* Vhost port target portal group tag for TCM */ 58 /* Vhost port target portal group tag for TCM */
58 u16 tport_tpgt; 59 u16 tport_tpgt;
@@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
70 struct tcm_vhost_tport *tport; 71 struct tcm_vhost_tport *tport;
71 /* Returned by tcm_vhost_make_tpg() */ 72 /* Returned by tcm_vhost_make_tpg() */
72 struct se_portal_group se_tpg; 73 struct se_portal_group se_tpg;
74 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
75 struct vhost_scsi *vhost_scsi;
73}; 76};
74 77
75struct tcm_vhost_tport { 78struct tcm_vhost_tport {
@@ -83,6 +86,13 @@ struct tcm_vhost_tport {
83 struct se_wwn tport_wwn; 86 struct se_wwn tport_wwn;
84}; 87};
85 88
89struct tcm_vhost_evt {
90 /* event to be sent to guest */
91 struct virtio_scsi_event event;
92 /* event list, serviced from vhost worker thread */
93 struct llist_node list;
94};
95
86/* 96/*
87 * As per request from MST, keep TCM_VHOST related ioctl defines out of 97 * As per request from MST, keep TCM_VHOST related ioctl defines out of
88 * linux/vhost.h (user-space) for now.. 98 * linux/vhost.h (user-space) for now..
@@ -113,3 +123,6 @@ struct vhost_scsi_target {
113#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 123#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
114/* Changing this breaks userspace. */ 124/* Changing this breaks userspace. */
115#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) 125#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
126/* Set and get the events missed flag */
127#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
128#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
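The two new ioctls give userspace (QEMU's vhost-scsi code in practice) a way to detect that hotplug events were dropped and to clear the flag once it has rescanned. An illustrative usage sketch, not taken from QEMU:

#include <stdint.h>
#include <sys/ioctl.h>

static int resync_missed_events(int vhost_fd)
{
	uint32_t missed = 0;

	if (ioctl(vhost_fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed) < 0)
		return -1;
	if (missed) {
		/* rescan the SCSI bus here, then clear the flag */
		missed = 0;
		if (ioctl(vhost_fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed) < 0)
			return -1;
	}
	return 0;
}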
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
new file mode 100644
index 000000000000..23a87d0cd72c
--- /dev/null
+++ b/include/target/iscsi/iscsi_transport.h
@@ -0,0 +1,83 @@
1#include <linux/module.h>
2#include <linux/list.h>
3#include "../../../drivers/target/iscsi/iscsi_target_core.h"
4
5struct iscsit_transport {
6#define ISCSIT_TRANSPORT_NAME 16
7 char name[ISCSIT_TRANSPORT_NAME];
8 int transport_type;
9 struct module *owner;
10 struct list_head t_node;
11 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
12 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
13 void (*iscsit_free_np)(struct iscsi_np *);
14 void (*iscsit_free_conn)(struct iscsi_conn *);
15 struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t);
16 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
17 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
18 int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
19 int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
20 int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
21 int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
22 int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
23};
24
25/*
26 * From iscsi_target_transport.c
27 */
28
29extern int iscsit_register_transport(struct iscsit_transport *);
30extern void iscsit_unregister_transport(struct iscsit_transport *);
31extern struct iscsit_transport *iscsit_get_transport(int);
32extern void iscsit_put_transport(struct iscsit_transport *);
33
34/*
35 * From iscsi_target.c
36 */
37extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
38 struct iscsi_cmd *);
39extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
40 unsigned char *);
41extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
42extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
43 struct iscsi_scsi_req *);
44extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *,
45 struct iscsi_cmd **);
46extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
47 bool);
48extern int iscsit_handle_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
49 unsigned char *);
50extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
51 unsigned char *);
52extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
53 unsigned char *);
54extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
55 bool, struct iscsi_scsi_rsp *);
56extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
57 struct iscsi_nopin *, bool);
58extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
59 struct iscsi_tm_rsp *);
60extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
61 struct iscsi_reject *);
62extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
63 struct iscsi_logout_rsp *);
64extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
65/*
66 * From iscsi_target_device.c
67 */
68extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
69/*
70 * From iscsi_target_erl1.c
71 */
72extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
73
74/*
75 * From iscsi_target_tmr.c
76 */
77extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
78
79/*
80 * From iscsi_target_util.c
81 */
82extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
83extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
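Filled in, the template looks roughly like the sketch below; every my_* symbol is hypothetical, and the concrete implementations are provided by the traditional TCP path in iscsi_target.c and by the new iser-target driver:

static struct iscsit_transport my_transport = {
	.name			= "my_transport",
	.transport_type		= ISCSI_TCP,		/* or a transport-specific type */
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= my_setup_np,		/* bring up the network portal */
	.iscsit_accept_np	= my_accept_np,		/* accept an incoming connection */
	.iscsit_free_np		= my_free_np,
	.iscsit_free_conn	= my_free_conn,
	.iscsit_alloc_cmd	= my_alloc_cmd,		/* per-transport iscsi_cmd allocation */
	.iscsit_get_login_rx	= my_get_login_rx,	/* receive a login request PDU */
	.iscsit_put_login_tx	= my_put_login_tx,	/* transmit a login response PDU */
	.iscsit_immediate_queue	= my_immediate_queue,	/* immediate queue processing */
	.iscsit_response_queue	= my_response_queue,	/* response queue processing */
	.iscsit_get_dataout	= my_get_dataout,	/* build R2Ts / solicit Data-Out */
	.iscsit_queue_data_in	= my_queue_data_in,	/* queue Data-In for a READ */
	.iscsit_queue_status	= my_queue_status,	/* queue SCSI status / response */
};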
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b128c20770bc..ffa2696d64dc 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -60,6 +60,10 @@ sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
60u32 sbc_get_device_rev(struct se_device *dev); 60u32 sbc_get_device_rev(struct se_device *dev);
61u32 sbc_get_device_type(struct se_device *dev); 61u32 sbc_get_device_type(struct se_device *dev);
62sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); 62sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
63sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
64 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
65 sector_t lba, sector_t nolb),
66 void *priv);
63 67
64void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 68void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
65int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); 69int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index aaa1ee6ab391..ba3471b73c07 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -120,7 +120,7 @@ bool transport_wait_for_tasks(struct se_cmd *);
120int transport_check_aborted_status(struct se_cmd *, int); 120int transport_check_aborted_status(struct se_cmd *, int);
121int transport_send_check_condition_and_sense(struct se_cmd *, 121int transport_send_check_condition_and_sense(struct se_cmd *,
122 sense_reason_t, int); 122 sense_reason_t, int);
123 123int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
124int target_put_sess_cmd(struct se_session *, struct se_cmd *); 124int target_put_sess_cmd(struct se_session *, struct se_cmd *);
125void target_sess_cmd_list_set_waiting(struct se_session *); 125void target_sess_cmd_list_set_waiting(struct se_session *);
126void target_wait_for_sess_cmds(struct se_session *, int); 126void target_wait_for_sess_cmds(struct se_session *, int);