author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 21:02:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-19 21:02:22 -0500
commit		ed55635e2e4df3169f21ae4047004b7235de956e (patch)
tree		59483776aa04d2b87023059f8a84c5c08d15373f /drivers/infiniband
parent		5be95b7e24bde4d93ff1bff5911b303043753168 (diff)
parent		ae450e246e8540300699480a3780a420a028b73f (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"The highlights this merge window include:
- Allow target fabric drivers to function as built-in. (Roland)
- Fix tcm_loop multi-TPG endpoint nexus bug. (Hannes)
- Move per device config_item_type into se_subsystem_api, allowing
configfs attributes to be defined at module_init time. (Jerome +
nab)
- Convert existing IBLOCK/FILEIO/RAMDISK/PSCSI/TCMU drivers to use
external configfs attributes. (nab)
- A number of iser-target fixes related to active session + network
portal shutdown stability during extended stress testing. (Sagi +
Slava)
- Dynamic allocation of T10-PI contexts for iser-target, fixing a
potentially bogus iscsi_np->tpg_np pointer reference in >= v3.14
code. (Sagi)
- iser-target performance + scalability improvements. (Sagi)
- Fixes for SPC-4 Persistent Reservation AllRegistrants spec
compliance. (Ilias + James + nab)
- Avoid potential short kernel_sendmsg() in iscsi-target for now until
Al's conversion to use msghdr iteration is merged post -rc1.
(Viro)
Also, Sagi has requested a number of iser-target patches (9) that
address stability issues he's encountered during extended stress
testing be considered for v3.10.y + v3.14.y code. Given the amount of
LOC involved, it will certainly require extra backporting effort.
Apologies in advance to Greg-KH & Co on this. Sagi and I will be
working post-merge to ensure they each get applied correctly"
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (53 commits)
target: Allow AllRegistrants to re-RESERVE existing reservation
uapi/linux/target_core_user.h: fix headers_install.sh badness
iscsi-target: Fail connection on short sendmsg writes
iscsi-target: nullify session in failed login sequence
target: Avoid dropping AllRegistrants reservation during unregister
target: Fix R_HOLDER bit usage for AllRegistrants
iscsi-target: Drop left-over bogus iscsi_np->tpg_np
iser-target: Fix wc->wr_id cast warning
iser-target: Remove code duplication
iser-target: Adjust log levels and prettify some prints
iser-target: Use debug_level parameter to control logging level
iser-target: Fix logout sequence
iser-target: Don't wait for session commands from completion context
iser-target: Reduce CQ lock contention by batch polling
iser-target: Introduce isert_poll_budget
iser-target: Remove an atomic operation from the IO path
iser-target: Remove redundant call to isert_conn_terminate
iser-target: Use single CQ for TX and RX
iser-target: Centralize completion elements to a context
iser-target: Cast wr_id with uintptr_t instead of unsigned long
...
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 1599
 -rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h |   80
 2 files changed, 886 insertions(+), 793 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/in6.h>
-#include <linux/llist.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include <target/target_core_base.h>
@@ -36,11 +35,17 @@
 #define ISERT_MAX_CONN		8
 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 #define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+				 ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
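The debug_level parameter introduced above is what the isert_dbg()/isert_info()/isert_warn() wrappers used throughout the rest of this diff key off; the macros themselves live in ib_isert.h, which is not part of this hunk. A minimal sketch of the intended gating, assuming the usual printk-based convention:

  /*
   * Sketch only -- the real macros are defined in ib_isert.h (not shown
   * here). Assumed convention: higher debug_level means chattier output.
   */
  extern int isert_debug_level;

  #define isert_dbg(fmt, arg...)					\
  	do {							\
  		if (unlikely(isert_debug_level > 2))		\
  			printk(KERN_DEBUG "isert: " fmt, ##arg);	\
  	} while (0)

Because the parameter is registered with mode 0644, the level can also be raised at runtime through /sys/module/ib_isert/parameters/debug_level, without reloading the module.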
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+	return (conn->pi_support &&
+		cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)context;
 
-	pr_err("isert_qp_event_callback event: %d\n", e->event);
+	isert_err("conn %p event: %d\n", isert_conn, e->event);
 	switch (e->event) {
 	case IB_EVENT_COMM_EST:
 		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
 		break;
 	case IB_EVENT_QP_LAST_WQE_REACHED:
-		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
 		break;
 	default:
 		break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 
 	ret = ib_query_device(ib_dev, devattr);
 	if (ret) {
-		pr_err("ib_query_device() failed: %d\n", ret);
+		isert_err("ib_query_device() failed: %d\n", ret);
 		return ret;
 	}
-	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
-	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
 
 	return 0;
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
-		    u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
 	struct isert_device *device = isert_conn->conn_device;
 	struct ib_qp_init_attr attr;
-	int ret, index, min_index = 0;
+	struct isert_comp *comp;
+	int ret, i, min = 0;
 
 	mutex_lock(&device_list_mutex);
-	for (index = 0; index < device->cqs_used; index++)
-		if (device->cq_active_qps[index] <
-		    device->cq_active_qps[min_index])
-			min_index = index;
-	device->cq_active_qps[min_index]++;
-	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+	for (i = 0; i < device->comps_used; i++)
+		if (device->comps[i].active_qps <
+		    device->comps[min].active_qps)
+			min = i;
+	comp = &device->comps[min];
+	comp->active_qps++;
+	isert_info("conn %p, using comp %p min_index: %d\n",
+		   isert_conn, comp, min);
 	mutex_unlock(&device_list_mutex);
 
 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
 	attr.event_handler = isert_qp_event_callback;
 	attr.qp_context = isert_conn;
-	attr.send_cq = device->dev_tx_cq[min_index];
-	attr.recv_cq = device->dev_rx_cq[min_index];
+	attr.send_cq = comp->cq;
+	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
-	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
 	/*
 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
 	 * work-around for RDMA_READs with ConnectX-2.
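The loop above is the least-loaded completion-vector pick: under device_list_mutex it scans device->comps[] for the context with the fewest active QPs, takes it, and bumps its counter (the error path below and isert_connect_release() decrement it again). The same idea in isolation, with a hypothetical comp_ctx standing in for struct isert_comp:

  /* Hedged sketch of the least-loaded pick; comp_ctx is a stand-in type. */
  struct comp_ctx {
  	int active_qps;
  };

  static int pick_least_loaded(struct comp_ctx *comps, int n)
  {
  	int i, min = 0;

  	for (i = 1; i < n; i++)
  		if (comps[i].active_qps < comps[min].active_qps)
  			min = i;
  	comps[min].active_qps++;	/* caller must hold the list mutex */
  	return min;
  }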
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
 	attr.cap.max_recv_sge = 1;
 	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 	attr.qp_type = IB_QPT_RC;
-	if (protection)
+	if (device->pi_capable)
 		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
-	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
-		 cma_id->device);
-	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
-		 isert_conn->conn_pd->device);
-
 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
 	if (ret) {
-		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-		return ret;
+		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+		goto err;
 	}
 	isert_conn->conn_qp = cma_id->qp;
-	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
 	return 0;
+err:
+	mutex_lock(&device_list_mutex);
+	comp->active_qps--;
+	mutex_unlock(&device_list_mutex);
+
+	return ret;
 }
 
 static void
 isert_cq_event_callback(struct ib_event *e, void *context)
 {
-	pr_debug("isert_cq_event_callback event: %d\n", e->event);
+	isert_dbg("event: %d\n", e->event);
 }
 
 static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	}
 
 	isert_conn->conn_rx_desc_head = 0;
+
 	return 0;
 
 dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
 	kfree(isert_conn->conn_rx_descs);
 	isert_conn->conn_rx_descs = NULL;
 fail:
+	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
 	return -ENOMEM;
 }
 
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
 
 static int
 isert_create_device_ib_res(struct isert_device *device)
 {
 	struct ib_device *ib_dev = device->ib_device;
-	struct isert_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr;
-	int ret = 0, i, j;
-	int max_rx_cqe, max_tx_cqe;
+	int ret = 0, i;
+	int max_cqe;
 
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(ib_dev, dev_attr);
 	if (ret)
 		return ret;
 
-	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
-	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
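With TX and RX sharing one CQ per vector, the depth bound collapses from two per-direction limits into the single ISER_MAX_CQ_LEN defined earlier, still clamped to what the HCA advertises. Spelled out with the names from this diff (the extra ISERT_MAX_CONN slots presumably cover the one additional recv WR each connection posts for its login buffer):

  /* Effective CQ depth after this patch (names as in the diff above). */
  max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
  /*
   * ISER_MAX_CQ_LEN = ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN   (RX budget)
   *                 + ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN   (TX budget)
   *                 + ISERT_MAX_CONN                            (one per conn)
   */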
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->pi_capable = dev_attr->device_cap_flags &
 			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
 
-	device->cqs_used = min_t(int, num_online_cpus(),
-				 device->ib_device->num_comp_vectors);
-	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support "
-		 "Fast registration %d pi_capable %d\n",
-		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_fastreg,
-		 device->pi_capable);
-	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
-				  device->cqs_used, GFP_KERNEL);
-	if (!device->cq_desc) {
-		pr_err("Unable to allocate device->cq_desc\n");
+	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+				 device->ib_device->num_comp_vectors));
+	isert_info("Using %d CQs, %s supports %d vectors support "
+		   "Fast registration %d pi_capable %d\n",
+		   device->comps_used, device->ib_device->name,
+		   device->ib_device->num_comp_vectors, device->use_fastreg,
+		   device->pi_capable);
+
+	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+				GFP_KERNEL);
+	if (!device->comps) {
+		isert_err("Unable to allocate completion contexts\n");
 		return -ENOMEM;
 	}
-	cq_desc = device->cq_desc;
-
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc[i].device = device;
-		cq_desc[i].cq_index = i;
-
-		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
-		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_rx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_rx_cqe, i);
-		if (IS_ERR(device->dev_rx_cq[i])) {
-			ret = PTR_ERR(device->dev_rx_cq[i]);
-			device->dev_rx_cq[i] = NULL;
-			goto out_cq;
-		}
 
-		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
-		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
-						isert_cq_tx_callback,
-						isert_cq_event_callback,
-						(void *)&cq_desc[i],
-						max_tx_cqe, i);
-		if (IS_ERR(device->dev_tx_cq[i])) {
-			ret = PTR_ERR(device->dev_tx_cq[i]);
-			device->dev_tx_cq[i] = NULL;
-			goto out_cq;
-		}
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
-		if (ret)
+		comp->device = device;
+		INIT_WORK(&comp->work, isert_cq_work);
+		comp->cq = ib_create_cq(device->ib_device,
+					isert_cq_callback,
+					isert_cq_event_callback,
+					(void *)comp,
+					max_cqe, i);
+		if (IS_ERR(comp->cq)) {
+			ret = PTR_ERR(comp->cq);
+			comp->cq = NULL;
 			goto out_cq;
+		}
 
-		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 	}
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
 	return 0;
 
 out_cq:
-	for (j = 0; j < i; j++) {
-		cq_desc = &device->cq_desc[j];
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-		if (device->dev_rx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_rx_work);
-			ib_destroy_cq(device->dev_rx_cq[j]);
-		}
-		if (device->dev_tx_cq[j]) {
-			cancel_work_sync(&cq_desc->cq_tx_work);
-			ib_destroy_cq(device->dev_tx_cq[j]);
+		if (comp->cq) {
+			cancel_work_sync(&comp->work);
+			ib_destroy_cq(comp->cq);
 		}
 	}
-	kfree(device->cq_desc);
+	kfree(device->comps);
 
 	return ret;
 }
@@ -330,21 +328,18 @@ out_cq:
 static void
 isert_free_device_ib_res(struct isert_device *device)
 {
-	struct isert_cq_desc *cq_desc;
 	int i;
 
-	for (i = 0; i < device->cqs_used; i++) {
-		cq_desc = &device->cq_desc[i];
+	isert_info("device %p\n", device);
 
-		cancel_work_sync(&cq_desc->cq_rx_work);
-		cancel_work_sync(&cq_desc->cq_tx_work);
-		ib_destroy_cq(device->dev_rx_cq[i]);
-		ib_destroy_cq(device->dev_tx_cq[i]);
-		device->dev_rx_cq[i] = NULL;
-		device->dev_tx_cq[i] = NULL;
-	}
+	for (i = 0; i < device->comps_used; i++) {
+		struct isert_comp *comp = &device->comps[i];
 
-	kfree(device->cq_desc);
+		cancel_work_sync(&comp->work);
+		ib_destroy_cq(comp->cq);
+		comp->cq = NULL;
+	}
+	kfree(device->comps);
 }
 
 static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
 {
 	mutex_lock(&device_list_mutex);
 	device->refcount--;
+	isert_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		isert_free_device_ib_res(device);
 		list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 	list_for_each_entry(device, &device_list, dev_node) {
 		if (device->ib_device->node_guid == cma_id->device->node_guid) {
 			device->refcount++;
+			isert_info("Found iser device %p refcount %d\n",
+				   device, device->refcount);
 			mutex_unlock(&device_list_mutex);
 			return device;
 		}
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 
 	device->refcount++;
 	list_add_tail(&device->dev_node, &device_list);
+	isert_info("Created a new iser device %p refcount %d\n",
+		   device, device->refcount);
 	mutex_unlock(&device_list_mutex);
 
 	return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p fastreg pool", isert_conn);
+	isert_info("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
 				 &isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 	}
 
 	if (i < isert_conn->conn_fr_pool_size)
-		pr_warn("Pool still has %d regions registered\n",
+		isert_warn("Pool still has %d regions registered\n",
 			isert_conn->conn_fr_pool_size - i);
 }
 
 static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+		    struct ib_device *device,
+		    struct ib_pd *pd)
+{
+	struct ib_mr_init_attr mr_init_attr;
+	struct pi_context *pi_ctx;
+	int ret;
+
+	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+	if (!pi_ctx) {
+		isert_err("Failed to allocate pi context\n");
+		return -ENOMEM;
+	}
+
+	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+					    ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(pi_ctx->prot_frpl)) {
+		isert_err("Failed to allocate prot frpl err=%ld\n",
+			  PTR_ERR(pi_ctx->prot_frpl));
+		ret = PTR_ERR(pi_ctx->prot_frpl);
+		goto err_pi_ctx;
+	}
+
+	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(pi_ctx->prot_mr)) {
+		isert_err("Failed to allocate prot frmr err=%ld\n",
+			  PTR_ERR(pi_ctx->prot_mr));
+		ret = PTR_ERR(pi_ctx->prot_mr);
+		goto err_prot_frpl;
+	}
+	desc->ind |= ISERT_PROT_KEY_VALID;
+
+	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+	mr_init_attr.max_reg_descriptors = 2;
+	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+	if (IS_ERR(pi_ctx->sig_mr)) {
+		isert_err("Failed to allocate signature enabled mr err=%ld\n",
+			  PTR_ERR(pi_ctx->sig_mr));
+		ret = PTR_ERR(pi_ctx->sig_mr);
+		goto err_prot_mr;
+	}
+
+	desc->pi_ctx = pi_ctx;
+	desc->ind |= ISERT_SIG_KEY_VALID;
+	desc->ind &= ~ISERT_PROTECTED;
+
+	return 0;
+
+err_prot_mr:
+	ib_dereg_mr(desc->pi_ctx->prot_mr);
+err_prot_frpl:
+	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+	kfree(desc->pi_ctx);
+
+	return ret;
+}
+
+static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-		     struct fast_reg_descriptor *fr_desc, u8 protection)
+		     struct fast_reg_descriptor *fr_desc)
 {
 	int ret;
 
 	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
 							 ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_frpl)) {
-		pr_err("Failed to allocate data frpl err=%ld\n",
+		isert_err("Failed to allocate data frpl err=%ld\n",
 		       PTR_ERR(fr_desc->data_frpl));
 		return PTR_ERR(fr_desc->data_frpl);
 	}
 
 	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
 	if (IS_ERR(fr_desc->data_mr)) {
-		pr_err("Failed to allocate data frmr err=%ld\n",
+		isert_err("Failed to allocate data frmr err=%ld\n",
 		       PTR_ERR(fr_desc->data_mr));
 		ret = PTR_ERR(fr_desc->data_mr);
 		goto err_data_frpl;
 	}
-	pr_debug("Create fr_desc %p page_list %p\n",
-		 fr_desc, fr_desc->data_frpl->page_list);
 	fr_desc->ind |= ISERT_DATA_KEY_VALID;
 
-	if (protection) {
-		struct ib_mr_init_attr mr_init_attr = {0};
-		struct pi_context *pi_ctx;
-
-		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
-		if (!fr_desc->pi_ctx) {
-			pr_err("Failed to allocate pi context\n");
-			ret = -ENOMEM;
-			goto err_data_mr;
-		}
-		pi_ctx = fr_desc->pi_ctx;
-
-		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_frpl)) {
-			pr_err("Failed to allocate prot frpl err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_frpl));
-			ret = PTR_ERR(pi_ctx->prot_frpl);
-			goto err_pi_ctx;
-		}
-
-		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(pi_ctx->prot_mr)) {
-			pr_err("Failed to allocate prot frmr err=%ld\n",
-			       PTR_ERR(pi_ctx->prot_mr));
-			ret = PTR_ERR(pi_ctx->prot_mr);
-			goto err_prot_frpl;
-		}
-		fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
-		mr_init_attr.max_reg_descriptors = 2;
-		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
-		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
-		if (IS_ERR(pi_ctx->sig_mr)) {
-			pr_err("Failed to allocate signature enabled mr err=%ld\n",
-			       PTR_ERR(pi_ctx->sig_mr));
-			ret = PTR_ERR(pi_ctx->sig_mr);
-			goto err_prot_mr;
-		}
-		fr_desc->ind |= ISERT_SIG_KEY_VALID;
-	}
-	fr_desc->ind &= ~ISERT_PROTECTED;
+	isert_dbg("Created fr_desc %p\n", fr_desc);
 
 	return 0;
-err_prot_mr:
-	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
-	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
-	kfree(fr_desc->pi_ctx);
-err_data_mr:
-	ib_dereg_mr(fr_desc->data_mr);
+
 err_data_frpl:
 	ib_free_fast_reg_page_list(fr_desc->data_frpl);
 
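isert_create_pi_ctx() above factors the T10-PI resources (protection frpl and frmr plus the signature-enabled MR) out of isert_create_fr_desc(), so the fastreg pool below is now built without protection contexts. They are presumably allocated on demand the first time a descriptor serves a command for which isert_prot_cmd() is true; that registration hunk is not shown on this page, but the call would look roughly like:

  /*
   * Hypothetical call site -- sketch only, the real one is in the RDMA
   * registration path, outside this diff.
   */
  if (isert_prot_cmd(isert_conn, se_cmd) && !fr_desc->pi_ctx) {
  	ret = isert_create_pi_ctx(fr_desc, ib_dev, isert_conn->conn_pd);
  	if (ret) {
  		isert_err("conn %p failed to allocate pi_ctx\n", isert_conn);
  		return ret;
  	}
  }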
@@ -513,7 +523,7 @@ err_data_frpl:
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 	for (i = 0; i < tag_num; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
-			pr_err("Failed to allocate fast_reg descriptor\n");
+			isert_err("Failed to allocate fast_reg descriptor\n");
 			ret = -ENOMEM;
 			goto err;
 		}
 
 		ret = isert_create_fr_desc(device->ib_device,
-					   isert_conn->conn_pd, fr_desc,
-					   pi_support);
+					   isert_conn->conn_pd, fr_desc);
 		if (ret) {
-			pr_err("Failed to create fastreg descriptor err=%d\n",
+			isert_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
 			kfree(fr_desc);
 			goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p fastreg pool size=%d",
+	isert_dbg("Creating conn %p fastreg pool size=%d",
 		  isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
@@ -563,47 +572,45 @@ err:
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-	struct iscsi_np *np = cma_id->context;
-	struct isert_np *isert_np = np->np_context;
+	struct isert_np *isert_np = cma_id->context;
+	struct iscsi_np *np = isert_np->np;
 	struct isert_conn *isert_conn;
 	struct isert_device *device;
 	struct ib_device *ib_dev = cma_id->device;
 	int ret = 0;
-	u8 pi_support;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (!np->enabled) {
 		spin_unlock_bh(&np->np_thread_lock);
-		pr_debug("iscsi_np is not enabled, reject connect request\n");
+		isert_dbg("iscsi_np is not enabled, reject connect request\n");
 		return rdma_reject(cma_id, NULL, 0);
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+	isert_dbg("cma_id: %p, portal: %p\n",
 		 cma_id, cma_id->context);
 
 	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
 	if (!isert_conn) {
-		pr_err("Unable to allocate isert_conn\n");
+		isert_err("Unable to allocate isert_conn\n");
 		return -ENOMEM;
 	}
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
+	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->conn_wait);
-	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
-	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
 
 	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
 	if (!isert_conn->login_buf) {
-		pr_err("Unable to allocate isert_conn->login_buf\n");
+		isert_err("Unable to allocate isert_conn->login_buf\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->login_req_buf = isert_conn->login_buf;
 	isert_conn->login_rsp_buf = isert_conn->login_buf +
 				    ISCSI_DEF_MAX_RECV_SEG_LEN;
-	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
 		 isert_conn->login_buf, isert_conn->login_req_buf,
 		 isert_conn->login_rsp_buf);
 
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
 	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
 		       ret);
 		isert_conn->login_req_dma = 0;
 		goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
 	if (ret) {
-		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+		isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
 		       ret);
 		isert_conn->login_rsp_dma = 0;
 		goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->initiator_depth = min_t(u8,
 				event->param.conn.initiator_depth,
 				device->dev_attr.max_qp_init_rd_atom);
-	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
 
 	isert_conn->conn_device = device;
 	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
 	if (IS_ERR(isert_conn->conn_pd)) {
 		ret = PTR_ERR(isert_conn->conn_pd);
-		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
 		       isert_conn, ret);
 		goto out_pd;
 	}
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 				   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(isert_conn->conn_mr)) {
 		ret = PTR_ERR(isert_conn->conn_mr);
-		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
 		       isert_conn, ret);
 		goto out_mr;
 	}
 
-	pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
-	if (pi_support && !device->pi_capable) {
-		pr_err("Protection information requested but not supported, "
-		       "rejecting connect request\n");
-		ret = rdma_reject(cma_id, NULL, 0);
-		goto out_mr;
-	}
+	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	if (ret)
+		goto out_conn_dev;
 
-	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		goto out_conn_dev;
+
+	ret = isert_rdma_accept(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
 	mutex_unlock(&isert_np->np_accept_mutex);
 
-	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+	isert_info("np %p: Allow accept_np to continue\n", np);
 	up(&isert_np->np_sem);
 	return 0;
 
@@ -705,6 +712,7 @@ out_login_buf:
 	kfree(isert_conn->login_buf);
 out:
 	kfree(isert_conn);
+	rdma_reject(cma_id, NULL, 0);
 	return ret;
 }
 
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_device *device = isert_conn->conn_device;
-	int cq_index;
 
-	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+	isert_dbg("conn %p\n", isert_conn);
 
 	if (device && device->use_fastreg)
 		isert_conn_free_fastreg_pool(isert_conn);
 
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
 	if (isert_conn->conn_qp) {
-		cq_index = ((struct isert_cq_desc *)
-			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
-		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
-		isert_conn->conn_device->cq_active_qps[cq_index]--;
+		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
 
-		rdma_destroy_qp(isert_conn->conn_cm_id);
-	}
+		isert_dbg("dec completion context %p active_qps\n", comp);
+		mutex_lock(&device_list_mutex);
+		comp->active_qps--;
+		mutex_unlock(&device_list_mutex);
 
-	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
+		ib_destroy_qp(isert_conn->conn_qp);
+	}
 
 	ib_dereg_mr(isert_conn->conn_mr);
 	ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	if (device)
 		isert_device_try_release(device);
-
-	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
 }
 
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
-	kref_get(&isert_conn->conn_kref);
+	isert_info("conn %p\n", isert_conn);
+
+	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+		isert_warn("conn %p connect_release is running\n", isert_conn);
+		return;
+	}
+
+	mutex_lock(&isert_conn->conn_mutex);
+	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+		isert_conn->state = ISER_CONN_UP;
+	mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
 	struct isert_conn *isert_conn = container_of(kref,
 				struct isert_conn, conn_kref);
 
-	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
-		 current->comm, current->pid);
+	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+		   current->pid);
 
 	isert_connect_release(isert_conn);
 }
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-	struct isert_conn *isert_conn = container_of(work,
-				struct isert_conn, conn_logout_work);
+	int err;
 
-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->state == ISER_CONN_UP)
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		isert_info("Terminating conn %p state %d\n",
+			   isert_conn, isert_conn->state);
 		isert_conn->state = ISER_CONN_TERMINATING;
-
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		goto wake_up;
-	}
-	if (!isert_conn->conn_cm_id) {
-		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
-		return;
+		err = rdma_disconnect(isert_conn->conn_cm_id);
+		if (err)
+			isert_warn("Failed rdma_disconnect isert_conn %p\n",
+				   isert_conn);
+		break;
+	default:
+		isert_warn("conn %p teminating in state %d\n",
+			   isert_conn, isert_conn->state);
 	}
+}
 
-	if (isert_conn->disconnect) {
-		/* Send DREQ/DREP towards our initiator */
-		rdma_disconnect(isert_conn->conn_cm_id);
-	}
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+		     enum rdma_cm_event_type event)
+{
+	isert_dbg("isert np %p, handling event %d\n", isert_np, event);
 
-	mutex_unlock(&isert_conn->conn_mutex);
+	switch (event) {
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		isert_np->np_cm_id = NULL;
+		break;
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		isert_np->np_cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->np_cm_id)) {
+			isert_err("isert np %p setup id failed: %ld\n",
+				  isert_np, PTR_ERR(isert_np->np_cm_id));
+			isert_np->np_cm_id = NULL;
+		}
+		break;
+	default:
+		isert_err("isert np %p Unexpected event %d\n",
+			  isert_np, event);
+	}
 
-wake_up:
-	complete(&isert_conn->conn_wait);
+	return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+			   enum rdma_cm_event_type event)
 {
+	struct isert_np *isert_np = cma_id->context;
 	struct isert_conn *isert_conn;
 
-	if (!cma_id->qp) {
-		struct isert_np *isert_np = cma_id->context;
+	if (isert_np->np_cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event);
 
-		isert_np->np_cm_id = NULL;
-		return -1;
-	}
+	isert_conn = cma_id->qp->qp_context;
 
-	isert_conn = (struct isert_conn *)cma_id->context;
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn_terminate(isert_conn);
+	mutex_unlock(&isert_conn->conn_mutex);
 
-	isert_conn->disconnect = disconnect;
-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-	schedule_work(&isert_conn->conn_logout_work);
+	isert_info("conn %p completing conn_wait\n", isert_conn);
+	complete(&isert_conn->conn_wait);
 
 	return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+	isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
-	bool disconnect = false;
 
-	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
-		 event->event, event->status, cma_id->context, cma_id);
+	isert_info("event %d status %d id %p np %p\n", event->event,
+		   event->status, cma_id, cma_id->context);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
 		if (ret)
-			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-			       event->event, ret);
+			isert_err("failed handle connect request %d\n", ret);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		isert_connected_handler(cma_id);
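Two points in the hunk above carry the stability fixes called out in the pull message. First, isert_conn_terminate() is only entered under conn_mutex and returns early for ISER_CONN_TERMINATING, so repeated CM events cannot double-disconnect. Second, isert_connected_handler() now uses kref_get_unless_zero(), which refuses to take a reference once the count has already dropped to zero, closing the race against a concurrent final put. The generic shape of that idiom, with obj/obj_release as stand-in names:

  /* Sketch of the race-safe kref take used above; obj is a stand-in. */
  if (!kref_get_unless_zero(&obj->kref))
  	return;		/* final put already ran; obj is being released */
  /* ... safe to use obj here ... */
  kref_put(&obj->kref, obj_release);	/* drop the reference when done */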
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-		disconnect = true;
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-		ret = isert_disconnected_handler(cma_id, disconnect);
+		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
+		isert_connect_error(cma_id);
+		break;
 	default:
-		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
 		break;
 	}
 
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 
 	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &isert_conn->conn_rx_descs[rx_head];
-		rx_wr->wr_id = (unsigned long)rx_desc;
+		rx_wr->wr_id = (uintptr_t)rx_desc;
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
 			   &rx_wr_failed);
 	if (ret) {
-		pr_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
 	} else {
-		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
 		isert_conn->conn_rx_desc_head = rx_head;
 	}
 	return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 			   ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
 	send_wr.next = NULL;
-	send_wr.wr_id = (unsigned long)tx_desc;
+	send_wr.wr_id = (uintptr_t)tx_desc;
 	send_wr.sg_list = tx_desc->tx_sg;
 	send_wr.num_sge = tx_desc->num_sge;
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	atomic_inc(&isert_conn->post_send_buf_count);
-
 	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
-	if (ret) {
-		pr_err("ib_post_send() failed, ret: %d\n", ret);
-		atomic_dec(&isert_conn->post_send_buf_count);
-	}
+	if (ret)
+		isert_err("ib_post_send() failed, ret: %d\n", ret);
 
 	return ret;
 }
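The (unsigned long) to (uintptr_t) changes in these hunks are the shortlog's wr_id cast fix: ib_send_wr.wr_id and ib_wc.wr_id are u64, and uintptr_t is the integer type guaranteed to round-trip a pointer, which also avoids cast-size warnings on 32-bit builds. A minimal illustration:

  /* Stashing a descriptor pointer in a u64 work-request id and back. */
  u64 wr_id = (uintptr_t)tx_desc;			/* pointer -> u64 */
  struct iser_tx_desc *desc =
  	(struct iser_tx_desc *)(uintptr_t)wr_id;	/* u64 -> pointer */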
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn, | |||
945 | 997 | ||
946 | if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { | 998 | if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { |
947 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; | 999 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; |
948 | pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); | 1000 | isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); |
949 | } | 1001 | } |
950 | } | 1002 | } |
951 | 1003 | ||
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, | |||
959 | dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, | 1011 | dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, |
960 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 1012 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
961 | if (ib_dma_mapping_error(ib_dev, dma_addr)) { | 1013 | if (ib_dma_mapping_error(ib_dev, dma_addr)) { |
962 | pr_err("ib_dma_mapping_error() failed\n"); | 1014 | isert_err("ib_dma_mapping_error() failed\n"); |
963 | return -ENOMEM; | 1015 | return -ENOMEM; |
964 | } | 1016 | } |
965 | 1017 | ||
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, | |||
968 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; | 1020 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
969 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; | 1021 | tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; |
970 | 1022 | ||
971 | pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" | 1023 | isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", |
972 | " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, | 1024 | tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, |
973 | tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); | 1025 | tx_desc->tx_sg[0].lkey); |
974 | 1026 | ||
975 | return 0; | 1027 | return 0; |
976 | } | 1028 | } |
977 | 1029 | ||
978 | static void | 1030 | static void |
979 | isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | 1031 | isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, |
980 | struct ib_send_wr *send_wr, bool coalesce) | 1032 | struct ib_send_wr *send_wr) |
981 | { | 1033 | { |
982 | struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; | 1034 | struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; |
983 | 1035 | ||
984 | isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; | 1036 | isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; |
985 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 1037 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
986 | send_wr->opcode = IB_WR_SEND; | 1038 | send_wr->opcode = IB_WR_SEND; |
987 | send_wr->sg_list = &tx_desc->tx_sg[0]; | 1039 | send_wr->sg_list = &tx_desc->tx_sg[0]; |
988 | send_wr->num_sge = isert_cmd->tx_desc.num_sge; | 1040 | send_wr->num_sge = isert_cmd->tx_desc.num_sge; |
989 | /* | ||
990 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED | ||
991 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. | ||
992 | */ | ||
993 | mutex_lock(&isert_conn->conn_mutex); | ||
994 | if (coalesce && isert_conn->state == ISER_CONN_UP && | ||
995 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { | ||
996 | tx_desc->llnode_active = true; | ||
997 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); | ||
998 | mutex_unlock(&isert_conn->conn_mutex); | ||
999 | return; | ||
1000 | } | ||
1001 | isert_conn->conn_comp_batch = 0; | ||
1002 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); | ||
1003 | mutex_unlock(&isert_conn->conn_mutex); | ||
1004 | |||
1005 | send_wr->send_flags = IB_SEND_SIGNALED; | 1041 | send_wr->send_flags = IB_SEND_SIGNALED; |
1006 | } | 1042 | } |
1007 | 1043 | ||
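Two independent changes land in this hunk: the send-completion coalescing machinery (the conn_comp_llist batching under conn_mutex) is removed so every send is signaled, and wr_id stores now go through uintptr_t rather than unsigned long. The cast matters because wr_id is a u64 field; a small self-contained demonstration of the round trip (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

struct tx_desc { int tag; };

int main(void)
{
	struct tx_desc desc = { .tag = 42 };

	/* wr_id is a u64; uintptr_t makes the pointer round trip
	 * well-defined on both 32- and 64-bit builds. */
	uint64_t wr_id = (uintptr_t)&desc;
	struct tx_desc *back = (struct tx_desc *)(uintptr_t)wr_id;

	printf("%d\n", back->tag);	/* prints 42 */
	return 0;
}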
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) | |||
1017 | sge.length = ISER_RX_LOGIN_SIZE; | 1053 | sge.length = ISER_RX_LOGIN_SIZE; |
1018 | sge.lkey = isert_conn->conn_mr->lkey; | 1054 | sge.lkey = isert_conn->conn_mr->lkey; |
1019 | 1055 | ||
1020 | pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", | 1056 | isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", |
1021 | sge.addr, sge.length, sge.lkey); | 1057 | sge.addr, sge.length, sge.lkey); |
1022 | 1058 | ||
1023 | memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); | 1059 | memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); |
1024 | rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; | 1060 | rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf; |
1025 | rx_wr.sg_list = &sge; | 1061 | rx_wr.sg_list = &sge; |
1026 | rx_wr.num_sge = 1; | 1062 | rx_wr.num_sge = 1; |
1027 | 1063 | ||
1028 | isert_conn->post_recv_buf_count++; | 1064 | isert_conn->post_recv_buf_count++; |
1029 | ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); | 1065 | ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); |
1030 | if (ret) { | 1066 | if (ret) { |
1031 | pr_err("ib_post_recv() failed: %d\n", ret); | 1067 | isert_err("ib_post_recv() failed: %d\n", ret); |
1032 | isert_conn->post_recv_buf_count--; | 1068 | isert_conn->post_recv_buf_count--; |
1033 | } | 1069 | } |
1034 | 1070 | ||
1035 | pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); | ||
1036 | return ret; | 1071 | return ret; |
1037 | } | 1072 | } |
1038 | 1073 | ||
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1072 | if (login->login_complete) { | 1107 | if (login->login_complete) { |
1073 | if (!conn->sess->sess_ops->SessionType && | 1108 | if (!conn->sess->sess_ops->SessionType && |
1074 | isert_conn->conn_device->use_fastreg) { | 1109 | isert_conn->conn_device->use_fastreg) { |
1075 | /* Normal Session and fastreg is used */ | 1110 | ret = isert_conn_create_fastreg_pool(isert_conn); |
1076 | u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; | ||
1077 | |||
1078 | ret = isert_conn_create_fastreg_pool(isert_conn, | ||
1079 | pi_support); | ||
1080 | if (ret) { | 1111 | if (ret) { |
1081 | pr_err("Conn: %p failed to create" | 1112 | isert_err("Conn: %p failed to create" |
1082 | " fastreg pool\n", isert_conn); | 1113 | " fastreg pool\n", isert_conn); |
1083 | return ret; | 1114 | return ret; |
1084 | } | 1115 | } |
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
1092 | if (ret) | 1123 | if (ret) |
1093 | return ret; | 1124 | return ret; |
1094 | 1125 | ||
1095 | isert_conn->state = ISER_CONN_UP; | 1126 | /* Now we are in FULL_FEATURE phase */ |
1127 | mutex_lock(&isert_conn->conn_mutex); | ||
1128 | isert_conn->state = ISER_CONN_FULL_FEATURE; | ||
1129 | mutex_unlock(&isert_conn->conn_mutex); | ||
1096 | goto post_send; | 1130 | goto post_send; |
1097 | } | 1131 | } |
1098 | 1132 | ||
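The login path now records a distinct ISER_CONN_FULL_FEATURE state, and takes conn_mutex for the assignment so teardown paths reading isert_conn->state observe the transition atomically. A userspace analogue of the guarded transition (the pthread names are illustrative, not from the driver):

#include <pthread.h>
#include <stdio.h>

enum conn_state { CONN_UP, CONN_FULL_FEATURE, CONN_TERMINATING };

struct conn {
	pthread_mutex_t lock;
	enum conn_state state;
};

static void conn_set_full_feature(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->state = CONN_FULL_FEATURE;	/* seen atomically by teardown */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, CONN_UP };

	conn_set_full_feature(&c);
	printf("state = %d\n", c.state);	/* 1: CONN_FULL_FEATURE */
	return 0;
}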
@@ -1109,18 +1143,17 @@ post_send: | |||
1109 | } | 1143 | } |
1110 | 1144 | ||
1111 | static void | 1145 | static void |
1112 | isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, | 1146 | isert_rx_login_req(struct isert_conn *isert_conn) |
1113 | struct isert_conn *isert_conn) | ||
1114 | { | 1147 | { |
1148 | struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; | ||
1149 | int rx_buflen = isert_conn->login_req_len; | ||
1115 | struct iscsi_conn *conn = isert_conn->conn; | 1150 | struct iscsi_conn *conn = isert_conn->conn; |
1116 | struct iscsi_login *login = conn->conn_login; | 1151 | struct iscsi_login *login = conn->conn_login; |
1117 | int size; | 1152 | int size; |
1118 | 1153 | ||
1119 | if (!login) { | 1154 | isert_info("conn %p\n", isert_conn); |
1120 | pr_err("conn->conn_login is NULL\n"); | 1155 | |
1121 | dump_stack(); | 1156 | WARN_ON_ONCE(!login); |
1122 | return; | ||
1123 | } | ||
1124 | 1157 | ||
1125 | if (login->first_request) { | 1158 | if (login->first_request) { |
1126 | struct iscsi_login_req *login_req = | 1159 | struct iscsi_login_req *login_req = |
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, | |||
1146 | memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); | 1179 | memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); |
1147 | 1180 | ||
1148 | size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); | 1181 | size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); |
1149 | pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", | 1182 | isert_dbg("Using login payload size: %d, rx_buflen: %d " |
1150 | size, rx_buflen, MAX_KEY_VALUE_PAIRS); | 1183 | "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, |
1184 | MAX_KEY_VALUE_PAIRS); | ||
1151 | memcpy(login->req_buf, &rx_desc->data[0], size); | 1185 | memcpy(login->req_buf, &rx_desc->data[0], size); |
1152 | 1186 | ||
1153 | if (login->first_request) { | 1187 | if (login->first_request) { |
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd | |||
1166 | 1200 | ||
1167 | cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); | 1201 | cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); |
1168 | if (!cmd) { | 1202 | if (!cmd) { |
1169 | pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); | 1203 | isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); |
1170 | return NULL; | 1204 | return NULL; |
1171 | } | 1205 | } |
1172 | isert_cmd = iscsit_priv_cmd(cmd); | 1206 | isert_cmd = iscsit_priv_cmd(cmd); |
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, | |||
1209 | sg = &cmd->se_cmd.t_data_sg[0]; | 1243 | sg = &cmd->se_cmd.t_data_sg[0]; |
1210 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); | 1244 | sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); |
1211 | 1245 | ||
1212 | pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", | 1246 | isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", |
1213 | sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1247 | sg, sg_nents, &rx_desc->data[0], imm_data_len); |
1214 | 1248 | ||
1215 | sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); | 1249 | sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); |
1216 | 1250 | ||
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, | |||
1254 | * FIXME: Unexpected unsolicited_data out | 1288 | * FIXME: Unexpected unsolicited_data out |
1255 | */ | 1289 | */ |
1256 | if (!cmd->unsolicited_data) { | 1290 | if (!cmd->unsolicited_data) { |
1257 | pr_err("Received unexpected solicited data payload\n"); | 1291 | isert_err("Received unexpected solicited data payload\n"); |
1258 | dump_stack(); | 1292 | dump_stack(); |
1259 | return -1; | 1293 | return -1; |
1260 | } | 1294 | } |
1261 | 1295 | ||
1262 | pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", | 1296 | isert_dbg("Unsolicited DataOut unsol_data_len: %u, " |
1263 | unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); | 1297 | "write_data_done: %u, data_length: %u\n", |
1298 | unsol_data_len, cmd->write_data_done, | ||
1299 | cmd->se_cmd.data_length); | ||
1264 | 1300 | ||
1265 | sg_off = cmd->write_data_done / PAGE_SIZE; | 1301 | sg_off = cmd->write_data_done / PAGE_SIZE; |
1266 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; | 1302 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; |
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, | |||
1270 | * FIXME: Non page-aligned unsolicited_data out | 1306 | * FIXME: Non page-aligned unsolicited_data out |
1271 | */ | 1307 | */ |
1272 | if (page_off) { | 1308 | if (page_off) { |
1273 | pr_err("Received unexpected non-page aligned data payload\n"); | 1309 | isert_err("unexpected non-page aligned data payload\n"); |
1274 | dump_stack(); | 1310 | dump_stack(); |
1275 | return -1; | 1311 | return -1; |
1276 | } | 1312 | } |
1277 | pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", | 1313 | isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " |
1278 | sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); | 1314 | "sg_nents: %u from %p %u\n", sg_start, sg_off, |
1315 | sg_nents, &rx_desc->data[0], unsol_data_len); | ||
1279 | 1316 | ||
1280 | sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], | 1317 | sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], |
1281 | unsol_data_len); | 1318 | unsol_data_len); |
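The resume point for unsolicited DataOut is pure page arithmetic: write_data_done selects the scatterlist entry (sg_off), and the remainder must be zero or the FIXME path above rejects the PDU. A worked example of the calculation:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int write_data_done = 8192;	/* bytes already landed */
	unsigned int sg_off   = write_data_done / PAGE_SIZE;
	unsigned int page_off = write_data_done % PAGE_SIZE;

	assert(page_off == 0);	/* non-page-aligned payloads are rejected */
	printf("resume copy at t_data_sg[%u]\n", sg_off);	/* ...[2] */
	return 0;
}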
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd | |||
1322 | 1359 | ||
1323 | text_in = kzalloc(payload_length, GFP_KERNEL); | 1360 | text_in = kzalloc(payload_length, GFP_KERNEL); |
1324 | if (!text_in) { | 1361 | if (!text_in) { |
1325 | pr_err("Unable to allocate text_in of payload_length: %u\n", | 1362 | isert_err("Unable to allocate text_in of payload_length: %u\n", |
1326 | payload_length); | 1363 | payload_length); |
1327 | return -ENOMEM; | 1364 | return -ENOMEM; |
1328 | } | 1365 | } |
1329 | cmd->text_in_ptr = text_in; | 1366 | cmd->text_in_ptr = text_in; |
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1348 | 1385 | ||
1349 | if (sess->sess_ops->SessionType && | 1386 | if (sess->sess_ops->SessionType && |
1350 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { | 1387 | (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { |
1351 | pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery," | 1388 | isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," |
1352 | " ignoring\n", opcode); | 1389 | " ignoring\n", opcode); |
1353 | return 0; | 1390 | return 0; |
1354 | } | 1391 | } |
1355 | 1392 | ||
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1395 | break; | 1432 | break; |
1396 | 1433 | ||
1397 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); | 1434 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); |
1398 | if (ret > 0) | ||
1399 | wait_for_completion_timeout(&conn->conn_logout_comp, | ||
1400 | SECONDS_FOR_LOGOUT_COMP * | ||
1401 | HZ); | ||
1402 | break; | 1435 | break; |
1403 | case ISCSI_OP_TEXT: | 1436 | case ISCSI_OP_TEXT: |
1404 | cmd = isert_allocate_cmd(conn); | 1437 | cmd = isert_allocate_cmd(conn); |
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, | |||
1410 | rx_desc, (struct iscsi_text *)hdr); | 1443 | rx_desc, (struct iscsi_text *)hdr); |
1411 | break; | 1444 | break; |
1412 | default: | 1445 | default: |
1413 | pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); | 1446 | isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); |
1414 | dump_stack(); | 1447 | dump_stack(); |
1415 | break; | 1448 | break; |
1416 | } | 1449 | } |
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) | |||
1431 | if (iser_hdr->flags & ISER_RSV) { | 1464 | if (iser_hdr->flags & ISER_RSV) { |
1432 | read_stag = be32_to_cpu(iser_hdr->read_stag); | 1465 | read_stag = be32_to_cpu(iser_hdr->read_stag); |
1433 | read_va = be64_to_cpu(iser_hdr->read_va); | 1466 | read_va = be64_to_cpu(iser_hdr->read_va); |
1434 | pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", | 1467 | isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", |
1435 | read_stag, (unsigned long long)read_va); | 1468 | read_stag, (unsigned long long)read_va); |
1436 | } | 1469 | } |
1437 | if (iser_hdr->flags & ISER_WSV) { | 1470 | if (iser_hdr->flags & ISER_WSV) { |
1438 | write_stag = be32_to_cpu(iser_hdr->write_stag); | 1471 | write_stag = be32_to_cpu(iser_hdr->write_stag); |
1439 | write_va = be64_to_cpu(iser_hdr->write_va); | 1472 | write_va = be64_to_cpu(iser_hdr->write_va); |
1440 | pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", | 1473 | isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", |
1441 | write_stag, (unsigned long long)write_va); | 1474 | write_stag, (unsigned long long)write_va); |
1442 | } | 1475 | } |
1443 | 1476 | ||
1444 | pr_debug("ISER ISCSI_CTRL PDU\n"); | 1477 | isert_dbg("ISER ISCSI_CTRL PDU\n"); |
1445 | break; | 1478 | break; |
1446 | case ISER_HELLO: | 1479 | case ISER_HELLO: |
1447 | pr_err("iSER Hello message\n"); | 1480 | isert_err("iSER Hello message\n"); |
1448 | break; | 1481 | break; |
1449 | default: | 1482 | default: |
1450 | pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); | 1483 | isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); |
1451 | break; | 1484 | break; |
1452 | } | 1485 | } |
1453 | 1486 | ||
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) | |||
1457 | 1490 | ||
1458 | static void | 1491 | static void |
1459 | isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | 1492 | isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, |
1460 | unsigned long xfer_len) | 1493 | u32 xfer_len) |
1461 | { | 1494 | { |
1462 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1495 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1463 | struct iscsi_hdr *hdr; | 1496 | struct iscsi_hdr *hdr; |
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | |||
1467 | if ((char *)desc == isert_conn->login_req_buf) { | 1500 | if ((char *)desc == isert_conn->login_req_buf) { |
1468 | rx_dma = isert_conn->login_req_dma; | 1501 | rx_dma = isert_conn->login_req_dma; |
1469 | rx_buflen = ISER_RX_LOGIN_SIZE; | 1502 | rx_buflen = ISER_RX_LOGIN_SIZE; |
1470 | pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", | 1503 | isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", |
1471 | rx_dma, rx_buflen); | 1504 | rx_dma, rx_buflen); |
1472 | } else { | 1505 | } else { |
1473 | rx_dma = desc->dma_addr; | 1506 | rx_dma = desc->dma_addr; |
1474 | rx_buflen = ISER_RX_PAYLOAD_SIZE; | 1507 | rx_buflen = ISER_RX_PAYLOAD_SIZE; |
1475 | pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", | 1508 | isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", |
1476 | rx_dma, rx_buflen); | 1509 | rx_dma, rx_buflen); |
1477 | } | 1510 | } |
1478 | 1511 | ||
1479 | ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); | 1512 | ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); |
1480 | 1513 | ||
1481 | hdr = &desc->iscsi_header; | 1514 | hdr = &desc->iscsi_header; |
1482 | pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", | 1515 | isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", |
1483 | hdr->opcode, hdr->itt, hdr->flags, | 1516 | hdr->opcode, hdr->itt, hdr->flags, |
1484 | (int)(xfer_len - ISER_HEADERS_LEN)); | 1517 | (int)(xfer_len - ISER_HEADERS_LEN)); |
1485 | 1518 | ||
1486 | if ((char *)desc == isert_conn->login_req_buf) | 1519 | if ((char *)desc == isert_conn->login_req_buf) { |
1487 | isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN, | 1520 | isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; |
1488 | isert_conn); | 1521 | if (isert_conn->conn) { |
1489 | else | 1522 | struct iscsi_login *login = isert_conn->conn->conn_login; |
1523 | |||
1524 | if (login && !login->first_request) | ||
1525 | isert_rx_login_req(isert_conn); | ||
1526 | } | ||
1527 | mutex_lock(&isert_conn->conn_mutex); | ||
1528 | complete(&isert_conn->login_req_comp); | ||
1529 | mutex_unlock(&isert_conn->conn_mutex); | ||
1530 | } else { | ||
1490 | isert_rx_do_work(desc, isert_conn); | 1531 | isert_rx_do_work(desc, isert_conn); |
1532 | } | ||
1491 | 1533 | ||
1492 | ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, | 1534 | ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, |
1493 | DMA_FROM_DEVICE); | 1535 | DMA_FROM_DEVICE); |
1494 | 1536 | ||
1495 | isert_conn->post_recv_buf_count--; | 1537 | isert_conn->post_recv_buf_count--; |
1496 | pr_debug("iSERT: Decremented post_recv_buf_count: %d\n", | 1538 | isert_dbg("Decremented post_recv_buf_count: %d\n", |
1497 | isert_conn->post_recv_buf_count); | 1539 | isert_conn->post_recv_buf_count); |
1498 | 1540 | ||
1499 | if ((char *)desc == isert_conn->login_req_buf) | 1541 | if ((char *)desc == isert_conn->login_req_buf) |
1500 | return; | 1542 | return; |
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, | |||
1505 | ISERT_MIN_POSTED_RX); | 1547 | ISERT_MIN_POSTED_RX); |
1506 | err = isert_post_recv(isert_conn, count); | 1548 | err = isert_post_recv(isert_conn, count); |
1507 | if (err) { | 1549 | if (err) { |
1508 | pr_err("isert_post_recv() count: %d failed, %d\n", | 1550 | isert_err("isert_post_recv() count: %d failed, %d\n", |
1509 | count, err); | 1551 | count, err); |
1510 | } | 1552 | } |
1511 | } | 1553 | } |
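Login PDUs are now parked rather than processed directly from the receive completion: the handler records login_req_len, processes the PDU inline only when a login exchange is already past its first request, and otherwise just completes login_req_comp so the login thread consumes it later. A userspace analogue of that hand-off (a semaphore stands in for the kernel completion):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t login_req_comp;	/* stands in for the kernel completion */
static int   login_req_len;

static void *rx_completion(void *arg)	/* producer: completion side */
{
	(void)arg;
	login_req_len = 512;		/* stash the payload length */
	sem_post(&login_req_comp);	/* wake whoever accepts the login */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&login_req_comp, 0, 0);
	pthread_create(&t, NULL, rx_completion, NULL);

	sem_wait(&login_req_comp);	/* login thread parks here */
	printf("login PDU ready: %d bytes\n", login_req_len);

	pthread_join(&t, NULL);
	return 0;
}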
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
1534 | data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, | 1576 | data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, |
1535 | data->dma_dir); | 1577 | data->dma_dir); |
1536 | if (unlikely(!data->dma_nents)) { | 1578 | if (unlikely(!data->dma_nents)) { |
1537 | pr_err("Cmd: unable to dma map SGs %p\n", sg); | 1579 | isert_err("Cmd: unable to dma map SGs %p\n", sg); |
1538 | return -EINVAL; | 1580 | return -EINVAL; |
1539 | } | 1581 | } |
1540 | 1582 | ||
1541 | pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", | 1583 | isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", |
1542 | isert_cmd, data->dma_nents, data->sg, data->nents, data->len); | 1584 | isert_cmd, data->dma_nents, data->sg, data->nents, data->len); |
1543 | 1585 | ||
1544 | return 0; | 1586 | return 0; |
1545 | } | 1587 | } |
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1560 | { | 1602 | { |
1561 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 1603 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
1562 | 1604 | ||
1563 | pr_debug("isert_unmap_cmd: %p\n", isert_cmd); | 1605 | isert_dbg("Cmd %p\n", isert_cmd); |
1564 | 1606 | ||
1565 | if (wr->data.sg) { | 1607 | if (wr->data.sg) { |
1566 | pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); | 1608 | isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); |
1567 | isert_unmap_data_buf(isert_conn, &wr->data); | 1609 | isert_unmap_data_buf(isert_conn, &wr->data); |
1568 | } | 1610 | } |
1569 | 1611 | ||
1570 | if (wr->send_wr) { | 1612 | if (wr->send_wr) { |
1571 | pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); | 1613 | isert_dbg("Cmd %p free send_wr\n", isert_cmd); |
1572 | kfree(wr->send_wr); | 1614 | kfree(wr->send_wr); |
1573 | wr->send_wr = NULL; | 1615 | wr->send_wr = NULL; |
1574 | } | 1616 | } |
1575 | 1617 | ||
1576 | if (wr->ib_sge) { | 1618 | if (wr->ib_sge) { |
1577 | pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); | 1619 | isert_dbg("Cmd %p free ib_sge\n", isert_cmd); |
1578 | kfree(wr->ib_sge); | 1620 | kfree(wr->ib_sge); |
1579 | wr->ib_sge = NULL; | 1621 | wr->ib_sge = NULL; |
1580 | } | 1622 | } |
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1586 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | 1628 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; |
1587 | LIST_HEAD(unmap_list); | 1629 | LIST_HEAD(unmap_list); |
1588 | 1630 | ||
1589 | pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); | 1631 | isert_dbg("Cmd %p\n", isert_cmd); |
1590 | 1632 | ||
1591 | if (wr->fr_desc) { | 1633 | if (wr->fr_desc) { |
1592 | pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", | 1634 | isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc); |
1593 | isert_cmd, wr->fr_desc); | ||
1594 | if (wr->fr_desc->ind & ISERT_PROTECTED) { | 1635 | if (wr->fr_desc->ind & ISERT_PROTECTED) { |
1595 | isert_unmap_data_buf(isert_conn, &wr->prot); | 1636 | isert_unmap_data_buf(isert_conn, &wr->prot); |
1596 | wr->fr_desc->ind &= ~ISERT_PROTECTED; | 1637 | wr->fr_desc->ind &= ~ISERT_PROTECTED; |
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1602 | } | 1643 | } |
1603 | 1644 | ||
1604 | if (wr->data.sg) { | 1645 | if (wr->data.sg) { |
1605 | pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); | 1646 | isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); |
1606 | isert_unmap_data_buf(isert_conn, &wr->data); | 1647 | isert_unmap_data_buf(isert_conn, &wr->data); |
1607 | } | 1648 | } |
1608 | 1649 | ||
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1618 | struct iscsi_conn *conn = isert_conn->conn; | 1659 | struct iscsi_conn *conn = isert_conn->conn; |
1619 | struct isert_device *device = isert_conn->conn_device; | 1660 | struct isert_device *device = isert_conn->conn_device; |
1620 | 1661 | ||
1621 | pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); | 1662 | isert_dbg("Cmd %p\n", isert_cmd); |
1622 | 1663 | ||
1623 | switch (cmd->iscsi_opcode) { | 1664 | switch (cmd->iscsi_opcode) { |
1624 | case ISCSI_OP_SCSI_CMD: | 1665 | case ISCSI_OP_SCSI_CMD: |
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) | |||
1668 | * associated cmd->se_cmd needs to be released. | 1709 | * associated cmd->se_cmd needs to be released. |
1669 | */ | 1710 | */ |
1670 | if (cmd->se_cmd.se_tfo != NULL) { | 1711 | if (cmd->se_cmd.se_tfo != NULL) { |
1671 | pr_debug("Calling transport_generic_free_cmd from" | 1712 | isert_dbg("Calling transport_generic_free_cmd from" |
1672 | " isert_put_cmd for 0x%02x\n", | 1713 | " isert_put_cmd for 0x%02x\n", |
1673 | cmd->iscsi_opcode); | 1714 | cmd->iscsi_opcode); |
1674 | transport_generic_free_cmd(&cmd->se_cmd, 0); | 1715 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
@@ -1687,7 +1728,7 @@ static void | |||
1687 | isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) | 1728 | isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) |
1688 | { | 1729 | { |
1689 | if (tx_desc->dma_addr != 0) { | 1730 | if (tx_desc->dma_addr != 0) { |
1690 | pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); | 1731 | isert_dbg("unmap single for tx_desc->dma_addr\n"); |
1691 | ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, | 1732 | ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, |
1692 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 1733 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
1693 | tx_desc->dma_addr = 0; | 1734 | tx_desc->dma_addr = 0; |
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, | |||
1699 | struct ib_device *ib_dev, bool comp_err) | 1740 | struct ib_device *ib_dev, bool comp_err) |
1700 | { | 1741 | { |
1701 | if (isert_cmd->pdu_buf_dma != 0) { | 1742 | if (isert_cmd->pdu_buf_dma != 0) { |
1702 | pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); | 1743 | isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); |
1703 | ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, | 1744 | ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, |
1704 | isert_cmd->pdu_buf_len, DMA_TO_DEVICE); | 1745 | isert_cmd->pdu_buf_len, DMA_TO_DEVICE); |
1705 | isert_cmd->pdu_buf_dma = 0; | 1746 | isert_cmd->pdu_buf_dma = 0; |
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) | |||
1717 | 1758 | ||
1718 | ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); | 1759 | ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); |
1719 | if (ret) { | 1760 | if (ret) { |
1720 | pr_err("ib_check_mr_status failed, ret %d\n", ret); | 1761 | isert_err("ib_check_mr_status failed, ret %d\n", ret); |
1721 | goto fail_mr_status; | 1762 | goto fail_mr_status; |
1722 | } | 1763 | } |
1723 | 1764 | ||
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) | |||
1740 | do_div(sec_offset_err, block_size); | 1781 | do_div(sec_offset_err, block_size); |
1741 | se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; | 1782 | se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; |
1742 | 1783 | ||
1743 | pr_err("isert: PI error found type %d at sector 0x%llx " | 1784 | isert_err("PI error found type %d at sector 0x%llx " |
1744 | "expected 0x%x vs actual 0x%x\n", | 1785 | "expected 0x%x vs actual 0x%x\n", |
1745 | mr_status.sig_err.err_type, | 1786 | mr_status.sig_err.err_type, |
1746 | (unsigned long long)se_cmd->bad_sector, | 1787 | (unsigned long long)se_cmd->bad_sector, |
1747 | mr_status.sig_err.expected, | 1788 | mr_status.sig_err.expected, |
1748 | mr_status.sig_err.actual); | 1789 | mr_status.sig_err.actual); |
1749 | ret = 1; | 1790 | ret = 1; |
1750 | } | 1791 | } |
1751 | 1792 | ||
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1801 | cmd->write_data_done = wr->data.len; | 1842 | cmd->write_data_done = wr->data.len; |
1802 | wr->send_wr_num = 0; | 1843 | wr->send_wr_num = 0; |
1803 | 1844 | ||
1804 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); | 1845 | isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); |
1805 | spin_lock_bh(&cmd->istate_lock); | 1846 | spin_lock_bh(&cmd->istate_lock); |
1806 | cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; | 1847 | cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; |
1807 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; | 1848 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; |
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work) | |||
1823 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1864 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1824 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1865 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1825 | 1866 | ||
1867 | isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); | ||
1868 | |||
1826 | switch (cmd->i_state) { | 1869 | switch (cmd->i_state) { |
1827 | case ISTATE_SEND_TASKMGTRSP: | 1870 | case ISTATE_SEND_TASKMGTRSP: |
1828 | pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); | ||
1829 | |||
1830 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1831 | iscsit_tmr_post_handler(cmd, cmd->conn); | 1871 | iscsit_tmr_post_handler(cmd, cmd->conn); |
1832 | 1872 | case ISTATE_SEND_REJECT: /* FALLTHRU */ | |
1833 | cmd->i_state = ISTATE_SENT_STATUS; | 1873 | case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ |
1834 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | ||
1835 | break; | ||
1836 | case ISTATE_SEND_REJECT: | ||
1837 | pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); | ||
1838 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1839 | |||
1840 | cmd->i_state = ISTATE_SENT_STATUS; | 1874 | cmd->i_state = ISTATE_SENT_STATUS; |
1841 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | 1875 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, |
1876 | ib_dev, false); | ||
1842 | break; | 1877 | break; |
1843 | case ISTATE_SEND_LOGOUTRSP: | 1878 | case ISTATE_SEND_LOGOUTRSP: |
1844 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); | ||
1845 | |||
1846 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1847 | iscsit_logout_post_handler(cmd, cmd->conn); | 1879 | iscsit_logout_post_handler(cmd, cmd->conn); |
1848 | break; | 1880 | break; |
1849 | case ISTATE_SEND_TEXTRSP: | ||
1850 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1851 | cmd->i_state = ISTATE_SENT_STATUS; | ||
1852 | isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); | ||
1853 | break; | ||
1854 | default: | 1881 | default: |
1855 | pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); | 1882 | isert_err("Unknown i_state %d\n", cmd->i_state); |
1856 | dump_stack(); | 1883 | dump_stack(); |
1857 | break; | 1884 | break; |
1858 | } | 1885 | } |
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1865 | struct ib_device *ib_dev) | 1892 | struct ib_device *ib_dev) |
1866 | { | 1893 | { |
1867 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1894 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1868 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | ||
1869 | 1895 | ||
1870 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || | 1896 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || |
1871 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || | 1897 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || |
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1878 | return; | 1904 | return; |
1879 | } | 1905 | } |
1880 | 1906 | ||
1881 | /** | ||
1882 | * If send_wr_num is 0 this means that we got | ||
1883 | * RDMA completion and we cleared it and we should | ||
1884 | * simply decrement the response post. else the | ||
1885 | * response is incorporated in send_wr_num, just | ||
1886 | * sub it. | ||
1887 | **/ | ||
1888 | if (wr->send_wr_num) | ||
1889 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1890 | else | ||
1891 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1892 | |||
1893 | cmd->i_state = ISTATE_SENT_STATUS; | 1907 | cmd->i_state = ISTATE_SENT_STATUS; |
1894 | isert_completion_put(tx_desc, isert_cmd, ib_dev, false); | 1908 | isert_completion_put(tx_desc, isert_cmd, ib_dev, false); |
1895 | } | 1909 | } |
1896 | 1910 | ||
1897 | static void | 1911 | static void |
1898 | __isert_send_completion(struct iser_tx_desc *tx_desc, | 1912 | isert_send_completion(struct iser_tx_desc *tx_desc, |
1899 | struct isert_conn *isert_conn) | 1913 | struct isert_conn *isert_conn) |
1900 | { | 1914 | { |
1901 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1915 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1902 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1916 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; |
1903 | struct isert_rdma_wr *wr; | 1917 | struct isert_rdma_wr *wr; |
1904 | 1918 | ||
1905 | if (!isert_cmd) { | 1919 | if (!isert_cmd) { |
1906 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1907 | isert_unmap_tx_desc(tx_desc, ib_dev); | 1920 | isert_unmap_tx_desc(tx_desc, ib_dev); |
1908 | return; | 1921 | return; |
1909 | } | 1922 | } |
1910 | wr = &isert_cmd->rdma_wr; | 1923 | wr = &isert_cmd->rdma_wr; |
1911 | 1924 | ||
1925 | isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); | ||
1926 | |||
1912 | switch (wr->iser_ib_op) { | 1927 | switch (wr->iser_ib_op) { |
1913 | case ISER_IB_RECV: | 1928 | case ISER_IB_RECV: |
1914 | pr_err("isert_send_completion: Got ISER_IB_RECV\n"); | 1929 | isert_err("Got ISER_IB_RECV\n"); |
1915 | dump_stack(); | 1930 | dump_stack(); |
1916 | break; | 1931 | break; |
1917 | case ISER_IB_SEND: | 1932 | case ISER_IB_SEND: |
1918 | pr_debug("isert_send_completion: Got ISER_IB_SEND\n"); | ||
1919 | isert_response_completion(tx_desc, isert_cmd, | 1933 | isert_response_completion(tx_desc, isert_cmd, |
1920 | isert_conn, ib_dev); | 1934 | isert_conn, ib_dev); |
1921 | break; | 1935 | break; |
1922 | case ISER_IB_RDMA_WRITE: | 1936 | case ISER_IB_RDMA_WRITE: |
1923 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); | ||
1924 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1925 | isert_completion_rdma_write(tx_desc, isert_cmd); | 1937 | isert_completion_rdma_write(tx_desc, isert_cmd); |
1926 | break; | 1938 | break; |
1927 | case ISER_IB_RDMA_READ: | 1939 | case ISER_IB_RDMA_READ: |
1928 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); | ||
1929 | |||
1930 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
1931 | isert_completion_rdma_read(tx_desc, isert_cmd); | 1940 | isert_completion_rdma_read(tx_desc, isert_cmd); |
1932 | break; | 1941 | break; |
1933 | default: | 1942 | default: |
1934 | pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); | 1943 | isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op); |
1935 | dump_stack(); | 1944 | dump_stack(); |
1936 | break; | 1945 | break; |
1937 | } | 1946 | } |
1938 | } | 1947 | } |
1939 | 1948 | ||
1940 | static void | 1949 | /** |
1941 | isert_send_completion(struct iser_tx_desc *tx_desc, | 1950 | * is_isert_tx_desc() - Indicate if the completion wr_id |
1942 | struct isert_conn *isert_conn) | 1951 | * is a TX descriptor or not. |
1943 | { | 1952 | * @isert_conn: iser connection |
1944 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | 1953 | * @wr_id: completion WR identifier |
1945 | struct iser_tx_desc *t; | 1954 | * |
1946 | /* | 1955 | * Since we cannot rely on wc opcode in FLUSH errors |
1947 | * Drain coalesced completion llist starting from comp_llnode_batch | 1956 | * we must work around it by checking if the wr_id address |
1948 | * setup in isert_init_send_wr(), and then complete trailing tx_desc. | 1957 | * falls in the iser connection rx_descs buffer. If so |
1949 | */ | 1958 | * it is an RX descriptor, otherwise it is a TX. |
1950 | while (llnode) { | 1959 | */ |
1951 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | 1960 | static inline bool |
1952 | llnode = llist_next(llnode); | 1961 | is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) |
1953 | __isert_send_completion(t, isert_conn); | ||
1954 | } | ||
1955 | __isert_send_completion(tx_desc, isert_conn); | ||
1956 | } | ||
1957 | |||
1958 | static void | ||
1959 | isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) | ||
1960 | { | 1962 | { |
1961 | struct llist_node *llnode; | 1963 | void *start = isert_conn->conn_rx_descs; |
1962 | struct isert_rdma_wr *wr; | 1964 | int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs); |
1963 | struct iser_tx_desc *t; | ||
1964 | 1965 | ||
1965 | mutex_lock(&isert_conn->conn_mutex); | 1966 | if (wr_id >= start && wr_id < start + len) |
1966 | llnode = llist_del_all(&isert_conn->conn_comp_llist); | 1967 | return false; |
1967 | isert_conn->conn_comp_batch = 0; | ||
1968 | mutex_unlock(&isert_conn->conn_mutex); | ||
1969 | |||
1970 | while (llnode) { | ||
1971 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | ||
1972 | llnode = llist_next(llnode); | ||
1973 | wr = &t->isert_cmd->rdma_wr; | ||
1974 | |||
1975 | /** | ||
1976 | * If send_wr_num is 0 this means that we got | ||
1977 | * RDMA completion and we cleared it and we should | ||
1978 | * simply decrement the response post. else the | ||
1979 | * response is incorporated in send_wr_num, just | ||
1980 | * sub it. | ||
1981 | **/ | ||
1982 | if (wr->send_wr_num) | ||
1983 | atomic_sub(wr->send_wr_num, | ||
1984 | &isert_conn->post_send_buf_count); | ||
1985 | else | ||
1986 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1987 | 1968 | ||
1988 | isert_completion_put(t, t->isert_cmd, ib_dev, true); | 1969 | return true; |
1989 | } | ||
1990 | } | 1970 | } |
1991 | 1971 | ||
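is_isert_tx_desc() leans on a simple invariant: all RX descriptors live in one contiguous conn_rx_descs array, so a flushed wr_id can be classified by an address-range check even though wc->opcode is undefined for flush errors. The same idea in standalone C (comparing pointers into distinct objects is technically implementation-defined, which the kernel code tolerates as well):

#include <stdbool.h>
#include <stdio.h>

struct rx_desc { char buf[128]; };

static bool in_rx_ring(struct rx_desc *ring, int nr, void *p)
{
	char *start = (char *)ring;
	char *end   = start + nr * sizeof(*ring);

	return (char *)p >= start && (char *)p < end;
}

int main(void)
{
	struct rx_desc ring[4];
	struct rx_desc tx;	/* anything outside the ring is "TX" */

	printf("%d %d\n", in_rx_ring(ring, 4, &ring[2]),
			  in_rx_ring(ring, 4, &tx));	/* 1 0 */
	return 0;
}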
1992 | static void | 1972 | static void |
1993 | isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | 1973 | isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) |
1994 | { | 1974 | { |
1995 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1975 | if (wc->wr_id == ISER_BEACON_WRID) { |
1996 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1976 | isert_info("conn %p completing conn_wait_comp_err\n", |
1997 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | 1977 | isert_conn); |
1998 | struct isert_rdma_wr *wr; | 1978 | complete(&isert_conn->conn_wait_comp_err); |
1999 | struct iser_tx_desc *t; | 1979 | } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { |
2000 | 1980 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | |
2001 | while (llnode) { | 1981 | struct isert_cmd *isert_cmd; |
2002 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | 1982 | struct iser_tx_desc *desc; |
2003 | llnode = llist_next(llnode); | ||
2004 | wr = &t->isert_cmd->rdma_wr; | ||
2005 | 1983 | ||
2006 | /** | 1984 | desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; |
2007 | * If send_wr_num is 0 this means that we got | 1985 | isert_cmd = desc->isert_cmd; |
2008 | * RDMA completion and we cleared it and we should | 1986 | if (!isert_cmd) |
2009 | * simply decrement the response post. else the | 1987 | isert_unmap_tx_desc(desc, ib_dev); |
2010 | * response is incorporated in send_wr_num, just | ||
2011 | * sub it. | ||
2012 | **/ | ||
2013 | if (wr->send_wr_num) | ||
2014 | atomic_sub(wr->send_wr_num, | ||
2015 | &isert_conn->post_send_buf_count); | ||
2016 | else | 1988 | else |
2017 | atomic_dec(&isert_conn->post_send_buf_count); | 1989 | isert_completion_put(desc, isert_cmd, ib_dev, true); |
2018 | 1990 | } else { | |
2019 | isert_completion_put(t, t->isert_cmd, ib_dev, true); | 1991 | isert_conn->post_recv_buf_count--; |
2020 | } | 1992 | if (!isert_conn->post_recv_buf_count) |
2021 | tx_desc->comp_llnode_batch = NULL; | 1993 | iscsit_cause_connection_reinstatement(isert_conn->conn, 0); |
2022 | |||
2023 | if (!isert_cmd) | ||
2024 | isert_unmap_tx_desc(tx_desc, ib_dev); | ||
2025 | else | ||
2026 | isert_completion_put(tx_desc, isert_cmd, ib_dev, true); | ||
2027 | } | ||
2028 | |||
2029 | static void | ||
2030 | isert_cq_rx_comp_err(struct isert_conn *isert_conn) | ||
2031 | { | ||
2032 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | ||
2033 | struct iscsi_conn *conn = isert_conn->conn; | ||
2034 | |||
2035 | if (isert_conn->post_recv_buf_count) | ||
2036 | return; | ||
2037 | |||
2038 | isert_cq_drain_comp_llist(isert_conn, ib_dev); | ||
2039 | |||
2040 | if (conn->sess) { | ||
2041 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
2042 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
2043 | } | 1994 | } |
2044 | |||
2045 | while (atomic_read(&isert_conn->post_send_buf_count)) | ||
2046 | msleep(3000); | ||
2047 | |||
2048 | mutex_lock(&isert_conn->conn_mutex); | ||
2049 | isert_conn->state = ISER_CONN_DOWN; | ||
2050 | mutex_unlock(&isert_conn->conn_mutex); | ||
2051 | |||
2052 | iscsit_cause_connection_reinstatement(isert_conn->conn, 0); | ||
2053 | |||
2054 | complete(&isert_conn->conn_wait_comp_err); | ||
2055 | } | 1995 | } |
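isert_cq_comp_err() also exposes the new drain protocol: teardown posts a sentinel work request carrying ISER_BEACON_WRID after moving the QP to the error state, and since flush completions are delivered in posting order, seeing the beacon proves every earlier WR has already flushed. That replaces the old msleep() loop spinning on post_send_buf_count. A hedged sketch of posting such a beacon; the sentinel value and helper name are assumptions, not shown in this hunk:

/* Sketch only: a zero-SGE recv WR whose wr_id marks end-of-flush. */
#define ISER_BEACON_WRID 0xfffffffffffffffeULL	/* assumed value */

static int isert_post_beacon(struct ib_qp *qp)
{
	struct ib_recv_wr wr = { .wr_id = ISER_BEACON_WRID }, *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}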
2056 | 1996 | ||
2057 | static void | 1997 | static void |
2058 | isert_cq_tx_work(struct work_struct *work) | 1998 | isert_handle_wc(struct ib_wc *wc) |
2059 | { | 1999 | { |
2060 | struct isert_cq_desc *cq_desc = container_of(work, | ||
2061 | struct isert_cq_desc, cq_tx_work); | ||
2062 | struct isert_device *device = cq_desc->device; | ||
2063 | int cq_index = cq_desc->cq_index; | ||
2064 | struct ib_cq *tx_cq = device->dev_tx_cq[cq_index]; | ||
2065 | struct isert_conn *isert_conn; | 2000 | struct isert_conn *isert_conn; |
2066 | struct iser_tx_desc *tx_desc; | 2001 | struct iser_tx_desc *tx_desc; |
2067 | struct ib_wc wc; | 2002 | struct iser_rx_desc *rx_desc; |
2068 | |||
2069 | while (ib_poll_cq(tx_cq, 1, &wc) == 1) { | ||
2070 | tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id; | ||
2071 | isert_conn = wc.qp->qp_context; | ||
2072 | 2003 | ||
2073 | if (wc.status == IB_WC_SUCCESS) { | 2004 | isert_conn = wc->qp->qp_context; |
2074 | isert_send_completion(tx_desc, isert_conn); | 2005 | if (likely(wc->status == IB_WC_SUCCESS)) { |
2006 | if (wc->opcode == IB_WC_RECV) { | ||
2007 | rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; | ||
2008 | isert_rx_completion(rx_desc, isert_conn, wc->byte_len); | ||
2075 | } else { | 2009 | } else { |
2076 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | 2010 | tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; |
2077 | pr_debug("TX wc.status: 0x%08x\n", wc.status); | 2011 | isert_send_completion(tx_desc, isert_conn); |
2078 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); | ||
2079 | |||
2080 | if (wc.wr_id != ISER_FASTREG_LI_WRID) { | ||
2081 | if (tx_desc->llnode_active) | ||
2082 | continue; | ||
2083 | |||
2084 | atomic_dec(&isert_conn->post_send_buf_count); | ||
2085 | isert_cq_tx_comp_err(tx_desc, isert_conn); | ||
2086 | } | ||
2087 | } | 2012 | } |
2088 | } | 2013 | } else { |
2089 | 2014 | if (wc->status != IB_WC_WR_FLUSH_ERR) | |
2090 | ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); | 2015 | isert_err("wr id %llx status %d vend_err %x\n", |
2091 | } | 2016 | wc->wr_id, wc->status, wc->vendor_err); |
2092 | 2017 | else | |
2093 | static void | 2018 | isert_dbg("flush error: wr id %llx\n", wc->wr_id); |
2094 | isert_cq_tx_callback(struct ib_cq *cq, void *context) | ||
2095 | { | ||
2096 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | ||
2097 | 2019 | ||
2098 | queue_work(isert_comp_wq, &cq_desc->cq_tx_work); | 2020 | if (wc->wr_id != ISER_FASTREG_LI_WRID) |
2021 | isert_cq_comp_err(isert_conn, wc); | ||
2022 | } | ||
2099 | } | 2023 | } |
2100 | 2024 | ||
2101 | static void | 2025 | static void |
2102 | isert_cq_rx_work(struct work_struct *work) | 2026 | isert_cq_work(struct work_struct *work) |
2103 | { | 2027 | { |
2104 | struct isert_cq_desc *cq_desc = container_of(work, | 2028 | enum { isert_poll_budget = 65536 }; |
2105 | struct isert_cq_desc, cq_rx_work); | 2029 | struct isert_comp *comp = container_of(work, struct isert_comp, |
2106 | struct isert_device *device = cq_desc->device; | 2030 | work); |
2107 | int cq_index = cq_desc->cq_index; | 2031 | struct ib_wc *const wcs = comp->wcs; |
2108 | struct ib_cq *rx_cq = device->dev_rx_cq[cq_index]; | 2032 | int i, n, completed = 0; |
2109 | struct isert_conn *isert_conn; | ||
2110 | struct iser_rx_desc *rx_desc; | ||
2111 | struct ib_wc wc; | ||
2112 | unsigned long xfer_len; | ||
2113 | 2033 | ||
2114 | while (ib_poll_cq(rx_cq, 1, &wc) == 1) { | 2034 | while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) { |
2115 | rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; | 2035 | for (i = 0; i < n; i++) |
2116 | isert_conn = wc.qp->qp_context; | 2036 | isert_handle_wc(&wcs[i]); |
2117 | 2037 | ||
2118 | if (wc.status == IB_WC_SUCCESS) { | 2038 | completed += n; |
2119 | xfer_len = (unsigned long)wc.byte_len; | 2039 | if (completed >= isert_poll_budget) |
2120 | isert_rx_completion(rx_desc, isert_conn, xfer_len); | 2040 | break; |
2121 | } else { | ||
2122 | pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | ||
2123 | if (wc.status != IB_WC_WR_FLUSH_ERR) { | ||
2124 | pr_debug("RX wc.status: 0x%08x\n", wc.status); | ||
2125 | pr_debug("RX wc.vendor_err: 0x%08x\n", | ||
2126 | wc.vendor_err); | ||
2127 | } | ||
2128 | isert_conn->post_recv_buf_count--; | ||
2129 | isert_cq_rx_comp_err(isert_conn); | ||
2130 | } | ||
2131 | } | 2041 | } |
2132 | 2042 | ||
2133 | ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); | 2043 | ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); |
2134 | } | 2044 | } |
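The single worker now drains the CQ in batches of ARRAY_SIZE(comp->wcs) and gives up after isert_poll_budget completions, so one busy QP cannot starve the workqueue; ib_req_notify_cq() then re-arms the interrupt. A self-contained analogue of the loop shape, where poll_queue() is a stand-in for ib_poll_cq():

#include <stdio.h>

#define BATCH	16
#define BUDGET	65536

/* Stand-in for ib_poll_cq(); returns how many entries it filled. */
static int poll_queue(int *wcs, int nr) { (void)wcs; (void)nr; return 0; }

static void drain(void)
{
	int wcs[BATCH], i, n, completed = 0;

	while ((n = poll_queue(wcs, BATCH)) > 0) {
		for (i = 0; i < n; i++)
			printf("handle wc %d\n", wcs[i]);

		completed += n;
		if (completed >= BUDGET)
			break;	/* yield; notification re-arm requeues us */
	}
	/* ib_req_notify_cq(cq, IB_CQ_NEXT_COMP) would go here */
}

int main(void)
{
	drain();
	return 0;
}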
2135 | 2045 | ||
2136 | static void | 2046 | static void |
2137 | isert_cq_rx_callback(struct ib_cq *cq, void *context) | 2047 | isert_cq_callback(struct ib_cq *cq, void *context) |
2138 | { | 2048 | { |
2139 | struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; | 2049 | struct isert_comp *comp = context; |
2140 | 2050 | ||
2141 | queue_work(isert_rx_wq, &cq_desc->cq_rx_work); | 2051 | queue_work(isert_comp_wq, &comp->work); |
2142 | } | 2052 | } |
2143 | 2053 | ||
2144 | static int | 2054 | static int |
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) | |||
2147 | struct ib_send_wr *wr_failed; | 2057 | struct ib_send_wr *wr_failed; |
2148 | int ret; | 2058 | int ret; |
2149 | 2059 | ||
2150 | atomic_inc(&isert_conn->post_send_buf_count); | ||
2151 | |||
2152 | ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, | 2060 | ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, |
2153 | &wr_failed); | 2061 | &wr_failed); |
2154 | if (ret) { | 2062 | if (ret) { |
2155 | pr_err("ib_post_send failed with %d\n", ret); | 2063 | isert_err("ib_post_send failed with %d\n", ret); |
2156 | atomic_dec(&isert_conn->post_send_buf_count); | ||
2157 | return ret; | 2064 | return ret; |
2158 | } | 2065 | } |
2159 | return ret; | 2066 | return ret; |
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2200 | isert_cmd->tx_desc.num_sge = 2; | 2107 | isert_cmd->tx_desc.num_sge = 2; |
2201 | } | 2108 | } |
2202 | 2109 | ||
2203 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2110 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2204 | 2111 | ||
2205 | pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2112 | isert_dbg("Posting SCSI Response\n"); |
2206 | 2113 | ||
2207 | return isert_post_response(isert_conn, isert_cmd); | 2114 | return isert_post_response(isert_conn, isert_cmd); |
2208 | } | 2115 | } |
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn) | |||
2231 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 2138 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; |
2232 | struct isert_device *device = isert_conn->conn_device; | 2139 | struct isert_device *device = isert_conn->conn_device; |
2233 | 2140 | ||
2234 | if (device->pi_capable) | 2141 | if (conn->tpg->tpg_attrib.t10_pi) { |
2235 | return TARGET_PROT_ALL; | 2142 | if (device->pi_capable) { |
2143 | isert_info("conn %p PI offload enabled\n", isert_conn); | ||
2144 | isert_conn->pi_support = true; | ||
2145 | return TARGET_PROT_ALL; | ||
2146 | } | ||
2147 | } | ||
2148 | |||
2149 | isert_info("conn %p PI offload disabled\n", isert_conn); | ||
2150 | isert_conn->pi_support = false; | ||
2236 | 2151 | ||
2237 | return TARGET_PROT_NORMAL; | 2152 | return TARGET_PROT_NORMAL; |
2238 | } | 2153 | } |
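Protection information offload is now decided per connection: the TPG must request T10-PI (the t10_pi attribute) and the HCA must be pi_capable before TARGET_PROT_ALL is advertised; if either side is missing, the connection falls back to TARGET_PROT_NORMAL. The decision reduces to a conjunction, rendered trivially here:

#include <stdbool.h>
#include <stdio.h>

/* TARGET_PROT_ALL only when both the fabric config and HW agree. */
static bool pi_offload(bool tpg_t10_pi, bool device_pi_capable)
{
	return tpg_t10_pi && device_pi_capable;
}

int main(void)
{
	printf("%d %d\n", pi_offload(true, true),	/* 1: PROT_ALL */
			  pi_offload(true, false));	/* 0: PROT_NORMAL */
	return 0;
}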
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
2250 | &isert_cmd->tx_desc.iscsi_header, | 2165 | &isert_cmd->tx_desc.iscsi_header, |
2251 | nopout_response); | 2166 | nopout_response); |
2252 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2167 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2253 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2168 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2254 | 2169 | ||
2255 | pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2170 | isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); |
2256 | 2171 | ||
2257 | return isert_post_response(isert_conn, isert_cmd); | 2172 | return isert_post_response(isert_conn, isert_cmd); |
2258 | } | 2173 | } |
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2268 | iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) | 2183 | iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) |
2269 | &isert_cmd->tx_desc.iscsi_header); | 2184 | &isert_cmd->tx_desc.iscsi_header); |
2270 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2185 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2271 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2186 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2272 | 2187 | ||
2273 | pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2188 | isert_dbg("conn %p Posting Logout Response\n", isert_conn); |
2274 | 2189 | ||
2275 | return isert_post_response(isert_conn, isert_cmd); | 2190 | return isert_post_response(isert_conn, isert_cmd); |
2276 | } | 2191 | } |
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2286 | iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) | 2201 | iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) |
2287 | &isert_cmd->tx_desc.iscsi_header); | 2202 | &isert_cmd->tx_desc.iscsi_header); |
2288 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2203 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2289 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2204 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2290 | 2205 | ||
2291 | pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2206 | isert_dbg("conn %p Posting Task Management Response\n", isert_conn); |
2292 | 2207 | ||
2293 | return isert_post_response(isert_conn, isert_cmd); | 2208 | return isert_post_response(isert_conn, isert_cmd); |
2294 | } | 2209 | } |
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2318 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2233 | tx_dsg->lkey = isert_conn->conn_mr->lkey; |
2319 | isert_cmd->tx_desc.num_sge = 2; | 2234 | isert_cmd->tx_desc.num_sge = 2; |
2320 | 2235 | ||
2321 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2236 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2322 | 2237 | ||
2323 | pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2238 | isert_dbg("conn %p Posting Reject\n", isert_conn); |
2324 | 2239 | ||
2325 | return isert_post_response(isert_conn, isert_cmd); | 2240 | return isert_post_response(isert_conn, isert_cmd); |
2326 | } | 2241 | } |
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2358 | tx_dsg->lkey = isert_conn->conn_mr->lkey; | 2273 | tx_dsg->lkey = isert_conn->conn_mr->lkey; |
2359 | isert_cmd->tx_desc.num_sge = 2; | 2274 | isert_cmd->tx_desc.num_sge = 2; |
2360 | } | 2275 | } |
2361 | isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); | 2276 | isert_init_send_wr(isert_conn, isert_cmd, send_wr); |
2362 | 2277 | ||
2363 | pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); | 2278 | isert_dbg("conn %p Text Reject\n", isert_conn); |
2364 | 2279 | ||
2365 | return isert_post_response(isert_conn, isert_cmd); | 2280 | return isert_post_response(isert_conn, isert_cmd); |
2366 | } | 2281 | } |
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2383 | 2298 | ||
2384 | send_wr->sg_list = ib_sge; | 2299 | send_wr->sg_list = ib_sge; |
2385 | send_wr->num_sge = sg_nents; | 2300 | send_wr->num_sge = sg_nents; |
2386 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 2301 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2387 | /* | 2302 | /* |
2388 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. | 2303 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. |
2389 | */ | 2304 | */ |
2390 | for_each_sg(sg_start, tmp_sg, sg_nents, i) { | 2305 | for_each_sg(sg_start, tmp_sg, sg_nents, i) { |
2391 | pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", | 2306 | isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, " |
2392 | (unsigned long long)tmp_sg->dma_address, | 2307 | "page_off: %u\n", |
2393 | tmp_sg->length, page_off); | 2308 | (unsigned long long)tmp_sg->dma_address, |
2309 | tmp_sg->length, page_off); | ||
2394 | 2310 | ||
2395 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; | 2311 | ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; |
2396 | ib_sge->length = min_t(u32, data_left, | 2312 | ib_sge->length = min_t(u32, data_left, |
2397 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); | 2313 | ib_sg_dma_len(ib_dev, tmp_sg) - page_off); |
2398 | ib_sge->lkey = isert_conn->conn_mr->lkey; | 2314 | ib_sge->lkey = isert_conn->conn_mr->lkey; |
2399 | 2315 | ||
2400 | pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", | 2316 | isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", |
2401 | ib_sge->addr, ib_sge->length, ib_sge->lkey); | 2317 | ib_sge->addr, ib_sge->length, ib_sge->lkey); |
2402 | page_off = 0; | 2318 | page_off = 0; |
2403 | data_left -= ib_sge->length; | 2319 | data_left -= ib_sge->length; |
2404 | ib_sge++; | 2320 | ib_sge++; |
2405 | pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); | 2321 | isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); |
2406 | } | 2322 | } |
2407 | 2323 | ||
2408 | pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", | 2324 | isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", |
2409 | send_wr->sg_list, send_wr->num_sge); | 2325 | send_wr->sg_list, send_wr->num_sge); |
2410 | 2326 | ||
2411 | return sg_nents; | 2327 | return sg_nents; |
2412 | } | 2328 | } |
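Each ib_sge takes min(data_left, dma_len - page_off) bytes, with page_off consumed only by the first entry. Plugging in illustrative numbers (10000 bytes left, 4 KiB DMA segments, 1 KiB starting offset):

#include <stdio.h>

int main(void)
{
	unsigned int data_left = 10000, page_off = 1024;
	unsigned int dma_len[] = { 4096, 4096, 4096 };

	for (unsigned int i = 0; i < 3 && data_left; i++) {
		unsigned int len = dma_len[i] - page_off;

		if (len > data_left)
			len = data_left;
		printf("ib_sge[%u].length = %u\n", i, len);

		data_left -= len;
		page_off = 0;	/* only the first sge is offset */
	}
	return 0;	/* prints 3072, 4096, 2832 */
}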
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2438 | 2354 | ||
2439 | ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); | 2355 | ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); |
2440 | if (!ib_sge) { | 2356 | if (!ib_sge) { |
2441 | pr_warn("Unable to allocate ib_sge\n"); | 2357 | isert_warn("Unable to allocate ib_sge\n"); |
2442 | ret = -ENOMEM; | 2358 | ret = -ENOMEM; |
2443 | goto unmap_cmd; | 2359 | goto unmap_cmd; |
2444 | } | 2360 | } |
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2448 | wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, | 2364 | wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, |
2449 | GFP_KERNEL); | 2365 | GFP_KERNEL); |
2450 | if (!wr->send_wr) { | 2366 | if (!wr->send_wr) { |
2451 | pr_debug("Unable to allocate wr->send_wr\n"); | 2367 | isert_dbg("Unable to allocate wr->send_wr\n"); |
2452 | ret = -ENOMEM; | 2368 | ret = -ENOMEM; |
2453 | goto unmap_cmd; | 2369 | goto unmap_cmd; |
2454 | } | 2370 | } |
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2512 | chunk_start = start_addr; | 2428 | chunk_start = start_addr; |
2513 | end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); | 2429 | end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); |
2514 | 2430 | ||
2515 | pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", | 2431 | isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n", |
2516 | i, (unsigned long long)tmp_sg->dma_address, | 2432 | i, (unsigned long long)tmp_sg->dma_address, |
2517 | tmp_sg->length); | 2433 | tmp_sg->length); |
2518 | 2434 | ||
2519 | if ((end_addr & ~PAGE_MASK) && i < last_ent) { | 2435 | if ((end_addr & ~PAGE_MASK) && i < last_ent) { |
2520 | new_chunk = 0; | 2436 | new_chunk = 0; |
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2525 | page = chunk_start & PAGE_MASK; | 2441 | page = chunk_start & PAGE_MASK; |
2526 | do { | 2442 | do { |
2527 | fr_pl[n_pages++] = page; | 2443 | fr_pl[n_pages++] = page; |
2528 | pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", | 2444 | isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n", |
2529 | n_pages - 1, page); | 2445 | n_pages - 1, page); |
2530 | page += PAGE_SIZE; | 2446 | page += PAGE_SIZE; |
2531 | } while (page < end_addr); | 2447 | } while (page < end_addr); |
2532 | } | 2448 | } |
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, | |||
2534 | return n_pages; | 2450 | return n_pages; |
2535 | } | 2451 | } |
2536 | 2452 | ||
2453 | static inline void | ||
2454 | isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) | ||
2455 | { | ||
2456 | u32 rkey; | ||
2457 | |||
2458 | memset(inv_wr, 0, sizeof(*inv_wr)); | ||
2459 | inv_wr->wr_id = ISER_FASTREG_LI_WRID; | ||
2460 | inv_wr->opcode = IB_WR_LOCAL_INV; | ||
2461 | inv_wr->ex.invalidate_rkey = mr->rkey; | ||
2462 | |||
2463 | /* Bump the key */ | ||
2464 | rkey = ib_inc_rkey(mr->rkey); | ||
2465 | ib_update_fast_reg_key(mr, rkey); | ||
2466 | } | ||
2467 | |||
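The new isert_inv_rkey() helper consolidates the local-invalidate WR and replaces the old open-coded key bump (removed later in this diff) with ib_inc_rkey(), which increments only the low 8 "key" bits of the rkey while preserving the 24-bit index, so a stale remote key can never match the re-registered MR. The arithmetic, reproduced standalone:

#include <stdint.h>
#include <stdio.h>

/* Mirrors ib_inc_rkey(): bump the 8-bit key, keep the 24-bit index. */
static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
	printf("0x%08x\n", inc_rkey(0x12345aff));	/* 0x12345a00 */
	return 0;
}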
2537 | static int | 2468 | static int |
2538 | isert_fast_reg_mr(struct isert_conn *isert_conn, | 2469 | isert_fast_reg_mr(struct isert_conn *isert_conn, |
2539 | struct fast_reg_descriptor *fr_desc, | 2470 | struct fast_reg_descriptor *fr_desc, |
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2548 | struct ib_send_wr *bad_wr, *wr = NULL; | 2479 | struct ib_send_wr *bad_wr, *wr = NULL; |
2549 | int ret, pagelist_len; | 2480 | int ret, pagelist_len; |
2550 | u32 page_off; | 2481 | u32 page_off; |
2551 | u8 key; | ||
2552 | 2482 | ||
2553 | if (mem->dma_nents == 1) { | 2483 | if (mem->dma_nents == 1) { |
2554 | sge->lkey = isert_conn->conn_mr->lkey; | 2484 | sge->lkey = isert_conn->conn_mr->lkey; |
2555 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); | 2485 | sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); |
2556 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); | 2486 | sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); |
2557 | pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", | 2487 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", |
2558 | __func__, __LINE__, sge->addr, sge->length, | 2488 | sge->addr, sge->length, sge->lkey); |
2559 | sge->lkey); | ||
2560 | return 0; | 2489 | return 0; |
2561 | } | 2490 | } |
2562 | 2491 | ||
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2572 | 2501 | ||
2573 | page_off = mem->offset % PAGE_SIZE; | 2502 | page_off = mem->offset % PAGE_SIZE; |
2574 | 2503 | ||
2575 | pr_debug("Use fr_desc %p sg_nents %d offset %u\n", | 2504 | isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", |
2576 | fr_desc, mem->nents, mem->offset); | 2505 | fr_desc, mem->nents, mem->offset); |
2577 | 2506 | ||
2578 | pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, | 2507 | pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, |
2579 | &frpl->page_list[0]); | 2508 | &frpl->page_list[0]); |
2580 | 2509 | ||
2581 | if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { | 2510 | if (!(fr_desc->ind & ind)) { |
2582 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2511 | isert_inv_rkey(&inv_wr, mr); |
2583 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2584 | inv_wr.opcode = IB_WR_LOCAL_INV; | ||
2585 | inv_wr.ex.invalidate_rkey = mr->rkey; | ||
2586 | wr = &inv_wr; | 2512 | wr = &inv_wr; |
2587 | /* Bump the key */ | ||
2588 | key = (u8)(mr->rkey & 0x000000FF); | ||
2589 | ib_update_fast_reg_key(mr, ++key); | ||
2590 | } | 2513 | } |
2591 | 2514 | ||
2592 | /* Prepare FASTREG WR */ | 2515 | /* Prepare FASTREG WR */ |
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2608 | 2531 | ||
2609 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2532 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); |
2610 | if (ret) { | 2533 | if (ret) { |
2611 | pr_err("fast registration failed, ret:%d\n", ret); | 2534 | isert_err("fast registration failed, ret:%d\n", ret); |
2612 | return ret; | 2535 | return ret; |
2613 | } | 2536 | } |
2614 | fr_desc->ind &= ~ind; | 2537 | fr_desc->ind &= ~ind; |
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2617 | sge->addr = frpl->page_list[0] + page_off; | 2540 | sge->addr = frpl->page_list[0] + page_off; |
2618 | sge->length = mem->len; | 2541 | sge->length = mem->len; |
2619 | 2542 | ||
2620 | pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", | 2543 | isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", |
2621 | __func__, __LINE__, sge->addr, sge->length, | 2544 | sge->addr, sge->length, sge->lkey); |
2622 | sge->lkey); | ||
2623 | 2545 | ||
2624 | return ret; | 2546 | return ret; |
2625 | } | 2547 | } |
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) | |||
2665 | isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); | 2587 | isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); |
2666 | break; | 2588 | break; |
2667 | default: | 2589 | default: |
2668 | pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); | 2590 | isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); |
2669 | return -EINVAL; | 2591 | return -EINVAL; |
2670 | } | 2592 | } |
2671 | 2593 | ||
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks) | |||
2681 | } | 2603 | } |
2682 | 2604 | ||
2683 | static int | 2605 | static int |
2684 | isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | 2606 | isert_reg_sig_mr(struct isert_conn *isert_conn, |
2685 | struct fast_reg_descriptor *fr_desc, | 2607 | struct se_cmd *se_cmd, |
2686 | struct ib_sge *data_sge, struct ib_sge *prot_sge, | 2608 | struct isert_rdma_wr *rdma_wr, |
2687 | struct ib_sge *sig_sge) | 2609 | struct fast_reg_descriptor *fr_desc) |
2688 | { | 2610 | { |
2689 | struct ib_send_wr sig_wr, inv_wr; | 2611 | struct ib_send_wr sig_wr, inv_wr; |
2690 | struct ib_send_wr *bad_wr, *wr = NULL; | 2612 | struct ib_send_wr *bad_wr, *wr = NULL; |
2691 | struct pi_context *pi_ctx = fr_desc->pi_ctx; | 2613 | struct pi_context *pi_ctx = fr_desc->pi_ctx; |
2692 | struct ib_sig_attrs sig_attrs; | 2614 | struct ib_sig_attrs sig_attrs; |
2693 | int ret; | 2615 | int ret; |
2694 | u32 key; | ||
2695 | 2616 | ||
2696 | memset(&sig_attrs, 0, sizeof(sig_attrs)); | 2617 | memset(&sig_attrs, 0, sizeof(sig_attrs)); |
2697 | ret = isert_set_sig_attrs(se_cmd, &sig_attrs); | 2618 | ret = isert_set_sig_attrs(se_cmd, &sig_attrs); |
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | |||
2701 | sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); | 2622 | sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); |
2702 | 2623 | ||
2703 | if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { | 2624 | if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { |
2704 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2625 | isert_inv_rkey(&inv_wr, pi_ctx->sig_mr); |
2705 | inv_wr.opcode = IB_WR_LOCAL_INV; | ||
2706 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2707 | inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; | ||
2708 | wr = &inv_wr; | 2626 | wr = &inv_wr; |
2709 | /* Bump the key */ | ||
2710 | key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); | ||
2711 | ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); | ||
2712 | } | 2627 | } |
2713 | 2628 | ||
2714 | memset(&sig_wr, 0, sizeof(sig_wr)); | 2629 | memset(&sig_wr, 0, sizeof(sig_wr)); |
2715 | sig_wr.opcode = IB_WR_REG_SIG_MR; | 2630 | sig_wr.opcode = IB_WR_REG_SIG_MR; |
2716 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; | 2631 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; |
2717 | sig_wr.sg_list = data_sge; | 2632 | sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; |
2718 | sig_wr.num_sge = 1; | 2633 | sig_wr.num_sge = 1; |
2719 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; | 2634 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; |
2720 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; | 2635 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; |
2721 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; | 2636 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; |
2722 | if (se_cmd->t_prot_sg) | 2637 | if (se_cmd->t_prot_sg) |
2723 | sig_wr.wr.sig_handover.prot = prot_sge; | 2638 | sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; |
2724 | 2639 | ||
2725 | if (!wr) | 2640 | if (!wr) |
2726 | wr = &sig_wr; | 2641 | wr = &sig_wr; |
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, | |||
2729 | 2644 | ||
2730 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); | 2645 | ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); |
2731 | if (ret) { | 2646 | if (ret) { |
2732 | pr_err("fast registration failed, ret:%d\n", ret); | 2647 | isert_err("fast registration failed, ret:%d\n", ret); |
2733 | goto err; | 2648 | goto err; |
2734 | } | 2649 | } |
2735 | fr_desc->ind &= ~ISERT_SIG_KEY_VALID; | 2650 | fr_desc->ind &= ~ISERT_SIG_KEY_VALID; |
2736 | 2651 | ||
2737 | sig_sge->lkey = pi_ctx->sig_mr->lkey; | 2652 | rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey; |
2738 | sig_sge->addr = 0; | 2653 | rdma_wr->ib_sg[SIG].addr = 0; |
2739 | sig_sge->length = se_cmd->data_length; | 2654 | rdma_wr->ib_sg[SIG].length = se_cmd->data_length; |
2740 | if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && | 2655 | if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && |
2741 | se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) | 2656 | se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) |
2742 | /* | 2657 | /* |
2743 | * We have protection guards on the wire | 2658 | * We have protection guards on the wire |
2744 | * so we need to set a larger transfer | 2659 | * so we need to set a larger transfer |
2745 | */ | 2660 | */ |
2746 | sig_sge->length += se_cmd->prot_length; | 2661 | rdma_wr->ib_sg[SIG].length += se_cmd->prot_length; |
2747 | 2662 | ||
2748 | pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", | 2663 | isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n", |
2749 | sig_sge->addr, sig_sge->length, | 2664 | rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length, |
2750 | sig_sge->lkey); | 2665 | rdma_wr->ib_sg[SIG].lkey); |
2751 | err: | 2666 | err: |
2752 | return ret; | 2667 | return ret; |
2753 | } | 2668 | } |
2754 | 2669 | ||
2755 | static int | 2670 | static int |
2671 | isert_handle_prot_cmd(struct isert_conn *isert_conn, | ||
2672 | struct isert_cmd *isert_cmd, | ||
2673 | struct isert_rdma_wr *wr) | ||
2674 | { | ||
2675 | struct isert_device *device = isert_conn->conn_device; | ||
2676 | struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; | ||
2677 | int ret; | ||
2678 | |||
2679 | if (!wr->fr_desc->pi_ctx) { | ||
2680 | ret = isert_create_pi_ctx(wr->fr_desc, | ||
2681 | device->ib_device, | ||
2682 | isert_conn->conn_pd); | ||
2683 | if (ret) { | ||
2684 | isert_err("conn %p failed to allocate pi_ctx\n", | ||
2685 | isert_conn); | ||
2686 | return ret; | ||
2687 | } | ||
2688 | } | ||
2689 | |||
2690 | if (se_cmd->t_prot_sg) { | ||
2691 | ret = isert_map_data_buf(isert_conn, isert_cmd, | ||
2692 | se_cmd->t_prot_sg, | ||
2693 | se_cmd->t_prot_nents, | ||
2694 | se_cmd->prot_length, | ||
2695 | 0, wr->iser_ib_op, &wr->prot); | ||
2696 | if (ret) { | ||
2697 | isert_err("conn %p failed to map protection buffer\n", | ||
2698 | isert_conn); | ||
2699 | return ret; | ||
2700 | } | ||
2701 | |||
2702 | memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT])); | ||
2703 | ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot, | ||
2704 | ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]); | ||
2705 | if (ret) { | ||
2706 | isert_err("conn %p failed to fast reg mr\n", | ||
2707 | isert_conn); | ||
2708 | goto unmap_prot_cmd; | ||
2709 | } | ||
2710 | } | ||
2711 | |||
2712 | ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc); | ||
2713 | if (ret) { | ||
2714 | isert_err("conn %p failed to fast reg mr\n", | ||
2715 | isert_conn); | ||
2716 | goto unmap_prot_cmd; | ||
2717 | } | ||
2718 | wr->fr_desc->ind |= ISERT_PROTECTED; | ||
2719 | |||
2720 | return 0; | ||
2721 | |||
2722 | unmap_prot_cmd: | ||
2723 | if (se_cmd->t_prot_sg) | ||
2724 | isert_unmap_data_buf(isert_conn, &wr->prot); | ||
2725 | |||
2726 | return ret; | ||
2727 | } | ||
2728 | |||
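isert_handle_prot_cmd() factors the T10-PI setup out of isert_reg_rdma(): it creates the pi_ctx on demand when the descriptor does not yet have one, maps and fast-registers the protection SGL (when t_prot_sg is present) into ib_sg[PROT], and registers the signature MR last. The fr_desc->ind bitmask drives the invalidate-before-reuse protocol visible in isert_fast_reg_mr(); the flag names appear in this diff, and a sketch with assumed bit values (the real definitions live in ib_isert.h, outside this hunk) reads:

    /* Assumed layout, for illustration only: one "valid" bit per MR
     * type.  A cleared bit means the key was consumed, so the MR must
     * be invalidated (rkey bumped via isert_inv_rkey()) before reuse. */
    enum {
            ISERT_DATA_KEY_VALID    = 1 << 0,
            ISERT_PROT_KEY_VALID    = 1 << 1,
            ISERT_SIG_KEY_VALID     = 1 << 2,
            ISERT_PROTECTED         = 1 << 3,   /* sig MR live for this cmd */
    };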
2729 | static int | ||
2756 | isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | 2730 | isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, |
2757 | struct isert_rdma_wr *wr) | 2731 | struct isert_rdma_wr *wr) |
2758 | { | 2732 | { |
2759 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2733 | struct se_cmd *se_cmd = &cmd->se_cmd; |
2760 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2734 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2761 | struct isert_conn *isert_conn = conn->context; | 2735 | struct isert_conn *isert_conn = conn->context; |
2762 | struct ib_sge data_sge; | ||
2763 | struct ib_send_wr *send_wr; | ||
2764 | struct fast_reg_descriptor *fr_desc = NULL; | 2736 | struct fast_reg_descriptor *fr_desc = NULL; |
2737 | struct ib_send_wr *send_wr; | ||
2738 | struct ib_sge *ib_sg; | ||
2765 | u32 offset; | 2739 | u32 offset; |
2766 | int ret = 0; | 2740 | int ret = 0; |
2767 | unsigned long flags; | 2741 | unsigned long flags; |
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2775 | if (ret) | 2749 | if (ret) |
2776 | return ret; | 2750 | return ret; |
2777 | 2751 | ||
2778 | if (wr->data.dma_nents != 1 || | 2752 | if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { |
2779 | se_cmd->prot_op != TARGET_PROT_NORMAL) { | ||
2780 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2753 | spin_lock_irqsave(&isert_conn->conn_lock, flags); |
2781 | fr_desc = list_first_entry(&isert_conn->conn_fr_pool, | 2754 | fr_desc = list_first_entry(&isert_conn->conn_fr_pool, |
2782 | struct fast_reg_descriptor, list); | 2755 | struct fast_reg_descriptor, list); |
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2786 | } | 2759 | } |
2787 | 2760 | ||
2788 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, | 2761 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, |
2789 | ISERT_DATA_KEY_VALID, &data_sge); | 2762 | ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]); |
2790 | if (ret) | 2763 | if (ret) |
2791 | goto unmap_cmd; | 2764 | goto unmap_cmd; |
2792 | 2765 | ||
2793 | if (se_cmd->prot_op != TARGET_PROT_NORMAL) { | 2766 | if (isert_prot_cmd(isert_conn, se_cmd)) { |
2794 | struct ib_sge prot_sge, sig_sge; | 2767 | ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr); |
2795 | |||
2796 | if (se_cmd->t_prot_sg) { | ||
2797 | ret = isert_map_data_buf(isert_conn, isert_cmd, | ||
2798 | se_cmd->t_prot_sg, | ||
2799 | se_cmd->t_prot_nents, | ||
2800 | se_cmd->prot_length, | ||
2801 | 0, wr->iser_ib_op, &wr->prot); | ||
2802 | if (ret) | ||
2803 | goto unmap_cmd; | ||
2804 | |||
2805 | ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot, | ||
2806 | ISERT_PROT_KEY_VALID, &prot_sge); | ||
2807 | if (ret) | ||
2808 | goto unmap_prot_cmd; | ||
2809 | } | ||
2810 | |||
2811 | ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc, | ||
2812 | &data_sge, &prot_sge, &sig_sge); | ||
2813 | if (ret) | 2768 | if (ret) |
2814 | goto unmap_prot_cmd; | 2769 | goto unmap_cmd; |
2815 | 2770 | ||
2816 | fr_desc->ind |= ISERT_PROTECTED; | 2771 | ib_sg = &wr->ib_sg[SIG]; |
2817 | memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); | 2772 | } else { |
2818 | } else | 2773 | ib_sg = &wr->ib_sg[DATA]; |
2819 | memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); | 2774 | } |
2820 | 2775 | ||
2776 | memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); | ||
2821 | wr->ib_sge = &wr->s_ib_sge; | 2777 | wr->ib_sge = &wr->s_ib_sge; |
2822 | wr->send_wr_num = 1; | 2778 | wr->send_wr_num = 1; |
2823 | memset(&wr->s_send_wr, 0, sizeof(*send_wr)); | 2779 | memset(&wr->s_send_wr, 0, sizeof(*send_wr)); |
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2827 | send_wr = &isert_cmd->rdma_wr.s_send_wr; | 2783 | send_wr = &isert_cmd->rdma_wr.s_send_wr; |
2828 | send_wr->sg_list = &wr->s_ib_sge; | 2784 | send_wr->sg_list = &wr->s_ib_sge; |
2829 | send_wr->num_sge = 1; | 2785 | send_wr->num_sge = 1; |
2830 | send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; | 2786 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2831 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { | 2787 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { |
2832 | send_wr->opcode = IB_WR_RDMA_WRITE; | 2788 | send_wr->opcode = IB_WR_RDMA_WRITE; |
2833 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va; | 2789 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va; |
2834 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; | 2790 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; |
2835 | send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? | 2791 | send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? |
2836 | 0 : IB_SEND_SIGNALED; | 2792 | 0 : IB_SEND_SIGNALED; |
2837 | } else { | 2793 | } else { |
2838 | send_wr->opcode = IB_WR_RDMA_READ; | 2794 | send_wr->opcode = IB_WR_RDMA_READ; |
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2842 | } | 2798 | } |
2843 | 2799 | ||
2844 | return 0; | 2800 | return 0; |
2845 | unmap_prot_cmd: | 2801 | |
2846 | if (se_cmd->t_prot_sg) | ||
2847 | isert_unmap_data_buf(isert_conn, &wr->prot); | ||
2848 | unmap_cmd: | 2802 | unmap_cmd: |
2849 | if (fr_desc) { | 2803 | if (fr_desc) { |
2850 | spin_lock_irqsave(&isert_conn->conn_lock, flags); | 2804 | spin_lock_irqsave(&isert_conn->conn_lock, flags); |
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2867 | struct ib_send_wr *wr_failed; | 2821 | struct ib_send_wr *wr_failed; |
2868 | int rc; | 2822 | int rc; |
2869 | 2823 | ||
2870 | pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", | 2824 | isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", |
2871 | isert_cmd, se_cmd->data_length); | 2825 | isert_cmd, se_cmd->data_length); |
2826 | |||
2872 | wr->iser_ib_op = ISER_IB_RDMA_WRITE; | 2827 | wr->iser_ib_op = ISER_IB_RDMA_WRITE; |
2873 | rc = device->reg_rdma_mem(conn, cmd, wr); | 2828 | rc = device->reg_rdma_mem(conn, cmd, wr); |
2874 | if (rc) { | 2829 | if (rc) { |
2875 | pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); | 2830 | isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); |
2876 | return rc; | 2831 | return rc; |
2877 | } | 2832 | } |
2878 | 2833 | ||
2879 | if (se_cmd->prot_op == TARGET_PROT_NORMAL) { | 2834 | if (!isert_prot_cmd(isert_conn, se_cmd)) { |
2880 | /* | 2835 | /* |
2881 | * Build isert_conn->tx_desc for iSCSI response PDU and attach | 2836 | * Build isert_conn->tx_desc for iSCSI response PDU and attach |
2882 | */ | 2837 | */ |
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2886 | &isert_cmd->tx_desc.iscsi_header); | 2841 | &isert_cmd->tx_desc.iscsi_header); |
2887 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2842 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2888 | isert_init_send_wr(isert_conn, isert_cmd, | 2843 | isert_init_send_wr(isert_conn, isert_cmd, |
2889 | &isert_cmd->tx_desc.send_wr, false); | 2844 | &isert_cmd->tx_desc.send_wr); |
2890 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; | 2845 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; |
2891 | wr->send_wr_num += 1; | 2846 | wr->send_wr_num += 1; |
2892 | } | 2847 | } |
2893 | 2848 | ||
2894 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2895 | |||
2896 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2849 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2897 | if (rc) { | 2850 | if (rc) |
2898 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2851 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
2899 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2900 | } | ||
2901 | 2852 | ||
2902 | if (se_cmd->prot_op == TARGET_PROT_NORMAL) | 2853 | if (!isert_prot_cmd(isert_conn, se_cmd)) |
2903 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " | 2854 | isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data " |
2904 | "READ\n", isert_cmd); | 2855 | "READ\n", isert_cmd); |
2905 | else | 2856 | else |
2906 | pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", | 2857 | isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", |
2907 | isert_cmd); | 2858 | isert_cmd); |
2908 | 2859 | ||
2909 | return 1; | 2860 | return 1; |
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2920 | struct ib_send_wr *wr_failed; | 2871 | struct ib_send_wr *wr_failed; |
2921 | int rc; | 2872 | int rc; |
2922 | 2873 | ||
2923 | pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", | 2874 | isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", |
2924 | isert_cmd, se_cmd->data_length, cmd->write_data_done); | 2875 | isert_cmd, se_cmd->data_length, cmd->write_data_done); |
2925 | wr->iser_ib_op = ISER_IB_RDMA_READ; | 2876 | wr->iser_ib_op = ISER_IB_RDMA_READ; |
2926 | rc = device->reg_rdma_mem(conn, cmd, wr); | 2877 | rc = device->reg_rdma_mem(conn, cmd, wr); |
2927 | if (rc) { | 2878 | if (rc) { |
2928 | pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); | 2879 | isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); |
2929 | return rc; | 2880 | return rc; |
2930 | } | 2881 | } |
2931 | 2882 | ||
2932 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); | ||
2933 | |||
2934 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2883 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2935 | if (rc) { | 2884 | if (rc) |
2936 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2885 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
2937 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); | 2886 | |
2938 | } | 2887 | isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", |
2939 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", | ||
2940 | isert_cmd); | 2888 | isert_cmd); |
2941 | 2889 | ||
2942 | return 0; | 2890 | return 0; |
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2952 | ret = isert_put_nopin(cmd, conn, false); | 2900 | ret = isert_put_nopin(cmd, conn, false); |
2953 | break; | 2901 | break; |
2954 | default: | 2902 | default: |
2955 | pr_err("Unknown immediate state: 0x%02x\n", state); | 2903 | isert_err("Unknown immediate state: 0x%02x\n", state); |
2956 | ret = -EINVAL; | 2904 | ret = -EINVAL; |
2957 | break; | 2905 | break; |
2958 | } | 2906 | } |
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2963 | static int | 2911 | static int |
2964 | isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | 2912 | isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) |
2965 | { | 2913 | { |
2914 | struct isert_conn *isert_conn = conn->context; | ||
2966 | int ret; | 2915 | int ret; |
2967 | 2916 | ||
2968 | switch (state) { | 2917 | switch (state) { |
2969 | case ISTATE_SEND_LOGOUTRSP: | 2918 | case ISTATE_SEND_LOGOUTRSP: |
2970 | ret = isert_put_logout_rsp(cmd, conn); | 2919 | ret = isert_put_logout_rsp(cmd, conn); |
2971 | if (!ret) { | 2920 | if (!ret) |
2972 | pr_debug("Returning iSER Logout -EAGAIN\n"); | 2921 | isert_conn->logout_posted = true; |
2973 | ret = -EAGAIN; | ||
2974 | } | ||
2975 | break; | 2922 | break; |
2976 | case ISTATE_SEND_NOPIN: | 2923 | case ISTATE_SEND_NOPIN: |
2977 | ret = isert_put_nopin(cmd, conn, true); | 2924 | ret = isert_put_nopin(cmd, conn, true); |
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
2993 | ret = isert_put_response(conn, cmd); | 2940 | ret = isert_put_response(conn, cmd); |
2994 | break; | 2941 | break; |
2995 | default: | 2942 | default: |
2996 | pr_err("Unknown response state: 0x%02x\n", state); | 2943 | isert_err("Unknown response state: 0x%02x\n", state); |
2997 | ret = -EINVAL; | 2944 | ret = -EINVAL; |
2998 | break; | 2945 | break; |
2999 | } | 2946 | } |
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | |||
3001 | return ret; | 2948 | return ret; |
3002 | } | 2949 | } |
3003 | 2950 | ||
2951 | struct rdma_cm_id * | ||
2952 | isert_setup_id(struct isert_np *isert_np) | ||
2953 | { | ||
2954 | struct iscsi_np *np = isert_np->np; | ||
2955 | struct rdma_cm_id *id; | ||
2956 | struct sockaddr *sa; | ||
2957 | int ret; | ||
2958 | |||
2959 | sa = (struct sockaddr *)&np->np_sockaddr; | ||
2960 | isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); | ||
2961 | |||
2962 | id = rdma_create_id(isert_cma_handler, isert_np, | ||
2963 | RDMA_PS_TCP, IB_QPT_RC); | ||
2964 | if (IS_ERR(id)) { | ||
2965 | isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); | ||
2966 | ret = PTR_ERR(id); | ||
2967 | goto out; | ||
2968 | } | ||
2969 | isert_dbg("id %p context %p\n", id, id->context); | ||
2970 | |||
2971 | ret = rdma_bind_addr(id, sa); | ||
2972 | if (ret) { | ||
2973 | isert_err("rdma_bind_addr() failed: %d\n", ret); | ||
2974 | goto out_id; | ||
2975 | } | ||
2976 | |||
2977 | ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); | ||
2978 | if (ret) { | ||
2979 | isert_err("rdma_listen() failed: %d\n", ret); | ||
2980 | goto out_id; | ||
2981 | } | ||
2982 | |||
2983 | return id; | ||
2984 | out_id: | ||
2985 | rdma_destroy_id(id); | ||
2986 | out: | ||
2987 | return ERR_PTR(ret); | ||
2988 | } | ||
2989 | |||
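isert_setup_id() pulls the rdma_cm listener bring-up (create id, bind, listen) out of isert_setup_np() so it can be reused if the listener ever needs to be re-created, and reports failure via the kernel's ERR_PTR convention rather than NULL. A minimal, hypothetical caller illustrating that convention:

    #include <linux/err.h>

    /* example_setup_listener() is hypothetical; it shows the
     * IS_ERR()/PTR_ERR() decode expected of isert_setup_id() callers. */
    static int example_setup_listener(struct isert_np *isert_np,
                                      struct rdma_cm_id **out_id)
    {
            struct rdma_cm_id *id = isert_setup_id(isert_np);

            if (IS_ERR(id))
                    return PTR_ERR(id);     /* e.g. -ENOMEM, -EADDRINUSE */

            *out_id = id;
            return 0;
    }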
3004 | static int | 2990 | static int |
3005 | isert_setup_np(struct iscsi_np *np, | 2991 | isert_setup_np(struct iscsi_np *np, |
3006 | struct __kernel_sockaddr_storage *ksockaddr) | 2992 | struct __kernel_sockaddr_storage *ksockaddr) |
3007 | { | 2993 | { |
3008 | struct isert_np *isert_np; | 2994 | struct isert_np *isert_np; |
3009 | struct rdma_cm_id *isert_lid; | 2995 | struct rdma_cm_id *isert_lid; |
3010 | struct sockaddr *sa; | ||
3011 | int ret; | 2996 | int ret; |
3012 | 2997 | ||
3013 | isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); | 2998 | isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); |
3014 | if (!isert_np) { | 2999 | if (!isert_np) { |
3015 | pr_err("Unable to allocate struct isert_np\n"); | 3000 | isert_err("Unable to allocate struct isert_np\n"); |
3016 | return -ENOMEM; | 3001 | return -ENOMEM; |
3017 | } | 3002 | } |
3018 | sema_init(&isert_np->np_sem, 0); | 3003 | sema_init(&isert_np->np_sem, 0); |
3019 | mutex_init(&isert_np->np_accept_mutex); | 3004 | mutex_init(&isert_np->np_accept_mutex); |
3020 | INIT_LIST_HEAD(&isert_np->np_accept_list); | 3005 | INIT_LIST_HEAD(&isert_np->np_accept_list); |
3021 | init_completion(&isert_np->np_login_comp); | 3006 | init_completion(&isert_np->np_login_comp); |
3007 | isert_np->np = np; | ||
3022 | 3008 | ||
3023 | sa = (struct sockaddr *)ksockaddr; | ||
3024 | pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); | ||
3025 | /* | 3009 | /* |
3026 | * Set up np->np_sockaddr from the sockaddr passed in from | 3010 | * Set up np->np_sockaddr from the sockaddr passed in from |
3027 | * iscsi_target_configfs.c. | 3011 | * iscsi_target_configfs.c. |
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np, | |||
3029 | memcpy(&np->np_sockaddr, ksockaddr, | 3013 | memcpy(&np->np_sockaddr, ksockaddr, |
3030 | sizeof(struct __kernel_sockaddr_storage)); | 3014 | sizeof(struct __kernel_sockaddr_storage)); |
3031 | 3015 | ||
3032 | isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, | 3016 | isert_lid = isert_setup_id(isert_np); |
3033 | IB_QPT_RC); | ||
3034 | if (IS_ERR(isert_lid)) { | 3017 | if (IS_ERR(isert_lid)) { |
3035 | pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", | ||
3036 | PTR_ERR(isert_lid)); | ||
3037 | ret = PTR_ERR(isert_lid); | 3018 | ret = PTR_ERR(isert_lid); |
3038 | goto out; | 3019 | goto out; |
3039 | } | 3020 | } |
3040 | 3021 | ||
3041 | ret = rdma_bind_addr(isert_lid, sa); | ||
3042 | if (ret) { | ||
3043 | pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); | ||
3044 | goto out_lid; | ||
3045 | } | ||
3046 | |||
3047 | ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); | ||
3048 | if (ret) { | ||
3049 | pr_err("rdma_listen() for isert_lid failed: %d\n", ret); | ||
3050 | goto out_lid; | ||
3051 | } | ||
3052 | |||
3053 | isert_np->np_cm_id = isert_lid; | 3022 | isert_np->np_cm_id = isert_lid; |
3054 | np->np_context = isert_np; | 3023 | np->np_context = isert_np; |
3055 | pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); | ||
3056 | 3024 | ||
3057 | return 0; | 3025 | return 0; |
3058 | 3026 | ||
3059 | out_lid: | ||
3060 | rdma_destroy_id(isert_lid); | ||
3061 | out: | 3027 | out: |
3062 | kfree(isert_np); | 3028 | kfree(isert_np); |
3029 | |||
3063 | return ret; | 3030 | return ret; |
3064 | } | 3031 | } |
3065 | 3032 | ||
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn) | |||
3075 | cp.retry_count = 7; | 3042 | cp.retry_count = 7; |
3076 | cp.rnr_retry_count = 7; | 3043 | cp.rnr_retry_count = 7; |
3077 | 3044 | ||
3078 | pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); | ||
3079 | |||
3080 | ret = rdma_accept(cm_id, &cp); | 3045 | ret = rdma_accept(cm_id, &cp); |
3081 | if (ret) { | 3046 | if (ret) { |
3082 | pr_err("rdma_accept() failed with: %d\n", ret); | 3047 | isert_err("rdma_accept() failed with: %d\n", ret); |
3083 | return ret; | 3048 | return ret; |
3084 | } | 3049 | } |
3085 | 3050 | ||
3086 | pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); | ||
3087 | |||
3088 | return 0; | 3051 | return 0; |
3089 | } | 3052 | } |
3090 | 3053 | ||
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | |||
3094 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; | 3057 | struct isert_conn *isert_conn = (struct isert_conn *)conn->context; |
3095 | int ret; | 3058 | int ret; |
3096 | 3059 | ||
3097 | pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); | 3060 | isert_info("before login_req comp conn: %p\n", isert_conn); |
3061 | ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); | ||
3062 | if (ret) { | ||
3063 | isert_err("isert_conn %p interrupted before got login req\n", | ||
3064 | isert_conn); | ||
3065 | return ret; | ||
3066 | } | ||
3067 | reinit_completion(&isert_conn->login_req_comp); | ||
3068 | |||
3098 | /* | 3069 | /* |
3099 | * For login requests after the first PDU, isert_rx_login_req() will | 3070 | * For login requests after the first PDU, isert_rx_login_req() will |
3100 | * kick schedule_delayed_work(&conn->login_work) as the packet is | 3071 | * kick schedule_delayed_work(&conn->login_work) as the packet is |
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | |||
3104 | if (!login->first_request) | 3075 | if (!login->first_request) |
3105 | return 0; | 3076 | return 0; |
3106 | 3077 | ||
3078 | isert_rx_login_req(isert_conn); | ||
3079 | |||
3080 | isert_info("before conn_login_comp conn: %p\n", conn); | ||
3107 | ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); | 3081 | ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); |
3108 | if (ret) | 3082 | if (ret) |
3109 | return ret; | 3083 | return ret; |
3110 | 3084 | ||
3111 | pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); | 3085 | isert_info("processing login->req: %p\n", login->req); |
3086 | |||
3112 | return 0; | 3087 | return 0; |
3113 | } | 3088 | } |
3114 | 3089 | ||
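The reworked isert_get_login_rx() closes a login-sequence race: the thread now sleeps on login_req_comp until the first login PDU has actually arrived, and only then calls isert_rx_login_req() to process it. A hedged sketch of the producer side this pairs with, condensed from the RX path elsewhere in this patch (not shown in this hunk):

    /* Condensed, assumed shape of the RX-side signal: when the received
     * descriptor is the dedicated login buffer, record the payload
     * length and wake the waiter in isert_get_login_rx(). */
    static void example_login_rx(struct isert_conn *isert_conn,
                                 struct iser_rx_desc *desc, u32 xfer_len)
    {
            if ((char *)desc == isert_conn->login_req_buf) {
                    isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
                    complete(&isert_conn->login_req_comp);
            }
    }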
@@ -3161,7 +3136,7 @@ accept_wait: | |||
3161 | spin_lock_bh(&np->np_thread_lock); | 3136 | spin_lock_bh(&np->np_thread_lock); |
3162 | if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { | 3137 | if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { |
3163 | spin_unlock_bh(&np->np_thread_lock); | 3138 | spin_unlock_bh(&np->np_thread_lock); |
3164 | pr_debug("np_thread_state %d for isert_accept_np\n", | 3139 | isert_dbg("np_thread_state %d for isert_accept_np\n", |
3165 | np->np_thread_state); | 3140 | np->np_thread_state); |
3166 | /** | 3141 | /** |
3167 | * No point in stalling here when np_thread | 3142 | * No point in stalling here when np_thread |
@@ -3186,17 +3161,10 @@ accept_wait: | |||
3186 | isert_conn->conn = conn; | 3161 | isert_conn->conn = conn; |
3187 | max_accept = 0; | 3162 | max_accept = 0; |
3188 | 3163 | ||
3189 | ret = isert_rdma_post_recvl(isert_conn); | ||
3190 | if (ret) | ||
3191 | return ret; | ||
3192 | |||
3193 | ret = isert_rdma_accept(isert_conn); | ||
3194 | if (ret) | ||
3195 | return ret; | ||
3196 | |||
3197 | isert_set_conn_info(np, conn, isert_conn); | 3164 | isert_set_conn_info(np, conn, isert_conn); |
3198 | 3165 | ||
3199 | pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); | 3166 | isert_dbg("Processing isert_conn: %p\n", isert_conn); |
3167 | |||
3200 | return 0; | 3168 | return 0; |
3201 | } | 3169 | } |
3202 | 3170 | ||
@@ -3204,25 +3172,103 @@ static void | |||
3204 | isert_free_np(struct iscsi_np *np) | 3172 | isert_free_np(struct iscsi_np *np) |
3205 | { | 3173 | { |
3206 | struct isert_np *isert_np = (struct isert_np *)np->np_context; | 3174 | struct isert_np *isert_np = (struct isert_np *)np->np_context; |
3175 | struct isert_conn *isert_conn, *n; | ||
3207 | 3176 | ||
3208 | if (isert_np->np_cm_id) | 3177 | if (isert_np->np_cm_id) |
3209 | rdma_destroy_id(isert_np->np_cm_id); | 3178 | rdma_destroy_id(isert_np->np_cm_id); |
3210 | 3179 | ||
3180 | /* | ||
3181 | * FIXME: We have no good way to ensure that there are ||
3182 | * no hanging connections that completed RDMA establishment ||
3183 | * but never started the iscsi login process. So work ||
3184 | * around this by cleaning up whatever piled ||
3185 | * up in np_accept_list. ||
3186 | */ | ||
3187 | mutex_lock(&isert_np->np_accept_mutex); | ||
3188 | if (!list_empty(&isert_np->np_accept_list)) { | ||
3189 | isert_info("Still have isert connections, cleaning up...\n"); | ||
3190 | list_for_each_entry_safe(isert_conn, n, | ||
3191 | &isert_np->np_accept_list, | ||
3192 | conn_accept_node) { | ||
3193 | isert_info("cleaning isert_conn %p state (%d)\n", | ||
3194 | isert_conn, isert_conn->state); | ||
3195 | isert_connect_release(isert_conn); | ||
3196 | } | ||
3197 | } | ||
3198 | mutex_unlock(&isert_np->np_accept_mutex); | ||
3199 | |||
3211 | np->np_context = NULL; | 3200 | np->np_context = NULL; |
3212 | kfree(isert_np); | 3201 | kfree(isert_np); |
3213 | } | 3202 | } |
3214 | 3203 | ||
3204 | static void isert_release_work(struct work_struct *work) | ||
3205 | { | ||
3206 | struct isert_conn *isert_conn = container_of(work, | ||
3207 | struct isert_conn, | ||
3208 | release_work); | ||
3209 | |||
3210 | isert_info("Starting release conn %p\n", isert_conn); | ||
3211 | |||
3212 | wait_for_completion(&isert_conn->conn_wait); | ||
3213 | |||
3214 | mutex_lock(&isert_conn->conn_mutex); | ||
3215 | isert_conn->state = ISER_CONN_DOWN; | ||
3216 | mutex_unlock(&isert_conn->conn_mutex); | ||
3217 | |||
3218 | isert_info("Destroying conn %p\n", isert_conn); | ||
3219 | isert_put_conn(isert_conn); | ||
3220 | } | ||
3221 | |||
3222 | static void | ||
3223 | isert_wait4logout(struct isert_conn *isert_conn) | ||
3224 | { | ||
3225 | struct iscsi_conn *conn = isert_conn->conn; | ||
3226 | |||
3227 | isert_info("conn %p\n", isert_conn); | ||
3228 | |||
3229 | if (isert_conn->logout_posted) { | ||
3230 | isert_info("conn %p wait for conn_logout_comp\n", isert_conn); | ||
3231 | wait_for_completion_timeout(&conn->conn_logout_comp, | ||
3232 | SECONDS_FOR_LOGOUT_COMP * HZ); | ||
3233 | } | ||
3234 | } | ||
3235 | |||
3236 | static void | ||
3237 | isert_wait4cmds(struct iscsi_conn *conn) | ||
3238 | { | ||
3239 | isert_info("iscsi_conn %p\n", conn); | ||
3240 | |||
3241 | if (conn->sess) { | ||
3242 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
3243 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
3244 | } | ||
3245 | } | ||
3246 | |||
3247 | static void | ||
3248 | isert_wait4flush(struct isert_conn *isert_conn) | ||
3249 | { | ||
3250 | struct ib_recv_wr *bad_wr; | ||
3251 | |||
3252 | isert_info("conn %p\n", isert_conn); | ||
3253 | |||
3254 | init_completion(&isert_conn->conn_wait_comp_err); | ||
3255 | isert_conn->beacon.wr_id = ISER_BEACON_WRID; | ||
3256 | /* post an indication that all flush errors were consumed */ | ||
3257 | if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { | ||
3258 | isert_err("conn %p failed to post beacon", isert_conn); | ||
3259 | return; | ||
3260 | } | ||
3261 | |||
3262 | wait_for_completion(&isert_conn->conn_wait_comp_err); | ||
3263 | } | ||
3264 | |||
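isert_wait4flush() introduces a "beacon" to drain the QP: once isert_conn_terminate() has moved the QP to the error state, posting one final receive WR tagged with the reserved ISER_BEACON_WRID guarantees it completes with a flush error only after every previously posted WR has flushed. The completion side then merely watches for that wr_id; a sketch consistent with the handler elsewhere in this patch:

    /* Hedged sketch of the consumer: a flushed completion carrying the
     * beacon wr_id means all earlier flush errors were reaped, so the
     * waiter in isert_wait4flush() can proceed. */
    static void example_handle_flush_err(struct isert_conn *isert_conn,
                                         struct ib_wc *wc)
    {
            if (wc->wr_id == ISER_BEACON_WRID)
                    complete(&isert_conn->conn_wait_comp_err);
    }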
3215 | static void isert_wait_conn(struct iscsi_conn *conn) | 3265 | static void isert_wait_conn(struct iscsi_conn *conn) |
3216 | { | 3266 | { |
3217 | struct isert_conn *isert_conn = conn->context; | 3267 | struct isert_conn *isert_conn = conn->context; |
3218 | 3268 | ||
3219 | pr_debug("isert_wait_conn: Starting \n"); | 3269 | isert_info("Starting conn %p\n", isert_conn); |
3220 | 3270 | ||
3221 | mutex_lock(&isert_conn->conn_mutex); | 3271 | mutex_lock(&isert_conn->conn_mutex); |
3222 | if (isert_conn->conn_cm_id && !isert_conn->disconnect) { | ||
3223 | pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); | ||
3224 | rdma_disconnect(isert_conn->conn_cm_id); | ||
3225 | } | ||
3226 | /* | 3272 | /* |
3227 | * Only wait for conn_wait_comp_err if the isert_conn made it | 3273 | * Only wait for conn_wait_comp_err if the isert_conn made it |
3228 | * into full feature phase.. | 3274 | * into full feature phase.. |
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn) | |||
3231 | mutex_unlock(&isert_conn->conn_mutex); | 3277 | mutex_unlock(&isert_conn->conn_mutex); |
3232 | return; | 3278 | return; |
3233 | } | 3279 | } |
3234 | if (isert_conn->state == ISER_CONN_UP) | 3280 | isert_conn_terminate(isert_conn); |
3235 | isert_conn->state = ISER_CONN_TERMINATING; | ||
3236 | mutex_unlock(&isert_conn->conn_mutex); | 3281 | mutex_unlock(&isert_conn->conn_mutex); |
3237 | 3282 | ||
3238 | wait_for_completion(&isert_conn->conn_wait_comp_err); | 3283 | isert_wait4cmds(conn); |
3284 | isert_wait4flush(isert_conn); | ||
3285 | isert_wait4logout(isert_conn); | ||
3239 | 3286 | ||
3240 | wait_for_completion(&isert_conn->conn_wait); | 3287 | INIT_WORK(&isert_conn->release_work, isert_release_work); |
3241 | isert_put_conn(isert_conn); | 3288 | queue_work(isert_release_wq, &isert_conn->release_work); |
3242 | } | 3289 | } |
3243 | 3290 | ||
3244 | static void isert_free_conn(struct iscsi_conn *conn) | 3291 | static void isert_free_conn(struct iscsi_conn *conn) |
@@ -3273,35 +3320,39 @@ static int __init isert_init(void) | |||
3273 | { | 3320 | { |
3274 | int ret; | 3321 | int ret; |
3275 | 3322 | ||
3276 | isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0); | 3323 | isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); |
3277 | if (!isert_rx_wq) { | 3324 | if (!isert_comp_wq) { |
3278 | pr_err("Unable to allocate isert_rx_wq\n"); | 3325 | isert_err("Unable to allocate isert_comp_wq\n"); |
3326 | ret = -ENOMEM; | ||
3279 | return -ENOMEM; | 3327 | return -ENOMEM; |
3280 | } | 3328 | } |
3281 | 3329 | ||
3282 | isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); | 3330 | isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, |
3283 | if (!isert_comp_wq) { | 3331 | WQ_UNBOUND_MAX_ACTIVE); |
3284 | pr_err("Unable to allocate isert_comp_wq\n"); | 3332 | if (!isert_release_wq) { |
3333 | isert_err("Unable to allocate isert_release_wq\n"); | ||
3285 | ret = -ENOMEM; | 3334 | ret = -ENOMEM; |
3286 | goto destroy_rx_wq; | 3335 | goto destroy_comp_wq; |
3287 | } | 3336 | } |
3288 | 3337 | ||
3289 | iscsit_register_transport(&iser_target_transport); | 3338 | iscsit_register_transport(&iser_target_transport); |
3290 | pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); | 3339 | isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); |
3340 | |||
3291 | return 0; | 3341 | return 0; |
3292 | 3342 | ||
3293 | destroy_rx_wq: | 3343 | destroy_comp_wq: |
3294 | destroy_workqueue(isert_rx_wq); | 3344 | destroy_workqueue(isert_comp_wq); |
3345 | |||
3295 | return ret; | 3346 | return ret; |
3296 | } | 3347 | } |
3297 | 3348 | ||
3298 | static void __exit isert_exit(void) | 3349 | static void __exit isert_exit(void) |
3299 | { | 3350 | { |
3300 | flush_scheduled_work(); | 3351 | flush_scheduled_work(); |
3352 | destroy_workqueue(isert_release_wq); | ||
3301 | destroy_workqueue(isert_comp_wq); | 3353 | destroy_workqueue(isert_comp_wq); |
3302 | destroy_workqueue(isert_rx_wq); | ||
3303 | iscsit_unregister_transport(&iser_target_transport); | 3354 | iscsit_unregister_transport(&iser_target_transport); |
3304 | pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); | 3355 | isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); |
3305 | } | 3356 | } |
3306 | 3357 | ||
3307 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); | 3358 | MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 04f51f7bf614..8dc8415d152d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -4,9 +4,37 @@ | |||
4 | #include <rdma/ib_verbs.h> | 4 | #include <rdma/ib_verbs.h> |
5 | #include <rdma/rdma_cm.h> | 5 | #include <rdma/rdma_cm.h> |
6 | 6 | ||
7 | #define DRV_NAME "isert" | ||
8 | #define PFX DRV_NAME ": " | ||
9 | |||
10 | #define isert_dbg(fmt, arg...) \ | ||
11 | do { \ | ||
12 | if (unlikely(isert_debug_level > 2)) \ | ||
13 | printk(KERN_DEBUG PFX "%s: " fmt,\ | ||
14 | __func__ , ## arg); \ | ||
15 | } while (0) | ||
16 | |||
17 | #define isert_warn(fmt, arg...) \ | ||
18 | do { \ | ||
19 | if (unlikely(isert_debug_level > 0)) \ | ||
20 | pr_warn(PFX "%s: " fmt, \ | ||
21 | __func__ , ## arg); \ | ||
22 | } while (0) | ||
23 | |||
24 | #define isert_info(fmt, arg...) \ | ||
25 | do { \ | ||
26 | if (unlikely(isert_debug_level > 1)) \ | ||
27 | pr_info(PFX "%s: " fmt, \ | ||
28 | __func__ , ## arg); \ | ||
29 | } while (0) | ||
30 | |||
31 | #define isert_err(fmt, arg...) \ | ||
32 | pr_err(PFX "%s: " fmt, __func__ , ## arg) | ||
33 | |||
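The isert_dbg/isert_info/isert_warn/isert_err macros replace the driver's bare pr_* calls and gate output on a runtime isert_debug_level: 1 enables warnings, 2 adds info, 3 adds debug, while errors always print. The knob itself is defined in ib_isert.c; a sketch assuming the usual module_param pattern:

    /* Assumed definition (not in this hunk): exposes the level as
     * /sys/module/ib_isert/parameters/debug_level, writable at runtime. */
    static int isert_debug_level;
    module_param_named(debug_level, isert_debug_level, int, 0644);
    MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default: 0)");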
7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 | 34 | #define ISERT_RDMA_LISTEN_BACKLOG 10 |
8 | #define ISCSI_ISER_SG_TABLESIZE 256 | 35 | #define ISCSI_ISER_SG_TABLESIZE 256 |
9 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | 36 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL |
37 | #define ISER_BEACON_WRID 0xfffffffffffffffeULL | ||
10 | 38 | ||
11 | enum isert_desc_type { | 39 | enum isert_desc_type { |
12 | ISCSI_TX_CONTROL, | 40 | ISCSI_TX_CONTROL, |
@@ -23,6 +51,7 @@ enum iser_ib_op_code { | |||
23 | enum iser_conn_state { | 51 | enum iser_conn_state { |
24 | ISER_CONN_INIT, | 52 | ISER_CONN_INIT, |
25 | ISER_CONN_UP, | 53 | ISER_CONN_UP, |
54 | ISER_CONN_FULL_FEATURE, | ||
26 | ISER_CONN_TERMINATING, | 55 | ISER_CONN_TERMINATING, |
27 | ISER_CONN_DOWN, | 56 | ISER_CONN_DOWN, |
28 | }; | 57 | }; |
@@ -44,9 +73,6 @@ struct iser_tx_desc { | |||
44 | struct ib_sge tx_sg[2]; | 73 | struct ib_sge tx_sg[2]; |
45 | int num_sge; | 74 | int num_sge; |
46 | struct isert_cmd *isert_cmd; | 75 | struct isert_cmd *isert_cmd; |
47 | struct llist_node *comp_llnode_batch; | ||
48 | struct llist_node comp_llnode; | ||
49 | bool llnode_active; | ||
50 | struct ib_send_wr send_wr; | 76 | struct ib_send_wr send_wr; |
51 | } __packed; | 77 | } __packed; |
52 | 78 | ||
@@ -81,6 +107,12 @@ struct isert_data_buf { | |||
81 | enum dma_data_direction dma_dir; | 107 | enum dma_data_direction dma_dir; |
82 | }; | 108 | }; |
83 | 109 | ||
110 | enum { | ||
111 | DATA = 0, | ||
112 | PROT = 1, | ||
113 | SIG = 2, | ||
114 | }; | ||
115 | |||
84 | struct isert_rdma_wr { | 116 | struct isert_rdma_wr { |
85 | struct list_head wr_list; | 117 | struct list_head wr_list; |
86 | struct isert_cmd *isert_cmd; | 118 | struct isert_cmd *isert_cmd; |
@@ -90,6 +122,7 @@ struct isert_rdma_wr { | |||
90 | int send_wr_num; | 122 | int send_wr_num; |
91 | struct ib_send_wr *send_wr; | 123 | struct ib_send_wr *send_wr; |
92 | struct ib_send_wr s_send_wr; | 124 | struct ib_send_wr s_send_wr; |
125 | struct ib_sge ib_sg[3]; | ||
93 | struct isert_data_buf data; | 126 | struct isert_data_buf data; |
94 | struct isert_data_buf prot; | 127 | struct isert_data_buf prot; |
95 | struct fast_reg_descriptor *fr_desc; | 128 | struct fast_reg_descriptor *fr_desc; |
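The DATA/PROT/SIG enum indexes the new ib_sg[3] array in struct isert_rdma_wr above, replacing the separate data_sge/prot_sge/sig_sge stack variables the old code threaded through as parameters. Callers then just pick the slot that should back the RDMA work request's sg_list; a condensed sketch of the selection isert_reg_rdma() performs:

    /* Condensed from isert_reg_rdma(): protected commands put the
     * signature MR's geometry on the wire, unprotected ones the data MR's. */
    static struct ib_sge *example_pick_sge(struct isert_rdma_wr *wr,
                                           bool protected_cmd)
    {
            return protected_cmd ? &wr->ib_sg[SIG] : &wr->ib_sg[DATA];
    }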
@@ -117,14 +150,15 @@ struct isert_device; | |||
117 | struct isert_conn { | 150 | struct isert_conn { |
118 | enum iser_conn_state state; | 151 | enum iser_conn_state state; |
119 | int post_recv_buf_count; | 152 | int post_recv_buf_count; |
120 | atomic_t post_send_buf_count; | ||
121 | u32 responder_resources; | 153 | u32 responder_resources; |
122 | u32 initiator_depth; | 154 | u32 initiator_depth; |
155 | bool pi_support; | ||
123 | u32 max_sge; | 156 | u32 max_sge; |
124 | char *login_buf; | 157 | char *login_buf; |
125 | char *login_req_buf; | 158 | char *login_req_buf; |
126 | char *login_rsp_buf; | 159 | char *login_rsp_buf; |
127 | u64 login_req_dma; | 160 | u64 login_req_dma; |
161 | int login_req_len; | ||
128 | u64 login_rsp_dma; | 162 | u64 login_rsp_dma; |
129 | unsigned int conn_rx_desc_head; | 163 | unsigned int conn_rx_desc_head; |
130 | struct iser_rx_desc *conn_rx_descs; | 164 | struct iser_rx_desc *conn_rx_descs; |
@@ -132,13 +166,13 @@ struct isert_conn { | |||
132 | struct iscsi_conn *conn; | 166 | struct iscsi_conn *conn; |
133 | struct list_head conn_accept_node; | 167 | struct list_head conn_accept_node; |
134 | struct completion conn_login_comp; | 168 | struct completion conn_login_comp; |
169 | struct completion login_req_comp; | ||
135 | struct iser_tx_desc conn_login_tx_desc; | 170 | struct iser_tx_desc conn_login_tx_desc; |
136 | struct rdma_cm_id *conn_cm_id; | 171 | struct rdma_cm_id *conn_cm_id; |
137 | struct ib_pd *conn_pd; | 172 | struct ib_pd *conn_pd; |
138 | struct ib_mr *conn_mr; | 173 | struct ib_mr *conn_mr; |
139 | struct ib_qp *conn_qp; | 174 | struct ib_qp *conn_qp; |
140 | struct isert_device *conn_device; | 175 | struct isert_device *conn_device; |
141 | struct work_struct conn_logout_work; | ||
142 | struct mutex conn_mutex; | 176 | struct mutex conn_mutex; |
143 | struct completion conn_wait; | 177 | struct completion conn_wait; |
144 | struct completion conn_wait_comp_err; | 178 | struct completion conn_wait_comp_err; |
@@ -147,31 +181,38 @@ struct isert_conn { | |||
147 | int conn_fr_pool_size; | 181 | int conn_fr_pool_size; |
148 | /* lock to protect fastreg pool */ | 182 | /* lock to protect fastreg pool */ |
149 | spinlock_t conn_lock; | 183 | spinlock_t conn_lock; |
150 | #define ISERT_COMP_BATCH_COUNT 8 | 184 | struct work_struct release_work; |
151 | int conn_comp_batch; | 185 | struct ib_recv_wr beacon; |
152 | struct llist_head conn_comp_llist; | 186 | bool logout_posted; |
153 | bool disconnect; | ||
154 | }; | 187 | }; |
155 | 188 | ||
156 | #define ISERT_MAX_CQ 64 | 189 | #define ISERT_MAX_CQ 64 |
157 | 190 | ||
158 | struct isert_cq_desc { | 191 | /** |
159 | struct isert_device *device; | 192 | * struct isert_comp - iSER completion context |
160 | int cq_index; | 193 | * |
161 | struct work_struct cq_rx_work; | 194 | * @device: pointer to device handle |
162 | struct work_struct cq_tx_work; | 195 | * @cq: completion queue |
196 | * @wcs: work completion array | ||
197 | * @active_qps: Number of active QPs attached | ||
198 | * to completion context | ||
199 | * @work: completion work handle | ||
200 | */ | ||
201 | struct isert_comp { | ||
202 | struct isert_device *device; | ||
203 | struct ib_cq *cq; | ||
204 | struct ib_wc wcs[16]; | ||
205 | int active_qps; | ||
206 | struct work_struct work; | ||
163 | }; | 207 | }; |
164 | 208 | ||
165 | struct isert_device { | 209 | struct isert_device { |
166 | int use_fastreg; | 210 | int use_fastreg; |
167 | bool pi_capable; | 211 | bool pi_capable; |
168 | int cqs_used; | ||
169 | int refcount; | 212 | int refcount; |
170 | int cq_active_qps[ISERT_MAX_CQ]; | ||
171 | struct ib_device *ib_device; | 213 | struct ib_device *ib_device; |
172 | struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; | 214 | struct isert_comp *comps; |
173 | struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; | 215 | int comps_used; |
174 | struct isert_cq_desc *cq_desc; | ||
175 | struct list_head dev_node; | 216 | struct list_head dev_node; |
176 | struct ib_device_attr dev_attr; | 217 | struct ib_device_attr dev_attr; |
177 | int (*reg_rdma_mem)(struct iscsi_conn *conn, | 218 | int (*reg_rdma_mem)(struct iscsi_conn *conn, |
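struct isert_comp replaces the per-device isert_cq_desc arrays and the split RX/TX CQs with a single completion context per vector: one CQ for both directions, a 16-entry ib_wc array for batched polling, and an active_qps count the driver can use to balance QPs across contexts. A hedged sketch of the batched poll loop this layout enables (the real handler, including its poll budget, lives in ib_isert.c; example_handle_wc() is a stand-in for the driver's dispatch):

    /* Sketch: reap up to ARRAY_SIZE(comp->wcs) completions per
     * ib_poll_cq() call instead of one at a time, cutting CQ lock
     * traffic, then re-arm the CQ for the next event. */
    static void example_poll(struct isert_comp *comp)
    {
            int i, n;

            while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs),
                                   comp->wcs)) > 0) {
                    for (i = 0; i < n; i++)
                            example_handle_wc(comp->device, &comp->wcs[i]);
            }

            ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
    }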
@@ -182,6 +223,7 @@ struct isert_device { | |||
182 | }; | 223 | }; |
183 | 224 | ||
184 | struct isert_np { | 225 | struct isert_np { |
226 | struct iscsi_np *np; | ||
185 | struct semaphore np_sem; | 227 | struct semaphore np_sem; |
186 | struct rdma_cm_id *np_cm_id; | 228 | struct rdma_cm_id *np_cm_id; |
187 | struct mutex np_accept_mutex; | 229 | struct mutex np_accept_mutex; |