author	Sagi Grimberg <sagig@mellanox.com>	2014-10-01 07:01:58 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-10-09 03:06:06 -0400
commit	a4ee3539f6e2955815b93350bbce01e8915d27f8
tree	db75b70228634f6222c7d168a8b7c4268e25058d
parent	5716af6e5234402b2017f41beb36c086201fae42
IB/iser: Re-introduce ib_conn

Introduce ib_conn, a structure that describes the RDMA-related
connection objects, kept as a static member of iser_conn.

This patch does not change any functionality.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
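
The point of keeping ib_conn an embedded (static) member rather than a pointer is the standard kernel idiom: code handed only the RDMA half of the connection can climb back to the enclosing iser_conn with container_of(), exactly as the reworked iser_rcv_completion() does in the iser_initiator.c hunk below. A minimal standalone sketch of the idiom (the to_iser_conn() helper and the stand-in fields are illustrative, not part of the patch; the kernel's container_of() additionally type-checks the member):

	#include <stddef.h>

	/* Simplified container_of(): recover the enclosing structure from a
	 * pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ib_conn {
		int post_recv_buf_count;	/* stand-in for the real fields */
	};

	struct iser_conn {
		struct ib_conn ib_conn;		/* embedded, not a pointer */
		int state;
	};

	/* Illustrative helper: a completion handler that receives only the
	 * ib_conn can still reach the full iSER connection state. */
	static inline struct iser_conn *to_iser_conn(struct ib_conn *ib_conn)
	{
		return container_of(ib_conn, struct iser_conn, ib_conn);
	}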
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c      |  18
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.h      |  85
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c  |  63
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c     |  52
-rw-r--r--	drivers/infiniband/ulp/iser/iser_verbs.c      | 231
5 files changed, 245 insertions(+), 204 deletions(-)
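
For orientation before the per-file hunks: the net effect in iscsi_iser.h is that the RDMA handles and posted-buffer counters move out of struct iser_conn into the new embedded struct ib_conn. A condensed view with abbreviated field lists (the forward declarations and the atomic_t stand-in are only to make the sketch self-contained; the full definitions are in the header hunk below):

	struct rdma_cm_id;	/* opaque here; provided by the RDMA-CM core */
	struct ib_qp;
	struct iser_device;
	struct iscsi_conn;
	typedef struct { int counter; } atomic_t;	/* stand-in for the kernel type */

	/* Condensed from the iscsi_iser.h hunk below; "..." marks elided fields. */
	struct ib_conn {
		struct rdma_cm_id	*cma_id;	/* formerly in iser_conn */
		struct ib_qp		*qp;		/* formerly in iser_conn */
		int			post_recv_buf_count;
		atomic_t		post_send_buf_count;
		struct iser_device	*device;
		/* ... rx_wr, cq_index, pi_support, lock, fmr/fastreg pools ... */
	};

	struct iser_conn {
		struct ib_conn		ib_conn;	/* the new static member */
		struct iscsi_conn	*iscsi_conn;
		/* ... iSCSI-level state: login buffers, rx descriptors, ... */
	};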
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1f3ad2b13ae2..db83530184f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -148,7 +148,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
 			       struct iser_tx_desc *tx_desc)
 {
 	struct iser_conn *iser_conn = task->conn->dd_data;
-	struct iser_device *device = iser_conn->device;
+	struct iser_device *device = iser_conn->ib_conn.device;
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	u64 dma_addr;
 
@@ -291,7 +291,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
 	struct iser_conn *iser_conn = task->conn->dd_data;
-	struct iser_device *device = iser_conn->device;
+	struct iser_device *device = iser_conn->ib_conn.device;
 
 	ib_dma_unmap_single(device->ib_device,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -448,6 +448,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	struct iscsi_session *session;
 	struct Scsi_Host *shost;
 	struct iser_conn *iser_conn = NULL;
+	struct ib_conn *ib_conn;
 
 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
 	if (!shost)
@@ -465,8 +466,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	 */
 	if (ep) {
 		iser_conn = ep->dd_data;
-		if (iser_conn->pi_support) {
-			u32 sig_caps = iser_conn->device->dev_attr.sig_prot_cap;
+		ib_conn = &iser_conn->ib_conn;
+		if (ib_conn->pi_support) {
+			u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
 
 			scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
 			if (iser_pi_guard)
@@ -477,7 +479,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	}
 
 	if (iscsi_host_add(shost, ep ?
-			   iser_conn->device->ib_device->dma_device : NULL))
+			   ib_conn->device->ib_device->dma_device : NULL))
 		goto free_host;
 
 	if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
@@ -583,12 +585,12 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
 	case ISCSI_PARAM_CONN_ADDRESS:
-		if (!iser_conn || !iser_conn->cma_id)
+		if (!iser_conn || !iser_conn->ib_conn.cma_id)
 			return -ENOTCONN;
 
 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-				&iser_conn->cma_id->route.addr.dst_addr,
+				&iser_conn->ib_conn.cma_id->route.addr.dst_addr,
 				param, buf);
 		break;
 	default:
 		return -ENOSYS;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index ec34b8f7d385..4ad73c91e531 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -265,6 +265,7 @@ struct iser_rx_desc {
 #define ISER_MAX_CQ 4
 
 struct iser_conn;
+struct ib_conn;
 struct iscsi_iser_task;
 
 struct iser_device {
@@ -281,9 +282,9 @@ struct iser_device {
 	int			cq_active_qps[ISER_MAX_CQ];
 	int			cqs_used;
 	struct iser_cq_desc	*cq_desc;
-	int			(*iser_alloc_rdma_reg_res)(struct iser_conn *iser_conn,
+	int			(*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
							   unsigned cmds_max);
-	void			(*iser_free_rdma_reg_res)(struct iser_conn *iser_conn);
+	void			(*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
 	int			(*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
						     enum iser_data_dir cmd_dir);
 	void			(*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
@@ -317,20 +318,57 @@ struct fast_reg_descriptor {
 	u8			reg_indicators;
 };
 
+/**
+ * struct ib_conn - InfiniBand related objects
+ *
+ * @cma_id:              rdma_cm connection manager handle
+ * @qp:                  Connection Queue-pair
+ * @post_recv_buf_count: post receive counter
+ * @post_send_buf_count: post send counter
+ * @rx_wr:               receive work request for batch posts
+ * @device:              reference to iser device
+ * @pi_support:          Indicate device T10-PI support
+ * @lock:                protects fmr/fastreg pool
+ * @union.fmr:
+ *     @pool:            FMR pool for fast registrations
+ *     @page_vec:        page vector to hold mapped commands pages
+ *                       used for registration
+ * @union.fastreg:
+ *     @pool:            Fast registration descriptors pool for fast
+ *                       registrations
+ *     @pool_size:       Size of pool
+ */
+struct ib_conn {
+	struct rdma_cm_id	*cma_id;
+	struct ib_qp		*qp;
+	int			post_recv_buf_count;
+	atomic_t		post_send_buf_count;
+	struct ib_recv_wr	rx_wr[ISER_MIN_POSTED_RX];
+	struct iser_device	*device;
+	int			cq_index;
+	bool			pi_support;
+	spinlock_t		lock;
+	union {
+		struct {
+			struct ib_fmr_pool	*pool;
+			struct iser_page_vec	*page_vec;
+		} fmr;
+		struct {
+			struct list_head	pool;
+			int			pool_size;
+		} fastreg;
+	};
+};
+
 struct iser_conn {
+	struct ib_conn		ib_conn;
 	struct iscsi_conn	*iscsi_conn;
 	struct iscsi_endpoint	*ep;
 	enum iser_conn_state	state;		       /* rdma connection state */
 	atomic_t		refcount;
-	spinlock_t		lock;		       /* used for state changes */
-	struct iser_device	*device;	       /* device context */
-	struct rdma_cm_id	*cma_id;	       /* CMA ID */
-	struct ib_qp		*qp;		       /* QP */
 	unsigned		qp_max_recv_dtos;      /* num of rx buffers */
 	unsigned		qp_max_recv_dtos_mask; /* above minus 1 */
 	unsigned		min_posted_rx;	       /* qp_max_recv_dtos >> 2 */
 	char			name[ISER_OBJECT_NAME_SIZE];
 	struct work_struct	release_work;
 	struct completion	stop_completion;
@@ -344,21 +382,6 @@ struct iser_conn {
 	u64			login_req_dma, login_resp_dma;
 	unsigned int		rx_desc_head;
 	struct iser_rx_desc	*rx_descs;
-	struct ib_recv_wr	rx_wr[ISER_MIN_POSTED_RX];
-	bool			pi_support;
-
-	/* Connection memory registration pool */
-	union {
-		struct {
-			struct ib_fmr_pool	*pool;	   /* pool of IB FMRs         */
-			struct iser_page_vec	*page_vec; /* represents SG to fmr maps*
-							    * maps serialized as tx is*/
-		} fmr;
-		struct {
-			struct list_head	pool;
-			int			pool_size;
-		} fastreg;
-	};
 };
 
 struct iscsi_iser_task {
@@ -429,10 +452,10 @@ void iser_release_work(struct work_struct *work);
 
 void iser_rcv_completion(struct iser_rx_desc *desc,
 			 unsigned long dto_xfer_len,
-			 struct iser_conn *iser_conn);
+			 struct ib_conn *ib_conn);
 
 void iser_snd_completion(struct iser_tx_desc *desc,
-			 struct iser_conn *iser_conn);
+			 struct ib_conn *ib_conn);
 
 void iser_task_rdma_init(struct iscsi_iser_task *task);
 
@@ -455,7 +478,7 @@ int iser_connect(struct iser_conn *iser_conn,
 		 struct sockaddr *dst_addr,
 		 int non_blocking);
 
-int  iser_reg_page_vec(struct iser_conn *iser_conn,
+int  iser_reg_page_vec(struct ib_conn *ib_conn,
 		       struct iser_page_vec *page_vec,
 		       struct iser_mem_reg *mem_reg);
 
@@ -466,7 +489,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 
 int  iser_post_recvl(struct iser_conn *iser_conn);
 int  iser_post_recvm(struct iser_conn *iser_conn, int count);
-int  iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc);
+int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
 
 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
@@ -479,10 +502,10 @@ int iser_initialize_task_headers(struct iscsi_task *task,
 				 struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 			      struct iscsi_session *session);
-int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max);
-void iser_free_fmr_pool(struct iser_conn *iser_conn);
-int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max);
-void iser_free_fastreg_pool(struct iser_conn *iser_conn);
+int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+void iser_free_fmr_pool(struct ib_conn *ib_conn);
+int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
+void iser_free_fastreg_pool(struct ib_conn *ib_conn);
 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 			     enum iser_data_dir cmd_dir, sector_t *sector);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 1f53ccb31534..123174570c16 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
 
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device *device = iser_task->iser_conn->device;
+	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int edtl)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device *device = iser_task->iser_conn->device;
+	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -163,7 +163,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 static void iser_create_send_desc(struct iser_conn *iser_conn,
 				  struct iser_tx_desc *tx_desc)
 {
-	struct iser_device *device = iser_conn->device;
+	struct iser_device *device = iser_conn->ib_conn.device;
 
 	ib_dma_sync_single_for_cpu(device->ib_device,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -181,16 +181,18 @@ static void iser_create_send_desc(struct iser_conn *iser_conn,
 
 static void iser_free_login_buf(struct iser_conn *iser_conn)
 {
+	struct iser_device *device = iser_conn->ib_conn.device;
+
 	if (!iser_conn->login_buf)
 		return;
 
 	if (iser_conn->login_req_dma)
-		ib_dma_unmap_single(iser_conn->device->ib_device,
+		ib_dma_unmap_single(device->ib_device,
 				    iser_conn->login_req_dma,
 				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
 
 	if (iser_conn->login_resp_dma)
-		ib_dma_unmap_single(iser_conn->device->ib_device,
+		ib_dma_unmap_single(device->ib_device,
 				    iser_conn->login_resp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
 
@@ -204,12 +206,10 @@ static void iser_free_login_buf(struct iser_conn *iser_conn)
 
 static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 {
-	struct iser_device *device;
+	struct iser_device *device = iser_conn->ib_conn.device;
 	int req_err, resp_err;
 
-	BUG_ON(iser_conn->device == NULL);
-
-	device = iser_conn->device;
+	BUG_ON(device == NULL);
 
 	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 				       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
@@ -259,13 +259,14 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 	u64 dma_addr;
 	struct iser_rx_desc *rx_desc;
 	struct ib_sge *rx_sg;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 
 	iser_conn->qp_max_recv_dtos = session->cmds_max;
 	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
 	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
 
-	if (device->iser_alloc_rdma_reg_res(iser_conn, session->scsi_cmds_max))
+	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
 		goto create_rdma_reg_res_failed;
 
 	if (iser_alloc_login_buf(iser_conn))
@@ -305,7 +306,7 @@ rx_desc_dma_map_failed:
 rx_desc_alloc_fail:
 	iser_free_login_buf(iser_conn);
 alloc_login_buf_fail:
-	device->iser_free_rdma_reg_res(iser_conn);
+	device->iser_free_rdma_reg_res(ib_conn);
 create_rdma_reg_res_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
@@ -315,13 +316,14 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 {
 	int i;
 	struct iser_rx_desc *rx_desc;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 
 	if (!iser_conn->rx_descs)
 		goto free_login_buf;
 
 	if (device->iser_free_rdma_reg_res)
-		device->iser_free_rdma_reg_res(iser_conn);
+		device->iser_free_rdma_reg_res(ib_conn);
 
 	rx_desc = iser_conn->rx_descs;
 	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -338,6 +340,7 @@ free_login_buf:
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
 	struct iser_conn *iser_conn = conn->dd_data;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iscsi_session *session = conn->session;
 
 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
@@ -350,8 +353,8 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 	 * response) and no posted send buffers left - they must have been
 	 * consumed during previous login phases.
 	 */
-	WARN_ON(iser_conn->post_recv_buf_count != 1);
-	WARN_ON(atomic_read(&iser_conn->post_send_buf_count) != 0);
+	WARN_ON(ib_conn->post_recv_buf_count != 1);
+	WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);
 
 	if (session->discovery_sess) {
 		iser_info("Discovery session, re-using login RX buffer\n");
@@ -426,7 +429,7 @@ int iser_send_command(struct iscsi_conn *conn,
 
 	iser_task->status = ISER_TASK_STATUS_STARTED;
 
-	err = iser_post_send(iser_conn, tx_desc);
+	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
 	if (!err)
 		return 0;
 
@@ -491,7 +494,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		 itt, buf_offset, data_seg_len);
 
 
-	err = iser_post_send(iser_conn, tx_desc);
+	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
 	if (!err)
 		return 0;
 
@@ -515,7 +518,7 @@ int iser_send_control(struct iscsi_conn *conn,
 	mdesc->type = ISCSI_TX_CONTROL;
 	iser_create_send_desc(iser_conn, mdesc);
 
-	device = iser_conn->device;
+	device = iser_conn->ib_conn.device;
 
 	data_seg_len = ntoh24(task->hdr->dlength);
 
@@ -553,7 +556,7 @@ int iser_send_control(struct iscsi_conn *conn,
 		goto send_control_error;
 	}
 
-	err = iser_post_send(iser_conn, mdesc);
+	err = iser_post_send(&iser_conn->ib_conn, mdesc);
 	if (!err)
 		return 0;
 
@@ -567,8 +570,10 @@ send_control_error:
  */
 void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 			 unsigned long rx_xfer_len,
-			 struct iser_conn *iser_conn)
+			 struct ib_conn *ib_conn)
 {
+	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
+						   ib_conn);
 	struct iscsi_hdr *hdr;
 	u64 rx_dma;
 	int rx_buflen, outstanding, count, err;
@@ -582,7 +587,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 		rx_buflen = ISER_RX_PAYLOAD_SIZE;
 	}
 
-	ib_dma_sync_single_for_cpu(iser_conn->device->ib_device, rx_dma,
+	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
 				   rx_buflen, DMA_FROM_DEVICE);
 
 	hdr = &rx_desc->iscsi_header;
@@ -593,19 +598,19 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
 			rx_xfer_len - ISER_HEADERS_LEN);
 
-	ib_dma_sync_single_for_device(iser_conn->device->ib_device, rx_dma,
+	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
 				      rx_buflen, DMA_FROM_DEVICE);
 
 	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
 	 * task eliminates the need to worry on tasks which are completed in   *
 	 * parallel to the execution of iser_conn_term. So the code that waits *
 	 * for the posted rx bufs refcount to become zero handles everything   */
-	iser_conn->post_recv_buf_count--;
+	ib_conn->post_recv_buf_count--;
 
 	if (rx_dma == iser_conn->login_resp_dma)
 		return;
 
-	outstanding = iser_conn->post_recv_buf_count;
+	outstanding = ib_conn->post_recv_buf_count;
 	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
 		count = min(iser_conn->qp_max_recv_dtos - outstanding,
 			    iser_conn->min_posted_rx);
@@ -616,10 +621,10 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 }
 
 void iser_snd_completion(struct iser_tx_desc *tx_desc,
-			 struct iser_conn *iser_conn)
+			 struct ib_conn *ib_conn)
 {
 	struct iscsi_task *task;
-	struct iser_device *device = iser_conn->device;
+	struct iser_device *device = ib_conn->device;
 
 	if (tx_desc->type == ISCSI_TX_DATAOUT) {
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
@@ -628,7 +633,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
 		tx_desc = NULL;
 	}
 
-	atomic_dec(&iser_conn->post_send_buf_count);
+	atomic_dec(&ib_conn->post_send_buf_count);
 
 	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
@@ -661,7 +666,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
-	struct iser_device *device = iser_task->iser_conn->device;
+	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 	int is_rdma_data_aligned = 1;
 	int is_rdma_prot_aligned = 1;
 	int prot_count = scsi_prot_sg_count(iser_task->sc);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index ba09fbbe765e..de4db762dc77 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 					struct iser_data_buf *data_copy,
 					enum iser_data_dir cmd_dir)
 {
-	struct ib_device *dev = iser_task->iser_conn->device->ib_device;
+	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 	struct scatterlist *sg;
 	char *mem = NULL;
@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 	unsigned long cmd_data_len;
 
-	dev = iser_task->iser_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
 	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 
 	iser_task->dir[iser_dir] = 1;
-	dev = iser_task->iser_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 {
 	struct ib_device *dev;
 
-	dev = iser_task->iser_conn->device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 }
 
@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			  enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *iser_conn = iser_task->iser_conn;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			 (unsigned long)regd_buf->reg.va,
 			 (unsigned long)regd_buf->reg.len);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, iser_conn->fmr.page_vec, ibdev);
-		err = iser_reg_page_vec(iser_conn, iser_conn->fmr.page_vec,
+		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
+		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
					&regd_buf->reg);
 		if (err && err != -EAGAIN) {
 			iser_data_buf_dump(mem, ibdev);
@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 				 mem->dma_nents,
 				 ntoh24(iser_task->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-				 iser_conn->fmr.page_vec->data_size,
-				 iser_conn->fmr.page_vec->length,
-				 iser_conn->fmr.page_vec->offset);
-			for (i = 0; i < iser_conn->fmr.page_vec->length; i++)
+				 ib_conn->fmr.page_vec->data_size,
+				 ib_conn->fmr.page_vec->length,
+				 ib_conn->fmr.page_vec->offset);
+			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
-					 (unsigned long long)iser_conn->fmr.page_vec->pages[i]);
+					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
 		}
 		if (err)
 			return err;
@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
 		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
 {
-	struct iser_conn *iser_conn = iser_task->iser_conn;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_pi_context *pi_ctx = desc->pi_ctx;
 	struct ib_send_wr sig_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 	else
 		wr->next = &sig_wr;
 
-	ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
+	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
 	if (ret) {
 		iser_err("reg_sig_mr failed, ret:%d\n", ret);
 		goto err;
@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 			    struct ib_sge *sge)
 {
 	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
-	struct iser_conn *iser_conn = iser_task->iser_conn;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct ib_mr *mr;
 	struct ib_fast_reg_page_list *frpl;
@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	else
 		wr->next = &fastreg_wr;
 
-	ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
+	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
 	if (ret) {
 		iser_err("fast registration failed, ret:%d\n", ret);
 		return ret;
@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 			      enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *iser_conn = iser_task->iser_conn;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 
 	if (mem->dma_nents != 1 ||
 	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		spin_lock_irqsave(&iser_conn->lock, flags);
-		desc = list_first_entry(&iser_conn->fastreg.pool,
+		spin_lock_irqsave(&ib_conn->lock, flags);
+		desc = list_first_entry(&ib_conn->fastreg.pool,
					struct fast_reg_descriptor, list);
 		list_del(&desc->list);
-		spin_unlock_irqrestore(&iser_conn->lock, flags);
+		spin_unlock_irqrestore(&ib_conn->lock, flags);
 		regd_buf->reg.mem_h = desc;
 	}
 
@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 	return 0;
 err_reg:
 	if (desc) {
-		spin_lock_irqsave(&iser_conn->lock, flags);
-		list_add_tail(&desc->list, &iser_conn->fastreg.pool);
-		spin_unlock_irqrestore(&iser_conn->lock, flags);
+		spin_lock_irqsave(&ib_conn->lock, flags);
+		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+		spin_unlock_irqrestore(&ib_conn->lock, flags);
 	}
 
 	return err;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 778c166916fe..e69aba8eabec 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -213,19 +213,19 @@ static void iser_free_device_ib_res(struct iser_device *device) | |||
213 | * | 213 | * |
214 | * returns 0 on success, or errno code on failure | 214 | * returns 0 on success, or errno code on failure |
215 | */ | 215 | */ |
216 | int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max) | 216 | int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) |
217 | { | 217 | { |
218 | struct iser_device *device = iser_conn->device; | 218 | struct iser_device *device = ib_conn->device; |
219 | struct ib_fmr_pool_param params; | 219 | struct ib_fmr_pool_param params; |
220 | int ret = -ENOMEM; | 220 | int ret = -ENOMEM; |
221 | 221 | ||
222 | iser_conn->fmr.page_vec = kmalloc(sizeof(*iser_conn->fmr.page_vec) + | 222 | ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + |
223 | (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), | 223 | (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), |
224 | GFP_KERNEL); | 224 | GFP_KERNEL); |
225 | if (!iser_conn->fmr.page_vec) | 225 | if (!ib_conn->fmr.page_vec) |
226 | return ret; | 226 | return ret; |
227 | 227 | ||
228 | iser_conn->fmr.page_vec->pages = (u64 *)(iser_conn->fmr.page_vec + 1); | 228 | ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); |
229 | 229 | ||
230 | params.page_shift = SHIFT_4K; | 230 | params.page_shift = SHIFT_4K; |
231 | /* when the first/last SG element are not start/end * | 231 | /* when the first/last SG element are not start/end * |
@@ -241,16 +241,16 @@ int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max) | |||
241 | IB_ACCESS_REMOTE_WRITE | | 241 | IB_ACCESS_REMOTE_WRITE | |
242 | IB_ACCESS_REMOTE_READ); | 242 | IB_ACCESS_REMOTE_READ); |
243 | 243 | ||
244 | iser_conn->fmr.pool = ib_create_fmr_pool(device->pd, ¶ms); | 244 | ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, ¶ms); |
245 | if (!IS_ERR(iser_conn->fmr.pool)) | 245 | if (!IS_ERR(ib_conn->fmr.pool)) |
246 | return 0; | 246 | return 0; |
247 | 247 | ||
248 | /* no FMR => no need for page_vec */ | 248 | /* no FMR => no need for page_vec */ |
249 | kfree(iser_conn->fmr.page_vec); | 249 | kfree(ib_conn->fmr.page_vec); |
250 | iser_conn->fmr.page_vec = NULL; | 250 | ib_conn->fmr.page_vec = NULL; |
251 | 251 | ||
252 | ret = PTR_ERR(iser_conn->fmr.pool); | 252 | ret = PTR_ERR(ib_conn->fmr.pool); |
253 | iser_conn->fmr.pool = NULL; | 253 | ib_conn->fmr.pool = NULL; |
254 | if (ret != -ENOSYS) { | 254 | if (ret != -ENOSYS) { |
255 | iser_err("FMR allocation failed, err %d\n", ret); | 255 | iser_err("FMR allocation failed, err %d\n", ret); |
256 | return ret; | 256 | return ret; |
@@ -263,18 +263,18 @@ int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max) | |||
263 | /** | 263 | /** |
264 | * iser_free_fmr_pool - releases the FMR pool and page vec | 264 | * iser_free_fmr_pool - releases the FMR pool and page vec |
265 | */ | 265 | */ |
266 | void iser_free_fmr_pool(struct iser_conn *iser_conn) | 266 | void iser_free_fmr_pool(struct ib_conn *ib_conn) |
267 | { | 267 | { |
268 | iser_info("freeing conn %p fmr pool %p\n", | 268 | iser_info("freeing conn %p fmr pool %p\n", |
269 | iser_conn, iser_conn->fmr.pool); | 269 | ib_conn, ib_conn->fmr.pool); |
270 | 270 | ||
271 | if (iser_conn->fmr.pool != NULL) | 271 | if (ib_conn->fmr.pool != NULL) |
272 | ib_destroy_fmr_pool(iser_conn->fmr.pool); | 272 | ib_destroy_fmr_pool(ib_conn->fmr.pool); |
273 | 273 | ||
274 | iser_conn->fmr.pool = NULL; | 274 | ib_conn->fmr.pool = NULL; |
275 | 275 | ||
276 | kfree(iser_conn->fmr.page_vec); | 276 | kfree(ib_conn->fmr.page_vec); |
277 | iser_conn->fmr.page_vec = NULL; | 277 | ib_conn->fmr.page_vec = NULL; |
278 | } | 278 | } |
279 | 279 | ||
280 | static int | 280 | static int |
@@ -367,14 +367,14 @@ fast_reg_mr_failure: | |||
367 | * for fast registration work requests. | 367 | * for fast registration work requests. |
368 | * returns 0 on success, or errno code on failure | 368 | * returns 0 on success, or errno code on failure |
369 | */ | 369 | */ |
370 | int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max) | 370 | int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max) |
371 | { | 371 | { |
372 | struct iser_device *device = iser_conn->device; | 372 | struct iser_device *device = ib_conn->device; |
373 | struct fast_reg_descriptor *desc; | 373 | struct fast_reg_descriptor *desc; |
374 | int i, ret; | 374 | int i, ret; |
375 | 375 | ||
376 | INIT_LIST_HEAD(&iser_conn->fastreg.pool); | 376 | INIT_LIST_HEAD(&ib_conn->fastreg.pool); |
377 | iser_conn->fastreg.pool_size = 0; | 377 | ib_conn->fastreg.pool_size = 0; |
378 | for (i = 0; i < cmds_max; i++) { | 378 | for (i = 0; i < cmds_max; i++) { |
379 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | 379 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
380 | if (!desc) { | 380 | if (!desc) { |
@@ -384,7 +384,7 @@ int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | ret = iser_create_fastreg_desc(device->ib_device, device->pd, | 386 | ret = iser_create_fastreg_desc(device->ib_device, device->pd, |
387 | iser_conn->pi_support, desc); | 387 | ib_conn->pi_support, desc); |
388 | if (ret) { | 388 | if (ret) { |
389 | iser_err("Failed to create fastreg descriptor err=%d\n", | 389 | iser_err("Failed to create fastreg descriptor err=%d\n", |
390 | ret); | 390 | ret); |
@@ -392,31 +392,31 @@ int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max) | |||
392 | goto err; | 392 | goto err; |
393 | } | 393 | } |
394 | 394 | ||
395 | list_add_tail(&desc->list, &iser_conn->fastreg.pool); | 395 | list_add_tail(&desc->list, &ib_conn->fastreg.pool); |
396 | iser_conn->fastreg.pool_size++; | 396 | ib_conn->fastreg.pool_size++; |
397 | } | 397 | } |
398 | 398 | ||
399 | return 0; | 399 | return 0; |
400 | 400 | ||
401 | err: | 401 | err: |
402 | iser_free_fastreg_pool(iser_conn); | 402 | iser_free_fastreg_pool(ib_conn); |
403 | return ret; | 403 | return ret; |
404 | } | 404 | } |
405 | 405 | ||
406 | /** | 406 | /** |
407 | * iser_free_fastreg_pool - releases the pool of fast_reg descriptors | 407 | * iser_free_fastreg_pool - releases the pool of fast_reg descriptors |
408 | */ | 408 | */ |
409 | void iser_free_fastreg_pool(struct iser_conn *iser_conn) | 409 | void iser_free_fastreg_pool(struct ib_conn *ib_conn) |
410 | { | 410 | { |
411 | struct fast_reg_descriptor *desc, *tmp; | 411 | struct fast_reg_descriptor *desc, *tmp; |
412 | int i = 0; | 412 | int i = 0; |
413 | 413 | ||
414 | if (list_empty(&iser_conn->fastreg.pool)) | 414 | if (list_empty(&ib_conn->fastreg.pool)) |
415 | return; | 415 | return; |
416 | 416 | ||
417 | iser_info("freeing conn %p fr pool\n", iser_conn); | 417 | iser_info("freeing conn %p fr pool\n", ib_conn); |
418 | 418 | ||
419 | list_for_each_entry_safe(desc, tmp, &iser_conn->fastreg.pool, list) { | 419 | list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { |
420 | list_del(&desc->list); | 420 | list_del(&desc->list); |
421 | ib_free_fast_reg_page_list(desc->data_frpl); | 421 | ib_free_fast_reg_page_list(desc->data_frpl); |
422 | ib_dereg_mr(desc->data_mr); | 422 | ib_dereg_mr(desc->data_mr); |
@@ -430,9 +430,9 @@ void iser_free_fastreg_pool(struct iser_conn *iser_conn) | |||
430 | ++i; | 430 | ++i; |
431 | } | 431 | } |
432 | 432 | ||
433 | if (i < iser_conn->fastreg.pool_size) | 433 | if (i < ib_conn->fastreg.pool_size) |
434 | iser_warn("pool still has %d regions registered\n", | 434 | iser_warn("pool still has %d regions registered\n", |
435 | iser_conn->fastreg.pool_size - i); | 435 | ib_conn->fastreg.pool_size - i); |
436 | } | 436 | } |
437 | 437 | ||
438 | /** | 438 | /** |
@@ -440,16 +440,16 @@ void iser_free_fastreg_pool(struct iser_conn *iser_conn) | |||
440 | * | 440 | * |
441 | * returns 0 on success, -1 on failure | 441 | * returns 0 on success, -1 on failure |
442 | */ | 442 | */ |
443 | static int iser_create_ib_conn_res(struct iser_conn *iser_conn) | 443 | static int iser_create_ib_conn_res(struct ib_conn *ib_conn) |
444 | { | 444 | { |
445 | struct iser_device *device; | 445 | struct iser_device *device; |
446 | struct ib_qp_init_attr init_attr; | 446 | struct ib_qp_init_attr init_attr; |
447 | int ret = -ENOMEM; | 447 | int ret = -ENOMEM; |
448 | int index, min_index = 0; | 448 | int index, min_index = 0; |
449 | 449 | ||
450 | BUG_ON(iser_conn->device == NULL); | 450 | BUG_ON(ib_conn->device == NULL); |
451 | 451 | ||
452 | device = iser_conn->device; | 452 | device = ib_conn->device; |
453 | 453 | ||
454 | memset(&init_attr, 0, sizeof init_attr); | 454 | memset(&init_attr, 0, sizeof init_attr); |
455 | 455 | ||
@@ -460,11 +460,12 @@ static int iser_create_ib_conn_res(struct iser_conn *iser_conn) | |||
460 | device->cq_active_qps[min_index]) | 460 | device->cq_active_qps[min_index]) |
461 | min_index = index; | 461 | min_index = index; |
462 | device->cq_active_qps[min_index]++; | 462 | device->cq_active_qps[min_index]++; |
463 | ib_conn->cq_index = min_index; | ||
463 | mutex_unlock(&ig.connlist_mutex); | 464 | mutex_unlock(&ig.connlist_mutex); |
464 | iser_info("cq index %d used for iser_conn %p\n", min_index, iser_conn); | 465 | iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); |
465 | 466 | ||
466 | init_attr.event_handler = iser_qp_event_callback; | 467 | init_attr.event_handler = iser_qp_event_callback; |
467 | init_attr.qp_context = (void *)iser_conn; | 468 | init_attr.qp_context = (void *)ib_conn; |
468 | init_attr.send_cq = device->tx_cq[min_index]; | 469 | init_attr.send_cq = device->tx_cq[min_index]; |
469 | init_attr.recv_cq = device->rx_cq[min_index]; | 470 | init_attr.recv_cq = device->rx_cq[min_index]; |
470 | init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; | 471 | init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; |
@@ -472,21 +473,21 @@ static int iser_create_ib_conn_res(struct iser_conn *iser_conn) | |||
472 | init_attr.cap.max_recv_sge = 1; | 473 | init_attr.cap.max_recv_sge = 1; |
473 | init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | 474 | init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
474 | init_attr.qp_type = IB_QPT_RC; | 475 | init_attr.qp_type = IB_QPT_RC; |
475 | if (iser_conn->pi_support) { | 476 | if (ib_conn->pi_support) { |
476 | init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS; | 477 | init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS; |
477 | init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; | 478 | init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; |
478 | } else { | 479 | } else { |
479 | init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; | 480 | init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; |
480 | } | 481 | } |
481 | 482 | ||
482 | ret = rdma_create_qp(iser_conn->cma_id, device->pd, &init_attr); | 483 | ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); |
483 | if (ret) | 484 | if (ret) |
484 | goto out_err; | 485 | goto out_err; |
485 | 486 | ||
486 | iser_conn->qp = iser_conn->cma_id->qp; | 487 | ib_conn->qp = ib_conn->cma_id->qp; |
487 | iser_info("setting conn %p cma_id %p qp %p\n", | 488 | iser_info("setting conn %p cma_id %p qp %p\n", |
488 | iser_conn, iser_conn->cma_id, | 489 | ib_conn, ib_conn->cma_id, |
489 | iser_conn->cma_id->qp); | 490 | ib_conn->cma_id->qp); |
490 | return ret; | 491 | return ret; |
491 | 492 | ||
492 | out_err: | 493 | out_err: |
@@ -499,23 +500,20 @@ out_err: | |||
499 | */ | 500 | */ |
500 | static void iser_free_ib_conn_res(struct iser_conn *iser_conn) | 501 | static void iser_free_ib_conn_res(struct iser_conn *iser_conn) |
501 | { | 502 | { |
502 | int cq_index; | 503 | struct ib_conn *ib_conn = &iser_conn->ib_conn; |
503 | BUG_ON(iser_conn == NULL); | ||
504 | 504 | ||
505 | iser_info("freeing conn %p cma_id %p qp %p\n", | 505 | iser_info("freeing conn %p cma_id %p qp %p\n", |
506 | iser_conn, iser_conn->cma_id, | 506 | ib_conn, ib_conn->cma_id, |
507 | iser_conn->qp); | 507 | ib_conn->qp); |
508 | 508 | ||
509 | /* qp is created only once both addr & route are resolved */ | 509 | /* qp is created only once both addr & route are resolved */ |
510 | 510 | ||
511 | if (iser_conn->qp != NULL) { | 511 | if (ib_conn->qp != NULL) { |
512 | cq_index = ((struct iser_cq_desc *)iser_conn->qp->recv_cq->cq_context)->cq_index; | 512 | ib_conn->device->cq_active_qps[ib_conn->cq_index]--; |
513 | iser_conn->device->cq_active_qps[cq_index]--; | 513 | rdma_destroy_qp(ib_conn->cma_id); |
514 | |||
515 | rdma_destroy_qp(iser_conn->cma_id); | ||
516 | } | 514 | } |
517 | 515 | ||
518 | iser_conn->qp = NULL; | 516 | ib_conn->qp = NULL; |
519 | } | 517 | } |
520 | 518 | ||
521 | /** | 519 | /** |
@@ -614,7 +612,8 @@ void iser_release_work(struct work_struct *work) | |||
614 | */ | 612 | */ |
615 | void iser_conn_release(struct iser_conn *iser_conn) | 613 | void iser_conn_release(struct iser_conn *iser_conn) |
616 | { | 614 | { |
617 | struct iser_device *device = iser_conn->device; | 615 | struct ib_conn *ib_conn = &iser_conn->ib_conn; |
616 | struct iser_device *device = ib_conn->device; | ||
618 | 617 | ||
619 | mutex_lock(&ig.connlist_mutex); | 618 | mutex_lock(&ig.connlist_mutex); |
620 | list_del(&iser_conn->conn_list); | 619 | list_del(&iser_conn->conn_list); |
@@ -625,17 +624,17 @@ void iser_conn_release(struct iser_conn *iser_conn) | |||
625 | 624 | ||
626 | iser_free_rx_descriptors(iser_conn); | 625 | iser_free_rx_descriptors(iser_conn); |
627 | iser_free_ib_conn_res(iser_conn); | 626 | iser_free_ib_conn_res(iser_conn); |
628 | iser_conn->device = NULL; | 627 | ib_conn->device = NULL; |
629 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ | 628 | /* on EVENT_ADDR_ERROR there's no device yet for this conn */ |
630 | if (device != NULL) | 629 | if (device != NULL) |
631 | iser_device_try_release(device); | 630 | iser_device_try_release(device); |
632 | mutex_unlock(&iser_conn->state_mutex); | 631 | mutex_unlock(&iser_conn->state_mutex); |
633 | 632 | ||
634 | /* if cma handler context, the caller actually destroy the id */ | 633 | if (ib_conn->cma_id != NULL) { |
635 | if (iser_conn->cma_id != NULL) { | 634 | rdma_destroy_id(ib_conn->cma_id); |
636 | rdma_destroy_id(iser_conn->cma_id); | 635 | ib_conn->cma_id = NULL; |
637 | iser_conn->cma_id = NULL; | ||
638 | } | 636 | } |
637 | |||
639 | kfree(iser_conn); | 638 | kfree(iser_conn); |
640 | } | 639 | } |
641 | 640 | ||
@@ -644,6 +643,7 @@ void iser_conn_release(struct iser_conn *iser_conn) | |||
644 | */ | 643 | */ |
645 | void iser_conn_terminate(struct iser_conn *iser_conn) | 644 | void iser_conn_terminate(struct iser_conn *iser_conn) |
646 | { | 645 | { |
646 | struct ib_conn *ib_conn = &iser_conn->ib_conn; | ||
647 | int err = 0; | 647 | int err = 0; |
648 | 648 | ||
649 | /* change the ib conn state only if the conn is UP, however always call | 649 | /* change the ib conn state only if the conn is UP, however always call |
@@ -652,7 +652,7 @@ void iser_conn_terminate(struct iser_conn *iser_conn) | |||
652 | */ | 652 | */ |
653 | 653 | ||
654 | iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); | 654 | iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); |
655 | err = rdma_disconnect(iser_conn->cma_id); | 655 | err = rdma_disconnect(ib_conn->cma_id); |
656 | if (err) | 656 | if (err) |
657 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", | 657 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", |
658 | iser_conn, err); | 658 | iser_conn, err); |
@@ -676,6 +676,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
676 | { | 676 | { |
677 | struct iser_device *device; | 677 | struct iser_device *device; |
678 | struct iser_conn *iser_conn; | 678 | struct iser_conn *iser_conn; |
679 | struct ib_conn *ib_conn; | ||
679 | int ret; | 680 | int ret; |
680 | 681 | ||
681 | iser_conn = (struct iser_conn *)cma_id->context; | 682 | iser_conn = (struct iser_conn *)cma_id->context; |
@@ -683,6 +684,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
683 | /* bailout */ | 684 | /* bailout */ |
684 | return; | 685 | return; |
685 | 686 | ||
687 | ib_conn = &iser_conn->ib_conn; | ||
686 | device = iser_device_find_by_ib_device(cma_id); | 688 | device = iser_device_find_by_ib_device(cma_id); |
687 | if (!device) { | 689 | if (!device) { |
688 | iser_err("device lookup/creation failed\n"); | 690 | iser_err("device lookup/creation failed\n"); |
@@ -690,7 +692,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
690 | return; | 692 | return; |
691 | } | 693 | } |
692 | 694 | ||
693 | iser_conn->device = device; | 695 | ib_conn->device = device; |
694 | 696 | ||
695 | /* connection T10-PI support */ | 697 | /* connection T10-PI support */ |
696 | if (iser_pi_enable) { | 698 | if (iser_pi_enable) { |
@@ -698,10 +700,10 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
698 | IB_DEVICE_SIGNATURE_HANDOVER)) { | 700 | IB_DEVICE_SIGNATURE_HANDOVER)) { |
699 | iser_warn("T10-PI requested but not supported on %s, " | 701 | iser_warn("T10-PI requested but not supported on %s, " |
700 | "continue without T10-PI\n", | 702 | "continue without T10-PI\n", |
701 | iser_conn->device->ib_device->name); | 703 | ib_conn->device->ib_device->name); |
702 | iser_conn->pi_support = false; | 704 | ib_conn->pi_support = false; |
703 | } else { | 705 | } else { |
704 | iser_conn->pi_support = true; | 706 | ib_conn->pi_support = true; |
705 | } | 707 | } |
706 | } | 708 | } |
707 | 709 | ||
@@ -722,13 +724,14 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 	int ret;
 	struct iser_cm_hdr req_hdr;
 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
-	struct iser_device *device = iser_conn->device;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
 
 	if (iser_conn->state != ISER_CONN_PENDING)
 		/* bailout */
 		return;
 
-	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
+	ret = iser_create_ib_conn_res(ib_conn);
 	if (ret)
 		goto failure;
 
@@ -776,6 +779,8 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
 static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *iser_conn;
+	struct ib_conn *ib_conn;
 
 	iser_conn = (struct iser_conn *)cma_id->context;
+	ib_conn = &iser_conn->ib_conn;
 
@@ -793,8 +798,8 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 	 * block also exists in iser_handle_comp_error(), but it is needed here
 	 * for cases of no flushes at all, e.g. discovery over rdma.
 	 */
-	if (iser_conn->post_recv_buf_count == 0 &&
-	    (atomic_read(&iser_conn->post_send_buf_count) == 0)) {
+	if (ib_conn->post_recv_buf_count == 0 &&
+	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
 		complete(&iser_conn->flush_completion);
 	}
 }
@@ -842,13 +847,13 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 void iser_conn_init(struct iser_conn *iser_conn)
 {
 	iser_conn->state = ISER_CONN_INIT;
-	iser_conn->post_recv_buf_count = 0;
-	atomic_set(&iser_conn->post_send_buf_count, 0);
+	iser_conn->ib_conn.post_recv_buf_count = 0;
+	atomic_set(&iser_conn->ib_conn.post_send_buf_count, 0);
 	init_completion(&iser_conn->stop_completion);
 	init_completion(&iser_conn->flush_completion);
 	init_completion(&iser_conn->up_completion);
 	INIT_LIST_HEAD(&iser_conn->conn_list);
-	spin_lock_init(&iser_conn->lock);
+	spin_lock_init(&iser_conn->ib_conn.lock);
 	mutex_init(&iser_conn->state_mutex);
 }
 
@@ -861,6 +866,7 @@ int iser_connect(struct iser_conn *iser_conn,
 		 struct sockaddr *dst_addr,
 		 int non_blocking)
 {
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	int err = 0;
 
 	mutex_lock(&iser_conn->state_mutex);
@@ -870,20 +876,20 @@ int iser_connect(struct iser_conn *iser_conn,
 	iser_info("connecting to: %s\n", iser_conn->name);
 
 	/* the device is known only --after-- address resolution */
-	iser_conn->device = NULL;
+	ib_conn->device = NULL;
 
 	iser_conn->state = ISER_CONN_PENDING;
 
-	iser_conn->cma_id = rdma_create_id(iser_cma_handler,
+	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
 					 (void *)iser_conn,
 					 RDMA_PS_TCP, IB_QPT_RC);
-	if (IS_ERR(iser_conn->cma_id)) {
-		err = PTR_ERR(iser_conn->cma_id);
+	if (IS_ERR(ib_conn->cma_id)) {
+		err = PTR_ERR(ib_conn->cma_id);
 		iser_err("rdma_create_id failed: %d\n", err);
 		goto id_failure;
 	}
 
-	err = rdma_resolve_addr(iser_conn->cma_id, src_addr, dst_addr, 1000);
+	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
 	if (err) {
 		iser_err("rdma_resolve_addr failed: %d\n", err);
 		goto addr_failure;
@@ -905,7 +911,7 @@ int iser_connect(struct iser_conn *iser_conn,
 	return 0;
 
 id_failure:
-	iser_conn->cma_id = NULL;
+	ib_conn->cma_id = NULL;
 addr_failure:
 	iser_conn->state = ISER_CONN_DOWN;
 connect_failure:
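
iser_connect() is the textbook RDMA CM active side: create a CM ID bound to iser_cma_handler, then kick off asynchronous address resolution, unwinding through labeled exits on failure. A self-contained sketch of that sequence (example_cm_connect is an illustrative name; note the driver itself only NULLs cma_id in id_failure and defers rdma_destroy_id() to its release path, as the earlier hunks show):

	static int example_cm_connect(struct ib_conn *ib_conn, void *context,
				      struct sockaddr *src, struct sockaddr *dst)
	{
		int err;

		ib_conn->cma_id = rdma_create_id(iser_cma_handler, context,
						 RDMA_PS_TCP, IB_QPT_RC);
		if (IS_ERR(ib_conn->cma_id)) {
			err = PTR_ERR(ib_conn->cma_id);
			ib_conn->cma_id = NULL;	/* never keep an ERR_PTR around */
			return err;
		}

		/* async; the result arrives as an event in iser_cma_handler */
		err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
		if (err) {
			rdma_destroy_id(ib_conn->cma_id);
			ib_conn->cma_id = NULL;
		}
		return err;
	}
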
@@ -919,7 +925,7 @@ connect_failure:
  *
  * returns: 0 on success, errno code on failure
  */
-int iser_reg_page_vec(struct iser_conn *iser_conn,
+int iser_reg_page_vec(struct ib_conn *ib_conn,
 		      struct iser_page_vec *page_vec,
 		      struct iser_mem_reg *mem_reg)
 {
@@ -931,7 +937,7 @@ int iser_reg_page_vec(struct iser_conn *iser_conn,
 	page_list = page_vec->pages;
 	io_addr = page_list[0];
 
-	mem = ib_fmr_pool_map_phys(iser_conn->fmr.pool,
+	mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
 				   page_list,
 				   page_vec->length,
 				   io_addr);
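
For readers unfamiliar with the legacy FMR path used here: ib_fmr_pool_map_phys() takes a pre-created pool, an array of page-aligned DMA addresses, and the I/O virtual address of the first byte, and hands back a pool FMR whose keys can be placed in later work requests. A hedged sketch of the map/unmap pairing (example_fmr_map is an illustrative name; this API was removed from modern kernels):

	static int example_fmr_map(struct ib_fmr_pool *pool, u64 *pages, int npages)
	{
		struct ib_pool_fmr *mem;

		mem = ib_fmr_pool_map_phys(pool, pages, npages, pages[0]);
		if (IS_ERR(mem))
			return PTR_ERR(mem);

		/* ... post RDMA work requests using mem->fmr->rkey ... */

		return ib_fmr_pool_unmap(mem);
	}
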
@@ -990,6 +996,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 {
 	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
 	struct iser_conn *iser_conn = iser_task->iser_conn;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct fast_reg_descriptor *desc = reg->mem_h;
 
 	if (!reg->is_mr)
@@ -997,31 +1004,32 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 
 	reg->mem_h = NULL;
 	reg->is_mr = 0;
-	spin_lock_bh(&iser_conn->lock);
-	list_add_tail(&desc->list, &iser_conn->fastreg.pool);
-	spin_unlock_bh(&iser_conn->lock);
+	spin_lock_bh(&ib_conn->lock);
+	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+	spin_unlock_bh(&ib_conn->lock);
 }
 
 int iser_post_recvl(struct iser_conn *iser_conn)
 {
 	struct ib_recv_wr rx_wr, *rx_wr_failed;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct ib_sge sge;
 	int ib_ret;
 
 	sge.addr = iser_conn->login_resp_dma;
 	sge.length = ISER_RX_LOGIN_SIZE;
-	sge.lkey = iser_conn->device->mr->lkey;
+	sge.lkey = ib_conn->device->mr->lkey;
 
 	rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
 	rx_wr.sg_list = &sge;
 	rx_wr.num_sge = 1;
 	rx_wr.next = NULL;
 
-	iser_conn->post_recv_buf_count++;
-	ib_ret = ib_post_recv(iser_conn->qp, &rx_wr, &rx_wr_failed);
+	ib_conn->post_recv_buf_count++;
+	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
 	if (ib_ret) {
 		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
-		iser_conn->post_recv_buf_count--;
+		ib_conn->post_recv_buf_count--;
 	}
 	return ib_ret;
 }
@@ -1030,10 +1038,11 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
 {
 	struct ib_recv_wr *rx_wr, *rx_wr_failed;
 	int i, ib_ret;
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	unsigned int my_rx_head = iser_conn->rx_desc_head;
 	struct iser_rx_desc *rx_desc;
 
-	for (rx_wr = iser_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &iser_conn->rx_descs[my_rx_head];
 		rx_wr->wr_id = (unsigned long)rx_desc;
 		rx_wr->sg_list = &rx_desc->rx_sg;
@@ -1045,11 +1054,11 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
 
-	iser_conn->post_recv_buf_count += count;
-	ib_ret = ib_post_recv(iser_conn->qp, iser_conn->rx_wr, &rx_wr_failed);
+	ib_conn->post_recv_buf_count += count;
+	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
 	if (ib_ret) {
 		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
-		iser_conn->post_recv_buf_count -= count;
+		ib_conn->post_recv_buf_count -= count;
 	} else
 		iser_conn->rx_desc_head = my_rx_head;
 	return ib_ret;
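
The two posting routines differ only in batching: iser_post_recvl() posts a single work request, while iser_post_recvm() links count requests through ->next, NULL-terminates the chain, and hands the whole list to one ib_post_recv() call. A minimal sketch of that chaining pattern (example_post_recv_batch is an illustrative name):

	static int example_post_recv_batch(struct ib_qp *qp,
					   struct ib_recv_wr *wrs, int count)
	{
		struct ib_recv_wr *bad_wr;
		int i;

		for (i = 0; i < count; i++)
			wrs[i].next = (i == count - 1) ? NULL : &wrs[i + 1];

		return ib_post_recv(qp, wrs, &bad_wr);
	}
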
@@ -1061,12 +1070,12 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
  *
  * returns 0 on success, -1 on failure
  */
-int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc)
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
 {
 	int ib_ret;
 	struct ib_send_wr send_wr, *send_wr_failed;
 
-	ib_dma_sync_single_for_device(iser_conn->device->ib_device,
+	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
 				      tx_desc->dma_addr, ISER_HEADERS_LEN,
 				      DMA_TO_DEVICE);
 
@@ -1077,24 +1086,27 @@ int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc)
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;
 
-	atomic_inc(&iser_conn->post_send_buf_count);
+	atomic_inc(&ib_conn->post_send_buf_count);
 
-	ib_ret = ib_post_send(iser_conn->qp, &send_wr, &send_wr_failed);
+	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
 	if (ib_ret) {
 		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
-		atomic_dec(&iser_conn->post_send_buf_count);
+		atomic_dec(&ib_conn->post_send_buf_count);
 	}
 	return ib_ret;
 }
 
 static void iser_handle_comp_error(struct iser_tx_desc *desc,
-				   struct iser_conn *iser_conn)
+				   struct ib_conn *ib_conn)
 {
+	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
+						   ib_conn);
+
 	if (desc && desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, desc);
 
-	if (iser_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&iser_conn->post_send_buf_count) == 0) {
+	if (ib_conn->post_recv_buf_count == 0 &&
+	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
 		/**
 		 * getting here when the state is UP means that the conn is
 		 * being terminated asynchronously from the iSCSI layer's
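
The container_of() introduced above is the standard kernel idiom this patch rests on: because ib_conn is embedded in iser_conn as a plain member rather than a pointer, any code holding the inner pointer can recover the outer connection by subtracting the member offset, with no back-pointer to maintain. A userspace-compilable miniature of the idiom (example types; the kernel's container_of additionally type-checks via typeof):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ib_conn_ex   { int post_recv_buf_count; };
	struct iser_conn_ex {
		int state;
		struct ib_conn_ex ib_conn;	/* embedded, not a pointer */
	};

	static struct iser_conn_ex *to_iser_conn(struct ib_conn_ex *ib_conn)
	{
		return container_of(ib_conn, struct iser_conn_ex, ib_conn);
	}
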
@@ -1116,15 +1128,15 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 	struct ib_cq *cq = device->tx_cq[cq_index];
 	struct ib_wc wc;
 	struct iser_tx_desc *tx_desc;
-	struct iser_conn *iser_conn;
+	struct ib_conn *ib_conn;
 	int completed_tx = 0;
 
 	while (ib_poll_cq(cq, 1, &wc) == 1) {
 		tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
-		iser_conn = wc.qp->qp_context;
+		ib_conn = wc.qp->qp_context;
 		if (wc.status == IB_WC_SUCCESS) {
 			if (wc.opcode == IB_WC_SEND)
-				iser_snd_completion(tx_desc, iser_conn);
+				iser_snd_completion(tx_desc, ib_conn);
 			else
 				iser_err("expected opcode %d got %d\n",
 					 IB_WC_SEND, wc.opcode);
@@ -1132,8 +1144,8 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 			iser_err("tx id %llx status %d vend_err %x\n",
 				 wc.wr_id, wc.status, wc.vendor_err);
 			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
-				atomic_dec(&iser_conn->post_send_buf_count);
-				iser_handle_comp_error(tx_desc, iser_conn);
+				atomic_dec(&ib_conn->post_send_buf_count);
+				iser_handle_comp_error(tx_desc, ib_conn);
 			}
 		}
 		completed_tx++;
@@ -1151,7 +1163,7 @@ static void iser_cq_tasklet_fn(unsigned long data)
 	struct ib_wc wc;
 	struct iser_rx_desc *desc;
 	unsigned long xfer_len;
-	struct iser_conn *iser_conn;
+	struct ib_conn *ib_conn;
 	int completed_tx, completed_rx = 0;
 
 	/* First do tx drain, so in a case where we have rx flushes and a successful
@@ -1162,11 +1174,11 @@ static void iser_cq_tasklet_fn(unsigned long data)
 	while (ib_poll_cq(cq, 1, &wc) == 1) {
 		desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
 		BUG_ON(desc == NULL);
-		iser_conn = wc.qp->qp_context;
+		ib_conn = wc.qp->qp_context;
 		if (wc.status == IB_WC_SUCCESS) {
 			if (wc.opcode == IB_WC_RECV) {
 				xfer_len = (unsigned long)wc.byte_len;
-				iser_rcv_completion(desc, xfer_len, iser_conn);
+				iser_rcv_completion(desc, xfer_len, ib_conn);
 			} else
 				iser_err("expected opcode %d got %d\n",
 					 IB_WC_RECV, wc.opcode);
@@ -1174,8 +1186,8 @@ static void iser_cq_tasklet_fn(unsigned long data)
 			if (wc.status != IB_WC_WR_FLUSH_ERR)
 				iser_err("rx id %llx status %d vend_err %x\n",
 					 wc.wr_id, wc.status, wc.vendor_err);
-			iser_conn->post_recv_buf_count--;
-			iser_handle_comp_error(NULL, iser_conn);
+			ib_conn->post_recv_buf_count--;
+			iser_handle_comp_error(NULL, ib_conn);
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
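
Both drain loops now recover the owning connection from wc.qp->qp_context instead of smuggling it through the work-request ID, which carries only the descriptor pointer. That presumes the QP was created with ib_qp_init_attr.qp_context pointing at the ib_conn, which happens where the QP is set up, outside these hunks. A condensed sketch of the dispatch pattern:

	/* assumes init_attr.qp_context = ib_conn at QP creation time */
	static void example_drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;
		struct ib_conn *ib_conn;

		while (ib_poll_cq(cq, 1, &wc) == 1) {
			ib_conn = wc.qp->qp_context;
			if (wc.status != IB_WC_SUCCESS &&
			    wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("wr id %llx status %d\n",
					 wc.wr_id, wc.status);
			/* ... dispatch on wc.opcode using ib_conn ... */
		}
	}
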