author		Sagi Grimberg <sagig@mellanox.com>	2014-10-01 07:01:57 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-10-09 03:06:06 -0400
commit		5716af6e5234402b2017f41beb36c086201fae42
tree		88b8dc5828e28e371a680b7a8681c30f91b399b1
parent		fe82dcec644244676d55a1384c958d5f67979adb
IB/iser: Rename ib_conn -> iser_conn
Two reasons why we choose to do this:

1. No point today calling struct iser_conn by another name, ib_conn.

2. In the next patches we will restructure the iser control plane
   representation:
   - struct iser_conn: connection logical representation
   - struct ib_conn: connection RDMA layout representation

This patch does not change any functionality.

Signed-off-by: Ariel Nahum <arieln@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c	125
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.h	44
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c	197
-rw-r--r--	drivers/infiniband/ulp/iser/iser_memory.c	54
-rw-r--r--	drivers/infiniband/ulp/iser/iser_verbs.c	375
5 files changed, 403 insertions, 392 deletions
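For context on item (2) of the commit message, the planned split can be pictured in C roughly as below. This is an illustrative sketch only: the field names and grouping are assumptions made for the example, not the contents of the follow-up patches.

/* Hypothetical sketch of the split the commit message describes;
 * the fields shown are illustrative assumptions, not the actual
 * layout introduced by the later patches. */
struct ib_conn {			/* connection RDMA layout representation */
	struct rdma_cm_id	*cma_id;	/* RDMA CM id (assumed field) */
	struct ib_qp		*qp;		/* queue pair (assumed field) */
};

struct iser_conn {			/* connection logical representation */
	struct iscsi_conn	*iscsi_conn;	/* bound iSCSI connection (assumed) */
	enum iser_conn_state	state;		/* logical state machine (assumed) */
	struct ib_conn		ib_conn;	/* embedded RDMA representation (assumed) */
};

Under such a split, RDMA verbs helpers would operate on struct ib_conn while iSCSI-facing paths keep passing struct iser_conn; renaming the variables now frees the ib_conn name for that role.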
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93ce62fe1594..1f3ad2b13ae2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -147,8 +147,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
 int iser_initialize_task_headers(struct iscsi_task *task,
 				 struct iser_tx_desc *tx_desc)
 {
-	struct iser_conn *ib_conn = task->conn->dd_data;
-	struct iser_device *device = ib_conn->device;
+	struct iser_conn *iser_conn = task->conn->dd_data;
+	struct iser_device *device = iser_conn->device;
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	u64 dma_addr;

@@ -162,7 +162,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
 	tx_desc->tx_sg[0].lkey = device->mr->lkey;

-	iser_task->ib_conn = ib_conn;
+	iser_task->iser_conn = iser_conn;
 	return 0;
 }
 /**
@@ -290,8 +290,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_tx_desc *tx_desc = &iser_task->desc;
-	struct iser_conn *ib_conn = task->conn->dd_data;
-	struct iser_device *device = ib_conn->device;
+	struct iser_conn *iser_conn = task->conn->dd_data;
+	struct iser_device *device = iser_conn->device;

 	ib_dma_unmap_single(device->ib_device,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -344,7 +344,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 		     int is_leading)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iser_conn *ib_conn;
+	struct iser_conn *iser_conn;
 	struct iscsi_endpoint *ep;
 	int error;

@@ -360,30 +360,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 			  (unsigned long long)transport_eph);
 		return -EINVAL;
 	}
-	ib_conn = ep->dd_data;
+	iser_conn = ep->dd_data;

-	mutex_lock(&ib_conn->state_mutex);
-	if (ib_conn->state != ISER_CONN_UP) {
+	mutex_lock(&iser_conn->state_mutex);
+	if (iser_conn->state != ISER_CONN_UP) {
 		error = -EINVAL;
 		iser_err("iser_conn %p state is %d, teardown started\n",
-			 ib_conn, ib_conn->state);
+			 iser_conn, iser_conn->state);
 		goto out;
 	}

-	error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+	error = iser_alloc_rx_descriptors(iser_conn, conn->session);
 	if (error)
 		goto out;

 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn);
+	iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);

-	conn->dd_data = ib_conn;
-	ib_conn->iscsi_conn = conn;
+	conn->dd_data = iser_conn;
+	iser_conn->iscsi_conn = conn;

 out:
-	mutex_unlock(&ib_conn->state_mutex);
+	mutex_unlock(&iser_conn->state_mutex);
 	return error;
 }

@@ -391,11 +391,11 @@ static int
 iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *iscsi_conn;
-	struct iser_conn *ib_conn;
+	struct iser_conn *iser_conn;

 	iscsi_conn = cls_conn->dd_data;
-	ib_conn = iscsi_conn->dd_data;
-	reinit_completion(&ib_conn->stop_completion);
+	iser_conn = iscsi_conn->dd_data;
+	reinit_completion(&iser_conn->stop_completion);

 	return iscsi_conn_start(cls_conn);
 }
@@ -404,18 +404,18 @@ static void
 iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iser_conn *ib_conn = conn->dd_data;
+	struct iser_conn *iser_conn = conn->dd_data;

-	iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn);
+	iser_dbg("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);
 	iscsi_conn_stop(cls_conn, flag);

 	/*
 	 * Userspace may have goofed up and not bound the connection or
 	 * might have only partially setup the connection.
 	 */
-	if (ib_conn) {
+	if (iser_conn) {
 		conn->dd_data = NULL;
-		complete(&ib_conn->stop_completion);
+		complete(&iser_conn->stop_completion);
 	}
 }

@@ -447,7 +447,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct Scsi_Host *shost;
-	struct iser_conn *ib_conn = NULL;
+	struct iser_conn *iser_conn = NULL;

 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
 	if (!shost)
@@ -464,9 +464,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	 * the leading conn's ep so this will be NULL;
 	 */
 	if (ep) {
-		ib_conn = ep->dd_data;
-		if (ib_conn->pi_support) {
-			u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
+		iser_conn = ep->dd_data;
+		if (iser_conn->pi_support) {
+			u32 sig_caps = iser_conn->device->dev_attr.sig_prot_cap;

 			scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
 			if (iser_pi_guard)
@@ -476,8 +476,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 		}
 	}

-	if (iscsi_host_add(shost,
-			   ep ? ib_conn->device->ib_device->dma_device : NULL))
+	if (iscsi_host_add(shost, ep ?
+			   iser_conn->device->ib_device->dma_device : NULL))
 		goto free_host;

 	if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
@@ -577,17 +577,17 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
 				   enum iscsi_param param, char *buf)
 {
-	struct iser_conn *ib_conn = ep->dd_data;
+	struct iser_conn *iser_conn = ep->dd_data;
 	int len;

 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
 	case ISCSI_PARAM_CONN_ADDRESS:
-		if (!ib_conn || !ib_conn->cma_id)
+		if (!iser_conn || !iser_conn->cma_id)
 			return -ENOTCONN;

 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-				&ib_conn->cma_id->route.addr.dst_addr,
+				&iser_conn->cma_id->route.addr.dst_addr,
 				param, buf);
 		break;
 	default:
@@ -602,24 +602,24 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		      int non_blocking)
 {
 	int err;
-	struct iser_conn *ib_conn;
+	struct iser_conn *iser_conn;
 	struct iscsi_endpoint *ep;

 	ep = iscsi_create_endpoint(0);
 	if (!ep)
 		return ERR_PTR(-ENOMEM);

-	ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL);
-	if (!ib_conn) {
+	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
+	if (!iser_conn) {
 		err = -ENOMEM;
 		goto failure;
 	}

-	ep->dd_data = ib_conn;
-	ib_conn->ep = ep;
-	iser_conn_init(ib_conn);
+	ep->dd_data = iser_conn;
+	iser_conn->ep = ep;
+	iser_conn_init(iser_conn);

-	err = iser_connect(ib_conn, NULL, dst_addr, non_blocking);
+	err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
 	if (err)
 		goto failure;

@@ -632,22 +632,22 @@ failure:
 static int
 iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-	struct iser_conn *ib_conn;
+	struct iser_conn *iser_conn;
 	int rc;

-	ib_conn = ep->dd_data;
-	rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion,
+	iser_conn = ep->dd_data;
+	rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
 						       msecs_to_jiffies(timeout_ms));
 	/* if conn establishment failed, return error code to iscsi */
 	if (rc == 0) {
-		mutex_lock(&ib_conn->state_mutex);
-		if (ib_conn->state == ISER_CONN_TERMINATING ||
-		    ib_conn->state == ISER_CONN_DOWN)
+		mutex_lock(&iser_conn->state_mutex);
+		if (iser_conn->state == ISER_CONN_TERMINATING ||
+		    iser_conn->state == ISER_CONN_DOWN)
 			rc = -1;
-		mutex_unlock(&ib_conn->state_mutex);
+		mutex_unlock(&iser_conn->state_mutex);
 	}

-	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", iser_conn, rc);

 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -660,12 +660,14 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 static void
 iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
-	struct iser_conn *ib_conn;
+	struct iser_conn *iser_conn;

-	ib_conn = ep->dd_data;
-	iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
-	mutex_lock(&ib_conn->state_mutex);
-	iser_conn_terminate(ib_conn);
+	iser_conn = ep->dd_data;
+	iser_info("ep %p iser conn %p state %d\n",
+		  ep, iser_conn, iser_conn->state);
+
+	mutex_lock(&iser_conn->state_mutex);
+	iser_conn_terminate(iser_conn);

 	/*
 	 * if iser_conn and iscsi_conn are bound, we must wait for
@@ -673,14 +675,14 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	 * the iser resources. Otherwise we are safe to free resources
 	 * immediately.
 	 */
-	if (ib_conn->iscsi_conn) {
-		INIT_WORK(&ib_conn->release_work, iser_release_work);
-		queue_work(release_wq, &ib_conn->release_work);
-		mutex_unlock(&ib_conn->state_mutex);
+	if (iser_conn->iscsi_conn) {
+		INIT_WORK(&iser_conn->release_work, iser_release_work);
+		queue_work(release_wq, &iser_conn->release_work);
+		mutex_unlock(&iser_conn->state_mutex);
 	} else {
-		ib_conn->state = ISER_CONN_DOWN;
-		mutex_unlock(&ib_conn->state_mutex);
-		iser_conn_release(ib_conn);
+		iser_conn->state = ISER_CONN_DOWN;
+		mutex_unlock(&iser_conn->state_mutex);
+		iser_conn_release(iser_conn);
 	}
 	iscsi_destroy_endpoint(ep);
 }
@@ -843,7 +845,7 @@ register_transport_failure:

 static void __exit iser_exit(void)
 {
-	struct iser_conn *ib_conn, *n;
+	struct iser_conn *iser_conn, *n;
 	int connlist_empty;

 	iser_dbg("Removing iSER datamover...\n");
@@ -856,8 +858,9 @@ static void __exit iser_exit(void)
 	if (!connlist_empty) {
 		iser_err("Error cleanup stage completed but we still have iser "
 			 "connections, destroying them anyway.\n");
-		list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) {
-			iser_conn_release(ib_conn);
+		list_for_each_entry_safe(iser_conn, n, &ig.connlist,
+					 conn_list) {
+			iser_conn_release(iser_conn);
 		}
 	}

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9f0e0e34d6ca..ec34b8f7d385 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -179,7 +179,7 @@ struct iser_cm_hdr {
 /* Length of an object name string */
 #define ISER_OBJECT_NAME_SIZE	64

-enum iser_ib_conn_state {
+enum iser_conn_state {
 	ISER_CONN_INIT,		/* descriptor allocd, no conn          */
 	ISER_CONN_PENDING,	/* in the process of being established */
 	ISER_CONN_UP,		/* up and running                      */
@@ -281,9 +281,9 @@ struct iser_device {
 	int			cq_active_qps[ISER_MAX_CQ];
 	int			cqs_used;
 	struct iser_cq_desc	*cq_desc;
-	int			(*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
+	int			(*iser_alloc_rdma_reg_res)(struct iser_conn *iser_conn,
 							   unsigned cmds_max);
-	void			(*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
+	void			(*iser_free_rdma_reg_res)(struct iser_conn *iser_conn);
 	int			(*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
 						     enum iser_data_dir cmd_dir);
 	void			(*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
@@ -320,7 +320,7 @@ struct fast_reg_descriptor {
 struct iser_conn {
 	struct iscsi_conn	*iscsi_conn;
 	struct iscsi_endpoint	*ep;
-	enum iser_ib_conn_state	state;		/* rdma connection state */
+	enum iser_conn_state	state;		/* rdma connection state */
 	atomic_t		refcount;
 	spinlock_t		lock;		/* used for state changes */
 	struct iser_device	*device;	/* device context */
@@ -363,7 +363,7 @@ struct iser_conn {

 struct iscsi_iser_task {
 	struct iser_tx_desc	desc;
-	struct iser_conn	*ib_conn;
+	struct iser_conn	*iser_conn;
 	enum iser_task_status	status;
 	struct scsi_cmnd	*sc;
 	int			command_sent;	/* set if command sent */
@@ -419,25 +419,26 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
 		     char *rx_data,
 		     int rx_data_len);

-void iser_conn_init(struct iser_conn *ib_conn);
+void iser_conn_init(struct iser_conn *iser_conn);

-void iser_conn_release(struct iser_conn *ib_conn);
+void iser_conn_release(struct iser_conn *iser_conn);

-void iser_conn_terminate(struct iser_conn *ib_conn);
+void iser_conn_terminate(struct iser_conn *iser_conn);

 void iser_release_work(struct work_struct *work);

 void iser_rcv_completion(struct iser_rx_desc *desc,
 			 unsigned long dto_xfer_len,
-			 struct iser_conn *ib_conn);
+			 struct iser_conn *iser_conn);

-void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
+void iser_snd_completion(struct iser_tx_desc *desc,
+			 struct iser_conn *iser_conn);

 void iser_task_rdma_init(struct iscsi_iser_task *task);

 void iser_task_rdma_finalize(struct iscsi_iser_task *task);

-void iser_free_rx_descriptors(struct iser_conn *ib_conn);
+void iser_free_rx_descriptors(struct iser_conn *iser_conn);

 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     struct iser_data_buf *mem,
@@ -449,12 +450,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
 int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
 			      enum iser_data_dir cmd_dir);

-int iser_connect(struct iser_conn *ib_conn,
+int iser_connect(struct iser_conn *iser_conn,
 		 struct sockaddr *src_addr,
 		 struct sockaddr *dst_addr,
 		 int non_blocking);

-int iser_reg_page_vec(struct iser_conn *ib_conn,
+int iser_reg_page_vec(struct iser_conn *iser_conn,
 		      struct iser_page_vec *page_vec,
 		      struct iser_mem_reg *mem_reg);

@@ -463,9 +464,9 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
 void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 			    enum iser_data_dir cmd_dir);

-int iser_post_recvl(struct iser_conn *ib_conn);
-int iser_post_recvm(struct iser_conn *ib_conn, int count);
-int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
+int iser_post_recvl(struct iser_conn *iser_conn);
+int iser_post_recvm(struct iser_conn *iser_conn, int count);
+int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc);

 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
@@ -476,11 +477,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 			      struct iser_data_buf *data);
 int iser_initialize_task_headers(struct iscsi_task *task,
 				 struct iser_tx_desc *tx_desc);
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
-int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
-void iser_free_fmr_pool(struct iser_conn *ib_conn);
-int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max);
-void iser_free_fastreg_pool(struct iser_conn *ib_conn);
+int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+			      struct iscsi_session *session);
+int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max);
+void iser_free_fmr_pool(struct iser_conn *iser_conn);
+int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max);
+void iser_free_fastreg_pool(struct iser_conn *iser_conn);
 u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 			     enum iser_data_dir cmd_dir, sector_t *sector);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 8d44a4060634..1f53ccb31534 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)

 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device *device = iser_task->ib_conn->device;
+	struct iser_device *device = iser_task->iser_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int edtl)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
-	struct iser_device *device = iser_task->ib_conn->device;
+	struct iser_device *device = iser_task->iser_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -160,10 +160,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 }

 /* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iser_conn *ib_conn,
+static void iser_create_send_desc(struct iser_conn *iser_conn,
 				  struct iser_tx_desc *tx_desc)
 {
-	struct iser_device *device = ib_conn->device;
+	struct iser_device *device = iser_conn->device;

 	ib_dma_sync_single_for_cpu(device->ib_device,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -179,103 +179,106 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
 	}
 }

-static void iser_free_login_buf(struct iser_conn *ib_conn)
+static void iser_free_login_buf(struct iser_conn *iser_conn)
 {
-	if (!ib_conn->login_buf)
+	if (!iser_conn->login_buf)
 		return;

-	if (ib_conn->login_req_dma)
-		ib_dma_unmap_single(ib_conn->device->ib_device,
-				    ib_conn->login_req_dma,
+	if (iser_conn->login_req_dma)
+		ib_dma_unmap_single(iser_conn->device->ib_device,
+				    iser_conn->login_req_dma,
 				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

-	if (ib_conn->login_resp_dma)
-		ib_dma_unmap_single(ib_conn->device->ib_device,
-				    ib_conn->login_resp_dma,
+	if (iser_conn->login_resp_dma)
+		ib_dma_unmap_single(iser_conn->device->ib_device,
+				    iser_conn->login_resp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

-	kfree(ib_conn->login_buf);
+	kfree(iser_conn->login_buf);

 	/* make sure we never redo any unmapping */
-	ib_conn->login_req_dma = 0;
-	ib_conn->login_resp_dma = 0;
-	ib_conn->login_buf = NULL;
+	iser_conn->login_req_dma = 0;
+	iser_conn->login_resp_dma = 0;
+	iser_conn->login_buf = NULL;
 }

-static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 {
 	struct iser_device *device;
 	int req_err, resp_err;

-	BUG_ON(ib_conn->device == NULL);
+	BUG_ON(iser_conn->device == NULL);

-	device = ib_conn->device;
+	device = iser_conn->device;

-	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!ib_conn->login_buf)
+	if (!iser_conn->login_buf)
 		goto out_err;

-	ib_conn->login_req_buf = ib_conn->login_buf;
-	ib_conn->login_resp_buf = ib_conn->login_buf +
+	iser_conn->login_req_buf = iser_conn->login_buf;
+	iser_conn->login_resp_buf = iser_conn->login_buf +
 						ISCSI_DEF_MAX_RECV_SEG_LEN;

-	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
-				(void *)ib_conn->login_req_buf,
-				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+	iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
+						     iser_conn->login_req_buf,
+						     ISCSI_DEF_MAX_RECV_SEG_LEN,
+						     DMA_TO_DEVICE);

-	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
-				(void *)ib_conn->login_resp_buf,
-				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+	iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
+						      iser_conn->login_resp_buf,
+						      ISER_RX_LOGIN_SIZE,
+						      DMA_FROM_DEVICE);

 	req_err = ib_dma_mapping_error(device->ib_device,
-				       ib_conn->login_req_dma);
+				       iser_conn->login_req_dma);
 	resp_err = ib_dma_mapping_error(device->ib_device,
-					ib_conn->login_resp_dma);
+					iser_conn->login_resp_dma);

 	if (req_err || resp_err) {
 		if (req_err)
-			ib_conn->login_req_dma = 0;
+			iser_conn->login_req_dma = 0;
 		if (resp_err)
-			ib_conn->login_resp_dma = 0;
+			iser_conn->login_resp_dma = 0;
 		goto free_login_buf;
 	}
 	return 0;

free_login_buf:
-	iser_free_login_buf(ib_conn);
+	iser_free_login_buf(iser_conn);

out_err:
 	iser_err("unable to alloc or map login buf\n");
 	return -ENOMEM;
 }

-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
+int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+			      struct iscsi_session *session)
 {
 	int i, j;
 	u64 dma_addr;
 	struct iser_rx_desc *rx_desc;
 	struct ib_sge *rx_sg;
-	struct iser_device *device = ib_conn->device;
+	struct iser_device *device = iser_conn->device;

-	ib_conn->qp_max_recv_dtos = session->cmds_max;
-	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
-	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
+	iser_conn->qp_max_recv_dtos = session->cmds_max;
+	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
+	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

-	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+	if (device->iser_alloc_rdma_reg_res(iser_conn, session->scsi_cmds_max))
 		goto create_rdma_reg_res_failed;

-	if (iser_alloc_login_buf(ib_conn))
+	if (iser_alloc_login_buf(iser_conn))
 		goto alloc_login_buf_fail;

-	ib_conn->rx_descs = kmalloc(session->cmds_max *
+	iser_conn->rx_descs = kmalloc(session->cmds_max *
 				sizeof(struct iser_rx_desc), GFP_KERNEL);
-	if (!ib_conn->rx_descs)
+	if (!iser_conn->rx_descs)
 		goto rx_desc_alloc_fail;

-	rx_desc = ib_conn->rx_descs;
+	rx_desc = iser_conn->rx_descs;

-	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
+	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
 		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
 					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(device->ib_device, dma_addr))
@@ -289,52 +292,52 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *s
 		rx_sg->lkey = device->mr->lkey;
 	}

-	ib_conn->rx_desc_head = 0;
+	iser_conn->rx_desc_head = 0;
 	return 0;

rx_desc_dma_map_failed:
-	rx_desc = ib_conn->rx_descs;
+	rx_desc = iser_conn->rx_descs;
 	for (j = 0; j < i; j++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-	kfree(ib_conn->rx_descs);
-	ib_conn->rx_descs = NULL;
+	kfree(iser_conn->rx_descs);
+	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
-	iser_free_login_buf(ib_conn);
+	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
-	device->iser_free_rdma_reg_res(ib_conn);
+	device->iser_free_rdma_reg_res(iser_conn);
create_rdma_reg_res_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
 }

-void iser_free_rx_descriptors(struct iser_conn *ib_conn)
+void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 {
 	int i;
 	struct iser_rx_desc *rx_desc;
-	struct iser_device *device = ib_conn->device;
+	struct iser_device *device = iser_conn->device;

-	if (!ib_conn->rx_descs)
+	if (!iser_conn->rx_descs)
 		goto free_login_buf;

 	if (device->iser_free_rdma_reg_res)
-		device->iser_free_rdma_reg_res(ib_conn);
+		device->iser_free_rdma_reg_res(iser_conn);

-	rx_desc = ib_conn->rx_descs;
-	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
+	rx_desc = iser_conn->rx_descs;
+	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-	kfree(ib_conn->rx_descs);
+	kfree(iser_conn->rx_descs);
 	/* make sure we never redo any unmapping */
-	ib_conn->rx_descs = NULL;
+	iser_conn->rx_descs = NULL;

free_login_buf:
-	iser_free_login_buf(ib_conn);
+	iser_free_login_buf(iser_conn);
 }

 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
-	struct iser_conn *ib_conn = conn->dd_data;
+	struct iser_conn *iser_conn = conn->dd_data;
 	struct iscsi_session *session = conn->session;

 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
@@ -347,18 +350,18 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 	 * response) and no posted send buffers left - they must have been
 	 * consumed during previous login phases.
 	 */
-	WARN_ON(ib_conn->post_recv_buf_count != 1);
-	WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);
+	WARN_ON(iser_conn->post_recv_buf_count != 1);
+	WARN_ON(atomic_read(&iser_conn->post_send_buf_count) != 0);

 	if (session->discovery_sess) {
 		iser_info("Discovery session, re-using login RX buffer\n");
 		return 0;
 	} else
 		iser_info("Normal session, posting batch of RX %d buffers\n",
-			  ib_conn->min_posted_rx);
+			  iser_conn->min_posted_rx);

 	/* Initial post receive buffers */
-	if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx))
+	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
 		return -ENOMEM;

 	return 0;
@@ -370,7 +373,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 int iser_send_command(struct iscsi_conn *conn,
 		      struct iscsi_task *task)
 {
-	struct iser_conn *ib_conn = conn->dd_data;
+	struct iser_conn *iser_conn = conn->dd_data;
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	unsigned long edtl;
 	int err;
@@ -383,7 +386,7 @@ int iser_send_command(struct iscsi_conn *conn,

 	/* build the tx desc regd header and add it to the tx desc dto */
 	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
-	iser_create_send_desc(ib_conn, tx_desc);
+	iser_create_send_desc(iser_conn, tx_desc);

 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
 		data_buf = &iser_task->data[ISER_DIR_IN];
@@ -423,7 +426,7 @@ int iser_send_command(struct iscsi_conn *conn,

 	iser_task->status = ISER_TASK_STATUS_STARTED;

-	err = iser_post_send(ib_conn, tx_desc);
+	err = iser_post_send(iser_conn, tx_desc);
 	if (!err)
 		return 0;

@@ -439,7 +442,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		       struct iscsi_task *task,
 		       struct iscsi_data *hdr)
 {
-	struct iser_conn *ib_conn = conn->dd_data;
+	struct iser_conn *iser_conn = conn->dd_data;
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_tx_desc *tx_desc = NULL;
 	struct iser_regd_buf *regd_buf;
@@ -488,7 +491,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 		 itt, buf_offset, data_seg_len);


-	err = iser_post_send(ib_conn, tx_desc);
+	err = iser_post_send(iser_conn, tx_desc);
 	if (!err)
 		return 0;

@@ -501,7 +504,7 @@ send_data_out_error:
 int iser_send_control(struct iscsi_conn *conn,
 		      struct iscsi_task *task)
 {
-	struct iser_conn *ib_conn = conn->dd_data;
+	struct iser_conn *iser_conn = conn->dd_data;
 	struct iscsi_iser_task *iser_task = task->dd_data;
 	struct iser_tx_desc *mdesc = &iser_task->desc;
 	unsigned long data_seg_len;
@@ -510,9 +513,9 @@ int iser_send_control(struct iscsi_conn *conn,

 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
-	iser_create_send_desc(ib_conn, mdesc);
+	iser_create_send_desc(iser_conn, mdesc);

-	device = ib_conn->device;
+	device = iser_conn->device;

 	data_seg_len = ntoh24(task->hdr->dlength);

@@ -524,16 +527,16 @@ int iser_send_control(struct iscsi_conn *conn,
 	}

 	ib_dma_sync_single_for_cpu(device->ib_device,
-		ib_conn->login_req_dma, task->data_count,
+		iser_conn->login_req_dma, task->data_count,
 		DMA_TO_DEVICE);

-	memcpy(ib_conn->login_req_buf, task->data, task->data_count);
+	memcpy(iser_conn->login_req_buf, task->data, task->data_count);

 	ib_dma_sync_single_for_device(device->ib_device,
-		ib_conn->login_req_dma, task->data_count,
+		iser_conn->login_req_dma, task->data_count,
 		DMA_TO_DEVICE);

-	tx_dsg->addr = ib_conn->login_req_dma;
+	tx_dsg->addr = iser_conn->login_req_dma;
 	tx_dsg->length = task->data_count;
 	tx_dsg->lkey = device->mr->lkey;
 	mdesc->num_sge = 2;
@@ -542,7 +545,7 @@ int iser_send_control(struct iscsi_conn *conn,
 	if (task == conn->login_task) {
 		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
 			 task->hdr->opcode, data_seg_len);
-		err = iser_post_recvl(ib_conn);
+		err = iser_post_recvl(iser_conn);
 		if (err)
 			goto send_control_error;
 		err = iser_post_rx_bufs(conn, task->hdr);
@@ -550,7 +553,7 @@ int iser_send_control(struct iscsi_conn *conn,
 			goto send_control_error;
 	}

-	err = iser_post_send(ib_conn, mdesc);
+	err = iser_post_send(iser_conn, mdesc);
 	if (!err)
 		return 0;

@@ -564,59 +567,59 @@ send_control_error:
 */
 void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 			 unsigned long rx_xfer_len,
-			 struct iser_conn *ib_conn)
+			 struct iser_conn *iser_conn)
 {
 	struct iscsi_hdr *hdr;
 	u64 rx_dma;
 	int rx_buflen, outstanding, count, err;

 	/* differentiate between login to all other PDUs */
-	if ((char *)rx_desc == ib_conn->login_resp_buf) {
-		rx_dma = ib_conn->login_resp_dma;
+	if ((char *)rx_desc == iser_conn->login_resp_buf) {
+		rx_dma = iser_conn->login_resp_dma;
 		rx_buflen = ISER_RX_LOGIN_SIZE;
 	} else {
 		rx_dma = rx_desc->dma_addr;
 		rx_buflen = ISER_RX_PAYLOAD_SIZE;
 	}

-	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
+	ib_dma_sync_single_for_cpu(iser_conn->device->ib_device, rx_dma,
 				   rx_buflen, DMA_FROM_DEVICE);

 	hdr = &rx_desc->iscsi_header;

 	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
 		 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

-	iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data,
+	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
 			rx_xfer_len - ISER_HEADERS_LEN);

-	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
+	ib_dma_sync_single_for_device(iser_conn->device->ib_device, rx_dma,
 				      rx_buflen, DMA_FROM_DEVICE);

 	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
 	 * task eliminates the need to worry on tasks which are completed in   *
 	 * parallel to the execution of iser_conn_term. So the code that waits *
 	 * for the posted rx bufs refcount to become zero handles everything   */
-	ib_conn->post_recv_buf_count--;
+	iser_conn->post_recv_buf_count--;

-	if (rx_dma == ib_conn->login_resp_dma)
+	if (rx_dma == iser_conn->login_resp_dma)
 		return;

-	outstanding = ib_conn->post_recv_buf_count;
-	if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
-		count = min(ib_conn->qp_max_recv_dtos - outstanding,
-			    ib_conn->min_posted_rx);
-		err = iser_post_recvm(ib_conn, count);
+	outstanding = iser_conn->post_recv_buf_count;
+	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
+		count = min(iser_conn->qp_max_recv_dtos - outstanding,
+			    iser_conn->min_posted_rx);
+		err = iser_post_recvm(iser_conn, count);
 		if (err)
 			iser_err("posting %d rx bufs err %d\n", count, err);
 	}
 }

 void iser_snd_completion(struct iser_tx_desc *tx_desc,
-			 struct iser_conn *ib_conn)
+			 struct iser_conn *iser_conn)
 {
 	struct iscsi_task *task;
-	struct iser_device *device = ib_conn->device;
+	struct iser_device *device = iser_conn->device;

 	if (tx_desc->type == ISCSI_TX_DATAOUT) {
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
@@ -625,7 +628,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
 		tx_desc = NULL;
 	}

-	atomic_dec(&ib_conn->post_send_buf_count);
+	atomic_dec(&iser_conn->post_send_buf_count);

 	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
@@ -658,7 +661,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)

 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
-	struct iser_device *device = iser_task->ib_conn->device;
+	struct iser_device *device = iser_task->iser_conn->device;
 	int is_rdma_data_aligned = 1;
 	int is_rdma_prot_aligned = 1;
 	int prot_count = scsi_prot_sg_count(iser_task->sc);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 47acd3ad3a17..ba09fbbe765e 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 			      struct iser_data_buf *data_copy,
 			      enum iser_data_dir cmd_dir)
 {
-	struct ib_device *dev = iser_task->ib_conn->device->ib_device;
+	struct ib_device *dev = iser_task->iser_conn->device->ib_device;
 	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 	struct scatterlist *sg;
 	char *mem = NULL;
@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 	unsigned long cmd_data_len;

-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->device->ib_device;

 	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
 			(cmd_dir == ISER_DIR_OUT) ?
@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;

 	iser_task->dir[iser_dir] = 1;
-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->device->ib_device;

 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 {
 	struct ib_device *dev;

-	dev = iser_task->ib_conn->device->ib_device;
+	dev = iser_task->iser_conn->device->ib_device;
 	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 }

@@ -348,7 +348,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 			      enum iser_data_dir cmd_dir,
 			      int aligned_len)
 {
-	struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;

 	iscsi_conn->fmr_unalign_cnt++;
 	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			  enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
-	struct iser_device *device = ib_conn->device;
+	struct iser_conn *iser_conn = iser_task->iser_conn;
+	struct iser_device *device = iser_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 			 (unsigned long)regd_buf->reg.va,
 			 (unsigned long)regd_buf->reg.len);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
-		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
+		iser_page_vec_build(mem, iser_conn->fmr.page_vec, ibdev);
+		err = iser_reg_page_vec(iser_conn, iser_conn->fmr.page_vec,
 					&regd_buf->reg);
 		if (err && err != -EAGAIN) {
 			iser_data_buf_dump(mem, ibdev);
@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 				 mem->dma_nents,
 				 ntoh24(iser_task->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-				 ib_conn->fmr.page_vec->data_size,
-				 ib_conn->fmr.page_vec->length,
-				 ib_conn->fmr.page_vec->offset);
-			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
+				 iser_conn->fmr.page_vec->data_size,
+				 iser_conn->fmr.page_vec->length,
+				 iser_conn->fmr.page_vec->offset);
+			for (i = 0; i < iser_conn->fmr.page_vec->length; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
-					 (unsigned long long) ib_conn->fmr.page_vec->pages[i]);
+					 (unsigned long long)iser_conn->fmr.page_vec->pages[i]);
 		}
 		if (err)
 			return err;
@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
 		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
+	struct iser_conn *iser_conn = iser_task->iser_conn;
 	struct iser_pi_context *pi_ctx = desc->pi_ctx;
 	struct ib_send_wr sig_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 	else
 		wr->next = &sig_wr;

-	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+	ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
 	if (ret) {
 		iser_err("reg_sig_mr failed, ret:%d\n", ret);
 		goto err;
@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 			    struct ib_sge *sge)
 {
 	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
-	struct iser_conn *ib_conn = iser_task->ib_conn;
-	struct iser_device *device = ib_conn->device;
+	struct iser_conn *iser_conn = iser_task->iser_conn;
+	struct iser_device *device = iser_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct ib_mr *mr;
 	struct ib_fast_reg_page_list *frpl;
@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	else
 		wr->next = &fastreg_wr;

-	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+	ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
 	if (ret) {
 		iser_err("fast registration failed, ret:%d\n", ret);
 		return ret;
@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 			      enum iser_data_dir cmd_dir)
 {
-	struct iser_conn *ib_conn = iser_task->ib_conn;
-	struct iser_device *device = ib_conn->device;
+	struct iser_conn *iser_conn = iser_task->iser_conn;
+	struct iser_device *device = iser_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
 	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,

 	if (mem->dma_nents != 1 ||
 	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		spin_lock_irqsave(&ib_conn->lock, flags);
-		desc = list_first_entry(&ib_conn->fastreg.pool,
+		spin_lock_irqsave(&iser_conn->lock, flags);
+		desc = list_first_entry(&iser_conn->fastreg.pool,
 					struct fast_reg_descriptor, list);
 		list_del(&desc->list);
-		spin_unlock_irqrestore(&ib_conn->lock, flags);
+		spin_unlock_irqrestore(&iser_conn->lock, flags);
 		regd_buf->reg.mem_h = desc;
 	}

@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 	return 0;
err_reg:
 	if (desc) {
-		spin_lock_irqsave(&ib_conn->lock, flags);
-		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
-		spin_unlock_irqrestore(&ib_conn->lock, flags);
+		spin_lock_irqsave(&iser_conn->lock, flags);
+		list_add_tail(&desc->list, &iser_conn->fastreg.pool);
+		spin_unlock_irqrestore(&iser_conn->lock, flags);
 	}

 	return err;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3bfec4bbda52..778c166916fe 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -213,19 +213,19 @@ static void iser_free_device_ib_res(struct iser_device *device)
213 * 213 *
214 * returns 0 on success, or errno code on failure 214 * returns 0 on success, or errno code on failure
215 */ 215 */
216int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max) 216int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max)
217{ 217{
218 struct iser_device *device = ib_conn->device; 218 struct iser_device *device = iser_conn->device;
219 struct ib_fmr_pool_param params; 219 struct ib_fmr_pool_param params;
220 int ret = -ENOMEM; 220 int ret = -ENOMEM;
221 221
222 ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + 222 iser_conn->fmr.page_vec = kmalloc(sizeof(*iser_conn->fmr.page_vec) +
223 (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), 223 (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
224 GFP_KERNEL); 224 GFP_KERNEL);
225 if (!ib_conn->fmr.page_vec) 225 if (!iser_conn->fmr.page_vec)
226 return ret; 226 return ret;
227 227
228 ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); 228 iser_conn->fmr.page_vec->pages = (u64 *)(iser_conn->fmr.page_vec + 1);
229 229
230 params.page_shift = SHIFT_4K; 230 params.page_shift = SHIFT_4K;
231 /* when the first/last SG element are not start/end * 231 /* when the first/last SG element are not start/end *
@@ -241,16 +241,16 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
241 IB_ACCESS_REMOTE_WRITE | 241 IB_ACCESS_REMOTE_WRITE |
242 IB_ACCESS_REMOTE_READ); 242 IB_ACCESS_REMOTE_READ);
243 243
244 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params); 244 iser_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
245 if (!IS_ERR(ib_conn->fmr.pool)) 245 if (!IS_ERR(iser_conn->fmr.pool))
246 return 0; 246 return 0;
247 247
248 /* no FMR => no need for page_vec */ 248 /* no FMR => no need for page_vec */
249 kfree(ib_conn->fmr.page_vec); 249 kfree(iser_conn->fmr.page_vec);
250 ib_conn->fmr.page_vec = NULL; 250 iser_conn->fmr.page_vec = NULL;
251 251
252 ret = PTR_ERR(ib_conn->fmr.pool); 252 ret = PTR_ERR(iser_conn->fmr.pool);
253 ib_conn->fmr.pool = NULL; 253 iser_conn->fmr.pool = NULL;
254 if (ret != -ENOSYS) { 254 if (ret != -ENOSYS) {
255 iser_err("FMR allocation failed, err %d\n", ret); 255 iser_err("FMR allocation failed, err %d\n", ret);
256 return ret; 256 return ret;
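
ib_create_fmr_pool() takes the protection domain and an ib_fmr_pool_param describing the pool; only page_shift and the access flags appear in these hunks, so the remaining fields below (pool sizing, dirty watermark, caching) are a hedged sketch rather than the driver's exact values:

	struct ib_fmr_pool_param params;
	struct ib_fmr_pool *pool;
	int ret;

	memset(&params, 0, sizeof(params));
	params.page_shift        = SHIFT_4K;                    /* 4K mapping granularity */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; /* +1 for an unaligned first/last SG */
	params.pool_size         = cmds_max * 2;                /* illustrative sizing */
	params.dirty_watermark   = cmds_max;                    /* illustrative flush threshold */
	params.cache             = 0;
	params.flush_function    = NULL;
	params.access            = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		/* -ENOSYS means the HCA has no FMR support at all; the
		 * caller can treat that as non-fatal and fall back to
		 * fast registration, as the hunk above does. */
		return ret != -ENOSYS ? ret : 0;
	}

The ERR_PTR convention explains the IS_ERR()/PTR_ERR() dance in the hunk: a failed creation encodes the errno in the returned pointer, so the pointer must be NULLed before reuse.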
@@ -263,18 +263,18 @@ int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
263/** 263/**
264 * iser_free_fmr_pool - releases the FMR pool and page vec 264 * iser_free_fmr_pool - releases the FMR pool and page vec
265 */ 265 */
266void iser_free_fmr_pool(struct iser_conn *ib_conn) 266void iser_free_fmr_pool(struct iser_conn *iser_conn)
267{ 267{
268 iser_info("freeing conn %p fmr pool %p\n", 268 iser_info("freeing conn %p fmr pool %p\n",
269 ib_conn, ib_conn->fmr.pool); 269 iser_conn, iser_conn->fmr.pool);
270 270
271 if (ib_conn->fmr.pool != NULL) 271 if (iser_conn->fmr.pool != NULL)
272 ib_destroy_fmr_pool(ib_conn->fmr.pool); 272 ib_destroy_fmr_pool(iser_conn->fmr.pool);
273 273
274 ib_conn->fmr.pool = NULL; 274 iser_conn->fmr.pool = NULL;
275 275
276 kfree(ib_conn->fmr.page_vec); 276 kfree(iser_conn->fmr.page_vec);
277 ib_conn->fmr.page_vec = NULL; 277 iser_conn->fmr.page_vec = NULL;
278} 278}
279 279
280static int 280static int
@@ -367,14 +367,14 @@ fast_reg_mr_failure:
367 * for fast registration work requests. 367 * for fast registration work requests.
368 * returns 0 on success, or errno code on failure 368 * returns 0 on success, or errno code on failure
369 */ 369 */
370int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max) 370int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max)
371{ 371{
372 struct iser_device *device = ib_conn->device; 372 struct iser_device *device = iser_conn->device;
373 struct fast_reg_descriptor *desc; 373 struct fast_reg_descriptor *desc;
374 int i, ret; 374 int i, ret;
375 375
376 INIT_LIST_HEAD(&ib_conn->fastreg.pool); 376 INIT_LIST_HEAD(&iser_conn->fastreg.pool);
377 ib_conn->fastreg.pool_size = 0; 377 iser_conn->fastreg.pool_size = 0;
378 for (i = 0; i < cmds_max; i++) { 378 for (i = 0; i < cmds_max; i++) {
379 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 379 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
380 if (!desc) { 380 if (!desc) {
@@ -384,7 +384,7 @@ int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
384 } 384 }
385 385
386 ret = iser_create_fastreg_desc(device->ib_device, device->pd, 386 ret = iser_create_fastreg_desc(device->ib_device, device->pd,
387 ib_conn->pi_support, desc); 387 iser_conn->pi_support, desc);
388 if (ret) { 388 if (ret) {
389 iser_err("Failed to create fastreg descriptor err=%d\n", 389 iser_err("Failed to create fastreg descriptor err=%d\n",
390 ret); 390 ret);
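
iser_create_fastreg_desc() itself is outside the hunks shown here, but the teardown loop below (ib_free_fast_reg_page_list()/ib_dereg_mr()) pins down what it must allocate: a fast-reg MR paired with a page list, both sized for a full scatterlist. A sketch under those assumptions, omitting the T10-PI context the real helper also sets up when pi_support is on (helper name is illustrative):

	static int create_fastreg_desc_sketch(struct ib_device *ib_device,
					      struct ib_pd *pd,
					      struct fast_reg_descriptor *desc)
	{
		/* page list the HCA reads registered page addresses from */
		desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(desc->data_frpl))
			return PTR_ERR(desc->data_frpl);

		/* MR reserved for fast registration work requests */
		desc->data_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(desc->data_mr)) {
			ib_free_fast_reg_page_list(desc->data_frpl);
			return PTR_ERR(desc->data_mr);
		}
		return 0;
	}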
@@ -392,31 +392,31 @@ int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
392 goto err; 392 goto err;
393 } 393 }
394 394
395 list_add_tail(&desc->list, &ib_conn->fastreg.pool); 395 list_add_tail(&desc->list, &iser_conn->fastreg.pool);
396 ib_conn->fastreg.pool_size++; 396 iser_conn->fastreg.pool_size++;
397 } 397 }
398 398
399 return 0; 399 return 0;
400 400
401err: 401err:
402 iser_free_fastreg_pool(ib_conn); 402 iser_free_fastreg_pool(iser_conn);
403 return ret; 403 return ret;
404} 404}
405 405
406/** 406/**
407 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors 407 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
408 */ 408 */
409void iser_free_fastreg_pool(struct iser_conn *ib_conn) 409void iser_free_fastreg_pool(struct iser_conn *iser_conn)
410{ 410{
411 struct fast_reg_descriptor *desc, *tmp; 411 struct fast_reg_descriptor *desc, *tmp;
412 int i = 0; 412 int i = 0;
413 413
414 if (list_empty(&ib_conn->fastreg.pool)) 414 if (list_empty(&iser_conn->fastreg.pool))
415 return; 415 return;
416 416
417 iser_info("freeing conn %p fr pool\n", ib_conn); 417 iser_info("freeing conn %p fr pool\n", iser_conn);
418 418
419 list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { 419 list_for_each_entry_safe(desc, tmp, &iser_conn->fastreg.pool, list) {
420 list_del(&desc->list); 420 list_del(&desc->list);
421 ib_free_fast_reg_page_list(desc->data_frpl); 421 ib_free_fast_reg_page_list(desc->data_frpl);
422 ib_dereg_mr(desc->data_mr); 422 ib_dereg_mr(desc->data_mr);
@@ -430,9 +430,9 @@ void iser_free_fastreg_pool(struct iser_conn *ib_conn)
430 ++i; 430 ++i;
431 } 431 }
432 432
433 if (i < ib_conn->fastreg.pool_size) 433 if (i < iser_conn->fastreg.pool_size)
434 iser_warn("pool still has %d regions registered\n", 434 iser_warn("pool still has %d regions registered\n",
435 ib_conn->fastreg.pool_size - i); 435 iser_conn->fastreg.pool_size - i);
436} 436}
437 437
438/** 438/**
@@ -440,16 +440,16 @@ void iser_free_fastreg_pool(struct iser_conn *ib_conn)
440 * 440 *
441 * returns 0 on success, errno code on failure 441 * returns 0 on success, errno code on failure
442 */ 442 */
443static int iser_create_ib_conn_res(struct iser_conn *ib_conn) 443static int iser_create_ib_conn_res(struct iser_conn *iser_conn)
444{ 444{
445 struct iser_device *device; 445 struct iser_device *device;
446 struct ib_qp_init_attr init_attr; 446 struct ib_qp_init_attr init_attr;
447 int ret = -ENOMEM; 447 int ret = -ENOMEM;
448 int index, min_index = 0; 448 int index, min_index = 0;
449 449
450 BUG_ON(ib_conn->device == NULL); 450 BUG_ON(iser_conn->device == NULL);
451 451
452 device = ib_conn->device; 452 device = iser_conn->device;
453 453
454 memset(&init_attr, 0, sizeof init_attr); 454 memset(&init_attr, 0, sizeof init_attr);
455 455
@@ -461,10 +461,10 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
461 min_index = index; 461 min_index = index;
462 device->cq_active_qps[min_index]++; 462 device->cq_active_qps[min_index]++;
463 mutex_unlock(&ig.connlist_mutex); 463 mutex_unlock(&ig.connlist_mutex);
464 iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); 464 iser_info("cq index %d used for iser_conn %p\n", min_index, iser_conn);
465 465
466 init_attr.event_handler = iser_qp_event_callback; 466 init_attr.event_handler = iser_qp_event_callback;
467 init_attr.qp_context = (void *)ib_conn; 467 init_attr.qp_context = (void *)iser_conn;
468 init_attr.send_cq = device->tx_cq[min_index]; 468 init_attr.send_cq = device->tx_cq[min_index];
469 init_attr.recv_cq = device->rx_cq[min_index]; 469 init_attr.recv_cq = device->rx_cq[min_index];
470 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 470 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
@@ -472,21 +472,21 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
472 init_attr.cap.max_recv_sge = 1; 472 init_attr.cap.max_recv_sge = 1;
473 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 473 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
474 init_attr.qp_type = IB_QPT_RC; 474 init_attr.qp_type = IB_QPT_RC;
475 if (ib_conn->pi_support) { 475 if (iser_conn->pi_support) {
476 init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS; 476 init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
477 init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 477 init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
478 } else { 478 } else {
479 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; 479 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
480 } 480 }
481 481
482 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); 482 ret = rdma_create_qp(iser_conn->cma_id, device->pd, &init_attr);
483 if (ret) 483 if (ret)
484 goto out_err; 484 goto out_err;
485 485
486 ib_conn->qp = ib_conn->cma_id->qp; 486 iser_conn->qp = iser_conn->cma_id->qp;
487 iser_info("setting conn %p cma_id %p qp %p\n", 487 iser_info("setting conn %p cma_id %p qp %p\n",
488 ib_conn, ib_conn->cma_id, 488 iser_conn, iser_conn->cma_id,
489 ib_conn->cma_id->qp); 489 iser_conn->cma_id->qp);
490 return ret; 490 return ret;
491 491
492out_err: 492out_err:
@@ -497,25 +497,25 @@ out_err:
497/** 497/**
498 * releases the QP object 498 * releases the QP object
499 */ 499 */
500static void iser_free_ib_conn_res(struct iser_conn *ib_conn) 500static void iser_free_ib_conn_res(struct iser_conn *iser_conn)
501{ 501{
502 int cq_index; 502 int cq_index;
503 BUG_ON(ib_conn == NULL); 503 BUG_ON(iser_conn == NULL);
504 504
505 iser_info("freeing conn %p cma_id %p qp %p\n", 505 iser_info("freeing conn %p cma_id %p qp %p\n",
506 ib_conn, ib_conn->cma_id, 506 iser_conn, iser_conn->cma_id,
507 ib_conn->qp); 507 iser_conn->qp);
508 508
509 /* qp is created only once both addr & route are resolved */ 509 /* qp is created only once both addr & route are resolved */
510 510
511 if (ib_conn->qp != NULL) { 511 if (iser_conn->qp != NULL) {
512 cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index; 512 cq_index = ((struct iser_cq_desc *)iser_conn->qp->recv_cq->cq_context)->cq_index;
513 ib_conn->device->cq_active_qps[cq_index]--; 513 iser_conn->device->cq_active_qps[cq_index]--;
514 514
515 rdma_destroy_qp(ib_conn->cma_id); 515 rdma_destroy_qp(iser_conn->cma_id);
516 } 516 }
517 517
518 ib_conn->qp = NULL; 518 iser_conn->qp = NULL;
519} 519}
520 520
521/** 521/**
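
The release path recovers which completion-queue slot the QP was accounted against by chasing qp->recv_cq->cq_context, which iser points at a per-CQ descriptor when the CQ is created. A sketch of that convention (handler names and the descriptor layout beyond cq_index are assumptions):

	struct iser_cq_desc {
		struct iser_device *device;
		int                 cq_index;
		/* per-CQ tasklet etc. elided */
	};

	cq_desc[index].cq_index = index;
	device->rx_cq[index] = ib_create_cq(device->ib_device,
					    iser_cq_callback,        /* completion handler */
					    iser_cq_event_callback,  /* async CQ events    */
					    (void *)&cq_desc[index], /* -> cq_context      */
					    max_cqe, index);

Whatever pointer is handed in as cq_context at creation time is exactly what ib_create_cq() stores in cq->cq_context, which is why the cast in the hunk above is safe.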
@@ -572,75 +572,77 @@ static void iser_device_try_release(struct iser_device *device)
572/** 572/**
573 * Called with state mutex held 573 * Called with state mutex held
574 **/ 574 **/
575static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, 575static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
576 enum iser_ib_conn_state comp, 576 enum iser_conn_state comp,
577 enum iser_ib_conn_state exch) 577 enum iser_conn_state exch)
578{ 578{
579 int ret; 579 int ret;
580 580
581 if ((ret = (ib_conn->state == comp))) 581 ret = (iser_conn->state == comp);
582 ib_conn->state = exch; 582 if (ret)
583 iser_conn->state = exch;
584
583 return ret; 585 return ret;
584} 586}
585 587
586void iser_release_work(struct work_struct *work) 588void iser_release_work(struct work_struct *work)
587{ 589{
588 struct iser_conn *ib_conn; 590 struct iser_conn *iser_conn;
589 int rc; 591 int rc;
590 592
591 ib_conn = container_of(work, struct iser_conn, release_work); 593 iser_conn = container_of(work, struct iser_conn, release_work);
592 594
593 /* wait for .conn_stop callback */ 595 /* wait for .conn_stop callback */
594 rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ); 596 rc = wait_for_completion_timeout(&iser_conn->stop_completion, 30 * HZ);
595 WARN_ON(rc == 0); 597 WARN_ON(rc == 0);
596 598
597 /* wait for the qp's post send and post receive buffers to empty */ 599 /* wait for the qp's post send and post receive buffers to empty */
598 rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ); 600 rc = wait_for_completion_timeout(&iser_conn->flush_completion, 30 * HZ);
599 WARN_ON(rc == 0); 601 WARN_ON(rc == 0);
600 602
601 ib_conn->state = ISER_CONN_DOWN; 603 iser_conn->state = ISER_CONN_DOWN;
602 604
603 mutex_lock(&ib_conn->state_mutex); 605 mutex_lock(&iser_conn->state_mutex);
604 ib_conn->state = ISER_CONN_DOWN; 606 iser_conn->state = ISER_CONN_DOWN;
605 mutex_unlock(&ib_conn->state_mutex); 607 mutex_unlock(&iser_conn->state_mutex);
606 608
607 iser_conn_release(ib_conn); 609 iser_conn_release(iser_conn);
608} 610}
609 611
610/** 612/**
611 * Frees all conn objects and deallocs conn descriptor 613 * Frees all conn objects and deallocs conn descriptor
612 */ 614 */
613void iser_conn_release(struct iser_conn *ib_conn) 615void iser_conn_release(struct iser_conn *iser_conn)
614{ 616{
615 struct iser_device *device = ib_conn->device; 617 struct iser_device *device = iser_conn->device;
616 618
617 mutex_lock(&ig.connlist_mutex); 619 mutex_lock(&ig.connlist_mutex);
618 list_del(&ib_conn->conn_list); 620 list_del(&iser_conn->conn_list);
619 mutex_unlock(&ig.connlist_mutex); 621 mutex_unlock(&ig.connlist_mutex);
620 622
621 mutex_lock(&ib_conn->state_mutex); 623 mutex_lock(&iser_conn->state_mutex);
622 BUG_ON(ib_conn->state != ISER_CONN_DOWN); 624 BUG_ON(iser_conn->state != ISER_CONN_DOWN);
623 625
624 iser_free_rx_descriptors(ib_conn); 626 iser_free_rx_descriptors(iser_conn);
625 iser_free_ib_conn_res(ib_conn); 627 iser_free_ib_conn_res(iser_conn);
626 ib_conn->device = NULL; 628 iser_conn->device = NULL;
627 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 629 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
628 if (device != NULL) 630 if (device != NULL)
629 iser_device_try_release(device); 631 iser_device_try_release(device);
630 mutex_unlock(&ib_conn->state_mutex); 632 mutex_unlock(&iser_conn->state_mutex);
631 633
632 /* if cma handler context, the caller actually destroy the id */ 634 /* if cma handler context, the caller actually destroy the id */
633 if (ib_conn->cma_id != NULL) { 635 if (iser_conn->cma_id != NULL) {
634 rdma_destroy_id(ib_conn->cma_id); 636 rdma_destroy_id(iser_conn->cma_id);
635 ib_conn->cma_id = NULL; 637 iser_conn->cma_id = NULL;
636 } 638 }
637 kfree(ib_conn); 639 kfree(iser_conn);
638} 640}
639 641
640/** 642/**
641 * triggers start of the disconnect procedures and wait for them to be done 643 * triggers start of the disconnect procedures and wait for them to be done
642 */ 644 */
643void iser_conn_terminate(struct iser_conn *ib_conn) 645void iser_conn_terminate(struct iser_conn *iser_conn)
644{ 646{
645 int err = 0; 647 int err = 0;
646 648
@@ -649,11 +651,11 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
649 * the QP state to ERROR 651 * the QP state to ERROR
650 */ 652 */
651 653
652 iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); 654 iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
653 err = rdma_disconnect(ib_conn->cma_id); 655 err = rdma_disconnect(iser_conn->cma_id);
654 if (err) 656 if (err)
655 iser_err("Failed to disconnect, conn: 0x%p err %d\n", 657 iser_err("Failed to disconnect, conn: 0x%p err %d\n",
656 ib_conn,err); 658 iser_conn, err);
657} 659}
658 660
659/** 661/**
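
Besides the rename, the earlier hunk rewrites iser_conn_state_comp_exch() to avoid an assignment inside an if condition, the form checkpatch flags. The helper is a compare-and-exchange over the connection state and, per its comment, is only called with state_mutex held; an illustrative caller mirroring the terminate/disconnect paths:

	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING)) {
		/* this caller won the UP -> TERMINATING transition,
		 * so the teardown below runs exactly once */
		rdma_disconnect(iser_conn->cma_id);
	}
	mutex_unlock(&iser_conn->state_mutex);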
@@ -661,10 +663,10 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
661 **/ 663 **/
662static void iser_connect_error(struct rdma_cm_id *cma_id) 664static void iser_connect_error(struct rdma_cm_id *cma_id)
663{ 665{
664 struct iser_conn *ib_conn; 666 struct iser_conn *iser_conn;
665 667
666 ib_conn = (struct iser_conn *)cma_id->context; 668 iser_conn = (struct iser_conn *)cma_id->context;
667 ib_conn->state = ISER_CONN_DOWN; 669 iser_conn->state = ISER_CONN_DOWN;
668} 670}
669 671
670/** 672/**
@@ -673,11 +675,11 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
673static void iser_addr_handler(struct rdma_cm_id *cma_id) 675static void iser_addr_handler(struct rdma_cm_id *cma_id)
674{ 676{
675 struct iser_device *device; 677 struct iser_device *device;
676 struct iser_conn *ib_conn; 678 struct iser_conn *iser_conn;
677 int ret; 679 int ret;
678 680
679 ib_conn = (struct iser_conn *)cma_id->context; 681 iser_conn = (struct iser_conn *)cma_id->context;
680 if (ib_conn->state != ISER_CONN_PENDING) 682 if (iser_conn->state != ISER_CONN_PENDING)
681 /* bailout */ 683 /* bailout */
682 return; 684 return;
683 685
@@ -688,7 +690,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
688 return; 690 return;
689 } 691 }
690 692
691 ib_conn->device = device; 693 iser_conn->device = device;
692 694
693 /* connection T10-PI support */ 695 /* connection T10-PI support */
694 if (iser_pi_enable) { 696 if (iser_pi_enable) {
@@ -696,10 +698,10 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
696 IB_DEVICE_SIGNATURE_HANDOVER)) { 698 IB_DEVICE_SIGNATURE_HANDOVER)) {
697 iser_warn("T10-PI requested but not supported on %s, " 699 iser_warn("T10-PI requested but not supported on %s, "
698 "continue without T10-PI\n", 700 "continue without T10-PI\n",
699 ib_conn->device->ib_device->name); 701 iser_conn->device->ib_device->name);
700 ib_conn->pi_support = false; 702 iser_conn->pi_support = false;
701 } else { 703 } else {
702 ib_conn->pi_support = true; 704 iser_conn->pi_support = true;
703 } 705 }
704 } 706 }
705 707
@@ -719,10 +721,10 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
719 struct rdma_conn_param conn_param; 721 struct rdma_conn_param conn_param;
720 int ret; 722 int ret;
721 struct iser_cm_hdr req_hdr; 723 struct iser_cm_hdr req_hdr;
722 struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context; 724 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
723 struct iser_device *device = ib_conn->device; 725 struct iser_device *device = iser_conn->device;
724 726
725 if (ib_conn->state != ISER_CONN_PENDING) 727 if (iser_conn->state != ISER_CONN_PENDING)
726 /* bailout */ 728 /* bailout */
727 return; 729 return;
728 730
@@ -755,34 +757,34 @@ failure:
755 757
756static void iser_connected_handler(struct rdma_cm_id *cma_id) 758static void iser_connected_handler(struct rdma_cm_id *cma_id)
757{ 759{
758 struct iser_conn *ib_conn; 760 struct iser_conn *iser_conn;
759 struct ib_qp_attr attr; 761 struct ib_qp_attr attr;
760 struct ib_qp_init_attr init_attr; 762 struct ib_qp_init_attr init_attr;
761 763
762 ib_conn = (struct iser_conn *)cma_id->context; 764 iser_conn = (struct iser_conn *)cma_id->context;
763 if (ib_conn->state != ISER_CONN_PENDING) 765 if (iser_conn->state != ISER_CONN_PENDING)
764 /* bailout */ 766 /* bailout */
765 return; 767 return;
766 768
767 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); 769 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
768 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); 770 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
769 771
770 ib_conn->state = ISER_CONN_UP; 772 iser_conn->state = ISER_CONN_UP;
771 complete(&ib_conn->up_completion); 773 complete(&iser_conn->up_completion);
772} 774}
773 775
774static void iser_disconnected_handler(struct rdma_cm_id *cma_id) 776static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
775{ 777{
776 struct iser_conn *ib_conn; 778 struct iser_conn *iser_conn;
777 779
778 ib_conn = (struct iser_conn *)cma_id->context; 780 iser_conn = (struct iser_conn *)cma_id->context;
779 781
780 /* getting here when the state is UP means that the conn is being * 782 /* getting here when the state is UP means that the conn is being *
781 * terminated asynchronously from the iSCSI layer's perspective. */ 783 * terminated asynchronously from the iSCSI layer's perspective. */
782 if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, 784 if (iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
783 ISER_CONN_TERMINATING)){ 785 ISER_CONN_TERMINATING)){
784 if (ib_conn->iscsi_conn) 786 if (iser_conn->iscsi_conn)
785 iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); 787 iscsi_conn_failure(iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
786 else 788 else
787 iser_err("iscsi_iser connection isn't bound\n"); 789 iser_err("iscsi_iser connection isn't bound\n");
788 } 790 }
@@ -791,21 +793,21 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
791 * block also exists in iser_handle_comp_error(), but it is needed here 793 * block also exists in iser_handle_comp_error(), but it is needed here
792 * for cases of no flushes at all, e.g. discovery over rdma. 794 * for cases of no flushes at all, e.g. discovery over rdma.
793 */ 795 */
794 if (ib_conn->post_recv_buf_count == 0 && 796 if (iser_conn->post_recv_buf_count == 0 &&
795 (atomic_read(&ib_conn->post_send_buf_count) == 0)) { 797 (atomic_read(&iser_conn->post_send_buf_count) == 0)) {
796 complete(&ib_conn->flush_completion); 798 complete(&iser_conn->flush_completion);
797 } 799 }
798} 800}
799 801
800static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 802static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
801{ 803{
802 struct iser_conn *ib_conn; 804 struct iser_conn *iser_conn;
803 805
804 ib_conn = (struct iser_conn *)cma_id->context; 806 iser_conn = (struct iser_conn *)cma_id->context;
805 iser_info("event %d status %d conn %p id %p\n", 807 iser_info("event %d status %d conn %p id %p\n",
806 event->event, event->status, cma_id->context, cma_id); 808 event->event, event->status, cma_id->context, cma_id);
807 809
808 mutex_lock(&ib_conn->state_mutex); 810 mutex_lock(&iser_conn->state_mutex);
809 switch (event->event) { 811 switch (event->event) {
810 case RDMA_CM_EVENT_ADDR_RESOLVED: 812 case RDMA_CM_EVENT_ADDR_RESOLVED:
811 iser_addr_handler(cma_id); 813 iser_addr_handler(cma_id);
@@ -833,82 +835,82 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
833 iser_err("Unexpected RDMA CM event (%d)\n", event->event); 835 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
834 break; 836 break;
835 } 837 }
836 mutex_unlock(&ib_conn->state_mutex); 838 mutex_unlock(&iser_conn->state_mutex);
837 return 0; 839 return 0;
838} 840}
839 841
840void iser_conn_init(struct iser_conn *ib_conn) 842void iser_conn_init(struct iser_conn *iser_conn)
841{ 843{
842 ib_conn->state = ISER_CONN_INIT; 844 iser_conn->state = ISER_CONN_INIT;
843 ib_conn->post_recv_buf_count = 0; 845 iser_conn->post_recv_buf_count = 0;
844 atomic_set(&ib_conn->post_send_buf_count, 0); 846 atomic_set(&iser_conn->post_send_buf_count, 0);
845 init_completion(&ib_conn->stop_completion); 847 init_completion(&iser_conn->stop_completion);
846 init_completion(&ib_conn->flush_completion); 848 init_completion(&iser_conn->flush_completion);
847 init_completion(&ib_conn->up_completion); 849 init_completion(&iser_conn->up_completion);
848 INIT_LIST_HEAD(&ib_conn->conn_list); 850 INIT_LIST_HEAD(&iser_conn->conn_list);
849 spin_lock_init(&ib_conn->lock); 851 spin_lock_init(&iser_conn->lock);
850 mutex_init(&ib_conn->state_mutex); 852 mutex_init(&iser_conn->state_mutex);
851} 853}
852 854
853 /** 855 /**
854 * starts the process of connecting to the target 856 * starts the process of connecting to the target
855 * sleeps until the connection is established or rejected 857 * sleeps until the connection is established or rejected
856 */ 858 */
857int iser_connect(struct iser_conn *ib_conn, 859int iser_connect(struct iser_conn *iser_conn,
858 struct sockaddr *src_addr, 860 struct sockaddr *src_addr,
859 struct sockaddr *dst_addr, 861 struct sockaddr *dst_addr,
860 int non_blocking) 862 int non_blocking)
861{ 863{
862 int err = 0; 864 int err = 0;
863 865
864 mutex_lock(&ib_conn->state_mutex); 866 mutex_lock(&iser_conn->state_mutex);
865 867
866 sprintf(ib_conn->name, "%pISp", dst_addr); 868 sprintf(iser_conn->name, "%pISp", dst_addr);
867 869
868 iser_info("connecting to: %s\n", ib_conn->name); 870 iser_info("connecting to: %s\n", iser_conn->name);
869 871
870 /* the device is known only --after-- address resolution */ 872 /* the device is known only --after-- address resolution */
871 ib_conn->device = NULL; 873 iser_conn->device = NULL;
872 874
873 ib_conn->state = ISER_CONN_PENDING; 875 iser_conn->state = ISER_CONN_PENDING;
874 876
875 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 877 iser_conn->cma_id = rdma_create_id(iser_cma_handler,
876 (void *)ib_conn, 878 (void *)iser_conn,
877 RDMA_PS_TCP, IB_QPT_RC); 879 RDMA_PS_TCP, IB_QPT_RC);
878 if (IS_ERR(ib_conn->cma_id)) { 880 if (IS_ERR(iser_conn->cma_id)) {
879 err = PTR_ERR(ib_conn->cma_id); 881 err = PTR_ERR(iser_conn->cma_id);
880 iser_err("rdma_create_id failed: %d\n", err); 882 iser_err("rdma_create_id failed: %d\n", err);
881 goto id_failure; 883 goto id_failure;
882 } 884 }
883 885
884 err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); 886 err = rdma_resolve_addr(iser_conn->cma_id, src_addr, dst_addr, 1000);
885 if (err) { 887 if (err) {
886 iser_err("rdma_resolve_addr failed: %d\n", err); 888 iser_err("rdma_resolve_addr failed: %d\n", err);
887 goto addr_failure; 889 goto addr_failure;
888 } 890 }
889 891
890 if (!non_blocking) { 892 if (!non_blocking) {
891 wait_for_completion_interruptible(&ib_conn->up_completion); 893 wait_for_completion_interruptible(&iser_conn->up_completion);
892 894
893 if (ib_conn->state != ISER_CONN_UP) { 895 if (iser_conn->state != ISER_CONN_UP) {
894 err = -EIO; 896 err = -EIO;
895 goto connect_failure; 897 goto connect_failure;
896 } 898 }
897 } 899 }
898 mutex_unlock(&ib_conn->state_mutex); 900 mutex_unlock(&iser_conn->state_mutex);
899 901
900 mutex_lock(&ig.connlist_mutex); 902 mutex_lock(&ig.connlist_mutex);
901 list_add(&ib_conn->conn_list, &ig.connlist); 903 list_add(&iser_conn->conn_list, &ig.connlist);
902 mutex_unlock(&ig.connlist_mutex); 904 mutex_unlock(&ig.connlist_mutex);
903 return 0; 905 return 0;
904 906
905id_failure: 907id_failure:
906 ib_conn->cma_id = NULL; 908 iser_conn->cma_id = NULL;
907addr_failure: 909addr_failure:
908 ib_conn->state = ISER_CONN_DOWN; 910 iser_conn->state = ISER_CONN_DOWN;
909connect_failure: 911connect_failure:
910 mutex_unlock(&ib_conn->state_mutex); 912 mutex_unlock(&iser_conn->state_mutex);
911 iser_conn_release(ib_conn); 913 iser_conn_release(iser_conn);
912 return err; 914 return err;
913} 915}
914 916
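
iser_connect() only fires off address resolution; the rest of the client-side RDMA CM handshake is event-driven from iser_cma_handler(). The overall sequence, sketched with the 3.x-era four-argument rdma_create_id() used here (timeout values illustrative):

	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(iser_cma_handler, conn_ctx /* -> cma_id->context */,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* asynchronous: answered by RDMA_CM_EVENT_ADDR_RESOLVED */
	ret = rdma_resolve_addr(id, src_addr, dst_addr, 1000 /* ms */);

	/*
	 * Then, from the event handler:
	 *   ADDR_RESOLVED  -> bind device, rdma_resolve_route(id, 1000)
	 *   ROUTE_RESOLVED -> create the QP, rdma_connect(id, &conn_param)
	 *   ESTABLISHED    -> state = ISER_CONN_UP, complete(&up_completion)
	 * Any error event lands in iser_connect_error(), which drops the
	 * state to ISER_CONN_DOWN so a blocking caller sees -EIO.
	 */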
@@ -917,7 +919,7 @@ connect_failure:
917 * 919 *
918 * returns: 0 on success, errno code on failure 920 * returns: 0 on success, errno code on failure
919 */ 921 */
920int iser_reg_page_vec(struct iser_conn *ib_conn, 922int iser_reg_page_vec(struct iser_conn *iser_conn,
921 struct iser_page_vec *page_vec, 923 struct iser_page_vec *page_vec,
922 struct iser_mem_reg *mem_reg) 924 struct iser_mem_reg *mem_reg)
923{ 925{
@@ -929,7 +931,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
929 page_list = page_vec->pages; 931 page_list = page_vec->pages;
930 io_addr = page_list[0]; 932 io_addr = page_list[0];
931 933
932 mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, 934 mem = ib_fmr_pool_map_phys(iser_conn->fmr.pool,
933 page_list, 935 page_list,
934 page_vec->length, 936 page_vec->length,
935 io_addr); 937 io_addr);
@@ -987,7 +989,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
987 enum iser_data_dir cmd_dir) 989 enum iser_data_dir cmd_dir)
988{ 990{
989 struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; 991 struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
990 struct iser_conn *ib_conn = iser_task->ib_conn; 992 struct iser_conn *iser_conn = iser_task->iser_conn;
991 struct fast_reg_descriptor *desc = reg->mem_h; 993 struct fast_reg_descriptor *desc = reg->mem_h;
992 994
993 if (!reg->is_mr) 995 if (!reg->is_mr)
@@ -995,61 +997,61 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
995 997
996 reg->mem_h = NULL; 998 reg->mem_h = NULL;
997 reg->is_mr = 0; 999 reg->is_mr = 0;
998 spin_lock_bh(&ib_conn->lock); 1000 spin_lock_bh(&iser_conn->lock);
999 list_add_tail(&desc->list, &ib_conn->fastreg.pool); 1001 list_add_tail(&desc->list, &iser_conn->fastreg.pool);
1000 spin_unlock_bh(&ib_conn->lock); 1002 spin_unlock_bh(&iser_conn->lock);
1001} 1003}
1002 1004
1003int iser_post_recvl(struct iser_conn *ib_conn) 1005int iser_post_recvl(struct iser_conn *iser_conn)
1004{ 1006{
1005 struct ib_recv_wr rx_wr, *rx_wr_failed; 1007 struct ib_recv_wr rx_wr, *rx_wr_failed;
1006 struct ib_sge sge; 1008 struct ib_sge sge;
1007 int ib_ret; 1009 int ib_ret;
1008 1010
1009 sge.addr = ib_conn->login_resp_dma; 1011 sge.addr = iser_conn->login_resp_dma;
1010 sge.length = ISER_RX_LOGIN_SIZE; 1012 sge.length = ISER_RX_LOGIN_SIZE;
1011 sge.lkey = ib_conn->device->mr->lkey; 1013 sge.lkey = iser_conn->device->mr->lkey;
1012 1014
1013 rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf; 1015 rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
1014 rx_wr.sg_list = &sge; 1016 rx_wr.sg_list = &sge;
1015 rx_wr.num_sge = 1; 1017 rx_wr.num_sge = 1;
1016 rx_wr.next = NULL; 1018 rx_wr.next = NULL;
1017 1019
1018 ib_conn->post_recv_buf_count++; 1020 iser_conn->post_recv_buf_count++;
1019 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); 1021 ib_ret = ib_post_recv(iser_conn->qp, &rx_wr, &rx_wr_failed);
1020 if (ib_ret) { 1022 if (ib_ret) {
1021 iser_err("ib_post_recv failed ret=%d\n", ib_ret); 1023 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
1022 ib_conn->post_recv_buf_count--; 1024 iser_conn->post_recv_buf_count--;
1023 } 1025 }
1024 return ib_ret; 1026 return ib_ret;
1025} 1027}
1026 1028
1027int iser_post_recvm(struct iser_conn *ib_conn, int count) 1029int iser_post_recvm(struct iser_conn *iser_conn, int count)
1028{ 1030{
1029 struct ib_recv_wr *rx_wr, *rx_wr_failed; 1031 struct ib_recv_wr *rx_wr, *rx_wr_failed;
1030 int i, ib_ret; 1032 int i, ib_ret;
1031 unsigned int my_rx_head = ib_conn->rx_desc_head; 1033 unsigned int my_rx_head = iser_conn->rx_desc_head;
1032 struct iser_rx_desc *rx_desc; 1034 struct iser_rx_desc *rx_desc;
1033 1035
1034 for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 1036 for (rx_wr = iser_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1035 rx_desc = &ib_conn->rx_descs[my_rx_head]; 1037 rx_desc = &iser_conn->rx_descs[my_rx_head];
1036 rx_wr->wr_id = (unsigned long)rx_desc; 1038 rx_wr->wr_id = (unsigned long)rx_desc;
1037 rx_wr->sg_list = &rx_desc->rx_sg; 1039 rx_wr->sg_list = &rx_desc->rx_sg;
1038 rx_wr->num_sge = 1; 1040 rx_wr->num_sge = 1;
1039 rx_wr->next = rx_wr + 1; 1041 rx_wr->next = rx_wr + 1;
1040 my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask; 1042 my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
1041 } 1043 }
1042 1044
1043 rx_wr--; 1045 rx_wr--;
1044 rx_wr->next = NULL; /* mark end of work requests list */ 1046 rx_wr->next = NULL; /* mark end of work requests list */
1045 1047
1046 ib_conn->post_recv_buf_count += count; 1048 iser_conn->post_recv_buf_count += count;
1047 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); 1049 ib_ret = ib_post_recv(iser_conn->qp, iser_conn->rx_wr, &rx_wr_failed);
1048 if (ib_ret) { 1050 if (ib_ret) {
1049 iser_err("ib_post_recv failed ret=%d\n", ib_ret); 1051 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
1050 ib_conn->post_recv_buf_count -= count; 1052 iser_conn->post_recv_buf_count -= count;
1051 } else 1053 } else
1052 ib_conn->rx_desc_head = my_rx_head; 1054 iser_conn->rx_desc_head = my_rx_head;
1053 return ib_ret; 1055 return ib_ret;
1054} 1056}
1055 1057
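
iser_post_recvm() amortizes the verbs call by linking count receive work requests into one chain and posting them with a single ib_post_recv(); the loop pre-links each entry to its array neighbor, and the trailing rx_wr--; rx_wr->next = NULL; terminates the chain. The same pattern in isolation (buffer names illustrative):

	struct ib_recv_wr wrs[16], *bad_wr;
	struct ib_sge sges[16];
	int i, n = 16;

	for (i = 0; i < n; i++) {
		sges[i].addr   = buf_dma[i];  /* pre-mapped DMA addresses */
		sges[i].length = BUF_SIZE;
		sges[i].lkey   = mr_lkey;
		wrs[i].wr_id   = (unsigned long)&bufs[i];
		wrs[i].sg_list = &sges[i];
		wrs[i].num_sge = 1;
		wrs[i].next    = (i == n - 1) ? NULL : &wrs[i + 1];
	}

	if (ib_post_recv(qp, wrs, &bad_wr))
		/* bad_wr points at the first WR that was not posted */
		;

The hunk above simply rolls the whole count back if the post fails; bad_wr would allow finer-grained accounting.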
@@ -1059,13 +1061,14 @@ int iser_post_recvm(struct iser_conn *ib_conn, int count)
1059 * 1061 *
1060 * returns 0 on success, -1 on failure 1062 * returns 0 on success, -1 on failure
1061 */ 1063 */
1062int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc) 1064int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc)
1063{ 1065{
1064 int ib_ret; 1066 int ib_ret;
1065 struct ib_send_wr send_wr, *send_wr_failed; 1067 struct ib_send_wr send_wr, *send_wr_failed;
1066 1068
1067 ib_dma_sync_single_for_device(ib_conn->device->ib_device, 1069 ib_dma_sync_single_for_device(iser_conn->device->ib_device,
1068 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); 1070 tx_desc->dma_addr, ISER_HEADERS_LEN,
1071 DMA_TO_DEVICE);
1069 1072
1070 send_wr.next = NULL; 1073 send_wr.next = NULL;
1071 send_wr.wr_id = (unsigned long)tx_desc; 1074 send_wr.wr_id = (unsigned long)tx_desc;
@@ -1074,37 +1077,37 @@ int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
1074 send_wr.opcode = IB_WR_SEND; 1077 send_wr.opcode = IB_WR_SEND;
1075 send_wr.send_flags = IB_SEND_SIGNALED; 1078 send_wr.send_flags = IB_SEND_SIGNALED;
1076 1079
1077 atomic_inc(&ib_conn->post_send_buf_count); 1080 atomic_inc(&iser_conn->post_send_buf_count);
1078 1081
1079 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); 1082 ib_ret = ib_post_send(iser_conn->qp, &send_wr, &send_wr_failed);
1080 if (ib_ret) { 1083 if (ib_ret) {
1081 iser_err("ib_post_send failed, ret:%d\n", ib_ret); 1084 iser_err("ib_post_send failed, ret:%d\n", ib_ret);
1082 atomic_dec(&ib_conn->post_send_buf_count); 1085 atomic_dec(&iser_conn->post_send_buf_count);
1083 } 1086 }
1084 return ib_ret; 1087 return ib_ret;
1085} 1088}
1086 1089
1087static void iser_handle_comp_error(struct iser_tx_desc *desc, 1090static void iser_handle_comp_error(struct iser_tx_desc *desc,
1088 struct iser_conn *ib_conn) 1091 struct iser_conn *iser_conn)
1089{ 1092{
1090 if (desc && desc->type == ISCSI_TX_DATAOUT) 1093 if (desc && desc->type == ISCSI_TX_DATAOUT)
1091 kmem_cache_free(ig.desc_cache, desc); 1094 kmem_cache_free(ig.desc_cache, desc);
1092 1095
1093 if (ib_conn->post_recv_buf_count == 0 && 1096 if (iser_conn->post_recv_buf_count == 0 &&
1094 atomic_read(&ib_conn->post_send_buf_count) == 0) { 1097 atomic_read(&iser_conn->post_send_buf_count) == 0) {
1095 /** 1098 /**
1096 * getting here when the state is UP means that the conn is 1099 * getting here when the state is UP means that the conn is
1097 * being terminated asynchronously from the iSCSI layer's 1100 * being terminated asynchronously from the iSCSI layer's
1098 * perspective. It is safe to peek at the connection state 1101 * perspective. It is safe to peek at the connection state
1099 * since iscsi_conn_failure is allowed to be called twice. 1102 * since iscsi_conn_failure is allowed to be called twice.
1100 **/ 1103 **/
1101 if (ib_conn->state == ISER_CONN_UP) 1104 if (iser_conn->state == ISER_CONN_UP)
1102 iscsi_conn_failure(ib_conn->iscsi_conn, 1105 iscsi_conn_failure(iser_conn->iscsi_conn,
1103 ISCSI_ERR_CONN_FAILED); 1106 ISCSI_ERR_CONN_FAILED);
1104 1107
1105 /* no more uncompleted posts to the QP, complete the 1108 /* no more uncompleted posts to the QP, complete the
1106 * termination process without worrying about a disconnect event */ 1109 * termination process without worrying about a disconnect event */
1107 complete(&ib_conn->flush_completion); 1110 complete(&iser_conn->flush_completion);
1108 } 1111 }
1109} 1112}
1110 1113
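
The send path pairs each post with an atomic counter so teardown can detect quiescence: post_send_buf_count is incremented before ib_post_send() and rolled back on failure, and once it and post_recv_buf_count both reach zero after an error, iser_handle_comp_error() fires flush_completion, unblocking the 30-second wait in iser_release_work(). Condensed from the hunks in this patch:

	/* submit side */
	atomic_inc(&iser_conn->post_send_buf_count);
	if (ib_post_send(iser_conn->qp, &send_wr, &send_wr_failed))
		atomic_dec(&iser_conn->post_send_buf_count);

	/* flush/error side: the last outstanding WR completes the waiter */
	if (iser_conn->post_recv_buf_count == 0 &&
	    atomic_read(&iser_conn->post_send_buf_count) == 0)
		complete(&iser_conn->flush_completion);

	/* teardown side */
	wait_for_completion_timeout(&iser_conn->flush_completion, 30 * HZ);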
@@ -1113,15 +1116,15 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
1113 struct ib_cq *cq = device->tx_cq[cq_index]; 1116 struct ib_cq *cq = device->tx_cq[cq_index];
1114 struct ib_wc wc; 1117 struct ib_wc wc;
1115 struct iser_tx_desc *tx_desc; 1118 struct iser_tx_desc *tx_desc;
1116 struct iser_conn *ib_conn; 1119 struct iser_conn *iser_conn;
1117 int completed_tx = 0; 1120 int completed_tx = 0;
1118 1121
1119 while (ib_poll_cq(cq, 1, &wc) == 1) { 1122 while (ib_poll_cq(cq, 1, &wc) == 1) {
1120 tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id; 1123 tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
1121 ib_conn = wc.qp->qp_context; 1124 iser_conn = wc.qp->qp_context;
1122 if (wc.status == IB_WC_SUCCESS) { 1125 if (wc.status == IB_WC_SUCCESS) {
1123 if (wc.opcode == IB_WC_SEND) 1126 if (wc.opcode == IB_WC_SEND)
1124 iser_snd_completion(tx_desc, ib_conn); 1127 iser_snd_completion(tx_desc, iser_conn);
1125 else 1128 else
1126 iser_err("expected opcode %d got %d\n", 1129 iser_err("expected opcode %d got %d\n",
1127 IB_WC_SEND, wc.opcode); 1130 IB_WC_SEND, wc.opcode);
@@ -1129,8 +1132,8 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
1129 iser_err("tx id %llx status %d vend_err %x\n", 1132 iser_err("tx id %llx status %d vend_err %x\n",
1130 wc.wr_id, wc.status, wc.vendor_err); 1133 wc.wr_id, wc.status, wc.vendor_err);
1131 if (wc.wr_id != ISER_FASTREG_LI_WRID) { 1134 if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1132 atomic_dec(&ib_conn->post_send_buf_count); 1135 atomic_dec(&iser_conn->post_send_buf_count);
1133 iser_handle_comp_error(tx_desc, ib_conn); 1136 iser_handle_comp_error(tx_desc, iser_conn);
1134 } 1137 }
1135 } 1138 }
1136 completed_tx++; 1139 completed_tx++;
@@ -1148,7 +1151,7 @@ static void iser_cq_tasklet_fn(unsigned long data)
1148 struct ib_wc wc; 1151 struct ib_wc wc;
1149 struct iser_rx_desc *desc; 1152 struct iser_rx_desc *desc;
1150 unsigned long xfer_len; 1153 unsigned long xfer_len;
1151 struct iser_conn *ib_conn; 1154 struct iser_conn *iser_conn;
1152 int completed_tx, completed_rx = 0; 1155 int completed_tx, completed_rx = 0;
1153 1156
1154 /* First do tx drain, so in a case where we have rx flushes and a successful 1157 /* First do tx drain, so in a case where we have rx flushes and a successful
@@ -1159,11 +1162,11 @@ static void iser_cq_tasklet_fn(unsigned long data)
1159 while (ib_poll_cq(cq, 1, &wc) == 1) { 1162 while (ib_poll_cq(cq, 1, &wc) == 1) {
1160 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; 1163 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
1161 BUG_ON(desc == NULL); 1164 BUG_ON(desc == NULL);
1162 ib_conn = wc.qp->qp_context; 1165 iser_conn = wc.qp->qp_context;
1163 if (wc.status == IB_WC_SUCCESS) { 1166 if (wc.status == IB_WC_SUCCESS) {
1164 if (wc.opcode == IB_WC_RECV) { 1167 if (wc.opcode == IB_WC_RECV) {
1165 xfer_len = (unsigned long)wc.byte_len; 1168 xfer_len = (unsigned long)wc.byte_len;
1166 iser_rcv_completion(desc, xfer_len, ib_conn); 1169 iser_rcv_completion(desc, xfer_len, iser_conn);
1167 } else 1170 } else
1168 iser_err("expected opcode %d got %d\n", 1171 iser_err("expected opcode %d got %d\n",
1169 IB_WC_RECV, wc.opcode); 1172 IB_WC_RECV, wc.opcode);
@@ -1171,8 +1174,8 @@ static void iser_cq_tasklet_fn(unsigned long data)
1171 if (wc.status != IB_WC_WR_FLUSH_ERR) 1174 if (wc.status != IB_WC_WR_FLUSH_ERR)
1172 iser_err("rx id %llx status %d vend_err %x\n", 1175 iser_err("rx id %llx status %d vend_err %x\n",
1173 wc.wr_id, wc.status, wc.vendor_err); 1176 wc.wr_id, wc.status, wc.vendor_err);
1174 ib_conn->post_recv_buf_count--; 1177 iser_conn->post_recv_buf_count--;
1175 iser_handle_comp_error(NULL, ib_conn); 1178 iser_handle_comp_error(NULL, iser_conn);
1176 } 1179 }
1177 completed_rx++; 1180 completed_rx++;
1178 if (!(completed_rx & 63)) 1181 if (!(completed_rx & 63))
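
The tasklet drains one work completion at a time with ib_poll_cq(). What this excerpt does not show is the re-arm step that closes the race between the final poll and the next interrupt; one common shape of that loop, hedged since the exact iser re-arm code lies outside this diff:

	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) == 1)
			dispatch_wc(&wc);  /* illustrative handler */
		/*
		 * Re-arm and re-check: IB_CQ_REPORT_MISSED_EVENTS makes
		 * ib_req_notify_cq() return > 0 if a completion slipped in
		 * between the last poll and the notify request.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);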