diff options
author | Ariel Nahum <arieln@mellanox.com> | 2014-05-22 04:00:18 -0400 |
---|---|---|
committer | Roland Dreier <roland@purestorage.com> | 2014-05-26 11:19:48 -0400 |
commit | b73c3adabdb1e2cb2f2c69bc3cbb9306aa3f9700 (patch) | |
tree | 4450f306e659387ffc2ec291a611c89ded989b06 | |
parent | d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff) |
IB/iser: Simplify connection management
iSER relies on refcounting to manage iser connection establishment
and teardown.
Following commit 39ff05dbbbdb ("IB/iser: Enhance disconnection logic
for multi-pathing"), an iser connection maintains 3 references:
- iscsi_endpoint (at creation stage)
- cma_id (at connection request stage)
- iscsi_conn (at bind stage)
We can avoid taking explicit refcounts by correctly serializing iser
teardown flows (graceful and non-graceful).
Our approach is to trigger a scheduled work to handle ordered teardown
by gracefully waiting for 2 cleanup stages to complete:
1. Cleanup of live pending tasks indicated by iscsi_conn_stop completion
2. Flush errors processing
Each completed stage will notify a waiting worker thread when it is
done to allow teardown continuation.
Since iSCSI connection establishment may trigger endpoint disconnect
without a successful endpoint connect, we rely on the iscsi <-> iser
binding (.conn_bind) to learn about the teardown policy we should take
wrt cleanup stages.
Since all cleanup worker threads are scheduled (release_wq) in
.ep_disconnect it is safe to assume that when module_exit is called,
all cleanup workers are already scheduled. Thus proper module unload
shall flush all scheduled works before allowing safe exit, to
guarantee no resources got left behind.
Signed-off-by: Ariel Nahum <arieln@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.c | 97 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.h | 8 | ||||
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_verbs.c | 85 |
3 files changed, 99 insertions, 91 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 25f195ef44b0..f2174880a63a 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | |||
99 | module_param_named(pi_guard, iser_pi_guard, int, 0644); | 99 | module_param_named(pi_guard, iser_pi_guard, int, 0644); |
100 | MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); | 100 | MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); |
101 | 101 | ||
102 | static struct workqueue_struct *release_wq; | ||
102 | struct iser_global ig; | 103 | struct iser_global ig; |
103 | 104 | ||
104 | void | 105 | void |
@@ -337,24 +338,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
337 | return cls_conn; | 338 | return cls_conn; |
338 | } | 339 | } |
339 | 340 | ||
340 | static void | ||
341 | iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) | ||
342 | { | ||
343 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
344 | struct iser_conn *ib_conn = conn->dd_data; | ||
345 | |||
346 | iscsi_conn_teardown(cls_conn); | ||
347 | /* | ||
348 | * Userspace will normally call the stop callback and | ||
349 | * already have freed the ib_conn, but if it goofed up then | ||
350 | * we free it here. | ||
351 | */ | ||
352 | if (ib_conn) { | ||
353 | ib_conn->iscsi_conn = NULL; | ||
354 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | ||
355 | } | ||
356 | } | ||
357 | |||
358 | static int | 341 | static int |
359 | iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | 342 | iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, |
360 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, | 343 | struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, |
@@ -392,29 +375,39 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
392 | conn->dd_data = ib_conn; | 375 | conn->dd_data = ib_conn; |
393 | ib_conn->iscsi_conn = conn; | 376 | ib_conn->iscsi_conn = conn; |
394 | 377 | ||
395 | iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ | ||
396 | return 0; | 378 | return 0; |
397 | } | 379 | } |
398 | 380 | ||
381 | static int | ||
382 | iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) | ||
383 | { | ||
384 | struct iscsi_conn *iscsi_conn; | ||
385 | struct iser_conn *ib_conn; | ||
386 | |||
387 | iscsi_conn = cls_conn->dd_data; | ||
388 | ib_conn = iscsi_conn->dd_data; | ||
389 | reinit_completion(&ib_conn->stop_completion); | ||
390 | |||
391 | return iscsi_conn_start(cls_conn); | ||
392 | } | ||
393 | |||
399 | static void | 394 | static void |
400 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | 395 | iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) |
401 | { | 396 | { |
402 | struct iscsi_conn *conn = cls_conn->dd_data; | 397 | struct iscsi_conn *conn = cls_conn->dd_data; |
403 | struct iser_conn *ib_conn = conn->dd_data; | 398 | struct iser_conn *ib_conn = conn->dd_data; |
404 | 399 | ||
400 | iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn); | ||
401 | iscsi_conn_stop(cls_conn, flag); | ||
402 | |||
405 | /* | 403 | /* |
406 | * Userspace may have goofed up and not bound the connection or | 404 | * Userspace may have goofed up and not bound the connection or |
407 | * might have only partially setup the connection. | 405 | * might have only partially setup the connection. |
408 | */ | 406 | */ |
409 | if (ib_conn) { | 407 | if (ib_conn) { |
410 | iscsi_conn_stop(cls_conn, flag); | 408 | conn->dd_data = NULL; |
411 | /* | 409 | complete(&ib_conn->stop_completion); |
412 | * There is no unbind event so the stop callback | ||
413 | * must release the ref from the bind. | ||
414 | */ | ||
415 | iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ | ||
416 | } | 410 | } |
417 | conn->dd_data = NULL; | ||
418 | } | 411 | } |
419 | 412 | ||
420 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) | 413 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) |
@@ -652,19 +645,20 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) | |||
652 | struct iser_conn *ib_conn; | 645 | struct iser_conn *ib_conn; |
653 | 646 | ||
654 | ib_conn = ep->dd_data; | 647 | ib_conn = ep->dd_data; |
655 | if (ib_conn->iscsi_conn) | 648 | iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); |
656 | /* | ||
657 | * Must suspend xmit path if the ep is bound to the | ||
658 | * iscsi_conn, so we know we are not accessing the ib_conn | ||
659 | * when we free it. | ||
660 | * | ||
661 | * This may not be bound if the ep poll failed. | ||
662 | */ | ||
663 | iscsi_suspend_tx(ib_conn->iscsi_conn); | ||
664 | |||
665 | |||
666 | iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); | ||
667 | iser_conn_terminate(ib_conn); | 649 | iser_conn_terminate(ib_conn); |
650 | |||
651 | /* | ||
652 | * if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop | ||
653 | * call and ISER_CONN_DOWN state before freeing the iser resources. | ||
654 | * otherwise we are safe to free resources immediately. | ||
655 | */ | ||
656 | if (ib_conn->iscsi_conn) { | ||
657 | INIT_WORK(&ib_conn->release_work, iser_release_work); | ||
658 | queue_work(release_wq, &ib_conn->release_work); | ||
659 | } else { | ||
660 | iser_conn_release(ib_conn); | ||
661 | } | ||
668 | } | 662 | } |
669 | 663 | ||
670 | static umode_t iser_attr_is_visible(int param_type, int param) | 664 | static umode_t iser_attr_is_visible(int param_type, int param) |
@@ -748,13 +742,13 @@ static struct iscsi_transport iscsi_iser_transport = { | |||
748 | /* connection management */ | 742 | /* connection management */ |
749 | .create_conn = iscsi_iser_conn_create, | 743 | .create_conn = iscsi_iser_conn_create, |
750 | .bind_conn = iscsi_iser_conn_bind, | 744 | .bind_conn = iscsi_iser_conn_bind, |
751 | .destroy_conn = iscsi_iser_conn_destroy, | 745 | .destroy_conn = iscsi_conn_teardown, |
752 | .attr_is_visible = iser_attr_is_visible, | 746 | .attr_is_visible = iser_attr_is_visible, |
753 | .set_param = iscsi_iser_set_param, | 747 | .set_param = iscsi_iser_set_param, |
754 | .get_conn_param = iscsi_conn_get_param, | 748 | .get_conn_param = iscsi_conn_get_param, |
755 | .get_ep_param = iscsi_iser_get_ep_param, | 749 | .get_ep_param = iscsi_iser_get_ep_param, |
756 | .get_session_param = iscsi_session_get_param, | 750 | .get_session_param = iscsi_session_get_param, |
757 | .start_conn = iscsi_conn_start, | 751 | .start_conn = iscsi_iser_conn_start, |
758 | .stop_conn = iscsi_iser_conn_stop, | 752 | .stop_conn = iscsi_iser_conn_stop, |
759 | /* iscsi host params */ | 753 | /* iscsi host params */ |
760 | .get_host_param = iscsi_host_get_param, | 754 | .get_host_param = iscsi_host_get_param, |
@@ -801,6 +795,12 @@ static int __init iser_init(void) | |||
801 | mutex_init(&ig.connlist_mutex); | 795 | mutex_init(&ig.connlist_mutex); |
802 | INIT_LIST_HEAD(&ig.connlist); | 796 | INIT_LIST_HEAD(&ig.connlist); |
803 | 797 | ||
798 | release_wq = alloc_workqueue("release workqueue", 0, 0); | ||
799 | if (!release_wq) { | ||
800 | iser_err("failed to allocate release workqueue\n"); | ||
801 | return -ENOMEM; | ||
802 | } | ||
803 | |||
804 | iscsi_iser_scsi_transport = iscsi_register_transport( | 804 | iscsi_iser_scsi_transport = iscsi_register_transport( |
805 | &iscsi_iser_transport); | 805 | &iscsi_iser_transport); |
806 | if (!iscsi_iser_scsi_transport) { | 806 | if (!iscsi_iser_scsi_transport) { |
@@ -819,7 +819,24 @@ register_transport_failure: | |||
819 | 819 | ||
820 | static void __exit iser_exit(void) | 820 | static void __exit iser_exit(void) |
821 | { | 821 | { |
822 | struct iser_conn *ib_conn, *n; | ||
823 | int connlist_empty; | ||
824 | |||
822 | iser_dbg("Removing iSER datamover...\n"); | 825 | iser_dbg("Removing iSER datamover...\n"); |
826 | destroy_workqueue(release_wq); | ||
827 | |||
828 | mutex_lock(&ig.connlist_mutex); | ||
829 | connlist_empty = list_empty(&ig.connlist); | ||
830 | mutex_unlock(&ig.connlist_mutex); | ||
831 | |||
832 | if (!connlist_empty) { | ||
833 | iser_err("Error cleanup stage completed but we still have iser " | ||
834 | "connections, destroying them anyway.\n"); | ||
835 | list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) { | ||
836 | iser_conn_release(ib_conn); | ||
837 | } | ||
838 | } | ||
839 | |||
823 | iscsi_unregister_transport(&iscsi_iser_transport); | 840 | iscsi_unregister_transport(&iscsi_iser_transport); |
824 | kmem_cache_destroy(ig.desc_cache); | 841 | kmem_cache_destroy(ig.desc_cache); |
825 | } | 842 | } |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 324129f80d40..d30962096ef5 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -333,6 +333,8 @@ struct iser_conn { | |||
333 | int post_recv_buf_count; /* posted rx count */ | 333 | int post_recv_buf_count; /* posted rx count */ |
334 | atomic_t post_send_buf_count; /* posted tx count */ | 334 | atomic_t post_send_buf_count; /* posted tx count */ |
335 | char name[ISER_OBJECT_NAME_SIZE]; | 335 | char name[ISER_OBJECT_NAME_SIZE]; |
336 | struct work_struct release_work; | ||
337 | struct completion stop_completion; | ||
336 | struct list_head conn_list; /* entry in ig conn list */ | 338 | struct list_head conn_list; /* entry in ig conn list */ |
337 | 339 | ||
338 | char *login_buf; | 340 | char *login_buf; |
@@ -417,12 +419,12 @@ void iscsi_iser_recv(struct iscsi_conn *conn, | |||
417 | 419 | ||
418 | void iser_conn_init(struct iser_conn *ib_conn); | 420 | void iser_conn_init(struct iser_conn *ib_conn); |
419 | 421 | ||
420 | void iser_conn_get(struct iser_conn *ib_conn); | 422 | void iser_conn_release(struct iser_conn *ib_conn); |
421 | |||
422 | int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed); | ||
423 | 423 | ||
424 | void iser_conn_terminate(struct iser_conn *ib_conn); | 424 | void iser_conn_terminate(struct iser_conn *ib_conn); |
425 | 425 | ||
426 | void iser_release_work(struct work_struct *work); | ||
427 | |||
426 | void iser_rcv_completion(struct iser_rx_desc *desc, | 428 | void iser_rcv_completion(struct iser_rx_desc *desc, |
427 | unsigned long dto_xfer_len, | 429 | unsigned long dto_xfer_len, |
428 | struct iser_conn *ib_conn); | 430 | struct iser_conn *ib_conn); |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 32849f2becde..4c698e58e550 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -581,14 +581,30 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, | |||
581 | return ret; | 581 | return ret; |
582 | } | 582 | } |
583 | 583 | ||
584 | void iser_release_work(struct work_struct *work) | ||
585 | { | ||
586 | struct iser_conn *ib_conn; | ||
587 | |||
588 | ib_conn = container_of(work, struct iser_conn, release_work); | ||
589 | |||
590 | /* wait for .conn_stop callback */ | ||
591 | wait_for_completion(&ib_conn->stop_completion); | ||
592 | |||
593 | /* wait for the qp`s post send and post receive buffers to empty */ | ||
594 | wait_event_interruptible(ib_conn->wait, | ||
595 | ib_conn->state == ISER_CONN_DOWN); | ||
596 | |||
597 | iser_conn_release(ib_conn); | ||
598 | } | ||
599 | |||
584 | /** | 600 | /** |
585 | * Frees all conn objects and deallocs conn descriptor | 601 | * Frees all conn objects and deallocs conn descriptor |
586 | */ | 602 | */ |
587 | static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) | 603 | void iser_conn_release(struct iser_conn *ib_conn) |
588 | { | 604 | { |
589 | struct iser_device *device = ib_conn->device; | 605 | struct iser_device *device = ib_conn->device; |
590 | 606 | ||
591 | BUG_ON(ib_conn->state != ISER_CONN_DOWN); | 607 | BUG_ON(ib_conn->state == ISER_CONN_UP); |
592 | 608 | ||
593 | mutex_lock(&ig.connlist_mutex); | 609 | mutex_lock(&ig.connlist_mutex); |
594 | list_del(&ib_conn->conn_list); | 610 | list_del(&ib_conn->conn_list); |
@@ -600,27 +616,13 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) | |||
600 | if (device != NULL) | 616 | if (device != NULL) |
601 | iser_device_try_release(device); | 617 | iser_device_try_release(device); |
602 | /* if cma handler context, the caller actually destroy the id */ | 618 | /* if cma handler context, the caller actually destroy the id */ |
603 | if (ib_conn->cma_id != NULL && can_destroy_id) { | 619 | if (ib_conn->cma_id != NULL) { |
604 | rdma_destroy_id(ib_conn->cma_id); | 620 | rdma_destroy_id(ib_conn->cma_id); |
605 | ib_conn->cma_id = NULL; | 621 | ib_conn->cma_id = NULL; |
606 | } | 622 | } |
607 | iscsi_destroy_endpoint(ib_conn->ep); | 623 | iscsi_destroy_endpoint(ib_conn->ep); |
608 | } | 624 | } |
609 | 625 | ||
610 | void iser_conn_get(struct iser_conn *ib_conn) | ||
611 | { | ||
612 | atomic_inc(&ib_conn->refcount); | ||
613 | } | ||
614 | |||
615 | int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) | ||
616 | { | ||
617 | if (atomic_dec_and_test(&ib_conn->refcount)) { | ||
618 | iser_conn_release(ib_conn, can_destroy_id); | ||
619 | return 1; | ||
620 | } | ||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | /** | 626 | /** |
625 | * triggers start of the disconnect procedures and wait for them to be done | 627 | * triggers start of the disconnect procedures and wait for them to be done |
626 | */ | 628 | */ |
@@ -638,24 +640,19 @@ void iser_conn_terminate(struct iser_conn *ib_conn) | |||
638 | if (err) | 640 | if (err) |
639 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", | 641 | iser_err("Failed to disconnect, conn: 0x%p err %d\n", |
640 | ib_conn,err); | 642 | ib_conn,err); |
641 | |||
642 | wait_event_interruptible(ib_conn->wait, | ||
643 | ib_conn->state == ISER_CONN_DOWN); | ||
644 | |||
645 | iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ | ||
646 | } | 643 | } |
647 | 644 | ||
648 | static int iser_connect_error(struct rdma_cm_id *cma_id) | 645 | static void iser_connect_error(struct rdma_cm_id *cma_id) |
649 | { | 646 | { |
650 | struct iser_conn *ib_conn; | 647 | struct iser_conn *ib_conn; |
648 | |||
651 | ib_conn = (struct iser_conn *)cma_id->context; | 649 | ib_conn = (struct iser_conn *)cma_id->context; |
652 | 650 | ||
653 | ib_conn->state = ISER_CONN_DOWN; | 651 | ib_conn->state = ISER_CONN_DOWN; |
654 | wake_up_interruptible(&ib_conn->wait); | 652 | wake_up_interruptible(&ib_conn->wait); |
655 | return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ | ||
656 | } | 653 | } |
657 | 654 | ||
658 | static int iser_addr_handler(struct rdma_cm_id *cma_id) | 655 | static void iser_addr_handler(struct rdma_cm_id *cma_id) |
659 | { | 656 | { |
660 | struct iser_device *device; | 657 | struct iser_device *device; |
661 | struct iser_conn *ib_conn; | 658 | struct iser_conn *ib_conn; |
@@ -664,7 +661,8 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id) | |||
664 | device = iser_device_find_by_ib_device(cma_id); | 661 | device = iser_device_find_by_ib_device(cma_id); |
665 | if (!device) { | 662 | if (!device) { |
666 | iser_err("device lookup/creation failed\n"); | 663 | iser_err("device lookup/creation failed\n"); |
667 | return iser_connect_error(cma_id); | 664 | iser_connect_error(cma_id); |
665 | return; | ||
668 | } | 666 | } |
669 | 667 | ||
670 | ib_conn = (struct iser_conn *)cma_id->context; | 668 | ib_conn = (struct iser_conn *)cma_id->context; |
@@ -686,13 +684,12 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id) | |||
686 | ret = rdma_resolve_route(cma_id, 1000); | 684 | ret = rdma_resolve_route(cma_id, 1000); |
687 | if (ret) { | 685 | if (ret) { |
688 | iser_err("resolve route failed: %d\n", ret); | 686 | iser_err("resolve route failed: %d\n", ret); |
689 | return iser_connect_error(cma_id); | 687 | iser_connect_error(cma_id); |
688 | return; | ||
690 | } | 689 | } |
691 | |||
692 | return 0; | ||
693 | } | 690 | } |
694 | 691 | ||
695 | static int iser_route_handler(struct rdma_cm_id *cma_id) | 692 | static void iser_route_handler(struct rdma_cm_id *cma_id) |
696 | { | 693 | { |
697 | struct rdma_conn_param conn_param; | 694 | struct rdma_conn_param conn_param; |
698 | int ret; | 695 | int ret; |
@@ -720,9 +717,9 @@ static int iser_route_handler(struct rdma_cm_id *cma_id) | |||
720 | goto failure; | 717 | goto failure; |
721 | } | 718 | } |
722 | 719 | ||
723 | return 0; | 720 | return; |
724 | failure: | 721 | failure: |
725 | return iser_connect_error(cma_id); | 722 | iser_connect_error(cma_id); |
726 | } | 723 | } |
727 | 724 | ||
728 | static void iser_connected_handler(struct rdma_cm_id *cma_id) | 725 | static void iser_connected_handler(struct rdma_cm_id *cma_id) |
@@ -739,10 +736,9 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) | |||
739 | wake_up_interruptible(&ib_conn->wait); | 736 | wake_up_interruptible(&ib_conn->wait); |
740 | } | 737 | } |
741 | 738 | ||
742 | static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | 739 | static void iser_disconnected_handler(struct rdma_cm_id *cma_id) |
743 | { | 740 | { |
744 | struct iser_conn *ib_conn; | 741 | struct iser_conn *ib_conn; |
745 | int ret; | ||
746 | 742 | ||
747 | ib_conn = (struct iser_conn *)cma_id->context; | 743 | ib_conn = (struct iser_conn *)cma_id->context; |
748 | 744 | ||
@@ -762,24 +758,19 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
762 | ib_conn->state = ISER_CONN_DOWN; | 758 | ib_conn->state = ISER_CONN_DOWN; |
763 | wake_up_interruptible(&ib_conn->wait); | 759 | wake_up_interruptible(&ib_conn->wait); |
764 | } | 760 | } |
765 | |||
766 | ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ | ||
767 | return ret; | ||
768 | } | 761 | } |
769 | 762 | ||
770 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | 763 | static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
771 | { | 764 | { |
772 | int ret = 0; | ||
773 | |||
774 | iser_info("event %d status %d conn %p id %p\n", | 765 | iser_info("event %d status %d conn %p id %p\n", |
775 | event->event, event->status, cma_id->context, cma_id); | 766 | event->event, event->status, cma_id->context, cma_id); |
776 | 767 | ||
777 | switch (event->event) { | 768 | switch (event->event) { |
778 | case RDMA_CM_EVENT_ADDR_RESOLVED: | 769 | case RDMA_CM_EVENT_ADDR_RESOLVED: |
779 | ret = iser_addr_handler(cma_id); | 770 | iser_addr_handler(cma_id); |
780 | break; | 771 | break; |
781 | case RDMA_CM_EVENT_ROUTE_RESOLVED: | 772 | case RDMA_CM_EVENT_ROUTE_RESOLVED: |
782 | ret = iser_route_handler(cma_id); | 773 | iser_route_handler(cma_id); |
783 | break; | 774 | break; |
784 | case RDMA_CM_EVENT_ESTABLISHED: | 775 | case RDMA_CM_EVENT_ESTABLISHED: |
785 | iser_connected_handler(cma_id); | 776 | iser_connected_handler(cma_id); |
@@ -789,18 +780,18 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve | |||
789 | case RDMA_CM_EVENT_CONNECT_ERROR: | 780 | case RDMA_CM_EVENT_CONNECT_ERROR: |
790 | case RDMA_CM_EVENT_UNREACHABLE: | 781 | case RDMA_CM_EVENT_UNREACHABLE: |
791 | case RDMA_CM_EVENT_REJECTED: | 782 | case RDMA_CM_EVENT_REJECTED: |
792 | ret = iser_connect_error(cma_id); | 783 | iser_connect_error(cma_id); |
793 | break; | 784 | break; |
794 | case RDMA_CM_EVENT_DISCONNECTED: | 785 | case RDMA_CM_EVENT_DISCONNECTED: |
795 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 786 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
796 | case RDMA_CM_EVENT_ADDR_CHANGE: | 787 | case RDMA_CM_EVENT_ADDR_CHANGE: |
797 | ret = iser_disconnected_handler(cma_id); | 788 | iser_disconnected_handler(cma_id); |
798 | break; | 789 | break; |
799 | default: | 790 | default: |
800 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); | 791 | iser_err("Unexpected RDMA CM event (%d)\n", event->event); |
801 | break; | 792 | break; |
802 | } | 793 | } |
803 | return ret; | 794 | return 0; |
804 | } | 795 | } |
805 | 796 | ||
806 | void iser_conn_init(struct iser_conn *ib_conn) | 797 | void iser_conn_init(struct iser_conn *ib_conn) |
@@ -809,7 +800,7 @@ void iser_conn_init(struct iser_conn *ib_conn) | |||
809 | init_waitqueue_head(&ib_conn->wait); | 800 | init_waitqueue_head(&ib_conn->wait); |
810 | ib_conn->post_recv_buf_count = 0; | 801 | ib_conn->post_recv_buf_count = 0; |
811 | atomic_set(&ib_conn->post_send_buf_count, 0); | 802 | atomic_set(&ib_conn->post_send_buf_count, 0); |
812 | atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */ | 803 | init_completion(&ib_conn->stop_completion); |
813 | INIT_LIST_HEAD(&ib_conn->conn_list); | 804 | INIT_LIST_HEAD(&ib_conn->conn_list); |
814 | spin_lock_init(&ib_conn->lock); | 805 | spin_lock_init(&ib_conn->lock); |
815 | } | 806 | } |
@@ -837,7 +828,6 @@ int iser_connect(struct iser_conn *ib_conn, | |||
837 | 828 | ||
838 | ib_conn->state = ISER_CONN_PENDING; | 829 | ib_conn->state = ISER_CONN_PENDING; |
839 | 830 | ||
840 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ | ||
841 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, | 831 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
842 | (void *)ib_conn, | 832 | (void *)ib_conn, |
843 | RDMA_PS_TCP, IB_QPT_RC); | 833 | RDMA_PS_TCP, IB_QPT_RC); |
@@ -874,9 +864,8 @@ id_failure: | |||
874 | ib_conn->cma_id = NULL; | 864 | ib_conn->cma_id = NULL; |
875 | addr_failure: | 865 | addr_failure: |
876 | ib_conn->state = ISER_CONN_DOWN; | 866 | ib_conn->state = ISER_CONN_DOWN; |
877 | iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */ | ||
878 | connect_failure: | 867 | connect_failure: |
879 | iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ | 868 | iser_conn_release(ib_conn); |
880 | return err; | 869 | return err; |
881 | } | 870 | } |
882 | 871 | ||