Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_verbs.c')
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 281
1 file changed, 147 insertions(+), 134 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32ce38e..308d17bb5146 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
 #include "iscsi_iser.h"
 
 #define ISCSI_ISER_MAX_CONN	8
-#define ISER_MAX_CQ_LEN		((ISER_QP_MAX_RECV_DTOS + \
-				ISER_QP_MAX_REQ_DTOS) * \
-				ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
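The old single CQ, sized for both directions across all connections, is split into a receive CQ and a send CQ, each sized for one direction only. Rough arithmetic, with DTO counts assumed for illustration (the real values are defined in iscsi_iser.h, not in this patch):

	/* If ISER_QP_MAX_RECV_DTOS and ISER_QP_MAX_REQ_DTOS were both 512
	 * (assumed), the old shared CQ held (512 + 512) * 8 = 8192 entries;
	 * each of the new per-direction CQs holds 512 * 8 = 4096.
	 */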
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->cq = ib_create_cq(device->ib_device,
-				  iser_cq_callback,
-				  iser_cq_event_callback,
-				  (void *)device,
-				  ISER_MAX_CQ_LEN, 0);
-	if (IS_ERR(device->cq))
-		goto cq_err;
+	device->rx_cq = ib_create_cq(device->ib_device,
+				  iser_cq_callback,
+				  iser_cq_event_callback,
+				  (void *)device,
+				  ISER_MAX_RX_CQ_LEN, 0);
+	if (IS_ERR(device->rx_cq))
+		goto rx_cq_err;
 
-	if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+	device->tx_cq = ib_create_cq(device->ib_device,
+				  NULL, iser_cq_event_callback,
+				  (void *)device,
+				  ISER_MAX_TX_CQ_LEN, 0);
+
+	if (IS_ERR(device->tx_cq))
+		goto tx_cq_err;
+
+	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
 		goto cq_arm_err;
 
 	tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
 dma_mr_err:
 	tasklet_kill(&device->cq_tasklet);
 cq_arm_err:
-	ib_destroy_cq(device->cq);
-cq_err:
+	ib_destroy_cq(device->tx_cq);
+tx_cq_err:
+	ib_destroy_cq(device->rx_cq);
+rx_cq_err:
 	ib_dealloc_pd(device->pd);
 pd_err:
 	iser_err("failed to allocate an IB resource\n");
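The unwind ladder grows one label per resource: a failure jumps to the label that releases everything allocated before it, in reverse order. A minimal standalone sketch of the idiom (all names hypothetical, not from the driver):

	static int create_two_cqs(struct dev_ctx *d)	/* hypothetical type */
	{
		d->rx_cq = create_cq(d);		/* hypothetical helper */
		if (IS_ERR(d->rx_cq))
			goto rx_cq_err;

		d->tx_cq = create_cq(d);
		if (IS_ERR(d->tx_cq))
			goto tx_cq_err;			/* rx_cq must be undone */
		return 0;

	tx_cq_err:
		destroy_cq(d->rx_cq);			/* unwind in reverse order */
	rx_cq_err:
		return -ENOMEM;
	}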
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
 	tasklet_kill(&device->cq_tasklet);
 
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->cq);
+	(void)ib_destroy_cq(device->tx_cq);
+	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
 
 	device->mr = NULL;
-	device->cq = NULL;
+	device->tx_cq = NULL;
+	device->rx_cq = NULL;
 	device->pd = NULL;
 }
 
@@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 {
 	struct iser_device	*device;
 	struct ib_qp_init_attr	init_attr;
-	int			ret;
+	int			ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
 
 	BUG_ON(ib_conn->device == NULL);
 
 	device = ib_conn->device;
 
+	ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!ib_conn->login_buf) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+
+	ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
+				(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
+				DMA_FROM_DEVICE);
+
 	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
 				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
 				    GFP_KERNEL);
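The ib_dma_map_single() result above is used as-is; the patch does not check it. A stricter variant, sketched under the assumption that the surrounding fields stay as in this patch (the extra label is hypothetical), would be:

	ib_conn->login_dma = ib_dma_map_single(device->ib_device,
				ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
				DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, ib_conn->login_dma)) {
		ret = -ENOMEM;
		goto login_map_err;	/* hypothetical label: kfree(login_buf) */
	}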
@@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
-	init_attr.send_cq	= device->cq;
-	init_attr.recv_cq	= device->cq;
+	init_attr.send_cq	= device->tx_cq;
+	init_attr.recv_cq	= device->rx_cq;
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
-	init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
-	init_attr.cap.max_recv_sge = 2;
+	init_attr.cap.max_send_sge = 2;
+	init_attr.cap.max_recv_sge = 1;
 	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
 	init_attr.qp_type	= IB_QPT_RC;
 
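max_send_sge drops from MAX_REGD_BUF_VECTOR_LEN to 2 and max_recv_sge from 2 to 1: after this patch a send carries at most a headers SGE plus one data SGE, and each receive lands in a single descriptor. The TX descriptor layout this implies, sketched from the field usage later in this patch (the real declaration belongs to iscsi_iser.h and is assumed here):

	struct iser_tx_desc {			/* sketch, not the real header */
		u64		dma_addr;	/* DMA address of header block  */
		struct ib_sge	tx_sg[2];	/* [0] headers, [1] data (opt.) */
		int		num_sge;	/* 1 or 2                       */
	};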
@@ -192,6 +213,7 @@ qp_err:
 	(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
 fmr_pool_err:
 	kfree(ib_conn->page_vec);
+	kfree(ib_conn->login_buf);
 alloc_err:
 	iser_err("unable to alloc mem or create resource, err %d\n", ret);
 	return ret;
@@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device)
 	mutex_unlock(&ig.device_list_mutex);
 }
 
-int iser_conn_state_comp(struct iser_conn *ib_conn,
-			enum iser_ib_conn_state comp)
-{
-	int ret;
-
-	spin_lock_bh(&ib_conn->lock);
-	ret = (ib_conn->state == comp);
-	spin_unlock_bh(&ib_conn->lock);
-	return ret;
-}
-
 static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
 				     enum iser_ib_conn_state comp,
 				     enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
 	mutex_lock(&ig.connlist_mutex);
 	list_del(&ib_conn->conn_list);
 	mutex_unlock(&ig.connlist_mutex);
-
+	iser_free_rx_descriptors(ib_conn);
 	iser_free_ib_conn_res(ib_conn);
 	ib_conn->device = NULL;
 	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 					   ISCSI_ERR_CONN_FAILED);
 
 	/* Complete the termination process if no posts are pending */
-	if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) &&
+	if (ib_conn->post_recv_buf_count == 0 &&
 	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
 		ib_conn->state = ISER_CONN_DOWN;
 		wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn)
 {
 	ib_conn->state = ISER_CONN_INIT;
 	init_waitqueue_head(&ib_conn->wait);
-	atomic_set(&ib_conn->post_recv_buf_count, 0);
+	ib_conn->post_recv_buf_count = 0;
 	atomic_set(&ib_conn->post_send_buf_count, 0);
-	atomic_set(&ib_conn->unexpected_pdu_count, 0);
 	atomic_set(&ib_conn->refcount, 1);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
 	reg->mem_h = NULL;
 }
 
-/**
- * iser_dto_to_iov - builds IOV from a dto descriptor
- */
-static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
-{
-	int		     i;
-	struct ib_sge	     *sge;
-	struct iser_regd_buf *regd_buf;
-
-	if (dto->regd_vector_len > iov_len) {
-		iser_err("iov size %d too small for posting dto of len %d\n",
-			 iov_len, dto->regd_vector_len);
-		BUG();
-	}
-
-	for (i = 0; i < dto->regd_vector_len; i++) {
-		sge	  = &iov[i];
-		regd_buf  = dto->regd[i];
-
-		sge->addr   = regd_buf->reg.va;
-		sge->length = regd_buf->reg.len;
-		sge->lkey   = regd_buf->reg.lkey;
-
-		if (dto->used_sz[i] > 0)  /* Adjust size */
-			sge->length = dto->used_sz[i];
-
-		/* offset and length should not exceed the regd buf length */
-		if (sge->length + dto->offset[i] > regd_buf->reg.len) {
-			iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
-				 "%ld in dto:0x%p [%d], va:0x%08lX\n",
-				 (unsigned long)sge->length, dto->offset[i],
-				 (unsigned long)regd_buf->reg.len, dto, i,
-				 (unsigned long)sge->addr);
-			BUG();
-		}
-
-		sge->addr += dto->offset[i]; /* Adjust offset */
-	}
-}
+int iser_post_recvl(struct iser_conn *ib_conn)
+{
+	struct ib_recv_wr rx_wr, *rx_wr_failed;
+	struct ib_sge	  sge;
+	int ib_ret;
+
+	sge.addr   = ib_conn->login_dma;
+	sge.length = ISER_RX_LOGIN_SIZE;
+	sge.lkey   = ib_conn->device->mr->lkey;
+
+	rx_wr.wr_id   = (unsigned long)ib_conn->login_buf;
+	rx_wr.sg_list = &sge;
+	rx_wr.num_sge = 1;
+	rx_wr.next    = NULL;
+
+	ib_conn->post_recv_buf_count++;
+	ib_ret	= ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
+	if (ib_ret) {
+		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+		ib_conn->post_recv_buf_count--;
+	}
+	return ib_ret;
+}
 
-/**
- * iser_post_recv - Posts a receive buffer.
- *
- * returns 0 on success, -1 on failure
- */
-int iser_post_recv(struct iser_desc *rx_desc)
-{
-	int		  ib_ret, ret_val = 0;
-	struct ib_recv_wr recv_wr, *recv_wr_failed;
-	struct ib_sge	  iov[2];
-	struct iser_conn  *ib_conn;
-	struct iser_dto   *recv_dto = &rx_desc->dto;
-
-	/* Retrieve conn */
-	ib_conn = recv_dto->ib_conn;
-
-	iser_dto_to_iov(recv_dto, iov, 2);
-
-	recv_wr.next	= NULL;
-	recv_wr.sg_list = iov;
-	recv_wr.num_sge = recv_dto->regd_vector_len;
-	recv_wr.wr_id	= (unsigned long)rx_desc;
-
-	atomic_inc(&ib_conn->post_recv_buf_count);
-	ib_ret	= ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed);
-	if (ib_ret) {
-		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
-		atomic_dec(&ib_conn->post_recv_buf_count);
-		ret_val = -1;
-	}
-
-	return ret_val;
-}
+int iser_post_recvm(struct iser_conn *ib_conn, int count)
+{
+	struct ib_recv_wr *rx_wr, *rx_wr_failed;
+	int i, ib_ret;
+	unsigned int my_rx_head = ib_conn->rx_desc_head;
+	struct iser_rx_desc *rx_desc;
+
+	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+		rx_desc = &ib_conn->rx_descs[my_rx_head];
+		rx_wr->wr_id = (unsigned long)rx_desc;
+		rx_wr->sg_list = &rx_desc->rx_sg;
+		rx_wr->num_sge = 1;
+		rx_wr->next = rx_wr + 1;
+		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+	}
+
+	rx_wr--;
+	rx_wr->next = NULL; /* mark end of work requests list */
+
+	ib_conn->post_recv_buf_count += count;
+	ib_ret	= ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
+	if (ib_ret) {
+		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+		ib_conn->post_recv_buf_count -= count;
+	} else
+		ib_conn->rx_desc_head = my_rx_head;
+	return ib_ret;
+}
 
+
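The wrap-around in iser_post_recvm() relies on ISER_QP_MAX_RECV_DTOS being a power of two, since x & (N - 1) equals x % N only in that case. A build-time guard (not part of this patch) would pin that assumption down:

	BUILD_BUG_ON(ISER_QP_MAX_RECV_DTOS & (ISER_QP_MAX_RECV_DTOS - 1));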
 /**
  * iser_start_send - Initiate a Send DTO operation
  *
  * returns 0 on success, -1 on failure
  */
-int iser_post_send(struct iser_desc *tx_desc)
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
 {
-	int		  ib_ret, ret_val = 0;
+	int		  ib_ret;
 	struct ib_send_wr send_wr, *send_wr_failed;
-	struct ib_sge	  iov[MAX_REGD_BUF_VECTOR_LEN];
-	struct iser_conn  *ib_conn;
-	struct iser_dto   *dto = &tx_desc->dto;
 
-	ib_conn = dto->ib_conn;
-
-	iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
+	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
 	send_wr.next	   = NULL;
 	send_wr.wr_id	   = (unsigned long)tx_desc;
-	send_wr.sg_list	   = iov;
-	send_wr.num_sge	   = dto->regd_vector_len;
+	send_wr.sg_list	   = tx_desc->tx_sg;
+	send_wr.num_sge	   = tx_desc->num_sge;
 	send_wr.opcode	   = IB_WR_SEND;
-	send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
+	send_wr.send_flags = IB_SEND_SIGNALED;
 
 	atomic_inc(&ib_conn->post_send_buf_count);
 
 	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
 	if (ib_ret) {
-		iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
-			 dto, dto->regd_vector_len);
 		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
 		atomic_dec(&ib_conn->post_send_buf_count);
-		ret_val = -1;
 	}
-
-	return ret_val;
+	return ib_ret;
 }
 
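iser_post_send() now syncs only the ISER_HEADERS_LEN header region toward the device before posting. The CPU-side counterpart of that sync is assumed to run wherever the descriptor headers are filled in (not visible in this hunk); the usual streaming-DMA pairing looks like:

	/* before the CPU writes the iSER/iSCSI headers */
	ib_dma_sync_single_for_cpu(device->ib_device, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);
	/* ... fill headers ...; iser_post_send() then issues the matching
	 * ib_dma_sync_single_for_device() before ib_post_send(). */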
-static void iser_handle_comp_error(struct iser_desc *desc)
+static void iser_handle_comp_error(struct iser_tx_desc *desc,
+				struct iser_conn *ib_conn)
 {
-	struct iser_dto  *dto     = &desc->dto;
-	struct iser_conn *ib_conn = dto->ib_conn;
-
-	iser_dto_buffs_release(dto);
-
-	if (desc->type == ISCSI_RX) {
-		kfree(desc->data);
+	if (desc && desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, desc);
-		atomic_dec(&ib_conn->post_recv_buf_count);
-	} else { /* type is TX control/command/dataout */
-		if (desc->type == ISCSI_TX_DATAOUT)
-			kmem_cache_free(ig.desc_cache, desc);
-		atomic_dec(&ib_conn->post_send_buf_count);
-	}
 
-	if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
+	if (ib_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
 		/* getting here when the state is UP means that the conn is *
 		 * being terminated asynchronously from the iSCSI layer's   *
@@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc)
 	}
 }
 
+static int iser_drain_tx_cq(struct iser_device *device)
+{
+	struct ib_cq  *cq = device->tx_cq;
+	struct ib_wc  wc;
+	struct iser_tx_desc *tx_desc;
+	struct iser_conn *ib_conn;
+	int completed_tx = 0;
+
+	while (ib_poll_cq(cq, 1, &wc) == 1) {
+		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
+		ib_conn = wc.qp->qp_context;
+		if (wc.status == IB_WC_SUCCESS) {
+			if (wc.opcode == IB_WC_SEND)
+				iser_snd_completion(tx_desc, ib_conn);
+			else
+				iser_err("expected opcode %d got %d\n",
+					IB_WC_SEND, wc.opcode);
+		} else {
+			iser_err("tx id %llx status %d vend_err %x\n",
+				wc.wr_id, wc.status, wc.vendor_err);
+			atomic_dec(&ib_conn->post_send_buf_count);
+			iser_handle_comp_error(tx_desc, ib_conn);
+		}
+		completed_tx++;
+	}
+	return completed_tx;
+}
+
+
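Since tx_cq is created with a NULL completion handler and is never armed, send completions are consumed only by this polling routine, called from the RX tasklet. For contrast, an interrupt-driven TX CQ, which this patch deliberately avoids, would be armed the same way as the RX one:

	/* not done by this patch -- shown only for contrast */
	ib_req_notify_cq(device->tx_cq, IB_CQ_NEXT_COMP);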
 static void iser_cq_tasklet_fn(unsigned long data)
 {
 	struct iser_device  *device = (struct iser_device *)data;
-	struct ib_cq	     *cq = device->cq;
+	struct ib_cq	     *cq = device->rx_cq;
 	struct ib_wc	     wc;
-	struct iser_desc    *desc;
+	struct iser_rx_desc *desc;
 	unsigned long	     xfer_len;
+	struct iser_conn *ib_conn;
+	int completed_tx, completed_rx;
+	completed_tx = completed_rx = 0;
 
 	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		desc	 = (struct iser_desc *) (unsigned long) wc.wr_id;
+		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
 		BUG_ON(desc == NULL);
-
+		ib_conn = wc.qp->qp_context;
 		if (wc.status == IB_WC_SUCCESS) {
-			if (desc->type == ISCSI_RX) {
+			if (wc.opcode == IB_WC_RECV) {
 				xfer_len = (unsigned long)wc.byte_len;
-				iser_rcv_completion(desc, xfer_len);
-			} else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
-				iser_snd_completion(desc);
+				iser_rcv_completion(desc, xfer_len, ib_conn);
+			} else
+				iser_err("expected opcode %d got %d\n",
+					IB_WC_RECV, wc.opcode);
 		} else {
-			iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
-			iser_handle_comp_error(desc);
+			if (wc.status != IB_WC_WR_FLUSH_ERR)
+				iser_err("rx id %llx status %d vend_err %x\n",
+					wc.wr_id, wc.status, wc.vendor_err);
+			ib_conn->post_recv_buf_count--;
+			iser_handle_comp_error(NULL, ib_conn);
 		}
+		completed_rx++;
+		if (!(completed_rx & 63))
+			completed_tx += iser_drain_tx_cq(device);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed"                       */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+	completed_tx += iser_drain_tx_cq(device);
+	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
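The !(completed_rx & 63) test drains the TX CQ once every 64 RX completions: because the batch size is a power of two, the masking is plain modular arithmetic, so a long RX burst cannot starve send-completion processing, and the final drain after the loop reaps whatever remains.

	/* completed_rx & 63  ==  completed_rx % 64   (64 is a power of two) */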
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)