author		Linus Torvalds <torvalds@linux-foundation.org>	2008-08-12 19:38:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-12 19:38:45 -0400
commit		dae816835e8d1276c9e50d9c4dba75b7705fe66c (patch)
tree		0df2e2aeb3d2ac7ba8deb4352e83cf8d306a91aa
parent		ddc752a4068088bfc5558a3f72e4f7cca3fc1210 (diff)
parent		3a3eae0d66591572f771b9383e96ecacacee0abd (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Discard double CQE for one WR
  IB/ehca: Check idr_find() return value
  IB/ehca: Repoll CQ on invalid opcode
  IB/ehca: Rename goto label in ehca_poll_cq_one()
  IB/ehca: Update qp_state on cached modify_qp()
  IPoIB/cm: Use vmalloc() to allocate rx_rings
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  9
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qes.h     |  1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c      | 48
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c    | 60
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c   | 17
5 files changed, 104 insertions(+), 31 deletions(-)
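The IB/ehca patches below revolve around one mechanism: since the adapter can deliver two CQEs for a single send WR, the driver now keeps a per-QP map of send-queue slots (sq_map) and borrows the low 16 bits of each wr_id as the slot index, restoring the application's bits at poll time. A minimal standalone sketch of that packing scheme — pack_wr_id()/unpack_wr_id() are hypothetical names; the driver open-codes this in ehca_write_swqe() and ehca_poll_cq_one():

#include <stdint.h>

#define QMAP_IDX_MASK 0xFFFFULL	/* low 16 bits carry the sq_map index */

/* Hypothetical helpers illustrating the wr_id packing used below. */
static uint64_t pack_wr_id(uint64_t app_wr_id, uint32_t sq_map_idx)
{
	/* keep the application's upper 48 bits, replace the low 16
	 * with the send queue slot index */
	return (app_wr_id & ~QMAP_IDX_MASK) | (sq_map_idx & QMAP_IDX_MASK);
}

static uint64_t unpack_wr_id(uint64_t hw_wr_id, uint16_t saved_app_bits)
{
	/* on completion, restore the low 16 bits saved in the map entry */
	return (hw_wr_id & ~QMAP_IDX_MASK) | saved_app_bits;
}

The 16-bit mask implies the driver assumes at most 65536 send WQE slots per QP.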
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0b0618edd645..1ab919f836a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm {
 
 #define EHCA_MOD_QP_PARM_MAX 4
 
+#define QMAP_IDX_MASK 0xFFFFULL
+
+/* struct for tracking if cqes have been reported to the application */
+struct ehca_qmap_entry {
+	u16 app_wr_id;
+	u16 reported;
+};
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
@@ -165,6 +173,7 @@ struct ehca_qp {
 	enum ehca_ext_qp_type ext_type;
 	enum ib_qp_state state;
 	struct ipz_queue ipz_squeue;
+	struct ehca_qmap_entry *sq_map;
 	struct ipz_queue ipz_rqueue;
 	struct h_galpas galpas;
 	u32 qkey;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 818803057ebf..5d28e3e98a20 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -213,6 +213,7 @@ struct ehca_wqe {
 #define WC_STATUS_ERROR_BIT 0x80000000
 #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
 #define WC_STATUS_PURGE_BIT 0x10
+#define WC_SEND_RECEIVE_BIT 0x80
 
 struct ehca_cqe {
 	u64 work_request_id;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efddf175..b6bcee036734 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp(
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
 	struct ib_ucontext *context = NULL;
+	u32 nr_qes;
 	u64 h_ret;
 	int is_llqp = 0, has_srq = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit2;
 		}
+		nr_qes = my_qp->ipz_squeue.queue_length /
+			 my_qp->ipz_squeue.qe_size;
+		my_qp->sq_map = vmalloc(nr_qes *
+					sizeof(struct ehca_qmap_entry));
+		if (!my_qp->sq_map) {
+			ehca_err(pd->device, "Couldn't allocate squeue "
+				 "map ret=%i", ret);
+			goto create_qp_exit3;
+		}
 	}
 
 	if (HAS_RQ(my_qp)) {
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device, "Couldn't initialize rqueue "
 				 "and pages ret=%i", ret);
-			goto create_qp_exit3;
+			goto create_qp_exit4;
 		}
 	}
 
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp(
 			if (!my_qp->mod_qp_parm) {
 				ehca_err(pd->device,
 					 "Could not alloc mod_qp_parm");
-				goto create_qp_exit4;
+				goto create_qp_exit5;
 			}
 		}
 	}
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp(
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
 			ret = ehca2ib_return_code(h_ret);
-			goto create_qp_exit5;
+			goto create_qp_exit6;
 		}
 	}
 
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device,
 				 "Couldn't assign qp to send_cq ret=%i", ret);
-			goto create_qp_exit5;
+			goto create_qp_exit6;
 		}
 	}
 
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp(
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit6;
+			goto create_qp_exit7;
 		}
 	}
 
 	return my_qp;
 
-create_qp_exit6:
+create_qp_exit7:
 	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
 
-create_qp_exit5:
+create_qp_exit6:
 	kfree(my_qp->mod_qp_parm);
 
-create_qp_exit4:
+create_qp_exit5:
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
+create_qp_exit4:
+	if (HAS_SQ(my_qp))
+		vfree(my_qp->sq_map);
+
create_qp_exit3:
 	if (HAS_SQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_QKEY)
 		my_qp->qkey = attr->qkey;
 
-	my_qp->state = qp_new_state;
-
 modify_qp_exit2:
 	if (squeue_locked) { /* this means: sqe -> rts */
 		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1563,8 @@ modify_qp_exit1:
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		   struct ib_udata *udata)
 {
+	int ret = 0;
+
 	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
 					      ib_device);
 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 				 attr->qp_state, my_qp->init_attr.port_num,
 				 ibqp->qp_type);
 			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-			return 0;
+			goto out;
 		}
 		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
 	}
 
-	return internal_modify_qp(ibqp, attr, attr_mask, 0);
+	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+
+out:
+	if ((ret == 0) && (attr_mask & IB_QP_STATE))
+		my_qp->state = attr->qp_state;
+
+	return ret;
 }
 
 void ehca_recover_sqp(struct ib_qp *sqp)
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
+		vfree(my_qp->sq_map);
+	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);
 	return 0;
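Worth noting on the allocation above: sq_map gets one 4-byte ehca_qmap_entry per WQE slot (nr_qes = queue_length / qe_size), so it scales with the send queue, which is why it is vmalloc()'d and vfree()'d rather than kmalloc()'d. A rough sizing sketch with made-up queue dimensions (the real values come from the QP's ipz_squeue):

#include <stdint.h>
#include <stdio.h>

struct ehca_qmap_entry { uint16_t app_wr_id; uint16_t reported; };

int main(void)
{
	/* hypothetical dimensions, not taken from the driver */
	unsigned long long queue_length = 512 * 1024;	/* 512 KiB squeue */
	unsigned long long qe_size = 128;		/* 128-byte WQEs */
	unsigned long long nr_qes = queue_length / qe_size;

	/* here: 4096 entries * 4 bytes = 16 KiB; larger queues grow further */
	printf("sq_map: %llu entries, %llu bytes\n", nr_qes,
	       (unsigned long long)(nr_qes * sizeof(struct ehca_qmap_entry)));
	return 0;
}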
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 898c8b5c38dd..4426d82fe798 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
 static inline int ehca_write_swqe(struct ehca_qp *qp,
 				  struct ehca_wqe *wqe_p,
 				  const struct ib_send_wr *send_wr,
+				  u32 sq_map_idx,
 				  int hidden)
 {
 	u32 idx;
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = send_wr->wr_id;
+	wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
+	wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+
+	qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
+	qp->sq_map[sq_map_idx].reported = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 {
 	struct ehca_wqe *wqe_p;
 	int ret;
+	u32 sq_map_idx;
 	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
 
 	/* get pointer next to free WQE */
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 			 "qp_num=%x", my_qp->ib_qp.qp_num);
 		return -ENOMEM;
 	}
+
+	/*
+	 * Get the index of the WQE in the send queue. The same index is used
+	 * for writing into the sq_map.
+	 */
+	sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
+
 	/* write a SEND WQE into the QUEUE */
-	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
 	/*
 	 * if something failed,
 	 * reset the free entry pointer to the start value
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 	struct ehca_qp *my_qp;
 	int cqe_count = 0, is_error;
 
-poll_cq_one_read_cqe:
+repoll:
 	cqe = (struct ehca_cqe *)
 		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
 	if (!cqe) {
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe:
 			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
 				 my_cq->cq_number, cqe->local_qp_number);
 			/* ignore this purged cqe */
-			goto poll_cq_one_read_cqe;
+			goto repoll;
 		}
 		spin_lock_irqsave(&qp->spinlock_s, flags);
 		purgeflag = qp->sqerr_purgeflag;
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe:
 			 * that caused sqe and turn off purge flag
 			 */
 			qp->sqerr_purgeflag = 0;
-			goto poll_cq_one_read_cqe;
+			goto repoll;
 		}
 	}
 
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe:
 			 my_cq, my_cq->cq_number);
 	}
 
-	/* we got a completion! */
-	wc->wr_id = cqe->work_request_id;
+	read_lock(&ehca_qp_idr_lock);
+	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+	read_unlock(&ehca_qp_idr_lock);
+	if (!my_qp)
+		goto repoll;
+	wc->qp = &my_qp->ib_qp;
+
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
+		struct ehca_qmap_entry *qmap_entry;
+		/*
+		 * We got a send completion and need to restore the original
+		 * wr_id.
+		 */
+		qmap_entry = &my_qp->sq_map[cqe->work_request_id &
+					    QMAP_IDX_MASK];
+
+		if (qmap_entry->reported) {
+			ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+				  my_qp->real_qp_num);
+			/* found a double cqe, discard it and read next one */
+			goto repoll;
+		}
+		wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
+		wc->wr_id |= qmap_entry->app_wr_id;
+		qmap_entry->reported = 1;
+	} else
+		/* We got a receive completion. */
+		wc->wr_id = cqe->work_request_id;
 
 	/* eval ib_wc_opcode */
 	wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe:
 		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
 			 my_cq, my_cq->cq_number);
 		/* update also queue adder to throw away this entry!!! */
-		goto poll_cq_one_exit0;
+		goto repoll;
 	}
 
 	/* eval ib_wc_status */
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe:
 	} else
 		wc->status = IB_WC_SUCCESS;
 
-	read_lock(&ehca_qp_idr_lock);
-	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
-	wc->qp = &my_qp->ib_qp;
-	read_unlock(&ehca_qp_idr_lock);
-
 	wc->byte_len = cqe->nr_bytes_transferred;
 	wc->pkey_index = cqe->pkey_index;
 	wc->slid = cqe->rlid;
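The dedup effect of sq_map on the poll path can be shown in isolation: deliver the same send completion twice and the second one is dropped. A self-contained userspace model of the hunk above (all names hypothetical, reduced to the reported-flag logic):

#include <stdint.h>
#include <stdio.h>

#define QMAP_IDX_MASK 0xFFFFULL

struct qmap_entry { uint16_t app_wr_id; uint16_t reported; };

static struct qmap_entry sq_map[16];

/* returns 1 if the completion should be reported, 0 if it is a duplicate */
static int report_send_completion(uint64_t hw_wr_id, uint64_t *app_wr_id)
{
	struct qmap_entry *e = &sq_map[hw_wr_id & QMAP_IDX_MASK];

	if (e->reported)
		return 0;			/* double CQE: discard */
	*app_wr_id = (hw_wr_id & ~QMAP_IDX_MASK) | e->app_wr_id;
	e->reported = 1;
	return 1;
}

int main(void)
{
	uint64_t app_wr_id, wr_id = 0xABCD0000ULL | 3;	/* slot index 3 */

	sq_map[3].app_wr_id = 0x0042;	/* low bits saved at post time */
	printf("first:  %d\n", report_send_completion(wr_id, &app_wr_id));
	printf("second: %d\n", report_send_completion(wr_id, &app_wr_id));
	return 0;
}

Note that ehca_poll_cq_one() also now checks the idr_find() result before touching the QP: a CQE can still be in flight for a QP that was already destroyed, and such an entry is simply skipped via repoll.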
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7ebc400a4b3d..341ffedafed6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
 			dev_kfree_skb_any(rx_ring[i].skb);
 	}
 
-	kfree(rx_ring);
+	vfree(rx_ring);
 }
 
 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 	int ret;
 	int i;
 
-	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
-	if (!rx->rx_ring)
+	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	if (!rx->rx_ring) {
+		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
+		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
+	}
+
+	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
 
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
-	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
-				    GFP_KERNEL);
+	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		ib_destroy_srq(priv->cm.srq);
 		priv->cm.srq = NULL;
+		return;
 	}
+
+	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
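The IPoIB hunks follow the same reasoning as the ehca sq_map: the rx rings scale with ipoib_recvq_size, and a large physically contiguous kcalloc()/kzalloc() can fail under memory fragmentation, so the rings move to vmalloc() plus an explicit memset() (this tree predates vzalloc()). A generic sketch of the pattern, with struct ring_entry as a placeholder element type:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct ring_entry {		/* placeholder for the real ring element */
	struct sk_buff *skb;
	u64 mapping;
};

/* Virtually contiguous, zeroed by hand; pair with vfree(), not kfree(). */
static struct ring_entry *alloc_ring(int nr_entries)
{
	struct ring_entry *ring;

	ring = vmalloc(nr_entries * sizeof *ring);
	if (!ring)
		return NULL;
	memset(ring, 0, nr_entries * sizeof *ring);
	return ring;
}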