Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/core/mad_rmpp.c             | 19 ++++++++-----------
 drivers/infiniband/core/user_mad.c             |  5 +++--
 drivers/infiniband/hw/mthca/mthca_eq.c         | 16 +++++-----------
 drivers/infiniband/hw/mthca/mthca_qp.c         | 51 ++++++++++++++++++++++---------------------------
 drivers/infiniband/hw/mthca/mthca_srq.c        | 25 +++++++++--------------
 drivers/infiniband/ulp/ipoib/ipoib.h           |  2 +-
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        |  4 ++--
 drivers/infiniband/ulp/ipoib/ipoib_main.c      |  2 ++
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 13 +++++++------
 9 files changed, 63 insertions(+), 74 deletions(-)
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c4..e23836d0e21b 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
 	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
-	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
-	if (pad > data_size || pad < 0)
+	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
 		pad = 0;
 
 	return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_rmpp_mad *rmpp_mad;
 	int timeout;
+	u32 paylen;
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 
 	if (mad_send_wr->seg_num == 1) {
 		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
-		rmpp_mad->rmpp_hdr.paylen_newwin =
-			cpu_to_be32(mad_send_wr->total_seg *
-				    (sizeof(struct ib_rmpp_mad) -
-				     offsetof(struct ib_rmpp_mad, data)) -
-				    mad_send_wr->pad);
+		paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
+			 mad_send_wr->pad;
+		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
 		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
 	} else {
 		mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 
 	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
 		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
-		rmpp_mad->rmpp_hdr.paylen_newwin =
-			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
-				    offsetof(struct ib_rmpp_mad, data) -
-				    mad_send_wr->pad);
+		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
+		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
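
Both mad_rmpp.c hunks rest on the same identity: in a 256-byte MAD the RMPP data area starts after the 36-byte MAD+RMPP header block, so every segment carries a fixed IB_MGMT_RMPP_DATA payload regardless of management class, while the receiver-side data_size (which subtracts the class header) can be smaller. A minimal sketch of the payload arithmetic, assuming the usual ib_mad.h constants; first_seg_paylen()/last_seg_paylen() are illustrative names, not driver functions:

    #include <stdint.h>

    enum {
            IB_MGMT_RMPP_HDR  = 36,                      /* MAD hdr (24) + RMPP hdr (12) */
            IB_MGMT_RMPP_DATA = 256 - IB_MGMT_RMPP_HDR,  /* 220 payload bytes/segment */
    };

    /* The first segment advertises the whole transfer in paylen_newwin... */
    static uint32_t first_seg_paylen(uint32_t total_seg, uint32_t pad)
    {
            return total_seg * IB_MGMT_RMPP_DATA - pad;
    }

    /* ...and the last segment advertises only its own payload, which is
     * exactly what the receive-side get_mad_len() now inverts to recover
     * the pad. */
    static uint32_t last_seg_paylen(uint32_t pad)
    {
            return IB_MGMT_RMPP_DATA - pad;
    }

Accumulating paylen in a plain u32 first also keeps each expression readable before the single cpu_to_be32() conversion.
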
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057ddb..a64d6b4dcc16 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		ret = -EINVAL;
 		goto err_ah;
 	}
-	/* Validate that management class can support RMPP */
+
+	/* Validate that the management class can support RMPP */
 	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
 		hdr_len = offsetof(struct ib_sa_mad, data);
-		data_len = length;
+		data_len = length - hdr_len;
 	} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
 		hdr_len = offsetof(struct ib_vendor_mad, data);
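
The SA branch previously passed the whole remaining buffer as the RMPP payload length, counting the class header twice. Worked through for one 256-byte SA MAD (56 is offsetof(struct ib_sa_mad, data): 24-byte MAD header + 12-byte RMPP header + 20-byte SA header; the variable values are illustrative):

    size_t length   = 256;               /* caller's MAD-sized buffer */
    size_t hdr_len  = 56;                /* offsetof(struct ib_sa_mad, data) */
    size_t data_len = length - hdr_len;  /* 200 -- was 256, header counted twice */
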
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c1..78152a8ad17d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	int i;
 	u8 status;
 
-	/* Make sure EQ size is aligned to a power of 2 size. */
-	for (i = 1; i < nent; i <<= 1)
-		; /* nothing */
-	nent = i;
-
-	eq->dev = dev;
+	eq->dev  = dev;
+	eq->nent = roundup_pow_of_two(max(nent, 2));
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
 	}
 
-	for (i = 0; i < nent; ++i)
+	for (i = 0; i < eq->nent; ++i)
 		set_eqe_hw(get_eqe(eq, i));
 
 	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (err)
 		goto err_out_free_eq;
 
-	eq->nent = nent;
-
 	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
 					MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (mthca_is_memfree(dev))
 		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
 
-	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
 	if (mthca_is_memfree(dev)) {
 		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
 	} else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	dev->eq_table.arm_mask |= eq->eqn_mask;
 
 	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
-		  eq->eqn, nent);
+		  eq->eqn, eq->nent);
 
 	return err;
 
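
roundup_pow_of_two() is a drop-in replacement for the removed loop, and the max(nent, 2) guard additionally keeps a degenerate request from producing an EQ smaller than two entries (the old loop would happily leave nent at 1). A sketch of what the removed code computed, for comparison:

    /* Smallest power of two >= nent; nent <= 1 collapses to 1. */
    static int old_eq_round(int nent)
    {
            int i;

            for (i = 1; i < nent; i <<= 1)
                    ; /* nothing */
            return i;
    }

Storing the rounded size in eq->nent up front (rather than in the local nent, with eq->nent only assigned much later) is what lets the set_eqe_hw() loop, the logsize calculation, and the debug message all read the same field.
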
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e7..5fa00669f9b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
 	wq->last_comp = wq->max - 1;
 	wq->head      = 0;
 	wq->tail      = 0;
-	wq->last      = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_TIMEOUT) {
-		qp_context->pri_path.ackto = attr->timeout;
+		qp_context->pri_path.ackto = attr->timeout << 3;
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
 	}
 
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		}
 	}
 
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
 	return 0;
 }
 
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (prev_wqe) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
 		if (!size0) {
 			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0)
 			size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0) {
 			size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
 	for (i = 0; i < 2; ++i)
 		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
 
+	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
 	mthca_alloc_cleanup(&dev->qp_table.alloc);
 }
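
Two of the mthca_qp.c changes work as a pair: priming sq.last and rq.last to the final WQE of each ring at allocation time guarantees that the "previous WQE" pointer is never NULL, which is why every if (prev_wqe) / if (likely(prev_wqe)) test could be deleted from the posting fast paths. The invariant in miniature (stub types; wq_prime()/wq_link() are illustrative, and the real driver writes nda_op and ee_nds with the wmb() in between):

    struct wqe;
    struct wq { struct wqe **ring; int max; struct wqe *last; };

    /* At allocation time, "last" already points at a real WQE... */
    static void wq_prime(struct wq *wq)
    {
            wq->last = wq->ring[wq->max - 1];
    }

    /* ...so each post links from the previous WQE unconditionally. */
    static void wq_link(struct wq *wq, struct wqe *wqe)
    {
            struct wqe *prev_wqe = wq->last;   /* never NULL after wq_prime() */

            /* driver: set prev_wqe's nda_op, wmb(), then its ee_nds */
            (void) prev_wqe;
            wq->last = wqe;
    }

The ack-timeout hunk is a separate fix: shifting attr->timeout left by 3 suggests the HCA keeps the 5-bit timeout exponent in the high bits of the ackto byte, so programming the raw verbs value had been landing in the wrong bits. The mthca_array_cleanup() addition plugs a leak of the QP array on module teardown.
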
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef12..18998d48c53e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 		scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
 	}
 
+	srq->last = get_wqe(srq, srq->max - 1);
+
 	return 0;
 }
 
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	srq->max      = attr->max_wr;
 	srq->max_gs   = attr->max_sge;
-	srq->last     = NULL;
 	srq->counter  = 0;
 
 	if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			err = -EINVAL;
 			*bad_wr = wr;
 			srq->last = prev_wqe;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << srq->wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << srq->wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD);
 
 		srq->wrid[ind] = wr->wr_id;
 		srq->first_free = next_ind;
 	}
 
-	return nreq;
-
 	if (likely(nreq)) {
 		__be32 doorbell[2];
 
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (unlikely(wr->num_sge > srq->max_gs)) {
 			err = -EINVAL;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
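
The return-to-break conversions matter because both post functions ring a doorbell after the loop for whatever was successfully posted; returning from inside the loop skipped that doorbell (and returned a request count where callers expect an error code). The shape of the fix, schematically -- queue_full(), build_wqe() and ring_doorbell() are stand-ins, not mthca helpers:

    #include <errno.h>

    struct recv_wr { struct recv_wr *next; };
    struct srq     { int head, tail, max; };

    static int  queue_full(struct srq *srq)    { return srq->head - srq->tail >= srq->max; }
    static void build_wqe(struct srq *srq, struct recv_wr *wr)  { (void) srq; (void) wr; }
    static void ring_doorbell(struct srq *srq, int nreq)        { (void) srq; (void) nreq; }

    static int post_srq_recv_sketch(struct srq *srq, struct recv_wr *wr,
                                    struct recv_wr **bad_wr)
    {
            int err = 0, nreq;

            for (nreq = 0; wr; ++nreq, wr = wr->next) {
                    if (queue_full(srq)) {
                            err     = -ENOMEM;
                            *bad_wr = wr;
                            break;           /* was: return nreq */
                    }
                    build_wqe(srq, wr);
            }

            if (nreq)                        /* reached even on error now */
                    ring_doorbell(srq, nreq);

            return err;
    }

The stray return nreq; ahead of the Tavor doorbell block was the same bug in its most visible form: the doorbell code beneath it was unreachable.
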
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191f..4ea1c1ca85bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 
 void ipoib_mcast_restart_task(void *dev_ptr);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863c..f7440096b5ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
 		flush_workqueue(ipoib_workqueue);
 	}
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/*
 	 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/* Delete the broadcast address and the local address */
 	ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92c..704f48e0b6a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1005,6 +1005,7 @@ debug_failed:
 
 register_failed:
 	ib_unregister_event_handler(&priv->event_handler);
+	flush_scheduled_work();
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
 		ib_unregister_event_handler(&priv->event_handler);
+		flush_scheduled_work();
 
 		unregister_netdev(priv->dev);
 		ipoib_dev_cleanup(priv->dev);
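
The added flush_scheduled_work() calls pair with ib_unregister_event_handler(): the IPoIB event handler defers its device-flush work to the shared kernel workqueue (presumably via schedule_work(), which is what flush_scheduled_work() drains), so unregistering alone does not stop an already-queued item from running against a device that is about to be torn down. The required ordering, as a sketch:

    ib_unregister_event_handler(&priv->event_handler); /* no new events queued */
    flush_scheduled_work();                            /* drain what's in flight */
    /* ...only now tear the device down */
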
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a69..36ce29836bf2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 
 	mcast->dev = dev;
 	mcast->created = jiffies;
-	mcast->backoff = HZ;
+	mcast->backoff = 1;
 	mcast->logcount = 0;
 
 	INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
 			IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
-		mcast->backoff = HZ;
+		mcast->backoff = 1;
 		down(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->mcast_task,
-					   mcast->backoff);
+					   mcast->backoff * HZ);
 		up(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
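
The backoff change is a units fix: the field is doubled and capped elsewhere in this file against IPOIB_MAX_BACKOFF_SECONDS, i.e. it is treated as seconds, so seeding it with HZ (jiffies) broke the exponential backoff as soon as the cap applied. Worked through, assuming HZ=1000 and the 16-second cap from ipoib.h:

    /* old: backoff = HZ = 1000 -> doubled to 2000 -> capped to 16,
     *      then passed to queue_delayed_work() as 16 *jiffies*:
     *      failed joins retried every 16 ms.
     * new: backoff = 1 (second) -> 2 -> 4 -> 8 -> 16, scheduled as
     *      backoff * HZ jiffies: a genuine 1..16 s exponential backoff. */
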
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 	return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
 	cancel_delayed_work(&priv->mcast_task);
 	up(&mcast_mutex);
 
-	flush_workqueue(ipoib_workqueue);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	if (priv->broadcast && priv->broadcast->query) {
 		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 0);
 
 	spin_lock_irqsave(&priv->lock, flags);
 
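
The new flush argument exists because ipoib_mcast_restart_task() runs on ipoib_workqueue itself: flushing the workqueue you are currently executing on would deadlock. Callers in ordinary process context (ipoib_ib_dev_down(), ipoib_ib_dev_cleanup()) pass 1 and wait; the restart task passes 0 and skips the flush:

    ipoib_mcast_stop_thread(dev, 1);   /* process context: safe to wait */
    ipoib_mcast_stop_thread(dev, 0);   /* from a work item on ipoib_workqueue:
                                          flushing here would self-deadlock */
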