author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-03-08 19:04:11 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-03-08 19:04:11 -0500
commit	eb9c4f2ef7150ea2144b53811d1cf555d8e27f69 (patch)
tree	edc3f7528f330b5343c953c4ca5976bbd7076362
parent	c5bfdb7261f2c426c730b65ce59440e8de219fda (diff)
parent	55c9adde13dfc6b738e0f70c071a0622e52f35ed (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Turn on interface's carrier after broadcast group is joined
  RDMA/ucma: Avoid sending reject if backlog is full
  RDMA/cxgb3: Fix MR permission problems
  RDMA/cxgb3: Don't reuse skbs that are non-linear or cloned
  RDMA/cxgb3: Squelch logging AE errors
  RDMA/cxgb3: Stop EP timer when MPA exchange is aborted by peer
  RDMA/cxgb3: Move QP to error on destroy if the state is IDLE
  RDMA/cxgb3: Fixes for "normal close" failures
  RDMA/cxgb3: Fix build on sparc64
  RDMA/cma: Initialize rdma_bind_list in cma_alloc_any_port()
  RDMA/cxgb3: Don't use mm after it's freed in iwch_mmap()
  RDMA/cxgb3: Start ep timer on a MPA reject
  IB/mthca: Fix error path in mthca_alloc_memfree()
  IB/ehca: Fix sync between completion handler and destroy cq
  IPoIB: Only handle async events for one port
-rw-r--r--  drivers/infiniband/core/cma.c                   |  2
-rw-r--r--  drivers/infiniband/core/ucma.c                  |  2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c          |  1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c           | 19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c           | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c     | 40
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h     | 33
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c           |  2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h       |  6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c            | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c           | 59
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c          |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c          | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c      | 13
15 files changed, 122 insertions(+), 102 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d441815a3e0c..fde92ce45153 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1821,7 +1821,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = kmalloc(sizeof *bind_list, GFP_KERNEL);
+	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b516b93b8550..c859134c1daa 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -266,7 +266,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
-			ret = -EDQUOT;
+			ret = -ENOMEM;
 			kfree(uevent);
 			goto out;
 		}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index d737c738d876..818cf1aee8c7 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -36,6 +36,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 
 #include "cxio_resource.h"
 #include "cxio_hal.h"
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b21fde8b659d..d0ed1d35ca3e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -305,8 +305,7 @@ static int status2errno(int status)
  */
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 {
-	if (skb) {
-		BUG_ON(skb_cloned(skb));
+	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
 		skb_trim(skb, 0);
 		skb_get(skb);
 	} else {
@@ -1415,6 +1414,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		attrs.next_state = IWCH_QP_STATE_CLOSING;
 		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1425,7 +1425,6 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		disconnect = 0;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		disconnect = 0;
 		break;
@@ -1487,8 +1486,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1507,9 +1508,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		get_ep(&ep->com);
 		break;
 	case MORIBUND:
+	case CLOSING:
 		stop_ep_timer(ep);
+		/*FALLTHROUGH*/
 	case FPDU_MODE:
-	case CLOSING:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
 			ret = iwch_modify_qp(ep->com.qp->rhp,
@@ -1570,7 +1572,6 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	spin_lock_irqsave(&ep->com.lock, flags);
 	switch (ep->com.state) {
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
@@ -1586,6 +1587,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
+	case ABORTING:
+		break;
 	case DEAD:
 	default:
 		BUG_ON(1);
@@ -1659,6 +1662,7 @@ static void ep_timeout(unsigned long arg)
 		break;
 	case MPA_REQ_WAIT:
 		break;
+	case CLOSING:
 	case MORIBUND:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
@@ -1687,12 +1691,11 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		return -ECONNRESET;
 	}
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
-	state_set(&ep->com, CLOSING);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	else {
 		err = send_mpa_reject(ep, pdata, pdata_len);
-		err = send_halfclose(ep, GFP_KERNEL);
+		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
 	}
 	return 0;
 }
@@ -1957,11 +1960,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		ep->com.state = CLOSING;
 		close = 1;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		ep->com.state = MORIBUND;
 		close = 1;
 		break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 54362afbf72f..b40676662a8a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -47,12 +47,6 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
 
-	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
-	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
 
@@ -73,6 +67,12 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 		return;
 	}
 
+	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
+	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
+	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
+
 	atomic_inc(&qhp->refcnt);
 	spin_unlock(&rnicp->lock);
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9947a144a929..f2774ae906bf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -331,6 +331,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	int ret = 0;
 	struct iwch_mm_entry *mm;
 	struct iwch_ucontext *ucontext;
+	u64 addr;
 
 	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
 	     key, len);
@@ -345,10 +346,11 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	mm = remove_mmap(ucontext, key, len);
 	if (!mm)
 		return -EINVAL;
+	addr = mm->addr;
 	kfree(mm);
 
-	if ((mm->addr >= rdev_p->rnic_info.udbell_physbase) &&
-	    (mm->addr < (rdev_p->rnic_info.udbell_physbase +
+	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
+	    (addr < (rdev_p->rnic_info.udbell_physbase +
 	     rdev_p->rnic_info.udbell_len))) {
 
 		/*
@@ -362,7 +364,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 		vma->vm_flags &= ~VM_MAYREAD;
 		ret = io_remap_pfn_range(vma, vma->vm_start,
-					 mm->addr >> PAGE_SHIFT,
+					 addr >> PAGE_SHIFT,
 					 len, vma->vm_page_prot);
 	} else {
 
@@ -370,7 +372,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		 * Map WQ or CQ contig dma memory...
 		 */
 		ret = remap_pfn_range(vma, vma->vm_start,
-				      mm->addr >> PAGE_SHIFT,
+				      addr >> PAGE_SHIFT,
 				      len, vma->vm_page_prot);
 	}
 
@@ -463,9 +465,6 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 
-	acc = iwch_convert_access(acc);
-
-
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
@@ -491,12 +490,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
 
-	/* NOTE: TPT perms are backwards from BIND WR perms! */
-	mhp->attr.perms = (acc & 0x1) << 3;
-	mhp->attr.perms |= (acc & 0x2) << 1;
-	mhp->attr.perms |= (acc & 0x4) >> 1;
-	mhp->attr.perms |= (acc & 0x8) >> 3;
-
+	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = *iova_start;
 	mhp->attr.page_size = shift - 12;
 
@@ -525,7 +519,6 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	struct iwch_mr mh, *mhp;
 	struct iwch_pd *php;
 	struct iwch_dev *rhp;
-	int new_acc;
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
@@ -546,14 +539,12 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	if (rhp != php->rhp)
 		return -EINVAL;
 
-	new_acc = mhp->attr.perms;
-
 	memcpy(&mh, mhp, sizeof *mhp);
 
 	if (mr_rereg_mask & IB_MR_REREG_PD)
 		php = to_iwch_pd(pd);
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
-		mh.attr.perms = iwch_convert_access(acc);
+		mh.attr.perms = iwch_ib_to_tpt_access(acc);
 	if (mr_rereg_mask & IB_MR_REREG_TRANS)
 		ret = build_phys_page_list(buffer_list, num_phys_buf,
 					   iova_start,
@@ -568,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	if (mr_rereg_mask & IB_MR_REREG_PD)
 		mhp->attr.pdid = php->pdid;
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
-		mhp->attr.perms = acc;
+		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
 		mhp->attr.zbva = 0;
 		mhp->attr.va_fbo = *iova_start;
@@ -613,8 +604,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 		goto err;
 	}
 
-	acc = iwch_convert_access(acc);
-
 	i = n = 0;
 
 	list_for_each_entry(chunk, &region->chunk_list, list)
@@ -630,10 +619,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
-	mhp->attr.perms = (acc & 0x1) << 3;
-	mhp->attr.perms |= (acc & 0x2) << 1;
-	mhp->attr.perms |= (acc & 0x4) >> 1;
-	mhp->attr.perms |= (acc & 0x8) >> 3;
+	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = region->virt_base;
 	mhp->attr.page_size = shift - 12;
 	mhp->attr.len = (u32) region->length;
@@ -736,10 +722,8 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
 	qhp = to_iwch_qp(ib_qp);
 	rhp = qhp->rhp;
 
-	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
-		attrs.next_state = IWCH_QP_STATE_ERROR;
-		iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
-	}
+	attrs.next_state = IWCH_QP_STATE_ERROR;
+	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index de0fe1b93a0c..93bcc56756bd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -286,27 +286,20 @@ static inline int iwch_convert_state(enum ib_qp_state ib_state)
 	}
 }
 
-enum iwch_mem_perms {
-	IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
-	IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
-	IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
-	IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
-	IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
-	IWCH_MEM_ACCESS_BINDING = 1 << 5,
-	IWCH_MEM_ACCESS_LOCAL =
-	    (IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
-	IWCH_MEM_ACCESS_REMOTE =
-	    (IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
-	/* cannot go beyond 1 << 31 */
-} __attribute__ ((packed));
-
-static inline u32 iwch_convert_access(int acc)
+static inline u32 iwch_ib_to_tpt_access(int acc)
 {
-	return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0)
-	    | (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) |
-	    (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) |
-	    (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) |
-	    IWCH_MEM_ACCESS_LOCAL_READ;
+	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
+	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+	       TPT_LOCAL_READ;
+}
+
+static inline u32 iwch_ib_to_mwbind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
+	       (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
+	       T3_MEM_ACCESS_LOCAL_READ;
 }
 
 enum iwch_mmid_state {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9ea00cc4a5f8..0a472c9b44db 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -439,7 +439,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	wqe->bind.type = T3_VA_BASED_TO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_convert_access(mw_bind->mw_access_flags);
+	wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
 	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
 	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 40404c9e2817..82ded44c6cee 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -52,6 +52,8 @@ struct ehca_mw;
 struct ehca_pd;
 struct ehca_av;
 
+#include <linux/wait.h>
+
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
@@ -153,7 +155,9 @@ struct ehca_cq {
 	spinlock_t cb_lock;
 	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
 	struct list_head entry;
-	u32 nr_callbacks;
+	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
+	u32 nr_events;      /* #events seen */
+	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
 	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6ebfa27e4e16..e2cdc1a16fe9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	spin_lock_init(&my_cq->spinlock);
 	spin_lock_init(&my_cq->cb_lock);
 	spin_lock_init(&my_cq->task_lock);
+	init_waitqueue_head(&my_cq->wait_completion);
 	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
@@ -302,6 +303,16 @@ create_cq_exit1:
 	return cq;
 }
 
+static int get_cq_nr_events(struct ehca_cq *my_cq)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	ret = my_cq->nr_events;
+	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	return ret;
+}
+
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
@@ -329,10 +340,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	}
 
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-	while (my_cq->nr_callbacks) {
+	while (my_cq->nr_events) {
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		yield();
+		wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		/* recheck nr_events to assure no cqe has just arrived */
 	}
 
 	idr_remove(&ehca_cq_idr, my_cq->token);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d08..20f36bf8b2b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -404,10 +404,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	u32 token;
 	unsigned long flags;
 	struct ehca_cq *cq;
+
 	eqe_value = eqe->entry;
 	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "... completion event");
+		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +420,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 			return;
 		}
 		reset_eq_pending(cq);
-		if (ehca_scaling_code) {
+		cq->nr_events++;
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		if (ehca_scaling_code)
 			queue_comp_task(cq);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		else {
 			comp_event_callback(cq);
+			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			cq->nr_events--;
+			if (!cq->nr_events)
+				wake_up(&cq->wait_completion);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
-		ehca_dbg(&shca->ib_device,
-			 "Got non completion event");
+		ehca_dbg(&shca->ib_device, "Got non completion event");
 		parse_identifier(shca, eqe_value);
 	}
 }
@@ -478,6 +483,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
478 "token=%x", token); 483 "token=%x", token);
479 continue; 484 continue;
480 } 485 }
486 eqe_cache[eqe_cnt].cq->nr_events++;
481 spin_unlock(&ehca_cq_idr_lock); 487 spin_unlock(&ehca_cq_idr_lock);
482 } else 488 } else
483 eqe_cache[eqe_cnt].cq = NULL; 489 eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +510,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	/* call completion handler for cached eqes */
 	for (i = 0; i < eqe_cnt; i++)
 		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code) {
-				spin_lock(&ehca_cq_idr_lock);
+			if (ehca_scaling_code)
 				queue_comp_task(eq->eqe_cache[i].cq);
-				spin_unlock(&ehca_cq_idr_lock);
-			} else
-				comp_event_callback(eq->eqe_cache[i].cq);
+			else {
+				struct ehca_cq *cq = eq->eqe_cache[i].cq;
+				comp_event_callback(cq);
+				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				cq->nr_events--;
+				if (!cq->nr_events)
+					wake_up(&cq->wait_completion);
+				spin_unlock_irqrestore(&ehca_cq_idr_lock,
+						       flags);
+			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
 			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +535,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
-		eqe_cnt++;
 	} while (1);
 
 unlock_irq_spinlock:
@@ -567,8 +578,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 		list_add_tail(&__cq->entry, &cct->cq_list);
 		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
-	}
-	else
+	} else
 		__cq->nr_callbacks++;
 
 	spin_unlock(&__cq->task_lock);
@@ -577,18 +587,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-	int cpu;
 	int cpu_id;
 	struct ehca_cpu_comp_task *cct;
+	int cq_jobs;
+	unsigned long flags;
 
-	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 	BUG_ON(!cct);
 
-	if (cct->cq_jobs > 0) {
+	spin_lock_irqsave(&cct->task_lock, flags);
+	cq_jobs = cct->cq_jobs;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
+	if (cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 		BUG_ON(!cct);
@@ -608,11 +621,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags);
 
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq->nr_events--;
+		if (!cq->nr_events)
+			wake_up(&cq->wait_completion);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0) {
+		if (!cq->nr_callbacks) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c1835121a822..059da9628bb5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0021");
+MODULE_VERSION("SVNEHCA_0022");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -810,7 +810,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0021)\n");
+	       "(Rel.: SVNEHCA_0022)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 71dc84bd4254..1c6b63aca268 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1088,21 +1088,21 @@ static void mthca_unmap_memfree(struct mthca_dev *dev,
 static int mthca_alloc_memfree(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	int ret = 0;
-
 	if (mthca_is_memfree(dev)) {
 		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
 						 qp->qpn, &qp->rq.db);
 		if (qp->rq.db_index < 0)
-			return ret;
+			return -ENOMEM;
 
 		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
 						 qp->qpn, &qp->sq.db);
-		if (qp->sq.db_index < 0)
+		if (qp->sq.db_index < 0) {
 			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+			return -ENOMEM;
+		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static void mthca_free_memfree(struct mthca_dev *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bb2e3d5eee20..56c87a81bb67 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -407,6 +407,10 @@ static int ipoib_mcast_join_complete(int status,
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->mcast_task, 0);
 		mutex_unlock(&mcast_mutex);
+
+		if (mcast == priv->broadcast)
+			netif_carrier_on(dev);
+
 		return 0;
 	}
 
@@ -594,7 +598,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
 
 	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
-	netif_carrier_on(dev);
 }
 
 int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 3cb551b88756..7f3ec205e35f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -259,12 +259,13 @@ void ipoib_event(struct ib_event_handler *handler,
 	struct ipoib_dev_priv *priv =
 		container_of(handler, struct ipoib_dev_priv, event_handler);
 
-	if (record->event == IB_EVENT_PORT_ERR ||
+	if ((record->event == IB_EVENT_PORT_ERR ||
 	    record->event == IB_EVENT_PKEY_CHANGE ||
 	    record->event == IB_EVENT_PORT_ACTIVE ||
 	    record->event == IB_EVENT_LID_CHANGE ||
 	    record->event == IB_EVENT_SM_CHANGE ||
-	    record->event == IB_EVENT_CLIENT_REREGISTER) {
+	    record->event == IB_EVENT_CLIENT_REREGISTER) &&
+	    record->element.port_num == priv->port) {
 		ipoib_dbg(priv, "Port state change event\n");
 		queue_work(ipoib_workqueue, &priv->flush_task);
 	}