Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |  49
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         | 206
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 118
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |   7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c      |  40
6 files changed, 325 insertions(+), 97 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 87310eeb6df0..285c143115cc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -132,12 +132,46 @@ struct ipoib_cm_data {
 	__be32 mtu;
 };
 
+/*
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State.  The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ *       to be empty or the number of Poll CQ operations has exceeded
+ *       CQ capacity size;
+ *   - or
+ *       post another WR that completes on the same CQ and wait for this
+ *       WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the second option and wait for a completion on the
+ * same CQ before destroying QPs attached to our SRQ.
+ */
+
+enum ipoib_cm_state {
+	IPOIB_CM_RX_LIVE,
+	IPOIB_CM_RX_ERROR, /* Ignored by stale task */
+	IPOIB_CM_RX_FLUSH  /* Last WQE Reached event observed */
+};
+
 struct ipoib_cm_rx {
 	struct ib_cm_id *id;
 	struct ib_qp *qp;
 	struct list_head list;
 	struct net_device *dev;
 	unsigned long jiffies;
+	enum ipoib_cm_state state;
 };
 
 struct ipoib_cm_tx {
@@ -165,10 +199,15 @@ struct ipoib_cm_dev_priv {
 	struct ib_srq *srq;
 	struct ipoib_cm_rx_buf *srq_ring;
 	struct ib_cm_id *id;
-	struct list_head passive_ids;
+	struct list_head passive_ids;  /* state: LIVE */
+	struct list_head rx_error_list; /* state: ERROR */
+	struct list_head rx_flush_list; /* state: FLUSH, drain not started */
+	struct list_head rx_drain_list; /* state: FLUSH, drain started */
+	struct list_head rx_reap_list;  /* state: FLUSH, drain done */
 	struct work_struct start_task;
 	struct work_struct reap_task;
 	struct work_struct skb_task;
+	struct work_struct rx_reap_task;
 	struct delayed_work stale_task;
 	struct sk_buff_head skb_queue;
 	struct list_head start_list;
@@ -201,15 +240,17 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct delayed_work pkey_task;
+	struct delayed_work pkey_poll_task;
 	struct delayed_work mcast_task;
 	struct work_struct flush_task;
 	struct work_struct restart_task;
 	struct delayed_work ah_reap_task;
+	struct work_struct pkey_event_task;
 
 	struct ib_device *ca;
 	u8 port;
 	u16 pkey;
+	u16 pkey_index;
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	struct ib_cq *cq;
@@ -333,12 +374,13 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_ib_dev_flush(struct work_struct *work);
+void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
 int ipoib_ib_dev_up(struct net_device *dev);
 int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
@@ -386,6 +428,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
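The comment block added to ipoib.h above describes the teardown procedure for a QP attached to an SRQ: move the QP to the Error state, wait for the Last WQE Reached event, then post one more send WR on the same CQ and treat its completion as the point after which the QP may safely be destroyed. A minimal, stand-alone sketch of that sequence (the helper names and the DRAIN_WRID value here are illustrative, not part of the driver) looks roughly like this:

#include <rdma/ib_verbs.h>

/* Sentinel wr_id recognised later in the CQ handler; the value is
 * illustrative (IPoIB uses IPOIB_CM_RX_DRAIN_WRID for this purpose). */
#define DRAIN_WRID 0x7fffffff

static struct ib_qp_attr drain_err_attr = { .qp_state = IB_QPS_ERR };
static struct ib_send_wr drain_wr = {
	.wr_id  = DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

/* Step 1: move the QP to the Error state.  The Last WQE Reached event
 * then arrives asynchronously through the QP's event handler. */
static int drain_step_error(struct ib_qp *qp)
{
	return ib_modify_qp(qp, &drain_err_attr, IB_QP_STATE);
}

/* Step 2, run once IB_EVENT_QP_LAST_WQE_REACHED has been seen: post a
 * send WR that completes (as a flush error) on the same CQ.  Once its
 * wr_id shows up in a work completion, no further completions for this
 * QP can arrive and ib_destroy_qp() is safe. */
static int drain_step_post(struct ib_qp *qp)
{
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &drain_wr, &bad_wr);
}

In the patch itself, step 1 happens in ipoib_cm_dev_stop() and ipoib_cm_stale_task(), step 2 in ipoib_cm_start_rx_drain() driven by ipoib_cm_rx_event_handler(), and the drain completion is recognised in ipoib_cm_handle_rx_wc().
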
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index eec833b81e9b..076a0bbb63d7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -37,6 +37,7 @@
 #include <net/dst.h>
 #include <net/icmp.h>
 #include <linux/icmpv6.h>
+#include <linux/delay.h>
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
 static int data_debug_level;
@@ -62,6 +63,17 @@ struct ipoib_cm_id {
 	u32 remote_mtu;
 };
 
+static struct ib_qp_attr ipoib_cm_err_attr = {
+	.qp_state = IB_QPS_ERR
+};
+
+#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff
+
+static struct ib_send_wr ipoib_cm_rx_drain_wr = {
+	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
+	.opcode = IB_WR_SEND,
+};
+
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			       struct ib_cm_event *event);
 
@@ -150,15 +162,54 @@ partial_error:
 	return NULL;
 }
 
+static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
+{
+	struct ib_send_wr *bad_wr;
+	struct ipoib_cm_rx *p;
+
+	/* We only reserved 1 extra slot in CQ for drain WRs, so
+	 * make sure we have at most 1 outstanding WR. */
+	if (list_empty(&priv->cm.rx_flush_list) ||
+	    !list_empty(&priv->cm.rx_drain_list))
+		return;
+
+	/*
+	 * QPs on flush list are error state. This way, a "flush
+	 * error" WC will be immediately generated for each WR we post.
+	 */
+	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
+	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
+		ipoib_warn(priv, "failed to post drain wr\n");
+
+	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
+}
+
+static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
+{
+	struct ipoib_cm_rx *p = ctx;
+	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
+	unsigned long flags;
+
+	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_move(&p->list, &priv->cm.rx_flush_list);
+	p->state = IPOIB_CM_RX_FLUSH;
+	ipoib_cm_start_rx_drain(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
 					   struct ipoib_cm_rx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_init_attr attr = {
-		.send_cq = priv->cq, /* does not matter, we never send anything */
+		.event_handler = ipoib_cm_rx_event_handler,
+		.send_cq = priv->cq, /* For drain WR */
 		.recv_cq = priv->cq,
 		.srq = priv->cm.srq,
-		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
+		.cap.max_send_wr = 1, /* For drain WR */
 		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
 		.sq_sig_type = IB_SIGNAL_ALL_WR,
 		.qp_type = IB_QPT_RC,
@@ -198,6 +249,27 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
 		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
 		return ret;
 	}
+
+	/*
+	 * Current Mellanox HCA firmware won't generate completions
+	 * with error for drain WRs unless the QP has been moved to
+	 * RTS first. This work-around leaves a window where a QP has
+	 * moved to error asynchronously, but this will eventually get
+	 * fixed in firmware, so let's not error out if modify QP
+	 * fails.
+	 */
+	qp_attr.qp_state = IB_QPS_RTS;
+	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+	if (ret) {
+		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+		return 0;
+	}
+	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+	if (ret) {
+		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+		return 0;
+	}
+
 	return 0;
 }
 
@@ -256,6 +328,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
 	cm_id->context = p;
 	p->jiffies = jiffies;
+	p->state = IPOIB_CM_RX_LIVE;
 	spin_lock_irq(&priv->lock);
 	if (list_empty(&priv->cm.passive_ids))
 		queue_delayed_work(ipoib_workqueue,
@@ -277,7 +350,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 {
 	struct ipoib_cm_rx *p;
 	struct ipoib_dev_priv *priv;
-	int ret;
 
 	switch (event->event) {
 	case IB_CM_REQ_RECEIVED:
@@ -289,20 +361,9 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REJ_RECEIVED:
 		p = cm_id->context;
 		priv = netdev_priv(p->dev);
-		spin_lock_irq(&priv->lock);
-		if (list_empty(&p->list))
-			ret = 0; /* Connection is going away already. */
-		else {
-			list_del_init(&p->list);
-			ret = -ECONNRESET;
-		}
-		spin_unlock_irq(&priv->lock);
-		if (ret) {
-			ib_destroy_qp(p->qp);
-			kfree(p);
-			return ret;
-		}
-		return 0;
+		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+			ipoib_warn(priv, "unable to move qp to error state\n");
+		/* Fall through */
 	default:
 		return 0;
 	}
@@ -354,8 +415,15 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		       wr_id, wc->status);
 
 	if (unlikely(wr_id >= ipoib_recvq_size)) {
-		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
-			   wr_id, ipoib_recvq_size);
+		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
+			spin_lock_irqsave(&priv->lock, flags);
+			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
+			ipoib_cm_start_rx_drain(priv);
+			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+			spin_unlock_irqrestore(&priv->lock, flags);
+		} else
+			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
+				   wr_id, ipoib_recvq_size);
 		return;
 	}
 
@@ -374,9 +442,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		p->jiffies = jiffies;
-		/* Move this entry to list head, but do
-		 * not re-add it if it has been removed. */
-		if (!list_empty(&p->list))
+		/* Move this entry to list head, but do not re-add it
+		 * if it has been moved out of list. */
+		if (p->state == IPOIB_CM_RX_LIVE)
 			list_move(&p->list, &priv->cm.passive_ids);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
@@ -592,8 +660,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
 	if (IS_ERR(priv->cm.id)) {
 		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
 		ret = PTR_ERR(priv->cm.id);
-		priv->cm.id = NULL;
-		return ret;
+		goto err_cm;
 	}
 
 	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
@@ -601,34 +668,76 @@ int ipoib_cm_dev_open(struct net_device *dev)
 	if (ret) {
 		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
 		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
-		ib_destroy_cm_id(priv->cm.id);
-		priv->cm.id = NULL;
-		return ret;
+		goto err_listen;
 	}
+
 	return 0;
+
+err_listen:
+	ib_destroy_cm_id(priv->cm.id);
+err_cm:
+	priv->cm.id = NULL;
+	return ret;
 }
 
 void ipoib_cm_dev_stop(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_cm_rx *p;
+	struct ipoib_cm_rx *p, *n;
+	unsigned long begin;
+	LIST_HEAD(list);
+	int ret;
 
 	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
 		return;
 
 	ib_destroy_cm_id(priv->cm.id);
 	priv->cm.id = NULL;
+
 	spin_lock_irq(&priv->lock);
 	while (!list_empty(&priv->cm.passive_ids)) {
 		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
-		list_del_init(&p->list);
+		list_move(&p->list, &priv->cm.rx_error_list);
+		p->state = IPOIB_CM_RX_ERROR;
+		spin_unlock_irq(&priv->lock);
+		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+		if (ret)
+			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
+		spin_lock_irq(&priv->lock);
+	}
+
+	/* Wait for all RX to be drained */
+	begin = jiffies;
+
+	while (!list_empty(&priv->cm.rx_error_list) ||
+	       !list_empty(&priv->cm.rx_flush_list) ||
+	       !list_empty(&priv->cm.rx_drain_list)) {
+		if (time_after(jiffies, begin + 5 * HZ)) {
+			ipoib_warn(priv, "RX drain timing out\n");
+
+			/*
+			 * assume the HW is wedged and just free up everything.
+			 */
+			list_splice_init(&priv->cm.rx_flush_list, &list);
+			list_splice_init(&priv->cm.rx_error_list, &list);
+			list_splice_init(&priv->cm.rx_drain_list, &list);
+			break;
+		}
 		spin_unlock_irq(&priv->lock);
+		msleep(1);
+		ipoib_drain_cq(dev);
+		spin_lock_irq(&priv->lock);
+	}
+
+	list_splice_init(&priv->cm.rx_reap_list, &list);
+
+	spin_unlock_irq(&priv->lock);
+
+	list_for_each_entry_safe(p, n, &list, list) {
 		ib_destroy_cm_id(p->id);
 		ib_destroy_qp(p->qp);
 		kfree(p);
-		spin_lock_irq(&priv->lock);
 	}
-	spin_unlock_irq(&priv->lock);
 
 	cancel_delayed_work(&priv->cm.stale_task);
 }
@@ -1079,24 +1188,44 @@ void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
 	queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
+static void ipoib_cm_rx_reap(struct work_struct *work)
+{
+	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+						   cm.rx_reap_task);
+	struct ipoib_cm_rx *p, *n;
+	LIST_HEAD(list);
+
+	spin_lock_irq(&priv->lock);
+	list_splice_init(&priv->cm.rx_reap_list, &list);
+	spin_unlock_irq(&priv->lock);
+
+	list_for_each_entry_safe(p, n, &list, list) {
+		ib_destroy_cm_id(p->id);
+		ib_destroy_qp(p->qp);
+		kfree(p);
+	}
+}
+
 static void ipoib_cm_stale_task(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.stale_task.work);
 	struct ipoib_cm_rx *p;
+	int ret;
 
 	spin_lock_irq(&priv->lock);
 	while (!list_empty(&priv->cm.passive_ids)) {
-		/* List if sorted by LRU, start from tail,
+		/* List is sorted by LRU, start from tail,
 		 * stop when we see a recently used entry */
 		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
 		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
 			break;
-		list_del_init(&p->list);
+		list_move(&p->list, &priv->cm.rx_error_list);
+		p->state = IPOIB_CM_RX_ERROR;
 		spin_unlock_irq(&priv->lock);
-		ib_destroy_cm_id(p->id);
-		ib_destroy_qp(p->qp);
-		kfree(p);
+		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+		if (ret)
+			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
 		spin_lock_irq(&priv->lock);
 	}
 
@@ -1164,9 +1293,14 @@ int ipoib_cm_dev_init(struct net_device *dev)
 	INIT_LIST_HEAD(&priv->cm.passive_ids);
 	INIT_LIST_HEAD(&priv->cm.reap_list);
 	INIT_LIST_HEAD(&priv->cm.start_list);
+	INIT_LIST_HEAD(&priv->cm.rx_error_list);
+	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
+	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
+	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
 	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
 	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
 	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
+	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
 	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
 
 	skb_queue_head_init(&priv->cm.skb_queue);
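The ipoib_cm_rx_reap() handler added above follows a common deferred-teardown pattern: dead connections are parked on a reap list under the spinlock, and the ib_destroy_cm_id()/ib_destroy_qp() calls, which may sleep, run later from workqueue context with the lock dropped. A generic sketch of that pattern (the my_* names are illustrative only, not part of the driver):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

struct my_conn {
	struct list_head list;
	struct ib_cm_id *id;
	struct ib_qp *qp;
};

struct my_dev {
	spinlock_t lock;
	struct list_head reap_list;	/* my_conn entries awaiting teardown */
	struct work_struct reap_task;
};

static void my_conn_reap(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, reap_task);
	struct my_conn *c, *n;
	LIST_HEAD(list);

	/* Take the whole list in one shot so the lock is held only briefly. */
	spin_lock_irq(&dev->lock);
	list_splice_init(&dev->reap_list, &list);
	spin_unlock_irq(&dev->lock);

	/* Tear down outside the lock: these calls may sleep. */
	list_for_each_entry_safe(c, n, &list, list) {
		ib_destroy_cm_id(c->id);
		ib_destroy_qp(c->qp);
		kfree(c);
	}
}
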
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 68d72c6f7ffb..8404f05b2b6e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -448,6 +448,13 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
 
+	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
+		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
+		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+		return -1;
+	}
+	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
 	ret = ipoib_init_qp(dev);
 	if (ret) {
 		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
@@ -457,14 +464,14 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	ret = ipoib_ib_post_receives(dev);
 	if (ret) {
 		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
-		ipoib_ib_dev_stop(dev);
+		ipoib_ib_dev_stop(dev, 1);
 		return -1;
 	}
 
 	ret = ipoib_cm_dev_open(dev);
 	if (ret) {
-		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
-		ipoib_ib_dev_stop(dev);
+		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
+		ipoib_ib_dev_stop(dev, 1);
 		return -1;
 	}
 
@@ -516,7 +523,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
 		mutex_lock(&pkey_mutex);
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
-		cancel_delayed_work(&priv->pkey_task);
+		cancel_delayed_work(&priv->pkey_poll_task);
 		mutex_unlock(&pkey_mutex);
 		if (flush)
 			flush_workqueue(ipoib_workqueue);
@@ -543,13 +550,30 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+void ipoib_drain_cq(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int i, n;
+	do {
+		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		for (i = 0; i < n; ++i) {
+			if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
+				ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+			else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
+				ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+			else
+				ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+		}
+	} while (n == IPOIB_NUM_WC);
+}
+
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
-	int i, n;
+	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 	netif_poll_disable(dev);
@@ -604,17 +628,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 			goto timeout;
 		}
 
-		do {
-			n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
-			for (i = 0; i < n; ++i) {
-				if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
-					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
-				else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
-					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
-		} while (n == IPOIB_NUM_WC);
+		ipoib_drain_cq(dev);
 
 		msleep(1);
 	}
@@ -629,7 +643,8 @@ timeout:
 	/* Wait for all AHs to be reaped */
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
-	flush_workqueue(ipoib_workqueue);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	begin = jiffies;
 
@@ -673,13 +688,24 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	return 0;
 }
 
-void ipoib_ib_dev_flush(struct work_struct *work)
+static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
 {
-	struct ipoib_dev_priv *cpriv, *priv =
-		container_of(work, struct ipoib_dev_priv, flush_task);
+	struct ipoib_dev_priv *cpriv;
 	struct net_device *dev = priv->dev;
+	u16 new_index;
+
+	mutex_lock(&priv->vlan_mutex);
+
+	/*
+	 * Flush any child interfaces too -- they might be up even if
+	 * the parent is down.
+	 */
+	list_for_each_entry(cpriv, &priv->child_intfs, list)
+		__ipoib_ib_dev_flush(cpriv, pkey_event);
 
-	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
+	mutex_unlock(&priv->vlan_mutex);
+
+	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
 	}
@@ -689,10 +715,32 @@ void ipoib_ib_dev_flush(struct work_struct *work)
 		return;
 	}
 
+	if (pkey_event) {
+		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+			ipoib_ib_dev_down(dev, 0);
+			ipoib_pkey_dev_delay_open(dev);
+			return;
+		}
+		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+		/* restart QP only if P_Key index is changed */
+		if (new_index == priv->pkey_index) {
+			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
+			return;
+		}
+		priv->pkey_index = new_index;
+	}
+
 	ipoib_dbg(priv, "flushing\n");
 
 	ipoib_ib_dev_down(dev, 0);
 
+	if (pkey_event) {
+		ipoib_ib_dev_stop(dev, 0);
+		ipoib_ib_dev_open(dev);
+	}
+
 	/*
 	 * The device could have been brought down between the start and when
 	 * we get here, don't bring it back up if it's not configured up
@@ -701,14 +749,24 @@ void ipoib_ib_dev_flush(struct work_struct *work)
 		ipoib_ib_dev_up(dev);
 		ipoib_mcast_restart_task(&priv->restart_task);
 	}
+}
 
-	mutex_lock(&priv->vlan_mutex);
+void ipoib_ib_dev_flush(struct work_struct *work)
+{
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, flush_task);
 
-	/* Flush any child interfaces too */
-	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		ipoib_ib_dev_flush(&cpriv->flush_task);
+	ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
+	__ipoib_ib_dev_flush(priv, 0);
+}
 
-	mutex_unlock(&priv->vlan_mutex);
+void ipoib_pkey_event(struct work_struct *work)
+{
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, pkey_event_task);
+
+	ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
+	__ipoib_ib_dev_flush(priv, 1);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -736,7 +794,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 void ipoib_pkey_poll(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv =
-		container_of(work, struct ipoib_dev_priv, pkey_task.work);
+		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
 	struct net_device *dev = priv->dev;
 
 	ipoib_pkey_dev_check_presence(dev);
@@ -747,7 +805,7 @@ void ipoib_pkey_poll(struct work_struct *work)
 		mutex_lock(&pkey_mutex);
 		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
-					   &priv->pkey_task,
+					   &priv->pkey_poll_task,
 					   HZ);
 		mutex_unlock(&pkey_mutex);
 	}
@@ -766,7 +824,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev)
 	mutex_lock(&pkey_mutex);
 	clear_bit(IPOIB_PKEY_STOP, &priv->flags);
 	queue_delayed_work(ipoib_workqueue,
-			   &priv->pkey_task,
+			   &priv->pkey_poll_task,
 			   HZ);
 	mutex_unlock(&pkey_mutex);
 	return 1;
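The P_Key handling added above hinges on one detail: what matters to the UD QP is not the P_Key value but its index in the port's P_Key table, since that index is baked into the QP at INIT time (see the ipoib_init_qp() change below). A P_Key change event may therefore mean the P_Key moved to a different index, disappeared from the table, or that an unrelated entry changed; only the first two require action. A stand-alone sketch of that re-check (the helper name is illustrative, not part of the driver):

/* Illustrative helper (not in the driver): decide whether a P_Key
 * change event requires the interface's QP to be restarted. */
static int pkey_needs_restart(struct ib_device *ca, u8 port,
			      u16 pkey, u16 *cached_index)
{
	u16 new_index;

	if (ib_find_pkey(ca, port, pkey, &new_index))
		return 1;		/* P_Key vanished from the table */

	if (new_index == *cached_index)
		return 0;		/* unrelated table change, nothing to do */

	*cached_index = new_index;	/* index moved: remember it and restart */
	return 1;
}

In the patch, the "vanished" case tears the interface down and falls back to ipoib_pkey_dev_delay_open(), while a moved index leads to ipoib_ib_dev_stop()/ipoib_ib_dev_open() so the QP is re-created against the new index.
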
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0a428f2b05c7..894b1dcdf3eb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -107,7 +107,7 @@ int ipoib_open(struct net_device *dev)
 		return -EINVAL;
 
 	if (ipoib_ib_dev_up(dev)) {
-		ipoib_ib_dev_stop(dev);
+		ipoib_ib_dev_stop(dev, 1);
 		return -EINVAL;
 	}
 
@@ -152,7 +152,7 @@ static int ipoib_stop(struct net_device *dev)
 	flush_workqueue(ipoib_workqueue);
 
 	ipoib_ib_dev_down(dev, 1);
-	ipoib_ib_dev_stop(dev);
+	ipoib_ib_dev_stop(dev, 1);
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -988,7 +988,8 @@ static void ipoib_setup(struct net_device *dev)
 	INIT_LIST_HEAD(&priv->dead_ahs);
 	INIT_LIST_HEAD(&priv->multicast_list);
 
-	INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
+	INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
 	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
 	INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
 	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 54fbead4de01..aae367057a56 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -524,7 +524,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
 		return;
 
 	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
-		ipoib_warn(priv, "ib_gid_entry_get() failed\n");
+		ipoib_warn(priv, "ib_query_gid() failed\n");
 	else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 5c3c6a43a52b..982eb88e27ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -33,8 +33,6 @@
  * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <rdma/ib_cache.h>
-
 #include "ipoib.h"
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
@@ -49,7 +47,7 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	if (!qp_attr)
 		goto out;
 
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
+	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 		ret = -ENXIO;
 		goto out;
@@ -94,26 +92,16 @@ int ipoib_init_qp(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
-	u16 pkey_index;
 	struct ib_qp_attr qp_attr;
 	int attr_mask;
 
-	/*
-	 * Search through the port P_Key table for the requested pkey value.
-	 * The port has to be assigned to the respective IB partition in
-	 * advance.
-	 */
-	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index);
-	if (ret) {
-		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-		return ret;
-	}
-	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+		return -1;
 
 	qp_attr.qp_state = IB_QPS_INIT;
 	qp_attr.qkey = 0;
 	qp_attr.port_num = priv->port;
-	qp_attr.pkey_index = pkey_index;
+	qp_attr.pkey_index = priv->pkey_index;
 	attr_mask =
 		IB_QP_QKEY |
 		IB_QP_PORT |
@@ -185,7 +173,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	size = ipoib_sendq_size + ipoib_recvq_size + 1;
 	ret = ipoib_cm_dev_init(dev);
 	if (!ret)
-		size += ipoib_recvq_size;
+		size += ipoib_recvq_size + 1 /* 1 extra for rx_drain_qp */;
 
 	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
 	if (IS_ERR(priv->cq)) {
@@ -259,14 +247,18 @@ void ipoib_event(struct ib_event_handler *handler,
 	struct ipoib_dev_priv *priv =
 		container_of(handler, struct ipoib_dev_priv, event_handler);
 
-	if ((record->event == IB_EVENT_PORT_ERR ||
-	     record->event == IB_EVENT_PKEY_CHANGE ||
-	     record->event == IB_EVENT_PORT_ACTIVE ||
-	     record->event == IB_EVENT_LID_CHANGE ||
-	     record->event == IB_EVENT_SM_CHANGE ||
-	     record->event == IB_EVENT_CLIENT_REREGISTER) &&
-	    record->element.port_num == priv->port) {
+	if (record->element.port_num != priv->port)
+		return;
+
+	if (record->event == IB_EVENT_PORT_ERR ||
+	    record->event == IB_EVENT_PORT_ACTIVE ||
+	    record->event == IB_EVENT_LID_CHANGE ||
+	    record->event == IB_EVENT_SM_CHANGE ||
+	    record->event == IB_EVENT_CLIENT_REREGISTER) {
 		ipoib_dbg(priv, "Port state change event\n");
 		queue_work(ipoib_workqueue, &priv->flush_task);
+	} else if (record->event == IB_EVENT_PKEY_CHANGE) {
+		ipoib_dbg(priv, "P_Key change event on port:%d\n", priv->port);
+		queue_work(ipoib_workqueue, &priv->pkey_event_task);
 	}
 }