path: root/drivers/infiniband/ulp
author     Michael S. Tsirkin <mst@dev.mellanox.co.il>  2007-05-14 00:26:51 -0400
committer  Roland Dreier <rolandd@cisco.com>             2007-05-14 17:11:01 -0400
commit     7c5b9ef8577bfa7b74ea58fc9ff2934ffce13532 (patch)
tree       4a39ad8609225261bc17f4acc16447944d875a12 /drivers/infiniband/ulp
parent     bd18c112774db5bb887adb981ffbe9bfe00b2f3a (diff)
IPoIB/cm: Optimize stale connection detection
In the presence of some running RX connections, we repeat the queue_delayed_work call every 4 RX WRs, which is wasteful. It's enough to start the stale task when the first passive connection is added, and to rerun it every IPOIB_CM_RX_DELAY as long as there are outstanding passive connections. This removes some code from the RX data path.

Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
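For illustration, the rescheduling pattern this commit moves to can be sketched as a generic self-rearming delayed work item. The names below (my_dev, my_conn list, my_add_conn, my_stale_task, MY_SCAN_DELAY) are hypothetical and not part of the IPoIB driver; only the arming pattern mirrors the commit: arm the scanner on the empty-to-non-empty transition of the connection list, and re-arm it from the work handler while the list stays non-empty, so the hot RX path never has to call queue_delayed_work.

/* Minimal sketch of a self-rearming stale-connection scanner.
 * All names here are hypothetical; only the arming pattern mirrors
 * the commit: arm on first insertion, re-arm from the work handler.
 */
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define MY_SCAN_DELAY (HZ / 4)		/* hypothetical scan interval */

struct my_dev {
	spinlock_t		lock;
	struct list_head	conns;		/* outstanding passive connections */
	struct delayed_work	stale_task;
};

static void my_add_conn(struct my_dev *dev, struct list_head *conn)
{
	spin_lock_irq(&dev->lock);
	/* Arm the scanner only on the empty -> non-empty transition. */
	if (list_empty(&dev->conns))
		schedule_delayed_work(&dev->stale_task, MY_SCAN_DELAY);
	list_add(conn, &dev->conns);
	spin_unlock_irq(&dev->lock);
}

static void my_stale_task(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, stale_task.work);

	spin_lock_irq(&dev->lock);
	/* ... reap stale entries from dev->conns here ... */

	/* Re-arm only while connections remain outstanding. */
	if (!list_empty(&dev->conns))
		schedule_delayed_work(&dev->stale_task, MY_SCAN_DELAY);
	spin_unlock_irq(&dev->lock);
}

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->conns);
	INIT_DELAYED_WORK(&dev->stale_task, my_stale_task);
}

The actual driver queues onto ipoib_workqueue with queue_delayed_work() rather than the system workqueue, but the arming logic is the same.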
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 785bc8505f2a..eec833b81e9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	cm_id->context = p;
 	p->jiffies = jiffies;
 	spin_lock_irq(&priv->lock);
+	if (list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	list_add(&p->list, &priv->cm.passive_ids);
 	spin_unlock_irq(&priv->lock);
-	queue_delayed_work(ipoib_workqueue,
-			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	return 0;
 
 err_rep:
@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		if (!list_empty(&p->list))
 			list_move(&p->list, &priv->cm.passive_ids);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	}
 }
 
@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 		kfree(p);
 		spin_lock_irq(&priv->lock);
 	}
+
+	if (!list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
 