path: root/drivers/infiniband
author	Michael S. Tsirkin <mst@mellanox.co.il>	2006-01-17 15:19:40 -0500
committer	Roland Dreier <rolandd@cisco.com>	2006-01-17 15:19:40 -0500
commit	b36f170b617a7cd147b694dabf504e856a50ee9d (patch)
tree	538606cc821b415447d5db710d76ef2272a75934 /drivers/infiniband
parent	0f47ae0b3ec35dc5f4723f2e0ad0f6f3f55e9bcd (diff)
IPoIB: Lock accesses to multicast packet queues
Avoid corrupting mcast->pkt_queue by serializing access with priv->tx_lock. Also, update dropped packet statistics to count multicast packets removed from pkt_queue as dropped.

Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
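As a concrete illustration, here is a minimal userspace sketch of the drop-accounting pattern the patch applies in ipoib_mcast_free(): drain the queue while counting drops in a local variable, then take the lock once to fold the count into the shared statistic. The pkt list, pthread mutex, and stats_tx_dropped counter below are illustrative stand-ins for the kernel's sk_buff queue, priv->tx_lock, and priv->stats.tx_dropped; this is a sketch of the pattern, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
};

static struct pkt *pkt_queue;		/* stand-in for mcast->pkt_queue */
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long stats_tx_dropped;	/* stand-in for priv->stats.tx_dropped */

static void flush_pkt_queue(void)
{
	unsigned long tx_dropped = 0;
	struct pkt *skb;

	/* Drain the queue, counting each freed packet as dropped. */
	while ((skb = pkt_queue) != NULL) {
		pkt_queue = skb->next;
		free(skb);
		++tx_dropped;
	}

	/* Fold the local count into the shared statistic under the lock. */
	pthread_mutex_lock(&tx_lock);
	stats_tx_dropped += tx_dropped;
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		p->next = pkt_queue;
		pkt_queue = p;
	}

	flush_pkt_queue();
	printf("tx_dropped = %lu\n", stats_tx_dropped);
	return 0;
}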
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 98039da0caf0..ccaa0c387076 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -97,6 +97,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
 	unsigned long flags;
+	int tx_dropped = 0;
 
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
@@ -123,8 +124,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
 
-	while (!skb_queue_empty(&mcast->pkt_queue))
+	while (!skb_queue_empty(&mcast->pkt_queue)) {
+		++tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+	}
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	priv->stats.tx_dropped += tx_dropped;
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	kfree(mcast);
 }
@@ -276,8 +283,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 	}
 
 	/* actually send any queued packets */
+	spin_lock_irq(&priv->tx_lock);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+		spin_unlock_irq(&priv->tx_lock);
 
 		skb->dev = dev;
 
@@ -288,7 +297,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
 		if (dev_queue_xmit(skb))
 			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+		spin_lock_irq(&priv->tx_lock);
 	}
+	spin_unlock_irq(&priv->tx_lock);
 
 	return 0;
 }
@@ -300,6 +311,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 {
 	struct ipoib_mcast *mcast = mcast_ptr;
 	struct net_device *dev = mcast->dev;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	if (!status)
 		ipoib_mcast_join_finish(mcast, mcmember);
@@ -310,8 +322,12 @@ ipoib_mcast_sendonly_join_complete(int status,
 			   IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	/* Flush out any queued packets */
-	while (!skb_queue_empty(&mcast->pkt_queue))
+	spin_lock_irq(&priv->tx_lock);
+	while (!skb_queue_empty(&mcast->pkt_queue)) {
+		++priv->stats.tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+	}
+	spin_unlock_irq(&priv->tx_lock);
 
 	/* Clear the busy flag so we try again */
 	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
@@ -687,6 +703,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 	if (!mcast) {
 		ipoib_warn(priv, "unable to allocate memory for "
 			   "multicast structure\n");
+		++priv->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		goto out;
 	}
@@ -700,8 +717,10 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 	if (!mcast->ah) {
 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
 			skb_queue_tail(&mcast->pkt_queue, skb);
-		else
+		else {
+			++priv->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
+		}
 
 		if (mcast->query)
 			ipoib_dbg_mcast(priv, "no address vector, "
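
For reference, the drain loop added to ipoib_mcast_join_finish() above follows a common pattern: hold the lock while testing and dequeuing, but release it around the actual send so the transmit path never runs under the spinlock. Below is a minimal userspace sketch of that pattern, with a pthread mutex and a toy transmit() standing in for spin_lock_irq(&priv->tx_lock) and dev_queue_xmit(); it is illustrative only, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
};

static struct pkt *pkt_queue;	/* stand-in for mcast->pkt_queue */
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Toy send routine standing in for dev_queue_xmit(). */
static void transmit(struct pkt *p)
{
	printf("sending %p\n", (void *)p);
	free(p);
}

static void send_queued_packets(void)
{
	pthread_mutex_lock(&tx_lock);
	while (pkt_queue != NULL) {
		struct pkt *p = pkt_queue;

		pkt_queue = p->next;

		/* Drop the lock around the send, as the patch drops
		 * priv->tx_lock around dev_queue_xmit(). */
		pthread_mutex_unlock(&tx_lock);
		transmit(p);
		pthread_mutex_lock(&tx_lock);
	}
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		p->next = pkt_queue;
		pkt_queue = p;
	}

	send_queued_packets();
	return 0;
}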