Diffstat (limited to 'drivers/infiniband/ulp')

 drivers/infiniband/ulp/ipoib/ipoib.h           |  8
 drivers/infiniband/ulp/ipoib/ipoib_cm.c        | 88
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        | 30
 drivers/infiniband/ulp/ipoib/ipoib_main.c      | 76
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 31
 5 files changed, 122 insertions(+), 111 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 05eb41b8ab63..68ba5c3482e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -268,10 +268,9 @@ struct ipoib_lro {
 };
 
 /*
- * Device private locking: tx_lock protects members used in TX fast
- * path (and we use LLTX so upper layers don't do extra locking).
- * lock protects everything else.  lock nests inside of tx_lock (ie
- * tx_lock must be acquired first if needed).
+ * Device private locking: network stack tx_lock protects members used
+ * in TX fast path, lock protects everything else.  lock nests inside
+ * of tx_lock (ie tx_lock must be acquired first if needed).
  */
 struct ipoib_dev_priv {
 	spinlock_t lock;
@@ -320,7 +319,6 @@ struct ipoib_dev_priv {
 
 	struct ipoib_rx_buf *rx_ring;
 
-	spinlock_t	     tx_lock;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned	     tx_head;
 	unsigned	     tx_tail;
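
The comment change above keeps the nesting rule: the network stack's TX lock is the outer lock and priv->lock nests inside it. A minimal illustration of a function that needs both locks after this change (the helper name is invented for illustration; it is not part of the patch):

	/* Sketch only: take the core TX lock, then the private lock. */
	static void example_take_both_locks(struct net_device *dev)
	{
		struct ipoib_dev_priv *priv = netdev_priv(dev);
		unsigned long flags;

		netif_tx_lock_bh(dev);			/* outer lock, owned by the core */
		spin_lock_irqsave(&priv->lock, flags);	/* inner, device private */

		/* ... modify state protected by priv->lock ... */

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
	}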
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 341ffedafed6..7b14c2c39500 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	dev_kfree_skb_any(tx_req->skb);
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
+
 	++tx->tx_tail;
 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
 
-		spin_lock(&priv->lock);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
 
-		spin_unlock(&priv->lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 }
 
 int ipoib_cm_dev_open(struct net_device *dev)
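
Note the lock flavors here versus the rest of the patch: ipoib_cm_handle_tx_wc() runs from completion-polling paths that already have bottom halves disabled (see the local_bh_disable() added to ipoib_drain_cq() below), so the plain netif_tx_lock() suffices, while process-context callers converted elsewhere use the _bh variant. Schematically:

	netif_tx_lock(dev);	/* caller already in BH context (CQ polling, timer) */
	...
	netif_tx_unlock(dev);

	netif_tx_lock_bh(dev);	/* process context: disable BHs, then take the lock */
	...
	netif_tx_unlock_bh(dev);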
@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	struct ipoib_cm_tx_buf *tx_req;
-	unsigned long flags;
 	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1180,12 +1180,12 @@ timeout:
 				    DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
-		spin_lock_irqsave(&priv->tx_lock, flags);
+		netif_tx_lock_bh(p->dev);
 		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		netif_tx_unlock_bh(p->dev);
 	}
 
 	if (p->qp)
@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
 	struct net_device *dev = priv->dev;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 	int ret;
 
 	switch (event->event) {
@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REJ_RECEIVED:
 	case IB_CM_TIMEWAIT_EXIT:
 		ipoib_dbg(priv, "CM error %d.\n", event->event);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		break;
 	default:
 		break;
@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 	struct ib_sa_path_rec pathrec;
 	u32 qpn;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while (!list_empty(&priv->cm.start_list)) {
 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
 		list_del_init(&p->list);
 		neigh = p->neigh;
 		qpn = IPOIB_QPN(neigh->neighbour->ha);
 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
-		spin_unlock(&priv->lock);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
+
 		if (ret) {
 			neigh = p->neigh;
 			if (neigh) {
@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 			kfree(p);
 		}
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_tx_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.reap_task);
+	struct net_device *dev = priv->dev;
 	struct ipoib_cm_tx *p;
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	while (!list_empty(&priv->cm.reap_list)) {
 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
 		list_del(&p->list);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		ipoib_cm_tx_destroy(p);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.skb_task);
+	struct net_device *dev = priv->dev;
 	struct sk_buff *skb;
-
+	unsigned long flags;
 	unsigned mtu = priv->mcast_mtu;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		if (skb->protocol == htons(ETH_P_IP))
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
 #endif
 		dev_kfree_skb_any(skb);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
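
The three work-queue functions above share one shape: pop an item off the shared list (or queue) under both locks, drop both locks to do work that may sleep or take other locks, then reacquire before testing the list again. Because the item is unlinked before the locks are dropped, no other context can still see it. A condensed sketch with invented placeholder names (pop_first() and do_blocking_work() are not functions in this driver):

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);
	while (!list_empty(&work_list)) {
		entry = pop_first(&work_list);	/* unlinked while still locked */

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		do_blocking_work(entry);	/* no spinlocks held here */

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);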
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 66cafa20c246..0e748aeeae99 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static void drain_tx_cq(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
 	while (poll_tx(priv))
 		; /* nothing */
 
 	if (netif_queue_stopped(dev))
 		mod_timer(&priv->poll_timer, jiffies + 1);
 
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 }
 
 void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
 {
-	drain_tx_cq((struct net_device *)dev_ptr);
+	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
+
+	mod_timer(&priv->poll_timer, jiffies);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
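
The send completion handler change is the subtle part of this hunk: netif_tx_lock() may not be taken from hard interrupt context, so the handler can no longer call drain_tx_cq() directly. Instead it arms poll_timer for immediate expiry and lets drain_tx_cq() run from timer (softirq) context, where the TX lock is safe:

	/* hard IRQ: don't touch the TX lock, just schedule the drain */
	mod_timer(&priv->poll_timer, jiffies);

drain_tx_cq() then re-arms the timer one jiffy out while the queue remains stopped, so completions keep getting reaped until the queue wakes.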
@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah, *tah;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
 			ib_destroy_ah(ah->ah);
 			kfree(ah);
 		}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_reap_ah(struct work_struct *work)
@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i, n;
+
+	/*
+	 * We call completion handling routines that expect to be
+	 * called from the BH-disabled NAPI poll context, so disable
+	 * BHs here too.
+	 */
+	local_bh_disable();
+
 	do {
 		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
 		for (i = 0; i < n; ++i) {
@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
 
 	while (poll_tx(priv))
 		; /* nothing */
+
+	local_bh_enable();
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
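
ipoib_drain_cq() can be reached from process context (device stop/flush), but it dispatches to the same completion handlers that normally run from BH-disabled NAPI context, including ipoib_cm_handle_tx_wc() above, which now takes netif_tx_lock(). The local_bh_disable()/local_bh_enable() pair makes the process-context call indistinguishable from softirq context as far as those handlers are concerned:

	local_bh_disable();
	/* ... poll the CQ and invoke handlers that assume BHs are off ... */
	local_bh_enable();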
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1b1df5cc4113..c0ee514396df 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	list_splice_init(&priv->path_list, &remove_list);
 
@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void path_rec_completion(int status,
@@ -404,7 +406,7 @@ static void path_rec_completion(int status,
 	struct net_device *dev = path->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah = NULL;
-	struct ipoib_ah *old_ah;
+	struct ipoib_ah *old_ah = NULL;
 	struct ipoib_neigh *neigh, *tn;
 	struct sk_buff_head skqueue;
 	struct sk_buff *skb;
@@ -428,12 +430,12 @@ static void path_rec_completion(int status,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	old_ah = path->ah;
-	path->ah = ah;
-
 	if (ah) {
 		path->pathrec = *pathrec;
 
+		old_ah = path->ah;
+		path->ah = ah;
+
 		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
 			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
 
@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		return;
 	}
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, skb->dst->neighbour->ha + 4);
 	if (!path) {
@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		__skb_queue_tail(&neigh->queue, skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return;
 
 err_list:
@@ -626,7 +625,7 @@ err_drop:
 	++dev->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path || !path->valid) {
@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		__skb_queue_tail(&path->queue, skb);
 
 		if (path_rec_start(dev, path)) {
-			spin_unlock(&priv->lock);
+			spin_unlock_irqrestore(&priv->lock, flags);
 			path_free(dev, path);
 			return;
 		} else
@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		dev_kfree_skb_any(skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return;
 }
 
@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		dev_kfree_skb_any(skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
-		return NETDEV_TX_LOCKED;
-
 	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					   skb->dst->neighbour->ha + 4,
 					   sizeof(union ib_gid))) ||
 			 (neigh->dev != dev))) {
-			spin_lock(&priv->lock);
+			spin_lock_irqsave(&priv->lock, flags);
 			/*
 			 * It's safe to call ipoib_put_ah() inside
 			 * priv->lock here, because we know that
@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			ipoib_put_ah(neigh->ah);
 			list_del(&neigh->list);
 			ipoib_neigh_free(dev, neigh);
-			spin_unlock(&priv->lock);
+			spin_unlock_irqrestore(&priv->lock, flags);
 			ipoib_path_lookup(skb, dev);
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		if (ipoib_cm_get(neigh)) {
 			if (ipoib_cm_up(neigh)) {
 				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-				goto out;
+				return NETDEV_TX_OK;
 			}
 		} else if (neigh->ah) {
 			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-			spin_lock(&priv->lock);
+			spin_lock_irqsave(&priv->lock, flags);
 			__skb_queue_tail(&neigh->queue, skb);
-			spin_unlock(&priv->lock);
+			spin_unlock_irqrestore(&priv->lock, flags);
 		} else {
 			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 			dev_kfree_skb_any(skb);
 			++dev->stats.tx_dropped;
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		unicast_arp_send(skb, dev, phdr);
 	}
 }
 
-out:
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
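
The trylock/NETDEV_TX_LOCKED exit can go away because, once NETIF_F_LLTX is dropped (see the ipoib_setup() hunk below), the core holds the device TX lock around every invocation of the driver's xmit routine. Roughly what the core does in this case (a paraphrase, not a quote of net/core/dev.c):

	netif_tx_lock(dev);			/* taken by the core, not the driver */
	rc = dev->hard_start_xmit(skb, dev);	/* -> ipoib_start_xmit() */
	netif_tx_unlock(dev);

So ipoib_start_xmit() is already serialized on entry and can simply return NETDEV_TX_OK from each exit path.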
@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
 	dev->type = ARPHRD_INFINIBAND;
 	dev->tx_queue_len = ipoib_sendq_size * 2;
 	dev->features = (NETIF_F_VLAN_CHALLENGED |
-			 NETIF_F_LLTX |
 			 NETIF_F_HIGHDMA);
 
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
 	ipoib_lro_setup(priv);
 
 	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->tx_lock);
 
 	mutex_init(&priv->vlan_mutex);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aae28620a6e5..d9d1223c3fd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	struct net_device *dev = mcast->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
-	unsigned long flags;
 	int tx_dropped = 0;
 
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
 			IPOIB_GID_ARG(mcast->mcmember.mgid));
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irq(&priv->lock);
 
 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
 		/*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 		ipoib_neigh_free(dev, neigh);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irq(&priv->lock);
 
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 	}
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock_bh(dev);
 	dev->stats.tx_dropped += tx_dropped;
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock_bh(dev);
 
 	kfree(mcast);
 }
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 	}
 
 	/* actually send any queued packets */
-	spin_lock_irq(&priv->tx_lock);
+	netif_tx_lock_bh(dev);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-		spin_unlock_irq(&priv->tx_lock);
+		netif_tx_unlock_bh(dev);
 
 		skb->dev = dev;
 
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
 		if (dev_queue_xmit(skb))
 			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
-		spin_lock_irq(&priv->tx_lock);
+		netif_tx_lock_bh(dev);
 	}
-	spin_unlock_irq(&priv->tx_lock);
+	netif_tx_unlock_bh(dev);
 
 	return 0;
 }
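
The requeue loop above drops the TX lock around dev_queue_xmit() because, for a device without NETIF_F_LLTX, dev_queue_xmit() acquires the device TX lock itself; calling it with netif_tx_lock_bh() held would self-deadlock. Only the queue manipulation stays under the lock:

	netif_tx_unlock_bh(dev);	/* must drop: the next call relocks internally */
	if (dev_queue_xmit(skb))
		ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
	netif_tx_lock_bh(dev);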
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
 {
 	struct ipoib_mcast *mcast = multicast->context;
 	struct net_device *dev = mcast->dev;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	/* We trap for port events ourselves. */
 	if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
 			IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	/* Flush out any queued packets */
-	spin_lock_irq(&priv->tx_lock);
+	netif_tx_lock_bh(dev);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 	}
-	spin_unlock_irq(&priv->tx_lock);
+	netif_tx_unlock_bh(dev);
 
 	/* Clear the busy flag so we try again */
 	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
 	    !priv->broadcast ||
@@ -738,7 +733,7 @@ out:
 	}
 
 unlock:
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 void ipoib_mcast_dev_flush(struct net_device *dev)