author		Roland Dreier <roland@purestorage.com>	2015-01-30 18:38:55 -0500
committer	Roland Dreier <roland@purestorage.com>	2015-01-30 18:38:55 -0500
commit		0306eda2269f961d77c09b1badc0031438c21f44 (patch)
tree		c9af7b5686f9e831d6d58dae9ce7c41df86b97dc
parent		4e0ab200fa4594d16bb7fc697ec4d9edfb5a43f0 (diff)
Revert "IPoIB: Use dedicated workqueues per interface"
This reverts commit 5141861cd5e17eac9676ff49c5abfafbea2b0e98.

The series of IPoIB bug fixes that went into 3.19-rc1 introduced
regressions, and after trying to sort things out, we decided to revert
to 3.18's IPoIB driver and get things right for 3.20.

Signed-off-by: Roland Dreier <roland@purestorage.com>
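For reference, the mechanical change being undone is simply where the
driver's deferred work gets queued. A minimal sketch of the two schemes,
using only names that appear in this diff (create_singlethread_workqueue()
and queue_work() are the kernel APIs both versions use):

	/* 3.18 scheme, restored by this revert: one driver-global
	 * single-threaded workqueue shared by all IPoIB interfaces. */
	static struct workqueue_struct *ipoib_workqueue;

	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	queue_work(ipoib_workqueue, &priv->restart_task);

	/* Reverted 3.19-rc1 scheme: a dedicated workqueue per interface,
	 * created in ipoib_transport_dev_init() and kept in priv->wq. */
	priv->wq = create_singlethread_workqueue("ipoib_wq");
	queue_work(priv->wq, &priv->restart_task);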
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h	1
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c	18
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	6
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	19
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_multicast.c	26
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_verbs.c	22
6 files changed, 34 insertions, 58 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 45fd10a72ec1..f4c1b20b23b2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -323,7 +323,6 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct workqueue_struct *wq;
 	struct delayed_work mcast_task;
 	struct work_struct carrier_on_task;
 	struct work_struct flush_light;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..933efcea0d03 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	}
 
 	spin_lock_irq(&priv->lock);
-	queue_delayed_work(priv->wq,
+	queue_delayed_work(ipoib_workqueue,
 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	/* Add this entry to passive ids list head, but do not re-add it
 	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
 		ipoib_cm_start_rx_drain(priv);
-		queue_work(priv->wq, &priv->cm.rx_reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	} else
 		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 			spin_lock_irqsave(&priv->lock, flags);
 			list_move(&p->list, &priv->cm.rx_reap_list);
 			spin_unlock_irqrestore(&priv->lock, flags);
-			queue_work(priv->wq, &priv->cm.rx_reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
 		}
 		return;
 	}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(priv->wq, &priv->cm.reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(priv->wq, &priv->cm.reap_task);
+			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-	queue_work(priv->wq, &priv->cm.start_task);
+	queue_work(ipoib_workqueue, &priv->cm.start_task);
 	return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
-		queue_work(priv->wq, &priv->cm.reap_task);
+		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
 	skb_queue_tail(&priv->cm.skb_queue, skb);
 	if (e)
-		queue_work(priv->wq, &priv->cm.skb_task);
+		queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	}
 
 	if (!list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(priv->wq,
+		queue_delayed_work(ipoib_workqueue,
 				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index bfd17d41b5f2..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
 	__ipoib_reap_ah(dev);
 
 	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->ah_reap_task,
+		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 				   round_jiffies_relative(HZ));
 }
 
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
 	}
 
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->ah_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -881,7 +881,7 @@ timeout:
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
 	if (flush)
-		flush_workqueue(priv->wq);
+		flush_workqueue(ipoib_workqueue);
 
 	begin = jiffies;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 42e5c278f489..2cf81ef51412 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 		return;
 	}
 
-	queue_work(priv->wq, &priv->restart_task);
+	queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
 	__ipoib_reap_neigh(priv);
 
 	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 				   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
 	/* start garbage collection */
 	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
 			   arp_tbl.gc_interval);
 
 	return 0;
@@ -1293,7 +1293,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	return 0;
 
 out_dev_uninit:
-	ipoib_ib_dev_cleanup(dev);
+	ipoib_ib_dev_cleanup();
 
 out_tx_ring_cleanup:
 	vfree(priv->tx_ring);
@@ -1646,7 +1646,7 @@ register_failed:
 	/* Stop GC if started before flush */
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
-	flush_workqueue(priv->wq);
+	flush_workqueue(ipoib_workqueue);
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1717,7 @@ static void ipoib_remove_one(struct ib_device *device)
 		/* Stop GC */
 		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 		cancel_delayed_work(&priv->neigh_reap_task);
-		flush_workqueue(priv->wq);
+		flush_workqueue(ipoib_workqueue);
 
 		unregister_netdev(priv->dev);
 		free_netdev(priv->dev);
@@ -1758,13 +1758,8 @@ static int __init ipoib_init_module(void)
 	 * unregister_netdev() and linkwatch_event take the rtnl lock,
 	 * so flush_scheduled_work() can deadlock during device
 	 * removal.
-	 *
-	 * In addition, bringing one device up and another down at the
-	 * same time can deadlock a single workqueue, so we have this
-	 * global fallback workqueue, but we also attempt to open a
-	 * per device workqueue each time we bring an interface up
 	 */
-	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+	ipoib_workqueue = create_singlethread_workqueue("ipoib");
 	if (!ipoib_workqueue) {
 		ret = -ENOMEM;
 		goto err_fs;
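The part of the comment kept above records why the driver flushes its own
workqueue instead of using flush_scheduled_work(): flushing a workqueue
while holding the rtnl lock deadlocks if a queued work item also takes the
rtnl lock. A minimal illustration of that inversion (hypothetical work
function, not IPoIB code):

	/* Hypothetical work item that needs the rtnl lock, the way
	 * linkwatch_event() does. */
	static void my_work_fn(struct work_struct *work)
	{
		rtnl_lock();
		/* ... touch netdev state ... */
		rtnl_unlock();
	}

	/* A path that already holds rtnl, as unregister_netdev() does
	 * internally, must not flush the queue that work sits on: */
	rtnl_lock();
	flush_workqueue(system_wq);	/* waits on my_work_fn -> deadlock */
	rtnl_unlock();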
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 845f910eb214..41325960e4e0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -388,7 +388,7 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
 	 * the workqueue while holding the rtnl lock, so loop
 	 * on trylock until either we get the lock or we see
 	 * FLAG_ADMIN_UP go away as that signals that we are bailing
-	 * and can safely ignore the carrier on work.
+	 * and can safely ignore the carrier on work
 	 */
 	while (!rtnl_trylock()) {
 		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
@@ -432,14 +432,15 @@ static int ipoib_mcast_join_complete(int status,
 	if (!status) {
 		mcast->backoff = 1;
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, 0);
 
 		/*
-		 * Defer carrier on work to priv->wq to avoid a
+		 * Defer carrier on work to ipoib_workqueue to avoid a
 		 * deadlock on rtnl_lock here.
 		 */
 		if (mcast == priv->broadcast)
-			queue_work(priv->wq, &priv->carrier_on_task);
+			queue_work(ipoib_workqueue, &priv->carrier_on_task);
 	} else {
 		if (mcast->logcount++ < 20) {
 			if (status == -ETIMEDOUT || status == -EAGAIN) {
@@ -464,7 +465,7 @@ out:
 	if (status == -ENETRESET)
 		status = 0;
 	if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task,
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 				   mcast->backoff * HZ);
 	spin_unlock_irq(&priv->lock);
 	mutex_unlock(&mcast_mutex);
@@ -534,7 +535,8 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task,
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task,
 					   mcast->backoff * HZ);
 	}
 	mutex_unlock(&mcast_mutex);
@@ -574,8 +576,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
 			mutex_lock(&mcast_mutex);
 			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-				queue_delayed_work(priv->wq, &priv->mcast_task,
-						   HZ);
+				queue_delayed_work(ipoib_workqueue,
+						   &priv->mcast_task, HZ);
 			mutex_unlock(&mcast_mutex);
 			return;
 		}
@@ -642,7 +644,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
 	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
 
 	return 0;
@@ -660,7 +662,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 	mutex_unlock(&mcast_mutex);
 
 	if (flush)
-		flush_workqueue(priv->wq);
+		flush_workqueue(ipoib_workqueue);
 
 	return 0;
 }
@@ -727,7 +729,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 		__ipoib_mcast_add(dev, mcast);
 		list_add_tail(&mcast->list, &priv->multicast_list);
 		if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+			queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	}
 
 	if (!mcast->ah) {
@@ -942,7 +944,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	 * completes. So do like the carrier on task and attempt to
 	 * take the rtnl lock, but if we can't before the ADMIN_UP flag
 	 * goes away, then just return and know that the remove list will
-	 * get flushed later by mcast_stop_thread.
+	 * get flushed later by mcast_dev_flush.
 	 */
 	while (!rtnl_trylock()) {
 		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
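Both of the comments touched above refer to the same guard: the work
handler spins on rtnl_trylock() and bails out once IPOIB_FLAG_ADMIN_UP is
cleared. The shape of that loop, reconstructed from the hunk context (the
msleep() back-off is an assumption; this diff only shows the first two
lines):

	while (!rtnl_trylock()) {
		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;		/* interface went down: bail */
		msleep(20);		/* assumed back-off before retrying */
	}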
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b72a753eb41d..c56d5d44c53b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	int ret, size;
 	int i;
 
-	/*
-	 * the various IPoIB tasks assume they will never race against
-	 * themselves, so always use a single thread workqueue
-	 */
-	priv->wq = create_singlethread_workqueue("ipoib_wq");
-	if (!priv->wq) {
-		printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-		return -ENODEV;
-	}
-
 	priv->pd = ib_alloc_pd(priv->ca);
 	if (IS_ERR(priv->pd)) {
 		printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-		goto out_free_wq;
+		return -ENODEV;
 	}
 
 	priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
 	ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-	destroy_workqueue(priv->wq);
-	priv->wq = NULL;
 	return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
 	if (ib_dealloc_pd(priv->pd))
 		ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-	if (priv->wq) {
-		flush_workqueue(priv->wq);
-		destroy_workqueue(priv->wq);
-		priv->wq = NULL;
-	}
 }
 
 void ipoib_event(struct ib_event_handler *handler,