 drivers/infiniband/core/cache.c                |  4 ++--
 drivers/infiniband/core/device.c               | 11 +++++++++--
 drivers/infiniband/core/sa_query.c             |  2 +-
 drivers/infiniband/core/umem.c                 |  2 +-
 drivers/infiniband/hw/ipath/ipath_driver.c     |  2 +-
 drivers/infiniband/hw/ipath/ipath_user_pages.c |  2 +-
 drivers/infiniband/hw/qib/qib_iba7220.c        |  7 +++----
 drivers/infiniband/hw/qib/qib_iba7322.c        | 14 +++++++-------
 drivers/infiniband/hw/qib/qib_init.c           | 26 ++++----------------------
 drivers/infiniband/hw/qib/qib_qsfp.c           |  9 ++++-----
 drivers/infiniband/hw/qib/qib_verbs.h          |  3 +--
 drivers/infiniband/ulp/srp/ib_srp.c            |  4 ++--
 include/rdma/ib_verbs.h                        |  3 +++
 13 files changed, 39 insertions(+), 50 deletions(-)
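
Every hunk that follows makes the same substitution: work that used to ride on the kernel's shared system workqueue through schedule_work()/flush_scheduled_work() is moved to a dedicated InfiniBand workqueue, ib_wq, allocated with alloc_workqueue(). A minimal sketch of the lifecycle the patch adopts, using illustrative names rather than anything from the patch itself:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;     /* stands in for ib_wq */

    static void example_handler(struct work_struct *work)
    {
            /* deferred processing runs here, in process context */
    }

    static DECLARE_WORK(example_work, example_handler);

    static int __init example_init(void)
    {
            example_wq = alloc_workqueue("example", 0, 0);
            if (!example_wq)
                    return -ENOMEM;

            queue_work(example_wq, &example_work);  /* was schedule_work() */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* destroy_workqueue() drains remaining work before freeing */
            destroy_workqueue(example_wq);
    }

Flushing or destroying a private queue waits only for that queue's work, which is what lets the callers below drop flush_scheduled_work().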
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 68883565b725..f9ba7d74dfc0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
                         INIT_WORK(&work->work, ib_cache_task);
                         work->device = event->device;
                         work->port_num = event->element.port_num;
-                        schedule_work(&work->work);
+                        queue_work(ib_wq, &work->work);
                 }
         }
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
         int p;
 
         ib_unregister_event_handler(&device->cache.event_handler);
-        flush_scheduled_work();
+        flush_workqueue(ib_wq);
 
         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                 kfree(device->cache.pkey_cache[p]);
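
The work item queued in the first hunk above is embedded in a small context struct allocated with GFP_ATOMIC, and the handler recovers it with container_of() before freeing it. A sketch of that deferral idiom with hypothetical struct and function names (the real cache.c types may differ); example_wq stands in for ib_wq:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    extern struct workqueue_struct *example_wq;     /* hypothetical, see above */

    struct example_event_work {
            struct work_struct work;        /* embedded so container_of() works */
            u8 port_num;
    };

    static void example_task(struct work_struct *work)
    {
            struct example_event_work *ew =
                    container_of(work, struct example_event_work, work);

            /* ... refresh per-port state for ew->port_num ... */
            kfree(ew);
    }

    /* May be called from atomic context, so defer the heavy lifting: */
    static void example_event(u8 port_num)
    {
            struct example_event_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

            if (ew) {
                    INIT_WORK(&ew->work, example_task);
                    ew->port_num = port_num;
                    queue_work(example_wq, &ew->work);
            }
    }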
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effad0811..f793bf2f5da7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@ struct ib_client_data {
         void * data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
         int ret;
 
+        ib_wq = alloc_workqueue("infiniband", 0, 0);
+        if (!ib_wq)
+                return -ENOMEM;
+
         ret = ib_sysfs_setup();
         if (ret)
                 printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
         if (ret) {
                 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                 ib_sysfs_cleanup();
+                destroy_workqueue(ib_wq);
         }
 
         return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
         ib_cache_cleanup();
         ib_sysfs_cleanup();
         /* Make sure that any pending umem accounting work is done. */
-        flush_scheduled_work();
+        destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a660310b7c..e38be1bcc01c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                 port->sm_ah = NULL;
                 spin_unlock_irqrestore(&port->ah_lock, flags);
 
-                schedule_work(&sa_dev->port[event->element.port_num -
+                queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                                             sa_dev->start_port].update_task);
         }
 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186eee32..b645e558876f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
                         umem->mm = mm;
                         umem->diff = diff;
 
-                        schedule_work(&umem->work);
+                        queue_work(ib_wq, &umem->work);
                         return;
                 }
         } else
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ae92da2d3f56..47db4bf34628 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -755,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
          */
         ipath_shutdown_device(dd);
 
-        flush_scheduled_work();
+        flush_workqueue(ib_wq);
 
         if (dd->verbs_dev)
                 ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73eba2a..bab9f74c0665 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
         work->mm = mm;
         work->num_pages = num_pages;
 
-        schedule_work(&work->work);
+        queue_work(ib_wq, &work->work);
         return;
 
 bail_mm:
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 127a0d5069f0..de799f17cb9e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
-        flush_scheduled_work();
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
         shutdown_7220_relock_poll(ppd->dd);
         val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
         toggle_7220_rclkrls(ppd->dd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
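
Both hunks above also show why the flush disappears: cancel_delayed_work() only removes an item that has not started running, so the old code had to chase it with flush_scheduled_work(), while cancel_delayed_work_sync() additionally waits for a running instance and blocks on nothing else. A sketch with hypothetical names; example_wq stands in for ib_wq:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    extern struct workqueue_struct *example_wq;     /* hypothetical, see above */

    static void example_autoneg(struct work_struct *work)
    {
            /* ... one autonegotiation poll step ... */
    }

    static DECLARE_DELAYED_WORK(example_autoneg_work, example_autoneg);

    static void example_try_autoneg(void)
    {
            /* 2 msec is the minimum length of a poll cycle */
            queue_delayed_work(example_wq, &example_autoneg_work,
                               msecs_to_jiffies(2));
    }

    static void example_quiet_serdes(void)
    {
            /* cancels the timer and waits for a running instance to finish */
            cancel_delayed_work_sync(&example_autoneg_work);
    }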
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index dbbb0e85afe4..ea46fbc34b17 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
         if (ppd->dd->cspec->r1)
-                cancel_delayed_work(&ppd->cpspec->ipg_work);
-        flush_scheduled_work();
+                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
         ppd->cpspec->chase_end = 0;
         if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                 if (!(pins & mask)) {
                         ++handled;
                         qd->t_insert = get_jiffies_64();
-                        schedule_work(&qd->work);
+                        queue_work(ib_wq, &qd->work);
                 }
         }
 }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
         set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
         qib_7322_mini_pcs_reset(ppd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
         ib_free_send_mad(send_buf);
 retry:
         delay = 2 << ppd->cpspec->ipg_tries;
-        schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+                           msecs_to_jiffies(delay));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 304bd8038541..ffefb78b8949 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -1044,24 +1043,10 @@ static int __init qlogic_ib_init(void)
         if (ret)
                 goto bail;
 
-        /*
-         * We create our own workqueue mainly because we want to be
-         * able to flush it when devices are being removed. We can't
-         * use schedule_work()/flush_scheduled_work() because both
-         * unregister_netdev() and linkwatch_event take the rtnl lock,
-         * so flush_scheduled_work() can deadlock during device
-         * removal.
-         */
-        qib_wq = create_workqueue("qib");
-        if (!qib_wq) {
-                ret = -ENOMEM;
-                goto bail_dev;
-        }
-
         qib_cq_wq = create_singlethread_workqueue("qib_cq");
         if (!qib_cq_wq) {
                 ret = -ENOMEM;
-                goto bail_wq;
+                goto bail_dev;
         }
 
         /*
@@ -1091,8 +1076,6 @@ bail_unit:
         idr_destroy(&qib_unit_table);
 bail_cq_wq:
         destroy_workqueue(qib_cq_wq);
-bail_wq:
-        destroy_workqueue(qib_wq);
 bail_dev:
         qib_dev_cleanup();
 bail:
@@ -1116,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
         pci_unregister_driver(&qib_driver);
 
-        destroy_workqueue(qib_wq);
         destroy_workqueue(qib_cq_wq);
 
         qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
         if (qib_mini_init || initfail || ret) {
                 qib_stop_timers(dd);
-                flush_scheduled_work();
+                flush_workqueue(ib_wq);
                 for (pidx = 0; pidx < dd->num_pports; ++pidx)
                         dd->f_quiet_serdes(dd->pport + pidx);
                 if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
         qib_stop_timers(dd);
 
-        /* wait until all of our (qsfp) schedule_work() calls complete */
-        flush_scheduled_work();
+        /* wait until all of our (qsfp) queue_work() calls complete */
+        flush_workqueue(ib_wq);
 
         ret = qibfs_remove(dd);
         if (ret)
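
The comment deleted from qlogic_ib_init() records the original motivation for a private queue: flush_scheduled_work() waits for every item on the shared system workqueue, and since unregister_netdev() and linkwatch_event() both take the rtnl lock, that flush could deadlock during device removal. Flushing a queue the subsystem owns waits only on its own work, which is the shape qib_remove_one() now has. A sketch of the contrast, names hypothetical:

    #include <linux/workqueue.h>

    extern struct workqueue_struct *example_wq;     /* hypothetical, see above */

    static void example_remove_one(void)
    {
            /*
             * Waits only for work this subsystem queued on its own
             * queue. flush_scheduled_work() would also wait for every
             * unrelated item on the system workqueue, with the deadlock
             * risk the deleted comment describes.
             */
            flush_workqueue(example_wq);
    }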
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604b691d..3374a52232c1 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                 goto bail;
         /* We see a module, but it may be unwise to look yet. Just schedule */
         qd->t_insert = get_jiffies_64();
-        schedule_work(&qd->work);
+        queue_work(ib_wq, &qd->work);
 bail:
         return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
         /*
-         * There is nothing to do here for now. our
-         * work is scheduled with schedule_work(), and
-         * flush_scheduled_work() from remove_one will
-         * block until all work ssetup with schedule_work()
+         * There is nothing to do here for now. our work is scheduled
+         * with queue_work(), and flush_workqueue() from remove_one
+         * will block until all work setup with queue_work()
          * completes.
          */
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 63b22a9a7feb..95e5b47223b3 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
                  !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
         if (qib_send_ok(qp))
-                queue_work(qib_wq, &qp->s_work);
+                queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b62105ed1e8..70ecb949683e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -638,7 +638,7 @@ err:
         if (target->state == SRP_TARGET_CONNECTING) {
                 target->state = SRP_TARGET_DEAD;
                 INIT_WORK(&target->work, srp_remove_work);
-                schedule_work(&target->work);
+                queue_work(ib_wq, &target->work);
         }
         spin_unlock_irq(&target->lock);
 
@@ -2199,7 +2199,7 @@ static void srp_remove_one(struct ib_device *device)
          * started before we marked our target ports as
          * removed, and any target port removal tasks.
          */
-        flush_scheduled_work();
+        flush_workqueue(ib_wq);
 
         list_for_each_entry_safe(target, tmp_target,
                                  &host->target_list, list) {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e04c4888d1fd..55cd0a0bc977 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
         u8 raw[16];
         struct {
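
With ib_wq declared here and exported by ib_core, any module that includes <rdma/ib_verbs.h> can defer work to the subsystem-wide queue and later flush InfiniBand work without touching unrelated system-workqueue items. A hedged usage sketch; the work item and functions are illustrative, not part of the patch:

    #include <rdma/ib_verbs.h>

    static void example_cleanup_task(struct work_struct *work)
    {
            /* ... deferred per-device teardown ... */
    }

    static DECLARE_WORK(example_cleanup, example_cleanup_task);

    static void example_defer_cleanup(void)
    {
            queue_work(ib_wq, &example_cleanup);
    }

    static void example_remove_device(void)
    {
            /* waits for everything queued on ib_wq, and nothing else */
            flush_workqueue(ib_wq);
    }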
