author     Tejun Heo <tj@kernel.org>            2010-10-19 11:24:36 -0400
committer  Roland Dreier <rolandd@cisco.com>    2011-01-17 00:16:31 -0500
commit     f06267104dd9112f11586830d22501d0e26245ea (patch)
tree       8f7c364abc84a5f69269974eaa2b955b24d8f421 /drivers/infiniband/core
parent     948579cd8c6ea7c8c98c52b79f4470952e182ebd (diff)
RDMA: Update workqueue usage
* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue. All system workqueue usages,
  including flush_scheduled_work() callers, are converted to use and
  flush ib_wq.
* cancel_delayed_work() + flush_scheduled_work() converted to
cancel_delayed_work_sync().
* qib_wq is removed and ib_wq is used instead.
This is to prepare for deprecation of flush_scheduled_work().
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
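A minimal sketch of the conversion pattern described in the message above, assuming a hypothetical ib_core client with one regular and one delayed work item (foo_work, foo_dwork and the two helpers are illustrative names, not code from this patch; ib_wq is the workqueue exported by ib_core, assumed to be declared in a core header such as rdma/ib_verbs.h):

    /*
     * Sketch only: a hypothetical client moving off the system workqueue.
     * Names prefixed with foo_ are invented for illustration.
     */
    #include <linux/workqueue.h>
    #include <rdma/ib_verbs.h>              /* assumed to declare ib_wq */

    static struct work_struct foo_work;     /* hypothetical work item */
    static struct delayed_work foo_dwork;   /* hypothetical delayed work */

    static void foo_flush_pending(void)
    {
            /* was: schedule_work(&foo_work); ... flush_scheduled_work(); */
            queue_work(ib_wq, &foo_work);
            flush_workqueue(ib_wq);
    }

    static void foo_teardown(void)
    {
            /* was: cancel_delayed_work(&foo_dwork); flush_scheduled_work(); */
            cancel_delayed_work_sync(&foo_dwork);
    }

Flushing ib_wq (or cancelling synchronously) only waits for InfiniBand work items, so nothing here depends on the global system workqueue being flushed.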
Diffstat (limited to 'drivers/infiniband/core')
 -rw-r--r--  drivers/infiniband/core/cache.c     |  4
 -rw-r--r--  drivers/infiniband/core/device.c    | 11
 -rw-r--r--  drivers/infiniband/core/sa_query.c  |  2
 -rw-r--r--  drivers/infiniband/core/umem.c      |  2
 4 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 68883565b725..f9ba7d74dfc0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device = event->device;
 			work->port_num = event->element.port_num;
-			schedule_work(&work->work);
+			queue_work(ib_wq, &work->work);
 		}
 	}
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
 	int p;
 
 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effad0811..f793bf2f5da7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@ struct ib_client_data {
 	void * data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
 	int ret;
 
+	ib_wq = alloc_workqueue("infiniband", 0, 0);
+	if (!ib_wq)
+		return -ENOMEM;
+
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
+		destroy_workqueue(ib_wq);
 	}
 
 	return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	flush_scheduled_work();
+	destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a660310b7c..e38be1bcc01c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);
 
-		schedule_work(&sa_dev->port[event->element.port_num -
+		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
 			      sa_dev->start_port].update_task);
 	}
 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186eee32..b645e558876f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
 			umem->mm = mm;
 			umem->diff = diff;
 
-			schedule_work(&umem->work);
+			queue_work(ib_wq, &umem->work);
 			return;
 		}
 	} else