author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-07 15:42:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-07 15:42:58 -0400
commit     3b7433b8a8a83c87972065b1852b7dcae691e464 (patch)
tree       93fa2c003f8baef5ab0733b53bac77961ed5240c /drivers/acpi
parent     4a386c3e177ca2fbc70c9283d0b46537844763a0 (diff)
parent     6ee0578b4daaea01c96b172c6aacca43fd9807a6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (55 commits)
workqueue: mark init_workqueues() as early_initcall()
workqueue: explain for_each_*cwq_cpu() iterators
fscache: fix build on !CONFIG_SYSCTL
slow-work: kill it
gfs2: use workqueue instead of slow-work
drm: use workqueue instead of slow-work
cifs: use workqueue instead of slow-work
fscache: drop references to slow-work
fscache: convert operation to use workqueue instead of slow-work
fscache: convert object to use workqueue instead of slow-work
workqueue: fix how cpu number is stored in work->data
workqueue: fix mayday_mask handling on UP
workqueue: fix build problem on !CONFIG_SMP
workqueue: fix locking in retry path of maybe_create_worker()
async: use workqueue for worker pool
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
workqueue: implement unbound workqueue
workqueue: prepare for WQ_UNBOUND implementation
libata: take advantage of cmwq and remove concurrency limitations
workqueue: fix worker management invocation without pending works
...
Fixed up conflicts in fs/cifs/* as per Tejun. Other trivial conflicts in
include/linux/workqueue.h, kernel/trace/Kconfig and kernel/workqueue.c
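
As background, the series kills slow-work, converts its users (fscache, gfs2, drm, cifs) to workqueues, and implements unbound workqueues (WQ_UNBOUND). The following is a minimal illustrative sketch of creating and using an unbound workqueue with the post-merge alloc_workqueue() interface; the queue name, callback, and setup function are made up for the example and are not code from this merge.

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Illustrative names only -- not from this merge. */
static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran on an unbound worker\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int example_setup(void)
{
	/*
	 * WQ_UNBOUND: work items are not tied to the submitting CPU;
	 * max_active = 0 asks for the default concurrency limit.
	 */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}
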
Diffstat (limited to 'drivers/acpi')
-rw-r--r--   drivers/acpi/osl.c | 40
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 78418ce4fc78..46cce391fa46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void)
 	return AE_OK;
 }
 
-static void bind_to_cpu0(struct work_struct *work)
-{
-	set_cpus_allowed_ptr(current, cpumask_of(0));
-	kfree(work);
-}
-
-static void bind_workqueue(struct workqueue_struct *wq)
-{
-	struct work_struct *work;
-
-	work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
-	INIT_WORK(work, bind_to_cpu0);
-	queue_work(wq, work);
-}
-
 acpi_status acpi_os_initialize1(void)
 {
-	/*
-	 * On some machines, a software-initiated SMI causes corruption unless
-	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
-	 * typically it's done in GPE-related methods that are run via
-	 * workqueues, so we can avoid the known corruption cases by binding
-	 * the workqueues to CPU 0.
-	 */
-	kacpid_wq = create_singlethread_workqueue("kacpid");
-	bind_workqueue(kacpid_wq);
-	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
-	bind_workqueue(kacpi_notify_wq);
-	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
-	bind_workqueue(kacpi_hotplug_wq);
+	kacpid_wq = create_workqueue("kacpid");
+	kacpi_notify_wq = create_workqueue("kacpi_notify");
+	kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
@@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	else
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 
-	ret = queue_work(queue, &dpc->work);
+	/*
+	 * On some machines, a software-initiated SMI causes corruption unless
+	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
+	 * typically it's done in GPE-related methods that are run via
+	 * workqueues, so we can avoid the known corruption cases by always
+	 * queueing on CPU 0.
+	 */
+	ret = queue_work_on(0, queue, &dpc->work);
 
 	if (!ret) {
 		printk(KERN_ERR PREFIX
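
The net effect in osl.c: instead of allocating a throwaway work item just to pin each workqueue's thread to CPU 0, the driver now asks for CPU 0 explicitly at queueing time. Below is a minimal self-contained sketch of that queue_work_on() pattern; the names (demo_wq, demo_fn, demo_queue_on_cpu0) are made up for illustration and are not from drivers/acpi/osl.c.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;	/* illustrative name */

static void demo_fn(struct work_struct *work)
{
	/* Runs on CPU 0 because the item was queued with queue_work_on(0, ...). */
	kfree(work);
}

static int demo_queue_on_cpu0(void)
{
	struct work_struct *w;

	demo_wq = create_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w) {
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}

	INIT_WORK(w, demo_fn);
	/* Pin this specific item to CPU 0, as __acpi_os_execute() now does. */
	queue_work_on(0, demo_wq, w);
	return 0;
}
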