author     Tejun Heo <tj@kernel.org>    2010-06-29 04:07:09 -0400
committer  Tejun Heo <tj@kernel.org>    2010-06-29 04:07:09 -0400
commit     8fec62b2d9d0c80b594d0d85678bfdf57a70df1b (patch)
tree       74dcc29170958b7ef29057e8e7e6e4a4cd059cdf /drivers/acpi/osl.c
parent     82805ab77d25643f579d90397dcd34f05d1b750a (diff)
acpi: use queue_work_on() instead of binding workqueue worker to cpu0
ACPI work items need to be executed on cpu0, and acpi/osl.c achieves this by creating singlethread workqueues and then binding them to cpu0 from a work item, which is quite unorthodox. Make it create regular workqueues and use queue_work_on() instead. This is in preparation for concurrency managed workqueue; the extra workers won't be a problem once it is implemented.

Signed-off-by: Tejun Heo <tj@kernel.org>
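For illustration only, here is a minimal sketch of the pattern the patch switches to, outside the commit itself: a regular workqueue whose work is pinned to CPU 0 at queueing time with queue_work_on(). The module scaffolding and all example_* names are hypothetical and not taken from drivers/acpi/osl.c.

/*
 * Minimal sketch only -- not part of the commit.  A hypothetical module
 * showing the new scheme: an ordinary workqueue plus queue_work_on(0, ...)
 * to keep the work on CPU 0.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct workqueue_struct *example_wq;

static void example_handler(struct work_struct *work)
{
	/* Queued with queue_work_on(0, ...), so this runs on CPU 0. */
	pr_info("example work on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_handler);

static int __init example_init(void)
{
	/* A regular (per-CPU) workqueue; no singlethread wq, no rebinding. */
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	/* The CPU is chosen per work item, at queueing time. */
	queue_work_on(0, example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* Flushes any pending work before tearing the workqueue down. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The design difference from the removed bind_workqueue()/bind_to_cpu0() helpers is that the CPU is chosen per work item when it is queued, rather than by migrating the workqueue's dedicated worker thread once at init time.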
Diffstat (limited to 'drivers/acpi/osl.c')
-rw-r--r--  drivers/acpi/osl.c | 40
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 78418ce4fc78..46cce391fa46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void)
 	return AE_OK;
 }
 
-static void bind_to_cpu0(struct work_struct *work)
-{
-	set_cpus_allowed_ptr(current, cpumask_of(0));
-	kfree(work);
-}
-
-static void bind_workqueue(struct workqueue_struct *wq)
-{
-	struct work_struct *work;
-
-	work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
-	INIT_WORK(work, bind_to_cpu0);
-	queue_work(wq, work);
-}
-
 acpi_status acpi_os_initialize1(void)
 {
-	/*
-	 * On some machines, a software-initiated SMI causes corruption unless
-	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
-	 * typically it's done in GPE-related methods that are run via
-	 * workqueues, so we can avoid the known corruption cases by binding
-	 * the workqueues to CPU 0.
-	 */
-	kacpid_wq = create_singlethread_workqueue("kacpid");
-	bind_workqueue(kacpid_wq);
-	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
-	bind_workqueue(kacpi_notify_wq);
-	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
-	bind_workqueue(kacpi_hotplug_wq);
+	kacpid_wq = create_workqueue("kacpid");
+	kacpi_notify_wq = create_workqueue("kacpi_notify");
+	kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
@@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	else
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 
-	ret = queue_work(queue, &dpc->work);
+	/*
+	 * On some machines, a software-initiated SMI causes corruption unless
+	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
+	 * typically it's done in GPE-related methods that are run via
+	 * workqueues, so we can avoid the known corruption cases by always
+	 * queueing on CPU 0.
+	 */
+	ret = queue_work_on(0, queue, &dpc->work);
 
 	if (!ret) {
 		printk(KERN_ERR PREFIX