path: root/drivers/acpi
author    Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>    2007-05-09 23:31:03 -0400
committer Len Brown <len.brown@intel.com>    2007-05-09 23:31:03 -0400
commit    88db5e1489f23876a226f5393fd978ddc09dc5f9 (patch)
tree      69e7d810ce0e575df1f4f9fa860bcd6f155591e6 /drivers/acpi
parent    262a7a28de060f3a63cae20035876d6f22fd7670 (diff)
ACPI: created a dedicated workqueue for notify() execution
HP nx6125/nx6325/... machines have a _GPE handler with an infinite loop sending Notify() events to different ACPI subsystems. The Notify handler in an ACPI driver is a C routine, which may call the ACPI interpreter again to get access to some ACPI variables (acpi_evaluate_xxx). On these HP machines such an evaluation changes the state of some variable and lets the loop above break.

In the current ACPI implementation, Notify requests are deferred to the same kacpid workqueue on which the above GPE handler with the infinite loop is executing. Thus we have a deadlock: the loop keeps spinning and sending notify events, while at the same time preventing those notify events from running on the workqueue. All notify events are deferred, so we see the increase in memory consumption noticed by the author of the thread. Also, as GPE handling is blocked, the machines overheat. Eventually, an external poll of the same acpi_evaluate releases kacpid, and all the queued notify events are then free to run, producing 100% CPU utilization by kacpid for several seconds or more.

To prevent all these horrors, notify events must be kept off the kacpid workqueue, either by executing them immediately or by putting them on some other thread. Executing notify events in place is dangerous, as it stacks several ACPI interpreter invocations on top of each other (at least 4 in the case of the nx6125), causing kernel stack overflow.

The first attempt to create a new thread was made by Peter Wainwright: he created a bunch of threads which stole work from the kacpid workqueue. This patch appeared in the 2.6.15 kernel shipped with Ubuntu 6.06 LTS.

The second attempt was made by me: I created a new thread for each Notify event. This worked fine on the HP nx machines, but broke Linus' Compaq n620c by spawning threads so fast that they stopped the machine completely. That patch was reverted from 18-rc2, as I remember.

I then remade the patch to create a second workqueue just for notify events, hoping it would not break Linus' machine. The patch was tested on the same HP nx machines in #5534 and #7122, but I did not receive a reply from Linus on the test patch sent to him. The patch went to 19-rc and was rejected, with much fanfare, again.

There was a 4th patch, which inserted schedule_timeout(1) into the deferred execution of kacpid if any notify requests were pending, but Linus decided it was too complex (it involved either changes to the workqueue code to check whether it is empty, or an atomic inc/dec).

What you see now is the last variant, which adds yield() to every GPE execution.

http://bugzilla.kernel.org/show_bug.cgi?id=5534
http://bugzilla.kernel.org/show_bug.cgi?id=8385

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
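For reference, the fix boils down to the sketch below (not the patch itself; all example_* identifiers and struct example_dpc are invented for illustration, and the real code is in drivers/acpi/osl.c in the diff that follows): Notify() work goes on its own single-threaded workqueue so that a GPE handler spinning on kacpid can never starve it, and the kacpid path reschedules after each work item so the notify thread gets a chance to run.

/*
 * Minimal sketch of the approach, assuming the 2.6.21-era workqueue API;
 * everything prefixed "example_" is hypothetical, not part of the patch.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dpc {                    /* mirrors osl.c's struct acpi_os_dpc */
        void (*function)(void *context);
        void *context;
        struct work_struct work;
};

static struct workqueue_struct *example_notify_wq;      /* dedicated notify queue */

static void example_notify_fn(struct work_struct *work)
{
        struct example_dpc *dpc = container_of(work, struct example_dpc, work);

        dpc->function(dpc->context);    /* run the driver's notify handler */
        kfree(dpc);
}

static void example_deferred_fn(struct work_struct *work)
{
        struct example_dpc *dpc = container_of(work, struct example_dpc, work);

        dpc->function(dpc->context);    /* may be the looping GPE handler */
        kfree(dpc);
        cond_resched();                 /* yield so queued notifies can run */
}

static int example_queue(struct workqueue_struct *wq, work_func_t fn,
                         void (*function)(void *), void *context)
{
        struct example_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

        if (!dpc)
                return -ENOMEM;
        dpc->function = function;
        dpc->context = context;
        INIT_WORK(&dpc->work, fn);
        if (!queue_work(wq, &dpc->work)) {      /* work already pending */
                kfree(dpc);
                return -EBUSY;
        }
        return 0;
}

static int __init example_init(void)
{
        /* a second single-threaded queue, separate from kacpid */
        example_notify_wq = create_singlethread_workqueue("example_notify");
        return example_notify_wq ? 0 : -ENOMEM;
}

Queueing notify work would then look like example_queue(example_notify_wq, example_notify_fn, handler, data), while ordinary deferred work would keep using kacpid with example_deferred_fn; the actual patch folds this dispatch into acpi_os_execute(), selecting the queue by the OSL_NOTIFY_HANDLER type, as the diff below shows.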
Diffstat (limited to 'drivers/acpi')
-rw-r--r--    drivers/acpi/osl.c    45
1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c2bed56915e1..b998340e23d4 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
+static struct workqueue_struct *kacpi_notify_wq;
 
 static void __init acpi_request_region (struct acpi_generic_address *addr,
 	unsigned int length, char *desc)
@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
 		return AE_NULL_ENTRY;
 	}
 	kacpid_wq = create_singlethread_workqueue("kacpid");
+	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
 	BUG_ON(!kacpid_wq);
-
+	BUG_ON(!kacpi_notify_wq);
 	return AE_OK;
 }
 
@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
 	}
 
 	destroy_workqueue(kacpid_wq);
+	destroy_workqueue(kacpi_notify_wq);
 
 	return AE_OK;
 }
@@ -603,6 +606,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
 static void acpi_os_execute_deferred(struct work_struct *work)
 {
 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+	if (!dpc) {
+		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+		return;
+	}
+
+	dpc->function(dpc->context);
+	kfree(dpc);
+
+	/* Yield cpu to notify thread */
+	cond_resched();
+
+	return;
+}
+
+static void acpi_os_execute_notify(struct work_struct *work)
+{
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
 
-	ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 			  "Scheduling function [%p(%p)] for deferred execution.\n",
 			  function, context));
 
 	if (!function)
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+		return AE_BAD_PARAMETER;
 
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 	dpc->function = function;
 	dpc->context = context;
 
-	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	if (!queue_work(kacpid_wq, &dpc->work)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+	if (type == OSL_NOTIFY_HANDLER) {
+		INIT_WORK(&dpc->work, acpi_os_execute_notify);
+		if (!queue_work(kacpi_notify_wq, &dpc->work)) {
+			status = AE_ERROR;
+			kfree(dpc);
+		}
+	} else {
+		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+		if (!queue_work(kacpid_wq, &dpc->work)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
-		kfree(dpc);
-		status = AE_ERROR;
+			status = AE_ERROR;
+			kfree(dpc);
+		}
 	}
-
 	return_ACPI_STATUS(status);
 }
 