author:     Bob Moore <robert.moore@intel.com>    2006-04-21 17:15:00 -0400
committer:  Len Brown <len.brown@intel.com>       2006-06-14 02:30:55 -0400
commit:     b229cf92eee616c7cb5ad8cdb35a19b119f00bc8 (patch)
tree:       74b52bec6ec029859c2320aba227290a503af31a /drivers/acpi/events/evgpe.c
parent:     793c2388cae3fd023b3b5166354931752d42353c (diff)
ACPI: ACPICA 20060421
Removed a device initialization optimization introduced in
20051216 where the _STA method was not run unless an _INI
was also present for the same device. This optimization
could cause problems because it could allow _INI methods
to be run within a not-present device subtree (if a
not-present device had no _INI, _STA would not be run,
the not-present status would not be discovered, and the
children of the device would be incorrectly traversed.)
Implemented a new _STA optimization where namespace
subtrees that do not contain _INI are identified and
ignored during device initialization. Selectively running
_STA can significantly improve boot time on large machines
(with assistance from Len Brown.)
Implemented support for the device initialization case
where the returned _STA flags indicate a device not-present
but functioning. In this case, _INI is not run, but the
device children are examined for presence, as per the
ACPI specification.
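As a rough illustration of the two initialization decisions
above, here is a standalone sketch (plain C, not the actual
ACPICA namespace walk; the helper names and output are invented,
only the _STA bit values follow the specification):

    #include <stdio.h>

    #define STA_DEVICE_PRESENT      0x01    /* _STA bit 0 */
    #define STA_DEVICE_FUNCTIONING  0x08    /* _STA bit 3 */

    static void init_device(const char *name, int subtree_has_ini,
                            unsigned int sta)
    {
            /* Optimization: if no _INI exists anywhere in this subtree,
             * there is nothing to initialize, so _STA is never run. */
            if (!subtree_has_ini) {
                    printf("%s: no _INI below, skip _STA and subtree\n",
                           name);
                    return;
            }

            if (sta & STA_DEVICE_PRESENT) {
                    printf("%s: present, run _INI, init children\n", name);
            } else if (sta & STA_DEVICE_FUNCTIONING) {
                    /* Not present but functioning: _INI is not run, but
                     * the children are still examined for presence. */
                    printf("%s: functioning only, skip _INI, "
                           "check children\n", name);
            } else {
                    /* Neither present nor functioning: ignore subtree. */
                    printf("%s: not present, ignore subtree\n", name);
            }
    }

    int main(void)
    {
            init_device("DEV0", 1, 0x0F);   /* present and functioning */
            init_device("DEV1", 1, 0x08);   /* not present, functioning */
            init_device("DEV2", 0, 0x0F);   /* no _INI anywhere below */
            return 0;
    }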
Implemented an additional change to the IndexField support
in order to conform to MS behavior. The value written to
the Index Register is not simply a byte offset, it is a
byte offset in units of the access width of the parent
Index Field. (Fiodor Suietov)
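A small standalone illustration of the arithmetic described
above (not the ACPICA implementation itself):

    #include <stdio.h>

    /* Value written to the Index register: the byte offset of the
     * field datum expressed in units of the parent IndexField's
     * access width. */
    static unsigned int index_register_value(unsigned int byte_offset,
                                             unsigned int access_byte_width)
    {
            return byte_offset / access_byte_width;
    }

    int main(void)
    {
            /* DWord (4-byte) access width, field datum at byte offset 8:
             * the value written is 2, not the raw byte offset 8. */
            printf("%u\n", index_register_value(8, 4));
            return 0;
    }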
Defined and deployed a new OSL interface,
acpi_os_validate_address(). This interface is called during
the creation of all AML operation regions, and allows
the host OS to exert control over what addresses it will
allow the AML code to access. Operation Regions whose
addresses are disallowed will cause a runtime exception
when they are actually accessed (will not affect or abort
table loading.)
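A rough sketch of how a host OS might provide this interface
follows; the three-argument prototype (space ID, address, length)
reflects my reading of the OSL definition, and the "protected
range" helper and policy are purely hypothetical:

    #include <acpi/acpi.h>  /* acpi_status, acpi_physical_address, AE_* */

    /* Hypothetical policy helper: does [address, address+length) overlap
     * a range this OS refuses to let AML touch?  Stubbed for the sketch. */
    static int range_is_protected(acpi_physical_address address,
                                  acpi_size length)
    {
            return 0;
    }

    acpi_status acpi_os_validate_address(u8 space_id,
                                         acpi_physical_address address,
                                         acpi_size length)
    {
            switch (space_id) {
            case ACPI_ADR_SPACE_SYSTEM_MEMORY:
            case ACPI_ADR_SPACE_SYSTEM_IO:
                    /* A disallowed region faults when accessed at
                     * runtime, not at table-load time, as noted above. */
                    if (range_is_protected(address, length))
                            return AE_AML_ILLEGAL_ADDRESS;
                    return AE_OK;
            default:
                    return AE_OK;
            }
    }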
Defined and deployed a new OSL interface,
acpi_os_validate_interface(). This interface allows the host OS
to match the various "optional" interface/behavior strings
for the _OSI predefined control method as appropriate
(with assistance from Bjorn Helgaas.)
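Similarly, a hedged sketch of a host-side implementation; as I
understand the semantics, returning AE_OK is what makes _OSI
answer TRUE for a string ACPICA does not already recognize, and
the interface string matched below is an invented example:

    #include <string.h>
    #include <acpi/acpi.h>  /* acpi_status, AE_OK, AE_SUPPORT */

    acpi_status acpi_os_validate_interface(char *interface)
    {
            /* Example only: claim support for one extra, hypothetical
             * behavior string beyond what ACPICA handles itself. */
            if (strcmp(interface, "ExampleVendor Extended Notify") == 0)
                    return AE_OK;

            /* Anything else is not claimed by this host. */
            return AE_SUPPORT;
    }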
Restructured and corrected various problems in the
exception handling code paths within DsCallControlMethod
and DsTerminateControlMethod in dsmethod (with assistance
from Takayoshi Kochi.)
Modified the Linux source converter to ignore quoted string
literals while converting identifiers from mixed to lower
case. This will correct problems with the disassembler
and other areas where such strings must not be modified.
The ACPI_FUNCTION_* macros no longer require quotes around
the function name. This allows the Linux source converter
to convert the names, now that the converter ignores
quoted strings.
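The effect on the source is visible in the evgpe.c diff below,
for example:

    -       ACPI_FUNCTION_TRACE("ev_enable_gpe");
    +       ACPI_FUNCTION_TRACE(ev_enable_gpe);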
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/acpi/events/evgpe.c'):
 drivers/acpi/events/evgpe.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 7d7b81af972e..25fd12a29a29 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -69,7 +69,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_set_gpe_type");
+        ACPI_FUNCTION_TRACE(ev_set_gpe_type);
 
         /* Validate type and update register enable masks */
 
@@ -115,7 +115,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
         struct acpi_gpe_register_info *gpe_register_info;
         u8 register_bit;
 
-        ACPI_FUNCTION_TRACE("ev_update_gpe_enable_masks");
+        ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
 
         gpe_register_info = gpe_event_info->register_info;
         if (!gpe_register_info) {
@@ -178,7 +178,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_enable_gpe");
+        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
         /* Make sure HW enable masks are updated */
 
@@ -244,7 +244,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_disable_gpe");
+        ACPI_FUNCTION_TRACE(ev_disable_gpe);
 
         if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
                 return_ACPI_STATUS(AE_OK);
@@ -385,7 +385,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
         acpi_native_uint i;
         acpi_native_uint j;
 
-        ACPI_FUNCTION_NAME("ev_gpe_detect");
+        ACPI_FUNCTION_NAME(ev_gpe_detect);
 
         /* Check for the case where there are no GPEs */
 
@@ -504,7 +504,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
         struct acpi_gpe_event_info local_gpe_event_info;
         struct acpi_parameter_info info;
 
-        ACPI_FUNCTION_TRACE("ev_asynch_execute_gpe_method");
+        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
         status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
         if (ACPI_FAILURE(status)) {
@@ -598,7 +598,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_gpe_dispatch");
+        ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
         /*
          * If edge-triggered, clear the GPE status bit now.  Note that
@@ -721,7 +721,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
  *
  * DESCRIPTION: Determine if a a GPE is "wake-only".
  *
- *              Called from Notify() code in interpreter when a "device_wake"
+ *              Called from Notify() code in interpreter when a "DeviceWake"
  *              Notify comes in.
  *
  ******************************************************************************/
@@ -731,7 +731,7 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_check_for_wake_only_gpe");
+        ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
 
         if ((gpe_event_info) &&        /* Only >0 for _Lxx/_Exx */
             ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) {        /* System state at GPE time */