Diffstat (limited to 'drivers/acpi/events/evgpe.c')
-rw-r--r-- | drivers/acpi/events/evgpe.c | 85
1 file changed, 56 insertions, 29 deletions
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index f64f977dd3d5..f01d339407f8 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -69,7 +69,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_set_gpe_type");
+        ACPI_FUNCTION_TRACE(ev_set_gpe_type);
 
         /* Validate type and update register enable masks */
 
@@ -115,7 +115,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
         struct acpi_gpe_register_info *gpe_register_info;
         u8 register_bit;
 
-        ACPI_FUNCTION_TRACE("ev_update_gpe_enable_masks");
+        ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
 
         gpe_register_info = gpe_event_info->register_info;
         if (!gpe_register_info) {
@@ -178,7 +178,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_enable_gpe");
+        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
         /* Make sure HW enable masks are updated */
 
@@ -207,6 +207,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
                 ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
 
                 if (write_to_hardware) {
+
                         /* Clear the GPE (of stale events), then enable it */
 
                         status = acpi_hw_clear_gpe(gpe_event_info);
@@ -243,7 +244,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_disable_gpe");
+        ACPI_FUNCTION_TRACE(ev_disable_gpe);
 
         if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
                 return_ACPI_STATUS(AE_OK);
@@ -313,6 +314,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
         /* A NULL gpe_block means use the FADT-defined GPE block(s) */
 
         if (!gpe_device) {
+
                 /* Examine GPE Block 0 and 1 (These blocks are permanent) */
 
                 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
@@ -380,10 +382,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
         u32 status_reg;
         u32 enable_reg;
         acpi_cpu_flags flags;
+        acpi_cpu_flags hw_flags;
         acpi_native_uint i;
         acpi_native_uint j;
 
-        ACPI_FUNCTION_NAME("ev_gpe_detect");
+        ACPI_FUNCTION_NAME(ev_gpe_detect);
 
         /* Check for the case where there are no GPEs */
 
@@ -391,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                 return (int_status);
         }
 
-        /* Examine all GPE blocks attached to this interrupt level */
+        /* We need to hold the GPE lock now, hardware lock in the loop */
 
         flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+        /* Examine all GPE blocks attached to this interrupt level */
+
         gpe_block = gpe_xrupt_list->gpe_block_list_head;
         while (gpe_block) {
                 /*
@@ -402,10 +408,13 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                  * Find all currently active GP events.
                  */
                 for (i = 0; i < gpe_block->register_count; i++) {
+
                         /* Get the next status/enable pair */
 
                         gpe_register_info = &gpe_block->register_info[i];
 
+                        hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
                         /* Read the Status Register */
 
                         status =
@@ -414,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                                                    &gpe_register_info->
                                                    status_address);
                         if (ACPI_FAILURE(status)) {
+                                acpi_os_release_lock(acpi_gbl_hardware_lock,
+                                                     hw_flags);
                                 goto unlock_and_exit;
                         }
 
@@ -424,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                                                    &enable_reg,
                                                    &gpe_register_info->
                                                    enable_address);
+                        acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
+
                         if (ACPI_FAILURE(status)) {
                                 goto unlock_and_exit;
                         }
@@ -437,6 +450,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 
                         enabled_status_byte = (u8) (status_reg & enable_reg);
                         if (!enabled_status_byte) {
+
                                 /* No active GPEs in this register, move on */
 
                                 continue;
@@ -445,6 +459,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                         /* Now look at the individual GPEs in this byte register */
 
                         for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
                                 /* Examine one GPE bit */
 
                                 if (enabled_status_byte &
@@ -483,9 +498,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
  *
  * RETURN:      None
  *
  * DESCRIPTION: Perform the actual execution of a GPE control method. This
- *              function is called from an invocation of acpi_os_queue_for_execution
- *              (and therefore does NOT execute at interrupt level) so that
+ *              function is called from an invocation of acpi_os_execute and
+ *              therefore does NOT execute at interrupt level - so that
  *              the control method itself is not executed in the context of
  *              an interrupt handler.
  *
@@ -494,12 +509,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
         struct acpi_gpe_event_info *gpe_event_info = (void *)context;
-        u32 gpe_number = 0;
         acpi_status status;
         struct acpi_gpe_event_info local_gpe_event_info;
-        struct acpi_parameter_info info;
+        struct acpi_evaluate_info *info;
 
-        ACPI_FUNCTION_TRACE("ev_asynch_execute_gpe_method");
+        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
         status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
         if (ACPI_FAILURE(status)) {
@@ -535,22 +549,35 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
          */
         if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
             ACPI_GPE_DISPATCH_METHOD) {
-                /*
-                 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
-                 * control method that corresponds to this GPE
-                 */
-                info.node = local_gpe_event_info.dispatch.method_node;
-                info.parameters =
-                    ACPI_CAST_PTR(union acpi_operand_object *, gpe_event_info);
-                info.parameter_type = ACPI_PARAM_GPE;
 
-                status = acpi_ns_evaluate_by_handle(&info);
+                /* Allocate the evaluation information block */
+
+                info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+                if (!info) {
+                        status = AE_NO_MEMORY;
+                } else {
+                        /*
+                         * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+                         * control method that corresponds to this GPE
+                         */
+                        info->prefix_node =
+                            local_gpe_event_info.dispatch.method_node;
+                        info->parameters =
+                            ACPI_CAST_PTR(union acpi_operand_object *,
+                                          gpe_event_info);
+                        info->parameter_type = ACPI_PARAM_GPE;
+                        info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+                        status = acpi_ns_evaluate(info);
+                        ACPI_FREE(info);
+                }
+
                 if (ACPI_FAILURE(status)) {
                         ACPI_EXCEPTION((AE_INFO, status,
-                                        "While evaluating method [%4.4s] for GPE[%2X]",
+                                        "While evaluating GPE method [%4.4s]",
                                         acpi_ut_get_node_name
                                         (local_gpe_event_info.dispatch.
-                                         method_node), gpe_number));
+                                         method_node)));
                 }
         }
 
@@ -593,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_gpe_dispatch");
+        ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
         /*
          * If edge-triggered, clear the GPE status bit now. Note that
@@ -669,9 +696,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
          * Execute the method associated with the GPE
          * NOTE: Level-triggered GPEs are cleared after the method completes.
          */
-        status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
-                                             acpi_ev_asynch_execute_gpe_method,
-                                             gpe_event_info);
+        status = acpi_os_execute(OSL_GPE_HANDLER,
+                                 acpi_ev_asynch_execute_gpe_method,
+                                 gpe_event_info);
         if (ACPI_FAILURE(status)) {
                 ACPI_EXCEPTION((AE_INFO, status,
                                 "Unable to queue handler for GPE[%2X] - event disabled",
@@ -716,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
  *
  * DESCRIPTION: Determine if a a GPE is "wake-only".
  *
- *              Called from Notify() code in interpreter when a "device_wake"
+ *              Called from Notify() code in interpreter when a "DeviceWake"
  *              Notify comes in.
  *
  ******************************************************************************/
@@ -726,7 +753,7 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
         acpi_status status;
 
-        ACPI_FUNCTION_TRACE("ev_check_for_wake_only_gpe");
+        ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
 
         if ((gpe_event_info) &&        /* Only >0 for _Lxx/_Exx */
             ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) {        /* System state at GPE time */