diff options
| author | Lv Zheng <lv.zheng@intel.com> | 2015-02-05 02:20:01 -0500 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-02-05 09:31:37 -0500 |
| commit | b18da580343b88fa33bbba8a7f48392447bc6cbf (patch) | |
| tree | e2978df7f802a21e5df6c871aeccefbda9ce2f35 | |
| parent | 833bb9316a3b4644b2671317ec4889e343ae5d42 (diff) | |
ACPICA: Events: Remove acpi_ev_valid_gpe_event() due to current restriction
ACPICA commit 8823b44ff53859ab24ecfcfd3fba8cc56b17d223
Currently we rely on the logic that GPE blocks will never be deleted,
otherwise we can be broken by the race between acpi_ev_create_gpe_block(),
acpi_ev_delete_gpe_block() and acpi_ev_gpe_detect().
On the other hand, if we want to protect GPE block creation/deletion, we
need to use a different synchronization facility to protect the period
between acpi_ev_gpe_dispatch() and acpi_ev_asynch_enable_gpe(), which
leaves us no choice but to abandon the ACPI_MTX_EVENTS used during this period.
This patch removes the ACPI_MTX_EVENTS usage during this period and the
acpi_ev_valid_gpe_event() function to reflect the current restriction. Lv Zheng.
Link: https://github.com/acpica/acpica/commit/8823b44f
Signed-off-by: Lv Zheng <lv.zheng@intel.com>
Signed-off-by: David E. Box <david.e.box@linux.intel.com>
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
| -rw-r--r-- | drivers/acpi/acpica/acevents.h | 2 | ||||
| -rw-r--r-- | drivers/acpi/acpica/evgpe.c | 62 | ||||
| -rw-r--r-- | drivers/acpi/acpica/evgpeutil.c | 51 |
3 files changed, 15 insertions, 100 deletions
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 7a7811a9fc26..38b94a1a5cf8 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
| @@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, | |||
| 143 | acpi_status | 143 | acpi_status |
| 144 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); | 144 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); |
| 145 | 145 | ||
| 146 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); | ||
| 147 | |||
| 148 | acpi_status | 146 | acpi_status |
| 149 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 147 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
| 150 | struct acpi_gpe_block_info *gpe_block, void *context); | 148 | struct acpi_gpe_block_info *gpe_block, void *context); |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index b2e63e785719..ddee96733403 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -474,51 +474,14 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 474 | { | 474 | { |
| 475 | struct acpi_gpe_event_info *gpe_event_info = context; | 475 | struct acpi_gpe_event_info *gpe_event_info = context; |
| 476 | acpi_status status; | 476 | acpi_status status; |
| 477 | struct acpi_gpe_event_info *local_gpe_event_info; | ||
| 478 | struct acpi_evaluate_info *info; | 477 | struct acpi_evaluate_info *info; |
| 479 | struct acpi_gpe_notify_info *notify; | 478 | struct acpi_gpe_notify_info *notify; |
| 480 | 479 | ||
| 481 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 480 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
| 482 | 481 | ||
| 483 | /* Allocate a local GPE block */ | ||
| 484 | |||
| 485 | local_gpe_event_info = | ||
| 486 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); | ||
| 487 | if (!local_gpe_event_info) { | ||
| 488 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); | ||
| 489 | return_VOID; | ||
| 490 | } | ||
| 491 | |||
| 492 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
| 493 | if (ACPI_FAILURE(status)) { | ||
| 494 | ACPI_FREE(local_gpe_event_info); | ||
| 495 | return_VOID; | ||
| 496 | } | ||
| 497 | |||
| 498 | /* Must revalidate the gpe_number/gpe_block */ | ||
| 499 | |||
| 500 | if (!acpi_ev_valid_gpe_event(gpe_event_info)) { | ||
| 501 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 502 | ACPI_FREE(local_gpe_event_info); | ||
| 503 | return_VOID; | ||
| 504 | } | ||
| 505 | |||
| 506 | /* | ||
| 507 | * Take a snapshot of the GPE info for this level - we copy the info to | ||
| 508 | * prevent a race condition with remove_handler/remove_block. | ||
| 509 | */ | ||
| 510 | ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, | ||
| 511 | sizeof(struct acpi_gpe_event_info)); | ||
| 512 | |||
| 513 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
| 514 | if (ACPI_FAILURE(status)) { | ||
| 515 | ACPI_FREE(local_gpe_event_info); | ||
| 516 | return_VOID; | ||
| 517 | } | ||
| 518 | |||
| 519 | /* Do the correct dispatch - normal method or implicit notify */ | 482 | /* Do the correct dispatch - normal method or implicit notify */ |
| 520 | 483 | ||
| 521 | switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | 484 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { |
| 522 | case ACPI_GPE_DISPATCH_NOTIFY: | 485 | case ACPI_GPE_DISPATCH_NOTIFY: |
| 523 | /* | 486 | /* |
| 524 | * Implicit notify. | 487 | * Implicit notify. |
| @@ -531,7 +494,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 531 | * June 2012: Expand implicit notify mechanism to support | 494 | * June 2012: Expand implicit notify mechanism to support |
| 532 | * notifies on multiple device objects. | 495 | * notifies on multiple device objects. |
| 533 | */ | 496 | */ |
| 534 | notify = local_gpe_event_info->dispatch.notify_list; | 497 | notify = gpe_event_info->dispatch.notify_list; |
| 535 | while (ACPI_SUCCESS(status) && notify) { | 498 | while (ACPI_SUCCESS(status) && notify) { |
| 536 | status = | 499 | status = |
| 537 | acpi_ev_queue_notify_request(notify->device_node, | 500 | acpi_ev_queue_notify_request(notify->device_node, |
| @@ -555,7 +518,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 555 | * _Lxx/_Exx control method that corresponds to this GPE | 518 | * _Lxx/_Exx control method that corresponds to this GPE |
| 556 | */ | 519 | */ |
| 557 | info->prefix_node = | 520 | info->prefix_node = |
| 558 | local_gpe_event_info->dispatch.method_node; | 521 | gpe_event_info->dispatch.method_node; |
| 559 | info->flags = ACPI_IGNORE_RETURN_VALUE; | 522 | info->flags = ACPI_IGNORE_RETURN_VALUE; |
| 560 | 523 | ||
| 561 | status = acpi_ns_evaluate(info); | 524 | status = acpi_ns_evaluate(info); |
| @@ -565,25 +528,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 565 | if (ACPI_FAILURE(status)) { | 528 | if (ACPI_FAILURE(status)) { |
| 566 | ACPI_EXCEPTION((AE_INFO, status, | 529 | ACPI_EXCEPTION((AE_INFO, status, |
| 567 | "while evaluating GPE method [%4.4s]", | 530 | "while evaluating GPE method [%4.4s]", |
| 568 | acpi_ut_get_node_name | 531 | acpi_ut_get_node_name(gpe_event_info-> |
| 569 | (local_gpe_event_info->dispatch. | 532 | dispatch. |
| 570 | method_node))); | 533 | method_node))); |
| 571 | } | 534 | } |
| 572 | break; | 535 | break; |
| 573 | 536 | ||
| 574 | default: | 537 | default: |
| 575 | 538 | ||
| 576 | return_VOID; /* Should never happen */ | 539 | goto error_exit; /* Should never happen */ |
| 577 | } | 540 | } |
| 578 | 541 | ||
| 579 | /* Defer enabling of GPE until all notify handlers are done */ | 542 | /* Defer enabling of GPE until all notify handlers are done */ |
| 580 | 543 | ||
| 581 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, | 544 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, |
| 582 | acpi_ev_asynch_enable_gpe, | 545 | acpi_ev_asynch_enable_gpe, gpe_event_info); |
| 583 | local_gpe_event_info); | 546 | if (ACPI_SUCCESS(status)) { |
| 584 | if (ACPI_FAILURE(status)) { | 547 | return_VOID; |
| 585 | ACPI_FREE(local_gpe_event_info); | ||
| 586 | } | 548 | } |
| 549 | |||
| 550 | error_exit: | ||
| 551 | acpi_ev_asynch_enable_gpe(gpe_event_info); | ||
| 587 | return_VOID; | 552 | return_VOID; |
| 588 | } | 553 | } |
| 589 | 554 | ||
| @@ -611,7 +576,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) | |||
| 611 | (void)acpi_ev_finish_gpe(gpe_event_info); | 576 | (void)acpi_ev_finish_gpe(gpe_event_info); |
| 612 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 577 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
| 613 | 578 | ||
| 614 | ACPI_FREE(gpe_event_info); | ||
| 615 | return; | 579 | return; |
| 616 | } | 580 | } |
| 617 | 581 | ||
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 9e2b94c8f7c4..3f1c5aa682a5 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
| @@ -108,53 +108,6 @@ unlock_and_exit: | |||
| 108 | 108 | ||
| 109 | /******************************************************************************* | 109 | /******************************************************************************* |
| 110 | * | 110 | * |
| 111 | * FUNCTION: acpi_ev_valid_gpe_event | ||
| 112 | * | ||
| 113 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
| 114 | * | ||
| 115 | * RETURN: TRUE if the gpe_event is valid | ||
| 116 | * | ||
| 117 | * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. | ||
| 118 | * Should be called only when the GPE lists are semaphore locked | ||
| 119 | * and not subject to change. | ||
| 120 | * | ||
| 121 | ******************************************************************************/ | ||
| 122 | |||
| 123 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | ||
| 124 | { | ||
| 125 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
| 126 | struct acpi_gpe_block_info *gpe_block; | ||
| 127 | |||
| 128 | ACPI_FUNCTION_ENTRY(); | ||
| 129 | |||
| 130 | /* No need for spin lock since we are not changing any list elements */ | ||
| 131 | |||
| 132 | /* Walk the GPE interrupt levels */ | ||
| 133 | |||
| 134 | gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; | ||
| 135 | while (gpe_xrupt_block) { | ||
| 136 | gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
| 137 | |||
| 138 | /* Walk the GPE blocks on this interrupt level */ | ||
| 139 | |||
| 140 | while (gpe_block) { | ||
| 141 | if ((&gpe_block->event_info[0] <= gpe_event_info) && | ||
| 142 | (&gpe_block->event_info[gpe_block->gpe_count] > | ||
| 143 | gpe_event_info)) { | ||
| 144 | return (TRUE); | ||
| 145 | } | ||
| 146 | |||
| 147 | gpe_block = gpe_block->next; | ||
| 148 | } | ||
| 149 | |||
| 150 | gpe_xrupt_block = gpe_xrupt_block->next; | ||
| 151 | } | ||
| 152 | |||
| 153 | return (FALSE); | ||
| 154 | } | ||
| 155 | |||
| 156 | /******************************************************************************* | ||
| 157 | * | ||
| 158 | * FUNCTION: acpi_ev_get_gpe_device | 111 | * FUNCTION: acpi_ev_get_gpe_device |
| 159 | * | 112 | * |
| 160 | * PARAMETERS: GPE_WALK_CALLBACK | 113 | * PARAMETERS: GPE_WALK_CALLBACK |
| @@ -167,8 +120,8 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | |||
| 167 | ******************************************************************************/ | 120 | ******************************************************************************/ |
| 168 | 121 | ||
| 169 | acpi_status | 122 | acpi_status |
| 170 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info * gpe_xrupt_info, | 123 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
| 171 | struct acpi_gpe_block_info * gpe_block, void *context) | 124 | struct acpi_gpe_block_info *gpe_block, void *context) |
| 172 | { | 125 | { |
| 173 | struct acpi_gpe_device_info *info = context; | 126 | struct acpi_gpe_device_info *info = context; |
| 174 | 127 | ||
