Diffstat (limited to 'drivers/acpi/events')
-rw-r--r--  drivers/acpi/events/evevent.c   |  17
-rw-r--r--  drivers/acpi/events/evgpe.c     |  91
-rw-r--r--  drivers/acpi/events/evgpeblk.c  |  64
-rw-r--r--  drivers/acpi/events/evmisc.c    | 201
-rw-r--r--  drivers/acpi/events/evregion.c  |  17
-rw-r--r--  drivers/acpi/events/evrgnini.c  | 168
-rw-r--r--  drivers/acpi/events/evsci.c     |  14
-rw-r--r--  drivers/acpi/events/evxface.c   |   8
-rw-r--r--  drivers/acpi/events/evxfevnt.c  |  27
-rw-r--r--  drivers/acpi/events/evxfregn.c  |   2
10 files changed, 336 insertions, 273 deletions
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 919037d6acff..a1f87b5def2a 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_initialize_events); 71 ACPI_FUNCTION_TRACE(ev_initialize_events);
72 72
73 /* Make sure we have ACPI tables */
74
75 if (!acpi_gbl_DSDT) {
76 ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
77 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
78 }
79
80 /* 73 /*
81 * Initialize the Fixed and General Purpose Events. This is done prior to 74 * Initialize the Fixed and General Purpose Events. This is done prior to
82 * enabling SCIs to prevent interrupts from occurring before the handlers are 75 * enabling SCIs to prevent interrupts from occurring before the handlers are
@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
211 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { 204 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
212 status = 205 status =
213 acpi_set_register(acpi_gbl_fixed_event_info[i]. 206 acpi_set_register(acpi_gbl_fixed_event_info[i].
214 enable_register_id, 0, 207 enable_register_id, 0);
215 ACPI_MTX_LOCK);
216 if (ACPI_FAILURE(status)) { 208 if (ACPI_FAILURE(status)) {
217 return (status); 209 return (status);
218 } 210 }
@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
298 /* Clear the status bit */ 290 /* Clear the status bit */
299 291
300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 292 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
301 status_register_id, 1, ACPI_MTX_DO_NOT_LOCK); 293 status_register_id, 1);
302 294
303 /* 295 /*
304 * Make sure we've got a handler. If not, report an error. 296 * Make sure we've got a handler. If not, report an error.
@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
306 */ 298 */
307 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 299 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
308 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
309 enable_register_id, 0, 301 enable_register_id, 0);
310 ACPI_MTX_DO_NOT_LOCK);
311 302
312 ACPI_ERROR((AE_INFO, 303 ACPI_ERROR((AE_INFO,
313 "No installed handler for fixed event [%08X]", 304 "No installed handler for fixed event [%08X]",
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index c76c0583ca6a..dfac3ecc596e 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
121 if (!gpe_register_info) { 121 if (!gpe_register_info) {
122 return_ACPI_STATUS(AE_NOT_EXIST); 122 return_ACPI_STATUS(AE_NOT_EXIST);
123 } 123 }
124 register_bit = gpe_event_info->register_bit; 124 register_bit = (u8)
125 (1 <<
126 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
125 127
126 /* 1) Disable case. Simply clear all enable bits */ 128 /* 1) Disable case. Simply clear all enable bits */
127 129
@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
458 460
459 /* Examine one GPE bit */ 461 /* Examine one GPE bit */
460 462
461 if (enabled_status_byte & 463 if (enabled_status_byte & (1 << j)) {
462 acpi_gbl_decode_to8bit[j]) {
463 /* 464 /*
464 * Found an active GPE. Dispatch the event to a handler 465 * Found an active GPE. Dispatch the event to a handler
465 * or method. 466 * or method.
@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
570 571
571 if (ACPI_FAILURE(status)) { 572 if (ACPI_FAILURE(status)) {
572 ACPI_EXCEPTION((AE_INFO, status, 573 ACPI_EXCEPTION((AE_INFO, status,
573 "While evaluating GPE method [%4.4s]", 574 "while evaluating GPE method [%4.4s]",
574 acpi_ut_get_node_name 575 acpi_ut_get_node_name
575 (local_gpe_event_info.dispatch. 576 (local_gpe_event_info.dispatch.
576 method_node))); 577 method_node)));
@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
618 619
619 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 620 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
620 621
622 acpi_gpe_count++;
623
621 /* 624 /*
622 * If edge-triggered, clear the GPE status bit now. Note that 625 * If edge-triggered, clear the GPE status bit now. Note that
623 * level-triggered events are cleared after the GPE is serviced. 626 * level-triggered events are cleared after the GPE is serviced.
@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
633 } 636 }
634 } 637 }
635 638
636 /* Save current system state */ 639 if (!acpi_gbl_system_awake_and_running) {
637 640 /*
638 if (acpi_gbl_system_awake_and_running) { 641 * We just woke up because of a wake GPE. Disable any further GPEs
639 ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 642 * until we are fully up and running (Only wake GPEs should be enabled
640 } else { 643 * at this time, but we just brute-force disable them all.)
641 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 644 * 1) We must disable this particular wake GPE so it won't fire again
645 * 2) We want to disable all wake GPEs, since we are now awake
646 */
647 (void)acpi_hw_disable_all_gpes();
642 } 648 }
643 649
644 /* 650 /*
645 * Dispatch the GPE to either an installed handler, or the control 651 * Dispatch the GPE to either an installed handler, or the control method
646 * method associated with this GPE (_Lxx or _Exx). 652 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
647 * If a handler exists, we invoke it and do not attempt to run the method. 653 * it and do not attempt to run the method. If there is neither a handler
648 * If there is neither a handler nor a method, we disable the level to 654 * nor a method, we disable this GPE to prevent further such pointless
649 * prevent further events from coming in here. 655 * events from firing.
650 */ 656 */
651 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 657 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
652 case ACPI_GPE_DISPATCH_HANDLER: 658 case ACPI_GPE_DISPATCH_HANDLER:
@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
677 case ACPI_GPE_DISPATCH_METHOD: 683 case ACPI_GPE_DISPATCH_METHOD:
678 684
679 /* 685 /*
680 * Disable GPE, so it doesn't keep firing before the method has a 686 * Disable the GPE, so it doesn't keep firing before the method has a
681 * chance to run. 687 * chance to run (it runs asynchronously with interrupts enabled).
682 */ 688 */
683 status = acpi_ev_disable_gpe(gpe_event_info); 689 status = acpi_ev_disable_gpe(gpe_event_info);
684 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
@@ -711,7 +717,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
711 gpe_number)); 717 gpe_number));
712 718
713 /* 719 /*
714 * Disable the GPE. The GPE will remain disabled until the ACPI 720 * Disable the GPE. The GPE will remain disabled until the ACPI
715 * Core Subsystem is restarted, or a handler is installed. 721 * Core Subsystem is restarted, or a handler is installed.
716 */ 722 */
717 status = acpi_ev_disable_gpe(gpe_event_info); 723 status = acpi_ev_disable_gpe(gpe_event_info);
@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
726 732
727 return_UINT32(ACPI_INTERRUPT_HANDLED); 733 return_UINT32(ACPI_INTERRUPT_HANDLED);
728} 734}
729
730#ifdef ACPI_GPE_NOTIFY_CHECK
731/*******************************************************************************
732 * TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
733 *
734 * FUNCTION: acpi_ev_check_for_wake_only_gpe
735 *
736 * PARAMETERS: gpe_event_info - info for this GPE
737 *
738 * RETURN: Status
739 *
740 * DESCRIPTION: Determine if a a GPE is "wake-only".
741 *
742 * Called from Notify() code in interpreter when a "DeviceWake"
743 * Notify comes in.
744 *
745 ******************************************************************************/
746
747acpi_status
748acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
749{
750 acpi_status status;
751
752 ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
753
754 if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
755 ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
756 /* This must be a wake-only GPE, disable it */
757
758 status = acpi_ev_disable_gpe(gpe_event_info);
759
760 /* Set GPE to wake-only. Do not change wake disabled/enabled status */
761
762 acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
763
764 ACPI_INFO((AE_INFO,
765 "GPE %p was updated from wake/run to wake-only",
766 gpe_event_info));
767
768 /* This was a wake-only GPE */
769
770 return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
771 }
772
773 return_ACPI_STATUS(AE_OK);
774}
775#endif
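
Note: the evgpe.c hunks above drop the acpi_gbl_decode_to8bit[] lookup table and derive a GPE's register bit directly from its number relative to the register's base GPE. Below is a minimal stand-alone sketch of that derivation; the function and variable names are simplified stand-ins for illustration, not the real ACPICA structures.

#include <stdio.h>

/*
 * Sketch: compute the bit mask for a GPE from its number and the base GPE
 * number of the register that contains it, as the new code does, instead of
 * indexing a decode table.
 */
static unsigned char gpe_register_bit(unsigned int gpe_number,
				      unsigned int base_gpe_number)
{
	return (unsigned char)(1 << (gpe_number - base_gpe_number));
}

int main(void)
{
	/* Example: GPE 0x13 in a register whose base GPE is 0x10 -> bit 3 (0x08) */
	unsigned char mask = gpe_register_bit(0x13, 0x10);
	unsigned char enabled_status_byte = 0x0C;	/* example status AND enable bits */

	if (enabled_status_byte & mask)
		printf("GPE 0x13 is active (mask 0x%02X)\n", mask);
	return 0;
}
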
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 95ddeb48bc0f..ad5bc76edf46 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
529 529
530 /* Install new interrupt handler if not SCI_INT */ 530 /* Install new interrupt handler if not SCI_INT */
531 531
532 if (interrupt_number != acpi_gbl_FADT->sci_int) { 532 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
533 status = acpi_os_install_interrupt_handler(interrupt_number, 533 status = acpi_os_install_interrupt_handler(interrupt_number,
534 acpi_ev_gpe_xrupt_handler, 534 acpi_ev_gpe_xrupt_handler,
535 gpe_xrupt); 535 gpe_xrupt);
@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
567 567
568 /* We never want to remove the SCI interrupt handler */ 568 /* We never want to remove the SCI interrupt handler */
569 569
570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) { 570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
571 gpe_xrupt->gpe_block_list_head = NULL; 571 gpe_xrupt->gpe_block_list_head = NULL;
572 return_ACPI_STATUS(AE_OK); 572 return_ACPI_STATUS(AE_OK);
573 } 573 }
@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
796 (u8) (gpe_block->block_base_number + 796 (u8) (gpe_block->block_base_number +
797 (i * ACPI_GPE_REGISTER_WIDTH)); 797 (i * ACPI_GPE_REGISTER_WIDTH));
798 798
799 ACPI_STORE_ADDRESS(this_register->status_address.address, 799 this_register->status_address.address =
800 (gpe_block->block_address.address + i)); 800 gpe_block->block_address.address + i;
801 801
802 ACPI_STORE_ADDRESS(this_register->enable_address.address, 802 this_register->enable_address.address =
803 (gpe_block->block_address.address 803 gpe_block->block_address.address + i +
804 + i + gpe_block->register_count)); 804 gpe_block->register_count;
805 805
806 this_register->status_address.address_space_id = 806 this_register->status_address.space_id =
807 gpe_block->block_address.address_space_id; 807 gpe_block->block_address.space_id;
808 this_register->enable_address.address_space_id = 808 this_register->enable_address.space_id =
809 gpe_block->block_address.address_space_id; 809 gpe_block->block_address.space_id;
810 this_register->status_address.register_bit_width = 810 this_register->status_address.bit_width =
811 ACPI_GPE_REGISTER_WIDTH; 811 ACPI_GPE_REGISTER_WIDTH;
812 this_register->enable_address.register_bit_width = 812 this_register->enable_address.bit_width =
813 ACPI_GPE_REGISTER_WIDTH; 813 ACPI_GPE_REGISTER_WIDTH;
814 this_register->status_address.register_bit_offset = 814 this_register->status_address.bit_offset =
815 ACPI_GPE_REGISTER_WIDTH; 815 ACPI_GPE_REGISTER_WIDTH;
816 this_register->enable_address.register_bit_offset = 816 this_register->enable_address.bit_offset =
817 ACPI_GPE_REGISTER_WIDTH; 817 ACPI_GPE_REGISTER_WIDTH;
818 818
819 /* Init the event_info for each GPE within this register */ 819 /* Init the event_info for each GPE within this register */
820 820
821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
822 this_event->register_bit = acpi_gbl_decode_to8bit[j]; 822 this_event->gpe_number =
823 (u8) (this_register->base_gpe_number + j);
823 this_event->register_info = this_register; 824 this_event->register_info = this_register;
824 this_event++; 825 this_event++;
825 } 826 }
@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
1109 * If EITHER the register length OR the block address are zero, then that 1110 * If EITHER the register length OR the block address are zero, then that
1110 * particular block is not supported. 1111 * particular block is not supported.
1111 */ 1112 */
1112 if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) { 1113 if (acpi_gbl_FADT.gpe0_block_length &&
1114 acpi_gbl_FADT.xgpe0_block.address) {
1113 1115
1114 /* GPE block 0 exists (has both length and address > 0) */ 1116 /* GPE block 0 exists (has both length and address > 0) */
1115 1117
1116 register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); 1118 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1117 1119
1118 gpe_number_max = 1120 gpe_number_max =
1119 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; 1121 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
1121 /* Install GPE Block 0 */ 1123 /* Install GPE Block 0 */
1122 1124
1123 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1125 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1124 &acpi_gbl_FADT->xgpe0_blk, 1126 &acpi_gbl_FADT.xgpe0_block,
1125 register_count0, 0, 1127 register_count0, 0,
1126 acpi_gbl_FADT->sci_int, 1128 acpi_gbl_FADT.sci_interrupt,
1127 &acpi_gbl_gpe_fadt_blocks[0]); 1129 &acpi_gbl_gpe_fadt_blocks[0]);
1128 1130
1129 if (ACPI_FAILURE(status)) { 1131 if (ACPI_FAILURE(status)) {
@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) { 1137 if (acpi_gbl_FADT.gpe1_block_length &&
1138 acpi_gbl_FADT.xgpe1_block.address) {
1136 1139
1137 /* GPE block 1 exists (has both length and address > 0) */ 1140 /* GPE block 1 exists (has both length and address > 0) */
1138 1141
1139 register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); 1142 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1140 1143
1141 /* Check for GPE0/GPE1 overlap (if both banks exist) */ 1144 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1142 1145
1143 if ((register_count0) && 1146 if ((register_count0) &&
1144 (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { 1147 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1145 ACPI_ERROR((AE_INFO, 1148 ACPI_ERROR((AE_INFO,
1146 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", 1149 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1147 gpe_number_max, acpi_gbl_FADT->gpe1_base, 1150 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1148 acpi_gbl_FADT->gpe1_base + 1151 acpi_gbl_FADT.gpe1_base +
1149 ((register_count1 * 1152 ((register_count1 *
1150 ACPI_GPE_REGISTER_WIDTH) - 1))); 1153 ACPI_GPE_REGISTER_WIDTH) - 1)));
1151 1154
@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
1157 1160
1158 status = 1161 status =
1159 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1162 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1160 &acpi_gbl_FADT->xgpe1_blk, 1163 &acpi_gbl_FADT.xgpe1_block,
1161 register_count1, 1164 register_count1,
1162 acpi_gbl_FADT->gpe1_base, 1165 acpi_gbl_FADT.gpe1_base,
1163 acpi_gbl_FADT->sci_int, 1166 acpi_gbl_FADT.
1167 sci_interrupt,
1164 &acpi_gbl_gpe_fadt_blocks 1168 &acpi_gbl_gpe_fadt_blocks
1165 [1]); 1169 [1]);
1166 1170
@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
1173 * GPE0 and GPE1 do not have to be contiguous in the GPE number 1177 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1174 * space. However, GPE0 always starts at GPE number zero. 1178 * space. However, GPE0 always starts at GPE number zero.
1175 */ 1179 */
1176 gpe_number_max = acpi_gbl_FADT->gpe1_base + 1180 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1177 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); 1181 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1178 } 1182 }
1179 } 1183 }
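
For reference, the GPE block sizing in acpi_ev_gpe_initialize() above halves the FADT block length (status registers followed by enable registers) and multiplies by the eight GPEs each register covers. A small sketch of that arithmetic with made-up example values, not data from a real FADT:

#include <stdio.h>

#define ACPI_GPE_REGISTER_WIDTH  8	/* GPEs per status/enable register */

int main(void)
{
	unsigned int gpe0_block_length = 8;	/* example: 4 status + 4 enable bytes */
	unsigned int register_count0 = gpe0_block_length / 2;
	unsigned int gpe_number_max =
	    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

	printf("GPE block 0: %u registers, GPE 0 through %u\n",
	       register_count0, gpe_number_max);
	return 0;
}
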
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index bf63edc6608d..1b784ffe54c3 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,18 @@ static const char *acpi_notify_value_names[] = {
63}; 63};
64#endif 64#endif
65 65
66/* Pointer to FACS needed for the Global Lock */
67
68static struct acpi_table_facs *facs = NULL;
69
66/* Local prototypes */ 70/* Local prototypes */
67 71
68static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 72static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
69 73
70static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context);
71
72static u32 acpi_ev_global_lock_handler(void *context); 74static u32 acpi_ev_global_lock_handler(void *context);
73 75
76static acpi_status acpi_ev_remove_global_lock_handler(void);
77
74/******************************************************************************* 78/*******************************************************************************
75 * 79 *
76 * FUNCTION: acpi_ev_is_notify_object 80 * FUNCTION: acpi_ev_is_notify_object
@@ -282,49 +286,19 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
282 286
283/******************************************************************************* 287/*******************************************************************************
284 * 288 *
285 * FUNCTION: acpi_ev_global_lock_thread
286 *
287 * PARAMETERS: Context - From thread interface, not used
288 *
289 * RETURN: None
290 *
291 * DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the
292 * Global Lock. Simply signal all threads that are waiting
293 * for the lock.
294 *
295 ******************************************************************************/
296
297static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
298{
299 acpi_status status;
300
301 /* Signal threads that are waiting for the lock */
302
303 if (acpi_gbl_global_lock_thread_count) {
304
305 /* Send sufficient units to the semaphore */
306
307 status =
308 acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
309 acpi_gbl_global_lock_thread_count);
310 if (ACPI_FAILURE(status)) {
311 ACPI_ERROR((AE_INFO,
312 "Could not signal Global Lock semaphore"));
313 }
314 }
315}
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_ev_global_lock_handler 289 * FUNCTION: acpi_ev_global_lock_handler
320 * 290 *
321 * PARAMETERS: Context - From thread interface, not used 291 * PARAMETERS: Context - From thread interface, not used
322 * 292 *
323 * RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED 293 * RETURN: ACPI_INTERRUPT_HANDLED
324 * 294 *
325 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 295 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
326 * release interrupt occurs. Grab the global lock and queue 296 * release interrupt occurs. Attempt to acquire the global lock,
327 * the global lock thread for execution 297 * if successful, signal the thread waiting for the lock.
298 *
299 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
300 * this is not possible for some reason, a separate thread will have to be
301 * scheduled to do this.
328 * 302 *
329 ******************************************************************************/ 303 ******************************************************************************/
330 304
@@ -333,16 +307,24 @@ static u32 acpi_ev_global_lock_handler(void *context)
333 u8 acquired = FALSE; 307 u8 acquired = FALSE;
334 308
335 /* 309 /*
336 * Attempt to get the lock 310 * Attempt to get the lock.
311 *
337 * If we don't get it now, it will be marked pending and we will 312 * If we don't get it now, it will be marked pending and we will
338 * take another interrupt when it becomes free. 313 * take another interrupt when it becomes free.
339 */ 314 */
340 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 315 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
341 if (acquired) { 316 if (acquired) {
342 317
343 /* Got the lock, now wake all threads waiting for it */ 318 /* Got the lock, now wake all threads waiting for it */
319
344 acpi_gbl_global_lock_acquired = TRUE; 320 acpi_gbl_global_lock_acquired = TRUE;
345 acpi_ev_global_lock_thread(context); 321 /* Send a unit to the semaphore */
322
323 if (ACPI_FAILURE(acpi_os_signal_semaphore(
324 acpi_gbl_global_lock_semaphore, 1))) {
325 ACPI_ERROR((AE_INFO,
326 "Could not signal Global Lock semaphore"));
327 }
346 } 328 }
347 329
348 return (ACPI_INTERRUPT_HANDLED); 330 return (ACPI_INTERRUPT_HANDLED);
@@ -366,6 +348,13 @@ acpi_status acpi_ev_init_global_lock_handler(void)
366 348
367 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 349 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
368 350
351 status =
352 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
353 (struct acpi_table_header **)&facs);
354 if (ACPI_FAILURE(status)) {
355 return_ACPI_STATUS(status);
356 }
357
369 acpi_gbl_global_lock_present = TRUE; 358 acpi_gbl_global_lock_present = TRUE;
370 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 359 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
371 acpi_ev_global_lock_handler, 360 acpi_ev_global_lock_handler,
@@ -389,6 +378,31 @@ acpi_status acpi_ev_init_global_lock_handler(void)
389 return_ACPI_STATUS(status); 378 return_ACPI_STATUS(status);
390} 379}
391 380
381/*******************************************************************************
382 *
383 * FUNCTION: acpi_ev_remove_global_lock_handler
384 *
385 * PARAMETERS: None
386 *
387 * RETURN: Status
388 *
389 * DESCRIPTION: Remove the handler for the Global Lock
390 *
391 ******************************************************************************/
392
393static acpi_status acpi_ev_remove_global_lock_handler(void)
394{
395 acpi_status status;
396
397 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
398
399 acpi_gbl_global_lock_present = FALSE;
400 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
401 acpi_ev_global_lock_handler);
402
403 return_ACPI_STATUS(status);
404}
405
392/****************************************************************************** 406/******************************************************************************
393 * 407 *
394 * FUNCTION: acpi_ev_acquire_global_lock 408 * FUNCTION: acpi_ev_acquire_global_lock
@@ -399,6 +413,16 @@ acpi_status acpi_ev_init_global_lock_handler(void)
399 * 413 *
400 * DESCRIPTION: Attempt to gain ownership of the Global Lock. 414 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
401 * 415 *
416 * MUTEX: Interpreter must be locked
417 *
418 * Note: The original implementation allowed multiple threads to "acquire" the
419 * Global Lock, and the OS would hold the lock until the last thread had
420 * released it. However, this could potentially starve the BIOS out of the
421 * lock, especially in the case where there is a tight handshake between the
422 * Embedded Controller driver and the BIOS. Therefore, this implementation
423 * allows only one thread to acquire the HW Global Lock at a time, and makes
424 * the global lock appear as a standard mutex on the OS side.
425 *
402 *****************************************************************************/ 426 *****************************************************************************/
403 427
404acpi_status acpi_ev_acquire_global_lock(u16 timeout) 428acpi_status acpi_ev_acquire_global_lock(u16 timeout)
@@ -408,53 +432,51 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
408 432
409 ACPI_FUNCTION_TRACE(ev_acquire_global_lock); 433 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
410 434
411#ifndef ACPI_APPLICATION 435 /*
412 /* Make sure that we actually have a global lock */ 436 * Only one thread can acquire the GL at a time, the global_lock_mutex
413 437 * enforces this. This interface releases the interpreter if we must wait.
414 if (!acpi_gbl_global_lock_present) { 438 */
415 return_ACPI_STATUS(AE_NO_GLOBAL_LOCK); 439 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
440 if (ACPI_FAILURE(status)) {
441 return_ACPI_STATUS(status);
416 } 442 }
417#endif
418
419 /* One more thread wants the global lock */
420
421 acpi_gbl_global_lock_thread_count++;
422 443
423 /* 444 /*
424 * If we (OS side vs. BIOS side) have the hardware lock already, 445 * Make sure that a global lock actually exists. If not, just treat
425 * we are done 446 * the lock as a standard mutex.
426 */ 447 */
427 if (acpi_gbl_global_lock_acquired) { 448 if (!acpi_gbl_global_lock_present) {
449 acpi_gbl_global_lock_acquired = TRUE;
428 return_ACPI_STATUS(AE_OK); 450 return_ACPI_STATUS(AE_OK);
429 } 451 }
430 452
431 /* We must acquire the actual hardware lock */ 453 /* Attempt to acquire the actual hardware lock */
432 454
433 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 455 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
434 if (acquired) { 456 if (acquired) {
435 457
436 /* We got the lock */ 458 /* We got the lock */
437 459
438 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 460 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
439 "Acquired the HW Global Lock\n")); 461 "Acquired hardware Global Lock\n"));
440 462
441 acpi_gbl_global_lock_acquired = TRUE; 463 acpi_gbl_global_lock_acquired = TRUE;
442 return_ACPI_STATUS(AE_OK); 464 return_ACPI_STATUS(AE_OK);
443 } 465 }
444 466
445 /* 467 /*
446 * Did not get the lock. The pending bit was set above, and we must now 468 * Did not get the lock. The pending bit was set above, and we must now
447 * wait until we get the global lock released interrupt. 469 * wait until we get the global lock released interrupt.
448 */ 470 */
449 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); 471 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
450 472
451 /* 473 /*
452 * Acquire the global lock semaphore first. 474 * Wait for handshake with the global lock interrupt handler.
453 * Since this wait will block, we must release the interpreter 475 * This interface releases the interpreter if we must wait.
454 */ 476 */
455 status = 477 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
456 acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 478 ACPI_WAIT_FOREVER);
457 timeout); 479
458 return_ACPI_STATUS(status); 480 return_ACPI_STATUS(status);
459} 481}
460 482
@@ -477,38 +499,39 @@ acpi_status acpi_ev_release_global_lock(void)
477 499
478 ACPI_FUNCTION_TRACE(ev_release_global_lock); 500 ACPI_FUNCTION_TRACE(ev_release_global_lock);
479 501
480 if (!acpi_gbl_global_lock_thread_count) { 502 /* Lock must be already acquired */
503
504 if (!acpi_gbl_global_lock_acquired) {
481 ACPI_WARNING((AE_INFO, 505 ACPI_WARNING((AE_INFO,
482 "Cannot release HW Global Lock, it has not been acquired")); 506 "Cannot release the ACPI Global Lock, it has not been acquired"));
483 return_ACPI_STATUS(AE_NOT_ACQUIRED); 507 return_ACPI_STATUS(AE_NOT_ACQUIRED);
484 } 508 }
485 509
486 /* One fewer thread has the global lock */ 510 if (acpi_gbl_global_lock_present) {
487 511
488 acpi_gbl_global_lock_thread_count--; 512 /* Allow any thread to release the lock */
489 if (acpi_gbl_global_lock_thread_count) {
490 513
491 /* There are still some threads holding the lock, cannot release */ 514 ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
492 515
493 return_ACPI_STATUS(AE_OK); 516 /*
517 * If the pending bit was set, we must write GBL_RLS to the control
518 * register
519 */
520 if (pending) {
521 status =
522 acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
523 1);
524 }
525
526 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
527 "Released hardware Global Lock\n"));
494 } 528 }
495 529
496 /*
497 * No more threads holding lock, we can do the actual hardware
498 * release
499 */
500 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, pending);
501 acpi_gbl_global_lock_acquired = FALSE; 530 acpi_gbl_global_lock_acquired = FALSE;
502 531
503 /* 532 /* Release the local GL mutex */
504 * If the pending bit was set, we must write GBL_RLS to the control
505 * register
506 */
507 if (pending) {
508 status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
509 1, ACPI_MTX_LOCK);
510 }
511 533
534 acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
512 return_ACPI_STATUS(status); 535 return_ACPI_STATUS(status);
513} 536}
514 537
@@ -558,6 +581,12 @@ void acpi_ev_terminate(void)
558 if (ACPI_FAILURE(status)) { 581 if (ACPI_FAILURE(status)) {
559 ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); 582 ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
560 } 583 }
584
585 status = acpi_ev_remove_global_lock_handler();
586 if (ACPI_FAILURE(status)) {
587 ACPI_ERROR((AE_INFO,
588 "Could not remove Global Lock handler"));
589 }
561 } 590 }
562 591
563 /* Deallocate all handler objects installed within GPE info structs */ 592 /* Deallocate all handler objects installed within GPE info structs */
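
The evmisc.c changes above rework Global Lock acquisition so that only one thread at a time contends for the hardware lock, with a semaphore handshake against the lock-release interrupt. The following is a rough user-space model of that flow; every function here is a stand-in stub for illustration, not an ACPICA interface.

#include <stdbool.h>
#include <stdio.h>

/* Stub: stands in for the global_lock_mutex that serializes OS-side callers */
static void lock_global_lock_mutex(void) { }

/* Stub: pretend the firmware currently owns the hardware Global Lock */
static bool try_hw_global_lock(void) { return false; }

/* Stub: pretend the GBL_RLS release interrupt fired and signaled us */
static void wait_for_release_semaphore(void)
{
	printf("pending bit set, waiting for release interrupt\n");
}

static int acquire_global_lock(void)
{
	lock_global_lock_mutex();	/* only one thread past this point */

	if (try_hw_global_lock()) {	/* got the FACS ownership bit directly */
		printf("acquired hardware Global Lock\n");
		return 0;
	}

	/* Did not get it: the pending bit is set, so wait for the handshake */
	wait_for_release_semaphore();
	printf("acquired hardware Global Lock after release interrupt\n");
	return 0;
}

int main(void)
{
	return acquire_global_lock();
}
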
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 21caae04fe85..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
291 u32 bit_width, acpi_integer * value) 291 u32 bit_width, acpi_integer * value)
292{ 292{
293 acpi_status status; 293 acpi_status status;
294 acpi_status status2;
295 acpi_adr_space_handler handler; 294 acpi_adr_space_handler handler;
296 acpi_adr_space_setup region_setup; 295 acpi_adr_space_setup region_setup;
297 union acpi_operand_object *handler_desc; 296 union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
345 * setup will potentially execute control methods 344 * setup will potentially execute control methods
346 * (e.g., _REG method for this region) 345 * (e.g., _REG method for this region)
347 */ 346 */
348 acpi_ex_exit_interpreter(); 347 acpi_ex_relinquish_interpreter();
349 348
350 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 349 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
351 handler_desc->address_space.context, 350 handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 352
354 /* Re-enter the interpreter */ 353 /* Re-enter the interpreter */
355 354
356 status2 = acpi_ex_enter_interpreter(); 355 acpi_ex_reacquire_interpreter();
357 if (ACPI_FAILURE(status2)) {
358 return_ACPI_STATUS(status2);
359 }
360 356
361 /* Check for failure of the Region Setup */ 357 /* Check for failure of the Region Setup */
362 358
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
409 * exit the interpreter because the handler *might* block -- we don't 405 * exit the interpreter because the handler *might* block -- we don't
410 * know what it will do, so we can't hold the lock on the intepreter. 406 * know what it will do, so we can't hold the lock on the intepreter.
411 */ 407 */
412 acpi_ex_exit_interpreter(); 408 acpi_ex_relinquish_interpreter();
413 } 409 }
414 410
415 /* Call the handler */ 411 /* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
430 * We just returned from a non-default handler, we must re-enter the 426 * We just returned from a non-default handler, we must re-enter the
431 * interpreter 427 * interpreter
432 */ 428 */
433 status2 = acpi_ex_enter_interpreter(); 429 acpi_ex_reacquire_interpreter();
434 if (ACPI_FAILURE(status2)) {
435 return_ACPI_STATUS(status2);
436 }
437 } 430 }
438 431
439 return_ACPI_STATUS(status); 432 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 203d1359190a..a4fa7e6822a3 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,11 @@
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evrgnini") 49ACPI_MODULE_NAME("evrgnini")
50 50
51/* Local prototypes */
52static u8 acpi_ev_match_pci_root_bridge(char *id);
53
54static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
55
51/******************************************************************************* 56/*******************************************************************************
52 * 57 *
53 * FUNCTION: acpi_ev_system_memory_region_setup 58 * FUNCTION: acpi_ev_system_memory_region_setup
@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
62 * DESCRIPTION: Setup a system_memory operation region 67 * DESCRIPTION: Setup a system_memory operation region
63 * 68 *
64 ******************************************************************************/ 69 ******************************************************************************/
70
65acpi_status 71acpi_status
66acpi_ev_system_memory_region_setup(acpi_handle handle, 72acpi_ev_system_memory_region_setup(acpi_handle handle,
67 u32 function, 73 u32 function,
@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
168 union acpi_operand_object *handler_obj; 174 union acpi_operand_object *handler_obj;
169 struct acpi_namespace_node *parent_node; 175 struct acpi_namespace_node *parent_node;
170 struct acpi_namespace_node *pci_root_node; 176 struct acpi_namespace_node *pci_root_node;
177 struct acpi_namespace_node *pci_device_node;
171 union acpi_operand_object *region_obj = 178 union acpi_operand_object *region_obj =
172 (union acpi_operand_object *)handle; 179 (union acpi_operand_object *)handle;
173 struct acpi_device_id object_hID;
174 180
175 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); 181 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
176 182
@@ -215,45 +221,30 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
215 221
216 pci_root_node = parent_node; 222 pci_root_node = parent_node;
217 while (pci_root_node != acpi_gbl_root_node) { 223 while (pci_root_node != acpi_gbl_root_node) {
218 status =
219 acpi_ut_execute_HID(pci_root_node, &object_hID);
220 if (ACPI_SUCCESS(status)) {
221 /*
222 * Got a valid _HID string, check if this is a PCI root.
223 * New for ACPI 3.0: check for a PCI Express root also.
224 */
225 if (!
226 (ACPI_STRNCMP
227 (object_hID.value, PCI_ROOT_HID_STRING,
228 sizeof(PCI_ROOT_HID_STRING)))
229 ||
230 !(ACPI_STRNCMP
231 (object_hID.value,
232 PCI_EXPRESS_ROOT_HID_STRING,
233 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
234
235 /* Install a handler for this PCI root bridge */
236 224
237 status = 225 /* Get the _HID/_CID in order to detect a root_bridge */
238 acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 226
239 if (ACPI_FAILURE(status)) { 227 if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
240 if (status == AE_SAME_HANDLER) { 228
241 /* 229 /* Install a handler for this PCI root bridge */
242 * It is OK if the handler is already installed on the root 230
243 * bridge. Still need to return a context object for the 231 status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
244 * new PCI_Config operation region, however. 232 if (ACPI_FAILURE(status)) {
245 */ 233 if (status == AE_SAME_HANDLER) {
246 status = AE_OK; 234 /*
247 } else { 235 * It is OK if the handler is already installed on the root
248 ACPI_EXCEPTION((AE_INFO, 236 * bridge. Still need to return a context object for the
249 status, 237 * new PCI_Config operation region, however.
250 "Could not install PciConfig handler for Root Bridge %4.4s", 238 */
251 acpi_ut_get_node_name 239 status = AE_OK;
252 (pci_root_node))); 240 } else {
253 } 241 ACPI_EXCEPTION((AE_INFO, status,
242 "Could not install PciConfig handler for Root Bridge %4.4s",
243 acpi_ut_get_node_name
244 (pci_root_node)));
254 } 245 }
255 break;
256 } 246 }
247 break;
257 } 248 }
258 249
259 pci_root_node = acpi_ns_get_parent_node(pci_root_node); 250 pci_root_node = acpi_ns_get_parent_node(pci_root_node);
@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
282 /* 273 /*
283 * For PCI_Config space access, we need the segment, bus, 274 * For PCI_Config space access, we need the segment, bus,
284 * device and function numbers. Acquire them here. 275 * device and function numbers. Acquire them here.
276 *
277 * Find the parent device object. (This allows the operation region to be
278 * within a subscope under the device, such as a control method.)
285 */ 279 */
280 pci_device_node = region_obj->region.node;
281 while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
282 pci_device_node = acpi_ns_get_parent_node(pci_device_node);
283 }
284
285 if (!pci_device_node) {
286 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
287 }
286 288
287 /* 289 /*
288 * Get the PCI device and function numbers from the _ADR object 290 * Get the PCI device and function numbers from the _ADR object
289 * contained in the parent's scope. 291 * contained in the parent's scope.
290 */ 292 */
291 status = 293 status =
292 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node, 294 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
293 &pci_value); 295 &pci_value);
294 296
295 /* 297 /*
@@ -329,6 +331,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
329 331
330/******************************************************************************* 332/*******************************************************************************
331 * 333 *
334 * FUNCTION: acpi_ev_match_pci_root_bridge
335 *
336 * PARAMETERS: Id - The HID/CID in string format
337 *
338 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
339 *
340 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
341 *
342 ******************************************************************************/
343
344static u8 acpi_ev_match_pci_root_bridge(char *id)
345{
346
347 /*
348 * Check if this is a PCI root.
349 * ACPI 3.0+: check for a PCI Express root also.
350 */
351 if (!(ACPI_STRNCMP(id,
352 PCI_ROOT_HID_STRING,
353 sizeof(PCI_ROOT_HID_STRING))) ||
354 !(ACPI_STRNCMP(id,
355 PCI_EXPRESS_ROOT_HID_STRING,
356 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
357 return (TRUE);
358 }
359
360 return (FALSE);
361}
362
363/*******************************************************************************
364 *
365 * FUNCTION: acpi_ev_is_pci_root_bridge
366 *
367 * PARAMETERS: Node - Device node being examined
368 *
369 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
370 *
371 * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
372 * examining the _HID and _CID for the device.
373 *
374 ******************************************************************************/
375
376static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
377{
378 acpi_status status;
379 struct acpi_device_id hid;
380 struct acpi_compatible_id_list *cid;
381 acpi_native_uint i;
382
383 /*
384 * Get the _HID and check for a PCI Root Bridge
385 */
386 status = acpi_ut_execute_HID(node, &hid);
387 if (ACPI_FAILURE(status)) {
388 return (FALSE);
389 }
390
391 if (acpi_ev_match_pci_root_bridge(hid.value)) {
392 return (TRUE);
393 }
394
395 /*
396 * The _HID did not match.
397 * Get the _CID and check for a PCI Root Bridge
398 */
399 status = acpi_ut_execute_CID(node, &cid);
400 if (ACPI_FAILURE(status)) {
401 return (FALSE);
402 }
403
404 /* Check all _CIDs in the returned list */
405
406 for (i = 0; i < cid->count; i++) {
407 if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
408 ACPI_FREE(cid);
409 return (TRUE);
410 }
411 }
412
413 ACPI_FREE(cid);
414 return (FALSE);
415}
416
417/*******************************************************************************
418 *
332 * FUNCTION: acpi_ev_pci_bar_region_setup 419 * FUNCTION: acpi_ev_pci_bar_region_setup
333 * 420 *
334 * PARAMETERS: Handle - Region we are interested in 421 * PARAMETERS: Handle - Region we are interested in
@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
432 * a PCI address in the scope of the definition. This address is 519 * a PCI address in the scope of the definition. This address is
433 * required to perform an access to PCI config space. 520 * required to perform an access to PCI config space.
434 * 521 *
522 * MUTEX: Interpreter should be unlocked, because we may run the _REG
523 * method for this region.
524 *
435 ******************************************************************************/ 525 ******************************************************************************/
436 526
437acpi_status 527acpi_status
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 8106215ad554..7e5d15ce2395 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,10 @@ u32 acpi_ev_install_sci_handler(void)
142 142
143 ACPI_FUNCTION_TRACE(ev_install_sci_handler); 143 ACPI_FUNCTION_TRACE(ev_install_sci_handler);
144 144
145 status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 145 status =
146 acpi_ev_sci_xrupt_handler, 146 acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
147 acpi_gbl_gpe_xrupt_list_head); 147 acpi_ev_sci_xrupt_handler,
148 acpi_gbl_gpe_xrupt_list_head);
148 return_ACPI_STATUS(status); 149 return_ACPI_STATUS(status);
149} 150}
150 151
@@ -175,8 +176,9 @@ acpi_status acpi_ev_remove_sci_handler(void)
175 176
176 /* Just let the OS remove the handler and disable the level */ 177 /* Just let the OS remove the handler and disable the level */
177 178
178 status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 179 status =
179 acpi_ev_sci_xrupt_handler); 180 acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
181 acpi_ev_sci_xrupt_handler);
180 182
181 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
182} 184}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index 923fd2b46955..685a103a3587 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
768 return (AE_BAD_PARAMETER); 768 return (AE_BAD_PARAMETER);
769 } 769 }
770 770
771 status = acpi_ex_enter_interpreter(); 771 /* Must lock interpreter to prevent race conditions */
772 if (ACPI_FAILURE(status)) {
773 return (status);
774 }
775 772
773 acpi_ex_enter_interpreter();
776 status = acpi_ev_acquire_global_lock(timeout); 774 status = acpi_ev_acquire_global_lock(timeout);
777 acpi_ex_exit_interpreter(); 775 acpi_ex_exit_interpreter();
778 776
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 7ebc2efac936..17065e98807c 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include <acpi/acevents.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evxfevnt") 50ACPI_MODULE_NAME("evxfevnt")
@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
65 66
66 ACPI_FUNCTION_TRACE(acpi_enable); 67 ACPI_FUNCTION_TRACE(acpi_enable);
67 68
68 /* Make sure we have the FADT */ 69 /* ACPI tables must be present */
69 70
70 if (!acpi_gbl_FADT) { 71 if (!acpi_tb_tables_loaded()) {
71 ACPI_WARNING((AE_INFO, "No FADT information present!"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 } 73 }
74 74
75 /* Check current mode */
76
75 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 77 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
76 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 78 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
77 "System is already in ACPI mode\n")); 79 "System is already in ACPI mode\n"));
@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
111 113
112 ACPI_FUNCTION_TRACE(acpi_disable); 114 ACPI_FUNCTION_TRACE(acpi_disable);
113 115
114 if (!acpi_gbl_FADT) {
115 ACPI_WARNING((AE_INFO, "No FADT information present!"));
116 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
117 }
118
119 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { 116 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
120 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 117 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
121 "System is already in legacy (non-ACPI) mode\n")); 118 "System is already in legacy (non-ACPI) mode\n"));
@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
169 */ 166 */
170 status = 167 status =
171 acpi_set_register(acpi_gbl_fixed_event_info[event]. 168 acpi_set_register(acpi_gbl_fixed_event_info[event].
172 enable_register_id, 1, ACPI_MTX_LOCK); 169 enable_register_id, 1);
173 if (ACPI_FAILURE(status)) { 170 if (ACPI_FAILURE(status)) {
174 return_ACPI_STATUS(status); 171 return_ACPI_STATUS(status);
175 } 172 }
@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
178 175
179 status = 176 status =
180 acpi_get_register(acpi_gbl_fixed_event_info[event]. 177 acpi_get_register(acpi_gbl_fixed_event_info[event].
181 enable_register_id, &value, ACPI_MTX_LOCK); 178 enable_register_id, &value);
182 if (ACPI_FAILURE(status)) { 179 if (ACPI_FAILURE(status)) {
183 return_ACPI_STATUS(status); 180 return_ACPI_STATUS(status);
184 } 181 }
@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
368 */ 365 */
369 status = 366 status =
370 acpi_set_register(acpi_gbl_fixed_event_info[event]. 367 acpi_set_register(acpi_gbl_fixed_event_info[event].
371 enable_register_id, 0, ACPI_MTX_LOCK); 368 enable_register_id, 0);
372 if (ACPI_FAILURE(status)) { 369 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 370 return_ACPI_STATUS(status);
374 } 371 }
375 372
376 status = 373 status =
377 acpi_get_register(acpi_gbl_fixed_event_info[event]. 374 acpi_get_register(acpi_gbl_fixed_event_info[event].
378 enable_register_id, &value, ACPI_MTX_LOCK); 375 enable_register_id, &value);
379 if (ACPI_FAILURE(status)) { 376 if (ACPI_FAILURE(status)) {
380 return_ACPI_STATUS(status); 377 return_ACPI_STATUS(status);
381 } 378 }
@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
421 */ 418 */
422 status = 419 status =
423 acpi_set_register(acpi_gbl_fixed_event_info[event]. 420 acpi_set_register(acpi_gbl_fixed_event_info[event].
424 status_register_id, 1, ACPI_MTX_LOCK); 421 status_register_id, 1);
425 422
426 return_ACPI_STATUS(status); 423 return_ACPI_STATUS(status);
427} 424}
@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
510 507
511 status = 508 status =
512 acpi_get_register(acpi_gbl_fixed_event_info[event]. 509 acpi_get_register(acpi_gbl_fixed_event_info[event].
513 status_register_id, event_status, ACPI_MTX_LOCK); 510 status_register_id, event_status);
514 511
515 return_ACPI_STATUS(status); 512 return_ACPI_STATUS(status);
516} 513}
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 83b12a9afa32..7bf09c5fb242 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without