path: root/drivers/acpi/acpica
author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 23:15:35 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 23:15:35 -0500
commit    52cfd503ad7176d23a5dd7af3981744feb60622f (patch)
tree      0a8aeaaf4acbc86ac682f18632b8070c1c6b7ba1 /drivers/acpi/acpica
parent    dc8e7e3ec60bd5ef7868aa88755e9d4c948dc5cc (diff)
parent    4263d9a3ae4d15785897d0543bb59316c84ee605 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (59 commits)
  ACPI / PM: Fix build problems for !CONFIG_ACPI related to NVS rework
  ACPI: fix resource check message
  ACPI / Battery: Update information on info notification and resume
  ACPI: Drop device flag wake_capable
  ACPI: Always check if _PRW is present before trying to evaluate it
  ACPI / PM: Check status of power resources under mutexes
  ACPI / PM: Rename acpi_power_off_device()
  ACPI / PM: Drop acpi_power_nocheck
  ACPI / PM: Drop acpi_bus_get_power()
  Platform / x86: Make fujitsu_laptop use acpi_bus_update_power()
  ACPI / Fan: Rework the handling of power resources
  ACPI / PM: Register power resource devices as soon as they are needed
  ACPI / PM: Register acpi_power_driver early
  ACPI / PM: Add function for updating device power state consistently
  ACPI / PM: Add function for device power state initialization
  ACPI / PM: Introduce __acpi_bus_get_power()
  ACPI / PM: Introduce function for refcounting device power resources
  ACPI / PM: Add functions for manipulating lists of power resources
  ACPI / PM: Prevent acpi_power_get_inferred_state() from making changes
  ACPICA: Update version to 20101209
  ...
Diffstat (limited to 'drivers/acpi/acpica')
-rw-r--r--  drivers/acpi/acpica/Makefile    |   2
-rw-r--r--  drivers/acpi/acpica/acevents.h  |  21
-rw-r--r--  drivers/acpi/acpica/acglobal.h  |   7
-rw-r--r--  drivers/acpi/acpica/achware.h   |   2
-rw-r--r--  drivers/acpi/acpica/aclocal.h   |  13
-rw-r--r--  drivers/acpi/acpica/evevent.c   |  12
-rw-r--r--  drivers/acpi/acpica/evgpe.c     | 265
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c  |  33
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c |  25
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c |  39
-rw-r--r--  drivers/acpi/acpica/evmisc.c    |  94
-rw-r--r--  drivers/acpi/acpica/evxface.c   |  77
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c  | 600
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c   | 669
-rw-r--r--  drivers/acpi/acpica/hwgpe.c     |  32
-rw-r--r--  drivers/acpi/acpica/utglobal.c  |   3
16 files changed, 1097 insertions, 797 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a7e1d1aa4107..eec2eadd2431 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
14 14
15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 16 evmisc.o evrgnini.o evxface.o evxfregn.o \
17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o 17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
18 18
19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ 20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a6f99cc37a19..70e0b28801aa 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void);
51 51
52acpi_status acpi_ev_install_xrupt_handlers(void); 52acpi_status acpi_ev_install_xrupt_handlers(void);
53 53
54acpi_status acpi_ev_install_fadt_gpes(void);
55
56u32 acpi_ev_fixed_event_detect(void); 54u32 acpi_ev_fixed_event_detect(void);
57 55
58/* 56/*
@@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
82 80
83acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 81acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
84 82
85acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 83acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
86 84
87acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); 85acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
88 86
89struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 87struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
90 u32 gpe_number); 88 u32 gpe_number);
@@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
93 struct acpi_gpe_block_info 91 struct acpi_gpe_block_info
94 *gpe_block); 92 *gpe_block);
95 93
94acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
95
96/* 96/*
97 * evgpeblk - Upper-level GPE block support 97 * evgpeblk - Upper-level GPE block support
98 */ 98 */
@@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
107acpi_status 107acpi_status
108acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 108acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
109 struct acpi_gpe_block_info *gpe_block, 109 struct acpi_gpe_block_info *gpe_block,
110 void *ignored); 110 void *context);
111 111
112acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); 112acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
113 113
114u32 114u32
115acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, 115acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
116 struct acpi_gpe_event_info *gpe_event_info,
116 u32 gpe_number); 117 u32 gpe_number);
117 118
118/* 119/*
@@ -126,10 +127,6 @@ acpi_status
126acpi_ev_match_gpe_method(acpi_handle obj_handle, 127acpi_ev_match_gpe_method(acpi_handle obj_handle,
127 u32 level, void *context, void **return_value); 128 u32 level, void *context, void **return_value);
128 129
129acpi_status
130acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
131 u32 level, void *context, void **return_value);
132
133/* 130/*
134 * evgpeutil - GPE utilities 131 * evgpeutil - GPE utilities
135 */ 132 */
@@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
138 135
139u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); 136u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
140 137
138acpi_status
139acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
140 struct acpi_gpe_block_info *gpe_block, void *context);
141
141struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); 142struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
142 143
143acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); 144acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ad88fcae4eb9..9bb69c59bb12 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running;
146 146
147extern u32 acpi_gbl_nesting_level; 147extern u32 acpi_gbl_nesting_level;
148 148
149ACPI_EXTERN u32 acpi_gpe_count;
150ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
151
149/* Support for dynamic control method tracing mechanism */ 152/* Support for dynamic control method tracing mechanism */
150 153
151ACPI_EXTERN u32 acpi_gbl_original_dbg_level; 154ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -370,7 +373,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler
370ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 373ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
371ACPI_EXTERN struct acpi_gpe_block_info 374ACPI_EXTERN struct acpi_gpe_block_info
372*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 375*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
373ACPI_EXTERN u8 acpi_all_gpes_initialized; 376ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
377ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
378ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
374 379
375/***************************************************************************** 380/*****************************************************************************
376 * 381 *
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 167470ad2d21..258d628793ea 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
94 struct acpi_gpe_register_info *gpe_register_info); 94 struct acpi_gpe_register_info *gpe_register_info);
95 95
96acpi_status 96acpi_status
97acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); 97acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
98 98
99acpi_status 99acpi_status
100acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 100acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ceb0c05b2d7..74000f5b7dab 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -408,17 +408,18 @@ struct acpi_predefined_data {
408 408
409/* Dispatch info for each GPE -- either a method or handler, cannot be both */ 409/* Dispatch info for each GPE -- either a method or handler, cannot be both */
410 410
411struct acpi_handler_info { 411struct acpi_gpe_handler_info {
412 acpi_event_handler address; /* Address of handler, if any */ 412 acpi_gpe_handler address; /* Address of handler, if any */
413 void *context; /* Context to be passed to handler */ 413 void *context; /* Context to be passed to handler */
414 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 414 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
415 u8 orig_flags; /* Original misc info about this GPE */ 415 u8 original_flags; /* Original (pre-handler) GPE info */
416 u8 orig_enabled; /* Set if the GPE was originally enabled */ 416 u8 originally_enabled; /* True if GPE was originally enabled */
417}; 417};
418 418
419union acpi_gpe_dispatch_info { 419union acpi_gpe_dispatch_info {
420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */
421 struct acpi_handler_info *handler; 421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
422 struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
422}; 423};
423 424
424/* 425/*
@@ -458,7 +459,7 @@ struct acpi_gpe_block_info {
458 u32 register_count; /* Number of register pairs in block */ 459 u32 register_count; /* Number of register pairs in block */
459 u16 gpe_count; /* Number of individual GPEs in block */ 460 u16 gpe_count; /* Number of individual GPEs in block */
460 u8 block_base_number; /* Base GPE number for this block */ 461 u8 block_base_number; /* Base GPE number for this block */
461 u8 initialized; /* If set, the GPE block has been initialized */ 462 u8 initialized; /* TRUE if this block is initialized */
462}; 463};
463 464
464/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ 465/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c61c3039c31a..e5e313c663a5 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void)
217 status_bit_mask) 217 status_bit_mask)
218 && (fixed_enable & acpi_gbl_fixed_event_info[i]. 218 && (fixed_enable & acpi_gbl_fixed_event_info[i].
219 enable_bit_mask)) { 219 enable_bit_mask)) {
220 /*
221 * Found an active (signalled) event. Invoke global event
222 * handler if present.
223 */
224 acpi_fixed_event_count[i]++;
225 if (acpi_gbl_global_event_handler) {
226 acpi_gbl_global_event_handler
227 (ACPI_EVENT_TYPE_FIXED, NULL, i,
228 acpi_gbl_global_event_handler_context);
229 }
220 230
221 /* Found an active (signalled) event */
222 acpi_os_fixed_event_count(i);
223 int_status |= acpi_ev_fixed_event_dispatch(i); 231 int_status |= acpi_ev_fixed_event_dispatch(i);
224 } 232 }
225 } 233 }
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f226eac314db..7c339d34ab42 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe")
52/* Local prototypes */ 52/* Local prototypes */
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54 54
55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
56
55/******************************************************************************* 57/*******************************************************************************
56 * 58 *
57 * FUNCTION: acpi_ev_update_gpe_enable_mask 59 * FUNCTION: acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
102 * 104 *
103 * RETURN: Status 105 * RETURN: Status
104 * 106 *
105 * DESCRIPTION: Clear the given GPE from stale events and enable it. 107 * DESCRIPTION: Clear a GPE of stale events and enable it.
106 * 108 *
107 ******************************************************************************/ 109 ******************************************************************************/
108acpi_status 110acpi_status
@@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
113 ACPI_FUNCTION_TRACE(ev_enable_gpe); 115 ACPI_FUNCTION_TRACE(ev_enable_gpe);
114 116
115 /* 117 /*
116 * We will only allow a GPE to be enabled if it has either an 118 * We will only allow a GPE to be enabled if it has either an associated
117 * associated method (_Lxx/_Exx) or a handler. Otherwise, the 119 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
118 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the 120 * feature. Otherwise, the GPE will be immediately disabled by
119 * first time it fires. 121 * acpi_ev_gpe_dispatch the first time it fires.
120 */ 122 */
121 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { 123 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
124 ACPI_GPE_DISPATCH_NONE) {
122 return_ACPI_STATUS(AE_NO_HANDLER); 125 return_ACPI_STATUS(AE_NO_HANDLER);
123 } 126 }
124 127
@@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
137 140
138/******************************************************************************* 141/*******************************************************************************
139 * 142 *
140 * FUNCTION: acpi_raw_enable_gpe 143 * FUNCTION: acpi_ev_add_gpe_reference
141 * 144 *
142 * PARAMETERS: gpe_event_info - GPE to enable 145 * PARAMETERS: gpe_event_info - Add a reference to this GPE
143 * 146 *
144 * RETURN: Status 147 * RETURN: Status
145 * 148 *
@@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
148 * 151 *
149 ******************************************************************************/ 152 ******************************************************************************/
150 153
151acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 154acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
152{ 155{
153 acpi_status status = AE_OK; 156 acpi_status status = AE_OK;
154 157
158 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
159
155 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 160 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
156 return_ACPI_STATUS(AE_LIMIT); 161 return_ACPI_STATUS(AE_LIMIT);
157 } 162 }
158 163
159 gpe_event_info->runtime_count++; 164 gpe_event_info->runtime_count++;
160 if (gpe_event_info->runtime_count == 1) { 165 if (gpe_event_info->runtime_count == 1) {
166
167 /* Enable on first reference */
168
161 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 169 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
162 if (ACPI_SUCCESS(status)) { 170 if (ACPI_SUCCESS(status)) {
163 status = acpi_ev_enable_gpe(gpe_event_info); 171 status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
173 181
174/******************************************************************************* 182/*******************************************************************************
175 * 183 *
176 * FUNCTION: acpi_raw_disable_gpe 184 * FUNCTION: acpi_ev_remove_gpe_reference
177 * 185 *
178 * PARAMETERS: gpe_event_info - GPE to disable 186 * PARAMETERS: gpe_event_info - Remove a reference to this GPE
179 * 187 *
180 * RETURN: Status 188 * RETURN: Status
181 * 189 *
@@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
184 * 192 *
185 ******************************************************************************/ 193 ******************************************************************************/
186 194
187acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) 195acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
188{ 196{
189 acpi_status status = AE_OK; 197 acpi_status status = AE_OK;
190 198
199 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
200
191 if (!gpe_event_info->runtime_count) { 201 if (!gpe_event_info->runtime_count) {
192 return_ACPI_STATUS(AE_LIMIT); 202 return_ACPI_STATUS(AE_LIMIT);
193 } 203 }
194 204
195 gpe_event_info->runtime_count--; 205 gpe_event_info->runtime_count--;
196 if (!gpe_event_info->runtime_count) { 206 if (!gpe_event_info->runtime_count) {
207
208 /* Disable on last reference */
209
197 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 210 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
198 if (ACPI_SUCCESS(status)) { 211 if (ACPI_SUCCESS(status)) {
199 status = acpi_hw_low_set_gpe(gpe_event_info, 212 status = acpi_hw_low_set_gpe(gpe_event_info,
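Note: the two hunks above rename acpi_raw_enable_gpe()/acpi_raw_disable_gpe() to acpi_ev_add_gpe_reference()/acpi_ev_remove_gpe_reference(); the GPE is physically enabled only on its first runtime reference and disabled again on its last. A minimal sketch of how a host driver sees this pattern through the public wrappers (the driver functions and GPE number below are hypothetical; acpi_enable_gpe()/acpi_disable_gpe() are the exported interfaces that take and drop these references):

#define MY_GPE 0x16	/* hypothetical GPE number, relative to GPE block 0 */

static acpi_status my_device_start(void)
{
	/* First reference actually enables the GPE in hardware */
	return acpi_enable_gpe(NULL, MY_GPE);
}

static void my_device_stop(void)
{
	/* Hardware is disabled only when the last reference is dropped */
	acpi_disable_gpe(NULL, MY_GPE);
}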
@@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
379 } 392 }
380 393
381 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 394 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
382 "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", 395 "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
383 gpe_register_info->base_gpe_number, 396 gpe_register_info->base_gpe_number,
384 status_reg, enable_reg)); 397 status_reg, enable_reg));
385 398
@@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
405 * or method. 418 * or method.
406 */ 419 */
407 int_status |= 420 int_status |=
408 acpi_ev_gpe_dispatch(&gpe_block-> 421 acpi_ev_gpe_dispatch(gpe_block->
422 node,
423 &gpe_block->
409 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 424 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
410 } 425 }
411 } 426 }
@@ -435,17 +450,25 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
435 * an interrupt handler. 450 * an interrupt handler.
436 * 451 *
437 ******************************************************************************/ 452 ******************************************************************************/
438static void acpi_ev_asynch_enable_gpe(void *context);
439 453
440static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 454static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
441{ 455{
442 struct acpi_gpe_event_info *gpe_event_info = (void *)context; 456 struct acpi_gpe_event_info *gpe_event_info = context;
443 acpi_status status; 457 acpi_status status;
444 struct acpi_gpe_event_info local_gpe_event_info; 458 struct acpi_gpe_event_info *local_gpe_event_info;
445 struct acpi_evaluate_info *info; 459 struct acpi_evaluate_info *info;
446 460
447 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 461 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
448 462
463 /* Allocate a local GPE block */
464
465 local_gpe_event_info =
466 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
467 if (!local_gpe_event_info) {
468 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
469 return_VOID;
470 }
471
449 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 472 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
450 if (ACPI_FAILURE(status)) { 473 if (ACPI_FAILURE(status)) {
451 return_VOID; 474 return_VOID;
@@ -462,7 +485,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
462 * Take a snapshot of the GPE info for this level - we copy the info to 485 * Take a snapshot of the GPE info for this level - we copy the info to
463 * prevent a race condition with remove_handler/remove_block. 486 * prevent a race condition with remove_handler/remove_block.
464 */ 487 */
465 ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, 488 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
466 sizeof(struct acpi_gpe_event_info)); 489 sizeof(struct acpi_gpe_event_info));
467 490
468 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); 491 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +493,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
470 return_VOID; 493 return_VOID;
471 } 494 }
472 495
473 /* 496 /* Do the correct dispatch - normal method or implicit notify */
474 * Must check for control method type dispatch one more time to avoid a 497
475 * race with ev_gpe_install_handler 498 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
476 */ 499 case ACPI_GPE_DISPATCH_NOTIFY:
477 if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == 500
478 ACPI_GPE_DISPATCH_METHOD) { 501 /*
502 * Implicit notify.
503 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
504 * NOTE: the request is queued for execution after this method
505 * completes. The notify handlers are NOT invoked synchronously
506 * from this thread -- because handlers may in turn run other
507 * control methods.
508 */
509 status =
510 acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
511 device_node,
512 ACPI_NOTIFY_DEVICE_WAKE);
513 break;
514
515 case ACPI_GPE_DISPATCH_METHOD:
479 516
480 /* Allocate the evaluation information block */ 517 /* Allocate the evaluation information block */
481 518
@@ -488,7 +525,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
488 * control method that corresponds to this GPE 525 * control method that corresponds to this GPE
489 */ 526 */
490 info->prefix_node = 527 info->prefix_node =
491 local_gpe_event_info.dispatch.method_node; 528 local_gpe_event_info->dispatch.method_node;
492 info->flags = ACPI_IGNORE_RETURN_VALUE; 529 info->flags = ACPI_IGNORE_RETURN_VALUE;
493 530
494 status = acpi_ns_evaluate(info); 531 status = acpi_ns_evaluate(info);
@@ -499,46 +536,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
499 ACPI_EXCEPTION((AE_INFO, status, 536 ACPI_EXCEPTION((AE_INFO, status,
500 "while evaluating GPE method [%4.4s]", 537 "while evaluating GPE method [%4.4s]",
501 acpi_ut_get_node_name 538 acpi_ut_get_node_name
502 (local_gpe_event_info.dispatch. 539 (local_gpe_event_info->dispatch.
503 method_node))); 540 method_node)));
504 } 541 }
542
543 break;
544
545 default:
546 return_VOID; /* Should never happen */
505 } 547 }
548
506 /* Defer enabling of GPE until all notify handlers are done */ 549 /* Defer enabling of GPE until all notify handlers are done */
507 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, 550
508 gpe_event_info); 551 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
552 acpi_ev_asynch_enable_gpe,
553 local_gpe_event_info);
554 if (ACPI_FAILURE(status)) {
555 ACPI_FREE(local_gpe_event_info);
556 }
509 return_VOID; 557 return_VOID;
510} 558}
511 559
512static void acpi_ev_asynch_enable_gpe(void *context) 560
561/*******************************************************************************
562 *
563 * FUNCTION: acpi_ev_asynch_enable_gpe
564 *
565 * PARAMETERS: Context (gpe_event_info) - Info for this GPE
566 * Callback from acpi_os_execute
567 *
568 * RETURN: None
569 *
570 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
571 * complete (i.e., finish execution of Notify)
572 *
573 ******************************************************************************/
574
575static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
513{ 576{
514 struct acpi_gpe_event_info *gpe_event_info = context; 577 struct acpi_gpe_event_info *gpe_event_info = context;
578
579 (void)acpi_ev_finish_gpe(gpe_event_info);
580
581 ACPI_FREE(gpe_event_info);
582 return;
583}
584
585
586/*******************************************************************************
587 *
588 * FUNCTION: acpi_ev_finish_gpe
589 *
590 * PARAMETERS: gpe_event_info - Info for this GPE
591 *
592 * RETURN: Status
593 *
594 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
595 * of a GPE method or a synchronous or asynchronous GPE handler.
596 *
597 ******************************************************************************/
598
599acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
600{
515 acpi_status status; 601 acpi_status status;
602
516 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 603 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
517 ACPI_GPE_LEVEL_TRIGGERED) { 604 ACPI_GPE_LEVEL_TRIGGERED) {
518 /* 605 /*
519 * GPE is level-triggered, we clear the GPE status bit after handling 606 * GPE is level-triggered, we clear the GPE status bit after
520 * the event. 607 * handling the event.
521 */ 608 */
522 status = acpi_hw_clear_gpe(gpe_event_info); 609 status = acpi_hw_clear_gpe(gpe_event_info);
523 if (ACPI_FAILURE(status)) { 610 if (ACPI_FAILURE(status)) {
524 return_VOID; 611 return (status);
525 } 612 }
526 } 613 }
527 614
528 /* 615 /*
529 * Enable this GPE, conditionally. This means that the GPE will only be 616 * Enable this GPE, conditionally. This means that the GPE will
530 * physically enabled if the enable_for_run bit is set in the event_info 617 * only be physically enabled if the enable_for_run bit is set
618 * in the event_info.
531 */ 619 */
532 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); 620 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
533 621 return (AE_OK);
534 return_VOID;
535} 622}
536 623
624
537/******************************************************************************* 625/*******************************************************************************
538 * 626 *
539 * FUNCTION: acpi_ev_gpe_dispatch 627 * FUNCTION: acpi_ev_gpe_dispatch
540 * 628 *
541 * PARAMETERS: gpe_event_info - Info for this GPE 629 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
630 * gpe_event_info - Info for this GPE
542 * gpe_number - Number relative to the parent GPE block 631 * gpe_number - Number relative to the parent GPE block
543 * 632 *
544 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 633 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +640,22 @@ static void acpi_ev_asynch_enable_gpe(void *context)
551 ******************************************************************************/ 640 ******************************************************************************/
552 641
553u32 642u32
554acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 643acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
644 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
555{ 645{
556 acpi_status status; 646 acpi_status status;
647 u32 return_value;
557 648
558 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 649 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
559 650
560 acpi_os_gpe_count(gpe_number); 651 /* Invoke global event handler if present */
652
653 acpi_gpe_count++;
654 if (acpi_gbl_global_event_handler) {
655 acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
656 gpe_number,
657 acpi_gbl_global_event_handler_context);
658 }
561 659
562 /* 660 /*
563 * If edge-triggered, clear the GPE status bit now. Note that 661 * If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +666,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
568 status = acpi_hw_clear_gpe(gpe_event_info); 666 status = acpi_hw_clear_gpe(gpe_event_info);
569 if (ACPI_FAILURE(status)) { 667 if (ACPI_FAILURE(status)) {
570 ACPI_EXCEPTION((AE_INFO, status, 668 ACPI_EXCEPTION((AE_INFO, status,
571 "Unable to clear GPE[0x%2X]", 669 "Unable to clear GPE%02X", gpe_number));
572 gpe_number));
573 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 670 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
574 } 671 }
575 } 672 }
576 673
577 /* 674 /*
578 * Dispatch the GPE to either an installed handler, or the control method 675 * Always disable the GPE so that it does not keep firing before
579 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke 676 * any asynchronous activity completes (either from the execution
580 * it and do not attempt to run the method. If there is neither a handler 677 * of a GPE method or an asynchronous GPE handler.)
581 * nor a method, we disable this GPE to prevent further such pointless 678 *
582 * events from firing. 679 * If there is no handler or method to run, just disable the
680 * GPE and leave it disabled permanently to prevent further such
681 * pointless events from firing.
682 */
683 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
684 if (ACPI_FAILURE(status)) {
685 ACPI_EXCEPTION((AE_INFO, status,
686 "Unable to disable GPE%02X", gpe_number));
687 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
688 }
689
690 /*
691 * Dispatch the GPE to either an installed handler or the control
692 * method associated with this GPE (_Lxx or _Exx). If a handler
693 * exists, we invoke it and do not attempt to run the method.
694 * If there is neither a handler nor a method, leave the GPE
695 * disabled.
583 */ 696 */
584 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 697 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
585 case ACPI_GPE_DISPATCH_HANDLER: 698 case ACPI_GPE_DISPATCH_HANDLER:
586 699
587 /* 700 /* Invoke the installed handler (at interrupt level) */
588 * Invoke the installed handler (at interrupt level)
589 * Ignore return status for now.
590 * TBD: leave GPE disabled on error?
591 */
592 (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
593 dispatch.
594 handler->
595 context);
596 701
597 /* It is now safe to clear level-triggered events. */ 702 return_value =
703 gpe_event_info->dispatch.handler->address(gpe_device,
704 gpe_number,
705 gpe_event_info->
706 dispatch.handler->
707 context);
598 708
599 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 709 /* If requested, clear (if level-triggered) and reenable the GPE */
600 ACPI_GPE_LEVEL_TRIGGERED) { 710
601 status = acpi_hw_clear_gpe(gpe_event_info); 711 if (return_value & ACPI_REENABLE_GPE) {
602 if (ACPI_FAILURE(status)) { 712 (void)acpi_ev_finish_gpe(gpe_event_info);
603 ACPI_EXCEPTION((AE_INFO, status,
604 "Unable to clear GPE[0x%2X]",
605 gpe_number));
606 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
607 }
608 } 713 }
609 break; 714 break;
610 715
611 case ACPI_GPE_DISPATCH_METHOD: 716 case ACPI_GPE_DISPATCH_METHOD:
612 717 case ACPI_GPE_DISPATCH_NOTIFY:
613 /*
614 * Disable the GPE, so it doesn't keep firing before the method has a
615 * chance to run (it runs asynchronously with interrupts enabled).
616 */
617 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
618 if (ACPI_FAILURE(status)) {
619 ACPI_EXCEPTION((AE_INFO, status,
620 "Unable to disable GPE[0x%2X]",
621 gpe_number));
622 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
623 }
624 718
625 /* 719 /*
626 * Execute the method associated with the GPE 720 * Execute the method associated with the GPE
@@ -631,7 +725,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
631 gpe_event_info); 725 gpe_event_info);
632 if (ACPI_FAILURE(status)) { 726 if (ACPI_FAILURE(status)) {
633 ACPI_EXCEPTION((AE_INFO, status, 727 ACPI_EXCEPTION((AE_INFO, status,
634 "Unable to queue handler for GPE[0x%2X] - event disabled", 728 "Unable to queue handler for GPE%2X - event disabled",
635 gpe_number)); 729 gpe_number));
636 } 730 }
637 break; 731 break;
@@ -644,20 +738,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
644 * a GPE to be enabled if it has no handler or method. 738 * a GPE to be enabled if it has no handler or method.
645 */ 739 */
646 ACPI_ERROR((AE_INFO, 740 ACPI_ERROR((AE_INFO,
647 "No handler or method for GPE[0x%2X], disabling event", 741 "No handler or method for GPE%02X, disabling event",
648 gpe_number)); 742 gpe_number));
649 743
650 /*
651 * Disable the GPE. The GPE will remain disabled a handler
652 * is installed or ACPICA is restarted.
653 */
654 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
655 if (ACPI_FAILURE(status)) {
656 ACPI_EXCEPTION((AE_INFO, status,
657 "Unable to disable GPE[0x%2X]",
658 gpe_number));
659 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
660 }
661 break; 744 break;
662 } 745 }
663 746
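In the reworked acpi_ev_gpe_dispatch() above, every GPE is disabled before any handler or method runs, the installed handler is called with the GPE device and number, and its u32 return value decides whether ACPICA clears and re-enables the GPE via acpi_ev_finish_gpe(). A hedged sketch of a handler written against that contract (the handler body and context structure are hypothetical):

/* Hypothetical per-driver context */
struct my_gpe_context {
	unsigned long event_count;
};

/*
 * GPE handler matching the new three-argument calling convention used
 * by acpi_ev_gpe_dispatch(). Returning ACPI_REENABLE_GPE asks ACPICA to
 * clear (if level-triggered) and re-enable the GPE; returning 0 leaves
 * it disabled until the driver re-enables it itself.
 */
static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			  void *context)
{
	struct my_gpe_context *ctx = context;

	ctx->event_count++;	/* acknowledge/queue the real work here */

	return ACPI_REENABLE_GPE;
}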
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 020add3eee1c..9acb86958c09 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
361 361
362 gpe_block->node = gpe_device; 362 gpe_block->node = gpe_device;
363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); 363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
364 gpe_block->initialized = FALSE;
364 gpe_block->register_count = register_count; 365 gpe_block->register_count = register_count;
365 gpe_block->block_base_number = gpe_block_base_number; 366 gpe_block->block_base_number = gpe_block_base_number;
366 gpe_block->initialized = FALSE;
367 367
368 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, 368 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
369 sizeof(struct acpi_generic_address)); 369 sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
386 return_ACPI_STATUS(status); 386 return_ACPI_STATUS(status);
387 } 387 }
388 388
389 acpi_all_gpes_initialized = FALSE; 389 acpi_gbl_all_gpes_initialized = FALSE;
390 390
391 /* Find all GPE methods (_Lxx or_Exx) for this block */ 391 /* Find all GPE methods (_Lxx or_Exx) for this block */
392 392
@@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
423 * 423 *
424 * FUNCTION: acpi_ev_initialize_gpe_block 424 * FUNCTION: acpi_ev_initialize_gpe_block
425 * 425 *
426 * PARAMETERS: gpe_device - Handle to the parent GPE block 426 * PARAMETERS: acpi_gpe_callback
427 * gpe_block - Gpe Block info
428 * 427 *
429 * RETURN: Status 428 * RETURN: Status
430 * 429 *
431 * DESCRIPTION: Initialize and enable a GPE block. First find and run any 430 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
432 * _PRT methods associated with the block, then enable the 431 * associated methods.
433 * appropriate GPEs.
434 * Note: Assumes namespace is locked. 432 * Note: Assumes namespace is locked.
435 * 433 *
436 ******************************************************************************/ 434 ******************************************************************************/
@@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
450 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); 448 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
451 449
452 /* 450 /*
453 * Ignore a null GPE block (e.g., if no GPE block 1 exists) and 451 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
454 * GPE blocks that have been initialized already. 452 * any GPE blocks that have been initialized already.
455 */ 453 */
456 if (!gpe_block || gpe_block->initialized) { 454 if (!gpe_block || gpe_block->initialized) {
457 return_ACPI_STATUS(AE_OK); 455 return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
459 457
460 /* 458 /*
461 * Enable all GPEs that have a corresponding method and have the 459 * Enable all GPEs that have a corresponding method and have the
462 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must 460 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
463 * be enabled via the acpi_enable_gpe() interface. 461 * must be enabled via the acpi_enable_gpe() interface.
464 */ 462 */
465 gpe_enabled_count = 0; 463 gpe_enabled_count = 0;
466 464
@@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
472 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; 470 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
473 gpe_event_info = &gpe_block->event_info[gpe_index]; 471 gpe_event_info = &gpe_block->event_info[gpe_index];
474 472
475 /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ 473 /*
476 474 * Ignore GPEs that have no corresponding _Lxx/_Exx method
477 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) 475 * and GPEs that are used to wake the system
476 */
477 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
478 ACPI_GPE_DISPATCH_NONE)
479 || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
480 == ACPI_GPE_DISPATCH_HANDLER)
478 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 481 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
479 continue; 482 continue;
480 } 483 }
481 484
482 status = acpi_raw_enable_gpe(gpe_event_info); 485 status = acpi_ev_add_gpe_reference(gpe_event_info);
483 if (ACPI_FAILURE(status)) { 486 if (ACPI_FAILURE(status)) {
484 ACPI_EXCEPTION((AE_INFO, status, 487 ACPI_EXCEPTION((AE_INFO, status,
485 "Could not enable GPE 0x%02X", 488 "Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 4c8dea513b66..c59dc2340593 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -45,11 +45,27 @@
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h" 46#include "acevents.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "acinterp.h"
49 48
50#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evgpeinit") 50ACPI_MODULE_NAME("evgpeinit")
52 51
52/*
53 * Note: History of _PRW support in ACPICA
54 *
55 * Originally (2000 - 2010), the GPE initialization code performed a walk of
56 * the entire namespace to execute the _PRW methods and detect all GPEs
57 * capable of waking the system.
58 *
59 * As of 10/2010, the _PRW method execution has been removed since it is
60 * actually unnecessary. The host OS must in fact execute all _PRW methods
61 * in order to identify the device/power-resource dependencies. We now put
62 * the onus on the host OS to identify the wake GPEs as part of this process
63 * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
64 * not only reduces the complexity of the ACPICA initialization code, but in
65 * some cases (on systems with very large namespaces) it should reduce the
66 * kernel boot time as well.
67 */
68
53/******************************************************************************* 69/*******************************************************************************
54 * 70 *
55 * FUNCTION: acpi_ev_gpe_initialize 71 * FUNCTION: acpi_ev_gpe_initialize
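The history note above shifts responsibility for wake GPEs to the host OS: it must evaluate _PRW itself and then tell ACPICA which GPEs can wake the system. A hedged sketch of that call sequence, assuming the acpi_setup_gpe_for_wake() interface named in the note (added by this series in evxfgpe.c; the exact prototype should be checked there, and the device handle and GPE number below are hypothetical):

/*
 * Hypothetical host-side sequence after evaluating a device's _PRW:
 * wake_device and gpe_number would come from the _PRW package.
 */
static acpi_status my_register_wake_gpe(acpi_handle wake_device,
					u32 gpe_number)
{
	acpi_status status;

	/* Mark the GPE wake-capable; if it has no _Lxx/_Exx method,
	 * ACPICA can use the implicit-notify path added in evgpe.c
	 * (assumed interface from the new evxfgpe.c). */
	status = acpi_setup_gpe_for_wake(wake_device, NULL, gpe_number);
	if (ACPI_FAILURE(status))
		return status;

	/* Take a runtime reference so the GPE is actually enabled */
	return acpi_enable_gpe(NULL, gpe_number);
}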
@@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
222 acpi_status status = AE_OK; 238 acpi_status status = AE_OK;
223 239
224 /* 240 /*
225 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. 241 * Find any _Lxx/_Exx GPE methods that have just been loaded.
226 * 242 *
227 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately 243 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
228 * enabled. 244 * enabled.
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
235 return; 251 return;
236 } 252 }
237 253
254 walk_info.count = 0;
238 walk_info.owner_id = table_owner_id; 255 walk_info.owner_id = table_owner_id;
239 walk_info.execute_by_owner_id = TRUE; 256 walk_info.execute_by_owner_id = TRUE;
240 walk_info.count = 0;
241 257
242 /* Walk the interrupt level descriptor list */ 258 /* Walk the interrupt level descriptor list */
243 259
@@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
298 * xx - is the GPE number [in HEX] 314 * xx - is the GPE number [in HEX]
299 * 315 *
300 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods 316 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
301 * with that owner. 317 * with that owner.
302 * 318 *
303 ******************************************************************************/ 319 ******************************************************************************/
304 320
@@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
415 * Add the GPE information from above to the gpe_event_info block for 431 * Add the GPE information from above to the gpe_event_info block for
416 * use during dispatch of this GPE. 432 * use during dispatch of this GPE.
417 */ 433 */
434 gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
418 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); 435 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
419 gpe_event_info->dispatch.method_node = method_node; 436 gpe_event_info->dispatch.method_node = method_node;
420 437
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 19a0e513ea48..10e477494dcf 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -154,6 +154,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
154 154
155/******************************************************************************* 155/*******************************************************************************
156 * 156 *
157 * FUNCTION: acpi_ev_get_gpe_device
158 *
159 * PARAMETERS: GPE_WALK_CALLBACK
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
164 * block device. NULL if the GPE is one of the FADT-defined GPEs.
165 *
166 ******************************************************************************/
167
168acpi_status
169acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
170 struct acpi_gpe_block_info *gpe_block, void *context)
171{
172 struct acpi_gpe_device_info *info = context;
173
174 /* Increment Index by the number of GPEs in this block */
175
176 info->next_block_base_index += gpe_block->gpe_count;
177
178 if (info->index < info->next_block_base_index) {
179 /*
180 * The GPE index is within this block, get the node. Leave the node
181 * NULL for the FADT-defined GPEs
182 */
183 if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
184 info->gpe_device = gpe_block->node;
185 }
186
187 info->status = AE_OK;
188 return (AE_CTRL_END);
189 }
190
191 return (AE_OK);
192}
193
194/*******************************************************************************
195 *
157 * FUNCTION: acpi_ev_get_gpe_xrupt_block 196 * FUNCTION: acpi_ev_get_gpe_xrupt_block
158 * 197 *
159 * PARAMETERS: interrupt_number - Interrupt for a GPE block 198 * PARAMETERS: interrupt_number - Interrupt for a GPE block
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index fcaed9fb44ff..8e31bb5a973a 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -284,41 +284,41 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
284 * RETURN: ACPI_INTERRUPT_HANDLED 284 * RETURN: ACPI_INTERRUPT_HANDLED
285 * 285 *
286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
287 * release interrupt occurs. Attempt to acquire the global lock, 287 * release interrupt occurs. If there's a thread waiting for
288 * if successful, signal the thread waiting for the lock. 288 * the global lock, signal it.
289 * 289 *
290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If 290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
291 * this is not possible for some reason, a separate thread will have to be 291 * this is not possible for some reason, a separate thread will have to be
292 * scheduled to do this. 292 * scheduled to do this.
293 * 293 *
294 ******************************************************************************/ 294 ******************************************************************************/
295static u8 acpi_ev_global_lock_pending;
296static spinlock_t _acpi_ev_global_lock_pending_lock;
297#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
295 298
296static u32 acpi_ev_global_lock_handler(void *context) 299static u32 acpi_ev_global_lock_handler(void *context)
297{ 300{
298 u8 acquired = FALSE; 301 acpi_status status;
302 acpi_cpu_flags flags;
299 303
300 /* 304 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
301 * Attempt to get the lock.
302 *
303 * If we don't get it now, it will be marked pending and we will
304 * take another interrupt when it becomes free.
305 */
306 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
307 if (acquired) {
308 305
309 /* Got the lock, now wake all threads waiting for it */ 306 if (!acpi_ev_global_lock_pending) {
307 goto out;
308 }
310 309
311 acpi_gbl_global_lock_acquired = TRUE; 310 /* Send a unit to the semaphore */
312 /* Send a unit to the semaphore */
313 311
314 if (ACPI_FAILURE 312 status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
315 (acpi_os_signal_semaphore 313 if (ACPI_FAILURE(status)) {
316 (acpi_gbl_global_lock_semaphore, 1))) { 314 ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
317 ACPI_ERROR((AE_INFO,
318 "Could not signal Global Lock semaphore"));
319 }
320 } 315 }
321 316
317 acpi_ev_global_lock_pending = FALSE;
318
319 out:
320 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
321
322 return (ACPI_INTERRUPT_HANDLED); 322 return (ACPI_INTERRUPT_HANDLED);
323} 323}
324 324
@@ -415,6 +415,7 @@ static int acpi_ev_global_lock_acquired;
415 415
416acpi_status acpi_ev_acquire_global_lock(u16 timeout) 416acpi_status acpi_ev_acquire_global_lock(u16 timeout)
417{ 417{
418 acpi_cpu_flags flags;
418 acpi_status status = AE_OK; 419 acpi_status status = AE_OK;
419 u8 acquired = FALSE; 420 u8 acquired = FALSE;
420 421
@@ -467,32 +468,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
467 return_ACPI_STATUS(AE_OK); 468 return_ACPI_STATUS(AE_OK);
468 } 469 }
469 470
470 /* Attempt to acquire the actual hardware lock */ 471 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
472
473 do {
474
475 /* Attempt to acquire the actual hardware lock */
476
477 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
478 if (acquired) {
479 acpi_gbl_global_lock_acquired = TRUE;
480
481 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
482 "Acquired hardware Global Lock\n"));
483 break;
484 }
471 485
472 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); 486 acpi_ev_global_lock_pending = TRUE;
473 if (acquired) {
474 487
475 /* We got the lock */ 488 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
476 489
490 /*
491 * Did not get the lock. The pending bit was set above, and we
492 * must wait until we get the global lock released interrupt.
493 */
477 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 494 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
478 "Acquired hardware Global Lock\n")); 495 "Waiting for hardware Global Lock\n"));
479 496
480 acpi_gbl_global_lock_acquired = TRUE; 497 /*
481 return_ACPI_STATUS(AE_OK); 498 * Wait for handshake with the global lock interrupt handler.
482 } 499 * This interface releases the interpreter if we must wait.
500 */
501 status = acpi_ex_system_wait_semaphore(
502 acpi_gbl_global_lock_semaphore,
503 ACPI_WAIT_FOREVER);
483 504
484 /* 505 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
485 * Did not get the lock. The pending bit was set above, and we must now
486 * wait until we get the global lock released interrupt.
487 */
488 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
489 506
490 /* 507 } while (ACPI_SUCCESS(status));
491 * Wait for handshake with the global lock interrupt handler. 508
492 * This interface releases the interpreter if we must wait. 509 acpi_ev_global_lock_pending = FALSE;
493 */ 510
494 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 511 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
495 ACPI_WAIT_FOREVER);
496 512
497 return_ACPI_STATUS(status); 513 return_ACPI_STATUS(status);
498} 514}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 36af222cac65..1226689bdb1b 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
92 92
93ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) 93ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
94#endif /* ACPI_FUTURE_USAGE */ 94#endif /* ACPI_FUTURE_USAGE */
95
96/*******************************************************************************
97 *
98 * FUNCTION: acpi_install_global_event_handler
99 *
100 * PARAMETERS: Handler - Pointer to the global event handler function
101 * Context - Value passed to the handler on each event
102 *
103 * RETURN: Status
104 *
105 * DESCRIPTION: Saves the pointer to the handler function. The global handler
106 * is invoked upon each incoming GPE and Fixed Event. It is
107 * invoked at interrupt level at the time of the event dispatch.
108 * Can be used to update event counters, etc.
109 *
110 ******************************************************************************/
111acpi_status
112acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
113{
114 acpi_status status;
115
116 ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
117
118 /* Parameter validation */
119
120 if (!handler) {
121 return_ACPI_STATUS(AE_BAD_PARAMETER);
122 }
123
124 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
125 if (ACPI_FAILURE(status)) {
126 return_ACPI_STATUS(status);
127 }
128
129 /* Don't allow two handlers. */
130
131 if (acpi_gbl_global_event_handler) {
132 status = AE_ALREADY_EXISTS;
133 goto cleanup;
134 }
135
136 acpi_gbl_global_event_handler = handler;
137 acpi_gbl_global_event_handler_context = context;
138
139 cleanup:
140 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
141 return_ACPI_STATUS(status);
142}
143
144ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
145
95/******************************************************************************* 146/*******************************************************************************
96 * 147 *
97 * FUNCTION: acpi_install_fixed_event_handler 148 * FUNCTION: acpi_install_fixed_event_handler
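The new acpi_install_global_event_handler() above registers a single callback that acpi_ev_fixed_event_detect() and acpi_ev_gpe_dispatch() invoke, at interrupt level, for every fixed event and GPE; it is meant for lightweight bookkeeping such as event counters. A hedged sketch of a matching handler and its registration (the handler and counters are hypothetical; the signature follows the calls visible in this diff):

static u32 my_gpe_events;
static u32 my_fixed_events;

/* Runs at interrupt level: only count, never block or run methods */
static void my_global_event_handler(u32 event_type, acpi_handle device,
				    u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE)
		my_gpe_events++;
	else if (event_type == ACPI_EVENT_TYPE_FIXED)
		my_fixed_events++;
}

/* Registration, typically done once during host OS initialization */
static acpi_status my_install_counter(void)
{
	return acpi_install_global_event_handler(my_global_event_handler,
						 NULL);
}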
@@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
671acpi_status 722acpi_status
672acpi_install_gpe_handler(acpi_handle gpe_device, 723acpi_install_gpe_handler(acpi_handle gpe_device,
673 u32 gpe_number, 724 u32 gpe_number,
674 u32 type, acpi_event_handler address, void *context) 725 u32 type, acpi_gpe_handler address, void *context)
675{ 726{
676 struct acpi_gpe_event_info *gpe_event_info; 727 struct acpi_gpe_event_info *gpe_event_info;
677 struct acpi_handler_info *handler; 728 struct acpi_gpe_handler_info *handler;
678 acpi_status status; 729 acpi_status status;
679 acpi_cpu_flags flags; 730 acpi_cpu_flags flags;
680 731
@@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
693 744
694 /* Allocate memory for the handler object */ 745 /* Allocate memory for the handler object */
695 746
696 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); 747 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
697 if (!handler) { 748 if (!handler) {
698 status = AE_NO_MEMORY; 749 status = AE_NO_MEMORY;
699 goto unlock_and_exit; 750 goto unlock_and_exit;
@@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
722 handler->address = address; 773 handler->address = address;
723 handler->context = context; 774 handler->context = context;
724 handler->method_node = gpe_event_info->dispatch.method_node; 775 handler->method_node = gpe_event_info->dispatch.method_node;
725 handler->orig_flags = gpe_event_info->flags & 776 handler->original_flags = gpe_event_info->flags &
726 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 777 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
727 778
728 /* 779 /*
@@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
731 * disabled now to avoid spurious execution of the handler. 782 * disabled now to avoid spurious execution of the handler.
732 */ 783 */
733 784
734 if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 785 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
735 && gpe_event_info->runtime_count) { 786 && gpe_event_info->runtime_count) {
736 handler->orig_enabled = 1; 787 handler->originally_enabled = 1;
737 (void)acpi_raw_disable_gpe(gpe_event_info); 788 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
738 } 789 }
739 790
740 /* Install the handler */ 791 /* Install the handler */
@@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
777 ******************************************************************************/ 828 ******************************************************************************/
778acpi_status 829acpi_status
779acpi_remove_gpe_handler(acpi_handle gpe_device, 830acpi_remove_gpe_handler(acpi_handle gpe_device,
780 u32 gpe_number, acpi_event_handler address) 831 u32 gpe_number, acpi_gpe_handler address)
781{ 832{
782 struct acpi_gpe_event_info *gpe_event_info; 833 struct acpi_gpe_event_info *gpe_event_info;
783 struct acpi_handler_info *handler; 834 struct acpi_gpe_handler_info *handler;
784 acpi_status status; 835 acpi_status status;
785 acpi_cpu_flags flags; 836 acpi_cpu_flags flags;
786 837
@@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
835 gpe_event_info->dispatch.method_node = handler->method_node; 886 gpe_event_info->dispatch.method_node = handler->method_node;
836 gpe_event_info->flags &= 887 gpe_event_info->flags &=
837 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 888 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
838 gpe_event_info->flags |= handler->orig_flags; 889 gpe_event_info->flags |= handler->original_flags;
839 890
840 /* 891 /*
841 * If the GPE was previously associated with a method and it was 892 * If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
843 * post-initialization configuration. 894 * post-initialization configuration.
844 */ 895 */
845 896
846 if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 897 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
847 && handler->orig_enabled) 898 && handler->originally_enabled)
848 (void)acpi_raw_enable_gpe(gpe_event_info); 899 (void)acpi_ev_add_gpe_reference(gpe_event_info);
849 900
850 /* Now we can free the handler object */ 901 /* Now we can free the handler object */
851 902
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index a1dabe3fd8ae..90488c1e0f3d 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -43,18 +43,11 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48#include "actables.h" 46#include "actables.h"
49 47
50#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evxfevnt") 49ACPI_MODULE_NAME("evxfevnt")
52 50
53/* Local prototypes */
54static acpi_status
55acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
56 struct acpi_gpe_block_info *gpe_block, void *context);
57
58/******************************************************************************* 51/*******************************************************************************
59 * 52 *
60 * FUNCTION: acpi_enable 53 * FUNCTION: acpi_enable
@@ -213,185 +206,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
213 206
214/******************************************************************************* 207/*******************************************************************************
215 * 208 *
216 * FUNCTION: acpi_gpe_wakeup
217 *
218 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
219 * gpe_number - GPE level within the GPE block
220 * Action - Enable or Disable
221 *
222 * RETURN: Status
223 *
224 * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
225 *
226 ******************************************************************************/
227acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
228{
229 acpi_status status = AE_OK;
230 struct acpi_gpe_event_info *gpe_event_info;
231 struct acpi_gpe_register_info *gpe_register_info;
232 acpi_cpu_flags flags;
233 u32 register_bit;
234
235 ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
236
237 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
238
239 /* Ensure that we have a valid GPE number */
240
241 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
242 if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
243 status = AE_BAD_PARAMETER;
244 goto unlock_and_exit;
245 }
246
247 gpe_register_info = gpe_event_info->register_info;
248 if (!gpe_register_info) {
249 status = AE_NOT_EXIST;
250 goto unlock_and_exit;
251 }
252
253 register_bit =
254 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
255
256 /* Perform the action */
257
258 switch (action) {
259 case ACPI_GPE_ENABLE:
260 ACPI_SET_BIT(gpe_register_info->enable_for_wake,
261 (u8)register_bit);
262 break;
263
264 case ACPI_GPE_DISABLE:
265 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
266 (u8)register_bit);
267 break;
268
269 default:
270 ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
271 status = AE_BAD_PARAMETER;
272 break;
273 }
274
275unlock_and_exit:
276 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
277 return_ACPI_STATUS(status);
278}
279
280ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
281
282/*******************************************************************************
283 *
284 * FUNCTION: acpi_enable_gpe
285 *
286 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
287 * gpe_number - GPE level within the GPE block
288 *
289 * RETURN: Status
290 *
291 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
292 * hardware-enabled.
293 *
294 ******************************************************************************/
295acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
296{
297 acpi_status status = AE_BAD_PARAMETER;
298 struct acpi_gpe_event_info *gpe_event_info;
299 acpi_cpu_flags flags;
300
301 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
302
303 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
304
305 /* Ensure that we have a valid GPE number */
306
307 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
308 if (gpe_event_info) {
309 status = acpi_raw_enable_gpe(gpe_event_info);
310 }
311
312 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
313 return_ACPI_STATUS(status);
314}
315ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_disable_gpe
320 *
321 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
322 * gpe_number - GPE level within the GPE block
323 *
324 * RETURN: Status
325 *
326 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
327 * removed, only then is the GPE disabled (for runtime GPEs), or
328 * the GPE mask bit disabled (for wake GPEs)
329 *
330 ******************************************************************************/
331acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
332{
333 acpi_status status = AE_BAD_PARAMETER;
334 struct acpi_gpe_event_info *gpe_event_info;
335 acpi_cpu_flags flags;
336
337 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
338
339 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
340
341 /* Ensure that we have a valid GPE number */
342
343 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
344 if (gpe_event_info) {
345 status = acpi_raw_disable_gpe(gpe_event_info) ;
346 }
347
348 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
349 return_ACPI_STATUS(status);
350}
351ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
352
353/*******************************************************************************
354 *
355 * FUNCTION: acpi_gpe_can_wake
356 *
357 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
358 * gpe_number - GPE level within the GPE block
359 *
360 * RETURN: Status
361 *
362 * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE
363 * has a corresponding method and is currently enabled, disable it
364 * (GPEs with corresponding methods are enabled unconditionally
365 * during initialization, but GPEs that can wake up are expected
366 * to be initially disabled).
367 *
368 ******************************************************************************/
369acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
370{
371 acpi_status status = AE_OK;
372 struct acpi_gpe_event_info *gpe_event_info;
373 acpi_cpu_flags flags;
374
375 ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
376
377 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
378
379 /* Ensure that we have a valid GPE number */
380
381 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
382 if (gpe_event_info) {
383 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
384 } else {
385 status = AE_BAD_PARAMETER;
386 }
387
388 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
389 return_ACPI_STATUS(status);
390}
391ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
392
393/*******************************************************************************
394 *
395 * FUNCTION: acpi_disable_event 209 * FUNCTION: acpi_disable_event
396 * 210 *
397 * PARAMETERS: Event - The fixed event to be disabled 211 * PARAMETERS: Event - The fixed event to be disabled
@@ -483,44 +297,6 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
483 297
484/******************************************************************************* 298/*******************************************************************************
485 * 299 *
486 * FUNCTION: acpi_clear_gpe
487 *
488 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
489 * gpe_number - GPE level within the GPE block
490 *
491 * RETURN: Status
492 *
493 * DESCRIPTION: Clear an ACPI event (general purpose)
494 *
495 ******************************************************************************/
496acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
497{
498 acpi_status status = AE_OK;
499 struct acpi_gpe_event_info *gpe_event_info;
500 acpi_cpu_flags flags;
501
502 ACPI_FUNCTION_TRACE(acpi_clear_gpe);
503
504 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
505
506 /* Ensure that we have a valid GPE number */
507
508 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
509 if (!gpe_event_info) {
510 status = AE_BAD_PARAMETER;
511 goto unlock_and_exit;
512 }
513
514 status = acpi_hw_clear_gpe(gpe_event_info);
515
516 unlock_and_exit:
517 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
518 return_ACPI_STATUS(status);
519}
520
521ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
522/*******************************************************************************
523 *
524 * FUNCTION: acpi_get_event_status 300 * FUNCTION: acpi_get_event_status
525 * 301 *
526 * PARAMETERS: Event - The fixed event 302 * PARAMETERS: Event - The fixed event
@@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
575} 351}
576 352
577ACPI_EXPORT_SYMBOL(acpi_get_event_status) 353ACPI_EXPORT_SYMBOL(acpi_get_event_status)
578
579/*******************************************************************************
580 *
581 * FUNCTION: acpi_get_gpe_status
582 *
583 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
584 * gpe_number - GPE level within the GPE block
585 * event_status - Where the current status of the event will
586 * be returned
587 *
588 * RETURN: Status
589 *
590 * DESCRIPTION: Get status of an event (general purpose)
591 *
592 ******************************************************************************/
593acpi_status
594acpi_get_gpe_status(acpi_handle gpe_device,
595 u32 gpe_number, acpi_event_status *event_status)
596{
597 acpi_status status = AE_OK;
598 struct acpi_gpe_event_info *gpe_event_info;
599 acpi_cpu_flags flags;
600
601 ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
602
603 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
604
605 /* Ensure that we have a valid GPE number */
606
607 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
608 if (!gpe_event_info) {
609 status = AE_BAD_PARAMETER;
610 goto unlock_and_exit;
611 }
612
613 /* Obtain status on the requested GPE number */
614
615 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
616
617 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
618 *event_status |= ACPI_EVENT_FLAG_HANDLE;
619
620 unlock_and_exit:
621 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
622 return_ACPI_STATUS(status);
623}
624
625ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
626/*******************************************************************************
627 *
628 * FUNCTION: acpi_install_gpe_block
629 *
630 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
631 * gpe_block_address - Address and space_iD
632 * register_count - Number of GPE register pairs in the block
633 * interrupt_number - H/W interrupt for the block
634 *
635 * RETURN: Status
636 *
637 * DESCRIPTION: Create and Install a block of GPE registers
638 *
639 ******************************************************************************/
640acpi_status
641acpi_install_gpe_block(acpi_handle gpe_device,
642 struct acpi_generic_address *gpe_block_address,
643 u32 register_count, u32 interrupt_number)
644{
645 acpi_status status = AE_OK;
646 union acpi_operand_object *obj_desc;
647 struct acpi_namespace_node *node;
648 struct acpi_gpe_block_info *gpe_block;
649
650 ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
651
652 if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
653 return_ACPI_STATUS(AE_BAD_PARAMETER);
654 }
655
656 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
657 if (ACPI_FAILURE(status)) {
658 return (status);
659 }
660
661 node = acpi_ns_validate_handle(gpe_device);
662 if (!node) {
663 status = AE_BAD_PARAMETER;
664 goto unlock_and_exit;
665 }
666
667 /*
668 * For user-installed GPE Block Devices, the gpe_block_base_number
669 * is always zero
670 */
671 status =
672 acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
673 interrupt_number, &gpe_block);
674 if (ACPI_FAILURE(status)) {
675 goto unlock_and_exit;
676 }
677
678 /* Install block in the device_object attached to the node */
679
680 obj_desc = acpi_ns_get_attached_object(node);
681 if (!obj_desc) {
682
683 /*
684 * No object, create a new one (Device nodes do not always have
685 * an attached object)
686 */
687 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
688 if (!obj_desc) {
689 status = AE_NO_MEMORY;
690 goto unlock_and_exit;
691 }
692
693 status =
694 acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
695
696 /* Remove local reference to the object */
697
698 acpi_ut_remove_reference(obj_desc);
699
700 if (ACPI_FAILURE(status)) {
701 goto unlock_and_exit;
702 }
703 }
704
705 /* Now install the GPE block in the device_object */
706
707 obj_desc->device.gpe_block = gpe_block;
708
709 unlock_and_exit:
710 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
711 return_ACPI_STATUS(status);
712}
713
714ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
715
716/*******************************************************************************
717 *
718 * FUNCTION: acpi_remove_gpe_block
719 *
720 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
721 *
722 * RETURN: Status
723 *
724 * DESCRIPTION: Remove a previously installed block of GPE registers
725 *
726 ******************************************************************************/
727acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
728{
729 union acpi_operand_object *obj_desc;
730 acpi_status status;
731 struct acpi_namespace_node *node;
732
733 ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
734
735 if (!gpe_device) {
736 return_ACPI_STATUS(AE_BAD_PARAMETER);
737 }
738
739 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
740 if (ACPI_FAILURE(status)) {
741 return (status);
742 }
743
744 node = acpi_ns_validate_handle(gpe_device);
745 if (!node) {
746 status = AE_BAD_PARAMETER;
747 goto unlock_and_exit;
748 }
749
750 /* Get the device_object attached to the node */
751
752 obj_desc = acpi_ns_get_attached_object(node);
753 if (!obj_desc || !obj_desc->device.gpe_block) {
754 return_ACPI_STATUS(AE_NULL_OBJECT);
755 }
756
757 /* Delete the GPE block (but not the device_object) */
758
759 status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
760 if (ACPI_SUCCESS(status)) {
761 obj_desc->device.gpe_block = NULL;
762 }
763
764 unlock_and_exit:
765 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
766 return_ACPI_STATUS(status);
767}
768
769ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
770
771/*******************************************************************************
772 *
773 * FUNCTION: acpi_get_gpe_device
774 *
775 * PARAMETERS: Index - System GPE index (0-current_gpe_count)
776 * gpe_device - Where the parent GPE Device is returned
777 *
778 * RETURN: Status
779 *
780 * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
781 * gpe device indicates that the gpe number is contained in one of
782 * the FADT-defined gpe blocks. Otherwise, the GPE block device.
783 *
784 ******************************************************************************/
785acpi_status
786acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
787{
788 struct acpi_gpe_device_info info;
789 acpi_status status;
790
791 ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
792
793 if (!gpe_device) {
794 return_ACPI_STATUS(AE_BAD_PARAMETER);
795 }
796
797 if (index >= acpi_current_gpe_count) {
798 return_ACPI_STATUS(AE_NOT_EXIST);
799 }
800
801 /* Setup and walk the GPE list */
802
803 info.index = index;
804 info.status = AE_NOT_EXIST;
805 info.gpe_device = NULL;
806 info.next_block_base_index = 0;
807
808 status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
809 if (ACPI_FAILURE(status)) {
810 return_ACPI_STATUS(status);
811 }
812
813 *gpe_device = info.gpe_device;
814 return_ACPI_STATUS(info.status);
815}
816
817ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
818
819/*******************************************************************************
820 *
821 * FUNCTION: acpi_ev_get_gpe_device
822 *
823 * PARAMETERS: GPE_WALK_CALLBACK
824 *
825 * RETURN: Status
826 *
827 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
828 * block device. NULL if the GPE is one of the FADT-defined GPEs.
829 *
830 ******************************************************************************/
831static acpi_status
832acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
833 struct acpi_gpe_block_info *gpe_block, void *context)
834{
835 struct acpi_gpe_device_info *info = context;
836
837 /* Increment Index by the number of GPEs in this block */
838
839 info->next_block_base_index += gpe_block->gpe_count;
840
841 if (info->index < info->next_block_base_index) {
842 /*
843 * The GPE index is within this block, get the node. Leave the node
844 * NULL for the FADT-defined GPEs
845 */
846 if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
847 info->gpe_device = gpe_block->node;
848 }
849
850 info->status = AE_OK;
851 return (AE_CTRL_END);
852 }
853
854 return (AE_OK);
855}
856
857/******************************************************************************
858 *
859 * FUNCTION: acpi_disable_all_gpes
860 *
861 * PARAMETERS: None
862 *
863 * RETURN: Status
864 *
865 * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
866 *
867 ******************************************************************************/
868
869acpi_status acpi_disable_all_gpes(void)
870{
871 acpi_status status;
872
873 ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
874
875 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
876 if (ACPI_FAILURE(status)) {
877 return_ACPI_STATUS(status);
878 }
879
880 status = acpi_hw_disable_all_gpes();
881 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
882
883 return_ACPI_STATUS(status);
884}
885
886/******************************************************************************
887 *
888 * FUNCTION: acpi_enable_all_runtime_gpes
889 *
890 * PARAMETERS: None
891 *
892 * RETURN: Status
893 *
894 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
895 *
896 ******************************************************************************/
897
898acpi_status acpi_enable_all_runtime_gpes(void)
899{
900 acpi_status status;
901
902 ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
903
904 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
905 if (ACPI_FAILURE(status)) {
906 return_ACPI_STATUS(status);
907 }
908
909 status = acpi_hw_enable_all_runtime_gpes();
910 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
911
912 return_ACPI_STATUS(status);
913}
914
915/******************************************************************************
916 *
917 * FUNCTION: acpi_update_gpes
918 *
919 * PARAMETERS: None
920 *
921 * RETURN: None
922 *
923 * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
924 * are not pointed to by any device _PRW methods indicating that
925 * these GPEs are generally intended for system or device wakeup
926 * (such GPEs have to be enabled directly when the devices whose
927 * _PRW methods point to them are set up for wakeup signaling).
928 *
929 ******************************************************************************/
930
931acpi_status acpi_update_gpes(void)
932{
933 acpi_status status;
934
935 ACPI_FUNCTION_TRACE(acpi_update_gpes);
936
937 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
938 if (ACPI_FAILURE(status)) {
939 return_ACPI_STATUS(status);
940 } else if (acpi_all_gpes_initialized) {
941 goto unlock;
942 }
943
944 status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
945 if (ACPI_SUCCESS(status)) {
946 acpi_all_gpes_initialized = TRUE;
947 }
948
949unlock:
950 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
951
952 return_ACPI_STATUS(status);
953}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644
index 000000000000..416845bc9c1f
--- /dev/null
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -0,0 +1,669 @@
1/******************************************************************************
2 *
3 * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2010, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evxfgpe")
51
52/******************************************************************************
53 *
54 * FUNCTION: acpi_update_all_gpes
55 *
56 * PARAMETERS: None
57 *
58 * RETURN: Status
59 *
60 * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
61 * associated _Lxx or _Exx methods and are not pointed to by any
62 * device _PRW methods (this indicates that these GPEs are
63 * generally intended for system or device wakeup. Such GPEs
64 * have to be enabled directly when the devices whose _PRW
65 * methods point to them are set up for wakeup signaling.)
66 *
67 * NOTE: Should be called after any GPEs are added to the system. Primarily,
68 * after the system _PRW methods have been run, but also after a GPE Block
69 * Device has been added or if any new GPE methods have been added via a
70 * dynamic table load.
71 *
72 ******************************************************************************/
73
74acpi_status acpi_update_all_gpes(void)
75{
76 acpi_status status;
77
78 ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
79
80 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
81 if (ACPI_FAILURE(status)) {
82 return_ACPI_STATUS(status);
83 }
84
85 if (acpi_gbl_all_gpes_initialized) {
86 goto unlock_and_exit;
87 }
88
89 status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
90 if (ACPI_SUCCESS(status)) {
91 acpi_gbl_all_gpes_initialized = TRUE;
92 }
93
94unlock_and_exit:
95 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
96
97 return_ACPI_STATUS(status);
98}
99
100ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
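A minimal sketch of how a host might use this new interface, following the NOTE above (error handling elided; the call sites are illustrative, not the kernel's exact sequence):

	/* After the namespace is initialized and the _PRW methods have been run,
	 * complete GPE initialization and enable the non-wake method GPEs */
	acpi_status status = acpi_update_all_gpes();

	/* Call again after a GPE Block Device is added or a dynamically loaded
	 * table brings in new _Lxx/_Exx methods */
	status = acpi_update_all_gpes();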
101
102/*******************************************************************************
103 *
104 * FUNCTION: acpi_enable_gpe
105 *
106 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
107 * gpe_number - GPE level within the GPE block
108 *
109 * RETURN: Status
110 *
111 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
112 * hardware-enabled.
113 *
114 ******************************************************************************/
115
116acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
117{
118 acpi_status status = AE_BAD_PARAMETER;
119 struct acpi_gpe_event_info *gpe_event_info;
120 acpi_cpu_flags flags;
121
122 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
123
124 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
125
126 /* Ensure that we have a valid GPE number */
127
128 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
129 if (gpe_event_info) {
130 status = acpi_ev_add_gpe_reference(gpe_event_info);
131 }
132
133 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
134 return_ACPI_STATUS(status);
135}
136ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
137
138/*******************************************************************************
139 *
140 * FUNCTION: acpi_disable_gpe
141 *
142 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
143 * gpe_number - GPE level within the GPE block
144 *
145 * RETURN: Status
146 *
147 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
148 * removed, only then is the GPE disabled (for runtime GPEs), or
149 * the GPE mask bit disabled (for wake GPEs)
150 *
151 ******************************************************************************/
152
153acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
154{
155 acpi_status status = AE_BAD_PARAMETER;
156 struct acpi_gpe_event_info *gpe_event_info;
157 acpi_cpu_flags flags;
158
159 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
160
161 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
162
163 /* Ensure that we have a valid GPE number */
164
165 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
166 if (gpe_event_info) {
167 status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
168 }
169
170 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
171 return_ACPI_STATUS(status);
172}
173ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
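The reference counting described by these two interfaces, sketched for a hypothetical GPE 0x10 shared by two users:

	status = acpi_enable_gpe(NULL, 0x10);	/* first reference: GPE is hardware-enabled */
	status = acpi_enable_gpe(NULL, 0x10);	/* second reference: count only, no register write */

	status = acpi_disable_gpe(NULL, 0x10);	/* one reference remains: GPE stays enabled */
	status = acpi_disable_gpe(NULL, 0x10);	/* last reference removed: GPE is hardware-disabled */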
174
175
176/*******************************************************************************
177 *
178 * FUNCTION: acpi_setup_gpe_for_wake
179 *
180 * PARAMETERS: wake_device - Device associated with the GPE (via _PRW)
181 * gpe_device - Parent GPE Device. NULL for GPE0/GPE1
182 * gpe_number - GPE level within the GPE block
183 *
184 * RETURN: Status
185 *
186 * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
187 * interface is intended to be used as the host executes the
188 * _PRW methods (Power Resources for Wake) in the system tables.
189 * Each _PRW appears under a Device Object (The wake_device), and
190 * contains the info for the wake GPE associated with the
191 * wake_device.
192 *
193 ******************************************************************************/
194acpi_status
195acpi_setup_gpe_for_wake(acpi_handle wake_device,
196 acpi_handle gpe_device, u32 gpe_number)
197{
198 acpi_status status = AE_BAD_PARAMETER;
199 struct acpi_gpe_event_info *gpe_event_info;
200 struct acpi_namespace_node *device_node;
201 acpi_cpu_flags flags;
202
203 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
204
205 /* Parameter Validation */
206
207 if (!wake_device) {
208 /*
209 * By forcing wake_device to be valid, we automatically enable the
210 * implicit notify feature on all hosts.
211 */
212 return_ACPI_STATUS(AE_BAD_PARAMETER);
213 }
214
215 /* Validate wake_device is of type Device */
216
217 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
218 if (device_node->type != ACPI_TYPE_DEVICE) {
219 return_ACPI_STATUS(AE_BAD_PARAMETER);
220 }
221
222 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
223
224 /* Ensure that we have a valid GPE number */
225
226 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
227 if (gpe_event_info) {
228 /*
229 * If there is no method or handler for this GPE, then the
230 * wake_device will be notified whenever this GPE fires (aka
231 * "implicit notify") Note: The GPE is assumed to be
232 * level-triggered (for windows compatibility).
233 */
234 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
235 ACPI_GPE_DISPATCH_NONE) {
236 gpe_event_info->flags =
237 (ACPI_GPE_DISPATCH_NOTIFY |
238 ACPI_GPE_LEVEL_TRIGGERED);
239 gpe_event_info->dispatch.device_node = device_node;
240 }
241
242 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
243 status = AE_OK;
244 }
245
246 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
247 return_ACPI_STATUS(status);
248}
249ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
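A hedged sketch of the intended call site, while the host walks _PRW results; the device handle and GPE number are placeholders:

	/* Suppose _PRW under a device returned GPE 0x0d in the FADT GPE block */
	status = acpi_setup_gpe_for_wake(wake_dev_handle /* the _PRW's device */,
					 NULL /* FADT block */, 0x0d);

	/* If GPE 0x0d has no _Lxx/_Exx method and no handler, wake_dev_handle is
	 * now implicitly notified whenever the GPE fires */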
250
251/*******************************************************************************
252 *
253 * FUNCTION: acpi_set_gpe_wake_mask
254 *
255 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
256 * gpe_number - GPE level within the GPE block
257 * Action - Enable or Disable
258 *
259 * RETURN: Status
260 *
261 * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
262 * already be marked as a WAKE GPE.
263 *
264 ******************************************************************************/
265
266acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
267{
268 acpi_status status = AE_OK;
269 struct acpi_gpe_event_info *gpe_event_info;
270 struct acpi_gpe_register_info *gpe_register_info;
271 acpi_cpu_flags flags;
272 u32 register_bit;
273
274 ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
275
276 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
277
278 /*
279 * Ensure that we have a valid GPE number and that this GPE is in
280 * fact a wake GPE
281 */
282 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
283 if (!gpe_event_info) {
284 status = AE_BAD_PARAMETER;
285 goto unlock_and_exit;
286 }
287
288 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
289 status = AE_TYPE;
290 goto unlock_and_exit;
291 }
292
293 gpe_register_info = gpe_event_info->register_info;
294 if (!gpe_register_info) {
295 status = AE_NOT_EXIST;
296 goto unlock_and_exit;
297 }
298
299 register_bit =
300 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
301
302 /* Perform the action */
303
304 switch (action) {
305 case ACPI_GPE_ENABLE:
306 ACPI_SET_BIT(gpe_register_info->enable_for_wake,
307 (u8)register_bit);
308 break;
309
310 case ACPI_GPE_DISABLE:
311 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
312 (u8)register_bit);
313 break;
314
315 default:
316 ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
317 status = AE_BAD_PARAMETER;
318 break;
319 }
320
321unlock_and_exit:
322 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
323 return_ACPI_STATUS(status);
324}
325
326ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
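A sketch of the expected suspend-path usage for a GPE previously marked via acpi_setup_gpe_for_wake() (the call returns AE_TYPE if ACPI_GPE_CAN_WAKE was never set; the wakeup condition is a hypothetical host-side check):

	if (device_can_wake)
		status = acpi_set_gpe_wake_mask(NULL, 0x0d, ACPI_GPE_ENABLE);
	/* ... system sleeps and resumes ... */
	status = acpi_set_gpe_wake_mask(NULL, 0x0d, ACPI_GPE_DISABLE);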
327
328/*******************************************************************************
329 *
330 * FUNCTION: acpi_clear_gpe
331 *
332 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
333 * gpe_number - GPE level within the GPE block
334 *
335 * RETURN: Status
336 *
337 * DESCRIPTION: Clear an ACPI event (general purpose)
338 *
339 ******************************************************************************/
340acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
341{
342 acpi_status status = AE_OK;
343 struct acpi_gpe_event_info *gpe_event_info;
344 acpi_cpu_flags flags;
345
346 ACPI_FUNCTION_TRACE(acpi_clear_gpe);
347
348 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
349
350 /* Ensure that we have a valid GPE number */
351
352 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
353 if (!gpe_event_info) {
354 status = AE_BAD_PARAMETER;
355 goto unlock_and_exit;
356 }
357
358 status = acpi_hw_clear_gpe(gpe_event_info);
359
360 unlock_and_exit:
361 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
362 return_ACPI_STATUS(status);
363}
364
365ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
366
367/*******************************************************************************
368 *
369 * FUNCTION: acpi_get_gpe_status
370 *
371 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
372 * gpe_number - GPE level within the GPE block
373 * event_status - Where the current status of the event will
374 * be returned
375 *
376 * RETURN: Status
377 *
378 * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
379 *
380 ******************************************************************************/
381acpi_status
382acpi_get_gpe_status(acpi_handle gpe_device,
383 u32 gpe_number, acpi_event_status *event_status)
384{
385 acpi_status status = AE_OK;
386 struct acpi_gpe_event_info *gpe_event_info;
387 acpi_cpu_flags flags;
388
389 ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
390
391 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
392
393 /* Ensure that we have a valid GPE number */
394
395 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
396 if (!gpe_event_info) {
397 status = AE_BAD_PARAMETER;
398 goto unlock_and_exit;
399 }
400
401 /* Obtain status on the requested GPE number */
402
403 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
404
405 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
406 *event_status |= ACPI_EVENT_FLAG_HANDLE;
407
408 unlock_and_exit:
409 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
410 return_ACPI_STATUS(status);
411}
412
413ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
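Taken together with acpi_clear_gpe() above, a small status-polling sketch (the GPE number is illustrative; the flag names come from actypes.h):

	acpi_event_status ev_status;

	status = acpi_get_gpe_status(NULL, 0x16, &ev_status);
	if (ACPI_SUCCESS(status) && (ev_status & ACPI_EVENT_FLAG_SET))
		status = acpi_clear_gpe(NULL, 0x16);	/* write 1 to the status bit */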
414
415/******************************************************************************
416 *
417 * FUNCTION: acpi_disable_all_gpes
418 *
419 * PARAMETERS: None
420 *
421 * RETURN: Status
422 *
423 * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
424 *
425 ******************************************************************************/
426
427acpi_status acpi_disable_all_gpes(void)
428{
429 acpi_status status;
430
431 ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
432
433 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
434 if (ACPI_FAILURE(status)) {
435 return_ACPI_STATUS(status);
436 }
437
438 status = acpi_hw_disable_all_gpes();
439 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
440
441 return_ACPI_STATUS(status);
442}
443
444ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
445
446/******************************************************************************
447 *
448 * FUNCTION: acpi_enable_all_runtime_gpes
449 *
450 * PARAMETERS: None
451 *
452 * RETURN: Status
453 *
454 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
455 *
456 ******************************************************************************/
457
458acpi_status acpi_enable_all_runtime_gpes(void)
459{
460 acpi_status status;
461
462 ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
463
464 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
465 if (ACPI_FAILURE(status)) {
466 return_ACPI_STATUS(status);
467 }
468
469 status = acpi_hw_enable_all_runtime_gpes();
470 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
471
472 return_ACPI_STATUS(status);
473}
474
475ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
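These two interfaces pair up in the host's sleep path, roughly as follows (a sketch, not the kernel's exact suspend sequence; wake-enabled GPEs are re-armed separately from their enable_for_wake masks):

	/* entering a sleep state: stop runtime GPE delivery */
	status = acpi_disable_all_gpes();
	/* ... platform enters and leaves the sleep state ... */
	/* on resume: restore normal runtime GPE operation */
	status = acpi_enable_all_runtime_gpes();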
476
477/*******************************************************************************
478 *
479 * FUNCTION: acpi_install_gpe_block
480 *
481 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
482 * gpe_block_address - Address and space_iD
483 * register_count - Number of GPE register pairs in the block
484 * interrupt_number - H/W interrupt for the block
485 *
486 * RETURN: Status
487 *
488 * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
489 * enabled here.
490 *
491 ******************************************************************************/
492acpi_status
493acpi_install_gpe_block(acpi_handle gpe_device,
494 struct acpi_generic_address *gpe_block_address,
495 u32 register_count, u32 interrupt_number)
496{
497 acpi_status status;
498 union acpi_operand_object *obj_desc;
499 struct acpi_namespace_node *node;
500 struct acpi_gpe_block_info *gpe_block;
501
502 ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
503
504 if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
505 return_ACPI_STATUS(AE_BAD_PARAMETER);
506 }
507
508 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
509 if (ACPI_FAILURE(status)) {
510 return (status);
511 }
512
513 node = acpi_ns_validate_handle(gpe_device);
514 if (!node) {
515 status = AE_BAD_PARAMETER;
516 goto unlock_and_exit;
517 }
518
519 /*
520 * For user-installed GPE Block Devices, the gpe_block_base_number
521 * is always zero
522 */
523 status =
524 acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
525 interrupt_number, &gpe_block);
526 if (ACPI_FAILURE(status)) {
527 goto unlock_and_exit;
528 }
529
530 /* Install block in the device_object attached to the node */
531
532 obj_desc = acpi_ns_get_attached_object(node);
533 if (!obj_desc) {
534
535 /*
536 * No object, create a new one (Device nodes do not always have
537 * an attached object)
538 */
539 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
540 if (!obj_desc) {
541 status = AE_NO_MEMORY;
542 goto unlock_and_exit;
543 }
544
545 status =
546 acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
547
548 /* Remove local reference to the object */
549
550 acpi_ut_remove_reference(obj_desc);
551
552 if (ACPI_FAILURE(status)) {
553 goto unlock_and_exit;
554 }
555 }
556
557 /* Now install the GPE block in the device_object */
558
559 obj_desc->device.gpe_block = gpe_block;
560
561 unlock_and_exit:
562 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
563 return_ACPI_STATUS(status);
564}
565
566ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
567
568/*******************************************************************************
569 *
570 * FUNCTION: acpi_remove_gpe_block
571 *
572 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
573 *
574 * RETURN: Status
575 *
576 * DESCRIPTION: Remove a previously installed block of GPE registers
577 *
578 ******************************************************************************/
579acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
580{
581 union acpi_operand_object *obj_desc;
582 acpi_status status;
583 struct acpi_namespace_node *node;
584
585 ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
586
587 if (!gpe_device) {
588 return_ACPI_STATUS(AE_BAD_PARAMETER);
589 }
590
591 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
592 if (ACPI_FAILURE(status)) {
593 return (status);
594 }
595
596 node = acpi_ns_validate_handle(gpe_device);
597 if (!node) {
598 status = AE_BAD_PARAMETER;
599 goto unlock_and_exit;
600 }
601
602 /* Get the device_object attached to the node */
603
604 obj_desc = acpi_ns_get_attached_object(node);
605 if (!obj_desc || !obj_desc->device.gpe_block) {
606 return_ACPI_STATUS(AE_NULL_OBJECT);
607 }
608
609 /* Delete the GPE block (but not the device_object) */
610
611 status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
612 if (ACPI_SUCCESS(status)) {
613 obj_desc->device.gpe_block = NULL;
614 }
615
616 unlock_and_exit:
617 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
618 return_ACPI_STATUS(status);
619}
620
621ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
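A hedged sketch of a host installing and later removing a register block for a GPE Block Device; the address, width, register count and interrupt number are made-up values:

	struct acpi_generic_address gpe_blk = {
		.space_id  = ACPI_ADR_SPACE_SYSTEM_IO,
		.bit_width = 32,		/* illustrative only */
		.address   = 0x0680,
	};

	/* 2 register pairs (16 GPEs), routed to hardware interrupt 23 */
	status = acpi_install_gpe_block(gpe_dev_handle, &gpe_blk, 2, 23);
	/* ... */
	status = acpi_remove_gpe_block(gpe_dev_handle);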
622
623/*******************************************************************************
624 *
625 * FUNCTION: acpi_get_gpe_device
626 *
627 * PARAMETERS: Index - System GPE index (0-current_gpe_count)
628 * gpe_device - Where the parent GPE Device is returned
629 *
630 * RETURN: Status
631 *
632 * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
633 * gpe device indicates that the gpe number is contained in one of
634 * the FADT-defined gpe blocks. Otherwise, the GPE block device.
635 *
636 ******************************************************************************/
637acpi_status
638acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
639{
640 struct acpi_gpe_device_info info;
641 acpi_status status;
642
643 ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
644
645 if (!gpe_device) {
646 return_ACPI_STATUS(AE_BAD_PARAMETER);
647 }
648
649 if (index >= acpi_current_gpe_count) {
650 return_ACPI_STATUS(AE_NOT_EXIST);
651 }
652
653 /* Setup and walk the GPE list */
654
655 info.index = index;
656 info.status = AE_NOT_EXIST;
657 info.gpe_device = NULL;
658 info.next_block_base_index = 0;
659
660 status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
661 if (ACPI_FAILURE(status)) {
662 return_ACPI_STATUS(status);
663 }
664
665 *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
666 return_ACPI_STATUS(info.status);
667}
668
669ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
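For completeness, a sketch of walking every GPE in the system with this interface (acpi_current_gpe_count is the exported global already used above):

	u32 index;
	acpi_handle gpe_dev;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		if (ACPI_FAILURE(acpi_get_gpe_device(index, &gpe_dev)))
			continue;
		/* gpe_dev == NULL: the GPE lives in a FADT-defined block;
		 * otherwise it belongs to the returned GPE Block Device */
	}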
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 14750db2a1b8..85c3cbd4304d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
62 * PARAMETERS: gpe_event_info - Info block for the GPE 62 * PARAMETERS: gpe_event_info - Info block for the GPE
63 * gpe_register_info - Info block for the GPE register 63 * gpe_register_info - Info block for the GPE register
64 * 64 *
65 * RETURN: Status 65 * RETURN: Register mask with a one in the GPE bit position
66 * 66 *
67 * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given 67 * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
68 * GPE set. 68 * correct position for the input GPE.
69 * 69 *
70 ******************************************************************************/ 70 ******************************************************************************/
71 71
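For reference, the mask described here reduces to a single shift in contemporary ACPICA sources (shown as an assumption, not quoted from this patch):

	/* bit position = offset of the GPE within its owning 8-bit register */
	register_bit = (u32)1 << (gpe_event_info->gpe_number -
				  gpe_register_info->base_gpe_number);
	/* e.g. GPE 0x16 in the register covering 0x10-0x17 -> bit 6 -> mask 0x40 */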
@@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
85 * 85 *
86 * RETURN: Status 86 * RETURN: Status
87 * 87 *
88 * DESCRIPTION: Enable or disable a single GPE in its enable register. 88 * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
89 * 89 *
90 ******************************************************************************/ 90 ******************************************************************************/
91 91
92acpi_status 92acpi_status
93acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) 93acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
94{ 94{
95 struct acpi_gpe_register_info *gpe_register_info; 95 struct acpi_gpe_register_info *gpe_register_info;
96 acpi_status status; 96 acpi_status status;
@@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
113 return (status); 113 return (status);
114 } 114 }
115 115
116 /* Set ot clear just the bit that corresponds to this GPE */ 116 /* Set or clear just the bit that corresponds to this GPE */
117 117
118 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, 118 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
119 gpe_register_info); 119 gpe_register_info);
120 switch (action) { 120 switch (action) {
121 case ACPI_GPE_COND_ENABLE: 121 case ACPI_GPE_CONDITIONAL_ENABLE:
122 if (!(register_bit & gpe_register_info->enable_for_run)) 122
123 /* Only enable if the enable_for_run bit is set */
124
125 if (!(register_bit & gpe_register_info->enable_for_run)) {
123 return (AE_BAD_PARAMETER); 126 return (AE_BAD_PARAMETER);
127 }
128
129 /*lint -fallthrough */
124 130
125 case ACPI_GPE_ENABLE: 131 case ACPI_GPE_ENABLE:
126 ACPI_SET_BIT(enable_mask, register_bit); 132 ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
131 break; 137 break;
132 138
133 default: 139 default:
134 ACPI_ERROR((AE_INFO, "Invalid action\n")); 140 ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
135 return (AE_BAD_PARAMETER); 141 return (AE_BAD_PARAMETER);
136 } 142 }
137 143
@@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
168 return (AE_NOT_EXIST); 174 return (AE_NOT_EXIST);
169 } 175 }
170 176
171 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
172 gpe_register_info);
173
174 /* 177 /*
175 * Write a one to the appropriate bit in the status register to 178 * Write a one to the appropriate bit in the status register to
176 * clear this GPE. 179 * clear this GPE.
177 */ 180 */
181 register_bit =
182 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
183
178 status = acpi_hw_write(register_bit, 184 status = acpi_hw_write(register_bit,
179 &gpe_register_info->status_address); 185 &gpe_register_info->status_address);
180 186
@@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
201 u32 in_byte; 207 u32 in_byte;
202 u32 register_bit; 208 u32 register_bit;
203 struct acpi_gpe_register_info *gpe_register_info; 209 struct acpi_gpe_register_info *gpe_register_info;
204 acpi_status status;
205 acpi_event_status local_event_status = 0; 210 acpi_event_status local_event_status = 0;
211 acpi_status status;
206 212
207 ACPI_FUNCTION_ENTRY(); 213 ACPI_FUNCTION_ENTRY();
208 214
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e87bc6760be6..508537f884ac 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void)
768 acpi_gbl_gpe_fadt_blocks[0] = NULL; 768 acpi_gbl_gpe_fadt_blocks[0] = NULL;
769 acpi_gbl_gpe_fadt_blocks[1] = NULL; 769 acpi_gbl_gpe_fadt_blocks[1] = NULL;
770 acpi_current_gpe_count = 0; 770 acpi_current_gpe_count = 0;
771 acpi_all_gpes_initialized = FALSE; 771 acpi_gbl_all_gpes_initialized = FALSE;
772 772
773 /* Global handlers */ 773 /* Global handlers */
774 774
@@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void)
778 acpi_gbl_init_handler = NULL; 778 acpi_gbl_init_handler = NULL;
779 acpi_gbl_table_handler = NULL; 779 acpi_gbl_table_handler = NULL;
780 acpi_gbl_interface_handler = NULL; 780 acpi_gbl_interface_handler = NULL;
781 acpi_gbl_global_event_handler = NULL;
781 782
782 /* Global Lock support */ 783 /* Global Lock support */
783 784