Diffstat (limited to 'drivers/acpi/acpica/evgpeblk.c')
-rw-r--r-- | drivers/acpi/acpica/evgpeblk.c | 1227
1 file changed, 1227 insertions, 0 deletions
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
new file mode 100644
index 000000000000..2a8d1856038f
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -0,0 +1,1227 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evgpeblk - GPE block creation and initialization. | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evgpeblk") | ||
51 | |||
52 | /* Local prototypes */ | ||
53 | static acpi_status | ||
54 | acpi_ev_save_method_info(acpi_handle obj_handle, | ||
55 | u32 level, void *obj_desc, void **return_value); | ||
56 | |||
57 | static acpi_status | ||
58 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
59 | u32 level, void *info, void **return_value); | ||
60 | |||
61 | static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 | ||
62 | interrupt_number); | ||
63 | |||
64 | static acpi_status | ||
65 | acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); | ||
66 | |||
67 | static acpi_status | ||
68 | acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | ||
69 | u32 interrupt_number); | ||
70 | |||
71 | static acpi_status | ||
72 | acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block); | ||
73 | |||
74 | /******************************************************************************* | ||
75 | * | ||
76 | * FUNCTION: acpi_ev_valid_gpe_event | ||
77 | * | ||
78 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
79 | * | ||
80 | * RETURN: TRUE if the gpe_event is valid | ||
81 | * | ||
82 | * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. | ||
83 | * Should be called only when the GPE lists are semaphore locked | ||
84 | * and not subject to change. | ||
85 | * | ||
86 | ******************************************************************************/ | ||
87 | |||
88 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | ||
89 | { | ||
90 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
91 | struct acpi_gpe_block_info *gpe_block; | ||
92 | |||
93 | ACPI_FUNCTION_ENTRY(); | ||
94 | |||
95 | /* No need for spin lock since we are not changing any list elements */ | ||
96 | |||
97 | /* Walk the GPE interrupt levels */ | ||
98 | |||
99 | gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; | ||
100 | while (gpe_xrupt_block) { | ||
101 | gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
102 | |||
103 | /* Walk the GPE blocks on this interrupt level */ | ||
104 | |||
105 | while (gpe_block) { | ||
106 | if ((&gpe_block->event_info[0] <= gpe_event_info) && | ||
107 | (&gpe_block-> | ||
108 | event_info[((acpi_size) gpe_block-> | ||
109 | register_count) * 8] > | ||
110 | gpe_event_info)) { | ||
111 | return (TRUE); | ||
112 | } | ||
113 | |||
114 | gpe_block = gpe_block->next; | ||
115 | } | ||
116 | |||
117 | gpe_xrupt_block = gpe_xrupt_block->next; | ||
118 | } | ||
119 | |||
120 | return (FALSE); | ||
121 | } | ||
122 | |||
123 | /******************************************************************************* | ||
124 | * | ||
125 | * FUNCTION: acpi_ev_walk_gpe_list | ||
126 | * | ||
127 | * PARAMETERS: gpe_walk_callback - Routine called for each GPE block | ||
128 | * Context - Value passed to callback | ||
129 | * | ||
130 | * RETURN: Status | ||
131 | * | ||
132 | * DESCRIPTION: Walk the GPE lists. | ||
133 | * | ||
134 | ******************************************************************************/ | ||
135 | |||
136 | acpi_status | ||
137 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context) | ||
138 | { | ||
139 | struct acpi_gpe_block_info *gpe_block; | ||
140 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; | ||
141 | acpi_status status = AE_OK; | ||
142 | acpi_cpu_flags flags; | ||
143 | |||
144 | ACPI_FUNCTION_TRACE(ev_walk_gpe_list); | ||
145 | |||
146 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
147 | |||
148 | /* Walk the interrupt level descriptor list */ | ||
149 | |||
150 | gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; | ||
151 | while (gpe_xrupt_info) { | ||
152 | |||
153 | /* Walk all Gpe Blocks attached to this interrupt level */ | ||
154 | |||
155 | gpe_block = gpe_xrupt_info->gpe_block_list_head; | ||
156 | while (gpe_block) { | ||
157 | |||
158 | /* One callback per GPE block */ | ||
159 | |||
160 | status = | ||
161 | gpe_walk_callback(gpe_xrupt_info, gpe_block, | ||
162 | context); | ||
163 | if (ACPI_FAILURE(status)) { | ||
164 | if (status == AE_CTRL_END) { /* Callback abort */ | ||
165 | status = AE_OK; | ||
166 | } | ||
167 | goto unlock_and_exit; | ||
168 | } | ||
169 | |||
170 | gpe_block = gpe_block->next; | ||
171 | } | ||
172 | |||
173 | gpe_xrupt_info = gpe_xrupt_info->next; | ||
174 | } | ||
175 | |||
176 | unlock_and_exit: | ||
177 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
178 | return_ACPI_STATUS(status); | ||
179 | } | ||
180 | |||
181 | /******************************************************************************* | ||
182 | * | ||
183 | * FUNCTION: acpi_ev_delete_gpe_handlers | ||
184 | * | ||
185 | * PARAMETERS: gpe_xrupt_info - GPE Interrupt info | ||
186 | * gpe_block - Gpe Block info | ||
187 | * | ||
188 | * RETURN: Status | ||
189 | * | ||
190 | * DESCRIPTION: Delete all Handler objects found in the GPE data structs. | ||
191 | * Used only prior to termination. | ||
192 | * | ||
193 | ******************************************************************************/ | ||
194 | |||
195 | acpi_status | ||
196 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
197 | struct acpi_gpe_block_info *gpe_block, | ||
198 | void *context) | ||
199 | { | ||
200 | struct acpi_gpe_event_info *gpe_event_info; | ||
201 | u32 i; | ||
202 | u32 j; | ||
203 | |||
204 | ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers); | ||
205 | |||
206 | /* Examine each GPE Register within the block */ | ||
207 | |||
208 | for (i = 0; i < gpe_block->register_count; i++) { | ||
209 | |||
210 | /* Now look at the individual GPEs in this byte register */ | ||
211 | |||
212 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { | ||
213 | gpe_event_info = | ||
214 | &gpe_block-> | ||
215 | event_info[((acpi_size) i * | ||
216 | ACPI_GPE_REGISTER_WIDTH) + j]; | ||
217 | |||
218 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
219 | ACPI_GPE_DISPATCH_HANDLER) { | ||
220 | ACPI_FREE(gpe_event_info->dispatch.handler); | ||
221 | gpe_event_info->dispatch.handler = NULL; | ||
222 | gpe_event_info->flags &= | ||
223 | ~ACPI_GPE_DISPATCH_MASK; | ||
224 | } | ||
225 | } | ||
226 | } | ||
227 | |||
228 | return_ACPI_STATUS(AE_OK); | ||
229 | } | ||
230 | |||
231 | /******************************************************************************* | ||
232 | * | ||
233 | * FUNCTION: acpi_ev_save_method_info | ||
234 | * | ||
235 | * PARAMETERS: Callback from walk_namespace | ||
236 | * | ||
237 | * RETURN: Status | ||
238 | * | ||
239 | * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a | ||
240 | * control method under the _GPE portion of the namespace. | ||
241 | * Extract the name and GPE type from the object, saving this | ||
242 | * information for quick lookup during GPE dispatch | ||
243 | * | ||
244 | * The name of each GPE control method is of the form: | ||
245 | * "_Lxx" or "_Exx" | ||
246 | * Where: | ||
247 | * L - means that the GPE is level triggered | ||
248 | * E - means that the GPE is edge triggered | ||
249 | * xx - is the GPE number [in HEX] | ||
250 | * | ||
251 | ******************************************************************************/ | ||
252 | |||
253 | static acpi_status | ||
254 | acpi_ev_save_method_info(acpi_handle obj_handle, | ||
255 | u32 level, void *obj_desc, void **return_value) | ||
256 | { | ||
257 | struct acpi_gpe_block_info *gpe_block = (void *)obj_desc; | ||
258 | struct acpi_gpe_event_info *gpe_event_info; | ||
259 | u32 gpe_number; | ||
260 | char name[ACPI_NAME_SIZE + 1]; | ||
261 | u8 type; | ||
262 | acpi_status status; | ||
263 | |||
264 | ACPI_FUNCTION_TRACE(ev_save_method_info); | ||
265 | |||
266 | /* | ||
267 | * _Lxx and _Exx GPE method support | ||
268 | * | ||
269 | * 1) Extract the name from the object and convert to a string | ||
270 | */ | ||
271 | ACPI_MOVE_32_TO_32(name, | ||
272 | &((struct acpi_namespace_node *)obj_handle)->name. | ||
273 | integer); | ||
274 | name[ACPI_NAME_SIZE] = 0; | ||
275 | |||
276 | /* | ||
277 | * 2) Edge/Level determination is based on the 2nd character | ||
278 | * of the method name | ||
279 | * | ||
280 | * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE | ||
281 | * if a _PRW object is found that points to this GPE. | ||
282 | */ | ||
283 | switch (name[1]) { | ||
284 | case 'L': | ||
285 | type = ACPI_GPE_LEVEL_TRIGGERED; | ||
286 | break; | ||
287 | |||
288 | case 'E': | ||
289 | type = ACPI_GPE_EDGE_TRIGGERED; | ||
290 | break; | ||
291 | |||
292 | default: | ||
293 | /* Unknown method type, just ignore it! */ | ||
294 | |||
295 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
296 | "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)", | ||
297 | name)); | ||
298 | return_ACPI_STATUS(AE_OK); | ||
299 | } | ||
300 | |||
301 | /* Convert the last two characters of the name to the GPE Number */ | ||
302 | |||
303 | gpe_number = ACPI_STRTOUL(&name[2], NULL, 16); | ||
304 | if (gpe_number == ACPI_UINT32_MAX) { | ||
305 | |||
306 | /* Conversion failed; invalid method, just ignore it */ | ||
307 | |||
308 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
309 | "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)", | ||
310 | name)); | ||
311 | return_ACPI_STATUS(AE_OK); | ||
312 | } | ||
313 | |||
314 | /* Ensure that we have a valid GPE number for this GPE block */ | ||
315 | |||
316 | if ((gpe_number < gpe_block->block_base_number) || | ||
317 | (gpe_number >= | ||
318 | (gpe_block->block_base_number + | ||
319 | (gpe_block->register_count * 8)))) { | ||
320 | /* | ||
321 | * Not valid for this GPE block, just ignore it. However, it may be | ||
322 | * valid for a different GPE block, since GPE0 and GPE1 methods both | ||
323 | * appear under \_GPE. | ||
324 | */ | ||
325 | return_ACPI_STATUS(AE_OK); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Now we can add this information to the gpe_event_info block for use | ||
330 | * during dispatch of this GPE. Default type is RUNTIME, although this may | ||
331 | * change when the _PRW methods are executed later. | ||
332 | */ | ||
333 | gpe_event_info = | ||
334 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; | ||
335 | |||
336 | gpe_event_info->flags = (u8) | ||
337 | (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME); | ||
338 | |||
339 | gpe_event_info->dispatch.method_node = | ||
340 | (struct acpi_namespace_node *)obj_handle; | ||
341 | |||
342 | /* Update enable mask, but don't enable the HW GPE as of yet */ | ||
343 | |||
344 | status = acpi_ev_enable_gpe(gpe_event_info, FALSE); | ||
345 | |||
346 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
347 | "Registered GPE method %s as GPE number 0x%.2X\n", | ||
348 | name, gpe_number)); | ||
349 | return_ACPI_STATUS(status); | ||
350 | } | ||
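A minimal, self-contained sketch of the _Lxx/_Exx decoding performed by acpi_ev_save_method_info() above, for illustration only (not part of this file; decode_gpe_method_name() and the sample name are hypothetical, and the kernel code detects conversion failure via ACPI_UINT32_MAX rather than an end pointer):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: decode "_Lxx"/"_Exx" into a GPE number and trigger type */
static int decode_gpe_method_name(const char *name, unsigned long *gpe_number,
                                  char *trigger)
{
        char *end;

        if (!name || name[0] != '_')
                return -1;

        if (name[1] == 'L')
                *trigger = 'L';         /* level triggered */
        else if (name[1] == 'E')
                *trigger = 'E';         /* edge triggered */
        else
                return -1;              /* not _Lxx or _Exx: ignore, as above */

        /* The last two characters are the GPE number in hex */
        *gpe_number = strtoul(&name[2], &end, 16);
        if (end == &name[2] || *end != '\0')
                return -1;
        return 0;
}

int main(void)
{
        unsigned long gpe;
        char trig;

        if (decode_gpe_method_name("_L1A", &gpe, &trig) == 0)
                printf("GPE 0x%02lX, %c-triggered\n", gpe, trig);  /* GPE 0x1A, L */
        return 0;
}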
351 | |||
352 | /******************************************************************************* | ||
353 | * | ||
354 | * FUNCTION: acpi_ev_match_prw_and_gpe | ||
355 | * | ||
356 | * PARAMETERS: Callback from walk_namespace | ||
357 | * | ||
358 | * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is | ||
359 | * not aborted on a single _PRW failure. | ||
360 | * | ||
361 | * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a | ||
362 | * Device. Run the _PRW method. If present, extract the GPE | ||
363 | * number and mark the GPE as a WAKE GPE. | ||
364 | * | ||
365 | ******************************************************************************/ | ||
366 | |||
367 | static acpi_status | ||
368 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
369 | u32 level, void *info, void **return_value) | ||
370 | { | ||
371 | struct acpi_gpe_walk_info *gpe_info = (void *)info; | ||
372 | struct acpi_namespace_node *gpe_device; | ||
373 | struct acpi_gpe_block_info *gpe_block; | ||
374 | struct acpi_namespace_node *target_gpe_device; | ||
375 | struct acpi_gpe_event_info *gpe_event_info; | ||
376 | union acpi_operand_object *pkg_desc; | ||
377 | union acpi_operand_object *obj_desc; | ||
378 | u32 gpe_number; | ||
379 | acpi_status status; | ||
380 | |||
381 | ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe); | ||
382 | |||
383 | /* Check for a _PRW method under this device */ | ||
384 | |||
385 | status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW, | ||
386 | ACPI_BTYPE_PACKAGE, &pkg_desc); | ||
387 | if (ACPI_FAILURE(status)) { | ||
388 | |||
389 | /* Ignore all errors from _PRW, we don't want to abort the subsystem */ | ||
390 | |||
391 | return_ACPI_STATUS(AE_OK); | ||
392 | } | ||
393 | |||
394 | /* The returned _PRW package must have at least two elements */ | ||
395 | |||
396 | if (pkg_desc->package.count < 2) { | ||
397 | goto cleanup; | ||
398 | } | ||
399 | |||
400 | /* Extract pointers from the input context */ | ||
401 | |||
402 | gpe_device = gpe_info->gpe_device; | ||
403 | gpe_block = gpe_info->gpe_block; | ||
404 | |||
405 | /* | ||
406 | * The _PRW object must return a package, we are only interested in the | ||
407 | * first element | ||
408 | */ | ||
409 | obj_desc = pkg_desc->package.elements[0]; | ||
410 | |||
411 | if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { | ||
412 | |||
413 | /* Use FADT-defined GPE device (from definition of _PRW) */ | ||
414 | |||
415 | target_gpe_device = acpi_gbl_fadt_gpe_device; | ||
416 | |||
417 | /* Integer is the GPE number in the FADT described GPE blocks */ | ||
418 | |||
419 | gpe_number = (u32) obj_desc->integer.value; | ||
420 | } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) { | ||
421 | |||
422 | /* Package contains a GPE reference and GPE number within a GPE block */ | ||
423 | |||
424 | if ((obj_desc->package.count < 2) || | ||
425 | (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) != | ||
426 | ACPI_TYPE_LOCAL_REFERENCE) | ||
427 | || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) != | ||
428 | ACPI_TYPE_INTEGER)) { | ||
429 | goto cleanup; | ||
430 | } | ||
431 | |||
432 | /* Get GPE block reference and decode */ | ||
433 | |||
434 | target_gpe_device = | ||
435 | obj_desc->package.elements[0]->reference.node; | ||
436 | gpe_number = (u32) obj_desc->package.elements[1]->integer.value; | ||
437 | } else { | ||
438 | /* Unknown type, just ignore it */ | ||
439 | |||
440 | goto cleanup; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * Is this GPE within this block? | ||
445 | * | ||
446 | * TRUE if and only if these conditions are true: | ||
447 | * 1) The GPE devices match. | ||
448 | * 2) The GPE index(number) is within the range of the Gpe Block | ||
449 | * associated with the GPE device. | ||
450 | */ | ||
451 | if ((gpe_device == target_gpe_device) && | ||
452 | (gpe_number >= gpe_block->block_base_number) && | ||
453 | (gpe_number < | ||
454 | gpe_block->block_base_number + (gpe_block->register_count * 8))) { | ||
455 | gpe_event_info = | ||
456 | &gpe_block->event_info[gpe_number - | ||
457 | gpe_block->block_base_number]; | ||
458 | |||
459 | /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ | ||
460 | |||
461 | gpe_event_info->flags &= | ||
462 | ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); | ||
463 | |||
464 | status = | ||
465 | acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); | ||
466 | if (ACPI_FAILURE(status)) { | ||
467 | goto cleanup; | ||
468 | } | ||
469 | |||
470 | status = | ||
471 | acpi_ev_update_gpe_enable_masks(gpe_event_info, | ||
472 | ACPI_GPE_DISABLE); | ||
473 | } | ||
474 | |||
475 | cleanup: | ||
476 | acpi_ut_remove_reference(pkg_desc); | ||
477 | return_ACPI_STATUS(AE_OK); | ||
478 | } | ||
479 | |||
480 | /******************************************************************************* | ||
481 | * | ||
482 | * FUNCTION: acpi_ev_get_gpe_xrupt_block | ||
483 | * | ||
484 | * PARAMETERS: interrupt_number - Interrupt for a GPE block | ||
485 | * | ||
486 | * RETURN: A GPE interrupt block | ||
487 | * | ||
488 | * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt | ||
489 | * block per unique interrupt level used for GPEs. Should be | ||
490 | * called only when the GPE lists are semaphore locked and not | ||
491 | * subject to change. | ||
492 | * | ||
493 | ******************************************************************************/ | ||
494 | |||
495 | static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 | ||
496 | interrupt_number) | ||
497 | { | ||
498 | struct acpi_gpe_xrupt_info *next_gpe_xrupt; | ||
499 | struct acpi_gpe_xrupt_info *gpe_xrupt; | ||
500 | acpi_status status; | ||
501 | acpi_cpu_flags flags; | ||
502 | |||
503 | ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block); | ||
504 | |||
505 | /* No need for lock since we are not changing any list elements here */ | ||
506 | |||
507 | next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; | ||
508 | while (next_gpe_xrupt) { | ||
509 | if (next_gpe_xrupt->interrupt_number == interrupt_number) { | ||
510 | return_PTR(next_gpe_xrupt); | ||
511 | } | ||
512 | |||
513 | next_gpe_xrupt = next_gpe_xrupt->next; | ||
514 | } | ||
515 | |||
516 | /* Not found, must allocate a new xrupt descriptor */ | ||
517 | |||
518 | gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info)); | ||
519 | if (!gpe_xrupt) { | ||
520 | return_PTR(NULL); | ||
521 | } | ||
522 | |||
523 | gpe_xrupt->interrupt_number = interrupt_number; | ||
524 | |||
525 | /* Install new interrupt descriptor with spin lock */ | ||
526 | |||
527 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
528 | if (acpi_gbl_gpe_xrupt_list_head) { | ||
529 | next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; | ||
530 | while (next_gpe_xrupt->next) { | ||
531 | next_gpe_xrupt = next_gpe_xrupt->next; | ||
532 | } | ||
533 | |||
534 | next_gpe_xrupt->next = gpe_xrupt; | ||
535 | gpe_xrupt->previous = next_gpe_xrupt; | ||
536 | } else { | ||
537 | acpi_gbl_gpe_xrupt_list_head = gpe_xrupt; | ||
538 | } | ||
539 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
540 | |||
541 | /* Install new interrupt handler if not SCI_INT */ | ||
542 | |||
543 | if (interrupt_number != acpi_gbl_FADT.sci_interrupt) { | ||
544 | status = acpi_os_install_interrupt_handler(interrupt_number, | ||
545 | acpi_ev_gpe_xrupt_handler, | ||
546 | gpe_xrupt); | ||
547 | if (ACPI_FAILURE(status)) { | ||
548 | ACPI_ERROR((AE_INFO, | ||
549 | "Could not install GPE interrupt handler at level 0x%X", | ||
550 | interrupt_number)); | ||
551 | return_PTR(NULL); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | return_PTR(gpe_xrupt); | ||
556 | } | ||
557 | |||
558 | /******************************************************************************* | ||
559 | * | ||
560 | * FUNCTION: acpi_ev_delete_gpe_xrupt | ||
561 | * | ||
562 | * PARAMETERS: gpe_xrupt - A GPE interrupt info block | ||
563 | * | ||
564 | * RETURN: Status | ||
565 | * | ||
566 | * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated | ||
567 | * interrupt handler if not the SCI interrupt. | ||
568 | * | ||
569 | ******************************************************************************/ | ||
570 | |||
571 | static acpi_status | ||
572 | acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) | ||
573 | { | ||
574 | acpi_status status; | ||
575 | acpi_cpu_flags flags; | ||
576 | |||
577 | ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt); | ||
578 | |||
579 | /* We never want to remove the SCI interrupt handler */ | ||
580 | |||
581 | if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) { | ||
582 | gpe_xrupt->gpe_block_list_head = NULL; | ||
583 | return_ACPI_STATUS(AE_OK); | ||
584 | } | ||
585 | |||
586 | /* Disable this interrupt */ | ||
587 | |||
588 | status = | ||
589 | acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number, | ||
590 | acpi_ev_gpe_xrupt_handler); | ||
591 | if (ACPI_FAILURE(status)) { | ||
592 | return_ACPI_STATUS(status); | ||
593 | } | ||
594 | |||
595 | /* Unlink the interrupt block with lock */ | ||
596 | |||
597 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
598 | if (gpe_xrupt->previous) { | ||
599 | gpe_xrupt->previous->next = gpe_xrupt->next; | ||
600 | } else { | ||
601 | /* No previous, update list head */ | ||
602 | |||
603 | acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next; | ||
604 | } | ||
605 | |||
606 | if (gpe_xrupt->next) { | ||
607 | gpe_xrupt->next->previous = gpe_xrupt->previous; | ||
608 | } | ||
609 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
610 | |||
611 | /* Free the block */ | ||
612 | |||
613 | ACPI_FREE(gpe_xrupt); | ||
614 | return_ACPI_STATUS(AE_OK); | ||
615 | } | ||
616 | |||
617 | /******************************************************************************* | ||
618 | * | ||
619 | * FUNCTION: acpi_ev_install_gpe_block | ||
620 | * | ||
621 | * PARAMETERS: gpe_block - New GPE block | ||
622 | * interrupt_number - Xrupt to be associated with this | ||
623 | * GPE block | ||
624 | * | ||
625 | * RETURN: Status | ||
626 | * | ||
627 | * DESCRIPTION: Install new GPE block with mutex support | ||
628 | * | ||
629 | ******************************************************************************/ | ||
630 | |||
631 | static acpi_status | ||
632 | acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | ||
633 | u32 interrupt_number) | ||
634 | { | ||
635 | struct acpi_gpe_block_info *next_gpe_block; | ||
636 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
637 | acpi_status status; | ||
638 | acpi_cpu_flags flags; | ||
639 | |||
640 | ACPI_FUNCTION_TRACE(ev_install_gpe_block); | ||
641 | |||
642 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
643 | if (ACPI_FAILURE(status)) { | ||
644 | return_ACPI_STATUS(status); | ||
645 | } | ||
646 | |||
647 | gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number); | ||
648 | if (!gpe_xrupt_block) { | ||
649 | status = AE_NO_MEMORY; | ||
650 | goto unlock_and_exit; | ||
651 | } | ||
652 | |||
653 | /* Install the new block at the end of the list with lock */ | ||
654 | |||
655 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
656 | if (gpe_xrupt_block->gpe_block_list_head) { | ||
657 | next_gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
658 | while (next_gpe_block->next) { | ||
659 | next_gpe_block = next_gpe_block->next; | ||
660 | } | ||
661 | |||
662 | next_gpe_block->next = gpe_block; | ||
663 | gpe_block->previous = next_gpe_block; | ||
664 | } else { | ||
665 | gpe_xrupt_block->gpe_block_list_head = gpe_block; | ||
666 | } | ||
667 | |||
668 | gpe_block->xrupt_block = gpe_xrupt_block; | ||
669 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
670 | |||
671 | unlock_and_exit: | ||
672 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
673 | return_ACPI_STATUS(status); | ||
674 | } | ||
675 | |||
676 | /******************************************************************************* | ||
677 | * | ||
678 | * FUNCTION: acpi_ev_delete_gpe_block | ||
679 | * | ||
680 | * PARAMETERS: gpe_block - Existing GPE block | ||
681 | * | ||
682 | * RETURN: Status | ||
683 | * | ||
684 | * DESCRIPTION: Remove a GPE block | ||
685 | * | ||
686 | ******************************************************************************/ | ||
687 | |||
688 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) | ||
689 | { | ||
690 | acpi_status status; | ||
691 | acpi_cpu_flags flags; | ||
692 | |||
693 | ACPI_FUNCTION_TRACE(ev_delete_gpe_block); | ||
694 | |||
695 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
696 | if (ACPI_FAILURE(status)) { | ||
697 | return_ACPI_STATUS(status); | ||
698 | } | ||
699 | |||
700 | /* Disable all GPEs in this block */ | ||
701 | |||
702 | status = | ||
703 | acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL); | ||
704 | |||
705 | if (!gpe_block->previous && !gpe_block->next) { | ||
706 | |||
707 | /* This is the last gpe_block on this interrupt */ | ||
708 | |||
709 | status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block); | ||
710 | if (ACPI_FAILURE(status)) { | ||
711 | goto unlock_and_exit; | ||
712 | } | ||
713 | } else { | ||
714 | /* Remove the block on this interrupt with lock */ | ||
715 | |||
716 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
717 | if (gpe_block->previous) { | ||
718 | gpe_block->previous->next = gpe_block->next; | ||
719 | } else { | ||
720 | gpe_block->xrupt_block->gpe_block_list_head = | ||
721 | gpe_block->next; | ||
722 | } | ||
723 | |||
724 | if (gpe_block->next) { | ||
725 | gpe_block->next->previous = gpe_block->previous; | ||
726 | } | ||
727 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
728 | } | ||
729 | |||
730 | acpi_current_gpe_count -= | ||
731 | gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH; | ||
732 | |||
733 | /* Free the gpe_block */ | ||
734 | |||
735 | ACPI_FREE(gpe_block->register_info); | ||
736 | ACPI_FREE(gpe_block->event_info); | ||
737 | ACPI_FREE(gpe_block); | ||
738 | |||
739 | unlock_and_exit: | ||
740 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
741 | return_ACPI_STATUS(status); | ||
742 | } | ||
743 | |||
744 | /******************************************************************************* | ||
745 | * | ||
746 | * FUNCTION: acpi_ev_create_gpe_info_blocks | ||
747 | * | ||
748 | * PARAMETERS: gpe_block - New GPE block | ||
749 | * | ||
750 | * RETURN: Status | ||
751 | * | ||
752 | * DESCRIPTION: Create the register_info and event_info blocks for this GPE block | ||
753 | * | ||
754 | ******************************************************************************/ | ||
755 | |||
756 | static acpi_status | ||
757 | acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | ||
758 | { | ||
759 | struct acpi_gpe_register_info *gpe_register_info = NULL; | ||
760 | struct acpi_gpe_event_info *gpe_event_info = NULL; | ||
761 | struct acpi_gpe_event_info *this_event; | ||
762 | struct acpi_gpe_register_info *this_register; | ||
763 | u32 i; | ||
764 | u32 j; | ||
765 | acpi_status status; | ||
766 | |||
767 | ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); | ||
768 | |||
769 | /* Allocate the GPE register information block */ | ||
770 | |||
771 | gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block-> | ||
772 | register_count * | ||
773 | sizeof(struct | ||
774 | acpi_gpe_register_info)); | ||
775 | if (!gpe_register_info) { | ||
776 | ACPI_ERROR((AE_INFO, | ||
777 | "Could not allocate the GpeRegisterInfo table")); | ||
778 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Allocate the GPE event_info block. There are eight distinct GPEs | ||
783 | * per register. Initialization to zeros is sufficient. | ||
784 | */ | ||
785 | gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block-> | ||
786 | register_count * | ||
787 | ACPI_GPE_REGISTER_WIDTH) * | ||
788 | sizeof(struct | ||
789 | acpi_gpe_event_info)); | ||
790 | if (!gpe_event_info) { | ||
791 | ACPI_ERROR((AE_INFO, | ||
792 | "Could not allocate the GpeEventInfo table")); | ||
793 | status = AE_NO_MEMORY; | ||
794 | goto error_exit; | ||
795 | } | ||
796 | |||
797 | /* Save the new Info arrays in the GPE block */ | ||
798 | |||
799 | gpe_block->register_info = gpe_register_info; | ||
800 | gpe_block->event_info = gpe_event_info; | ||
801 | |||
802 | /* | ||
803 | * Initialize the GPE Register and Event structures. A goal of these | ||
804 | * tables is to hide the fact that there are two separate GPE register | ||
805 | * sets in a given GPE hardware block, the status registers occupy the | ||
806 | * first half, and the enable registers occupy the second half. | ||
807 | */ | ||
808 | this_register = gpe_register_info; | ||
809 | this_event = gpe_event_info; | ||
810 | |||
811 | for (i = 0; i < gpe_block->register_count; i++) { | ||
812 | |||
813 | /* Init the register_info for this GPE register (8 GPEs) */ | ||
814 | |||
815 | this_register->base_gpe_number = | ||
816 | (u8) (gpe_block->block_base_number + | ||
817 | (i * ACPI_GPE_REGISTER_WIDTH)); | ||
818 | |||
819 | this_register->status_address.address = | ||
820 | gpe_block->block_address.address + i; | ||
821 | |||
822 | this_register->enable_address.address = | ||
823 | gpe_block->block_address.address + i + | ||
824 | gpe_block->register_count; | ||
825 | |||
826 | this_register->status_address.space_id = | ||
827 | gpe_block->block_address.space_id; | ||
828 | this_register->enable_address.space_id = | ||
829 | gpe_block->block_address.space_id; | ||
830 | this_register->status_address.bit_width = | ||
831 | ACPI_GPE_REGISTER_WIDTH; | ||
832 | this_register->enable_address.bit_width = | ||
833 | ACPI_GPE_REGISTER_WIDTH; | ||
834 | this_register->status_address.bit_offset = 0; | ||
835 | this_register->enable_address.bit_offset = 0; | ||
836 | |||
837 | /* Init the event_info for each GPE within this register */ | ||
838 | |||
839 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { | ||
840 | this_event->gpe_number = | ||
841 | (u8) (this_register->base_gpe_number + j); | ||
842 | this_event->register_info = this_register; | ||
843 | this_event++; | ||
844 | } | ||
845 | |||
846 | /* Disable all GPEs within this register */ | ||
847 | |||
848 | status = acpi_write(0x00, &this_register->enable_address); | ||
849 | if (ACPI_FAILURE(status)) { | ||
850 | goto error_exit; | ||
851 | } | ||
852 | |||
853 | /* Clear any pending GPE events within this register */ | ||
854 | |||
855 | status = acpi_write(0xFF, &this_register->status_address); | ||
856 | if (ACPI_FAILURE(status)) { | ||
857 | goto error_exit; | ||
858 | } | ||
859 | |||
860 | this_register++; | ||
861 | } | ||
862 | |||
863 | return_ACPI_STATUS(AE_OK); | ||
864 | |||
865 | error_exit: | ||
866 | if (gpe_register_info) { | ||
867 | ACPI_FREE(gpe_register_info); | ||
868 | } | ||
869 | if (gpe_event_info) { | ||
870 | ACPI_FREE(gpe_event_info); | ||
871 | } | ||
872 | |||
873 | return_ACPI_STATUS(status); | ||
874 | } | ||
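For illustration, a small standalone sketch of the status/enable address split that the loop above sets up (not part of this file; the block address 0x80 and register count of 2 are hypothetical values, and the real code fills struct acpi_gpe_register_info rather than printing):

#include <stdio.h>

int main(void)
{
        unsigned long long block_address = 0x80;   /* hypothetical GPE block base */
        unsigned int register_count = 2;           /* hypothetical register pair count */
        unsigned int i;

        for (i = 0; i < register_count; i++) {
                /* Status registers occupy the first half of the block,
                 * enable registers the second half; 8 GPEs per register. */
                printf("register %u: status 0x%llX, enable 0x%llX, base GPE 0x%02X\n",
                       i,
                       block_address + i,
                       block_address + i + register_count,
                       i * 8);
        }
        return 0;
}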
875 | |||
876 | /******************************************************************************* | ||
877 | * | ||
878 | * FUNCTION: acpi_ev_create_gpe_block | ||
879 | * | ||
880 | * PARAMETERS: gpe_device - Handle to the parent GPE block | ||
881 | * gpe_block_address - Address and space_id | ||
882 | * register_count - Number of GPE register pairs in the block | ||
883 | * gpe_block_base_number - Starting GPE number for the block | ||
884 | * interrupt_number - H/W interrupt for the block | ||
885 | * return_gpe_block - Where the new block descriptor is returned | ||
886 | * | ||
887 | * RETURN: Status | ||
888 | * | ||
889 | * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within | ||
890 | * the block are disabled at exit. | ||
891 | * Note: Assumes namespace is locked. | ||
892 | * | ||
893 | ******************************************************************************/ | ||
894 | |||
895 | acpi_status | ||
896 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | ||
897 | struct acpi_generic_address *gpe_block_address, | ||
898 | u32 register_count, | ||
899 | u8 gpe_block_base_number, | ||
900 | u32 interrupt_number, | ||
901 | struct acpi_gpe_block_info **return_gpe_block) | ||
902 | { | ||
903 | acpi_status status; | ||
904 | struct acpi_gpe_block_info *gpe_block; | ||
905 | |||
906 | ACPI_FUNCTION_TRACE(ev_create_gpe_block); | ||
907 | |||
908 | if (!register_count) { | ||
909 | return_ACPI_STATUS(AE_OK); | ||
910 | } | ||
911 | |||
912 | /* Allocate a new GPE block */ | ||
913 | |||
914 | gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info)); | ||
915 | if (!gpe_block) { | ||
916 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
917 | } | ||
918 | |||
919 | /* Initialize the new GPE block */ | ||
920 | |||
921 | gpe_block->node = gpe_device; | ||
922 | gpe_block->register_count = register_count; | ||
923 | gpe_block->block_base_number = gpe_block_base_number; | ||
924 | |||
925 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, | ||
926 | sizeof(struct acpi_generic_address)); | ||
927 | |||
928 | /* | ||
929 | * Create the register_info and event_info sub-structures | ||
930 | * Note: disables and clears all GPEs in the block | ||
931 | */ | ||
932 | status = acpi_ev_create_gpe_info_blocks(gpe_block); | ||
933 | if (ACPI_FAILURE(status)) { | ||
934 | ACPI_FREE(gpe_block); | ||
935 | return_ACPI_STATUS(status); | ||
936 | } | ||
937 | |||
938 | /* Install the new block in the global lists */ | ||
939 | |||
940 | status = acpi_ev_install_gpe_block(gpe_block, interrupt_number); | ||
941 | if (ACPI_FAILURE(status)) { | ||
942 | ACPI_FREE(gpe_block); | ||
943 | return_ACPI_STATUS(status); | ||
944 | } | ||
945 | |||
946 | /* Find all GPE methods (_Lxx, _Exx) for this block */ | ||
947 | |||
948 | status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, | ||
949 | ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, | ||
950 | acpi_ev_save_method_info, gpe_block, | ||
951 | NULL); | ||
952 | |||
953 | /* Return the new block */ | ||
954 | |||
955 | if (return_gpe_block) { | ||
956 | (*return_gpe_block) = gpe_block; | ||
957 | } | ||
958 | |||
959 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
960 | "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", | ||
961 | (u32) gpe_block->block_base_number, | ||
962 | (u32) (gpe_block->block_base_number + | ||
963 | ((gpe_block->register_count * | ||
964 | ACPI_GPE_REGISTER_WIDTH) - 1)), | ||
965 | gpe_device->name.ascii, gpe_block->register_count, | ||
966 | interrupt_number)); | ||
967 | |||
968 | /* Update global count of currently available GPEs */ | ||
969 | |||
970 | acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH; | ||
971 | return_ACPI_STATUS(AE_OK); | ||
972 | } | ||
973 | |||
974 | /******************************************************************************* | ||
975 | * | ||
976 | * FUNCTION: acpi_ev_initialize_gpe_block | ||
977 | * | ||
978 | * PARAMETERS: gpe_device - Handle to the parent GPE block | ||
979 | * gpe_block - Gpe Block info | ||
980 | * | ||
981 | * RETURN: Status | ||
982 | * | ||
983 | * DESCRIPTION: Initialize and enable a GPE block. First find and run any | ||
984 | * _PRW methods associated with the block, then enable the | ||
985 | * appropriate GPEs. | ||
986 | * Note: Assumes namespace is locked. | ||
987 | * | ||
988 | ******************************************************************************/ | ||
989 | |||
990 | acpi_status | ||
991 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | ||
992 | struct acpi_gpe_block_info *gpe_block) | ||
993 | { | ||
994 | acpi_status status; | ||
995 | struct acpi_gpe_event_info *gpe_event_info; | ||
996 | struct acpi_gpe_walk_info gpe_info; | ||
997 | u32 wake_gpe_count; | ||
998 | u32 gpe_enabled_count; | ||
999 | u32 i; | ||
1000 | u32 j; | ||
1001 | |||
1002 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); | ||
1003 | |||
1004 | /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */ | ||
1005 | |||
1006 | if (!gpe_block) { | ||
1007 | return_ACPI_STATUS(AE_OK); | ||
1008 | } | ||
1009 | |||
1010 | /* | ||
1011 | * Runtime option: Should wake GPEs be enabled at runtime? The default | ||
1012 | * is no, they should only be enabled just as the machine goes to sleep. | ||
1013 | */ | ||
1014 | if (acpi_gbl_leave_wake_gpes_disabled) { | ||
1015 | /* | ||
1016 | * Differentiate runtime vs wake GPEs, via the _PRW control methods. | ||
1017 | * Each GPE that has one or more _PRWs that reference it is by | ||
1018 | * definition a wake GPE and will not be enabled while the machine | ||
1019 | * is running. | ||
1020 | */ | ||
1021 | gpe_info.gpe_block = gpe_block; | ||
1022 | gpe_info.gpe_device = gpe_device; | ||
1023 | |||
1024 | status = | ||
1025 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
1026 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, | ||
1027 | acpi_ev_match_prw_and_gpe, &gpe_info, | ||
1028 | NULL); | ||
1029 | } | ||
1030 | |||
1031 | /* | ||
1032 | * Enable all GPEs in this block that have these attributes: | ||
1033 | * 1) are "runtime" or "run/wake" GPEs, and | ||
1034 | * 2) have a corresponding _Lxx or _Exx method | ||
1035 | * | ||
1036 | * Any other GPEs within this block must be enabled via the acpi_enable_gpe() | ||
1037 | * external interface. | ||
1038 | */ | ||
1039 | wake_gpe_count = 0; | ||
1040 | gpe_enabled_count = 0; | ||
1041 | |||
1042 | for (i = 0; i < gpe_block->register_count; i++) { | ||
1043 | for (j = 0; j < 8; j++) { | ||
1044 | |||
1045 | /* Get the info block for this particular GPE */ | ||
1046 | |||
1047 | gpe_event_info = | ||
1048 | &gpe_block-> | ||
1049 | event_info[((acpi_size) i * | ||
1050 | ACPI_GPE_REGISTER_WIDTH) + j]; | ||
1051 | |||
1052 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
1053 | ACPI_GPE_DISPATCH_METHOD) | ||
1054 | && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { | ||
1055 | gpe_enabled_count++; | ||
1056 | } | ||
1057 | |||
1058 | if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { | ||
1059 | wake_gpe_count++; | ||
1060 | } | ||
1061 | } | ||
1062 | } | ||
1063 | |||
1064 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
1065 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", | ||
1066 | wake_gpe_count, gpe_enabled_count)); | ||
1067 | |||
1068 | /* Enable all valid runtime GPEs found above */ | ||
1069 | |||
1070 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); | ||
1071 | if (ACPI_FAILURE(status)) { | ||
1072 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", | ||
1073 | gpe_block)); | ||
1074 | } | ||
1075 | |||
1076 | return_ACPI_STATUS(status); | ||
1077 | } | ||
1078 | |||
1079 | /******************************************************************************* | ||
1080 | * | ||
1081 | * FUNCTION: acpi_ev_gpe_initialize | ||
1082 | * | ||
1083 | * PARAMETERS: None | ||
1084 | * | ||
1085 | * RETURN: Status | ||
1086 | * | ||
1087 | * DESCRIPTION: Initialize the GPE data structures | ||
1088 | * | ||
1089 | ******************************************************************************/ | ||
1090 | |||
1091 | acpi_status acpi_ev_gpe_initialize(void) | ||
1092 | { | ||
1093 | u32 register_count0 = 0; | ||
1094 | u32 register_count1 = 0; | ||
1095 | u32 gpe_number_max = 0; | ||
1096 | acpi_status status; | ||
1097 | |||
1098 | ACPI_FUNCTION_TRACE(ev_gpe_initialize); | ||
1099 | |||
1100 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
1101 | if (ACPI_FAILURE(status)) { | ||
1102 | return_ACPI_STATUS(status); | ||
1103 | } | ||
1104 | |||
1105 | /* | ||
1106 | * Initialize the GPE Block(s) defined in the FADT | ||
1107 | * | ||
1108 | * Why the GPE register block lengths are divided by 2: From the ACPI Spec, | ||
1109 | * section "General-Purpose Event Registers", we have: | ||
1110 | * | ||
1111 | * "Each register block contains two registers of equal length | ||
1112 | * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the | ||
1113 | * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN. | ||
1114 | * The length of the GPE1_STS and GPE1_EN registers is equal to | ||
1115 | * half the GPE1_LEN. If a generic register block is not supported | ||
1116 | * then its respective block pointer and block length values in the | ||
1117 | * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need | ||
1118 | * to be the same size." | ||
1119 | */ | ||
1120 | |||
1121 | /* | ||
1122 | * Determine the maximum GPE number for this machine. | ||
1123 | * | ||
1124 | * Note: both GPE0 and GPE1 are optional, and either can exist without | ||
1125 | * the other. | ||
1126 | * | ||
1127 | * If EITHER the register length OR the block address are zero, then that | ||
1128 | * particular block is not supported. | ||
1129 | */ | ||
1130 | if (acpi_gbl_FADT.gpe0_block_length && | ||
1131 | acpi_gbl_FADT.xgpe0_block.address) { | ||
1132 | |||
1133 | /* GPE block 0 exists (has both length and address > 0) */ | ||
1134 | |||
1135 | register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2); | ||
1136 | |||
1137 | gpe_number_max = | ||
1138 | (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; | ||
1139 | |||
1140 | /* Install GPE Block 0 */ | ||
1141 | |||
1142 | status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | ||
1143 | &acpi_gbl_FADT.xgpe0_block, | ||
1144 | register_count0, 0, | ||
1145 | acpi_gbl_FADT.sci_interrupt, | ||
1146 | &acpi_gbl_gpe_fadt_blocks[0]); | ||
1147 | |||
1148 | if (ACPI_FAILURE(status)) { | ||
1149 | ACPI_EXCEPTION((AE_INFO, status, | ||
1150 | "Could not create GPE Block 0")); | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | if (acpi_gbl_FADT.gpe1_block_length && | ||
1155 | acpi_gbl_FADT.xgpe1_block.address) { | ||
1156 | |||
1157 | /* GPE block 1 exists (has both length and address > 0) */ | ||
1158 | |||
1159 | register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2); | ||
1160 | |||
1161 | /* Check for GPE0/GPE1 overlap (if both banks exist) */ | ||
1162 | |||
1163 | if ((register_count0) && | ||
1164 | (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) { | ||
1165 | ACPI_ERROR((AE_INFO, | ||
1166 | "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", | ||
1167 | gpe_number_max, acpi_gbl_FADT.gpe1_base, | ||
1168 | acpi_gbl_FADT.gpe1_base + | ||
1169 | ((register_count1 * | ||
1170 | ACPI_GPE_REGISTER_WIDTH) - 1))); | ||
1171 | |||
1172 | /* Ignore GPE1 block by setting the register count to zero */ | ||
1173 | |||
1174 | register_count1 = 0; | ||
1175 | } else { | ||
1176 | /* Install GPE Block 1 */ | ||
1177 | |||
1178 | status = | ||
1179 | acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | ||
1180 | &acpi_gbl_FADT.xgpe1_block, | ||
1181 | register_count1, | ||
1182 | acpi_gbl_FADT.gpe1_base, | ||
1183 | acpi_gbl_FADT. | ||
1184 | sci_interrupt, | ||
1185 | &acpi_gbl_gpe_fadt_blocks | ||
1186 | [1]); | ||
1187 | |||
1188 | if (ACPI_FAILURE(status)) { | ||
1189 | ACPI_EXCEPTION((AE_INFO, status, | ||
1190 | "Could not create GPE Block 1")); | ||
1191 | } | ||
1192 | |||
1193 | /* | ||
1194 | * GPE0 and GPE1 do not have to be contiguous in the GPE number | ||
1195 | * space. However, GPE0 always starts at GPE number zero. | ||
1196 | */ | ||
1197 | gpe_number_max = acpi_gbl_FADT.gpe1_base + | ||
1198 | ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); | ||
1199 | } | ||
1200 | } | ||
1201 | |||
1202 | /* Exit if there are no GPE registers */ | ||
1203 | |||
1204 | if ((register_count0 + register_count1) == 0) { | ||
1205 | |||
1206 | /* GPEs are not required by ACPI, this is OK */ | ||
1207 | |||
1208 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
1209 | "There are no GPE blocks defined in the FADT\n")); | ||
1210 | status = AE_OK; | ||
1211 | goto cleanup; | ||
1212 | } | ||
1213 | |||
1214 | /* Check for Max GPE number out-of-range */ | ||
1215 | |||
1216 | if (gpe_number_max > ACPI_GPE_MAX) { | ||
1217 | ACPI_ERROR((AE_INFO, | ||
1218 | "Maximum GPE number from FADT is too large: 0x%X", | ||
1219 | gpe_number_max)); | ||
1220 | status = AE_BAD_VALUE; | ||
1221 | goto cleanup; | ||
1222 | } | ||
1223 | |||
1224 | cleanup: | ||
1225 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
1226 | return_ACPI_STATUS(AE_OK); | ||
1227 | } | ||
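A worked example of the FADT-derived sizing used by acpi_ev_gpe_initialize() above, as a standalone sketch (illustrative only, not part of this file; the block lengths and GPE1 base below are hypothetical FADT values):

#include <stdio.h>

int main(void)
{
        unsigned int gpe0_block_length = 8;     /* hypothetical, in bytes */
        unsigned int gpe1_block_length = 4;     /* hypothetical, in bytes */
        unsigned int gpe1_base = 0x20;          /* hypothetical starting GPE number */

        /* Each block is half status and half enable registers,
         * and each status/enable register pair covers 8 GPEs. */
        unsigned int register_count0 = gpe0_block_length / 2;  /* 4 */
        unsigned int register_count1 = gpe1_block_length / 2;  /* 2 */

        printf("GPE0: %u registers, GPE 0x00 to 0x%02X\n",
               register_count0, (register_count0 * 8) - 1);            /* 0x1F */
        printf("GPE1: %u registers, GPE 0x%02X to 0x%02X\n",
               register_count1, gpe1_base,
               gpe1_base + (register_count1 * 8) - 1);                 /* 0x2F */
        return 0;
}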