author	Bob Moore <robert.moore@intel.com>	2010-04-26 23:41:19 -0400
committer	Len Brown <len.brown@intel.com>	2010-05-06 03:05:54 -0400
commit	3fe50208b29b2463eb6c181c1433dd1beb39f282 (patch)
tree	a35b7e47a5130e2d0179e34c69a7cae55a327b77 /drivers/acpi/acpica/evgpeblk.c
parent	186c307f008d2a53961cd970aaf7cb9c33e79cb1 (diff)
ACPICA: Split large file, evgpeblk
Create two new files, evgpeinit.c and evgpeutil.c. Updated unix and
linux makefiles.

Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/acpi/acpica/evgpeblk.c')
-rw-r--r--	drivers/acpi/acpica/evgpeblk.c	| 909 ----------------------------------
1 file changed, 0 insertions(+), 909 deletions(-)
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 3341d1dfe78a..7c28f2d9fd35 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -45,27 +45,12 @@
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeblk")
 
 /* Local prototypes */
 static acpi_status
-acpi_ev_match_gpe_method(acpi_handle obj_handle,
-                         u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-                          u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
-                                                               interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                           u32 interrupt_number);
 
@@ -74,632 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ev_valid_gpe_event
- *
- * PARAMETERS:  gpe_event_info - Info for this GPE
- *
- * RETURN:      TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
-        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
-        struct acpi_gpe_block_info *gpe_block;
-
-        ACPI_FUNCTION_ENTRY();
-
-        /* No need for spin lock since we are not changing any list elements */
-
-        /* Walk the GPE interrupt levels */
-
-        gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
-        while (gpe_xrupt_block) {
-                gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
-                /* Walk the GPE blocks on this interrupt level */
-
-                while (gpe_block) {
-                        if ((&gpe_block->event_info[0] <= gpe_event_info) &&
-                            (&gpe_block->event_info[gpe_block->gpe_count] >
-                             gpe_event_info)) {
-                                return (TRUE);
-                        }
-
-                        gpe_block = gpe_block->next;
-                }
-
-                gpe_xrupt_block = gpe_xrupt_block->next;
-        }
-
-        return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_walk_gpe_list
- *
- * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
- *              Context             - Value passed to callback
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
-{
-        struct acpi_gpe_block_info *gpe_block;
-        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-        acpi_status status = AE_OK;
-        acpi_cpu_flags flags;
-
-        ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
-        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-        /* Walk the interrupt level descriptor list */
-
-        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-        while (gpe_xrupt_info) {
-
-                /* Walk all Gpe Blocks attached to this interrupt level */
-
-                gpe_block = gpe_xrupt_info->gpe_block_list_head;
-                while (gpe_block) {
-
-                        /* One callback per GPE block */
-
-                        status =
-                            gpe_walk_callback(gpe_xrupt_info, gpe_block,
-                                              context);
-                        if (ACPI_FAILURE(status)) {
-                                if (status == AE_CTRL_END) {    /* Callback abort */
-                                        status = AE_OK;
-                                }
-                                goto unlock_and_exit;
-                        }
-
-                        gpe_block = gpe_block->next;
-                }
-
-                gpe_xrupt_info = gpe_xrupt_info->next;
-        }
-
-      unlock_and_exit:
-        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-        return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- *              Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-                            struct acpi_gpe_block_info *gpe_block,
-                            void *context)
-{
-        struct acpi_gpe_event_info *gpe_event_info;
-        u32 i;
-        u32 j;
-
-        ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
-        /* Examine each GPE Register within the block */
-
-        for (i = 0; i < gpe_block->register_count; i++) {
-
-                /* Now look at the individual GPEs in this byte register */
-
-                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
-                        gpe_event_info = &gpe_block->event_info[((acpi_size) i *
-                                                                 ACPI_GPE_REGISTER_WIDTH)
-                                                                + j];
-
-                        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-                            ACPI_GPE_DISPATCH_HANDLER) {
-                                ACPI_FREE(gpe_event_info->dispatch.handler);
-                                gpe_event_info->dispatch.handler = NULL;
-                                gpe_event_info->flags &=
-                                    ~ACPI_GPE_DISPATCH_MASK;
-                        }
-                }
-        }
-
-        return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_match_gpe_method
- *
- * PARAMETERS:  Callback from walk_namespace
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- *              control method under the _GPE portion of the namespace.
- *              Extract the name and GPE type from the object, saving this
- *              information for quick lookup during GPE dispatch. Allows a
- *              per-owner_id evaluation if execute_by_owner_id is TRUE in the
- *              walk_info parameter block.
- *
- *              The name of each GPE control method is of the form:
- *              "_Lxx" or "_Exx", where:
- *                  L      - means that the GPE is level triggered
- *                  E      - means that the GPE is edge triggered
- *                  xx     - is the GPE number [in HEX]
- *
- * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
- * with that owner.
- * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
- * method is immediately enabled (Used for Load/load_table operators)
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_gpe_method(acpi_handle obj_handle,
-                         u32 level, void *context, void **return_value)
-{
-        struct acpi_namespace_node *method_node =
-            ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
-        struct acpi_gpe_walk_info *walk_info =
-            ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
-        struct acpi_gpe_event_info *gpe_event_info;
-        struct acpi_namespace_node *gpe_device;
-        acpi_status status;
-        u32 gpe_number;
-        char name[ACPI_NAME_SIZE + 1];
-        u8 type;
-
-        ACPI_FUNCTION_TRACE(ev_match_gpe_method);
-
-        /* Check if requested owner_id matches this owner_id */
-
-        if ((walk_info->execute_by_owner_id) &&
-            (method_node->owner_id != walk_info->owner_id)) {
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /*
-         * Match and decode the _Lxx and _Exx GPE method names
-         *
-         * 1) Extract the method name and null terminate it
-         */
-        ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
-        name[ACPI_NAME_SIZE] = 0;
-
-        /* 2) Name must begin with an underscore */
-
-        if (name[0] != '_') {
-                return_ACPI_STATUS(AE_OK); /* Ignore this method */
-        }
-
-        /*
-         * 3) Edge/Level determination is based on the 2nd character
-         *    of the method name
-         *
-         * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
-         * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
-         */
-        switch (name[1]) {
-        case 'L':
-                type = ACPI_GPE_LEVEL_TRIGGERED;
-                break;
-
-        case 'E':
-                type = ACPI_GPE_EDGE_TRIGGERED;
-                break;
-
-        default:
-                /* Unknown method type, just ignore it */
-
-                ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-                                  "Ignoring unknown GPE method type: %s "
-                                  "(name not of form _Lxx or _Exx)", name));
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* 4) The last two characters of the name are the hex GPE Number */
-
-        gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
-        if (gpe_number == ACPI_UINT32_MAX) {
-
-                /* Conversion failed; invalid method, just ignore it */
-
-                ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-                                  "Could not extract GPE number from name: %s "
-                                  "(name is not of form _Lxx or _Exx)", name));
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* Ensure that we have a valid GPE number for this GPE block */
-
-        gpe_event_info =
-            acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
-        if (!gpe_event_info) {
-                /*
-                 * This gpe_number is not valid for this GPE block, just ignore it.
-                 * However, it may be valid for a different GPE block, since GPE0
-                 * and GPE1 methods both appear under \_GPE.
-                 */
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-            ACPI_GPE_DISPATCH_HANDLER) {
-
-                /* If there is already a handler, ignore this GPE method */
-
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-            ACPI_GPE_DISPATCH_METHOD) {
-                /*
-                 * If there is already a method, ignore this method. But check
-                 * for a type mismatch (if both the _Lxx AND _Exx exist)
-                 */
-                if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
-                        ACPI_ERROR((AE_INFO,
-                                    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
-                                    gpe_number, gpe_number, gpe_number));
-                }
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /*
-         * Add the GPE information from above to the gpe_event_info block for
-         * use during dispatch of this GPE.
-         */
-        gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
-        gpe_event_info->dispatch.method_node = method_node;
-
-        /*
-         * Enable this GPE if requested. This only happens during the
-         * execution of a Load or load_table operator. We have found a new
-         * GPE method and want to immediately enable the GPE if it is a
-         * runtime GPE.
-         */
-        if (walk_info->enable_this_gpe) {
-
-                /* Ignore GPEs that can wake the system */
-
-                if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
-                    !acpi_gbl_leave_wake_gpes_disabled) {
-                        walk_info->count++;
-                        gpe_device = walk_info->gpe_device;
-
-                        if (gpe_device == acpi_gbl_fadt_gpe_device) {
-                                gpe_device = NULL;
-                        }
-
-                        status = acpi_enable_gpe(gpe_device, gpe_number,
-                                                 ACPI_GPE_TYPE_RUNTIME);
-                        if (ACPI_FAILURE(status)) {
-                                ACPI_EXCEPTION((AE_INFO, status,
-                                                "Could not enable GPE 0x%02X",
-                                                gpe_number));
-                        }
-                }
-        }
-
-        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-                          "Registered GPE method %s as GPE number 0x%.2X\n",
-                          name, gpe_number));
-        return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS:  Callback from walk_namespace
- *
- * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
- *              not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- *              Device. Run the _PRW method. If present, extract the GPE
- *              number and mark the GPE as a CAN_WAKE GPE. Allows a
- *              per-owner_id execution if execute_by_owner_id is TRUE in the
- *              walk_info parameter block.
- *
- * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
- * owner.
- * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
- * we only execute _PRWs that refer to the input gpe_device.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-                          u32 level, void *context, void **return_value)
-{
-        struct acpi_gpe_walk_info *walk_info =
-            ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
-        struct acpi_namespace_node *gpe_device;
-        struct acpi_gpe_block_info *gpe_block;
-        struct acpi_namespace_node *target_gpe_device;
-        struct acpi_namespace_node *prw_node;
-        struct acpi_gpe_event_info *gpe_event_info;
-        union acpi_operand_object *pkg_desc;
-        union acpi_operand_object *obj_desc;
-        u32 gpe_number;
-        acpi_status status;
-
-        ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
-        /* Check for a _PRW method under this device */
-
-        status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
-                                  ACPI_NS_NO_UPSEARCH, &prw_node);
-        if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* Check if requested owner_id matches this owner_id */
-
-        if ((walk_info->execute_by_owner_id) &&
-            (prw_node->owner_id != walk_info->owner_id)) {
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* Execute the _PRW */
-
-        status = acpi_ut_evaluate_object(prw_node, NULL,
-                                         ACPI_BTYPE_PACKAGE, &pkg_desc);
-        if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* The returned _PRW package must have at least two elements */
-
-        if (pkg_desc->package.count < 2) {
-                goto cleanup;
-        }
-
-        /* Extract pointers from the input context */
-
-        gpe_device = walk_info->gpe_device;
-        gpe_block = walk_info->gpe_block;
-
-        /*
-         * The _PRW object must return a package; we are only interested
-         * in the first element.
-         */
-        obj_desc = pkg_desc->package.elements[0];
-
-        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
-                /* Use FADT-defined GPE device (from definition of _PRW) */
-
-                target_gpe_device = NULL;
-                if (gpe_device) {
-                        target_gpe_device = acpi_gbl_fadt_gpe_device;
-                }
-
-                /* Integer is the GPE number in the FADT described GPE blocks */
-
-                gpe_number = (u32) obj_desc->integer.value;
-        } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
-                /* Package contains a GPE reference and GPE number within a GPE block */
-
-                if ((obj_desc->package.count < 2) ||
-                    ((obj_desc->package.elements[0])->common.type !=
-                     ACPI_TYPE_LOCAL_REFERENCE) ||
-                    ((obj_desc->package.elements[1])->common.type !=
-                     ACPI_TYPE_INTEGER)) {
-                        goto cleanup;
-                }
-
-                /* Get GPE block reference and decode */
-
-                target_gpe_device =
-                    obj_desc->package.elements[0]->reference.node;
-                gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
-        } else {
-                /* Unknown type, just ignore it */
-
-                goto cleanup;
-        }
-
-        /* Get the gpe_event_info for this GPE */
-
-        if (gpe_device) {
-                /*
-                 * Is this GPE within this block?
-                 *
-                 * TRUE if and only if these conditions are true:
-                 *     1) The GPE devices match.
-                 *     2) The GPE index(number) is within the range of the Gpe Block
-                 *        associated with the GPE device.
-                 */
-                if (gpe_device != target_gpe_device) {
-                        goto cleanup;
-                }
-
-                gpe_event_info =
-                    acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
-        } else {
-                /* gpe_device is NULL, just match the target_device and gpe_number */
-
-                gpe_event_info =
-                    acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
-        }
-
-        if (gpe_event_info) {
-                if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
-
-                        /* This GPE can wake the system */
-
-                        gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-                        walk_info->count++;
-                }
-        }
-
-      cleanup:
-        acpi_ut_remove_reference(pkg_desc);
-        return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS:  interrupt_number - Interrupt for a GPE block
- *
- * RETURN:      A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- *              block per unique interrupt level used for GPEs. Should be
- *              called only when the GPE lists are semaphore locked and not
- *              subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
-                                                               interrupt_number)
-{
-        struct acpi_gpe_xrupt_info *next_gpe_xrupt;
-        struct acpi_gpe_xrupt_info *gpe_xrupt;
-        acpi_status status;
-        acpi_cpu_flags flags;
-
-        ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
-        /* No need for lock since we are not changing any list elements here */
-
-        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
-        while (next_gpe_xrupt) {
-                if (next_gpe_xrupt->interrupt_number == interrupt_number) {
-                        return_PTR(next_gpe_xrupt);
-                }
-
-                next_gpe_xrupt = next_gpe_xrupt->next;
-        }
-
-        /* Not found, must allocate a new xrupt descriptor */
-
-        gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
-        if (!gpe_xrupt) {
-                return_PTR(NULL);
-        }
-
-        gpe_xrupt->interrupt_number = interrupt_number;
-
-        /* Install new interrupt descriptor with spin lock */
-
-        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-        if (acpi_gbl_gpe_xrupt_list_head) {
-                next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
-                while (next_gpe_xrupt->next) {
-                        next_gpe_xrupt = next_gpe_xrupt->next;
-                }
-
-                next_gpe_xrupt->next = gpe_xrupt;
-                gpe_xrupt->previous = next_gpe_xrupt;
-        } else {
-                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
-        }
-        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-        /* Install new interrupt handler if not SCI_INT */
-
-        if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
-                status = acpi_os_install_interrupt_handler(interrupt_number,
-                                                           acpi_ev_gpe_xrupt_handler,
-                                                           gpe_xrupt);
-                if (ACPI_FAILURE(status)) {
-                        ACPI_ERROR((AE_INFO,
-                                    "Could not install GPE interrupt handler at level 0x%X",
-                                    interrupt_number));
-                        return_PTR(NULL);
-                }
-        }
-
-        return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS:  gpe_xrupt - A GPE interrupt info block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- *              interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
-        acpi_status status;
-        acpi_cpu_flags flags;
-
-        ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
-        /* We never want to remove the SCI interrupt handler */
-
-        if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
-                gpe_xrupt->gpe_block_list_head = NULL;
-                return_ACPI_STATUS(AE_OK);
-        }
-
-        /* Disable this interrupt */
-
-        status =
-            acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
-                                             acpi_ev_gpe_xrupt_handler);
-        if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(status);
-        }
-
-        /* Unlink the interrupt block with lock */
-
-        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-        if (gpe_xrupt->previous) {
-                gpe_xrupt->previous->next = gpe_xrupt->next;
-        } else {
-                /* No previous, update list head */
-
-                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
-        }
-
-        if (gpe_xrupt->next) {
-                gpe_xrupt->next->previous = gpe_xrupt->previous;
-        }
-        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-        /* Free the block */
-
-        ACPI_FREE(gpe_xrupt);
-        return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ev_install_gpe_block
  *
  * PARAMETERS:  gpe_block - New GPE block
@@ -1060,123 +419,6 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ev_update_gpes
- *
- * PARAMETERS:  table_owner_id - ID of the newly-loaded ACPI table
- *
- * RETURN:      None
- *
- * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
- *              result of a Load() or load_table() operation. If new GPE
- *              methods have been installed, register the new methods and
- *              enable any runtime GPEs that are associated with them. Also,
- *              run any newly loaded _PRW methods in order to discover any
- *              new CAN_WAKE GPEs.
- *
- ******************************************************************************/
-
-void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
-{
-        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-        struct acpi_gpe_block_info *gpe_block;
-        struct acpi_gpe_walk_info walk_info;
-        acpi_status status = AE_OK;
-        u32 new_wake_gpe_count = 0;
-
-        /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
-
-        walk_info.owner_id = table_owner_id;
-        walk_info.execute_by_owner_id = TRUE;
-        walk_info.count = 0;
-
-        if (acpi_gbl_leave_wake_gpes_disabled) {
-                /*
-                 * 1) Run any newly-loaded _PRW methods to find any GPEs that
-                 * can now be marked as CAN_WAKE GPEs. Note: We must run the
-                 * _PRW methods before we process the _Lxx/_Exx methods because
-                 * we will enable all runtime GPEs associated with the new
-                 * _Lxx/_Exx methods at the time we process those methods.
-                 *
-                 * Unlock interpreter so that we can run the _PRW methods.
-                 */
-                walk_info.gpe_block = NULL;
-                walk_info.gpe_device = NULL;
-
-                acpi_ex_exit_interpreter();
-
-                status =
-                    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                                           ACPI_UINT32_MAX,
-                                           ACPI_NS_WALK_NO_UNLOCK,
-                                           acpi_ev_match_prw_and_gpe, NULL,
-                                           &walk_info, NULL);
-                if (ACPI_FAILURE(status)) {
-                        ACPI_EXCEPTION((AE_INFO, status,
-                                        "While executing _PRW methods"));
-                }
-
-                acpi_ex_enter_interpreter();
-                new_wake_gpe_count = walk_info.count;
-        }
-
-        /*
-         * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
-         *
-         * Any GPEs that correspond to new _Lxx/_Exx methods and are not
-         * marked as CAN_WAKE are immediately enabled.
-         *
-         * Examine the namespace underneath each gpe_device within the
-         * gpe_block lists.
-         */
-        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-        if (ACPI_FAILURE(status)) {
-                return;
-        }
-
-        walk_info.count = 0;
-        walk_info.enable_this_gpe = TRUE;
-
-        /* Walk the interrupt level descriptor list */
-
-        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-        while (gpe_xrupt_info) {
-
-                /* Walk all Gpe Blocks attached to this interrupt level */
-
-                gpe_block = gpe_xrupt_info->gpe_block_list_head;
-                while (gpe_block) {
-                        walk_info.gpe_block = gpe_block;
-                        walk_info.gpe_device = gpe_block->node;
-
-                        status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
-                                                        walk_info.gpe_device,
-                                                        ACPI_UINT32_MAX,
-                                                        ACPI_NS_WALK_NO_UNLOCK,
-                                                        acpi_ev_match_gpe_method,
-                                                        NULL, &walk_info, NULL);
-                        if (ACPI_FAILURE(status)) {
-                                ACPI_EXCEPTION((AE_INFO, status,
-                                                "While decoding _Lxx/_Exx methods"));
-                        }
-
-                        gpe_block = gpe_block->next;
-                }
-
-                gpe_xrupt_info = gpe_xrupt_info->next;
-        }
-
-        if (walk_info.count || new_wake_gpe_count) {
-                ACPI_INFO((AE_INFO,
-                           "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
-                           walk_info.count, new_wake_gpe_count));
-        }
-
-        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-        return;
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ev_initialize_gpe_block
  *
  * PARAMETERS:  gpe_device - Handle to the parent GPE block
@@ -1296,154 +538,3 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 
         return_ACPI_STATUS(AE_OK);
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_gpe_initialize
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
-        u32 register_count0 = 0;
-        u32 register_count1 = 0;
-        u32 gpe_number_max = 0;
-        acpi_status status;
-
-        ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
-        status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-        if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(status);
-        }
-
-        /*
-         * Initialize the GPE Block(s) defined in the FADT
-         *
-         * Why the GPE register block lengths are divided by 2: From the ACPI
-         * Spec, section "General-Purpose Event Registers", we have:
-         *
-         * "Each register block contains two registers of equal length
-         * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
-         * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
-         * The length of the GPE1_STS and GPE1_EN registers is equal to
-         * half the GPE1_LEN. If a generic register block is not supported
-         * then its respective block pointer and block length values in the
-         * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
-         * to be the same size."
-         */
-
-        /*
-         * Determine the maximum GPE number for this machine.
-         *
-         * Note: both GPE0 and GPE1 are optional, and either can exist without
-         * the other.
-         *
-         * If EITHER the register length OR the block address are zero, then that
-         * particular block is not supported.
-         */
-        if (acpi_gbl_FADT.gpe0_block_length &&
-            acpi_gbl_FADT.xgpe0_block.address) {
-
-                /* GPE block 0 exists (has both length and address > 0) */
-
-                register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
-                gpe_number_max =
-                    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
-                /* Install GPE Block 0 */
-
-                status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
-                                                  &acpi_gbl_FADT.xgpe0_block,
-                                                  register_count0, 0,
-                                                  acpi_gbl_FADT.sci_interrupt,
-                                                  &acpi_gbl_gpe_fadt_blocks[0]);
-
-                if (ACPI_FAILURE(status)) {
-                        ACPI_EXCEPTION((AE_INFO, status,
-                                        "Could not create GPE Block 0"));
-                }
-        }
-
-        if (acpi_gbl_FADT.gpe1_block_length &&
-            acpi_gbl_FADT.xgpe1_block.address) {
-
-                /* GPE block 1 exists (has both length and address > 0) */
-
-                register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
-                /* Check for GPE0/GPE1 overlap (if both banks exist) */
-
-                if ((register_count0) &&
-                    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
-                        ACPI_ERROR((AE_INFO,
-                                    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
-                                    "(GPE %u to %u) - Ignoring GPE1",
-                                    gpe_number_max, acpi_gbl_FADT.gpe1_base,
-                                    acpi_gbl_FADT.gpe1_base +
-                                    ((register_count1 *
-                                      ACPI_GPE_REGISTER_WIDTH) - 1)));
-
-                        /* Ignore GPE1 block by setting the register count to zero */
-
-                        register_count1 = 0;
-                } else {
-                        /* Install GPE Block 1 */
-
-                        status =
-                            acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
-                                                     &acpi_gbl_FADT.xgpe1_block,
-                                                     register_count1,
-                                                     acpi_gbl_FADT.gpe1_base,
-                                                     acpi_gbl_FADT.
-                                                     sci_interrupt,
-                                                     &acpi_gbl_gpe_fadt_blocks
-                                                     [1]);
-
-                        if (ACPI_FAILURE(status)) {
-                                ACPI_EXCEPTION((AE_INFO, status,
-                                                "Could not create GPE Block 1"));
-                        }
-
-                        /*
-                         * GPE0 and GPE1 do not have to be contiguous in the GPE number
-                         * space. However, GPE0 always starts at GPE number zero.
-                         */
-                        gpe_number_max = acpi_gbl_FADT.gpe1_base +
-                            ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
-                }
-        }
-
-        /* Exit if there are no GPE registers */
-
-        if ((register_count0 + register_count1) == 0) {
-
-                /* GPEs are not required by ACPI, this is OK */
-
-                ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-                                  "There are no GPE blocks defined in the FADT\n"));
-                status = AE_OK;
-                goto cleanup;
-        }
-
-        /* Check for Max GPE number out-of-range */
-
-        if (gpe_number_max > ACPI_GPE_MAX) {
-                ACPI_ERROR((AE_INFO,
-                            "Maximum GPE number from FADT is too large: 0x%X",
-                            gpe_number_max));
-                status = AE_BAD_VALUE;
-                goto cleanup;
-        }
-
-      cleanup:
-        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-        return_ACPI_STATUS(AE_OK);
-}