diff options
Diffstat (limited to 'drivers/acpi/events')
-rw-r--r-- | drivers/acpi/events/Makefile | 9 | ||||
-rw-r--r-- | drivers/acpi/events/evevent.c | 313 | ||||
-rw-r--r-- | drivers/acpi/events/evgpe.c | 722 | ||||
-rw-r--r-- | drivers/acpi/events/evgpeblk.c | 1227 | ||||
-rw-r--r-- | drivers/acpi/events/evmisc.c | 621 | ||||
-rw-r--r-- | drivers/acpi/events/evregion.c | 1070 | ||||
-rw-r--r-- | drivers/acpi/events/evrgnini.c | 684 | ||||
-rw-r--r-- | drivers/acpi/events/evsci.c | 183 | ||||
-rw-r--r-- | drivers/acpi/events/evxface.c | 821 | ||||
-rw-r--r-- | drivers/acpi/events/evxfevnt.c | 871 | ||||
-rw-r--r-- | drivers/acpi/events/evxfregn.c | 254 |
11 files changed, 0 insertions, 6775 deletions
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile deleted file mode 100644 index d29f2ee449cc..000000000000 --- a/drivers/acpi/events/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for all Linux ACPI interpreter subdirectories | ||
3 | # | ||
4 | |||
5 | obj-y := evevent.o evregion.o evsci.o evxfevnt.o \ | ||
6 | evmisc.o evrgnini.o evxface.o evxfregn.o \ | ||
7 | evgpe.o evgpeblk.o | ||
8 | |||
9 | EXTRA_CFLAGS += $(ACPI_CFLAGS) | ||
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c deleted file mode 100644 index 86bf08365dad..000000000000 --- a/drivers/acpi/events/evevent.c +++ /dev/null | |||
@@ -1,313 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evevent - Fixed Event handling and dispatch | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | |||
48 | #define _COMPONENT ACPI_EVENTS | ||
49 | ACPI_MODULE_NAME("evevent") | ||
50 | |||
51 | /* Local prototypes */ | ||
52 | static acpi_status acpi_ev_fixed_event_initialize(void); | ||
53 | |||
54 | static u32 acpi_ev_fixed_event_dispatch(u32 event); | ||
55 | |||
56 | /******************************************************************************* | ||
57 | * | ||
58 | * FUNCTION: acpi_ev_initialize_events | ||
59 | * | ||
60 | * PARAMETERS: None | ||
61 | * | ||
62 | * RETURN: Status | ||
63 | * | ||
64 | * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE) | ||
65 | * | ||
66 | ******************************************************************************/ | ||
67 | |||
68 | acpi_status acpi_ev_initialize_events(void) | ||
69 | { | ||
70 | acpi_status status; | ||
71 | |||
72 | ACPI_FUNCTION_TRACE(ev_initialize_events); | ||
73 | |||
74 | /* | ||
75 | * Initialize the Fixed and General Purpose Events. This is done prior to | ||
76 | * enabling SCIs to prevent interrupts from occurring before the handlers | ||
77 | * are installed. | ||
78 | */ | ||
79 | status = acpi_ev_fixed_event_initialize(); | ||
80 | if (ACPI_FAILURE(status)) { | ||
81 | ACPI_EXCEPTION((AE_INFO, status, | ||
82 | "Unable to initialize fixed events")); | ||
83 | return_ACPI_STATUS(status); | ||
84 | } | ||
85 | |||
86 | status = acpi_ev_gpe_initialize(); | ||
87 | if (ACPI_FAILURE(status)) { | ||
88 | ACPI_EXCEPTION((AE_INFO, status, | ||
89 | "Unable to initialize general purpose events")); | ||
90 | return_ACPI_STATUS(status); | ||
91 | } | ||
92 | |||
93 | return_ACPI_STATUS(status); | ||
94 | } | ||
95 | |||
96 | /******************************************************************************* | ||
97 | * | ||
98 | * FUNCTION: acpi_ev_install_fadt_gpes | ||
99 | * | ||
100 | * PARAMETERS: None | ||
101 | * | ||
102 | * RETURN: Status | ||
103 | * | ||
104 | * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks | ||
105 | * (0 and 1). This causes the _PRW methods to be run, so the HW | ||
106 | * must be fully initialized at this point, including global lock | ||
107 | * support. | ||
108 | * | ||
109 | ******************************************************************************/ | ||
110 | |||
111 | acpi_status acpi_ev_install_fadt_gpes(void) | ||
112 | { | ||
113 | acpi_status status; | ||
114 | |||
115 | ACPI_FUNCTION_TRACE(ev_install_fadt_gpes); | ||
116 | |||
117 | /* Namespace must be locked */ | ||
118 | |||
119 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
120 | if (ACPI_FAILURE(status)) { | ||
121 | return (status); | ||
122 | } | ||
123 | |||
124 | /* FADT GPE Block 0 */ | ||
125 | |||
126 | (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device, | ||
127 | acpi_gbl_gpe_fadt_blocks[0]); | ||
128 | |||
129 | /* FADT GPE Block 1 */ | ||
130 | |||
131 | (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device, | ||
132 | acpi_gbl_gpe_fadt_blocks[1]); | ||
133 | |||
134 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
135 | return_ACPI_STATUS(AE_OK); | ||
136 | } | ||
137 | |||
138 | /******************************************************************************* | ||
139 | * | ||
140 | * FUNCTION: acpi_ev_install_xrupt_handlers | ||
141 | * | ||
142 | * PARAMETERS: None | ||
143 | * | ||
144 | * RETURN: Status | ||
145 | * | ||
146 | * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock | ||
147 | * | ||
148 | ******************************************************************************/ | ||
149 | |||
150 | acpi_status acpi_ev_install_xrupt_handlers(void) | ||
151 | { | ||
152 | acpi_status status; | ||
153 | |||
154 | ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers); | ||
155 | |||
156 | /* Install the SCI handler */ | ||
157 | |||
158 | status = acpi_ev_install_sci_handler(); | ||
159 | if (ACPI_FAILURE(status)) { | ||
160 | ACPI_EXCEPTION((AE_INFO, status, | ||
161 | "Unable to install System Control Interrupt handler")); | ||
162 | return_ACPI_STATUS(status); | ||
163 | } | ||
164 | |||
165 | /* Install the handler for the Global Lock */ | ||
166 | |||
167 | status = acpi_ev_init_global_lock_handler(); | ||
168 | if (ACPI_FAILURE(status)) { | ||
169 | ACPI_EXCEPTION((AE_INFO, status, | ||
170 | "Unable to initialize Global Lock handler")); | ||
171 | return_ACPI_STATUS(status); | ||
172 | } | ||
173 | |||
174 | acpi_gbl_events_initialized = TRUE; | ||
175 | return_ACPI_STATUS(status); | ||
176 | } | ||
177 | |||
178 | /******************************************************************************* | ||
179 | * | ||
180 | * FUNCTION: acpi_ev_fixed_event_initialize | ||
181 | * | ||
182 | * PARAMETERS: None | ||
183 | * | ||
184 | * RETURN: Status | ||
185 | * | ||
186 | * DESCRIPTION: Install the fixed event handlers and enable the fixed events. | ||
187 | * | ||
188 | ******************************************************************************/ | ||
189 | |||
190 | static acpi_status acpi_ev_fixed_event_initialize(void) | ||
191 | { | ||
192 | u32 i; | ||
193 | acpi_status status; | ||
194 | |||
195 | /* | ||
196 | * Initialize the structure that keeps track of fixed event handlers and | ||
197 | * enable the fixed events. | ||
198 | */ | ||
199 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { | ||
200 | acpi_gbl_fixed_event_handlers[i].handler = NULL; | ||
201 | acpi_gbl_fixed_event_handlers[i].context = NULL; | ||
202 | |||
203 | /* Enable the fixed event */ | ||
204 | |||
205 | if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { | ||
206 | status = | ||
207 | acpi_set_register(acpi_gbl_fixed_event_info[i]. | ||
208 | enable_register_id, 0); | ||
209 | if (ACPI_FAILURE(status)) { | ||
210 | return (status); | ||
211 | } | ||
212 | } | ||
213 | } | ||
214 | |||
215 | return (AE_OK); | ||
216 | } | ||
217 | |||
218 | /******************************************************************************* | ||
219 | * | ||
220 | * FUNCTION: acpi_ev_fixed_event_detect | ||
221 | * | ||
222 | * PARAMETERS: None | ||
223 | * | ||
224 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | ||
225 | * | ||
226 | * DESCRIPTION: Checks the PM status register for active fixed events | ||
227 | * | ||
228 | ******************************************************************************/ | ||
229 | |||
230 | u32 acpi_ev_fixed_event_detect(void) | ||
231 | { | ||
232 | u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; | ||
233 | u32 fixed_status; | ||
234 | u32 fixed_enable; | ||
235 | u32 i; | ||
236 | |||
237 | ACPI_FUNCTION_NAME(ev_fixed_event_detect); | ||
238 | |||
239 | /* | ||
240 | * Read the fixed feature status and enable registers, as all the cases | ||
241 | * depend on their values. Ignore errors here. | ||
242 | */ | ||
243 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); | ||
244 | (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); | ||
245 | |||
246 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | ||
247 | "Fixed Event Block: Enable %08X Status %08X\n", | ||
248 | fixed_enable, fixed_status)); | ||
249 | |||
250 | /* | ||
251 | * Check for all possible Fixed Events and dispatch those that are active | ||
252 | */ | ||
253 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { | ||
254 | |||
255 | /* Both the status and enable bits must be on for this event */ | ||
256 | |||
257 | if ((fixed_status & acpi_gbl_fixed_event_info[i]. | ||
258 | status_bit_mask) | ||
259 | && (fixed_enable & acpi_gbl_fixed_event_info[i]. | ||
260 | enable_bit_mask)) { | ||
261 | |||
262 | /* Found an active (signalled) event */ | ||
263 | acpi_os_fixed_event_count(i); | ||
264 | int_status |= acpi_ev_fixed_event_dispatch(i); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | return (int_status); | ||
269 | } | ||
270 | |||
271 | /******************************************************************************* | ||
272 | * | ||
273 | * FUNCTION: acpi_ev_fixed_event_dispatch | ||
274 | * | ||
275 | * PARAMETERS: Event - Event type | ||
276 | * | ||
277 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | ||
278 | * | ||
279 | * DESCRIPTION: Clears the status bit for the requested event, calls the | ||
280 | * handler that previously registered for the event. | ||
281 | * | ||
282 | ******************************************************************************/ | ||
283 | |||
284 | static u32 acpi_ev_fixed_event_dispatch(u32 event) | ||
285 | { | ||
286 | |||
287 | ACPI_FUNCTION_ENTRY(); | ||
288 | |||
289 | /* Clear the status bit */ | ||
290 | |||
291 | (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. | ||
292 | status_register_id, 1); | ||
293 | |||
294 | /* | ||
295 | * Make sure we've got a handler. If not, report an error. The event is | ||
296 | * disabled to prevent further interrupts. | ||
297 | */ | ||
298 | if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { | ||
299 | (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. | ||
300 | enable_register_id, 0); | ||
301 | |||
302 | ACPI_ERROR((AE_INFO, | ||
303 | "No installed handler for fixed event [%08X]", | ||
304 | event)); | ||
305 | |||
306 | return (ACPI_INTERRUPT_NOT_HANDLED); | ||
307 | } | ||
308 | |||
309 | /* Invoke the Fixed Event handler */ | ||
310 | |||
311 | return ((acpi_gbl_fixed_event_handlers[event]. | ||
312 | handler) (acpi_gbl_fixed_event_handlers[event].context)); | ||
313 | } | ||
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c deleted file mode 100644 index d9779608dbd7..000000000000 --- a/drivers/acpi/events/evgpe.c +++ /dev/null | |||
@@ -1,722 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evgpe - General Purpose Event handling and dispatch | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evgpe") | ||
51 | |||
52 | /* Local prototypes */ | ||
53 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); | ||
54 | |||
55 | /******************************************************************************* | ||
56 | * | ||
57 | * FUNCTION: acpi_ev_set_gpe_type | ||
58 | * | ||
59 | * PARAMETERS: gpe_event_info - GPE to set | ||
60 | * Type - New type | ||
61 | * | ||
62 | * RETURN: Status | ||
63 | * | ||
64 | * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run) | ||
65 | * | ||
66 | ******************************************************************************/ | ||
67 | |||
68 | acpi_status | ||
69 | acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) | ||
70 | { | ||
71 | acpi_status status; | ||
72 | |||
73 | ACPI_FUNCTION_TRACE(ev_set_gpe_type); | ||
74 | |||
75 | /* Validate type and update register enable masks */ | ||
76 | |||
77 | switch (type) { | ||
78 | case ACPI_GPE_TYPE_WAKE: | ||
79 | case ACPI_GPE_TYPE_RUNTIME: | ||
80 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
81 | break; | ||
82 | |||
83 | default: | ||
84 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
85 | } | ||
86 | |||
87 | /* Disable the GPE if currently enabled */ | ||
88 | |||
89 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
90 | |||
91 | /* Type was validated above */ | ||
92 | |||
93 | gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; /* Clear type bits */ | ||
94 | gpe_event_info->flags |= type; /* Insert type */ | ||
95 | return_ACPI_STATUS(status); | ||
96 | } | ||
97 | |||
98 | /******************************************************************************* | ||
99 | * | ||
100 | * FUNCTION: acpi_ev_update_gpe_enable_masks | ||
101 | * | ||
102 | * PARAMETERS: gpe_event_info - GPE to update | ||
103 | * Type - What to do: ACPI_GPE_DISABLE or | ||
104 | * ACPI_GPE_ENABLE | ||
105 | * | ||
106 | * RETURN: Status | ||
107 | * | ||
108 | * DESCRIPTION: Updates GPE register enable masks based on the GPE type | ||
109 | * | ||
110 | ******************************************************************************/ | ||
111 | |||
112 | acpi_status | ||
113 | acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, | ||
114 | u8 type) | ||
115 | { | ||
116 | struct acpi_gpe_register_info *gpe_register_info; | ||
117 | u8 register_bit; | ||
118 | |||
119 | ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks); | ||
120 | |||
121 | gpe_register_info = gpe_event_info->register_info; | ||
122 | if (!gpe_register_info) { | ||
123 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
124 | } | ||
125 | register_bit = (u8) | ||
126 | (1 << | ||
127 | (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); | ||
128 | |||
129 | /* 1) Disable case. Simply clear all enable bits */ | ||
130 | |||
131 | if (type == ACPI_GPE_DISABLE) { | ||
132 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
133 | register_bit); | ||
134 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
135 | return_ACPI_STATUS(AE_OK); | ||
136 | } | ||
137 | |||
138 | /* 2) Enable case. Set/Clear the appropriate enable bits */ | ||
139 | |||
140 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | ||
141 | case ACPI_GPE_TYPE_WAKE: | ||
142 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | ||
143 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); | ||
144 | break; | ||
145 | |||
146 | case ACPI_GPE_TYPE_RUNTIME: | ||
147 | ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, | ||
148 | register_bit); | ||
149 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | ||
150 | break; | ||
151 | |||
152 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
153 | ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); | ||
154 | ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); | ||
155 | break; | ||
156 | |||
157 | default: | ||
158 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
159 | } | ||
160 | |||
161 | return_ACPI_STATUS(AE_OK); | ||
162 | } | ||
163 | |||
164 | /******************************************************************************* | ||
165 | * | ||
166 | * FUNCTION: acpi_ev_enable_gpe | ||
167 | * | ||
168 | * PARAMETERS: gpe_event_info - GPE to enable | ||
169 | * write_to_hardware - Enable now, or just mark data structs | ||
170 | * (WAKE GPEs should be deferred) | ||
171 | * | ||
172 | * RETURN: Status | ||
173 | * | ||
174 | * DESCRIPTION: Enable a GPE based on the GPE type | ||
175 | * | ||
176 | ******************************************************************************/ | ||
177 | |||
178 | acpi_status | ||
179 | acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, | ||
180 | u8 write_to_hardware) | ||
181 | { | ||
182 | acpi_status status; | ||
183 | |||
184 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | ||
185 | |||
186 | /* Make sure HW enable masks are updated */ | ||
187 | |||
188 | status = | ||
189 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE); | ||
190 | if (ACPI_FAILURE(status)) { | ||
191 | return_ACPI_STATUS(status); | ||
192 | } | ||
193 | |||
194 | /* Mark wake-enabled or HW enable, or both */ | ||
195 | |||
196 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | ||
197 | case ACPI_GPE_TYPE_WAKE: | ||
198 | |||
199 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
200 | break; | ||
201 | |||
202 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
203 | |||
204 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
205 | |||
206 | /*lint -fallthrough */ | ||
207 | |||
208 | case ACPI_GPE_TYPE_RUNTIME: | ||
209 | |||
210 | ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
211 | |||
212 | if (write_to_hardware) { | ||
213 | |||
214 | /* Clear the GPE (of stale events), then enable it */ | ||
215 | |||
216 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
217 | if (ACPI_FAILURE(status)) { | ||
218 | return_ACPI_STATUS(status); | ||
219 | } | ||
220 | |||
221 | /* Enable the requested runtime GPE */ | ||
222 | |||
223 | status = acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
224 | } | ||
225 | break; | ||
226 | |||
227 | default: | ||
228 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
229 | } | ||
230 | |||
231 | return_ACPI_STATUS(AE_OK); | ||
232 | } | ||
233 | |||
234 | /******************************************************************************* | ||
235 | * | ||
236 | * FUNCTION: acpi_ev_disable_gpe | ||
237 | * | ||
238 | * PARAMETERS: gpe_event_info - GPE to disable | ||
239 | * | ||
240 | * RETURN: Status | ||
241 | * | ||
242 | * DESCRIPTION: Disable a GPE based on the GPE type | ||
243 | * | ||
244 | ******************************************************************************/ | ||
245 | |||
246 | acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) | ||
247 | { | ||
248 | acpi_status status; | ||
249 | |||
250 | ACPI_FUNCTION_TRACE(ev_disable_gpe); | ||
251 | |||
252 | /* Make sure HW enable masks are updated */ | ||
253 | |||
254 | status = | ||
255 | acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE); | ||
256 | if (ACPI_FAILURE(status)) { | ||
257 | return_ACPI_STATUS(status); | ||
258 | } | ||
259 | |||
260 | /* Clear the appropriate enabled flags for this GPE */ | ||
261 | |||
262 | switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { | ||
263 | case ACPI_GPE_TYPE_WAKE: | ||
264 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
265 | break; | ||
266 | |||
267 | case ACPI_GPE_TYPE_WAKE_RUN: | ||
268 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); | ||
269 | |||
270 | /* fallthrough */ | ||
271 | |||
272 | case ACPI_GPE_TYPE_RUNTIME: | ||
273 | |||
274 | /* Disable the requested runtime GPE */ | ||
275 | |||
276 | ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); | ||
277 | break; | ||
278 | |||
279 | default: | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * Even if we don't know the GPE type, make sure that we always | ||
285 | * disable it. low_disable_gpe will just clear the enable bit for this | ||
286 | * GPE and write it. It will not write out the current GPE enable mask, | ||
287 | * since this may inadvertently enable GPEs too early, if a rogue GPE has | ||
288 | * come in during ACPICA initialization - possibly as a result of AML or | ||
289 | * other code that has enabled the GPE. | ||
290 | */ | ||
291 | status = acpi_hw_low_disable_gpe(gpe_event_info); | ||
292 | return_ACPI_STATUS(status); | ||
293 | } | ||
294 | |||
295 | /******************************************************************************* | ||
296 | * | ||
297 | * FUNCTION: acpi_ev_get_gpe_event_info | ||
298 | * | ||
299 | * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 | ||
300 | * gpe_number - Raw GPE number | ||
301 | * | ||
302 | * RETURN: A GPE event_info struct. NULL if not a valid GPE | ||
303 | * | ||
304 | * DESCRIPTION: Returns the event_info struct associated with this GPE. | ||
305 | * Validates the gpe_block and the gpe_number | ||
306 | * | ||
307 | * Should be called only when the GPE lists are semaphore locked | ||
308 | * and not subject to change. | ||
309 | * | ||
310 | ******************************************************************************/ | ||
311 | |||
312 | struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, | ||
313 | u32 gpe_number) | ||
314 | { | ||
315 | union acpi_operand_object *obj_desc; | ||
316 | struct acpi_gpe_block_info *gpe_block; | ||
317 | u32 i; | ||
318 | |||
319 | ACPI_FUNCTION_ENTRY(); | ||
320 | |||
321 | /* A NULL gpe_block means use the FADT-defined GPE block(s) */ | ||
322 | |||
323 | if (!gpe_device) { | ||
324 | |||
325 | /* Examine GPE Block 0 and 1 (These blocks are permanent) */ | ||
326 | |||
327 | for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { | ||
328 | gpe_block = acpi_gbl_gpe_fadt_blocks[i]; | ||
329 | if (gpe_block) { | ||
330 | if ((gpe_number >= gpe_block->block_base_number) | ||
331 | && (gpe_number < | ||
332 | gpe_block->block_base_number + | ||
333 | (gpe_block->register_count * 8))) { | ||
334 | return (&gpe_block-> | ||
335 | event_info[gpe_number - | ||
336 | gpe_block-> | ||
337 | block_base_number]); | ||
338 | } | ||
339 | } | ||
340 | } | ||
341 | |||
342 | /* The gpe_number was not in the range of either FADT GPE block */ | ||
343 | |||
344 | return (NULL); | ||
345 | } | ||
346 | |||
347 | /* A Non-NULL gpe_device means this is a GPE Block Device */ | ||
348 | |||
349 | obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) | ||
350 | gpe_device); | ||
351 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
352 | return (NULL); | ||
353 | } | ||
354 | |||
355 | gpe_block = obj_desc->device.gpe_block; | ||
356 | |||
357 | if ((gpe_number >= gpe_block->block_base_number) && | ||
358 | (gpe_number < | ||
359 | gpe_block->block_base_number + (gpe_block->register_count * 8))) { | ||
360 | return (&gpe_block-> | ||
361 | event_info[gpe_number - gpe_block->block_base_number]); | ||
362 | } | ||
363 | |||
364 | return (NULL); | ||
365 | } | ||
366 | |||
367 | /******************************************************************************* | ||
368 | * | ||
369 | * FUNCTION: acpi_ev_gpe_detect | ||
370 | * | ||
371 | * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. | ||
372 | * Can have multiple GPE blocks attached. | ||
373 | * | ||
374 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | ||
375 | * | ||
376 | * DESCRIPTION: Detect if any GP events have occurred. This function is | ||
377 | * executed at interrupt level. | ||
378 | * | ||
379 | ******************************************************************************/ | ||
380 | |||
381 | u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | ||
382 | { | ||
383 | acpi_status status; | ||
384 | struct acpi_gpe_block_info *gpe_block; | ||
385 | struct acpi_gpe_register_info *gpe_register_info; | ||
386 | u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; | ||
387 | u8 enabled_status_byte; | ||
388 | u32 status_reg; | ||
389 | u32 enable_reg; | ||
390 | acpi_cpu_flags flags; | ||
391 | u32 i; | ||
392 | u32 j; | ||
393 | |||
394 | ACPI_FUNCTION_NAME(ev_gpe_detect); | ||
395 | |||
396 | /* Check for the case where there are no GPEs */ | ||
397 | |||
398 | if (!gpe_xrupt_list) { | ||
399 | return (int_status); | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * We need to obtain the GPE lock for both the data structs and registers | ||
404 | * Note: Not necessary to obtain the hardware lock, since the GPE | ||
405 | * registers are owned by the gpe_lock. | ||
406 | */ | ||
407 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
408 | |||
409 | /* Examine all GPE blocks attached to this interrupt level */ | ||
410 | |||
411 | gpe_block = gpe_xrupt_list->gpe_block_list_head; | ||
412 | while (gpe_block) { | ||
413 | /* | ||
414 | * Read all of the 8-bit GPE status and enable registers in this GPE | ||
415 | * block, saving all of them. Find all currently active GP events. | ||
416 | */ | ||
417 | for (i = 0; i < gpe_block->register_count; i++) { | ||
418 | |||
419 | /* Get the next status/enable pair */ | ||
420 | |||
421 | gpe_register_info = &gpe_block->register_info[i]; | ||
422 | |||
423 | /* Read the Status Register */ | ||
424 | |||
425 | status = | ||
426 | acpi_read(&status_reg, | ||
427 | &gpe_register_info->status_address); | ||
428 | if (ACPI_FAILURE(status)) { | ||
429 | goto unlock_and_exit; | ||
430 | } | ||
431 | |||
432 | /* Read the Enable Register */ | ||
433 | |||
434 | status = | ||
435 | acpi_read(&enable_reg, | ||
436 | &gpe_register_info->enable_address); | ||
437 | if (ACPI_FAILURE(status)) { | ||
438 | goto unlock_and_exit; | ||
439 | } | ||
440 | |||
441 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | ||
442 | "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", | ||
443 | gpe_register_info->base_gpe_number, | ||
444 | status_reg, enable_reg)); | ||
445 | |||
446 | /* Check if there is anything active at all in this register */ | ||
447 | |||
448 | enabled_status_byte = (u8) (status_reg & enable_reg); | ||
449 | if (!enabled_status_byte) { | ||
450 | |||
451 | /* No active GPEs in this register, move on */ | ||
452 | |||
453 | continue; | ||
454 | } | ||
455 | |||
456 | /* Now look at the individual GPEs in this byte register */ | ||
457 | |||
458 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { | ||
459 | |||
460 | /* Examine one GPE bit */ | ||
461 | |||
462 | if (enabled_status_byte & (1 << j)) { | ||
463 | /* | ||
464 | * Found an active GPE. Dispatch the event to a handler | ||
465 | * or method. | ||
466 | */ | ||
467 | int_status |= | ||
468 | acpi_ev_gpe_dispatch(&gpe_block-> | ||
469 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); | ||
470 | } | ||
471 | } | ||
472 | } | ||
473 | |||
474 | gpe_block = gpe_block->next; | ||
475 | } | ||
476 | |||
477 | unlock_and_exit: | ||
478 | |||
479 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
480 | return (int_status); | ||
481 | } | ||
482 | |||
483 | /******************************************************************************* | ||
484 | * | ||
485 | * FUNCTION: acpi_ev_asynch_execute_gpe_method | ||
486 | * | ||
487 | * PARAMETERS: Context (gpe_event_info) - Info for this GPE | ||
488 | * | ||
489 | * RETURN: None | ||
490 | * | ||
491 | * DESCRIPTION: Perform the actual execution of a GPE control method. This | ||
492 | * function is called from an invocation of acpi_os_execute and | ||
493 | * therefore does NOT execute at interrupt level - so that | ||
494 | * the control method itself is not executed in the context of | ||
495 | * an interrupt handler. | ||
496 | * | ||
497 | ******************************************************************************/ | ||
498 | static void acpi_ev_asynch_enable_gpe(void *context); | ||
499 | |||
500 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | ||
501 | { | ||
502 | struct acpi_gpe_event_info *gpe_event_info = (void *)context; | ||
503 | acpi_status status; | ||
504 | struct acpi_gpe_event_info local_gpe_event_info; | ||
505 | struct acpi_evaluate_info *info; | ||
506 | |||
507 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | ||
508 | |||
509 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
510 | if (ACPI_FAILURE(status)) { | ||
511 | return_VOID; | ||
512 | } | ||
513 | |||
514 | /* Must revalidate the gpe_number/gpe_block */ | ||
515 | |||
516 | if (!acpi_ev_valid_gpe_event(gpe_event_info)) { | ||
517 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
518 | return_VOID; | ||
519 | } | ||
520 | |||
521 | /* Set the GPE flags for return to enabled state */ | ||
522 | |||
523 | (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); | ||
524 | |||
525 | /* | ||
526 | * Take a snapshot of the GPE info for this level - we copy the info to | ||
527 | * prevent a race condition with remove_handler/remove_block. | ||
528 | */ | ||
529 | ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, | ||
530 | sizeof(struct acpi_gpe_event_info)); | ||
531 | |||
532 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
533 | if (ACPI_FAILURE(status)) { | ||
534 | return_VOID; | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * Must check for control method type dispatch one more time to avoid a | ||
539 | * race with ev_gpe_install_handler | ||
540 | */ | ||
541 | if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == | ||
542 | ACPI_GPE_DISPATCH_METHOD) { | ||
543 | |||
544 | /* Allocate the evaluation information block */ | ||
545 | |||
546 | info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); | ||
547 | if (!info) { | ||
548 | status = AE_NO_MEMORY; | ||
549 | } else { | ||
550 | /* | ||
551 | * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx | ||
552 | * control method that corresponds to this GPE | ||
553 | */ | ||
554 | info->prefix_node = | ||
555 | local_gpe_event_info.dispatch.method_node; | ||
556 | info->flags = ACPI_IGNORE_RETURN_VALUE; | ||
557 | |||
558 | status = acpi_ns_evaluate(info); | ||
559 | ACPI_FREE(info); | ||
560 | } | ||
561 | |||
562 | if (ACPI_FAILURE(status)) { | ||
563 | ACPI_EXCEPTION((AE_INFO, status, | ||
564 | "while evaluating GPE method [%4.4s]", | ||
565 | acpi_ut_get_node_name | ||
566 | (local_gpe_event_info.dispatch. | ||
567 | method_node))); | ||
568 | } | ||
569 | } | ||
570 | /* Defer enabling of GPE until all notify handlers are done */ | ||
571 | acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, | ||
572 | gpe_event_info); | ||
573 | return_VOID; | ||
574 | } | ||
575 | |||
576 | static void acpi_ev_asynch_enable_gpe(void *context) | ||
577 | { | ||
578 | struct acpi_gpe_event_info *gpe_event_info = context; | ||
579 | acpi_status status; | ||
580 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | ||
581 | ACPI_GPE_LEVEL_TRIGGERED) { | ||
582 | /* | ||
583 | * GPE is level-triggered, we clear the GPE status bit after handling | ||
584 | * the event. | ||
585 | */ | ||
586 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
587 | if (ACPI_FAILURE(status)) { | ||
588 | return_VOID; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | /* Enable this GPE */ | ||
593 | (void)acpi_hw_write_gpe_enable_reg(gpe_event_info); | ||
594 | return_VOID; | ||
595 | } | ||
596 | |||
597 | /******************************************************************************* | ||
598 | * | ||
599 | * FUNCTION: acpi_ev_gpe_dispatch | ||
600 | * | ||
601 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
602 | * gpe_number - Number relative to the parent GPE block | ||
603 | * | ||
604 | * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED | ||
605 | * | ||
606 | * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) | ||
607 | * or method (e.g. _Lxx/_Exx) handler. | ||
608 | * | ||
609 | * This function executes at interrupt level. | ||
610 | * | ||
611 | ******************************************************************************/ | ||
612 | |||
613 | u32 | ||
614 | acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) | ||
615 | { | ||
616 | acpi_status status; | ||
617 | |||
618 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); | ||
619 | |||
620 | acpi_os_gpe_count(gpe_number); | ||
621 | |||
622 | /* | ||
623 | * If edge-triggered, clear the GPE status bit now. Note that | ||
624 | * level-triggered events are cleared after the GPE is serviced. | ||
625 | */ | ||
626 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | ||
627 | ACPI_GPE_EDGE_TRIGGERED) { | ||
628 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
629 | if (ACPI_FAILURE(status)) { | ||
630 | ACPI_EXCEPTION((AE_INFO, status, | ||
631 | "Unable to clear GPE[%2X]", | ||
632 | gpe_number)); | ||
633 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
634 | } | ||
635 | } | ||
636 | |||
637 | /* | ||
638 | * Dispatch the GPE to either an installed handler, or the control method | ||
639 | * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke | ||
640 | * it and do not attempt to run the method. If there is neither a handler | ||
641 | * nor a method, we disable this GPE to prevent further such pointless | ||
642 | * events from firing. | ||
643 | */ | ||
644 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | ||
645 | case ACPI_GPE_DISPATCH_HANDLER: | ||
646 | |||
647 | /* | ||
648 | * Invoke the installed handler (at interrupt level) | ||
649 | * Ignore return status for now. | ||
650 | * TBD: leave GPE disabled on error? | ||
651 | */ | ||
652 | (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> | ||
653 | dispatch. | ||
654 | handler-> | ||
655 | context); | ||
656 | |||
657 | /* It is now safe to clear level-triggered events. */ | ||
658 | |||
659 | if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == | ||
660 | ACPI_GPE_LEVEL_TRIGGERED) { | ||
661 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
662 | if (ACPI_FAILURE(status)) { | ||
663 | ACPI_EXCEPTION((AE_INFO, status, | ||
664 | "Unable to clear GPE[%2X]", | ||
665 | gpe_number)); | ||
666 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
667 | } | ||
668 | } | ||
669 | break; | ||
670 | |||
671 | case ACPI_GPE_DISPATCH_METHOD: | ||
672 | |||
673 | /* | ||
674 | * Disable the GPE, so it doesn't keep firing before the method has a | ||
675 | * chance to run (it runs asynchronously with interrupts enabled). | ||
676 | */ | ||
677 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
678 | if (ACPI_FAILURE(status)) { | ||
679 | ACPI_EXCEPTION((AE_INFO, status, | ||
680 | "Unable to disable GPE[%2X]", | ||
681 | gpe_number)); | ||
682 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
683 | } | ||
684 | |||
685 | /* | ||
686 | * Execute the method associated with the GPE | ||
687 | * NOTE: Level-triggered GPEs are cleared after the method completes. | ||
688 | */ | ||
689 | status = acpi_os_execute(OSL_GPE_HANDLER, | ||
690 | acpi_ev_asynch_execute_gpe_method, | ||
691 | gpe_event_info); | ||
692 | if (ACPI_FAILURE(status)) { | ||
693 | ACPI_EXCEPTION((AE_INFO, status, | ||
694 | "Unable to queue handler for GPE[%2X] - event disabled", | ||
695 | gpe_number)); | ||
696 | } | ||
697 | break; | ||
698 | |||
699 | default: | ||
700 | |||
701 | /* No handler or method to run! */ | ||
702 | |||
703 | ACPI_ERROR((AE_INFO, | ||
704 | "No handler or method for GPE[%2X], disabling event", | ||
705 | gpe_number)); | ||
706 | |||
707 | /* | ||
708 | * Disable the GPE. The GPE will remain disabled until the ACPICA | ||
709 | * Core Subsystem is restarted, or a handler is installed. | ||
710 | */ | ||
711 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
712 | if (ACPI_FAILURE(status)) { | ||
713 | ACPI_EXCEPTION((AE_INFO, status, | ||
714 | "Unable to disable GPE[%2X]", | ||
715 | gpe_number)); | ||
716 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | ||
717 | } | ||
718 | break; | ||
719 | } | ||
720 | |||
721 | return_UINT32(ACPI_INTERRUPT_HANDLED); | ||
722 | } | ||
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c deleted file mode 100644 index 2a8d1856038f..000000000000 --- a/drivers/acpi/events/evgpeblk.c +++ /dev/null | |||
@@ -1,1227 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evgpeblk - GPE block creation and initialization. | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evgpeblk") | ||
51 | |||
52 | /* Local prototypes */ | ||
53 | static acpi_status | ||
54 | acpi_ev_save_method_info(acpi_handle obj_handle, | ||
55 | u32 level, void *obj_desc, void **return_value); | ||
56 | |||
57 | static acpi_status | ||
58 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
59 | u32 level, void *info, void **return_value); | ||
60 | |||
61 | static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 | ||
62 | interrupt_number); | ||
63 | |||
64 | static acpi_status | ||
65 | acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); | ||
66 | |||
67 | static acpi_status | ||
68 | acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | ||
69 | u32 interrupt_number); | ||
70 | |||
71 | static acpi_status | ||
72 | acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block); | ||
73 | |||
74 | /******************************************************************************* | ||
75 | * | ||
76 | * FUNCTION: acpi_ev_valid_gpe_event | ||
77 | * | ||
78 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
79 | * | ||
80 | * RETURN: TRUE if the gpe_event is valid | ||
81 | * | ||
82 | * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. | ||
83 | * Should be called only when the GPE lists are semaphore locked | ||
84 | * and not subject to change. | ||
85 | * | ||
86 | ******************************************************************************/ | ||
87 | |||
88 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | ||
89 | { | ||
90 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
91 | struct acpi_gpe_block_info *gpe_block; | ||
92 | |||
93 | ACPI_FUNCTION_ENTRY(); | ||
94 | |||
95 | /* No need for spin lock since we are not changing any list elements */ | ||
96 | |||
97 | /* Walk the GPE interrupt levels */ | ||
98 | |||
99 | gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; | ||
100 | while (gpe_xrupt_block) { | ||
101 | gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
102 | |||
103 | /* Walk the GPE blocks on this interrupt level */ | ||
104 | |||
105 | while (gpe_block) { | ||
106 | if ((&gpe_block->event_info[0] <= gpe_event_info) && | ||
107 | (&gpe_block-> | ||
108 | event_info[((acpi_size) gpe_block-> | ||
109 | register_count) * 8] > | ||
110 | gpe_event_info)) { | ||
111 | return (TRUE); | ||
112 | } | ||
113 | |||
114 | gpe_block = gpe_block->next; | ||
115 | } | ||
116 | |||
117 | gpe_xrupt_block = gpe_xrupt_block->next; | ||
118 | } | ||
119 | |||
120 | return (FALSE); | ||
121 | } | ||
122 | |||
123 | /******************************************************************************* | ||
124 | * | ||
125 | * FUNCTION: acpi_ev_walk_gpe_list | ||
126 | * | ||
127 | * PARAMETERS: gpe_walk_callback - Routine called for each GPE block | ||
128 | * Context - Value passed to callback | ||
129 | * | ||
130 | * RETURN: Status | ||
131 | * | ||
132 | * DESCRIPTION: Walk the GPE lists. | ||
133 | * | ||
134 | ******************************************************************************/ | ||
135 | |||
136 | acpi_status | ||
137 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context) | ||
138 | { | ||
139 | struct acpi_gpe_block_info *gpe_block; | ||
140 | struct acpi_gpe_xrupt_info *gpe_xrupt_info; | ||
141 | acpi_status status = AE_OK; | ||
142 | acpi_cpu_flags flags; | ||
143 | |||
144 | ACPI_FUNCTION_TRACE(ev_walk_gpe_list); | ||
145 | |||
146 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
147 | |||
148 | /* Walk the interrupt level descriptor list */ | ||
149 | |||
150 | gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; | ||
151 | while (gpe_xrupt_info) { | ||
152 | |||
153 | /* Walk all Gpe Blocks attached to this interrupt level */ | ||
154 | |||
155 | gpe_block = gpe_xrupt_info->gpe_block_list_head; | ||
156 | while (gpe_block) { | ||
157 | |||
158 | /* One callback per GPE block */ | ||
159 | |||
160 | status = | ||
161 | gpe_walk_callback(gpe_xrupt_info, gpe_block, | ||
162 | context); | ||
163 | if (ACPI_FAILURE(status)) { | ||
164 | if (status == AE_CTRL_END) { /* Callback abort */ | ||
165 | status = AE_OK; | ||
166 | } | ||
167 | goto unlock_and_exit; | ||
168 | } | ||
169 | |||
170 | gpe_block = gpe_block->next; | ||
171 | } | ||
172 | |||
173 | gpe_xrupt_info = gpe_xrupt_info->next; | ||
174 | } | ||
175 | |||
176 | unlock_and_exit: | ||
177 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
178 | return_ACPI_STATUS(status); | ||
179 | } | ||
180 | |||
181 | /******************************************************************************* | ||
182 | * | ||
183 | * FUNCTION: acpi_ev_delete_gpe_handlers | ||
184 | * | ||
185 | * PARAMETERS: gpe_xrupt_info - GPE Interrupt info | ||
186 | * gpe_block - Gpe Block info | ||
187 | * | ||
188 | * RETURN: Status | ||
189 | * | ||
190 | * DESCRIPTION: Delete all Handler objects found in the GPE data structs. | ||
191 | * Used only prior to termination. | ||
192 | * | ||
193 | ******************************************************************************/ | ||
194 | |||
195 | acpi_status | ||
196 | acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
197 | struct acpi_gpe_block_info *gpe_block, | ||
198 | void *context) | ||
199 | { | ||
200 | struct acpi_gpe_event_info *gpe_event_info; | ||
201 | u32 i; | ||
202 | u32 j; | ||
203 | |||
204 | ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers); | ||
205 | |||
206 | /* Examine each GPE Register within the block */ | ||
207 | |||
208 | for (i = 0; i < gpe_block->register_count; i++) { | ||
209 | |||
210 | /* Now look at the individual GPEs in this byte register */ | ||
211 | |||
212 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { | ||
213 | gpe_event_info = | ||
214 | &gpe_block-> | ||
215 | event_info[((acpi_size) i * | ||
216 | ACPI_GPE_REGISTER_WIDTH) + j]; | ||
217 | |||
218 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
219 | ACPI_GPE_DISPATCH_HANDLER) { | ||
220 | ACPI_FREE(gpe_event_info->dispatch.handler); | ||
221 | gpe_event_info->dispatch.handler = NULL; | ||
222 | gpe_event_info->flags &= | ||
223 | ~ACPI_GPE_DISPATCH_MASK; | ||
224 | } | ||
225 | } | ||
226 | } | ||
227 | |||
228 | return_ACPI_STATUS(AE_OK); | ||
229 | } | ||
230 | |||
231 | /******************************************************************************* | ||
232 | * | ||
233 | * FUNCTION: acpi_ev_save_method_info | ||
234 | * | ||
235 | * PARAMETERS: Callback from walk_namespace | ||
236 | * | ||
237 | * RETURN: Status | ||
238 | * | ||
239 | * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a | ||
240 | * control method under the _GPE portion of the namespace. | ||
241 | * Extract the name and GPE type from the object, saving this | ||
242 | * information for quick lookup during GPE dispatch | ||
243 | * | ||
244 | * The name of each GPE control method is of the form: | ||
245 | * "_Lxx" or "_Exx" | ||
246 | * Where: | ||
247 | * L - means that the GPE is level triggered | ||
248 | * E - means that the GPE is edge triggered | ||
249 | * xx - is the GPE number [in HEX] | ||
250 | * | ||
251 | ******************************************************************************/ | ||
252 | |||
253 | static acpi_status | ||
254 | acpi_ev_save_method_info(acpi_handle obj_handle, | ||
255 | u32 level, void *obj_desc, void **return_value) | ||
256 | { | ||
257 | struct acpi_gpe_block_info *gpe_block = (void *)obj_desc; | ||
258 | struct acpi_gpe_event_info *gpe_event_info; | ||
259 | u32 gpe_number; | ||
260 | char name[ACPI_NAME_SIZE + 1]; | ||
261 | u8 type; | ||
262 | acpi_status status; | ||
263 | |||
264 | ACPI_FUNCTION_TRACE(ev_save_method_info); | ||
265 | |||
266 | /* | ||
267 | * _Lxx and _Exx GPE method support | ||
268 | * | ||
269 | * 1) Extract the name from the object and convert to a string | ||
270 | */ | ||
271 | ACPI_MOVE_32_TO_32(name, | ||
272 | &((struct acpi_namespace_node *)obj_handle)->name. | ||
273 | integer); | ||
274 | name[ACPI_NAME_SIZE] = 0; | ||
275 | |||
276 | /* | ||
277 | * 2) Edge/Level determination is based on the 2nd character | ||
278 | * of the method name | ||
279 | * | ||
280 | * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE | ||
281 | * if a _PRW object is found that points to this GPE. | ||
282 | */ | ||
283 | switch (name[1]) { | ||
284 | case 'L': | ||
285 | type = ACPI_GPE_LEVEL_TRIGGERED; | ||
286 | break; | ||
287 | |||
288 | case 'E': | ||
289 | type = ACPI_GPE_EDGE_TRIGGERED; | ||
290 | break; | ||
291 | |||
292 | default: | ||
293 | /* Unknown method type, just ignore it! */ | ||
294 | |||
295 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
296 | "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)", | ||
297 | name)); | ||
298 | return_ACPI_STATUS(AE_OK); | ||
299 | } | ||
300 | |||
301 | /* Convert the last two characters of the name to the GPE Number */ | ||
302 | |||
303 | gpe_number = ACPI_STRTOUL(&name[2], NULL, 16); | ||
304 | if (gpe_number == ACPI_UINT32_MAX) { | ||
305 | |||
306 | /* Conversion failed; invalid method, just ignore it */ | ||
307 | |||
308 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
309 | "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)", | ||
310 | name)); | ||
311 | return_ACPI_STATUS(AE_OK); | ||
312 | } | ||
313 | |||
314 | /* Ensure that we have a valid GPE number for this GPE block */ | ||
315 | |||
316 | if ((gpe_number < gpe_block->block_base_number) || | ||
317 | (gpe_number >= | ||
318 | (gpe_block->block_base_number + | ||
319 | (gpe_block->register_count * 8)))) { | ||
320 | /* | ||
321 | * Not valid for this GPE block, just ignore it. However, it may be | ||
322 | * valid for a different GPE block, since GPE0 and GPE1 methods both | ||
323 | * appear under \_GPE. | ||
324 | */ | ||
325 | return_ACPI_STATUS(AE_OK); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Now we can add this information to the gpe_event_info block for use | ||
330 | * during dispatch of this GPE. Default type is RUNTIME, although this may | ||
331 | * change when the _PRW methods are executed later. | ||
332 | */ | ||
333 | gpe_event_info = | ||
334 | &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; | ||
335 | |||
336 | gpe_event_info->flags = (u8) | ||
337 | (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME); | ||
338 | |||
339 | gpe_event_info->dispatch.method_node = | ||
340 | (struct acpi_namespace_node *)obj_handle; | ||
341 | |||
342 | /* Update enable mask, but don't enable the HW GPE as of yet */ | ||
343 | |||
344 | status = acpi_ev_enable_gpe(gpe_event_info, FALSE); | ||
345 | |||
346 | ACPI_DEBUG_PRINT((ACPI_DB_LOAD, | ||
347 | "Registered GPE method %s as GPE number 0x%.2X\n", | ||
348 | name, gpe_number)); | ||
349 | return_ACPI_STATUS(status); | ||
350 | } | ||
351 | |||
352 | /******************************************************************************* | ||
353 | * | ||
354 | * FUNCTION: acpi_ev_match_prw_and_gpe | ||
355 | * | ||
356 | * PARAMETERS: Callback from walk_namespace | ||
357 | * | ||
358 | * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is | ||
359 | * not aborted on a single _PRW failure. | ||
360 | * | ||
361 | * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a | ||
362 | * Device. Run the _PRW method. If present, extract the GPE | ||
363 | * number and mark the GPE as a WAKE GPE. | ||
364 | * | ||
365 | ******************************************************************************/ | ||
366 | |||
367 | static acpi_status | ||
368 | acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, | ||
369 | u32 level, void *info, void **return_value) | ||
370 | { | ||
371 | struct acpi_gpe_walk_info *gpe_info = (void *)info; | ||
372 | struct acpi_namespace_node *gpe_device; | ||
373 | struct acpi_gpe_block_info *gpe_block; | ||
374 | struct acpi_namespace_node *target_gpe_device; | ||
375 | struct acpi_gpe_event_info *gpe_event_info; | ||
376 | union acpi_operand_object *pkg_desc; | ||
377 | union acpi_operand_object *obj_desc; | ||
378 | u32 gpe_number; | ||
379 | acpi_status status; | ||
380 | |||
381 | ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe); | ||
382 | |||
383 | /* Check for a _PRW method under this device */ | ||
384 | |||
385 | status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW, | ||
386 | ACPI_BTYPE_PACKAGE, &pkg_desc); | ||
387 | if (ACPI_FAILURE(status)) { | ||
388 | |||
389 | /* Ignore all errors from _PRW, we don't want to abort the subsystem */ | ||
390 | |||
391 | return_ACPI_STATUS(AE_OK); | ||
392 | } | ||
393 | |||
394 | /* The returned _PRW package must have at least two elements */ | ||
395 | |||
396 | if (pkg_desc->package.count < 2) { | ||
397 | goto cleanup; | ||
398 | } | ||
399 | |||
400 | /* Extract pointers from the input context */ | ||
401 | |||
402 | gpe_device = gpe_info->gpe_device; | ||
403 | gpe_block = gpe_info->gpe_block; | ||
404 | |||
405 | /* | ||
406 | * The _PRW object must return a package, we are only interested in the | ||
407 | * first element | ||
408 | */ | ||
409 | obj_desc = pkg_desc->package.elements[0]; | ||
410 | |||
411 | if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { | ||
412 | |||
413 | /* Use FADT-defined GPE device (from definition of _PRW) */ | ||
414 | |||
415 | target_gpe_device = acpi_gbl_fadt_gpe_device; | ||
416 | |||
417 | /* Integer is the GPE number in the FADT described GPE blocks */ | ||
418 | |||
419 | gpe_number = (u32) obj_desc->integer.value; | ||
420 | } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) { | ||
421 | |||
422 | /* Package contains a GPE reference and GPE number within a GPE block */ | ||
423 | |||
424 | if ((obj_desc->package.count < 2) || | ||
425 | (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) != | ||
426 | ACPI_TYPE_LOCAL_REFERENCE) | ||
427 | || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) != | ||
428 | ACPI_TYPE_INTEGER)) { | ||
429 | goto cleanup; | ||
430 | } | ||
431 | |||
432 | /* Get GPE block reference and decode */ | ||
433 | |||
434 | target_gpe_device = | ||
435 | obj_desc->package.elements[0]->reference.node; | ||
436 | gpe_number = (u32) obj_desc->package.elements[1]->integer.value; | ||
437 | } else { | ||
438 | /* Unknown type, just ignore it */ | ||
439 | |||
440 | goto cleanup; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * Is this GPE within this block? | ||
445 | * | ||
446 | * TRUE if and only if these conditions are true: | ||
447 | * 1) The GPE devices match. | ||
448 | * 2) The GPE index (number) is within the range of the GPE block | ||
449 | * associated with the GPE device. | ||
450 | */ | ||
451 | if ((gpe_device == target_gpe_device) && | ||
452 | (gpe_number >= gpe_block->block_base_number) && | ||
453 | (gpe_number < | ||
454 | gpe_block->block_base_number + (gpe_block->register_count * 8))) { | ||
455 | gpe_event_info = | ||
456 | &gpe_block->event_info[gpe_number - | ||
457 | gpe_block->block_base_number]; | ||
458 | |||
459 | /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ | ||
460 | |||
461 | gpe_event_info->flags &= | ||
462 | ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); | ||
463 | |||
464 | status = | ||
465 | acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); | ||
466 | if (ACPI_FAILURE(status)) { | ||
467 | goto cleanup; | ||
468 | } | ||
469 | |||
470 | status = | ||
471 | acpi_ev_update_gpe_enable_masks(gpe_event_info, | ||
472 | ACPI_GPE_DISABLE); | ||
473 | } | ||
474 | |||
475 | cleanup: | ||
476 | acpi_ut_remove_reference(pkg_desc); | ||
477 | return_ACPI_STATUS(AE_OK); | ||
478 | } | ||
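
A minimal standalone sketch of the containment test above, using hypothetical simplified types rather than the ACPICA structures: a block owns register_count * 8 consecutive GPE numbers starting at block_base_number, and the matching event_info entry is the offset from that base.

#include <stdint.h>

struct gpe_block {               /* hypothetical, mirrors only the fields used here */
    uint32_t block_base_number;  /* first GPE number in the block */
    uint32_t register_count;     /* number of status/enable register pairs */
};

/* Return the event index within the block, or -1 if the GPE is not in it */
static int gpe_index_in_block(const struct gpe_block *block, uint32_t gpe_number)
{
    uint32_t limit = block->block_base_number + (block->register_count * 8);

    if (gpe_number < block->block_base_number || gpe_number >= limit)
        return -1;

    return (int)(gpe_number - block->block_base_number);
}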
479 | |||
480 | /******************************************************************************* | ||
481 | * | ||
482 | * FUNCTION: acpi_ev_get_gpe_xrupt_block | ||
483 | * | ||
484 | * PARAMETERS: interrupt_number - Interrupt for a GPE block | ||
485 | * | ||
486 | * RETURN: A GPE interrupt block | ||
487 | * | ||
488 | * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt | ||
489 | * block per unique interrupt level used for GPEs. Should be | ||
490 | * called only when the GPE lists are semaphore locked and not | ||
491 | * subject to change. | ||
492 | * | ||
493 | ******************************************************************************/ | ||
494 | |||
495 | static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 | ||
496 | interrupt_number) | ||
497 | { | ||
498 | struct acpi_gpe_xrupt_info *next_gpe_xrupt; | ||
499 | struct acpi_gpe_xrupt_info *gpe_xrupt; | ||
500 | acpi_status status; | ||
501 | acpi_cpu_flags flags; | ||
502 | |||
503 | ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block); | ||
504 | |||
505 | /* No need for lock since we are not changing any list elements here */ | ||
506 | |||
507 | next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; | ||
508 | while (next_gpe_xrupt) { | ||
509 | if (next_gpe_xrupt->interrupt_number == interrupt_number) { | ||
510 | return_PTR(next_gpe_xrupt); | ||
511 | } | ||
512 | |||
513 | next_gpe_xrupt = next_gpe_xrupt->next; | ||
514 | } | ||
515 | |||
516 | /* Not found, must allocate a new xrupt descriptor */ | ||
517 | |||
518 | gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info)); | ||
519 | if (!gpe_xrupt) { | ||
520 | return_PTR(NULL); | ||
521 | } | ||
522 | |||
523 | gpe_xrupt->interrupt_number = interrupt_number; | ||
524 | |||
525 | /* Install new interrupt descriptor with spin lock */ | ||
526 | |||
527 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
528 | if (acpi_gbl_gpe_xrupt_list_head) { | ||
529 | next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head; | ||
530 | while (next_gpe_xrupt->next) { | ||
531 | next_gpe_xrupt = next_gpe_xrupt->next; | ||
532 | } | ||
533 | |||
534 | next_gpe_xrupt->next = gpe_xrupt; | ||
535 | gpe_xrupt->previous = next_gpe_xrupt; | ||
536 | } else { | ||
537 | acpi_gbl_gpe_xrupt_list_head = gpe_xrupt; | ||
538 | } | ||
539 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
540 | |||
541 | /* Install new interrupt handler if not SCI_INT */ | ||
542 | |||
543 | if (interrupt_number != acpi_gbl_FADT.sci_interrupt) { | ||
544 | status = acpi_os_install_interrupt_handler(interrupt_number, | ||
545 | acpi_ev_gpe_xrupt_handler, | ||
546 | gpe_xrupt); | ||
547 | if (ACPI_FAILURE(status)) { | ||
548 | ACPI_ERROR((AE_INFO, | ||
549 | "Could not install GPE interrupt handler at level 0x%X", | ||
550 | interrupt_number)); | ||
551 | return_PTR(NULL); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | return_PTR(gpe_xrupt); | ||
556 | } | ||
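
A minimal sketch of the get-or-create pattern above on a hypothetical doubly linked list (the real code re-walks to the tail under the GPE spinlock before linking; locking is omitted here for brevity):

#include <stdlib.h>

struct xrupt_node {
    unsigned int interrupt_number;
    struct xrupt_node *previous;
    struct xrupt_node *next;
};

static struct xrupt_node *list_head;   /* analogous to a global list head */

/* Find an existing node for this interrupt, or append a new one at the tail */
static struct xrupt_node *get_or_create_node(unsigned int interrupt_number)
{
    struct xrupt_node *node = list_head;
    struct xrupt_node *tail = NULL;

    for (; node; node = node->next) {
        if (node->interrupt_number == interrupt_number)
            return node;               /* already present */
        tail = node;
    }

    node = calloc(1, sizeof(*node));   /* zeroed, like ACPI_ALLOCATE_ZEROED */
    if (!node)
        return NULL;

    node->interrupt_number = interrupt_number;
    if (tail) {
        tail->next = node;             /* append at tail */
        node->previous = tail;
    } else {
        list_head = node;              /* first entry becomes the head */
    }
    return node;
}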
557 | |||
558 | /******************************************************************************* | ||
559 | * | ||
560 | * FUNCTION: acpi_ev_delete_gpe_xrupt | ||
561 | * | ||
562 | * PARAMETERS: gpe_xrupt - A GPE interrupt info block | ||
563 | * | ||
564 | * RETURN: Status | ||
565 | * | ||
566 | * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated | ||
567 | * interrupt handler if not the SCI interrupt. | ||
568 | * | ||
569 | ******************************************************************************/ | ||
570 | |||
571 | static acpi_status | ||
572 | acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) | ||
573 | { | ||
574 | acpi_status status; | ||
575 | acpi_cpu_flags flags; | ||
576 | |||
577 | ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt); | ||
578 | |||
579 | /* We never want to remove the SCI interrupt handler */ | ||
580 | |||
581 | if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) { | ||
582 | gpe_xrupt->gpe_block_list_head = NULL; | ||
583 | return_ACPI_STATUS(AE_OK); | ||
584 | } | ||
585 | |||
586 | /* Disable this interrupt */ | ||
587 | |||
588 | status = | ||
589 | acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number, | ||
590 | acpi_ev_gpe_xrupt_handler); | ||
591 | if (ACPI_FAILURE(status)) { | ||
592 | return_ACPI_STATUS(status); | ||
593 | } | ||
594 | |||
595 | /* Unlink the interrupt block with lock */ | ||
596 | |||
597 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
598 | if (gpe_xrupt->previous) { | ||
599 | gpe_xrupt->previous->next = gpe_xrupt->next; | ||
600 | } else { | ||
601 | /* No previous, update list head */ | ||
602 | |||
603 | acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next; | ||
604 | } | ||
605 | |||
606 | if (gpe_xrupt->next) { | ||
607 | gpe_xrupt->next->previous = gpe_xrupt->previous; | ||
608 | } | ||
609 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
610 | |||
611 | /* Free the block */ | ||
612 | |||
613 | ACPI_FREE(gpe_xrupt); | ||
614 | return_ACPI_STATUS(AE_OK); | ||
615 | } | ||
616 | |||
617 | /******************************************************************************* | ||
618 | * | ||
619 | * FUNCTION: acpi_ev_install_gpe_block | ||
620 | * | ||
621 | * PARAMETERS: gpe_block - New GPE block | ||
622 | * interrupt_number - Xrupt to be associated with this | ||
623 | * GPE block | ||
624 | * | ||
625 | * RETURN: Status | ||
626 | * | ||
627 | * DESCRIPTION: Install new GPE block with mutex support | ||
628 | * | ||
629 | ******************************************************************************/ | ||
630 | |||
631 | static acpi_status | ||
632 | acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, | ||
633 | u32 interrupt_number) | ||
634 | { | ||
635 | struct acpi_gpe_block_info *next_gpe_block; | ||
636 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
637 | acpi_status status; | ||
638 | acpi_cpu_flags flags; | ||
639 | |||
640 | ACPI_FUNCTION_TRACE(ev_install_gpe_block); | ||
641 | |||
642 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
643 | if (ACPI_FAILURE(status)) { | ||
644 | return_ACPI_STATUS(status); | ||
645 | } | ||
646 | |||
647 | gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number); | ||
648 | if (!gpe_xrupt_block) { | ||
649 | status = AE_NO_MEMORY; | ||
650 | goto unlock_and_exit; | ||
651 | } | ||
652 | |||
653 | /* Install the new block at the end of the list with lock */ | ||
654 | |||
655 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
656 | if (gpe_xrupt_block->gpe_block_list_head) { | ||
657 | next_gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
658 | while (next_gpe_block->next) { | ||
659 | next_gpe_block = next_gpe_block->next; | ||
660 | } | ||
661 | |||
662 | next_gpe_block->next = gpe_block; | ||
663 | gpe_block->previous = next_gpe_block; | ||
664 | } else { | ||
665 | gpe_xrupt_block->gpe_block_list_head = gpe_block; | ||
666 | } | ||
667 | |||
668 | gpe_block->xrupt_block = gpe_xrupt_block; | ||
669 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
670 | |||
671 | unlock_and_exit: | ||
672 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
673 | return_ACPI_STATUS(status); | ||
674 | } | ||
675 | |||
676 | /******************************************************************************* | ||
677 | * | ||
678 | * FUNCTION: acpi_ev_delete_gpe_block | ||
679 | * | ||
680 | * PARAMETERS: gpe_block - Existing GPE block | ||
681 | * | ||
682 | * RETURN: Status | ||
683 | * | ||
684 | * DESCRIPTION: Remove a GPE block | ||
685 | * | ||
686 | ******************************************************************************/ | ||
687 | |||
688 | acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) | ||
689 | { | ||
690 | acpi_status status; | ||
691 | acpi_cpu_flags flags; | ||
692 | |||
693 | ACPI_FUNCTION_TRACE(ev_delete_gpe_block); | ||
694 | |||
695 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
696 | if (ACPI_FAILURE(status)) { | ||
697 | return_ACPI_STATUS(status); | ||
698 | } | ||
699 | |||
700 | /* Disable all GPEs in this block */ | ||
701 | |||
702 | status = | ||
703 | acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL); | ||
704 | |||
705 | if (!gpe_block->previous && !gpe_block->next) { | ||
706 | |||
707 | /* This is the last gpe_block on this interrupt */ | ||
708 | |||
709 | status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block); | ||
710 | if (ACPI_FAILURE(status)) { | ||
711 | goto unlock_and_exit; | ||
712 | } | ||
713 | } else { | ||
714 | /* Remove the block on this interrupt with lock */ | ||
715 | |||
716 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
717 | if (gpe_block->previous) { | ||
718 | gpe_block->previous->next = gpe_block->next; | ||
719 | } else { | ||
720 | gpe_block->xrupt_block->gpe_block_list_head = | ||
721 | gpe_block->next; | ||
722 | } | ||
723 | |||
724 | if (gpe_block->next) { | ||
725 | gpe_block->next->previous = gpe_block->previous; | ||
726 | } | ||
727 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
728 | } | ||
729 | |||
730 | acpi_current_gpe_count -= | ||
731 | gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH; | ||
732 | |||
733 | /* Free the gpe_block */ | ||
734 | |||
735 | ACPI_FREE(gpe_block->register_info); | ||
736 | ACPI_FREE(gpe_block->event_info); | ||
737 | ACPI_FREE(gpe_block); | ||
738 | |||
739 | unlock_and_exit: | ||
740 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
741 | return_ACPI_STATUS(status); | ||
742 | } | ||
743 | |||
744 | /******************************************************************************* | ||
745 | * | ||
746 | * FUNCTION: acpi_ev_create_gpe_info_blocks | ||
747 | * | ||
748 | * PARAMETERS: gpe_block - New GPE block | ||
749 | * | ||
750 | * RETURN: Status | ||
751 | * | ||
752 | * DESCRIPTION: Create the register_info and event_info blocks for this GPE block | ||
753 | * | ||
754 | ******************************************************************************/ | ||
755 | |||
756 | static acpi_status | ||
757 | acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | ||
758 | { | ||
759 | struct acpi_gpe_register_info *gpe_register_info = NULL; | ||
760 | struct acpi_gpe_event_info *gpe_event_info = NULL; | ||
761 | struct acpi_gpe_event_info *this_event; | ||
762 | struct acpi_gpe_register_info *this_register; | ||
763 | u32 i; | ||
764 | u32 j; | ||
765 | acpi_status status; | ||
766 | |||
767 | ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); | ||
768 | |||
769 | /* Allocate the GPE register information block */ | ||
770 | |||
771 | gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block-> | ||
772 | register_count * | ||
773 | sizeof(struct | ||
774 | acpi_gpe_register_info)); | ||
775 | if (!gpe_register_info) { | ||
776 | ACPI_ERROR((AE_INFO, | ||
777 | "Could not allocate the GpeRegisterInfo table")); | ||
778 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Allocate the GPE event_info block. There are eight distinct GPEs | ||
783 | * per register. Initialization to zeros is sufficient. | ||
784 | */ | ||
785 | gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block-> | ||
786 | register_count * | ||
787 | ACPI_GPE_REGISTER_WIDTH) * | ||
788 | sizeof(struct | ||
789 | acpi_gpe_event_info)); | ||
790 | if (!gpe_event_info) { | ||
791 | ACPI_ERROR((AE_INFO, | ||
792 | "Could not allocate the GpeEventInfo table")); | ||
793 | status = AE_NO_MEMORY; | ||
794 | goto error_exit; | ||
795 | } | ||
796 | |||
797 | /* Save the new Info arrays in the GPE block */ | ||
798 | |||
799 | gpe_block->register_info = gpe_register_info; | ||
800 | gpe_block->event_info = gpe_event_info; | ||
801 | |||
802 | /* | ||
803 | * Initialize the GPE Register and Event structures. A goal of these | ||
804 | * tables is to hide the fact that there are two separate GPE register | ||
805 | * sets in a given GPE hardware block: the status registers occupy the | ||
806 | * first half, and the enable registers occupy the second half. | ||
807 | */ | ||
808 | this_register = gpe_register_info; | ||
809 | this_event = gpe_event_info; | ||
810 | |||
811 | for (i = 0; i < gpe_block->register_count; i++) { | ||
812 | |||
813 | /* Init the register_info for this GPE register (8 GPEs) */ | ||
814 | |||
815 | this_register->base_gpe_number = | ||
816 | (u8) (gpe_block->block_base_number + | ||
817 | (i * ACPI_GPE_REGISTER_WIDTH)); | ||
818 | |||
819 | this_register->status_address.address = | ||
820 | gpe_block->block_address.address + i; | ||
821 | |||
822 | this_register->enable_address.address = | ||
823 | gpe_block->block_address.address + i + | ||
824 | gpe_block->register_count; | ||
825 | |||
826 | this_register->status_address.space_id = | ||
827 | gpe_block->block_address.space_id; | ||
828 | this_register->enable_address.space_id = | ||
829 | gpe_block->block_address.space_id; | ||
830 | this_register->status_address.bit_width = | ||
831 | ACPI_GPE_REGISTER_WIDTH; | ||
832 | this_register->enable_address.bit_width = | ||
833 | ACPI_GPE_REGISTER_WIDTH; | ||
834 | this_register->status_address.bit_offset = 0; | ||
835 | this_register->enable_address.bit_offset = 0; | ||
836 | |||
837 | /* Init the event_info for each GPE within this register */ | ||
838 | |||
839 | for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { | ||
840 | this_event->gpe_number = | ||
841 | (u8) (this_register->base_gpe_number + j); | ||
842 | this_event->register_info = this_register; | ||
843 | this_event++; | ||
844 | } | ||
845 | |||
846 | /* Disable all GPEs within this register */ | ||
847 | |||
848 | status = acpi_write(0x00, &this_register->enable_address); | ||
849 | if (ACPI_FAILURE(status)) { | ||
850 | goto error_exit; | ||
851 | } | ||
852 | |||
853 | /* Clear any pending GPE events within this register */ | ||
854 | |||
855 | status = acpi_write(0xFF, &this_register->status_address); | ||
856 | if (ACPI_FAILURE(status)) { | ||
857 | goto error_exit; | ||
858 | } | ||
859 | |||
860 | this_register++; | ||
861 | } | ||
862 | |||
863 | return_ACPI_STATUS(AE_OK); | ||
864 | |||
865 | error_exit: | ||
866 | if (gpe_register_info) { | ||
867 | ACPI_FREE(gpe_register_info); | ||
868 | } | ||
869 | if (gpe_event_info) { | ||
870 | ACPI_FREE(gpe_event_info); | ||
871 | } | ||
872 | |||
873 | return_ACPI_STATUS(status); | ||
874 | } | ||
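
A standalone sketch of the address layout computed above, with plain integers in place of the generic-address structures: for a block of N register pairs at base B, status register i sits at B + i and its enable register at B + N + i.

#include <stdint.h>
#include <stdio.h>

static void print_gpe_register_map(uint64_t block_base, uint32_t register_count)
{
    uint32_t i;

    for (i = 0; i < register_count; i++) {
        /* Status registers fill the first half of the block */
        uint64_t status_addr = block_base + i;

        /* Enable registers fill the second half */
        uint64_t enable_addr = block_base + register_count + i;

        /* Register i controls GPEs i*8 .. i*8+7 relative to the block base */
        printf("reg %u: STS at 0x%llx, EN at 0x%llx\n",
               i, (unsigned long long)status_addr,
               (unsigned long long)enable_addr);
    }
}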
875 | |||
876 | /******************************************************************************* | ||
877 | * | ||
878 | * FUNCTION: acpi_ev_create_gpe_block | ||
879 | * | ||
880 | * PARAMETERS: gpe_device - Handle to the parent GPE block | ||
881 | * gpe_block_address - Address and space_id | ||
882 | * register_count - Number of GPE register pairs in the block | ||
883 | * gpe_block_base_number - Starting GPE number for the block | ||
884 | * interrupt_number - H/W interrupt for the block | ||
885 | * return_gpe_block - Where the new block descriptor is returned | ||
886 | * | ||
887 | * RETURN: Status | ||
888 | * | ||
889 | * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within | ||
890 | * the block are disabled at exit. | ||
891 | * Note: Assumes namespace is locked. | ||
892 | * | ||
893 | ******************************************************************************/ | ||
894 | |||
895 | acpi_status | ||
896 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | ||
897 | struct acpi_generic_address *gpe_block_address, | ||
898 | u32 register_count, | ||
899 | u8 gpe_block_base_number, | ||
900 | u32 interrupt_number, | ||
901 | struct acpi_gpe_block_info **return_gpe_block) | ||
902 | { | ||
903 | acpi_status status; | ||
904 | struct acpi_gpe_block_info *gpe_block; | ||
905 | |||
906 | ACPI_FUNCTION_TRACE(ev_create_gpe_block); | ||
907 | |||
908 | if (!register_count) { | ||
909 | return_ACPI_STATUS(AE_OK); | ||
910 | } | ||
911 | |||
912 | /* Allocate a new GPE block */ | ||
913 | |||
914 | gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info)); | ||
915 | if (!gpe_block) { | ||
916 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
917 | } | ||
918 | |||
919 | /* Initialize the new GPE block */ | ||
920 | |||
921 | gpe_block->node = gpe_device; | ||
922 | gpe_block->register_count = register_count; | ||
923 | gpe_block->block_base_number = gpe_block_base_number; | ||
924 | |||
925 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, | ||
926 | sizeof(struct acpi_generic_address)); | ||
927 | |||
928 | /* | ||
929 | * Create the register_info and event_info sub-structures | ||
930 | * Note: disables and clears all GPEs in the block | ||
931 | */ | ||
932 | status = acpi_ev_create_gpe_info_blocks(gpe_block); | ||
933 | if (ACPI_FAILURE(status)) { | ||
934 | ACPI_FREE(gpe_block); | ||
935 | return_ACPI_STATUS(status); | ||
936 | } | ||
937 | |||
938 | /* Install the new block in the global lists */ | ||
939 | |||
940 | status = acpi_ev_install_gpe_block(gpe_block, interrupt_number); | ||
941 | if (ACPI_FAILURE(status)) { | ||
942 | ACPI_FREE(gpe_block); | ||
943 | return_ACPI_STATUS(status); | ||
944 | } | ||
945 | |||
946 | /* Find all GPE methods (_Lxx, _Exx) for this block */ | ||
947 | |||
948 | status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, | ||
949 | ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, | ||
950 | acpi_ev_save_method_info, gpe_block, | ||
951 | NULL); | ||
952 | |||
953 | /* Return the new block */ | ||
954 | |||
955 | if (return_gpe_block) { | ||
956 | (*return_gpe_block) = gpe_block; | ||
957 | } | ||
958 | |||
959 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
960 | "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", | ||
961 | (u32) gpe_block->block_base_number, | ||
962 | (u32) (gpe_block->block_base_number + | ||
963 | ((gpe_block->register_count * | ||
964 | ACPI_GPE_REGISTER_WIDTH) - 1)), | ||
965 | gpe_device->name.ascii, gpe_block->register_count, | ||
966 | interrupt_number)); | ||
967 | |||
968 | /* Update global count of currently available GPEs */ | ||
969 | |||
970 | acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH; | ||
971 | return_ACPI_STATUS(AE_OK); | ||
972 | } | ||
973 | |||
974 | /******************************************************************************* | ||
975 | * | ||
976 | * FUNCTION: acpi_ev_initialize_gpe_block | ||
977 | * | ||
978 | * PARAMETERS: gpe_device - Handle to the parent GPE block | ||
979 | * gpe_block - Gpe Block info | ||
980 | * | ||
981 | * RETURN: Status | ||
982 | * | ||
983 | * DESCRIPTION: Initialize and enable a GPE block. First find and run any | ||
984 | * _PRW methods associated with the block, then enable the | ||
985 | * appropriate GPEs. | ||
986 | * Note: Assumes namespace is locked. | ||
987 | * | ||
988 | ******************************************************************************/ | ||
989 | |||
990 | acpi_status | ||
991 | acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, | ||
992 | struct acpi_gpe_block_info *gpe_block) | ||
993 | { | ||
994 | acpi_status status; | ||
995 | struct acpi_gpe_event_info *gpe_event_info; | ||
996 | struct acpi_gpe_walk_info gpe_info; | ||
997 | u32 wake_gpe_count; | ||
998 | u32 gpe_enabled_count; | ||
999 | u32 i; | ||
1000 | u32 j; | ||
1001 | |||
1002 | ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); | ||
1003 | |||
1004 | /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */ | ||
1005 | |||
1006 | if (!gpe_block) { | ||
1007 | return_ACPI_STATUS(AE_OK); | ||
1008 | } | ||
1009 | |||
1010 | /* | ||
1011 | * Runtime option: Should wake GPEs be enabled at runtime? The default | ||
1012 | * is no, they should only be enabled just as the machine goes to sleep. | ||
1013 | */ | ||
1014 | if (acpi_gbl_leave_wake_gpes_disabled) { | ||
1015 | /* | ||
1016 | * Differentiate runtime vs wake GPEs, via the _PRW control methods. | ||
1017 | * Each GPE that has one or more _PRWs that reference it is by | ||
1018 | * definition a wake GPE and will not be enabled while the machine | ||
1019 | * is running. | ||
1020 | */ | ||
1021 | gpe_info.gpe_block = gpe_block; | ||
1022 | gpe_info.gpe_device = gpe_device; | ||
1023 | |||
1024 | status = | ||
1025 | acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
1026 | ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, | ||
1027 | acpi_ev_match_prw_and_gpe, &gpe_info, | ||
1028 | NULL); | ||
1029 | } | ||
1030 | |||
1031 | /* | ||
1032 | * Enable all GPEs in this block that have these attributes: | ||
1033 | * 1) are "runtime" or "run/wake" GPEs, and | ||
1034 | * 2) have a corresponding _Lxx or _Exx method | ||
1035 | * | ||
1036 | * Any other GPEs within this block must be enabled via the acpi_enable_gpe() | ||
1037 | * external interface. | ||
1038 | */ | ||
1039 | wake_gpe_count = 0; | ||
1040 | gpe_enabled_count = 0; | ||
1041 | |||
1042 | for (i = 0; i < gpe_block->register_count; i++) { | ||
1043 | for (j = 0; j < 8; j++) { | ||
1044 | |||
1045 | /* Get the info block for this particular GPE */ | ||
1046 | |||
1047 | gpe_event_info = | ||
1048 | &gpe_block-> | ||
1049 | event_info[((acpi_size) i * | ||
1050 | ACPI_GPE_REGISTER_WIDTH) + j]; | ||
1051 | |||
1052 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
1053 | ACPI_GPE_DISPATCH_METHOD) | ||
1054 | && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { | ||
1055 | gpe_enabled_count++; | ||
1056 | } | ||
1057 | |||
1058 | if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { | ||
1059 | wake_gpe_count++; | ||
1060 | } | ||
1061 | } | ||
1062 | } | ||
1063 | |||
1064 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
1065 | "Found %u Wake, Enabled %u Runtime GPEs in this block\n", | ||
1066 | wake_gpe_count, gpe_enabled_count)); | ||
1067 | |||
1068 | /* Enable all valid runtime GPEs found above */ | ||
1069 | |||
1070 | status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); | ||
1071 | if (ACPI_FAILURE(status)) { | ||
1072 | ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", | ||
1073 | gpe_block)); | ||
1074 | } | ||
1075 | |||
1076 | return_ACPI_STATUS(status); | ||
1077 | } | ||
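
The double loop above addresses a flat array of register_count * 8 event entries, indexing GPE (i, j) at i * 8 + j. A small self-contained sketch of that indexing and the runtime count, with hypothetical flag values standing in for the ACPI_GPE_* bits:

#include <stddef.h>
#include <stdint.h>

#define GPE_TYPE_WAKE    0x01u   /* hypothetical flag bits for illustration */
#define GPE_TYPE_RUNTIME 0x02u

struct gpe_event {
    uint8_t flags;
};

static unsigned int count_runtime_gpes(const struct gpe_event *events,
                                       uint32_t register_count)
{
    unsigned int runtime = 0;
    uint32_t i, j;

    for (i = 0; i < register_count; i++) {
        for (j = 0; j < 8; j++) {
            /* Flat array: event for register i, bit j */
            const struct gpe_event *e = &events[(size_t)i * 8 + j];

            if (e->flags & GPE_TYPE_RUNTIME)
                runtime++;
        }
    }
    return runtime;
}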
1078 | |||
1079 | /******************************************************************************* | ||
1080 | * | ||
1081 | * FUNCTION: acpi_ev_gpe_initialize | ||
1082 | * | ||
1083 | * PARAMETERS: None | ||
1084 | * | ||
1085 | * RETURN: Status | ||
1086 | * | ||
1087 | * DESCRIPTION: Initialize the GPE data structures | ||
1088 | * | ||
1089 | ******************************************************************************/ | ||
1090 | |||
1091 | acpi_status acpi_ev_gpe_initialize(void) | ||
1092 | { | ||
1093 | u32 register_count0 = 0; | ||
1094 | u32 register_count1 = 0; | ||
1095 | u32 gpe_number_max = 0; | ||
1096 | acpi_status status; | ||
1097 | |||
1098 | ACPI_FUNCTION_TRACE(ev_gpe_initialize); | ||
1099 | |||
1100 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
1101 | if (ACPI_FAILURE(status)) { | ||
1102 | return_ACPI_STATUS(status); | ||
1103 | } | ||
1104 | |||
1105 | /* | ||
1106 | * Initialize the GPE Block(s) defined in the FADT | ||
1107 | * | ||
1108 | * Why the GPE register block lengths are divided by 2: From the ACPI Spec, | ||
1109 | * section "General-Purpose Event Registers", we have: | ||
1110 | * | ||
1111 | * "Each register block contains two registers of equal length | ||
1112 | * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the | ||
1113 | * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN. | ||
1114 | * The length of the GPE1_STS and GPE1_EN registers is equal to | ||
1115 | * half the GPE1_LEN. If a generic register block is not supported | ||
1116 | * then its respective block pointer and block length values in the | ||
1117 | * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need | ||
1118 | * to be the same size." | ||
1119 | */ | ||
1120 | |||
1121 | /* | ||
1122 | * Determine the maximum GPE number for this machine. | ||
1123 | * | ||
1124 | * Note: both GPE0 and GPE1 are optional, and either can exist without | ||
1125 | * the other. | ||
1126 | * | ||
1127 | * If EITHER the register length OR the block address are zero, then that | ||
1128 | * particular block is not supported. | ||
1129 | */ | ||
1130 | if (acpi_gbl_FADT.gpe0_block_length && | ||
1131 | acpi_gbl_FADT.xgpe0_block.address) { | ||
1132 | |||
1133 | /* GPE block 0 exists (has both length and address > 0) */ | ||
1134 | |||
1135 | register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2); | ||
1136 | |||
1137 | gpe_number_max = | ||
1138 | (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; | ||
1139 | |||
1140 | /* Install GPE Block 0 */ | ||
1141 | |||
1142 | status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | ||
1143 | &acpi_gbl_FADT.xgpe0_block, | ||
1144 | register_count0, 0, | ||
1145 | acpi_gbl_FADT.sci_interrupt, | ||
1146 | &acpi_gbl_gpe_fadt_blocks[0]); | ||
1147 | |||
1148 | if (ACPI_FAILURE(status)) { | ||
1149 | ACPI_EXCEPTION((AE_INFO, status, | ||
1150 | "Could not create GPE Block 0")); | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | if (acpi_gbl_FADT.gpe1_block_length && | ||
1155 | acpi_gbl_FADT.xgpe1_block.address) { | ||
1156 | |||
1157 | /* GPE block 1 exists (has both length and address > 0) */ | ||
1158 | |||
1159 | register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2); | ||
1160 | |||
1161 | /* Check for GPE0/GPE1 overlap (if both banks exist) */ | ||
1162 | |||
1163 | if ((register_count0) && | ||
1164 | (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) { | ||
1165 | ACPI_ERROR((AE_INFO, | ||
1166 | "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", | ||
1167 | gpe_number_max, acpi_gbl_FADT.gpe1_base, | ||
1168 | acpi_gbl_FADT.gpe1_base + | ||
1169 | ((register_count1 * | ||
1170 | ACPI_GPE_REGISTER_WIDTH) - 1))); | ||
1171 | |||
1172 | /* Ignore GPE1 block by setting the register count to zero */ | ||
1173 | |||
1174 | register_count1 = 0; | ||
1175 | } else { | ||
1176 | /* Install GPE Block 1 */ | ||
1177 | |||
1178 | status = | ||
1179 | acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | ||
1180 | &acpi_gbl_FADT.xgpe1_block, | ||
1181 | register_count1, | ||
1182 | acpi_gbl_FADT.gpe1_base, | ||
1183 | acpi_gbl_FADT. | ||
1184 | sci_interrupt, | ||
1185 | &acpi_gbl_gpe_fadt_blocks | ||
1186 | [1]); | ||
1187 | |||
1188 | if (ACPI_FAILURE(status)) { | ||
1189 | ACPI_EXCEPTION((AE_INFO, status, | ||
1190 | "Could not create GPE Block 1")); | ||
1191 | } | ||
1192 | |||
1193 | /* | ||
1194 | * GPE0 and GPE1 do not have to be contiguous in the GPE number | ||
1195 | * space. However, GPE0 always starts at GPE number zero. | ||
1196 | */ | ||
1197 | gpe_number_max = acpi_gbl_FADT.gpe1_base + | ||
1198 | ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); | ||
1199 | } | ||
1200 | } | ||
1201 | |||
1202 | /* Exit if there are no GPE registers */ | ||
1203 | |||
1204 | if ((register_count0 + register_count1) == 0) { | ||
1205 | |||
1206 | /* GPEs are not required by ACPI, this is OK */ | ||
1207 | |||
1208 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
1209 | "There are no GPE blocks defined in the FADT\n")); | ||
1210 | status = AE_OK; | ||
1211 | goto cleanup; | ||
1212 | } | ||
1213 | |||
1214 | /* Check for Max GPE number out-of-range */ | ||
1215 | |||
1216 | if (gpe_number_max > ACPI_GPE_MAX) { | ||
1217 | ACPI_ERROR((AE_INFO, | ||
1218 | "Maximum GPE number from FADT is too large: 0x%X", | ||
1219 | gpe_number_max)); | ||
1220 | status = AE_BAD_VALUE; | ||
1221 | goto cleanup; | ||
1222 | } | ||
1223 | |||
1224 | cleanup: | ||
1225 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
1226 | return_ACPI_STATUS(AE_OK); | ||
1227 | } | ||
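
A compact sketch of the FADT arithmetic performed above, using hypothetical example values rather than the acpi_gbl_FADT globals: each block length is split between the status and enable halves, so the register-pair count is length / 2, and a block spans register_count * 8 GPE numbers; GPE1 is dropped if its base falls inside GPE0's number space.

#include <stdio.h>

int main(void)
{
    unsigned int gpe0_len = 8;      /* example: 8 bytes -> 4 STS + 4 EN registers */
    unsigned int gpe1_len = 4;
    unsigned int gpe1_base = 0x40;  /* first GPE number of block 1 */

    unsigned int count0 = gpe0_len / 2;                 /* 4 register pairs */
    unsigned int max0 = (count0 * 8) - 1;               /* GPE 0x00 .. 0x1F */

    unsigned int count1 = gpe1_len / 2;                 /* 2 register pairs */
    unsigned int max1 = gpe1_base + (count1 * 8) - 1;   /* GPE 0x40 .. 0x4F */

    /* Block 1 is ignored if it overlaps block 0's number space */
    if (count0 && max0 >= gpe1_base)
        count1 = 0;

    printf("GPE0: %u regs, 0x00-0x%02X; GPE1: %u regs, 0x%02X-0x%02X\n",
           count0, max0, count1, gpe1_base, max1);
    return 0;
}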
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c deleted file mode 100644 index 16f2c1a00167..000000000000 --- a/drivers/acpi/events/evmisc.c +++ /dev/null | |||
@@ -1,621 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evmisc - Miscellaneous event manager support functions | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | #include <acpi/acinterp.h> | ||
49 | |||
50 | #define _COMPONENT ACPI_EVENTS | ||
51 | ACPI_MODULE_NAME("evmisc") | ||
52 | |||
53 | /* Local prototypes */ | ||
54 | static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); | ||
55 | |||
56 | static u32 acpi_ev_global_lock_handler(void *context); | ||
57 | |||
58 | static acpi_status acpi_ev_remove_global_lock_handler(void); | ||
59 | |||
60 | /******************************************************************************* | ||
61 | * | ||
62 | * FUNCTION: acpi_ev_is_notify_object | ||
63 | * | ||
64 | * PARAMETERS: Node - Node to check | ||
65 | * | ||
66 | * RETURN: TRUE if notifies allowed on this object | ||
67 | * | ||
68 | * DESCRIPTION: Check type of node for a object that supports notifies. | ||
69 | * | ||
70 | * TBD: This could be replaced by a flag bit in the node. | ||
71 | * | ||
72 | ******************************************************************************/ | ||
73 | |||
74 | u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node) | ||
75 | { | ||
76 | switch (node->type) { | ||
77 | case ACPI_TYPE_DEVICE: | ||
78 | case ACPI_TYPE_PROCESSOR: | ||
79 | case ACPI_TYPE_THERMAL: | ||
80 | /* | ||
81 | * These are the ONLY objects that can receive ACPI notifications | ||
82 | */ | ||
83 | return (TRUE); | ||
84 | |||
85 | default: | ||
86 | return (FALSE); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | /******************************************************************************* | ||
91 | * | ||
92 | * FUNCTION: acpi_ev_queue_notify_request | ||
93 | * | ||
94 | * PARAMETERS: Node - NS node for the notified object | ||
95 | * notify_value - Value from the Notify() request | ||
96 | * | ||
97 | * RETURN: Status | ||
98 | * | ||
99 | * DESCRIPTION: Dispatch a device notification event to a previously | ||
100 | * installed handler. | ||
101 | * | ||
102 | ******************************************************************************/ | ||
103 | |||
104 | acpi_status | ||
105 | acpi_ev_queue_notify_request(struct acpi_namespace_node * node, | ||
106 | u32 notify_value) | ||
107 | { | ||
108 | union acpi_operand_object *obj_desc; | ||
109 | union acpi_operand_object *handler_obj = NULL; | ||
110 | union acpi_generic_state *notify_info; | ||
111 | acpi_status status = AE_OK; | ||
112 | |||
113 | ACPI_FUNCTION_NAME(ev_queue_notify_request); | ||
114 | |||
115 | /* | ||
116 | * For value 3 (Ejection Request), some device method may need to be run. | ||
117 | * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need | ||
118 | * to be run. | ||
119 | * For value 0x80 (Status Change) on the power button or sleep button, | ||
120 | * initiate soft-off or sleep operation? | ||
121 | */ | ||
122 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
123 | "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n", | ||
124 | acpi_ut_get_node_name(node), node, notify_value, | ||
125 | acpi_ut_get_notify_name(notify_value))); | ||
126 | |||
127 | /* Get the notify object attached to the NS Node */ | ||
128 | |||
129 | obj_desc = acpi_ns_get_attached_object(node); | ||
130 | if (obj_desc) { | ||
131 | |||
132 | /* We have the notify object, Get the right handler */ | ||
133 | |||
134 | switch (node->type) { | ||
135 | |||
136 | /* Notify allowed only on these types */ | ||
137 | |||
138 | case ACPI_TYPE_DEVICE: | ||
139 | case ACPI_TYPE_THERMAL: | ||
140 | case ACPI_TYPE_PROCESSOR: | ||
141 | |||
142 | if (notify_value <= ACPI_MAX_SYS_NOTIFY) { | ||
143 | handler_obj = | ||
144 | obj_desc->common_notify.system_notify; | ||
145 | } else { | ||
146 | handler_obj = | ||
147 | obj_desc->common_notify.device_notify; | ||
148 | } | ||
149 | break; | ||
150 | |||
151 | default: | ||
152 | |||
153 | /* All other types are not supported */ | ||
154 | |||
155 | return (AE_TYPE); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * If there is any handler to run, schedule the dispatcher. | ||
161 | * Check for: | ||
162 | * 1) Global system notify handler | ||
163 | * 2) Global device notify handler | ||
164 | * 3) Per-device notify handler | ||
165 | */ | ||
166 | if ((acpi_gbl_system_notify.handler | ||
167 | && (notify_value <= ACPI_MAX_SYS_NOTIFY)) | ||
168 | || (acpi_gbl_device_notify.handler | ||
169 | && (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) { | ||
170 | notify_info = acpi_ut_create_generic_state(); | ||
171 | if (!notify_info) { | ||
172 | return (AE_NO_MEMORY); | ||
173 | } | ||
174 | |||
175 | if (!handler_obj) { | ||
176 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
177 | "Executing system notify handler for Notify (%4.4s, %X) node %p\n", | ||
178 | acpi_ut_get_node_name(node), | ||
179 | notify_value, node)); | ||
180 | } | ||
181 | |||
182 | notify_info->common.descriptor_type = | ||
183 | ACPI_DESC_TYPE_STATE_NOTIFY; | ||
184 | notify_info->notify.node = node; | ||
185 | notify_info->notify.value = (u16) notify_value; | ||
186 | notify_info->notify.handler_obj = handler_obj; | ||
187 | |||
188 | status = | ||
189 | acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, | ||
190 | notify_info); | ||
191 | if (ACPI_FAILURE(status)) { | ||
192 | acpi_ut_delete_generic_state(notify_info); | ||
193 | } | ||
194 | } else { | ||
195 | /* There is no notify handler (per-device or system) for this device */ | ||
196 | |||
197 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
198 | "No notify handler for Notify (%4.4s, %X) node %p\n", | ||
199 | acpi_ut_get_node_name(node), notify_value, | ||
200 | node)); | ||
201 | } | ||
202 | |||
203 | return (status); | ||
204 | } | ||
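
The handler selection above hinges on one threshold: notify values at or below ACPI_MAX_SYS_NOTIFY (0x7F) are treated as system notifications, larger values as device-specific ones. A minimal sketch of that choice with a hypothetical handler table (not the ACPICA globals):

#include <stdint.h>

#define MAX_SYS_NOTIFY 0x7Fu   /* mirrors ACPI_MAX_SYS_NOTIFY */

typedef void (*notify_fn)(void *node, uint32_t value, void *context);

struct notify_handlers {
    notify_fn system_handler;  /* for values 0x00-0x7F */
    notify_fn device_handler;  /* for values 0x80 and above */
};

static notify_fn pick_handler(const struct notify_handlers *h, uint32_t value)
{
    return (value <= MAX_SYS_NOTIFY) ? h->system_handler : h->device_handler;
}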
205 | |||
206 | /******************************************************************************* | ||
207 | * | ||
208 | * FUNCTION: acpi_ev_notify_dispatch | ||
209 | * | ||
210 | * PARAMETERS: Context - To be passed to the notify handler | ||
211 | * | ||
212 | * RETURN: None. | ||
213 | * | ||
214 | * DESCRIPTION: Dispatch a device notification event to a previously | ||
215 | * installed handler. | ||
216 | * | ||
217 | ******************************************************************************/ | ||
218 | |||
219 | static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) | ||
220 | { | ||
221 | union acpi_generic_state *notify_info = | ||
222 | (union acpi_generic_state *)context; | ||
223 | acpi_notify_handler global_handler = NULL; | ||
224 | void *global_context = NULL; | ||
225 | union acpi_operand_object *handler_obj; | ||
226 | |||
227 | ACPI_FUNCTION_ENTRY(); | ||
228 | |||
229 | /* | ||
230 | * We will invoke a global notify handler if installed. This is done | ||
231 | * _before_ we invoke the per-device handler attached to the device. | ||
232 | */ | ||
233 | if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { | ||
234 | |||
235 | /* Global system notification handler */ | ||
236 | |||
237 | if (acpi_gbl_system_notify.handler) { | ||
238 | global_handler = acpi_gbl_system_notify.handler; | ||
239 | global_context = acpi_gbl_system_notify.context; | ||
240 | } | ||
241 | } else { | ||
242 | /* Global driver notification handler */ | ||
243 | |||
244 | if (acpi_gbl_device_notify.handler) { | ||
245 | global_handler = acpi_gbl_device_notify.handler; | ||
246 | global_context = acpi_gbl_device_notify.context; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | /* Invoke the system handler first, if present */ | ||
251 | |||
252 | if (global_handler) { | ||
253 | global_handler(notify_info->notify.node, | ||
254 | notify_info->notify.value, global_context); | ||
255 | } | ||
256 | |||
257 | /* Now invoke the per-device handler, if present */ | ||
258 | |||
259 | handler_obj = notify_info->notify.handler_obj; | ||
260 | if (handler_obj) { | ||
261 | handler_obj->notify.handler(notify_info->notify.node, | ||
262 | notify_info->notify.value, | ||
263 | handler_obj->notify.context); | ||
264 | } | ||
265 | |||
266 | /* All done with the info object */ | ||
267 | |||
268 | acpi_ut_delete_generic_state(notify_info); | ||
269 | } | ||
270 | |||
271 | /******************************************************************************* | ||
272 | * | ||
273 | * FUNCTION: acpi_ev_global_lock_handler | ||
274 | * | ||
275 | * PARAMETERS: Context - From thread interface, not used | ||
276 | * | ||
277 | * RETURN: ACPI_INTERRUPT_HANDLED | ||
278 | * | ||
279 | * DESCRIPTION: Invoked directly from the SCI handler when a global lock | ||
280 | * release interrupt occurs. Attempt to acquire the global lock, | ||
281 | * if successful, signal the thread waiting for the lock. | ||
282 | * | ||
283 | * NOTE: Assumes that the semaphore can be signaled from interrupt level. If | ||
284 | * this is not possible for some reason, a separate thread will have to be | ||
285 | * scheduled to do this. | ||
286 | * | ||
287 | ******************************************************************************/ | ||
288 | |||
289 | static u32 acpi_ev_global_lock_handler(void *context) | ||
290 | { | ||
291 | u8 acquired = FALSE; | ||
292 | |||
293 | /* | ||
294 | * Attempt to get the lock. | ||
295 | * | ||
296 | * If we don't get it now, it will be marked pending and we will | ||
297 | * take another interrupt when it becomes free. | ||
298 | */ | ||
299 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
300 | if (acquired) { | ||
301 | |||
302 | /* Got the lock, now wake all threads waiting for it */ | ||
303 | |||
304 | acpi_gbl_global_lock_acquired = TRUE; | ||
305 | /* Send a unit to the semaphore */ | ||
306 | |||
307 | if (ACPI_FAILURE | ||
308 | (acpi_os_signal_semaphore | ||
309 | (acpi_gbl_global_lock_semaphore, 1))) { | ||
310 | ACPI_ERROR((AE_INFO, | ||
311 | "Could not signal Global Lock semaphore")); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | return (ACPI_INTERRUPT_HANDLED); | ||
316 | } | ||
317 | |||
318 | /******************************************************************************* | ||
319 | * | ||
320 | * FUNCTION: acpi_ev_init_global_lock_handler | ||
321 | * | ||
322 | * PARAMETERS: None | ||
323 | * | ||
324 | * RETURN: Status | ||
325 | * | ||
326 | * DESCRIPTION: Install a handler for the global lock release event | ||
327 | * | ||
328 | ******************************************************************************/ | ||
329 | |||
330 | acpi_status acpi_ev_init_global_lock_handler(void) | ||
331 | { | ||
332 | acpi_status status; | ||
333 | |||
334 | ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); | ||
335 | |||
336 | /* Attempt installation of the global lock handler */ | ||
337 | |||
338 | status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, | ||
339 | acpi_ev_global_lock_handler, | ||
340 | NULL); | ||
341 | |||
342 | /* | ||
343 | * If the global lock does not exist on this platform, the attempt to | ||
344 | * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick). | ||
345 | * Map to AE_OK, but mark global lock as not present. Any attempt to | ||
346 | * actually use the global lock will be flagged with an error. | ||
347 | */ | ||
348 | if (status == AE_NO_HARDWARE_RESPONSE) { | ||
349 | ACPI_ERROR((AE_INFO, | ||
350 | "No response from Global Lock hardware, disabling lock")); | ||
351 | |||
352 | acpi_gbl_global_lock_present = FALSE; | ||
353 | return_ACPI_STATUS(AE_OK); | ||
354 | } | ||
355 | |||
356 | acpi_gbl_global_lock_present = TRUE; | ||
357 | return_ACPI_STATUS(status); | ||
358 | } | ||
359 | |||
360 | /******************************************************************************* | ||
361 | * | ||
362 | * FUNCTION: acpi_ev_remove_global_lock_handler | ||
363 | * | ||
364 | * PARAMETERS: None | ||
365 | * | ||
366 | * RETURN: Status | ||
367 | * | ||
368 | * DESCRIPTION: Remove the handler for the Global Lock | ||
369 | * | ||
370 | ******************************************************************************/ | ||
371 | |||
372 | static acpi_status acpi_ev_remove_global_lock_handler(void) | ||
373 | { | ||
374 | acpi_status status; | ||
375 | |||
376 | ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler); | ||
377 | |||
378 | acpi_gbl_global_lock_present = FALSE; | ||
379 | status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL, | ||
380 | acpi_ev_global_lock_handler); | ||
381 | |||
382 | return_ACPI_STATUS(status); | ||
383 | } | ||
384 | |||
385 | /****************************************************************************** | ||
386 | * | ||
387 | * FUNCTION: acpi_ev_acquire_global_lock | ||
388 | * | ||
389 | * PARAMETERS: Timeout - Max time to wait for the lock, in millisec. | ||
390 | * | ||
391 | * RETURN: Status | ||
392 | * | ||
393 | * DESCRIPTION: Attempt to gain ownership of the Global Lock. | ||
394 | * | ||
395 | * MUTEX: Interpreter must be locked | ||
396 | * | ||
397 | * Note: The original implementation allowed multiple threads to "acquire" the | ||
398 | * Global Lock, and the OS would hold the lock until the last thread had | ||
399 | * released it. However, this could potentially starve the BIOS out of the | ||
400 | * lock, especially in the case where there is a tight handshake between the | ||
401 | * Embedded Controller driver and the BIOS. Therefore, this implementation | ||
402 | * allows only one thread to acquire the HW Global Lock at a time, and makes | ||
403 | * the global lock appear as a standard mutex on the OS side. | ||
404 | * | ||
405 | *****************************************************************************/ | ||
406 | static acpi_thread_id acpi_ev_global_lock_thread_id; | ||
407 | static int acpi_ev_global_lock_acquired; | ||
408 | |||
409 | acpi_status acpi_ev_acquire_global_lock(u16 timeout) | ||
410 | { | ||
411 | acpi_status status = AE_OK; | ||
412 | u8 acquired = FALSE; | ||
413 | |||
414 | ACPI_FUNCTION_TRACE(ev_acquire_global_lock); | ||
415 | |||
416 | /* | ||
417 | * Only one thread can acquire the GL at a time, the global_lock_mutex | ||
418 | * enforces this. This interface releases the interpreter if we must wait. | ||
419 | */ | ||
420 | status = acpi_ex_system_wait_mutex( | ||
421 | acpi_gbl_global_lock_mutex->mutex.os_mutex, 0); | ||
422 | if (status == AE_TIME) { | ||
423 | if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) { | ||
424 | acpi_ev_global_lock_acquired++; | ||
425 | return AE_OK; | ||
426 | } | ||
427 | } | ||
428 | |||
429 | if (ACPI_FAILURE(status)) { | ||
430 | status = acpi_ex_system_wait_mutex( | ||
431 | acpi_gbl_global_lock_mutex->mutex.os_mutex, | ||
432 | timeout); | ||
433 | } | ||
434 | if (ACPI_FAILURE(status)) { | ||
435 | return_ACPI_STATUS(status); | ||
436 | } | ||
437 | |||
438 | acpi_ev_global_lock_thread_id = acpi_os_get_thread_id(); | ||
439 | acpi_ev_global_lock_acquired++; | ||
440 | |||
441 | /* | ||
442 | * Update the global lock handle and check for wraparound. The handle is | ||
443 | * only used for the external global lock interfaces, but it is updated | ||
444 | * here to properly handle the case where a single thread may acquire the | ||
445 | * lock via both the AML and the acpi_acquire_global_lock interfaces. The | ||
446 | * handle is therefore updated on the first acquire from a given thread | ||
447 | * regardless of where the acquisition request originated. | ||
448 | */ | ||
449 | acpi_gbl_global_lock_handle++; | ||
450 | if (acpi_gbl_global_lock_handle == 0) { | ||
451 | acpi_gbl_global_lock_handle = 1; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * Make sure that a global lock actually exists. If not, just treat the | ||
456 | * lock as a standard mutex. | ||
457 | */ | ||
458 | if (!acpi_gbl_global_lock_present) { | ||
459 | acpi_gbl_global_lock_acquired = TRUE; | ||
460 | return_ACPI_STATUS(AE_OK); | ||
461 | } | ||
462 | |||
463 | /* Attempt to acquire the actual hardware lock */ | ||
464 | |||
465 | ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); | ||
466 | if (acquired) { | ||
467 | |||
468 | /* We got the lock */ | ||
469 | |||
470 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
471 | "Acquired hardware Global Lock\n")); | ||
472 | |||
473 | acpi_gbl_global_lock_acquired = TRUE; | ||
474 | return_ACPI_STATUS(AE_OK); | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * Did not get the lock. The pending bit was set above, and we must now | ||
479 | * wait until we get the global lock released interrupt. | ||
480 | */ | ||
481 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); | ||
482 | |||
483 | /* | ||
484 | * Wait for handshake with the global lock interrupt handler. | ||
485 | * This interface releases the interpreter if we must wait. | ||
486 | */ | ||
487 | status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, | ||
488 | ACPI_WAIT_FOREVER); | ||
489 | |||
490 | return_ACPI_STATUS(status); | ||
491 | } | ||
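
For reference, a sketch of the bit protocol that the ACPI_ACQUIRE_GLOBAL_LOCK macro implements on the FACS lock word, as defined by the ACPI specification: bit 0 is Pending, bit 1 is Owned; a requester that finds the lock owned sets Pending and then waits for the release interrupt handled earlier in this file. C11 atomics stand in for the firmware-shared DWORD here; this is an illustration, not the kernel macro.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define GL_PENDING 0x01u
#define GL_OWNED   0x02u

static bool acquire_global_lock(_Atomic uint32_t *lock)
{
    uint32_t old_val, new_val;

    do {
        old_val = atomic_load(lock);
        if (old_val & GL_OWNED)
            new_val = old_val | GL_PENDING;               /* busy: mark pending */
        else
            new_val = (old_val & ~GL_PENDING) | GL_OWNED; /* take ownership */
    } while (!atomic_compare_exchange_weak(lock, &old_val, new_val));

    /* Acquired only if we took ownership without having to set Pending */
    return (new_val & (GL_OWNED | GL_PENDING)) != (GL_OWNED | GL_PENDING);
}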
492 | |||
493 | /******************************************************************************* | ||
494 | * | ||
495 | * FUNCTION: acpi_ev_release_global_lock | ||
496 | * | ||
497 | * PARAMETERS: None | ||
498 | * | ||
499 | * RETURN: Status | ||
500 | * | ||
501 | * DESCRIPTION: Releases ownership of the Global Lock. | ||
502 | * | ||
503 | ******************************************************************************/ | ||
504 | |||
505 | acpi_status acpi_ev_release_global_lock(void) | ||
506 | { | ||
507 | u8 pending = FALSE; | ||
508 | acpi_status status = AE_OK; | ||
509 | |||
510 | ACPI_FUNCTION_TRACE(ev_release_global_lock); | ||
511 | |||
512 | /* Lock must be already acquired */ | ||
513 | |||
514 | if (!acpi_gbl_global_lock_acquired) { | ||
515 | ACPI_WARNING((AE_INFO, | ||
516 | "Cannot release the ACPI Global Lock, it has not been acquired")); | ||
517 | return_ACPI_STATUS(AE_NOT_ACQUIRED); | ||
518 | } | ||
519 | |||
520 | acpi_ev_global_lock_acquired--; | ||
521 | if (acpi_ev_global_lock_acquired > 0) { | ||
522 | return AE_OK; | ||
523 | } | ||
524 | |||
525 | if (acpi_gbl_global_lock_present) { | ||
526 | |||
527 | /* Allow any thread to release the lock */ | ||
528 | |||
529 | ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending); | ||
530 | |||
531 | /* | ||
532 | * If the pending bit was set, we must write GBL_RLS to the control | ||
533 | * register | ||
534 | */ | ||
535 | if (pending) { | ||
536 | status = | ||
537 | acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE, | ||
538 | 1); | ||
539 | } | ||
540 | |||
541 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
542 | "Released hardware Global Lock\n")); | ||
543 | } | ||
544 | |||
545 | acpi_gbl_global_lock_acquired = FALSE; | ||
546 | |||
547 | /* Release the local GL mutex */ | ||
548 | acpi_ev_global_lock_thread_id = NULL; | ||
549 | acpi_ev_global_lock_acquired = 0; | ||
550 | acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex); | ||
551 | return_ACPI_STATUS(status); | ||
552 | } | ||
553 | |||
554 | /****************************************************************************** | ||
555 | * | ||
556 | * FUNCTION: acpi_ev_terminate | ||
557 | * | ||
558 | * PARAMETERS: none | ||
559 | * | ||
560 | * RETURN: none | ||
561 | * | ||
562 | * DESCRIPTION: Disable events and free memory allocated for table storage. | ||
563 | * | ||
564 | ******************************************************************************/ | ||
565 | |||
566 | void acpi_ev_terminate(void) | ||
567 | { | ||
568 | u32 i; | ||
569 | acpi_status status; | ||
570 | |||
571 | ACPI_FUNCTION_TRACE(ev_terminate); | ||
572 | |||
573 | if (acpi_gbl_events_initialized) { | ||
574 | /* | ||
575 | * Disable all event-related functionality. In all cases, on error, | ||
576 | * print a message but obviously we don't abort. | ||
577 | */ | ||
578 | |||
579 | /* Disable all fixed events */ | ||
580 | |||
581 | for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { | ||
582 | status = acpi_disable_event(i, 0); | ||
583 | if (ACPI_FAILURE(status)) { | ||
584 | ACPI_ERROR((AE_INFO, | ||
585 | "Could not disable fixed event %d", | ||
586 | (u32) i)); | ||
587 | } | ||
588 | } | ||
589 | |||
590 | /* Disable all GPEs in all GPE blocks */ | ||
591 | |||
592 | status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); | ||
593 | |||
594 | /* Remove SCI handler */ | ||
595 | |||
596 | status = acpi_ev_remove_sci_handler(); | ||
597 | if (ACPI_FAILURE(status)) { | ||
598 | ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); | ||
599 | } | ||
600 | |||
601 | status = acpi_ev_remove_global_lock_handler(); | ||
602 | if (ACPI_FAILURE(status)) { | ||
603 | ACPI_ERROR((AE_INFO, | ||
604 | "Could not remove Global Lock handler")); | ||
605 | } | ||
606 | } | ||
607 | |||
608 | /* Deallocate all handler objects installed within GPE info structs */ | ||
609 | |||
610 | status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); | ||
611 | |||
612 | /* Return to original mode if necessary */ | ||
613 | |||
614 | if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { | ||
615 | status = acpi_disable(); | ||
616 | if (ACPI_FAILURE(status)) { | ||
617 | ACPI_WARNING((AE_INFO, "AcpiDisable failed")); | ||
618 | } | ||
619 | } | ||
620 | return_VOID; | ||
621 | } | ||
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c deleted file mode 100644 index 7346093f43ed..000000000000 --- a/drivers/acpi/events/evregion.c +++ /dev/null | |||
@@ -1,1070 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evregion - ACPI address_space (op_region) handler dispatch | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | #include <acpi/acinterp.h> | ||
49 | |||
50 | #define _COMPONENT ACPI_EVENTS | ||
51 | ACPI_MODULE_NAME("evregion") | ||
52 | |||
53 | /* Local prototypes */ | ||
54 | static acpi_status | ||
55 | acpi_ev_reg_run(acpi_handle obj_handle, | ||
56 | u32 level, void *context, void **return_value); | ||
57 | |||
58 | static acpi_status | ||
59 | acpi_ev_install_handler(acpi_handle obj_handle, | ||
60 | u32 level, void *context, void **return_value); | ||
61 | |||
62 | /* These are the address spaces that will get default handlers */ | ||
63 | |||
64 | #define ACPI_NUM_DEFAULT_SPACES 4 | ||
65 | |||
66 | static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = { | ||
67 | ACPI_ADR_SPACE_SYSTEM_MEMORY, | ||
68 | ACPI_ADR_SPACE_SYSTEM_IO, | ||
69 | ACPI_ADR_SPACE_PCI_CONFIG, | ||
70 | ACPI_ADR_SPACE_DATA_TABLE | ||
71 | }; | ||
72 | |||
73 | /******************************************************************************* | ||
74 | * | ||
75 | * FUNCTION: acpi_ev_install_region_handlers | ||
76 | * | ||
77 | * PARAMETERS: None | ||
78 | * | ||
79 | * RETURN: Status | ||
80 | * | ||
81 | * DESCRIPTION: Installs the core subsystem default address space handlers. | ||
82 | * | ||
83 | ******************************************************************************/ | ||
84 | |||
85 | acpi_status acpi_ev_install_region_handlers(void) | ||
86 | { | ||
87 | acpi_status status; | ||
88 | u32 i; | ||
89 | |||
90 | ACPI_FUNCTION_TRACE(ev_install_region_handlers); | ||
91 | |||
92 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
93 | if (ACPI_FAILURE(status)) { | ||
94 | return_ACPI_STATUS(status); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * All address spaces (PCI Config, EC, SMBus) are scope dependent and | ||
99 | * registration must occur for a specific device. | ||
100 | * | ||
101 | * In the case of the system memory and IO address spaces there is | ||
102 | * currently no device associated with the address space. For these we | ||
103 | * use the root. | ||
104 | * | ||
105 | * We install the default PCI config space handler at the root so that | ||
106 | * this space is immediately available even though we have not | ||
107 | * enumerated all the PCI Root Buses yet. This is to conform to the ACPI | ||
108 | * specification, which states that the PCI config space must always be | ||
109 | * available -- even though we are nowhere near ready to find the PCI root | ||
110 | * buses at this point. | ||
111 | * | ||
112 | * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler | ||
113 | * has already been installed (via acpi_install_address_space_handler). | ||
114 | * Similar for AE_SAME_HANDLER. | ||
115 | */ | ||
116 | for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { | ||
117 | status = acpi_ev_install_space_handler(acpi_gbl_root_node, | ||
118 | acpi_gbl_default_address_spaces | ||
119 | [i], | ||
120 | ACPI_DEFAULT_HANDLER, | ||
121 | NULL, NULL); | ||
122 | switch (status) { | ||
123 | case AE_OK: | ||
124 | case AE_SAME_HANDLER: | ||
125 | case AE_ALREADY_EXISTS: | ||
126 | |||
127 | /* These exceptions are all OK */ | ||
128 | |||
129 | status = AE_OK; | ||
130 | break; | ||
131 | |||
132 | default: | ||
133 | |||
134 | goto unlock_and_exit; | ||
135 | } | ||
136 | } | ||
137 | |||
138 | unlock_and_exit: | ||
139 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
140 | return_ACPI_STATUS(status); | ||
141 | } | ||
142 | |||
143 | /******************************************************************************* | ||
144 | * | ||
145 | * FUNCTION: acpi_ev_initialize_op_regions | ||
146 | * | ||
147 | * PARAMETERS: None | ||
148 | * | ||
149 | * RETURN: Status | ||
150 | * | ||
151 | * DESCRIPTION: Execute _REG methods for all Operation Regions that have | ||
152 | * an installed default region handler. | ||
153 | * | ||
154 | ******************************************************************************/ | ||
155 | |||
156 | acpi_status acpi_ev_initialize_op_regions(void) | ||
157 | { | ||
158 | acpi_status status; | ||
159 | u32 i; | ||
160 | |||
161 | ACPI_FUNCTION_TRACE(ev_initialize_op_regions); | ||
162 | |||
163 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
164 | if (ACPI_FAILURE(status)) { | ||
165 | return_ACPI_STATUS(status); | ||
166 | } | ||
167 | |||
168 | /* Run the _REG methods for op_regions in each default address space */ | ||
169 | |||
170 | for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { | ||
171 | /* | ||
172 | * TBD: Make sure handler is the DEFAULT handler, otherwise | ||
173 | * _REG will have already been run. | ||
174 | */ | ||
175 | status = acpi_ev_execute_reg_methods(acpi_gbl_root_node, | ||
176 | acpi_gbl_default_address_spaces | ||
177 | [i]); | ||
178 | } | ||
179 | |||
180 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
181 | return_ACPI_STATUS(status); | ||
182 | } | ||
183 | |||
184 | /******************************************************************************* | ||
185 | * | ||
186 | * FUNCTION: acpi_ev_execute_reg_method | ||
187 | * | ||
188 | * PARAMETERS: region_obj - Region object | ||
189 | * Function - Passed to _REG: On (1) or Off (0) | ||
190 | * | ||
191 | * RETURN: Status | ||
192 | * | ||
193 | * DESCRIPTION: Execute _REG method for a region | ||
194 | * | ||
195 | ******************************************************************************/ | ||
196 | |||
197 | acpi_status | ||
198 | acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) | ||
199 | { | ||
200 | struct acpi_evaluate_info *info; | ||
201 | union acpi_operand_object *args[3]; | ||
202 | union acpi_operand_object *region_obj2; | ||
203 | acpi_status status; | ||
204 | |||
205 | ACPI_FUNCTION_TRACE(ev_execute_reg_method); | ||
206 | |||
207 | region_obj2 = acpi_ns_get_secondary_object(region_obj); | ||
208 | if (!region_obj2) { | ||
209 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
210 | } | ||
211 | |||
212 | if (region_obj2->extra.method_REG == NULL) { | ||
213 | return_ACPI_STATUS(AE_OK); | ||
214 | } | ||
215 | |||
216 | /* Allocate and initialize the evaluation information block */ | ||
217 | |||
218 | info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); | ||
219 | if (!info) { | ||
220 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
221 | } | ||
222 | |||
223 | info->prefix_node = region_obj2->extra.method_REG; | ||
224 | info->pathname = NULL; | ||
225 | info->parameters = args; | ||
226 | info->flags = ACPI_IGNORE_RETURN_VALUE; | ||
227 | |||
228 | /* | ||
229 | * The _REG method has two arguments: | ||
230 | * | ||
231 | * Arg0 - Integer: | ||
232 | * Operation region space ID. Same value as region_obj->Region.space_id | ||
233 | * | ||
234 | * Arg1 - Integer: | ||
235 | * Connection status: 1 for connecting the handler, 0 for disconnecting | ||
236 | * the handler (passed as a parameter) | ||
237 | */ | ||
238 | args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); | ||
239 | if (!args[0]) { | ||
240 | status = AE_NO_MEMORY; | ||
241 | goto cleanup1; | ||
242 | } | ||
243 | |||
244 | args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); | ||
245 | if (!args[1]) { | ||
246 | status = AE_NO_MEMORY; | ||
247 | goto cleanup2; | ||
248 | } | ||
249 | |||
250 | /* Setup the parameter objects */ | ||
251 | |||
252 | args[0]->integer.value = region_obj->region.space_id; | ||
253 | args[1]->integer.value = function; | ||
254 | args[2] = NULL; | ||
255 | |||
256 | /* Execute the method, no return value */ | ||
257 | |||
258 | ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname | ||
259 | (ACPI_TYPE_METHOD, info->prefix_node, NULL)); | ||
260 | |||
261 | status = acpi_ns_evaluate(info); | ||
262 | acpi_ut_remove_reference(args[1]); | ||
263 | |||
264 | cleanup2: | ||
265 | acpi_ut_remove_reference(args[0]); | ||
266 | |||
267 | cleanup1: | ||
268 | ACPI_FREE(info); | ||
269 | return_ACPI_STATUS(status); | ||
270 | } | ||
271 | |||
272 | /******************************************************************************* | ||
273 | * | ||
274 | * FUNCTION: acpi_ev_address_space_dispatch | ||
275 | * | ||
276 | * PARAMETERS: region_obj - Internal region object | ||
277 | * Function - Read or Write operation | ||
278 | * Address - Where in the space to read or write | ||
279 | * bit_width - Field width in bits (8, 16, 32, or 64) | ||
280 | * Value - Pointer to in or out value, must be | ||
281 | * full 64-bit acpi_integer | ||
282 | * | ||
283 | * RETURN: Status | ||
284 | * | ||
285 | * DESCRIPTION: Dispatch an address space or operation region access to | ||
286 | * a previously installed handler. | ||
287 | * | ||
288 | ******************************************************************************/ | ||
289 | |||
290 | acpi_status | ||
291 | acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, | ||
292 | u32 function, | ||
293 | acpi_physical_address address, | ||
294 | u32 bit_width, acpi_integer * value) | ||
295 | { | ||
296 | acpi_status status; | ||
297 | acpi_adr_space_handler handler; | ||
298 | acpi_adr_space_setup region_setup; | ||
299 | union acpi_operand_object *handler_desc; | ||
300 | union acpi_operand_object *region_obj2; | ||
301 | void *region_context = NULL; | ||
302 | |||
303 | ACPI_FUNCTION_TRACE(ev_address_space_dispatch); | ||
304 | |||
305 | region_obj2 = acpi_ns_get_secondary_object(region_obj); | ||
306 | if (!region_obj2) { | ||
307 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
308 | } | ||
309 | |||
310 | /* Ensure that there is a handler associated with this region */ | ||
311 | |||
312 | handler_desc = region_obj->region.handler; | ||
313 | if (!handler_desc) { | ||
314 | ACPI_ERROR((AE_INFO, | ||
315 | "No handler for Region [%4.4s] (%p) [%s]", | ||
316 | acpi_ut_get_node_name(region_obj->region.node), | ||
317 | region_obj, | ||
318 | acpi_ut_get_region_name(region_obj->region. | ||
319 | space_id))); | ||
320 | |||
321 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * It may be the case that the region has never been initialized. | ||
326 | * Some types of regions require special init code | ||
327 | */ | ||
328 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { | ||
329 | |||
330 | /* This region has not been initialized yet, do it */ | ||
331 | |||
332 | region_setup = handler_desc->address_space.setup; | ||
333 | if (!region_setup) { | ||
334 | |||
335 | /* No initialization routine, exit with error */ | ||
336 | |||
337 | ACPI_ERROR((AE_INFO, | ||
338 | "No init routine for region(%p) [%s]", | ||
339 | region_obj, | ||
340 | acpi_ut_get_region_name(region_obj->region. | ||
341 | space_id))); | ||
342 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * We must exit the interpreter because the region setup will | ||
347 | * potentially execute control methods (for example, the _REG method | ||
348 | * for this region) | ||
349 | */ | ||
350 | acpi_ex_exit_interpreter(); | ||
351 | |||
352 | status = region_setup(region_obj, ACPI_REGION_ACTIVATE, | ||
353 | handler_desc->address_space.context, | ||
354 | ®ion_context); | ||
355 | |||
356 | /* Re-enter the interpreter */ | ||
357 | |||
358 | acpi_ex_enter_interpreter(); | ||
359 | |||
360 | /* Check for failure of the Region Setup */ | ||
361 | |||
362 | if (ACPI_FAILURE(status)) { | ||
363 | ACPI_EXCEPTION((AE_INFO, status, | ||
364 | "During region initialization: [%s]", | ||
365 | acpi_ut_get_region_name(region_obj-> | ||
366 | region. | ||
367 | space_id))); | ||
368 | return_ACPI_STATUS(status); | ||
369 | } | ||
370 | |||
371 | /* Region initialization may have been completed by region_setup */ | ||
372 | |||
373 | if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { | ||
374 | region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; | ||
375 | |||
376 | if (region_obj2->extra.region_context) { | ||
377 | |||
378 | /* The handler for this region was already installed */ | ||
379 | |||
380 | ACPI_FREE(region_context); | ||
381 | } else { | ||
382 | /* | ||
383 | * Save the returned context for use in all accesses to | ||
384 | * this particular region | ||
385 | */ | ||
386 | region_obj2->extra.region_context = | ||
387 | region_context; | ||
388 | } | ||
389 | } | ||
390 | } | ||
391 | |||
392 | /* We have everything we need, we can invoke the address space handler */ | ||
393 | |||
394 | handler = handler_desc->address_space.handler; | ||
395 | |||
396 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
397 | "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", | ||
398 | ®ion_obj->region.handler->address_space, handler, | ||
399 | ACPI_FORMAT_NATIVE_UINT(address), | ||
400 | acpi_ut_get_region_name(region_obj->region. | ||
401 | space_id))); | ||
402 | |||
403 | if (!(handler_desc->address_space.handler_flags & | ||
404 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { | ||
405 | /* | ||
406 | * For handlers other than the default (supplied) handlers, we must | ||
407 | * exit the interpreter because the handler *might* block -- we don't | ||
408 | * know what it will do, so we can't hold the lock on the interpreter. | ||
409 | */ | ||
410 | acpi_ex_exit_interpreter(); | ||
411 | } | ||
412 | |||
413 | /* Call the handler */ | ||
414 | |||
415 | status = handler(function, address, bit_width, value, | ||
416 | handler_desc->address_space.context, | ||
417 | region_obj2->extra.region_context); | ||
418 | |||
419 | if (ACPI_FAILURE(status)) { | ||
420 | ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", | ||
421 | acpi_ut_get_region_name(region_obj->region. | ||
422 | space_id))); | ||
423 | } | ||
424 | |||
425 | if (!(handler_desc->address_space.handler_flags & | ||
426 | ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { | ||
427 | /* | ||
428 | * We just returned from a non-default handler, we must re-enter the | ||
429 | * interpreter | ||
430 | */ | ||
431 | acpi_ex_enter_interpreter(); | ||
432 | } | ||
433 | |||
434 | return_ACPI_STATUS(status); | ||
435 | } | ||
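
The handler invoked by the dispatch above must match the acpi_adr_space_handler signature visible in the call: (function, address, bit_width, value, handler_context, region_context). The following is a minimal sketch of such a handler; the name my_region_handler and the stubbed read/write behavior are assumptions for illustration only, not code from this file.

    #include <acpi/acpi.h>

    /*
     * Sketch of an operation region handler compatible with the dispatch call
     * above. ACPI_READ/ACPI_WRITE select the transfer direction, and *value is
     * always a full 64-bit acpi_integer regardless of bit_width.
     */
    static acpi_status
    my_region_handler(u32 function,
                      acpi_physical_address address,
                      u32 bit_width,
                      acpi_integer *value,
                      void *handler_context,
                      void *region_context)
    {
            if (function == ACPI_READ) {
                    *value = 0;     /* fill in bit_width bits read at address */
            } else {
                    /* ACPI_WRITE: send the low bit_width bits of *value to address */
            }

            return (AE_OK);
    }
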
436 | |||
437 | /******************************************************************************* | ||
438 | * | ||
439 | * FUNCTION: acpi_ev_detach_region | ||
440 | * | ||
441 | * PARAMETERS: region_obj - Region Object | ||
442 | * acpi_ns_is_locked - Namespace Region Already Locked? | ||
443 | * | ||
444 | * RETURN: None | ||
445 | * | ||
446 | * DESCRIPTION: Break the association between the handler and the region; | ||
447 | * this is a two-way association. | ||
448 | * | ||
449 | ******************************************************************************/ | ||
450 | |||
451 | void | ||
452 | acpi_ev_detach_region(union acpi_operand_object *region_obj, | ||
453 | u8 acpi_ns_is_locked) | ||
454 | { | ||
455 | union acpi_operand_object *handler_obj; | ||
456 | union acpi_operand_object *obj_desc; | ||
457 | union acpi_operand_object **last_obj_ptr; | ||
458 | acpi_adr_space_setup region_setup; | ||
459 | void **region_context; | ||
460 | union acpi_operand_object *region_obj2; | ||
461 | acpi_status status; | ||
462 | |||
463 | ACPI_FUNCTION_TRACE(ev_detach_region); | ||
464 | |||
465 | region_obj2 = acpi_ns_get_secondary_object(region_obj); | ||
466 | if (!region_obj2) { | ||
467 | return_VOID; | ||
468 | } | ||
469 | region_context = ®ion_obj2->extra.region_context; | ||
470 | |||
471 | /* Get the address handler from the region object */ | ||
472 | |||
473 | handler_obj = region_obj->region.handler; | ||
474 | if (!handler_obj) { | ||
475 | |||
476 | /* This region has no handler, all done */ | ||
477 | |||
478 | return_VOID; | ||
479 | } | ||
480 | |||
481 | /* Find this region in the handler's list */ | ||
482 | |||
483 | obj_desc = handler_obj->address_space.region_list; | ||
484 | last_obj_ptr = &handler_obj->address_space.region_list; | ||
485 | |||
486 | while (obj_desc) { | ||
487 | |||
488 | /* Is this the correct Region? */ | ||
489 | |||
490 | if (obj_desc == region_obj) { | ||
491 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
492 | "Removing Region %p from address handler %p\n", | ||
493 | region_obj, handler_obj)); | ||
494 | |||
495 | /* This is it, remove it from the handler's list */ | ||
496 | |||
497 | *last_obj_ptr = obj_desc->region.next; | ||
498 | obj_desc->region.next = NULL; /* Must clear field */ | ||
499 | |||
500 | if (acpi_ns_is_locked) { | ||
501 | status = | ||
502 | acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
503 | if (ACPI_FAILURE(status)) { | ||
504 | return_VOID; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | /* Now stop region accesses by executing the _REG method */ | ||
509 | |||
510 | status = acpi_ev_execute_reg_method(region_obj, 0); | ||
511 | if (ACPI_FAILURE(status)) { | ||
512 | ACPI_EXCEPTION((AE_INFO, status, | ||
513 | "from region _REG, [%s]", | ||
514 | acpi_ut_get_region_name | ||
515 | (region_obj->region.space_id))); | ||
516 | } | ||
517 | |||
518 | if (acpi_ns_is_locked) { | ||
519 | status = | ||
520 | acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
521 | if (ACPI_FAILURE(status)) { | ||
522 | return_VOID; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * If the region has been activated, call the setup handler with | ||
528 | * the deactivate notification | ||
529 | */ | ||
530 | if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { | ||
531 | region_setup = handler_obj->address_space.setup; | ||
532 | status = | ||
533 | region_setup(region_obj, | ||
534 | ACPI_REGION_DEACTIVATE, | ||
535 | handler_obj->address_space. | ||
536 | context, region_context); | ||
537 | |||
538 | /* Init routine may fail, just ignore errors */ | ||
539 | |||
540 | if (ACPI_FAILURE(status)) { | ||
541 | ACPI_EXCEPTION((AE_INFO, status, | ||
542 | "from region handler - deactivate, [%s]", | ||
543 | acpi_ut_get_region_name | ||
544 | (region_obj->region. | ||
545 | space_id))); | ||
546 | } | ||
547 | |||
548 | region_obj->region.flags &= | ||
549 | ~(AOPOBJ_SETUP_COMPLETE); | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * Remove handler reference in the region | ||
554 | * | ||
555 | * NOTE: this doesn't mean that the region goes away, the region | ||
556 | * is just inaccessible as indicated to the _REG method | ||
557 | * | ||
558 | * If the region is on the handler's list, this must be the | ||
559 | * region's handler | ||
560 | */ | ||
561 | region_obj->region.handler = NULL; | ||
562 | acpi_ut_remove_reference(handler_obj); | ||
563 | |||
564 | return_VOID; | ||
565 | } | ||
566 | |||
567 | /* Walk the linked list of handlers */ | ||
568 | |||
569 | last_obj_ptr = &obj_desc->region.next; | ||
570 | obj_desc = obj_desc->region.next; | ||
571 | } | ||
572 | |||
573 | /* If we get here, the region was not in the handler's region list */ | ||
574 | |||
575 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
576 | "Cannot remove region %p from address handler %p\n", | ||
577 | region_obj, handler_obj)); | ||
578 | |||
579 | return_VOID; | ||
580 | } | ||
581 | |||
582 | /******************************************************************************* | ||
583 | * | ||
584 | * FUNCTION: acpi_ev_attach_region | ||
585 | * | ||
586 | * PARAMETERS: handler_obj - Handler Object | ||
587 | * region_obj - Region Object | ||
588 | * acpi_ns_is_locked - Namespace Region Already Locked? | ||
589 | * | ||
590 | * RETURN: None | ||
591 | * | ||
592 | * DESCRIPTION: Create the association between the handler and the region; | ||
593 | * this is a two-way association. | ||
594 | * | ||
595 | ******************************************************************************/ | ||
596 | |||
597 | acpi_status | ||
598 | acpi_ev_attach_region(union acpi_operand_object *handler_obj, | ||
599 | union acpi_operand_object *region_obj, | ||
600 | u8 acpi_ns_is_locked) | ||
601 | { | ||
602 | |||
603 | ACPI_FUNCTION_TRACE(ev_attach_region); | ||
604 | |||
605 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
606 | "Adding Region [%4.4s] %p to address handler %p [%s]\n", | ||
607 | acpi_ut_get_node_name(region_obj->region.node), | ||
608 | region_obj, handler_obj, | ||
609 | acpi_ut_get_region_name(region_obj->region. | ||
610 | space_id))); | ||
611 | |||
612 | /* Link this region to the front of the handler's list */ | ||
613 | |||
614 | region_obj->region.next = handler_obj->address_space.region_list; | ||
615 | handler_obj->address_space.region_list = region_obj; | ||
616 | |||
617 | /* Install the region's handler */ | ||
618 | |||
619 | if (region_obj->region.handler) { | ||
620 | return_ACPI_STATUS(AE_ALREADY_EXISTS); | ||
621 | } | ||
622 | |||
623 | region_obj->region.handler = handler_obj; | ||
624 | acpi_ut_add_reference(handler_obj); | ||
625 | |||
626 | return_ACPI_STATUS(AE_OK); | ||
627 | } | ||
628 | |||
629 | /******************************************************************************* | ||
630 | * | ||
631 | * FUNCTION: acpi_ev_install_handler | ||
632 | * | ||
633 | * PARAMETERS: walk_namespace callback | ||
634 | * | ||
635 | * DESCRIPTION: This routine installs an address handler into objects that are | ||
636 | * of type Region or Device. | ||
637 | * | ||
638 | * If the Object is a Device, and the device has a handler of | ||
639 | * the same type, then the search is terminated in that branch. | ||
640 | * | ||
641 | * This is because the existing handler is closer to any regions | ||
642 | * in that branch than the one we are trying to install. | ||
643 | * | ||
644 | ******************************************************************************/ | ||
645 | |||
646 | static acpi_status | ||
647 | acpi_ev_install_handler(acpi_handle obj_handle, | ||
648 | u32 level, void *context, void **return_value) | ||
649 | { | ||
650 | union acpi_operand_object *handler_obj; | ||
651 | union acpi_operand_object *next_handler_obj; | ||
652 | union acpi_operand_object *obj_desc; | ||
653 | struct acpi_namespace_node *node; | ||
654 | acpi_status status; | ||
655 | |||
656 | ACPI_FUNCTION_NAME(ev_install_handler); | ||
657 | |||
658 | handler_obj = (union acpi_operand_object *)context; | ||
659 | |||
660 | /* Parameter validation */ | ||
661 | |||
662 | if (!handler_obj) { | ||
663 | return (AE_OK); | ||
664 | } | ||
665 | |||
666 | /* Convert and validate the device handle */ | ||
667 | |||
668 | node = acpi_ns_map_handle_to_node(obj_handle); | ||
669 | if (!node) { | ||
670 | return (AE_BAD_PARAMETER); | ||
671 | } | ||
672 | |||
673 | /* | ||
674 | * We only care about regions and objects that are allowed to have | ||
675 | * address space handlers | ||
676 | */ | ||
677 | if ((node->type != ACPI_TYPE_DEVICE) && | ||
678 | (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { | ||
679 | return (AE_OK); | ||
680 | } | ||
681 | |||
682 | /* Check for an existing internal object */ | ||
683 | |||
684 | obj_desc = acpi_ns_get_attached_object(node); | ||
685 | if (!obj_desc) { | ||
686 | |||
687 | /* No object, just exit */ | ||
688 | |||
689 | return (AE_OK); | ||
690 | } | ||
691 | |||
692 | /* Devices are handled differently from regions */ | ||
693 | |||
694 | if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) { | ||
695 | |||
696 | /* Check if this Device already has a handler for this address space */ | ||
697 | |||
698 | next_handler_obj = obj_desc->device.handler; | ||
699 | while (next_handler_obj) { | ||
700 | |||
701 | /* Found a handler, is it for the same address space? */ | ||
702 | |||
703 | if (next_handler_obj->address_space.space_id == | ||
704 | handler_obj->address_space.space_id) { | ||
705 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
706 | "Found handler for region [%s] in device %p(%p) handler %p\n", | ||
707 | acpi_ut_get_region_name | ||
708 | (handler_obj->address_space. | ||
709 | space_id), obj_desc, | ||
710 | next_handler_obj, | ||
711 | handler_obj)); | ||
712 | |||
713 | /* | ||
714 | * Since the object we found it on was a device, then it | ||
715 | * means that someone has already installed a handler for | ||
716 | * the branch of the namespace from this device on. Just | ||
717 | * bail out telling the walk routine to not traverse this | ||
718 | * branch. This preserves the scoping rule for handlers. | ||
719 | */ | ||
720 | return (AE_CTRL_DEPTH); | ||
721 | } | ||
722 | |||
723 | /* Walk the linked list of handlers attached to this device */ | ||
724 | |||
725 | next_handler_obj = next_handler_obj->address_space.next; | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * As long as the device didn't have a handler for this space we | ||
730 | * don't care about it. We just ignore it and proceed. | ||
731 | */ | ||
732 | return (AE_OK); | ||
733 | } | ||
734 | |||
735 | /* Object is a Region */ | ||
736 | |||
737 | if (obj_desc->region.space_id != handler_obj->address_space.space_id) { | ||
738 | |||
739 | /* This region is for a different address space, just ignore it */ | ||
740 | |||
741 | return (AE_OK); | ||
742 | } | ||
743 | |||
744 | /* | ||
745 | * Now we have a region and it is for the handler's address space type. | ||
746 | * | ||
747 | * First disconnect region for any previous handler (if any) | ||
748 | */ | ||
749 | acpi_ev_detach_region(obj_desc, FALSE); | ||
750 | |||
751 | /* Connect the region to the new handler */ | ||
752 | |||
753 | status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE); | ||
754 | return (status); | ||
755 | } | ||
756 | |||
757 | /******************************************************************************* | ||
758 | * | ||
759 | * FUNCTION: acpi_ev_install_space_handler | ||
760 | * | ||
761 | * PARAMETERS: Node - Namespace node for the device | ||
762 | * space_id - The address space ID | ||
763 | * Handler - Address of the handler | ||
764 | * Setup - Address of the setup function | ||
765 | * Context - Value passed to the handler on each access | ||
766 | * | ||
767 | * RETURN: Status | ||
768 | * | ||
769 | * DESCRIPTION: Install a handler for all op_regions of a given space_id. | ||
770 | * Assumes namespace is locked | ||
771 | * | ||
772 | ******************************************************************************/ | ||
773 | |||
774 | acpi_status | ||
775 | acpi_ev_install_space_handler(struct acpi_namespace_node * node, | ||
776 | acpi_adr_space_type space_id, | ||
777 | acpi_adr_space_handler handler, | ||
778 | acpi_adr_space_setup setup, void *context) | ||
779 | { | ||
780 | union acpi_operand_object *obj_desc; | ||
781 | union acpi_operand_object *handler_obj; | ||
782 | acpi_status status; | ||
783 | acpi_object_type type; | ||
784 | u8 flags = 0; | ||
785 | |||
786 | ACPI_FUNCTION_TRACE(ev_install_space_handler); | ||
787 | |||
788 | /* | ||
789 | * This registration is valid for only the types below and the root. This | ||
790 | * is where the default handlers get placed. | ||
791 | */ | ||
792 | if ((node->type != ACPI_TYPE_DEVICE) && | ||
793 | (node->type != ACPI_TYPE_PROCESSOR) && | ||
794 | (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) { | ||
795 | status = AE_BAD_PARAMETER; | ||
796 | goto unlock_and_exit; | ||
797 | } | ||
798 | |||
799 | if (handler == ACPI_DEFAULT_HANDLER) { | ||
800 | flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED; | ||
801 | |||
802 | switch (space_id) { | ||
803 | case ACPI_ADR_SPACE_SYSTEM_MEMORY: | ||
804 | handler = acpi_ex_system_memory_space_handler; | ||
805 | setup = acpi_ev_system_memory_region_setup; | ||
806 | break; | ||
807 | |||
808 | case ACPI_ADR_SPACE_SYSTEM_IO: | ||
809 | handler = acpi_ex_system_io_space_handler; | ||
810 | setup = acpi_ev_io_space_region_setup; | ||
811 | break; | ||
812 | |||
813 | case ACPI_ADR_SPACE_PCI_CONFIG: | ||
814 | handler = acpi_ex_pci_config_space_handler; | ||
815 | setup = acpi_ev_pci_config_region_setup; | ||
816 | break; | ||
817 | |||
818 | case ACPI_ADR_SPACE_CMOS: | ||
819 | handler = acpi_ex_cmos_space_handler; | ||
820 | setup = acpi_ev_cmos_region_setup; | ||
821 | break; | ||
822 | |||
823 | case ACPI_ADR_SPACE_PCI_BAR_TARGET: | ||
824 | handler = acpi_ex_pci_bar_space_handler; | ||
825 | setup = acpi_ev_pci_bar_region_setup; | ||
826 | break; | ||
827 | |||
828 | case ACPI_ADR_SPACE_DATA_TABLE: | ||
829 | handler = acpi_ex_data_table_space_handler; | ||
830 | setup = NULL; | ||
831 | break; | ||
832 | |||
833 | default: | ||
834 | status = AE_BAD_PARAMETER; | ||
835 | goto unlock_and_exit; | ||
836 | } | ||
837 | } | ||
838 | |||
839 | /* If the caller hasn't specified a setup routine, use the default */ | ||
840 | |||
841 | if (!setup) { | ||
842 | setup = acpi_ev_default_region_setup; | ||
843 | } | ||
844 | |||
845 | /* Check for an existing internal object */ | ||
846 | |||
847 | obj_desc = acpi_ns_get_attached_object(node); | ||
848 | if (obj_desc) { | ||
849 | /* | ||
850 | * The attached device object already exists. Make sure the handler | ||
851 | * is not already installed. | ||
852 | */ | ||
853 | handler_obj = obj_desc->device.handler; | ||
854 | |||
855 | /* Walk the handler list for this device */ | ||
856 | |||
857 | while (handler_obj) { | ||
858 | |||
859 | /* Same space_id indicates a handler already installed */ | ||
860 | |||
861 | if (handler_obj->address_space.space_id == space_id) { | ||
862 | if (handler_obj->address_space.handler == | ||
863 | handler) { | ||
864 | /* | ||
865 | * It is (relatively) OK to attempt to install the SAME | ||
866 | * handler twice. This can easily happen with the | ||
867 | * PCI_Config space. | ||
868 | */ | ||
869 | status = AE_SAME_HANDLER; | ||
870 | goto unlock_and_exit; | ||
871 | } else { | ||
872 | /* A handler is already installed */ | ||
873 | |||
874 | status = AE_ALREADY_EXISTS; | ||
875 | } | ||
876 | goto unlock_and_exit; | ||
877 | } | ||
878 | |||
879 | /* Walk the linked list of handlers */ | ||
880 | |||
881 | handler_obj = handler_obj->address_space.next; | ||
882 | } | ||
883 | } else { | ||
884 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
885 | "Creating object on Device %p while installing handler\n", | ||
886 | node)); | ||
887 | |||
888 | /* obj_desc does not exist, create one */ | ||
889 | |||
890 | if (node->type == ACPI_TYPE_ANY) { | ||
891 | type = ACPI_TYPE_DEVICE; | ||
892 | } else { | ||
893 | type = node->type; | ||
894 | } | ||
895 | |||
896 | obj_desc = acpi_ut_create_internal_object(type); | ||
897 | if (!obj_desc) { | ||
898 | status = AE_NO_MEMORY; | ||
899 | goto unlock_and_exit; | ||
900 | } | ||
901 | |||
902 | /* Init new descriptor */ | ||
903 | |||
904 | obj_desc->common.type = (u8) type; | ||
905 | |||
906 | /* Attach the new object to the Node */ | ||
907 | |||
908 | status = acpi_ns_attach_object(node, obj_desc, type); | ||
909 | |||
910 | /* Remove local reference to the object */ | ||
911 | |||
912 | acpi_ut_remove_reference(obj_desc); | ||
913 | |||
914 | if (ACPI_FAILURE(status)) { | ||
915 | goto unlock_and_exit; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
920 | "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n", | ||
921 | acpi_ut_get_region_name(space_id), space_id, | ||
922 | acpi_ut_get_node_name(node), node, obj_desc)); | ||
923 | |||
924 | /* | ||
925 | * Install the handler | ||
926 | * | ||
927 | * At this point there is no existing handler. Just allocate the object | ||
928 | * for the handler and link it into the list. | ||
929 | */ | ||
930 | handler_obj = | ||
931 | acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER); | ||
932 | if (!handler_obj) { | ||
933 | status = AE_NO_MEMORY; | ||
934 | goto unlock_and_exit; | ||
935 | } | ||
936 | |||
937 | /* Init handler obj */ | ||
938 | |||
939 | handler_obj->address_space.space_id = (u8) space_id; | ||
940 | handler_obj->address_space.handler_flags = flags; | ||
941 | handler_obj->address_space.region_list = NULL; | ||
942 | handler_obj->address_space.node = node; | ||
943 | handler_obj->address_space.handler = handler; | ||
944 | handler_obj->address_space.context = context; | ||
945 | handler_obj->address_space.setup = setup; | ||
946 | |||
947 | /* Install at head of Device.address_space list */ | ||
948 | |||
949 | handler_obj->address_space.next = obj_desc->device.handler; | ||
950 | |||
951 | /* | ||
952 | * The Device object is the first reference on the handler_obj. | ||
953 | * Each region that uses the handler adds a reference. | ||
954 | */ | ||
955 | obj_desc->device.handler = handler_obj; | ||
956 | |||
957 | /* | ||
958 | * Walk the namespace finding all of the regions this | ||
959 | * handler will manage. | ||
960 | * | ||
961 | * Start at the device and search the branch toward | ||
962 | * the leaf nodes until either the leaf is encountered or | ||
963 | * a device is detected that has an address handler of the | ||
964 | * same type. | ||
965 | * | ||
966 | * In either case, back up and search down the remainder | ||
967 | * of the branch | ||
968 | */ | ||
969 | status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, | ||
970 | ACPI_NS_WALK_UNLOCK, | ||
971 | acpi_ev_install_handler, handler_obj, | ||
972 | NULL); | ||
973 | |||
974 | unlock_and_exit: | ||
975 | return_ACPI_STATUS(status); | ||
976 | } | ||
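
Drivers normally reach this routine through the public entry point acpi_install_address_space_handler(), mentioned in the comments earlier in this file. A hedged usage sketch follows; the device handle dev_handle is an assumption, my_region_handler is the illustrative handler sketched above, and passing a NULL setup routine lets the core substitute acpi_ev_default_region_setup as described in the code.

    #include <acpi/acpi.h>

    /*
     * Sketch: register my_region_handler for the Embedded Controller address
     * space on dev_handle. AE_SAME_HANDLER and AE_ALREADY_EXISTS indicate that
     * a handler for this space is already installed on this namespace branch.
     */
    static acpi_status register_ec_region_handler(acpi_handle dev_handle)
    {
            acpi_status status;

            status = acpi_install_address_space_handler(dev_handle,
                                                        ACPI_ADR_SPACE_EC,
                                                        my_region_handler,
                                                        NULL,   /* default setup */
                                                        NULL);  /* handler context */
            if (ACPI_FAILURE(status) &&
                (status != AE_SAME_HANDLER) && (status != AE_ALREADY_EXISTS)) {
                    return (status);
            }

            return (AE_OK);
    }
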
977 | |||
978 | /******************************************************************************* | ||
979 | * | ||
980 | * FUNCTION: acpi_ev_execute_reg_methods | ||
981 | * | ||
982 | * PARAMETERS: Node - Namespace node for the device | ||
983 | * space_id - The address space ID | ||
984 | * | ||
985 | * RETURN: Status | ||
986 | * | ||
987 | * DESCRIPTION: Run all _REG methods for the input Space ID; | ||
988 | * Note: assumes namespace is locked, or system init time. | ||
989 | * | ||
990 | ******************************************************************************/ | ||
991 | |||
992 | acpi_status | ||
993 | acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, | ||
994 | acpi_adr_space_type space_id) | ||
995 | { | ||
996 | acpi_status status; | ||
997 | |||
998 | ACPI_FUNCTION_TRACE(ev_execute_reg_methods); | ||
999 | |||
1000 | /* | ||
1001 | * Run all _REG methods for all Operation Regions for this space ID. This | ||
1002 | * is a separate walk in order to handle any interdependencies between | ||
1003 | * regions and _REG methods. (i.e. handlers must be installed for all | ||
1004 | * regions of this Space ID before we can run any _REG methods) | ||
1005 | */ | ||
1006 | status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, | ||
1007 | ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, | ||
1008 | &space_id, NULL); | ||
1009 | |||
1010 | return_ACPI_STATUS(status); | ||
1011 | } | ||
1012 | |||
1013 | /******************************************************************************* | ||
1014 | * | ||
1015 | * FUNCTION: acpi_ev_reg_run | ||
1016 | * | ||
1017 | * PARAMETERS: walk_namespace callback | ||
1018 | * | ||
1019 | * DESCRIPTION: Run _REG method for region objects of the requested space_id | ||
1020 | * | ||
1021 | ******************************************************************************/ | ||
1022 | |||
1023 | static acpi_status | ||
1024 | acpi_ev_reg_run(acpi_handle obj_handle, | ||
1025 | u32 level, void *context, void **return_value) | ||
1026 | { | ||
1027 | union acpi_operand_object *obj_desc; | ||
1028 | struct acpi_namespace_node *node; | ||
1029 | acpi_adr_space_type space_id; | ||
1030 | acpi_status status; | ||
1031 | |||
1032 | space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context); | ||
1033 | |||
1034 | /* Convert and validate the device handle */ | ||
1035 | |||
1036 | node = acpi_ns_map_handle_to_node(obj_handle); | ||
1037 | if (!node) { | ||
1038 | return (AE_BAD_PARAMETER); | ||
1039 | } | ||
1040 | |||
1041 | /* | ||
1042 | * We only care about regions and objects that are allowed to have address | ||
1043 | * space handlers | ||
1044 | */ | ||
1045 | if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { | ||
1046 | return (AE_OK); | ||
1047 | } | ||
1048 | |||
1049 | /* Check for an existing internal object */ | ||
1050 | |||
1051 | obj_desc = acpi_ns_get_attached_object(node); | ||
1052 | if (!obj_desc) { | ||
1053 | |||
1054 | /* No object, just exit */ | ||
1055 | |||
1056 | return (AE_OK); | ||
1057 | } | ||
1058 | |||
1059 | /* Object is a Region */ | ||
1060 | |||
1061 | if (obj_desc->region.space_id != space_id) { | ||
1062 | |||
1063 | /* This region is for a different address space, just ignore it */ | ||
1064 | |||
1065 | return (AE_OK); | ||
1066 | } | ||
1067 | |||
1068 | status = acpi_ev_execute_reg_method(obj_desc, 1); | ||
1069 | return (status); | ||
1070 | } | ||
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c deleted file mode 100644 index 1b7f9fdbef15..000000000000 --- a/drivers/acpi/events/evrgnini.c +++ /dev/null | |||
@@ -1,684 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evrgnini - ACPI address_space (op_region) init | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evrgnini") | ||
51 | |||
52 | /* Local prototypes */ | ||
53 | static u8 acpi_ev_match_pci_root_bridge(char *id); | ||
54 | |||
55 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); | ||
56 | |||
57 | /******************************************************************************* | ||
58 | * | ||
59 | * FUNCTION: acpi_ev_system_memory_region_setup | ||
60 | * | ||
61 | * PARAMETERS: Handle - Region we are interested in | ||
62 | * Function - Start or stop | ||
63 | * handler_context - Address space handler context | ||
64 | * region_context - Region specific context | ||
65 | * | ||
66 | * RETURN: Status | ||
67 | * | ||
68 | * DESCRIPTION: Setup a system_memory operation region | ||
69 | * | ||
70 | ******************************************************************************/ | ||
71 | |||
72 | acpi_status | ||
73 | acpi_ev_system_memory_region_setup(acpi_handle handle, | ||
74 | u32 function, | ||
75 | void *handler_context, void **region_context) | ||
76 | { | ||
77 | union acpi_operand_object *region_desc = | ||
78 | (union acpi_operand_object *)handle; | ||
79 | struct acpi_mem_space_context *local_region_context; | ||
80 | |||
81 | ACPI_FUNCTION_TRACE(ev_system_memory_region_setup); | ||
82 | |||
83 | if (function == ACPI_REGION_DEACTIVATE) { | ||
84 | if (*region_context) { | ||
85 | local_region_context = | ||
86 | (struct acpi_mem_space_context *)*region_context; | ||
87 | |||
88 | /* Delete a cached mapping if present */ | ||
89 | |||
90 | if (local_region_context->mapped_length) { | ||
91 | acpi_os_unmap_memory(local_region_context-> | ||
92 | mapped_logical_address, | ||
93 | local_region_context-> | ||
94 | mapped_length); | ||
95 | } | ||
96 | ACPI_FREE(local_region_context); | ||
97 | *region_context = NULL; | ||
98 | } | ||
99 | return_ACPI_STATUS(AE_OK); | ||
100 | } | ||
101 | |||
102 | /* Create a new context */ | ||
103 | |||
104 | local_region_context = | ||
105 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context)); | ||
106 | if (!(local_region_context)) { | ||
107 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
108 | } | ||
109 | |||
110 | /* Save the region length and address for use in the handler */ | ||
111 | |||
112 | local_region_context->length = region_desc->region.length; | ||
113 | local_region_context->address = region_desc->region.address; | ||
114 | |||
115 | *region_context = local_region_context; | ||
116 | return_ACPI_STATUS(AE_OK); | ||
117 | } | ||
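
A custom setup routine follows the same acpi_adr_space_setup shape used here: allocate per-region context on ACPI_REGION_ACTIVATE and release it on ACPI_REGION_DEACTIVATE. Below is a minimal sketch; the context structure and its contents are invented purely for illustration.

    #include <acpi/acpi.h>

    /* Illustrative per-region context; real contents depend on the device */
    struct my_region_context {
            u32 access_count;
    };

    /* Sketch of a region setup callback (acpi_adr_space_setup signature) */
    static acpi_status
    my_region_setup(acpi_handle region_handle,
                    u32 function,
                    void *handler_context, void **region_context)
    {
            if (function == ACPI_REGION_DEACTIVATE) {
                    if (*region_context) {
                            ACPI_FREE(*region_context);
                            *region_context = NULL;
                    }
                    return (AE_OK);
            }

            /* ACPI_REGION_ACTIVATE: create the context used by later accesses */
            *region_context =
                ACPI_ALLOCATE_ZEROED(sizeof(struct my_region_context));
            if (!*region_context) {
                    return (AE_NO_MEMORY);
            }

            return (AE_OK);
    }
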
118 | |||
119 | /******************************************************************************* | ||
120 | * | ||
121 | * FUNCTION: acpi_ev_io_space_region_setup | ||
122 | * | ||
123 | * PARAMETERS: Handle - Region we are interested in | ||
124 | * Function - Start or stop | ||
125 | * handler_context - Address space handler context | ||
126 | * region_context - Region specific context | ||
127 | * | ||
128 | * RETURN: Status | ||
129 | * | ||
130 | * DESCRIPTION: Setup an IO operation region | ||
131 | * | ||
132 | ******************************************************************************/ | ||
133 | |||
134 | acpi_status | ||
135 | acpi_ev_io_space_region_setup(acpi_handle handle, | ||
136 | u32 function, | ||
137 | void *handler_context, void **region_context) | ||
138 | { | ||
139 | ACPI_FUNCTION_TRACE(ev_io_space_region_setup); | ||
140 | |||
141 | if (function == ACPI_REGION_DEACTIVATE) { | ||
142 | *region_context = NULL; | ||
143 | } else { | ||
144 | *region_context = handler_context; | ||
145 | } | ||
146 | |||
147 | return_ACPI_STATUS(AE_OK); | ||
148 | } | ||
149 | |||
150 | /******************************************************************************* | ||
151 | * | ||
152 | * FUNCTION: acpi_ev_pci_config_region_setup | ||
153 | * | ||
154 | * PARAMETERS: Handle - Region we are interested in | ||
155 | * Function - Start or stop | ||
156 | * handler_context - Address space handler context | ||
157 | * region_context - Region specific context | ||
158 | * | ||
159 | * RETURN: Status | ||
160 | * | ||
161 | * DESCRIPTION: Setup a PCI_Config operation region | ||
162 | * | ||
163 | * MUTEX: Assumes namespace is not locked | ||
164 | * | ||
165 | ******************************************************************************/ | ||
166 | |||
167 | acpi_status | ||
168 | acpi_ev_pci_config_region_setup(acpi_handle handle, | ||
169 | u32 function, | ||
170 | void *handler_context, void **region_context) | ||
171 | { | ||
172 | acpi_status status = AE_OK; | ||
173 | acpi_integer pci_value; | ||
174 | struct acpi_pci_id *pci_id = *region_context; | ||
175 | union acpi_operand_object *handler_obj; | ||
176 | struct acpi_namespace_node *parent_node; | ||
177 | struct acpi_namespace_node *pci_root_node; | ||
178 | struct acpi_namespace_node *pci_device_node; | ||
179 | union acpi_operand_object *region_obj = | ||
180 | (union acpi_operand_object *)handle; | ||
181 | |||
182 | ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); | ||
183 | |||
184 | handler_obj = region_obj->region.handler; | ||
185 | if (!handler_obj) { | ||
186 | /* | ||
187 | * No installed handler. This shouldn't happen because the dispatch | ||
188 | * routine checks before we get here, but we check again just in case. | ||
189 | */ | ||
190 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
191 | "Attempting to init a region %p, with no handler\n", | ||
192 | region_obj)); | ||
193 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
194 | } | ||
195 | |||
196 | *region_context = NULL; | ||
197 | if (function == ACPI_REGION_DEACTIVATE) { | ||
198 | if (pci_id) { | ||
199 | ACPI_FREE(pci_id); | ||
200 | } | ||
201 | return_ACPI_STATUS(status); | ||
202 | } | ||
203 | |||
204 | parent_node = acpi_ns_get_parent_node(region_obj->region.node); | ||
205 | |||
206 | /* | ||
207 | * Get the _SEG and _BBN values from the device upon which the handler | ||
208 | * is installed. | ||
209 | * | ||
210 | * We need to get the _SEG and _BBN objects relative to the PCI BUS device. | ||
211 | * This is the device the handler has been registered to handle. | ||
212 | */ | ||
213 | |||
214 | /* | ||
215 | * If the address_space.Node is still pointing to the root, we need | ||
216 | * to scan upward for a PCI Root bridge and re-associate the op_region | ||
217 | * handlers with that device. | ||
218 | */ | ||
219 | if (handler_obj->address_space.node == acpi_gbl_root_node) { | ||
220 | |||
221 | /* Start search from the parent object */ | ||
222 | |||
223 | pci_root_node = parent_node; | ||
224 | while (pci_root_node != acpi_gbl_root_node) { | ||
225 | |||
226 | /* Get the _HID/_CID in order to detect a root_bridge */ | ||
227 | |||
228 | if (acpi_ev_is_pci_root_bridge(pci_root_node)) { | ||
229 | |||
230 | /* Install a handler for this PCI root bridge */ | ||
231 | |||
232 | status = | ||
233 | acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); | ||
234 | if (ACPI_FAILURE(status)) { | ||
235 | if (status == AE_SAME_HANDLER) { | ||
236 | /* | ||
237 | * It is OK if the handler is already installed on the | ||
238 | * root bridge. Still need to return a context object | ||
239 | * for the new PCI_Config operation region, however. | ||
240 | */ | ||
241 | status = AE_OK; | ||
242 | } else { | ||
243 | ACPI_EXCEPTION((AE_INFO, status, | ||
244 | "Could not install PciConfig handler for Root Bridge %4.4s", | ||
245 | acpi_ut_get_node_name | ||
246 | (pci_root_node))); | ||
247 | } | ||
248 | } | ||
249 | break; | ||
250 | } | ||
251 | |||
252 | pci_root_node = acpi_ns_get_parent_node(pci_root_node); | ||
253 | } | ||
254 | |||
255 | /* PCI root bridge not found, use namespace root node */ | ||
256 | } else { | ||
257 | pci_root_node = handler_obj->address_space.node; | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * If this region is now initialized, we are done. | ||
262 | * (install_address_space_handler could have initialized it) | ||
263 | */ | ||
264 | if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { | ||
265 | return_ACPI_STATUS(AE_OK); | ||
266 | } | ||
267 | |||
268 | /* Region is still not initialized. Create a new context */ | ||
269 | |||
270 | pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id)); | ||
271 | if (!pci_id) { | ||
272 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * For PCI_Config space access, we need the segment, bus, device and | ||
277 | * function numbers. Acquire them here. | ||
278 | * | ||
279 | * Find the parent device object. (This allows the operation region to be | ||
280 | * within a subscope under the device, such as a control method.) | ||
281 | */ | ||
282 | pci_device_node = region_obj->region.node; | ||
283 | while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) { | ||
284 | pci_device_node = acpi_ns_get_parent_node(pci_device_node); | ||
285 | } | ||
286 | |||
287 | if (!pci_device_node) { | ||
288 | ACPI_FREE(pci_id); | ||
289 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * Get the PCI device and function numbers from the _ADR object contained | ||
294 | * in the parent's scope. | ||
295 | */ | ||
296 | status = | ||
297 | acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node, | ||
298 | &pci_value); | ||
299 | |||
300 | /* | ||
301 | * The default is zero, and since the allocation above zeroed the data, | ||
302 | * just do nothing on failure. | ||
303 | */ | ||
304 | if (ACPI_SUCCESS(status)) { | ||
305 | pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value)); | ||
306 | pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value)); | ||
307 | } | ||
308 | |||
309 | /* The PCI segment number comes from the _SEG method */ | ||
310 | |||
311 | status = | ||
312 | acpi_ut_evaluate_numeric_object(METHOD_NAME__SEG, pci_root_node, | ||
313 | &pci_value); | ||
314 | if (ACPI_SUCCESS(status)) { | ||
315 | pci_id->segment = ACPI_LOWORD(pci_value); | ||
316 | } | ||
317 | |||
318 | /* The PCI bus number comes from the _BBN method */ | ||
319 | |||
320 | status = | ||
321 | acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN, pci_root_node, | ||
322 | &pci_value); | ||
323 | if (ACPI_SUCCESS(status)) { | ||
324 | pci_id->bus = ACPI_LOWORD(pci_value); | ||
325 | } | ||
326 | |||
327 | /* Complete this device's pci_id */ | ||
328 | |||
329 | acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id); | ||
330 | |||
331 | *region_context = pci_id; | ||
332 | return_ACPI_STATUS(AE_OK); | ||
333 | } | ||
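
The _ADR decoding above packs the PCI device number into the upper 16 bits and the function number into the lower 16 bits of the low dword of the returned integer. A worked example follows, with the _ADR value chosen only for illustration.

    #include <acpi/acpi.h>

    /* Worked example: _ADR = 0x001F0003 names PCI device 0x1F, function 3 */
    static void adr_decode_example(void)
    {
            acpi_integer pci_value = 0x001F0003;
            u16 device = ACPI_HIWORD(ACPI_LODWORD(pci_value));      /* == 0x001F */
            u16 function = ACPI_LOWORD(ACPI_LODWORD(pci_value));    /* == 0x0003 */

            (void)device;
            (void)function;
    }
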
334 | |||
335 | /******************************************************************************* | ||
336 | * | ||
337 | * FUNCTION: acpi_ev_match_pci_root_bridge | ||
338 | * | ||
339 | * PARAMETERS: Id - The HID/CID in string format | ||
340 | * | ||
341 | * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge | ||
342 | * | ||
343 | * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID. | ||
344 | * | ||
345 | ******************************************************************************/ | ||
346 | |||
347 | static u8 acpi_ev_match_pci_root_bridge(char *id) | ||
348 | { | ||
349 | |||
350 | /* | ||
351 | * Check if this is a PCI root. | ||
352 | * ACPI 3.0+: check for a PCI Express root also. | ||
353 | */ | ||
354 | if (!(ACPI_STRNCMP(id, | ||
355 | PCI_ROOT_HID_STRING, | ||
356 | sizeof(PCI_ROOT_HID_STRING))) || | ||
357 | !(ACPI_STRNCMP(id, | ||
358 | PCI_EXPRESS_ROOT_HID_STRING, | ||
359 | sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) { | ||
360 | return (TRUE); | ||
361 | } | ||
362 | |||
363 | return (FALSE); | ||
364 | } | ||
365 | |||
366 | /******************************************************************************* | ||
367 | * | ||
368 | * FUNCTION: acpi_ev_is_pci_root_bridge | ||
369 | * | ||
370 | * PARAMETERS: Node - Device node being examined | ||
371 | * | ||
372 | * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge | ||
373 | * | ||
374 | * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by | ||
375 | * examining the _HID and _CID for the device. | ||
376 | * | ||
377 | ******************************************************************************/ | ||
378 | |||
379 | static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) | ||
380 | { | ||
381 | acpi_status status; | ||
382 | struct acpica_device_id hid; | ||
383 | struct acpi_compatible_id_list *cid; | ||
384 | u32 i; | ||
385 | |||
386 | /* Get the _HID and check for a PCI Root Bridge */ | ||
387 | |||
388 | status = acpi_ut_execute_HID(node, &hid); | ||
389 | if (ACPI_FAILURE(status)) { | ||
390 | return (FALSE); | ||
391 | } | ||
392 | |||
393 | if (acpi_ev_match_pci_root_bridge(hid.value)) { | ||
394 | return (TRUE); | ||
395 | } | ||
396 | |||
397 | /* The _HID did not match. Get the _CID and check for a PCI Root Bridge */ | ||
398 | |||
399 | status = acpi_ut_execute_CID(node, &cid); | ||
400 | if (ACPI_FAILURE(status)) { | ||
401 | return (FALSE); | ||
402 | } | ||
403 | |||
404 | /* Check all _CIDs in the returned list */ | ||
405 | |||
406 | for (i = 0; i < cid->count; i++) { | ||
407 | if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) { | ||
408 | ACPI_FREE(cid); | ||
409 | return (TRUE); | ||
410 | } | ||
411 | } | ||
412 | |||
413 | ACPI_FREE(cid); | ||
414 | return (FALSE); | ||
415 | } | ||
416 | |||
417 | /******************************************************************************* | ||
418 | * | ||
419 | * FUNCTION: acpi_ev_pci_bar_region_setup | ||
420 | * | ||
421 | * PARAMETERS: Handle - Region we are interested in | ||
422 | * Function - Start or stop | ||
423 | * handler_context - Address space handler context | ||
424 | * region_context - Region specific context | ||
425 | * | ||
426 | * RETURN: Status | ||
427 | * | ||
428 | * DESCRIPTION: Setup a pci_bar operation region | ||
429 | * | ||
430 | * MUTEX: Assumes namespace is not locked | ||
431 | * | ||
432 | ******************************************************************************/ | ||
433 | |||
434 | acpi_status | ||
435 | acpi_ev_pci_bar_region_setup(acpi_handle handle, | ||
436 | u32 function, | ||
437 | void *handler_context, void **region_context) | ||
438 | { | ||
439 | ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup); | ||
440 | |||
441 | return_ACPI_STATUS(AE_OK); | ||
442 | } | ||
443 | |||
444 | /******************************************************************************* | ||
445 | * | ||
446 | * FUNCTION: acpi_ev_cmos_region_setup | ||
447 | * | ||
448 | * PARAMETERS: Handle - Region we are interested in | ||
449 | * Function - Start or stop | ||
450 | * handler_context - Address space handler context | ||
451 | * region_context - Region specific context | ||
452 | * | ||
453 | * RETURN: Status | ||
454 | * | ||
455 | * DESCRIPTION: Setup a CMOS operation region | ||
456 | * | ||
457 | * MUTEX: Assumes namespace is not locked | ||
458 | * | ||
459 | ******************************************************************************/ | ||
460 | |||
461 | acpi_status | ||
462 | acpi_ev_cmos_region_setup(acpi_handle handle, | ||
463 | u32 function, | ||
464 | void *handler_context, void **region_context) | ||
465 | { | ||
466 | ACPI_FUNCTION_TRACE(ev_cmos_region_setup); | ||
467 | |||
468 | return_ACPI_STATUS(AE_OK); | ||
469 | } | ||
470 | |||
471 | /******************************************************************************* | ||
472 | * | ||
473 | * FUNCTION: acpi_ev_default_region_setup | ||
474 | * | ||
475 | * PARAMETERS: Handle - Region we are interested in | ||
476 | * Function - Start or stop | ||
477 | * handler_context - Address space handler context | ||
478 | * region_context - Region specific context | ||
479 | * | ||
480 | * RETURN: Status | ||
481 | * | ||
482 | * DESCRIPTION: Default region initialization | ||
483 | * | ||
484 | ******************************************************************************/ | ||
485 | |||
486 | acpi_status | ||
487 | acpi_ev_default_region_setup(acpi_handle handle, | ||
488 | u32 function, | ||
489 | void *handler_context, void **region_context) | ||
490 | { | ||
491 | ACPI_FUNCTION_TRACE(ev_default_region_setup); | ||
492 | |||
493 | if (function == ACPI_REGION_DEACTIVATE) { | ||
494 | *region_context = NULL; | ||
495 | } else { | ||
496 | *region_context = handler_context; | ||
497 | } | ||
498 | |||
499 | return_ACPI_STATUS(AE_OK); | ||
500 | } | ||
501 | |||
502 | /******************************************************************************* | ||
503 | * | ||
504 | * FUNCTION: acpi_ev_initialize_region | ||
505 | * | ||
506 | * PARAMETERS: region_obj - Region we are initializing | ||
507 | * acpi_ns_locked - Is namespace locked? | ||
508 | * | ||
509 | * RETURN: Status | ||
510 | * | ||
511 | * DESCRIPTION: Initializes the region, finds any _REG methods and saves them | ||
512 | * for execution at a later time | ||
513 | * | ||
514 | * Get the appropriate address space handler for a newly | ||
515 | * created region. | ||
516 | * | ||
517 | * This also performs address space specific initialization. For | ||
518 | * example, PCI regions must have an _ADR object that contains | ||
519 | * a PCI address in the scope of the definition. This address is | ||
520 | * required to perform an access to PCI config space. | ||
521 | * | ||
522 | * MUTEX: Interpreter should be unlocked, because we may run the _REG | ||
523 | * method for this region. | ||
524 | * | ||
525 | ******************************************************************************/ | ||
526 | |||
527 | acpi_status | ||
528 | acpi_ev_initialize_region(union acpi_operand_object *region_obj, | ||
529 | u8 acpi_ns_locked) | ||
530 | { | ||
531 | union acpi_operand_object *handler_obj; | ||
532 | union acpi_operand_object *obj_desc; | ||
533 | acpi_adr_space_type space_id; | ||
534 | struct acpi_namespace_node *node; | ||
535 | acpi_status status; | ||
536 | struct acpi_namespace_node *method_node; | ||
537 | acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG; | ||
538 | union acpi_operand_object *region_obj2; | ||
539 | |||
540 | ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked); | ||
541 | |||
542 | if (!region_obj) { | ||
543 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
544 | } | ||
545 | |||
546 | if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) { | ||
547 | return_ACPI_STATUS(AE_OK); | ||
548 | } | ||
549 | |||
550 | region_obj2 = acpi_ns_get_secondary_object(region_obj); | ||
551 | if (!region_obj2) { | ||
552 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
553 | } | ||
554 | |||
555 | node = acpi_ns_get_parent_node(region_obj->region.node); | ||
556 | space_id = region_obj->region.space_id; | ||
557 | |||
558 | /* Setup defaults */ | ||
559 | |||
560 | region_obj->region.handler = NULL; | ||
561 | region_obj2->extra.method_REG = NULL; | ||
562 | region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE); | ||
563 | region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED; | ||
564 | |||
565 | /* Find any "_REG" method associated with this region definition */ | ||
566 | |||
567 | status = | ||
568 | acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD, | ||
569 | &method_node); | ||
570 | if (ACPI_SUCCESS(status)) { | ||
571 | /* | ||
572 | * The _REG method is optional and there can be only one per region | ||
573 | * definition. This will be executed when the handler is attached | ||
574 | * or removed | ||
575 | */ | ||
576 | region_obj2->extra.method_REG = method_node; | ||
577 | } | ||
578 | |||
579 | /* | ||
580 | * The following loop depends upon the root Node having no parent | ||
581 | * i.e., acpi_gbl_root_node->parent_entry being set to NULL | ||
582 | */ | ||
583 | while (node) { | ||
584 | |||
585 | /* Check to see if a handler exists */ | ||
586 | |||
587 | handler_obj = NULL; | ||
588 | obj_desc = acpi_ns_get_attached_object(node); | ||
589 | if (obj_desc) { | ||
590 | |||
591 | /* Can only be a handler if the object exists */ | ||
592 | |||
593 | switch (node->type) { | ||
594 | case ACPI_TYPE_DEVICE: | ||
595 | |||
596 | handler_obj = obj_desc->device.handler; | ||
597 | break; | ||
598 | |||
599 | case ACPI_TYPE_PROCESSOR: | ||
600 | |||
601 | handler_obj = obj_desc->processor.handler; | ||
602 | break; | ||
603 | |||
604 | case ACPI_TYPE_THERMAL: | ||
605 | |||
606 | handler_obj = obj_desc->thermal_zone.handler; | ||
607 | break; | ||
608 | |||
609 | default: | ||
610 | /* Ignore other objects */ | ||
611 | break; | ||
612 | } | ||
613 | |||
614 | while (handler_obj) { | ||
615 | |||
616 | /* Is this handler of the correct type? */ | ||
617 | |||
618 | if (handler_obj->address_space.space_id == | ||
619 | space_id) { | ||
620 | |||
621 | /* Found correct handler */ | ||
622 | |||
623 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
624 | "Found handler %p for region %p in obj %p\n", | ||
625 | handler_obj, | ||
626 | region_obj, | ||
627 | obj_desc)); | ||
628 | |||
629 | status = | ||
630 | acpi_ev_attach_region(handler_obj, | ||
631 | region_obj, | ||
632 | acpi_ns_locked); | ||
633 | |||
634 | /* | ||
635 | * Tell all users that this region is usable by running the _REG | ||
636 | * method | ||
637 | */ | ||
638 | if (acpi_ns_locked) { | ||
639 | status = | ||
640 | acpi_ut_release_mutex | ||
641 | (ACPI_MTX_NAMESPACE); | ||
642 | if (ACPI_FAILURE(status)) { | ||
643 | return_ACPI_STATUS | ||
644 | (status); | ||
645 | } | ||
646 | } | ||
647 | |||
648 | status = | ||
649 | acpi_ev_execute_reg_method | ||
650 | (region_obj, 1); | ||
651 | |||
652 | if (acpi_ns_locked) { | ||
653 | status = | ||
654 | acpi_ut_acquire_mutex | ||
655 | (ACPI_MTX_NAMESPACE); | ||
656 | if (ACPI_FAILURE(status)) { | ||
657 | return_ACPI_STATUS | ||
658 | (status); | ||
659 | } | ||
660 | } | ||
661 | |||
662 | return_ACPI_STATUS(AE_OK); | ||
663 | } | ||
664 | |||
665 | /* Try next handler in the list */ | ||
666 | |||
667 | handler_obj = handler_obj->address_space.next; | ||
668 | } | ||
669 | } | ||
670 | |||
671 | /* This node does not have the handler we need; Pop up one level */ | ||
672 | |||
673 | node = acpi_ns_get_parent_node(node); | ||
674 | } | ||
675 | |||
676 | /* If we get here, there is no handler for this region */ | ||
677 | |||
678 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
679 | "No handler for RegionType %s(%X) (RegionObj %p)\n", | ||
680 | acpi_ut_get_region_name(space_id), space_id, | ||
681 | region_obj)); | ||
682 | |||
683 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
684 | } | ||
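For context, the handler that the upward walk above searches for is the one a driver registers on an ancestor device through the public acpi_install_address_space_handler() interface. The following is a hedged sketch of such a registration, not part of this file; the my_* names are hypothetical, ACPI_ADR_SPACE_EC is just one example space, and passing NULL for the optional setup callback mirrors what common callers do.

#include <acpi/acpi.h>

static acpi_status
my_space_handler(u32 function, acpi_physical_address address,
                 u32 bit_width, acpi_integer *value,
                 void *handler_context, void *region_context)
{
        /* Perform the read or write requested for this operation region */
        return (AE_OK);
}

static acpi_status my_install_handler(acpi_handle device)
{
        /*
         * Registering on 'device' makes the handler visible to every region
         * below it in the namespace; acpi_ev_initialize_region() finds it by
         * walking upward from the region's parent node. NULL is passed for
         * the optional region-setup callback and the handler context.
         */
        return acpi_install_address_space_handler(device, ACPI_ADR_SPACE_EC,
                                                  my_space_handler, NULL, NULL);
}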
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c deleted file mode 100644 index 18dce10c5fb1..000000000000 --- a/drivers/acpi/events/evsci.c +++ /dev/null | |||
@@ -1,183 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Module Name: evsci - System Control Interrupt configuration and | ||
4 | * legacy to ACPI mode state transition functions | ||
5 | * | ||
6 | ******************************************************************************/ | ||
7 | |||
8 | /* | ||
9 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
10 | * All rights reserved. | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | ||
13 | * modification, are permitted provided that the following conditions | ||
14 | * are met: | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions, and the following disclaimer, | ||
17 | * without modification. | ||
18 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
19 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
20 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
21 | * including a substantially similar Disclaimer requirement for further | ||
22 | * binary redistribution. | ||
23 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
24 | * of any contributors may be used to endorse or promote products derived | ||
25 | * from this software without specific prior written permission. | ||
26 | * | ||
27 | * Alternatively, this software may be distributed under the terms of the | ||
28 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
29 | * Software Foundation. | ||
30 | * | ||
31 | * NO WARRANTY | ||
32 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
33 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
34 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
35 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
36 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
37 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
38 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
39 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
40 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
41 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
42 | * POSSIBILITY OF SUCH DAMAGES. | ||
43 | */ | ||
44 | |||
45 | #include <acpi/acpi.h> | ||
46 | #include <acpi/accommon.h> | ||
47 | #include <acpi/acevents.h> | ||
48 | |||
49 | #define _COMPONENT ACPI_EVENTS | ||
50 | ACPI_MODULE_NAME("evsci") | ||
51 | |||
52 | /* Local prototypes */ | ||
53 | static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context); | ||
54 | |||
55 | /******************************************************************************* | ||
56 | * | ||
57 | * FUNCTION: acpi_ev_sci_xrupt_handler | ||
58 | * | ||
59 | * PARAMETERS: Context - Calling Context | ||
60 | * | ||
61 | * RETURN: Status code indicating whether the interrupt was handled. | ||
62 | * | ||
63 | * DESCRIPTION: Interrupt handler that will figure out what function or | ||
64 | * control method to call to deal with an SCI. | ||
65 | * | ||
66 | ******************************************************************************/ | ||
67 | |||
68 | static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context) | ||
69 | { | ||
70 | struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; | ||
71 | u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; | ||
72 | |||
73 | ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler); | ||
74 | |||
75 | /* | ||
76 | * We are guaranteed by the ACPI CA initialization/shutdown code that | ||
77 | * if this interrupt handler is installed, ACPI is enabled. | ||
78 | */ | ||
79 | |||
80 | /* | ||
81 | * Fixed Events: | ||
82 | * Check for and dispatch any Fixed Events that have occurred | ||
83 | */ | ||
84 | interrupt_handled |= acpi_ev_fixed_event_detect(); | ||
85 | |||
86 | /* | ||
87 | * General Purpose Events: | ||
88 | * Check for and dispatch any GPEs that have occurred | ||
89 | */ | ||
90 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); | ||
91 | |||
92 | return_UINT32(interrupt_handled); | ||
93 | } | ||
94 | |||
95 | /******************************************************************************* | ||
96 | * | ||
97 | * FUNCTION: acpi_ev_gpe_xrupt_handler | ||
98 | * | ||
99 | * PARAMETERS: Context - Calling Context | ||
100 | * | ||
101 | * RETURN: Status code indicating whether the interrupt was handled. | ||
102 | * | ||
103 | * DESCRIPTION: Handler for GPE Block Device interrupts | ||
104 | * | ||
105 | ******************************************************************************/ | ||
106 | |||
107 | u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context) | ||
108 | { | ||
109 | struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; | ||
110 | u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED; | ||
111 | |||
112 | ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler); | ||
113 | |||
114 | /* | ||
115 | * We are guaranteed by the ACPI CA initialization/shutdown code that | ||
116 | * if this interrupt handler is installed, ACPI is enabled. | ||
117 | */ | ||
118 | |||
119 | /* GPEs: Check for and dispatch any GPEs that have occurred */ | ||
120 | |||
121 | interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); | ||
122 | |||
123 | return_UINT32(interrupt_handled); | ||
124 | } | ||
125 | |||
126 | /****************************************************************************** | ||
127 | * | ||
128 | * FUNCTION: acpi_ev_install_sci_handler | ||
129 | * | ||
130 | * PARAMETERS: none | ||
131 | * | ||
132 | * RETURN: Status | ||
133 | * | ||
134 | * DESCRIPTION: Installs SCI handler. | ||
135 | * | ||
136 | ******************************************************************************/ | ||
137 | |||
138 | u32 acpi_ev_install_sci_handler(void) | ||
139 | { | ||
140 | u32 status = AE_OK; | ||
141 | |||
142 | ACPI_FUNCTION_TRACE(ev_install_sci_handler); | ||
143 | |||
144 | status = | ||
145 | acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt, | ||
146 | acpi_ev_sci_xrupt_handler, | ||
147 | acpi_gbl_gpe_xrupt_list_head); | ||
148 | return_ACPI_STATUS(status); | ||
149 | } | ||
150 | |||
151 | /****************************************************************************** | ||
152 | * | ||
153 | * FUNCTION: acpi_ev_remove_sci_handler | ||
154 | * | ||
155 | * PARAMETERS: none | ||
156 | * | ||
157 | * RETURN: AE_OK if the handler was uninstalled successfully, AE_ERROR if | ||
158 | * no handler was installed to begin with | ||
159 | * | ||
160 | * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be | ||
161 | * taken. | ||
162 | * | ||
163 | * Note: It doesn't seem important to disable all events or set the event | ||
164 | * enable registers to their original values. The OS should disable | ||
165 | * the SCI interrupt level when the handler is removed, so no more | ||
166 | * events will come in. | ||
167 | * | ||
168 | ******************************************************************************/ | ||
169 | |||
170 | acpi_status acpi_ev_remove_sci_handler(void) | ||
171 | { | ||
172 | acpi_status status; | ||
173 | |||
174 | ACPI_FUNCTION_TRACE(ev_remove_sci_handler); | ||
175 | |||
176 | /* Just let the OS remove the handler and disable the level */ | ||
177 | |||
178 | status = | ||
179 | acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt, | ||
180 | acpi_ev_sci_xrupt_handler); | ||
181 | |||
182 | return_ACPI_STATUS(status); | ||
183 | } | ||
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c deleted file mode 100644 index 3b6a069f5b06..000000000000 --- a/drivers/acpi/events/evxface.c +++ /dev/null | |||
@@ -1,821 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evxface - External interfaces for ACPI events | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acnamesp.h> | ||
47 | #include <acpi/acevents.h> | ||
48 | #include <acpi/acinterp.h> | ||
49 | |||
50 | #define _COMPONENT ACPI_EVENTS | ||
51 | ACPI_MODULE_NAME("evxface") | ||
52 | |||
53 | /******************************************************************************* | ||
54 | * | ||
55 | * FUNCTION: acpi_install_exception_handler | ||
56 | * | ||
57 | * PARAMETERS: Handler - Pointer to the handler function for the | ||
58 | * event | ||
59 | * | ||
60 | * RETURN: Status | ||
61 | * | ||
62 | * DESCRIPTION: Saves the pointer to the handler function | ||
63 | * | ||
64 | ******************************************************************************/ | ||
65 | #ifdef ACPI_FUTURE_USAGE | ||
66 | acpi_status acpi_install_exception_handler(acpi_exception_handler handler) | ||
67 | { | ||
68 | acpi_status status; | ||
69 | |||
70 | ACPI_FUNCTION_TRACE(acpi_install_exception_handler); | ||
71 | |||
72 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
73 | if (ACPI_FAILURE(status)) { | ||
74 | return_ACPI_STATUS(status); | ||
75 | } | ||
76 | |||
77 | /* Don't allow two handlers. */ | ||
78 | |||
79 | if (acpi_gbl_exception_handler) { | ||
80 | status = AE_ALREADY_EXISTS; | ||
81 | goto cleanup; | ||
82 | } | ||
83 | |||
84 | /* Install the handler */ | ||
85 | |||
86 | acpi_gbl_exception_handler = handler; | ||
87 | |||
88 | cleanup: | ||
89 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
90 | return_ACPI_STATUS(status); | ||
91 | } | ||
92 | |||
93 | ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) | ||
94 | #endif /* ACPI_FUTURE_USAGE */ | ||
95 | /******************************************************************************* | ||
96 | * | ||
97 | * FUNCTION: acpi_install_fixed_event_handler | ||
98 | * | ||
99 | * PARAMETERS: Event - Event type to enable. | ||
100 | * Handler - Pointer to the handler function for the | ||
101 | * event | ||
102 | * Context - Value passed to the handler on each event | ||
103 | * | ||
104 | * RETURN: Status | ||
105 | * | ||
106 | * DESCRIPTION: Saves the pointer to the handler function and then enables the | ||
107 | * event. | ||
108 | * | ||
109 | ******************************************************************************/ | ||
110 | acpi_status | ||
111 | acpi_install_fixed_event_handler(u32 event, | ||
112 | acpi_event_handler handler, void *context) | ||
113 | { | ||
114 | acpi_status status; | ||
115 | |||
116 | ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler); | ||
117 | |||
118 | /* Parameter validation */ | ||
119 | |||
120 | if (event > ACPI_EVENT_MAX) { | ||
121 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
122 | } | ||
123 | |||
124 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
125 | if (ACPI_FAILURE(status)) { | ||
126 | return_ACPI_STATUS(status); | ||
127 | } | ||
128 | |||
129 | /* Don't allow two handlers. */ | ||
130 | |||
131 | if (NULL != acpi_gbl_fixed_event_handlers[event].handler) { | ||
132 | status = AE_ALREADY_EXISTS; | ||
133 | goto cleanup; | ||
134 | } | ||
135 | |||
136 | /* Install the handler before enabling the event */ | ||
137 | |||
138 | acpi_gbl_fixed_event_handlers[event].handler = handler; | ||
139 | acpi_gbl_fixed_event_handlers[event].context = context; | ||
140 | |||
141 | status = acpi_clear_event(event); | ||
142 | if (ACPI_SUCCESS(status)) | ||
143 | status = acpi_enable_event(event, 0); | ||
144 | if (ACPI_FAILURE(status)) { | ||
145 | ACPI_WARNING((AE_INFO, "Could not enable fixed event %X", | ||
146 | event)); | ||
147 | |||
148 | /* Remove the handler */ | ||
149 | |||
150 | acpi_gbl_fixed_event_handlers[event].handler = NULL; | ||
151 | acpi_gbl_fixed_event_handlers[event].context = NULL; | ||
152 | } else { | ||
153 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
154 | "Enabled fixed event %X, Handler=%p\n", event, | ||
155 | handler)); | ||
156 | } | ||
157 | |||
158 | cleanup: | ||
159 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
160 | return_ACPI_STATUS(status); | ||
161 | } | ||
162 | |||
163 | ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler) | ||
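A hedged usage sketch for the interface above (not part of the deleted file): a caller registers a handler for the fixed power-button event and later removes it. The handler type is the acpi_event_handler callback (u32 (*)(void *)), ACPI_EVENT_POWER_BUTTON and ACPI_INTERRUPT_HANDLED are the usual ACPICA names, and the my_* function names are hypothetical.

#include <acpi/acpi.h>

static u32 my_power_button_handler(void *context)
{
        /* Context is the pointer passed at install time; report the event */
        return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status my_register_power_button(void)
{
        /* Install first; the interface clears and enables the event itself */
        return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                                my_power_button_handler, NULL);
}

static void my_unregister_power_button(void)
{
        /* Disables the event and detaches the handler */
        (void)acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                              my_power_button_handler);
}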
164 | |||
165 | /******************************************************************************* | ||
166 | * | ||
167 | * FUNCTION: acpi_remove_fixed_event_handler | ||
168 | * | ||
169 | * PARAMETERS: Event - Event type to disable. | ||
170 | * Handler - Address of the handler | ||
171 | * | ||
172 | * RETURN: Status | ||
173 | * | ||
174 | * DESCRIPTION: Disables the event and unregisters the event handler. | ||
175 | * | ||
176 | ******************************************************************************/ | ||
177 | acpi_status | ||
178 | acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler) | ||
179 | { | ||
180 | acpi_status status = AE_OK; | ||
181 | |||
182 | ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler); | ||
183 | |||
184 | /* Parameter validation */ | ||
185 | |||
186 | if (event > ACPI_EVENT_MAX) { | ||
187 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
188 | } | ||
189 | |||
190 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
191 | if (ACPI_FAILURE(status)) { | ||
192 | return_ACPI_STATUS(status); | ||
193 | } | ||
194 | |||
195 | /* Disable the event before removing the handler */ | ||
196 | |||
197 | status = acpi_disable_event(event, 0); | ||
198 | |||
199 | /* Always remove the handler */ | ||
200 | |||
201 | acpi_gbl_fixed_event_handlers[event].handler = NULL; | ||
202 | acpi_gbl_fixed_event_handlers[event].context = NULL; | ||
203 | |||
204 | if (ACPI_FAILURE(status)) { | ||
205 | ACPI_WARNING((AE_INFO, | ||
206 | "Could not write to fixed event enable register %X", | ||
207 | event)); | ||
208 | } else { | ||
209 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n", | ||
210 | event)); | ||
211 | } | ||
212 | |||
213 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
214 | return_ACPI_STATUS(status); | ||
215 | } | ||
216 | |||
217 | ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler) | ||
218 | |||
219 | /******************************************************************************* | ||
220 | * | ||
221 | * FUNCTION: acpi_install_notify_handler | ||
222 | * | ||
223 | * PARAMETERS: Device - The device for which notifies will be handled | ||
224 | * handler_type - The type of handler: | ||
225 | * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) | ||
226 | * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) | ||
227 | * ACPI_ALL_NOTIFY: both system and device | ||
228 | * Handler - Address of the handler | ||
229 | * Context - Value passed to the handler on each notify | ||
230 | * | ||
231 | * RETURN: Status | ||
232 | * | ||
233 | * DESCRIPTION: Install a handler for notifies on an ACPI device | ||
234 | * | ||
235 | ******************************************************************************/ | ||
236 | acpi_status | ||
237 | acpi_install_notify_handler(acpi_handle device, | ||
238 | u32 handler_type, | ||
239 | acpi_notify_handler handler, void *context) | ||
240 | { | ||
241 | union acpi_operand_object *obj_desc; | ||
242 | union acpi_operand_object *notify_obj; | ||
243 | struct acpi_namespace_node *node; | ||
244 | acpi_status status; | ||
245 | |||
246 | ACPI_FUNCTION_TRACE(acpi_install_notify_handler); | ||
247 | |||
248 | /* Parameter validation */ | ||
249 | |||
250 | if ((!device) || | ||
251 | (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { | ||
252 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
253 | } | ||
254 | |||
255 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
256 | if (ACPI_FAILURE(status)) { | ||
257 | return_ACPI_STATUS(status); | ||
258 | } | ||
259 | |||
260 | /* Convert and validate the device handle */ | ||
261 | |||
262 | node = acpi_ns_map_handle_to_node(device); | ||
263 | if (!node) { | ||
264 | status = AE_BAD_PARAMETER; | ||
265 | goto unlock_and_exit; | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * Root Object: | ||
270 | * Registering a notify handler on the root object indicates that the | ||
271 | * caller wishes to receive notifications for all objects. Note that | ||
272 | * only one <external> global handler can be registered (per notify type). | ||
273 | */ | ||
274 | if (device == ACPI_ROOT_OBJECT) { | ||
275 | |||
276 | /* Make sure the handler is not already installed */ | ||
277 | |||
278 | if (((handler_type & ACPI_SYSTEM_NOTIFY) && | ||
279 | acpi_gbl_system_notify.handler) || | ||
280 | ((handler_type & ACPI_DEVICE_NOTIFY) && | ||
281 | acpi_gbl_device_notify.handler)) { | ||
282 | status = AE_ALREADY_EXISTS; | ||
283 | goto unlock_and_exit; | ||
284 | } | ||
285 | |||
286 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | ||
287 | acpi_gbl_system_notify.node = node; | ||
288 | acpi_gbl_system_notify.handler = handler; | ||
289 | acpi_gbl_system_notify.context = context; | ||
290 | } | ||
291 | |||
292 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
293 | acpi_gbl_device_notify.node = node; | ||
294 | acpi_gbl_device_notify.handler = handler; | ||
295 | acpi_gbl_device_notify.context = context; | ||
296 | } | ||
297 | |||
298 | /* Global notify handler installed */ | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * All Other Objects: | ||
303 | * Caller will only receive notifications specific to the target object. | ||
304 | * Note that only certain object types can receive notifications. | ||
305 | */ | ||
306 | else { | ||
307 | /* Notifies allowed on this object? */ | ||
308 | |||
309 | if (!acpi_ev_is_notify_object(node)) { | ||
310 | status = AE_TYPE; | ||
311 | goto unlock_and_exit; | ||
312 | } | ||
313 | |||
314 | /* Check for an existing internal object */ | ||
315 | |||
316 | obj_desc = acpi_ns_get_attached_object(node); | ||
317 | if (obj_desc) { | ||
318 | |||
319 | /* Object exists - make sure there's no handler */ | ||
320 | |||
321 | if (((handler_type & ACPI_SYSTEM_NOTIFY) && | ||
322 | obj_desc->common_notify.system_notify) || | ||
323 | ((handler_type & ACPI_DEVICE_NOTIFY) && | ||
324 | obj_desc->common_notify.device_notify)) { | ||
325 | status = AE_ALREADY_EXISTS; | ||
326 | goto unlock_and_exit; | ||
327 | } | ||
328 | } else { | ||
329 | /* Create a new object */ | ||
330 | |||
331 | obj_desc = acpi_ut_create_internal_object(node->type); | ||
332 | if (!obj_desc) { | ||
333 | status = AE_NO_MEMORY; | ||
334 | goto unlock_and_exit; | ||
335 | } | ||
336 | |||
337 | /* Attach new object to the Node */ | ||
338 | |||
339 | status = | ||
340 | acpi_ns_attach_object(device, obj_desc, node->type); | ||
341 | |||
342 | /* Remove local reference to the object */ | ||
343 | |||
344 | acpi_ut_remove_reference(obj_desc); | ||
345 | if (ACPI_FAILURE(status)) { | ||
346 | goto unlock_and_exit; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /* Install the handler */ | ||
351 | |||
352 | notify_obj = | ||
353 | acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY); | ||
354 | if (!notify_obj) { | ||
355 | status = AE_NO_MEMORY; | ||
356 | goto unlock_and_exit; | ||
357 | } | ||
358 | |||
359 | notify_obj->notify.node = node; | ||
360 | notify_obj->notify.handler = handler; | ||
361 | notify_obj->notify.context = context; | ||
362 | |||
363 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | ||
364 | obj_desc->common_notify.system_notify = notify_obj; | ||
365 | } | ||
366 | |||
367 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
368 | obj_desc->common_notify.device_notify = notify_obj; | ||
369 | } | ||
370 | |||
371 | if (handler_type == ACPI_ALL_NOTIFY) { | ||
372 | |||
373 | /* Extra ref if installed in both */ | ||
374 | |||
375 | acpi_ut_add_reference(notify_obj); | ||
376 | } | ||
377 | } | ||
378 | |||
379 | unlock_and_exit: | ||
380 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
381 | return_ACPI_STATUS(status); | ||
382 | } | ||
383 | |||
384 | ACPI_EXPORT_SYMBOL(acpi_install_notify_handler) | ||
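An illustrative, hedged sketch of how a driver might hook device notifications with the interface above (not from the original source). The my_* names are hypothetical, and 'device' is assumed to be a valid namespace handle obtained elsewhere; the callback signature is the standard acpi_notify_handler.

#include <acpi/acpi.h>

static void my_notify_handler(acpi_handle device, u32 value, void *context)
{
        /* Values 0x00-0x7f are system notifies, 0x80-0xff are device notifies */
}

static acpi_status my_hook_notifies(acpi_handle device)
{
        /* Receive only the device (driver) notify range for this object */
        return acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
                                           my_notify_handler, NULL);
}

static void my_unhook_notifies(acpi_handle device)
{
        (void)acpi_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
                                         my_notify_handler);
}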
385 | |||
386 | /******************************************************************************* | ||
387 | * | ||
388 | * FUNCTION: acpi_remove_notify_handler | ||
389 | * | ||
390 | * PARAMETERS: Device - The device for which notifies will be handled | ||
391 | * handler_type - The type of handler: | ||
392 | * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) | ||
393 | * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) | ||
394 | * ACPI_ALL_NOTIFY: both system and device | ||
395 | * Handler - Address of the handler | ||
396 | * | ||
397 | * RETURN: Status | ||
398 | * | ||
399 | * DESCRIPTION: Remove a handler for notifies on an ACPI device | ||
400 | * | ||
401 | ******************************************************************************/ | ||
402 | acpi_status | ||
403 | acpi_remove_notify_handler(acpi_handle device, | ||
404 | u32 handler_type, acpi_notify_handler handler) | ||
405 | { | ||
406 | union acpi_operand_object *notify_obj; | ||
407 | union acpi_operand_object *obj_desc; | ||
408 | struct acpi_namespace_node *node; | ||
409 | acpi_status status; | ||
410 | |||
411 | ACPI_FUNCTION_TRACE(acpi_remove_notify_handler); | ||
412 | |||
413 | /* Parameter validation */ | ||
414 | |||
415 | if ((!device) || | ||
416 | (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) { | ||
417 | status = AE_BAD_PARAMETER; | ||
418 | goto exit; | ||
419 | } | ||
420 | |||
421 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
422 | if (ACPI_FAILURE(status)) { | ||
423 | goto exit; | ||
424 | } | ||
425 | |||
426 | /* Convert and validate the device handle */ | ||
427 | |||
428 | node = acpi_ns_map_handle_to_node(device); | ||
429 | if (!node) { | ||
430 | status = AE_BAD_PARAMETER; | ||
431 | goto unlock_and_exit; | ||
432 | } | ||
433 | |||
434 | /* Root Object */ | ||
435 | |||
436 | if (device == ACPI_ROOT_OBJECT) { | ||
437 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
438 | "Removing notify handler for namespace root object\n")); | ||
439 | |||
440 | if (((handler_type & ACPI_SYSTEM_NOTIFY) && | ||
441 | !acpi_gbl_system_notify.handler) || | ||
442 | ((handler_type & ACPI_DEVICE_NOTIFY) && | ||
443 | !acpi_gbl_device_notify.handler)) { | ||
444 | status = AE_NOT_EXIST; | ||
445 | goto unlock_and_exit; | ||
446 | } | ||
447 | |||
448 | /* Make sure all deferred tasks are completed */ | ||
449 | |||
450 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
451 | acpi_os_wait_events_complete(NULL); | ||
452 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
453 | if (ACPI_FAILURE(status)) { | ||
454 | goto exit; | ||
455 | } | ||
456 | |||
457 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | ||
458 | acpi_gbl_system_notify.node = NULL; | ||
459 | acpi_gbl_system_notify.handler = NULL; | ||
460 | acpi_gbl_system_notify.context = NULL; | ||
461 | } | ||
462 | |||
463 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
464 | acpi_gbl_device_notify.node = NULL; | ||
465 | acpi_gbl_device_notify.handler = NULL; | ||
466 | acpi_gbl_device_notify.context = NULL; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | /* All Other Objects */ | ||
471 | |||
472 | else { | ||
473 | /* Notifies allowed on this object? */ | ||
474 | |||
475 | if (!acpi_ev_is_notify_object(node)) { | ||
476 | status = AE_TYPE; | ||
477 | goto unlock_and_exit; | ||
478 | } | ||
479 | |||
480 | /* Check for an existing internal object */ | ||
481 | |||
482 | obj_desc = acpi_ns_get_attached_object(node); | ||
483 | if (!obj_desc) { | ||
484 | status = AE_NOT_EXIST; | ||
485 | goto unlock_and_exit; | ||
486 | } | ||
487 | |||
488 | /* Object exists - make sure there's an existing handler */ | ||
489 | |||
490 | if (handler_type & ACPI_SYSTEM_NOTIFY) { | ||
491 | notify_obj = obj_desc->common_notify.system_notify; | ||
492 | if (!notify_obj) { | ||
493 | status = AE_NOT_EXIST; | ||
494 | goto unlock_and_exit; | ||
495 | } | ||
496 | |||
497 | if (notify_obj->notify.handler != handler) { | ||
498 | status = AE_BAD_PARAMETER; | ||
499 | goto unlock_and_exit; | ||
500 | } | ||
501 | /* Make sure all deferred tasks are completed */ | ||
502 | |||
503 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
504 | acpi_os_wait_events_complete(NULL); | ||
505 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
506 | if (ACPI_FAILURE(status)) { | ||
507 | goto exit; | ||
508 | } | ||
509 | |||
510 | /* Remove the handler */ | ||
511 | obj_desc->common_notify.system_notify = NULL; | ||
512 | acpi_ut_remove_reference(notify_obj); | ||
513 | } | ||
514 | |||
515 | if (handler_type & ACPI_DEVICE_NOTIFY) { | ||
516 | notify_obj = obj_desc->common_notify.device_notify; | ||
517 | if (!notify_obj) { | ||
518 | status = AE_NOT_EXIST; | ||
519 | goto unlock_and_exit; | ||
520 | } | ||
521 | |||
522 | if (notify_obj->notify.handler != handler) { | ||
523 | status = AE_BAD_PARAMETER; | ||
524 | goto unlock_and_exit; | ||
525 | } | ||
526 | /* Make sure all deferred tasks are completed */ | ||
527 | |||
528 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
529 | acpi_os_wait_events_complete(NULL); | ||
530 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
531 | if (ACPI_FAILURE(status)) { | ||
532 | goto exit; | ||
533 | } | ||
534 | |||
535 | /* Remove the handler */ | ||
536 | obj_desc->common_notify.device_notify = NULL; | ||
537 | acpi_ut_remove_reference(notify_obj); | ||
538 | } | ||
539 | } | ||
540 | |||
541 | unlock_and_exit: | ||
542 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
543 | exit: | ||
544 | if (ACPI_FAILURE(status)) | ||
545 | ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler")); | ||
546 | return_ACPI_STATUS(status); | ||
547 | } | ||
548 | |||
549 | ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler) | ||
550 | |||
551 | /******************************************************************************* | ||
552 | * | ||
553 | * FUNCTION: acpi_install_gpe_handler | ||
554 | * | ||
555 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | ||
556 | * defined GPEs) | ||
557 | * gpe_number - The GPE number within the GPE block | ||
558 | * Type - Whether this GPE should be treated as an | ||
559 | * edge- or level-triggered interrupt. | ||
560 | * Address - Address of the handler | ||
561 | * Context - Value passed to the handler on each GPE | ||
562 | * | ||
563 | * RETURN: Status | ||
564 | * | ||
565 | * DESCRIPTION: Install a handler for a General Purpose Event. | ||
566 | * | ||
567 | ******************************************************************************/ | ||
568 | acpi_status | ||
569 | acpi_install_gpe_handler(acpi_handle gpe_device, | ||
570 | u32 gpe_number, | ||
571 | u32 type, acpi_event_handler address, void *context) | ||
572 | { | ||
573 | struct acpi_gpe_event_info *gpe_event_info; | ||
574 | struct acpi_handler_info *handler; | ||
575 | acpi_status status; | ||
576 | acpi_cpu_flags flags; | ||
577 | |||
578 | ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); | ||
579 | |||
580 | /* Parameter validation */ | ||
581 | |||
582 | if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) { | ||
583 | status = AE_BAD_PARAMETER; | ||
584 | goto exit; | ||
585 | } | ||
586 | |||
587 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
588 | if (ACPI_FAILURE(status)) { | ||
589 | goto exit; | ||
590 | } | ||
591 | |||
592 | /* Ensure that we have a valid GPE number */ | ||
593 | |||
594 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
595 | if (!gpe_event_info) { | ||
596 | status = AE_BAD_PARAMETER; | ||
597 | goto unlock_and_exit; | ||
598 | } | ||
599 | |||
600 | /* Make sure that there isn't a handler there already */ | ||
601 | |||
602 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
603 | ACPI_GPE_DISPATCH_HANDLER) { | ||
604 | status = AE_ALREADY_EXISTS; | ||
605 | goto unlock_and_exit; | ||
606 | } | ||
607 | |||
608 | /* Allocate and init handler object */ | ||
609 | |||
610 | handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); | ||
611 | if (!handler) { | ||
612 | status = AE_NO_MEMORY; | ||
613 | goto unlock_and_exit; | ||
614 | } | ||
615 | |||
616 | handler->address = address; | ||
617 | handler->context = context; | ||
618 | handler->method_node = gpe_event_info->dispatch.method_node; | ||
619 | |||
620 | /* Disable the GPE before installing the handler */ | ||
621 | |||
622 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
623 | if (ACPI_FAILURE(status)) { | ||
624 | goto unlock_and_exit; | ||
625 | } | ||
626 | |||
627 | /* Install the handler */ | ||
628 | |||
629 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
630 | gpe_event_info->dispatch.handler = handler; | ||
631 | |||
632 | /* Set up dispatch flags to indicate handler (vs. method) */ | ||
633 | |||
634 | gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */ | ||
635 | gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); | ||
636 | |||
637 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
638 | |||
639 | unlock_and_exit: | ||
640 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
641 | exit: | ||
642 | if (ACPI_FAILURE(status)) | ||
643 | ACPI_EXCEPTION((AE_INFO, status, | ||
644 | "Installing notify handler failed")); | ||
645 | return_ACPI_STATUS(status); | ||
646 | } | ||
647 | |||
648 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) | ||
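A hedged sketch of installing and then enabling a handler for a FADT-defined GPE (not part of the deleted file). The GPE number 0x16 is arbitrary, the my_* names are hypothetical, and the handler callback is the same acpi_event_handler type used for fixed events.

#include <acpi/acpi.h>

static u32 my_gpe_handler(void *context)
{
        /* Service the hardware event, then report it as handled */
        return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status my_hook_gpe(void)
{
        acpi_status status;

        /* NULL gpe_device selects the FADT-defined GPE blocks */
        status = acpi_install_gpe_handler(NULL, 0x16,
                                          ACPI_GPE_LEVEL_TRIGGERED,
                                          my_gpe_handler, NULL);
        if (ACPI_FAILURE(status))
                return (status);

        /* The install path disables the GPE, so enable it explicitly */
        return acpi_enable_gpe(NULL, 0x16);
}

Teardown is symmetric: disable or remove with acpi_remove_gpe_handler(), passing the same GPE device, number, and handler address.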
649 | |||
650 | /******************************************************************************* | ||
651 | * | ||
652 | * FUNCTION: acpi_remove_gpe_handler | ||
653 | * | ||
654 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | ||
655 | * defined GPEs) | ||
656 | * gpe_number - The event to remove the handler from | ||
657 | * Address - Address of the handler | ||
658 | * | ||
659 | * RETURN: Status | ||
660 | * | ||
661 | * DESCRIPTION: Remove a handler for a General Purpose acpi_event. | ||
662 | * | ||
663 | ******************************************************************************/ | ||
664 | acpi_status | ||
665 | acpi_remove_gpe_handler(acpi_handle gpe_device, | ||
666 | u32 gpe_number, acpi_event_handler address) | ||
667 | { | ||
668 | struct acpi_gpe_event_info *gpe_event_info; | ||
669 | struct acpi_handler_info *handler; | ||
670 | acpi_status status; | ||
671 | acpi_cpu_flags flags; | ||
672 | |||
673 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler); | ||
674 | |||
675 | /* Parameter validation */ | ||
676 | |||
677 | if (!address) { | ||
678 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
679 | } | ||
680 | |||
681 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
682 | if (ACPI_FAILURE(status)) { | ||
683 | return_ACPI_STATUS(status); | ||
684 | } | ||
685 | |||
686 | /* Ensure that we have a valid GPE number */ | ||
687 | |||
688 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
689 | if (!gpe_event_info) { | ||
690 | status = AE_BAD_PARAMETER; | ||
691 | goto unlock_and_exit; | ||
692 | } | ||
693 | |||
694 | /* Make sure that a handler is indeed installed */ | ||
695 | |||
696 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != | ||
697 | ACPI_GPE_DISPATCH_HANDLER) { | ||
698 | status = AE_NOT_EXIST; | ||
699 | goto unlock_and_exit; | ||
700 | } | ||
701 | |||
702 | /* Make sure that the installed handler is the same */ | ||
703 | |||
704 | if (gpe_event_info->dispatch.handler->address != address) { | ||
705 | status = AE_BAD_PARAMETER; | ||
706 | goto unlock_and_exit; | ||
707 | } | ||
708 | |||
709 | /* Disable the GPE before removing the handler */ | ||
710 | |||
711 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
712 | if (ACPI_FAILURE(status)) { | ||
713 | goto unlock_and_exit; | ||
714 | } | ||
715 | |||
716 | /* Make sure all deferred tasks are completed */ | ||
717 | |||
718 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
719 | acpi_os_wait_events_complete(NULL); | ||
720 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
721 | if (ACPI_FAILURE(status)) { | ||
722 | return_ACPI_STATUS(status); | ||
723 | } | ||
724 | |||
725 | /* Remove the handler */ | ||
726 | |||
727 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
728 | handler = gpe_event_info->dispatch.handler; | ||
729 | |||
730 | /* Restore Method node (if any), set dispatch flags */ | ||
731 | |||
732 | gpe_event_info->dispatch.method_node = handler->method_node; | ||
733 | gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */ | ||
734 | if (handler->method_node) { | ||
735 | gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD; | ||
736 | } | ||
737 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
738 | |||
739 | /* Now we can free the handler object */ | ||
740 | |||
741 | ACPI_FREE(handler); | ||
742 | |||
743 | unlock_and_exit: | ||
744 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
745 | return_ACPI_STATUS(status); | ||
746 | } | ||
747 | |||
748 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler) | ||
749 | |||
750 | /******************************************************************************* | ||
751 | * | ||
752 | * FUNCTION: acpi_acquire_global_lock | ||
753 | * | ||
754 | * PARAMETERS: Timeout - How long the caller is willing to wait | ||
755 | * Handle - Where the handle to the lock is returned | ||
756 | * (if acquired) | ||
757 | * | ||
758 | * RETURN: Status | ||
759 | * | ||
760 | * DESCRIPTION: Acquire the ACPI Global Lock | ||
761 | * | ||
762 | * Note: Allows callers with the same thread ID to acquire the global lock | ||
763 | * multiple times. In other words, externally, the behavior of the global lock | ||
764 | * is identical to an AML mutex. On the first acquire, a new handle is | ||
765 | * returned. On any subsequent calls to acquire by the same thread, the same | ||
766 | * handle is returned. | ||
767 | * | ||
768 | ******************************************************************************/ | ||
769 | acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) | ||
770 | { | ||
771 | acpi_status status; | ||
772 | |||
773 | if (!handle) { | ||
774 | return (AE_BAD_PARAMETER); | ||
775 | } | ||
776 | |||
777 | /* Must lock interpreter to prevent race conditions */ | ||
778 | |||
779 | acpi_ex_enter_interpreter(); | ||
780 | |||
781 | status = acpi_ex_acquire_mutex_object(timeout, | ||
782 | acpi_gbl_global_lock_mutex, | ||
783 | acpi_os_get_thread_id()); | ||
784 | |||
785 | if (ACPI_SUCCESS(status)) { | ||
786 | |||
787 | /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */ | ||
788 | |||
789 | *handle = acpi_gbl_global_lock_handle; | ||
790 | } | ||
791 | |||
792 | acpi_ex_exit_interpreter(); | ||
793 | return (status); | ||
794 | } | ||
795 | |||
796 | ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock) | ||
797 | |||
798 | /******************************************************************************* | ||
799 | * | ||
800 | * FUNCTION: acpi_release_global_lock | ||
801 | * | ||
802 | * PARAMETERS: Handle - Returned from acpi_acquire_global_lock | ||
803 | * | ||
804 | * RETURN: Status | ||
805 | * | ||
806 | * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid. | ||
807 | * | ||
808 | ******************************************************************************/ | ||
809 | acpi_status acpi_release_global_lock(u32 handle) | ||
810 | { | ||
811 | acpi_status status; | ||
812 | |||
813 | if (!handle || (handle != acpi_gbl_global_lock_handle)) { | ||
814 | return (AE_NOT_ACQUIRED); | ||
815 | } | ||
816 | |||
817 | status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex); | ||
818 | return (status); | ||
819 | } | ||
820 | |||
821 | ACPI_EXPORT_SYMBOL(acpi_release_global_lock) | ||
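A short, hedged sketch of the acquire/use/release pattern for the Global Lock described above (illustrative only): the my_* name is a placeholder, and ACPI_WAIT_FOREVER is the usual ACPICA "no timeout" value for the u16 timeout argument.

#include <acpi/acpi.h>

static acpi_status my_with_global_lock(void)
{
        acpi_status status;
        u32 lock_handle;

        status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &lock_handle);
        if (ACPI_FAILURE(status))
                return (status);

        /* ... access hardware shared with the firmware here ... */

        /* The handle returned above must be passed back on release */
        return acpi_release_global_lock(lock_handle);
}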
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c deleted file mode 100644 index f33cc30cb6b8..000000000000 --- a/drivers/acpi/events/evxfevnt.c +++ /dev/null | |||
@@ -1,871 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include <acpi/accommon.h> | ||
46 | #include <acpi/acevents.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | #include <acpi/actables.h> | ||
49 | |||
50 | #define _COMPONENT ACPI_EVENTS | ||
51 | ACPI_MODULE_NAME("evxfevnt") | ||
52 | |||
53 | /* Local prototypes */ | ||
54 | acpi_status | ||
55 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
56 | struct acpi_gpe_block_info *gpe_block, void *context); | ||
57 | |||
58 | /******************************************************************************* | ||
59 | * | ||
60 | * FUNCTION: acpi_enable | ||
61 | * | ||
62 | * PARAMETERS: None | ||
63 | * | ||
64 | * RETURN: Status | ||
65 | * | ||
66 | * DESCRIPTION: Transfers the system into ACPI mode. | ||
67 | * | ||
68 | ******************************************************************************/ | ||
69 | |||
70 | acpi_status acpi_enable(void) | ||
71 | { | ||
72 | acpi_status status = AE_OK; | ||
73 | |||
74 | ACPI_FUNCTION_TRACE(acpi_enable); | ||
75 | |||
76 | /* ACPI tables must be present */ | ||
77 | |||
78 | if (!acpi_tb_tables_loaded()) { | ||
79 | return_ACPI_STATUS(AE_NO_ACPI_TABLES); | ||
80 | } | ||
81 | |||
82 | /* Check current mode */ | ||
83 | |||
84 | if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { | ||
85 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
86 | "System is already in ACPI mode\n")); | ||
87 | } else { | ||
88 | /* Transition to ACPI mode */ | ||
89 | |||
90 | status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); | ||
91 | if (ACPI_FAILURE(status)) { | ||
92 | ACPI_ERROR((AE_INFO, | ||
93 | "Could not transition to ACPI mode")); | ||
94 | return_ACPI_STATUS(status); | ||
95 | } | ||
96 | |||
97 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
98 | "Transition to ACPI mode successful\n")); | ||
99 | } | ||
100 | |||
101 | return_ACPI_STATUS(status); | ||
102 | } | ||
103 | |||
104 | ACPI_EXPORT_SYMBOL(acpi_enable) | ||
105 | |||
106 | /******************************************************************************* | ||
107 | * | ||
108 | * FUNCTION: acpi_disable | ||
109 | * | ||
110 | * PARAMETERS: None | ||
111 | * | ||
112 | * RETURN: Status | ||
113 | * | ||
114 | * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode. | ||
115 | * | ||
116 | ******************************************************************************/ | ||
117 | acpi_status acpi_disable(void) | ||
118 | { | ||
119 | acpi_status status = AE_OK; | ||
120 | |||
121 | ACPI_FUNCTION_TRACE(acpi_disable); | ||
122 | |||
123 | if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { | ||
124 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, | ||
125 | "System is already in legacy (non-ACPI) mode\n")); | ||
126 | } else { | ||
127 | /* Transition to LEGACY mode */ | ||
128 | |||
129 | status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY); | ||
130 | |||
131 | if (ACPI_FAILURE(status)) { | ||
132 | ACPI_ERROR((AE_INFO, | ||
133 | "Could not exit ACPI mode to legacy mode")); | ||
134 | return_ACPI_STATUS(status); | ||
135 | } | ||
136 | |||
137 | ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n")); | ||
138 | } | ||
139 | |||
140 | return_ACPI_STATUS(status); | ||
141 | } | ||
142 | |||
143 | ACPI_EXPORT_SYMBOL(acpi_disable) | ||
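An illustrative, hedged sketch of the call sequence implied by the two interfaces above (not from the original file): enter ACPI mode once the tables are loaded, and drop back to legacy mode on shutdown. The my_* names are hypothetical.

#include <acpi/acpi.h>

static acpi_status my_enter_acpi_mode(void)
{
        acpi_status status;

        status = acpi_enable();         /* SCI and event hardware become active */
        if (ACPI_FAILURE(status))
                return (status);        /* AE_NO_ACPI_TABLES or a mode-switch failure */

        return (AE_OK);
}

static void my_leave_acpi_mode(void)
{
        (void)acpi_disable();           /* Back to legacy (non-ACPI) mode */
}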
144 | |||
145 | /******************************************************************************* | ||
146 | * | ||
147 | * FUNCTION: acpi_enable_event | ||
148 | * | ||
149 | * PARAMETERS: Event - The fixed event to be enabled | ||
150 | * Flags - Reserved | ||
151 | * | ||
152 | * RETURN: Status | ||
153 | * | ||
154 | * DESCRIPTION: Enable an ACPI event (fixed) | ||
155 | * | ||
156 | ******************************************************************************/ | ||
157 | acpi_status acpi_enable_event(u32 event, u32 flags) | ||
158 | { | ||
159 | acpi_status status = AE_OK; | ||
160 | u32 value; | ||
161 | |||
162 | ACPI_FUNCTION_TRACE(acpi_enable_event); | ||
163 | |||
164 | /* Decode the Fixed Event */ | ||
165 | |||
166 | if (event > ACPI_EVENT_MAX) { | ||
167 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Enable the requested fixed event (by writing a one to the enable | ||
172 | * register bit) | ||
173 | */ | ||
174 | status = | ||
175 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | ||
176 | enable_register_id, 1); | ||
177 | if (ACPI_FAILURE(status)) { | ||
178 | return_ACPI_STATUS(status); | ||
179 | } | ||
180 | |||
181 | /* Make sure that the hardware responded */ | ||
182 | |||
183 | status = | ||
184 | acpi_get_register(acpi_gbl_fixed_event_info[event]. | ||
185 | enable_register_id, &value); | ||
186 | if (ACPI_FAILURE(status)) { | ||
187 | return_ACPI_STATUS(status); | ||
188 | } | ||
189 | |||
190 | if (value != 1) { | ||
191 | ACPI_ERROR((AE_INFO, | ||
192 | "Could not enable %s event", | ||
193 | acpi_ut_get_event_name(event))); | ||
194 | return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); | ||
195 | } | ||
196 | |||
197 | return_ACPI_STATUS(status); | ||
198 | } | ||
199 | |||
200 | ACPI_EXPORT_SYMBOL(acpi_enable_event) | ||
201 | |||
202 | /******************************************************************************* | ||
203 | * | ||
204 | * FUNCTION: acpi_set_gpe_type | ||
205 | * | ||
206 | * PARAMETERS: gpe_device - Parent GPE Device | ||
207 | * gpe_number - GPE level within the GPE block | ||
208 | * Type - New GPE type | ||
209 | * | ||
210 | * RETURN: Status | ||
211 | * | ||
212 | * DESCRIPTION: Set the type of an individual GPE | ||
213 | * | ||
214 | ******************************************************************************/ | ||
215 | acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) | ||
216 | { | ||
217 | acpi_status status = AE_OK; | ||
218 | struct acpi_gpe_event_info *gpe_event_info; | ||
219 | |||
220 | ACPI_FUNCTION_TRACE(acpi_set_gpe_type); | ||
221 | |||
222 | /* Ensure that we have a valid GPE number */ | ||
223 | |||
224 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
225 | if (!gpe_event_info) { | ||
226 | status = AE_BAD_PARAMETER; | ||
227 | goto unlock_and_exit; | ||
228 | } | ||
229 | |||
230 | if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) { | ||
231 | return_ACPI_STATUS(AE_OK); | ||
232 | } | ||
233 | |||
234 | /* Set the new type (will disable GPE if currently enabled) */ | ||
235 | |||
236 | status = acpi_ev_set_gpe_type(gpe_event_info, type); | ||
237 | |||
238 | unlock_and_exit: | ||
239 | return_ACPI_STATUS(status); | ||
240 | } | ||
241 | |||
242 | ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) | ||
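A hedged sketch of marking a wake-capable GPE before enabling it. The ACPI_GPE_TYPE_WAKE_RUN constant and the my_* naming are assumptions based on typical callers of this interface; as the comment in the function above notes, setting the type disables the GPE, so it is re-enabled afterwards.

#include <acpi/acpi.h>

static acpi_status my_setup_wake_gpe(acpi_handle gpe_device, u32 gpe_number)
{
        acpi_status status;

        /* Mark the GPE as both a runtime and a wake event */
        status = acpi_set_gpe_type(gpe_device, gpe_number,
                                   ACPI_GPE_TYPE_WAKE_RUN);
        if (ACPI_FAILURE(status))
                return (status);

        /* Setting the type disables the GPE, so re-enable it here */
        return acpi_enable_gpe(gpe_device, gpe_number);
}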
243 | |||
244 | /******************************************************************************* | ||
245 | * | ||
246 | * FUNCTION: acpi_enable_gpe | ||
247 | * | ||
248 | * PARAMETERS: gpe_device - Parent GPE Device | ||
249 | * gpe_number - GPE level within the GPE block | ||
250 | * Flags - Just enable, or also wake enable? | ||
251 | * Called from ISR or not | ||
252 | * | ||
253 | * RETURN: Status | ||
254 | * | ||
255 | * DESCRIPTION: Enable an ACPI event (general purpose) | ||
256 | * | ||
257 | ******************************************************************************/ | ||
258 | acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
259 | { | ||
260 | acpi_status status = AE_OK; | ||
261 | acpi_cpu_flags flags; | ||
262 | struct acpi_gpe_event_info *gpe_event_info; | ||
263 | |||
264 | ACPI_FUNCTION_TRACE(acpi_enable_gpe); | ||
265 | |||
266 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
267 | |||
268 | /* Ensure that we have a valid GPE number */ | ||
269 | |||
270 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
271 | if (!gpe_event_info) { | ||
272 | status = AE_BAD_PARAMETER; | ||
273 | goto unlock_and_exit; | ||
274 | } | ||
275 | |||
276 | /* Perform the enable */ | ||
277 | |||
278 | status = acpi_ev_enable_gpe(gpe_event_info, TRUE); | ||
279 | |||
280 | unlock_and_exit: | ||
281 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
282 | return_ACPI_STATUS(status); | ||
283 | } | ||
284 | |||
285 | ACPI_EXPORT_SYMBOL(acpi_enable_gpe) | ||
286 | |||
287 | /******************************************************************************* | ||
288 | * | ||
289 | * FUNCTION: acpi_disable_gpe | ||
290 | * | ||
291 | * PARAMETERS: gpe_device - Parent GPE Device | ||
292 | * gpe_number - GPE level within the GPE block | ||
293 | * Flags - Just disable, or also wake disable? | ||
294 | * Called from ISR or not | ||
295 | * | ||
296 | * RETURN: Status | ||
297 | * | ||
298 | * DESCRIPTION: Disable an ACPI event (general purpose) | ||
299 | * | ||
300 | ******************************************************************************/ | ||
301 | acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
302 | { | ||
303 | acpi_status status = AE_OK; | ||
304 | acpi_cpu_flags flags; | ||
305 | struct acpi_gpe_event_info *gpe_event_info; | ||
306 | |||
307 | ACPI_FUNCTION_TRACE(acpi_disable_gpe); | ||
308 | |||
309 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
310 | /* Ensure that we have a valid GPE number */ | ||
311 | |||
312 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
313 | if (!gpe_event_info) { | ||
314 | status = AE_BAD_PARAMETER; | ||
315 | goto unlock_and_exit; | ||
316 | } | ||
317 | |||
318 | status = acpi_ev_disable_gpe(gpe_event_info); | ||
319 | |||
320 | unlock_and_exit: | ||
321 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
322 | return_ACPI_STATUS(status); | ||
323 | } | ||
324 | |||
325 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | ||
326 | |||
327 | /******************************************************************************* | ||
328 | * | ||
329 | * FUNCTION: acpi_disable_event | ||
330 | * | ||
331 | * PARAMETERS: Event - The fixed event to be disabled | ||
332 | * Flags - Reserved | ||
333 | * | ||
334 | * RETURN: Status | ||
335 | * | ||
336 | * DESCRIPTION: Disable an ACPI event (fixed) | ||
337 | * | ||
338 | ******************************************************************************/ | ||
339 | acpi_status acpi_disable_event(u32 event, u32 flags) | ||
340 | { | ||
341 | acpi_status status = AE_OK; | ||
342 | u32 value; | ||
343 | |||
344 | ACPI_FUNCTION_TRACE(acpi_disable_event); | ||
345 | |||
346 | /* Decode the Fixed Event */ | ||
347 | |||
348 | if (event > ACPI_EVENT_MAX) { | ||
349 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Disable the requested fixed event (by writing a zero to the enable | ||
354 | * register bit) | ||
355 | */ | ||
356 | status = | ||
357 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | ||
358 | enable_register_id, 0); | ||
359 | if (ACPI_FAILURE(status)) { | ||
360 | return_ACPI_STATUS(status); | ||
361 | } | ||
362 | |||
363 | status = | ||
364 | acpi_get_register(acpi_gbl_fixed_event_info[event]. | ||
365 | enable_register_id, &value); | ||
366 | if (ACPI_FAILURE(status)) { | ||
367 | return_ACPI_STATUS(status); | ||
368 | } | ||
369 | |||
370 | if (value != 0) { | ||
371 | ACPI_ERROR((AE_INFO, | ||
372 | "Could not disable %s events", | ||
373 | acpi_ut_get_event_name(event))); | ||
374 | return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); | ||
375 | } | ||
376 | |||
377 | return_ACPI_STATUS(status); | ||
378 | } | ||
379 | |||
380 | ACPI_EXPORT_SYMBOL(acpi_disable_event) | ||
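A minimal hedged sketch exercising the two fixed-event interfaces above. ACPI_EVENT_RTC is one of the standard fixed-event indices, the flags argument is reserved so 0 is passed, and the my_* name is hypothetical.

#include <acpi/acpi.h>

static acpi_status my_toggle_rtc_event(void)
{
        acpi_status status;

        /* Write a one to the RTC enable bit and verify the hardware took it */
        status = acpi_enable_event(ACPI_EVENT_RTC, 0);
        if (ACPI_FAILURE(status))
                return (status);

        /* ... event is live here ... */

        /* Write a zero to the enable bit again */
        return acpi_disable_event(ACPI_EVENT_RTC, 0);
}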
381 | |||
382 | /******************************************************************************* | ||
383 | * | ||
384 | * FUNCTION: acpi_clear_event | ||
385 | * | ||
386 | * PARAMETERS: Event - The fixed event to be cleared | ||
387 | * | ||
388 | * RETURN: Status | ||
389 | * | ||
390 | * DESCRIPTION: Clear an ACPI event (fixed) | ||
391 | * | ||
392 | ******************************************************************************/ | ||
393 | acpi_status acpi_clear_event(u32 event) | ||
394 | { | ||
395 | acpi_status status = AE_OK; | ||
396 | |||
397 | ACPI_FUNCTION_TRACE(acpi_clear_event); | ||
398 | |||
399 | /* Decode the Fixed Event */ | ||
400 | |||
401 | if (event > ACPI_EVENT_MAX) { | ||
402 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * Clear the requested fixed event (by writing a one to the status | ||
407 | * register bit) | ||
408 | */ | ||
409 | status = | ||
410 | acpi_set_register(acpi_gbl_fixed_event_info[event]. | ||
411 | status_register_id, 1); | ||
412 | |||
413 | return_ACPI_STATUS(status); | ||
414 | } | ||
415 | |||
416 | ACPI_EXPORT_SYMBOL(acpi_clear_event) | ||
417 | |||
418 | /******************************************************************************* | ||
419 | * | ||
420 | * FUNCTION: acpi_clear_gpe | ||
421 | * | ||
422 | * PARAMETERS: gpe_device - Parent GPE Device | ||
423 | * gpe_number - GPE level within the GPE block | ||
424 | * Flags - Called from an ISR or not | ||
425 | * | ||
426 | * RETURN: Status | ||
427 | * | ||
428 | * DESCRIPTION: Clear an ACPI event (general purpose) | ||
429 | * | ||
430 | ******************************************************************************/ | ||
431 | acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) | ||
432 | { | ||
433 | acpi_status status = AE_OK; | ||
434 | struct acpi_gpe_event_info *gpe_event_info; | ||
435 | |||
436 | ACPI_FUNCTION_TRACE(acpi_clear_gpe); | ||
437 | |||
438 | /* Use semaphore lock if not executing at interrupt level */ | ||
439 | |||
440 | if (flags & ACPI_NOT_ISR) { | ||
441 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
442 | if (ACPI_FAILURE(status)) { | ||
443 | return_ACPI_STATUS(status); | ||
444 | } | ||
445 | } | ||
446 | |||
447 | /* Ensure that we have a valid GPE number */ | ||
448 | |||
449 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
450 | if (!gpe_event_info) { | ||
451 | status = AE_BAD_PARAMETER; | ||
452 | goto unlock_and_exit; | ||
453 | } | ||
454 | |||
455 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
456 | |||
457 | unlock_and_exit: | ||
458 | if (flags & ACPI_NOT_ISR) { | ||
459 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
460 | } | ||
461 | return_ACPI_STATUS(status); | ||
462 | } | ||
463 | |||
464 | ACPI_EXPORT_SYMBOL(acpi_clear_gpe) | ||
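
A minimal sketch of clearing a GPE from process context. Passing NULL for gpe_device is assumed to address the FADT-defined GPE blocks (see acpi_get_gpe_device below), GPE 0x10 is an arbitrary example number, and ACPI_NOT_ISR indicates the caller is not running at interrupt level.

/* Hypothetical caller, not invoked from the SCI handler */
static acpi_status example_clear_gpe_0x10(void)
{
	/* ACPI_NOT_ISR: take the mutex path shown above */
	return acpi_clear_gpe(NULL, 0x10, ACPI_NOT_ISR);
}
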
465 | /******************************************************************************* | ||
466 | * | ||
467 | * FUNCTION: acpi_get_event_status | ||
468 | * | ||
469 | * PARAMETERS: Event - The fixed event | ||
470 | * event_status - Where the current status of the event will | ||
471 | * be returned | ||
472 | * | ||
473 | * RETURN: Status | ||
474 | * | ||
475 | * DESCRIPTION: Obtains and returns the current status of the event | ||
476 | * | ||
477 | ******************************************************************************/ | ||
478 | acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) | ||
479 | { | ||
480 | acpi_status status = AE_OK; | ||
481 | u32 value; | ||
482 | |||
483 | ACPI_FUNCTION_TRACE(acpi_get_event_status); | ||
484 | |||
485 | if (!event_status) { | ||
486 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
487 | } | ||
488 | |||
489 | /* Decode the Fixed Event */ | ||
490 | |||
491 | if (event > ACPI_EVENT_MAX) { | ||
492 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
493 | } | ||
494 | |||
495 | /* Get the status of the requested fixed event */ | ||
496 | |||
497 | status = | ||
498 | acpi_get_register(acpi_gbl_fixed_event_info[event]. | ||
499 | enable_register_id, &value); | ||
500 | if (ACPI_FAILURE(status)) | ||
501 | return_ACPI_STATUS(status); | ||
502 | |||
503 | *event_status = value; | ||
504 | |||
505 | status = | ||
506 | acpi_get_register(acpi_gbl_fixed_event_info[event]. | ||
507 | status_register_id, &value); | ||
508 | if (ACPI_FAILURE(status)) | ||
509 | return_ACPI_STATUS(status); | ||
510 | |||
511 | if (value) | ||
512 | *event_status |= ACPI_EVENT_FLAG_SET; | ||
513 | |||
514 | if (acpi_gbl_fixed_event_handlers[event].handler) | ||
515 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
516 | |||
517 | return_ACPI_STATUS(status); | ||
518 | } | ||
519 | |||
520 | ACPI_EXPORT_SYMBOL(acpi_get_event_status) | ||
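
A sketch of polling a fixed event's state. ACPI_EVENT_POWER_BUTTON is again an assumed example index, and the ACPI_EVENT_FLAG_SET test mirrors the flag set by the code above when the status bit is latched.

/* Hypothetical helper: has the power-button event fired? */
static int example_power_button_pending(void)
{
	acpi_event_status evt_status;
	acpi_status status;

	status = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &evt_status);
	if (ACPI_FAILURE(status)) {
		return 0;
	}

	/* Status bit latched in hardware? */
	return (evt_status & ACPI_EVENT_FLAG_SET) ? 1 : 0;
}
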
521 | |||
522 | /******************************************************************************* | ||
523 | * | ||
524 | * FUNCTION: acpi_get_gpe_status | ||
525 | * | ||
526 | * PARAMETERS: gpe_device - Parent GPE Device | ||
527 | * gpe_number - GPE level within the GPE block | ||
528 | * Flags - Called from an ISR or not | ||
529 | * event_status - Where the current status of the event will | ||
530 | * be returned | ||
531 | * | ||
532 | * RETURN: Status | ||
533 | * | ||
534 | * DESCRIPTION: Get status of an event (general purpose) | ||
535 | * | ||
536 | ******************************************************************************/ | ||
537 | acpi_status | ||
538 | acpi_get_gpe_status(acpi_handle gpe_device, | ||
539 | u32 gpe_number, u32 flags, acpi_event_status * event_status) | ||
540 | { | ||
541 | acpi_status status = AE_OK; | ||
542 | struct acpi_gpe_event_info *gpe_event_info; | ||
543 | |||
544 | ACPI_FUNCTION_TRACE(acpi_get_gpe_status); | ||
545 | |||
546 | /* Use semaphore lock if not executing at interrupt level */ | ||
547 | |||
548 | if (flags & ACPI_NOT_ISR) { | ||
549 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
550 | if (ACPI_FAILURE(status)) { | ||
551 | return_ACPI_STATUS(status); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | /* Ensure that we have a valid GPE number */ | ||
556 | |||
557 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
558 | if (!gpe_event_info) { | ||
559 | status = AE_BAD_PARAMETER; | ||
560 | goto unlock_and_exit; | ||
561 | } | ||
562 | |||
563 | /* Obtain status on the requested GPE number */ | ||
564 | |||
565 | status = acpi_hw_get_gpe_status(gpe_event_info, event_status); | ||
566 | |||
567 | if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | ||
568 | *event_status |= ACPI_EVENT_FLAG_HANDLE; | ||
569 | |||
570 | unlock_and_exit: | ||
571 | if (flags & ACPI_NOT_ISR) { | ||
572 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
573 | } | ||
574 | return_ACPI_STATUS(status); | ||
575 | } | ||
576 | |||
577 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | ||
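
The GPE counterpart, sketched with the same conventions: a NULL gpe_device for the FADT-defined blocks, an arbitrary example GPE number, and ACPI_NOT_ISR because the caller runs in process context. ACPI_EVENT_FLAG_HANDLE is the flag added by the code above when the GPE has a dispatch handler or method attached.

/* Hypothetical helper: does GPE 0x10 have a dispatcher? */
static int example_gpe_0x10_is_handled(void)
{
	acpi_event_status evt_status;
	acpi_status status;

	status = acpi_get_gpe_status(NULL, 0x10, ACPI_NOT_ISR, &evt_status);
	if (ACPI_FAILURE(status)) {
		return 0;
	}

	return (evt_status & ACPI_EVENT_FLAG_HANDLE) ? 1 : 0;
}
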
578 | /******************************************************************************* | ||
579 | * | ||
580 | * FUNCTION: acpi_install_gpe_block | ||
581 | * | ||
582 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
583 | * gpe_block_address - Address and space_id | ||
584 | * register_count - Number of GPE register pairs in the block | ||
585 | * interrupt_number - H/W interrupt for the block | ||
586 | * | ||
587 | * RETURN: Status | ||
588 | * | ||
589 | * DESCRIPTION: Create and Install a block of GPE registers | ||
590 | * | ||
591 | ******************************************************************************/ | ||
592 | acpi_status | ||
593 | acpi_install_gpe_block(acpi_handle gpe_device, | ||
594 | struct acpi_generic_address *gpe_block_address, | ||
595 | u32 register_count, u32 interrupt_number) | ||
596 | { | ||
597 | acpi_status status; | ||
598 | union acpi_operand_object *obj_desc; | ||
599 | struct acpi_namespace_node *node; | ||
600 | struct acpi_gpe_block_info *gpe_block; | ||
601 | |||
602 | ACPI_FUNCTION_TRACE(acpi_install_gpe_block); | ||
603 | |||
604 | if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { | ||
605 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
606 | } | ||
607 | |||
608 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
609 | if (ACPI_FAILURE(status)) { | ||
610 | return (status); | ||
611 | } | ||
612 | |||
613 | node = acpi_ns_map_handle_to_node(gpe_device); | ||
614 | if (!node) { | ||
615 | status = AE_BAD_PARAMETER; | ||
616 | goto unlock_and_exit; | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * For user-installed GPE Block Devices, the gpe_block_base_number | ||
621 | * is always zero | ||
622 | */ | ||
623 | status = | ||
624 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | ||
625 | interrupt_number, &gpe_block); | ||
626 | if (ACPI_FAILURE(status)) { | ||
627 | goto unlock_and_exit; | ||
628 | } | ||
629 | |||
630 | /* Run the _PRW methods and enable the GPEs */ | ||
631 | |||
632 | status = acpi_ev_initialize_gpe_block(node, gpe_block); | ||
633 | if (ACPI_FAILURE(status)) { | ||
634 | goto unlock_and_exit; | ||
635 | } | ||
636 | |||
637 | /* Get the device_object attached to the node */ | ||
638 | |||
639 | obj_desc = acpi_ns_get_attached_object(node); | ||
640 | if (!obj_desc) { | ||
641 | |||
642 | /* No object, create a new one */ | ||
643 | |||
644 | obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); | ||
645 | if (!obj_desc) { | ||
646 | status = AE_NO_MEMORY; | ||
647 | goto unlock_and_exit; | ||
648 | } | ||
649 | |||
650 | status = | ||
651 | acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); | ||
652 | |||
653 | /* Remove local reference to the object */ | ||
654 | |||
655 | acpi_ut_remove_reference(obj_desc); | ||
656 | |||
657 | if (ACPI_FAILURE(status)) { | ||
658 | goto unlock_and_exit; | ||
659 | } | ||
660 | } | ||
661 | |||
662 | /* Install the GPE block in the device_object */ | ||
663 | |||
664 | obj_desc->device.gpe_block = gpe_block; | ||
665 | |||
666 | unlock_and_exit: | ||
667 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
668 | return_ACPI_STATUS(status); | ||
669 | } | ||
670 | |||
671 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) | ||
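
A sketch of registering a GPE block for a GPE block device. The acpi_generic_address field names (space_id, bit_width, address) and ACPI_ADR_SPACE_SYSTEM_IO are assumed to match the ACPICA headers; the I/O address, register count and interrupt number are made-up example values, and gpe_device is expected to be the handle of a namespace Device node.

/* Hypothetical caller: add a small GPE block for gpe_device */
static acpi_status example_add_gpe_block(acpi_handle gpe_device)
{
	struct acpi_generic_address block_address = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,	/* assumed constant */
		.bit_width = 8,
		.address = 0x1000,			/* example I/O port */
	};

	/* Two GPE register pairs (16 GPEs), wired to example interrupt 9 */
	return acpi_install_gpe_block(gpe_device, &block_address, 2, 9);
}
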
672 | |||
673 | /******************************************************************************* | ||
674 | * | ||
675 | * FUNCTION: acpi_remove_gpe_block | ||
676 | * | ||
677 | * PARAMETERS: gpe_device - Handle to the parent GPE Block Device | ||
678 | * | ||
679 | * RETURN: Status | ||
680 | * | ||
681 | * DESCRIPTION: Remove a previously installed block of GPE registers | ||
682 | * | ||
683 | ******************************************************************************/ | ||
684 | acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) | ||
685 | { | ||
686 | union acpi_operand_object *obj_desc; | ||
687 | acpi_status status; | ||
688 | struct acpi_namespace_node *node; | ||
689 | |||
690 | ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); | ||
691 | |||
692 | if (!gpe_device) { | ||
693 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
694 | } | ||
695 | |||
696 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
697 | if (ACPI_FAILURE(status)) { | ||
698 | return (status); | ||
699 | } | ||
700 | |||
701 | node = acpi_ns_map_handle_to_node(gpe_device); | ||
702 | if (!node) { | ||
703 | status = AE_BAD_PARAMETER; | ||
704 | goto unlock_and_exit; | ||
705 | } | ||
706 | |||
707 | /* Get the device_object attached to the node */ | ||
708 | |||
709 | obj_desc = acpi_ns_get_attached_object(node); | ||
710 | if (!obj_desc || !obj_desc->device.gpe_block) { | ||
711 | status = AE_NULL_OBJECT; | ||
711 | goto unlock_and_exit; | ||
712 | } | ||
713 | |||
714 | /* Delete the GPE block (but not the device_object) */ | ||
715 | |||
716 | status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); | ||
717 | if (ACPI_SUCCESS(status)) { | ||
718 | obj_desc->device.gpe_block = NULL; | ||
719 | } | ||
720 | |||
721 | unlock_and_exit: | ||
722 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
723 | return_ACPI_STATUS(status); | ||
724 | } | ||
725 | |||
726 | ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) | ||
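
The matching teardown, continuing the sketch above. Only the GPE block itself is deleted; as the code shows, the attached device object stays in place.

/* Hypothetical caller: remove the block installed earlier */
static void example_remove_gpe_block(acpi_handle gpe_device)
{
	(void)acpi_remove_gpe_block(gpe_device);
}
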
727 | |||
728 | /******************************************************************************* | ||
729 | * | ||
730 | * FUNCTION: acpi_get_gpe_device | ||
731 | * | ||
732 | * PARAMETERS: Index - System GPE index (0-current_gpe_count) | ||
733 | * gpe_device - Where the parent GPE Device is returned | ||
734 | * | ||
735 | * RETURN: Status | ||
736 | * | ||
737 | * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL | ||
738 | * gpe_device indicates that the GPE number is contained in one of the | ||
739 | * FADT-defined GPE blocks; otherwise the GPE block device is returned. | ||
740 | * | ||
741 | ******************************************************************************/ | ||
742 | acpi_status | ||
743 | acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) | ||
744 | { | ||
745 | struct acpi_gpe_device_info info; | ||
746 | acpi_status status; | ||
747 | |||
748 | ACPI_FUNCTION_TRACE(acpi_get_gpe_device); | ||
749 | |||
750 | if (!gpe_device) { | ||
751 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
752 | } | ||
753 | |||
754 | if (index >= acpi_current_gpe_count) { | ||
755 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
756 | } | ||
757 | |||
758 | /* Setup and walk the GPE list */ | ||
759 | |||
760 | info.index = index; | ||
761 | info.status = AE_NOT_EXIST; | ||
762 | info.gpe_device = NULL; | ||
763 | info.next_block_base_index = 0; | ||
764 | |||
765 | status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); | ||
766 | if (ACPI_FAILURE(status)) { | ||
767 | return_ACPI_STATUS(status); | ||
768 | } | ||
769 | |||
770 | *gpe_device = info.gpe_device; | ||
771 | return_ACPI_STATUS(info.status); | ||
772 | } | ||
773 | |||
774 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) | ||
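
A sketch that enumerates every system GPE index using this interface together with the acpi_current_gpe_count global referenced above; example_enumerate_gpe_devices is a hypothetical caller.

/* Hypothetical caller: walk all system GPE indices */
static void example_enumerate_gpe_devices(void)
{
	acpi_handle gpe_device;
	acpi_status status;
	u32 index;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		status = acpi_get_gpe_device(index, &gpe_device);
		if (ACPI_FAILURE(status)) {
			break;
		}

		/* gpe_device == NULL: index falls in a FADT-defined block;
		 * otherwise it is the handle of a GPE block device. */
	}
}
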
775 | |||
776 | /******************************************************************************* | ||
777 | * | ||
778 | * FUNCTION: acpi_ev_get_gpe_device | ||
779 | * | ||
780 | * PARAMETERS: GPE_WALK_CALLBACK | ||
781 | * | ||
782 | * RETURN: Status | ||
783 | * | ||
784 | * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE | ||
785 | * block device. NULL if the GPE is one of the FADT-defined GPEs. | ||
786 | * | ||
787 | ******************************************************************************/ | ||
788 | acpi_status | ||
789 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | ||
790 | struct acpi_gpe_block_info *gpe_block, void *context) | ||
791 | { | ||
792 | struct acpi_gpe_device_info *info = context; | ||
793 | |||
794 | /* Increment Index by the number of GPEs in this block */ | ||
795 | |||
796 | info->next_block_base_index += | ||
797 | (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH); | ||
798 | |||
799 | if (info->index < info->next_block_base_index) { | ||
800 | /* | ||
801 | * The GPE index is within this block, get the node. Leave the node | ||
802 | * NULL for the FADT-defined GPEs | ||
803 | */ | ||
804 | if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { | ||
805 | info->gpe_device = gpe_block->node; | ||
806 | } | ||
807 | |||
808 | info->status = AE_OK; | ||
809 | return (AE_CTRL_END); | ||
810 | } | ||
811 | |||
812 | return (AE_OK); | ||
813 | } | ||
814 | |||
815 | /****************************************************************************** | ||
816 | * | ||
817 | * FUNCTION: acpi_disable_all_gpes | ||
818 | * | ||
819 | * PARAMETERS: None | ||
820 | * | ||
821 | * RETURN: Status | ||
822 | * | ||
823 | * DESCRIPTION: Disable and clear all GPEs in all GPE blocks | ||
824 | * | ||
825 | ******************************************************************************/ | ||
826 | |||
827 | acpi_status acpi_disable_all_gpes(void) | ||
828 | { | ||
829 | acpi_status status; | ||
830 | |||
831 | ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); | ||
832 | |||
833 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
834 | if (ACPI_FAILURE(status)) { | ||
835 | return_ACPI_STATUS(status); | ||
836 | } | ||
837 | |||
838 | status = acpi_hw_disable_all_gpes(); | ||
839 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
840 | |||
841 | return_ACPI_STATUS(status); | ||
842 | } | ||
843 | |||
844 | /****************************************************************************** | ||
845 | * | ||
846 | * FUNCTION: acpi_enable_all_runtime_gpes | ||
847 | * | ||
848 | * PARAMETERS: None | ||
849 | * | ||
850 | * RETURN: Status | ||
851 | * | ||
852 | * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks | ||
853 | * | ||
854 | ******************************************************************************/ | ||
855 | |||
856 | acpi_status acpi_enable_all_runtime_gpes(void) | ||
857 | { | ||
858 | acpi_status status; | ||
859 | |||
860 | ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); | ||
861 | |||
862 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
863 | if (ACPI_FAILURE(status)) { | ||
864 | return_ACPI_STATUS(status); | ||
865 | } | ||
866 | |||
867 | status = acpi_hw_enable_all_runtime_gpes(); | ||
868 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
869 | |||
870 | return_ACPI_STATUS(status); | ||
871 | } | ||
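
These two entry points are normally used as a pair around a system state change. A minimal sketch, assuming only the functions above: mask every GPE, perform the transition, then re-enable the runtime GPEs (wake-only GPEs are left disabled by the second call).

/* Hypothetical caller bracketing a low-power transition */
static acpi_status example_quiesce_and_restore_gpes(void)
{
	acpi_status status;

	status = acpi_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return status;
	}

	/* ... platform-specific low-power transition would go here ... */

	return acpi_enable_all_runtime_gpes();
}
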
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c deleted file mode 100644 index b8633947391b..000000000000 --- a/drivers/acpi/events/evxfregn.c +++ /dev/null | |||
@@ -1,254 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and | ||
4 | * Address Spaces. | ||
5 | * | ||
6 | *****************************************************************************/ | ||
7 | |||
8 | /* | ||
9 | * Copyright (C) 2000 - 2008, Intel Corp. | ||
10 | * All rights reserved. | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | ||
13 | * modification, are permitted provided that the following conditions | ||
14 | * are met: | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions, and the following disclaimer, | ||
17 | * without modification. | ||
18 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
19 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
20 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
21 | * including a substantially similar Disclaimer requirement for further | ||
22 | * binary redistribution. | ||
23 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
24 | * of any contributors may be used to endorse or promote products derived | ||
25 | * from this software without specific prior written permission. | ||
26 | * | ||
27 | * Alternatively, this software may be distributed under the terms of the | ||
28 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
29 | * Software Foundation. | ||
30 | * | ||
31 | * NO WARRANTY | ||
32 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
33 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
34 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
35 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
36 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
37 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
38 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
39 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
40 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
41 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
42 | * POSSIBILITY OF SUCH DAMAGES. | ||
43 | */ | ||
44 | |||
45 | #include <acpi/acpi.h> | ||
46 | #include <acpi/accommon.h> | ||
47 | #include <acpi/acnamesp.h> | ||
48 | #include <acpi/acevents.h> | ||
49 | |||
50 | #define _COMPONENT ACPI_EVENTS | ||
51 | ACPI_MODULE_NAME("evxfregn") | ||
52 | |||
53 | /******************************************************************************* | ||
54 | * | ||
55 | * FUNCTION: acpi_install_address_space_handler | ||
56 | * | ||
57 | * PARAMETERS: Device - Handle for the device | ||
58 | * space_id - The address space ID | ||
59 | * Handler - Address of the handler | ||
60 | * Setup - Address of the setup function | ||
61 | * Context - Value passed to the handler on each access | ||
62 | * | ||
63 | * RETURN: Status | ||
64 | * | ||
65 | * DESCRIPTION: Install a handler for all op_regions of a given space_id. | ||
66 | * | ||
67 | ******************************************************************************/ | ||
68 | acpi_status | ||
69 | acpi_install_address_space_handler(acpi_handle device, | ||
70 | acpi_adr_space_type space_id, | ||
71 | acpi_adr_space_handler handler, | ||
72 | acpi_adr_space_setup setup, void *context) | ||
73 | { | ||
74 | struct acpi_namespace_node *node; | ||
75 | acpi_status status; | ||
76 | |||
77 | ACPI_FUNCTION_TRACE(acpi_install_address_space_handler); | ||
78 | |||
79 | /* Parameter validation */ | ||
80 | |||
81 | if (!device) { | ||
82 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
83 | } | ||
84 | |||
85 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
86 | if (ACPI_FAILURE(status)) { | ||
87 | return_ACPI_STATUS(status); | ||
88 | } | ||
89 | |||
90 | /* Convert and validate the device handle */ | ||
91 | |||
92 | node = acpi_ns_map_handle_to_node(device); | ||
93 | if (!node) { | ||
94 | status = AE_BAD_PARAMETER; | ||
95 | goto unlock_and_exit; | ||
96 | } | ||
97 | |||
98 | /* Install the handler for all Regions for this Space ID */ | ||
99 | |||
100 | status = | ||
101 | acpi_ev_install_space_handler(node, space_id, handler, setup, | ||
102 | context); | ||
103 | if (ACPI_FAILURE(status)) { | ||
104 | goto unlock_and_exit; | ||
105 | } | ||
106 | |||
107 | /* Run all _REG methods for this address space */ | ||
108 | |||
109 | status = acpi_ev_execute_reg_methods(node, space_id); | ||
110 | |||
111 | unlock_and_exit: | ||
112 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
113 | return_ACPI_STATUS(status); | ||
114 | } | ||
115 | |||
116 | ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler) | ||
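
A sketch of wiring an operation-region handler to a device node. ACPI_ADR_SPACE_EC is assumed to be the Embedded Controller space ID from the ACPICA headers, and embedded_ctrl_handler / embedded_ctrl_setup are hypothetical objects of the acpi_adr_space_handler and acpi_adr_space_setup types, defined elsewhere by the driver.

/* Hypothetical handler and setup callbacks supplied by the driver */
extern acpi_adr_space_handler embedded_ctrl_handler;
extern acpi_adr_space_setup embedded_ctrl_setup;

static acpi_status example_attach_ec_handler(acpi_handle ec_device,
					     void *driver_data)
{
	/* _REG methods for this space ID run before the call returns */
	return acpi_install_address_space_handler(ec_device,
						  ACPI_ADR_SPACE_EC,
						  embedded_ctrl_handler,
						  embedded_ctrl_setup,
						  driver_data);
}
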
117 | |||
118 | /******************************************************************************* | ||
119 | * | ||
120 | * FUNCTION: acpi_remove_address_space_handler | ||
121 | * | ||
122 | * PARAMETERS: Device - Handle for the device | ||
123 | * space_id - The address space ID | ||
124 | * Handler - Address of the handler | ||
125 | * | ||
126 | * RETURN: Status | ||
127 | * | ||
128 | * DESCRIPTION: Remove a previously installed handler. | ||
129 | * | ||
130 | ******************************************************************************/ | ||
131 | acpi_status | ||
132 | acpi_remove_address_space_handler(acpi_handle device, | ||
133 | acpi_adr_space_type space_id, | ||
134 | acpi_adr_space_handler handler) | ||
135 | { | ||
136 | union acpi_operand_object *obj_desc; | ||
137 | union acpi_operand_object *handler_obj; | ||
138 | union acpi_operand_object *region_obj; | ||
139 | union acpi_operand_object **last_obj_ptr; | ||
140 | struct acpi_namespace_node *node; | ||
141 | acpi_status status; | ||
142 | |||
143 | ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler); | ||
144 | |||
145 | /* Parameter validation */ | ||
146 | |||
147 | if (!device) { | ||
148 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
149 | } | ||
150 | |||
151 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
152 | if (ACPI_FAILURE(status)) { | ||
153 | return_ACPI_STATUS(status); | ||
154 | } | ||
155 | |||
156 | /* Convert and validate the device handle */ | ||
157 | |||
158 | node = acpi_ns_map_handle_to_node(device); | ||
159 | if (!node || | ||
160 | ((node->type != ACPI_TYPE_DEVICE) && | ||
161 | (node->type != ACPI_TYPE_PROCESSOR) && | ||
162 | (node->type != ACPI_TYPE_THERMAL) && | ||
163 | (node != acpi_gbl_root_node))) { | ||
164 | status = AE_BAD_PARAMETER; | ||
165 | goto unlock_and_exit; | ||
166 | } | ||
167 | |||
168 | /* Make sure the internal object exists */ | ||
169 | |||
170 | obj_desc = acpi_ns_get_attached_object(node); | ||
171 | if (!obj_desc) { | ||
172 | status = AE_NOT_EXIST; | ||
173 | goto unlock_and_exit; | ||
174 | } | ||
175 | |||
176 | /* Find the address handler the user requested */ | ||
177 | |||
178 | handler_obj = obj_desc->device.handler; | ||
179 | last_obj_ptr = &obj_desc->device.handler; | ||
180 | while (handler_obj) { | ||
181 | |||
182 | /* We have a handler, see if user requested this one */ | ||
183 | |||
184 | if (handler_obj->address_space.space_id == space_id) { | ||
185 | |||
186 | /* Handler must be the same as the installed handler */ | ||
187 | |||
188 | if (handler_obj->address_space.handler != handler) { | ||
189 | status = AE_BAD_PARAMETER; | ||
190 | goto unlock_and_exit; | ||
191 | } | ||
192 | |||
193 | /* Matched space_id, first dereference this in the Regions */ | ||
194 | |||
195 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
196 | "Removing address handler %p(%p) for region %s on Device %p(%p)\n", | ||
197 | handler_obj, handler, | ||
198 | acpi_ut_get_region_name(space_id), | ||
199 | node, obj_desc)); | ||
200 | |||
201 | region_obj = handler_obj->address_space.region_list; | ||
202 | |||
203 | /* Walk the handler's region list */ | ||
204 | |||
205 | while (region_obj) { | ||
206 | /* | ||
207 | * First disassociate the handler from the region. | ||
208 | * | ||
209 | * NOTE: this doesn't mean that the region goes away. | ||
210 | * The region is just inaccessible, as indicated to | ||
211 | * the _REG method. | ||
212 | */ | ||
213 | acpi_ev_detach_region(region_obj, TRUE); | ||
214 | |||
215 | /* | ||
216 | * Walk the list: Just grab the head because the | ||
217 | * detach_region removed the previous head. | ||
218 | */ | ||
219 | region_obj = | ||
220 | handler_obj->address_space.region_list; | ||
221 | |||
222 | } | ||
223 | |||
224 | /* Remove this Handler object from the list */ | ||
225 | |||
226 | *last_obj_ptr = handler_obj->address_space.next; | ||
227 | |||
228 | /* Now we can delete the handler object */ | ||
229 | |||
230 | acpi_ut_remove_reference(handler_obj); | ||
231 | goto unlock_and_exit; | ||
232 | } | ||
233 | |||
234 | /* Walk the linked list of handlers */ | ||
235 | |||
236 | last_obj_ptr = &handler_obj->address_space.next; | ||
237 | handler_obj = handler_obj->address_space.next; | ||
238 | } | ||
239 | |||
240 | /* The handler does not exist */ | ||
241 | |||
242 | ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, | ||
243 | "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n", | ||
244 | handler, acpi_ut_get_region_name(space_id), space_id, | ||
245 | node, obj_desc)); | ||
246 | |||
247 | status = AE_NOT_EXIST; | ||
248 | |||
249 | unlock_and_exit: | ||
250 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
251 | return_ACPI_STATUS(status); | ||
252 | } | ||
253 | |||
254 | ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler) | ||
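
And the matching removal, under the same assumptions as the sketch above. The handler argument must be the pointer that was originally installed; otherwise the code above returns AE_BAD_PARAMETER.

extern acpi_adr_space_handler embedded_ctrl_handler;	/* as installed above */

/* Hypothetical caller: detach the EC space handler again */
static acpi_status example_detach_ec_handler(acpi_handle ec_device)
{
	return acpi_remove_address_space_handler(ec_device,
						 ACPI_ADR_SPACE_EC,
						 embedded_ctrl_handler);
}
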