Diffstat (limited to 'drivers/acpi/acpica/evgpe.c')
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 153
1 file changed, 109 insertions(+), 44 deletions(-)
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b9d50ef9f3ec..deb26f4c6623 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ * DESCRIPTION: Updates GPE register enable masks based upon whether there are
+ *              references (either wake or run) to this GPE
  *
  ******************************************************************************/
 
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
             (1 <<
              (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
 
+        /* Clear the wake/run bits up front */
+
         ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
         ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
 
-        if (gpe_event_info->runtime_count)
+        /* Set the mask bits only if there are references to this GPE */
+
+        if (gpe_event_info->runtime_count) {
                 ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+        }
 
-        if (gpe_event_info->wakeup_count)
+        if (gpe_event_info->wakeup_count) {
                 ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+        }
 
         return_ACPI_STATUS(AE_OK);
 }
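For readers following along outside the ACPICA tree, here is a minimal stand-alone sketch of the pattern the hunk above introduces: the per-register run and wake enable masks are recomputed from nothing but the GPE's reference counts, so a GPE with zero references always ends up with both bits clear. The struct and field names below are simplified stand-ins for the real acpi_gpe_register_info / acpi_gpe_event_info types, not ACPICA code.

/* Toy model (not ACPICA code): derive per-register enable masks from
 * reference counts, mirroring the pattern in the hunk above. */
#include <stdio.h>
#include <stdint.h>

struct gpe_register {            /* stand-in for struct acpi_gpe_register_info */
        uint8_t enable_for_run;
        uint8_t enable_for_wake;
        uint8_t base_gpe_number;
};

struct gpe_event {               /* stand-in for struct acpi_gpe_event_info */
        uint8_t gpe_number;
        unsigned int runtime_count;
        unsigned int wakeup_count;
        struct gpe_register *reg;
};

static void update_enable_masks(struct gpe_event *ev)
{
        uint8_t bit = 1u << (ev->gpe_number - ev->reg->base_gpe_number);

        /* Clear the wake/run bits up front */
        ev->reg->enable_for_wake &= ~bit;
        ev->reg->enable_for_run &= ~bit;

        /* Set the mask bits only if there are references to this GPE */
        if (ev->runtime_count)
                ev->reg->enable_for_run |= bit;
        if (ev->wakeup_count)
                ev->reg->enable_for_wake |= bit;
}

int main(void)
{
        struct gpe_register reg = { 0, 0, 0x10 };
        struct gpe_event ev = { 0x13, 1, 0, &reg };

        update_enable_masks(&ev);
        printf("run mask 0x%02x, wake mask 0x%02x\n",
               reg.enable_for_run, reg.enable_for_wake);
        return 0;
}

Built with any C compiler, this should report a run mask with only bit 3 set and an empty wake mask, which is what the mask update would produce for a GPE holding a single runtime reference.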
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Enable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
+ *              of type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
  *
  ******************************************************************************/
 
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
         acpi_status status;
 
+
         ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-        /* Make sure HW enable masks are updated */
+
+        /*
+         * We will only allow a GPE to be enabled if it has either an
+         * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+         * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+         * first time it fires.
+         */
+        if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+                return_ACPI_STATUS(AE_NO_HANDLER);
+        }
+
+        /* Ensure the HW enable masks are current */
 
         status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
-        if (ACPI_FAILURE(status))
+        if (ACPI_FAILURE(status)) {
                 return_ACPI_STATUS(status);
+        }
+
+        /* Clear the GPE (of stale events) */
 
-        /* Clear the GPE (of stale events), then enable it */
         status = acpi_hw_clear_gpe(gpe_event_info);
-        if (ACPI_FAILURE(status))
+        if (ACPI_FAILURE(status)) {
                 return_ACPI_STATUS(status);
+        }
 
         /* Enable the requested GPE */
+
         status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
         return_ACPI_STATUS(status);
 }
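The comment block added here carries the core of the change: a GPE with no _Lxx/_Exx method and no installed handler is never worth enabling, because acpi_ev_gpe_dispatch() would disable it again the first time it fired. A toy sketch of that guard, using illustrative constants rather than the real ACPI_GPE_DISPATCH_* values, might look like this:

/* Toy sketch (not ACPICA code) of the enable guard added above: refuse to
 * enable a GPE that has neither a method nor a handler. */
#include <stdio.h>

#define GPE_DISPATCH_NONE    0x00    /* no way to service the event */
#define GPE_DISPATCH_METHOD  0x01    /* a _Lxx/_Exx control method exists */
#define GPE_DISPATCH_HANDLER 0x02    /* a driver-installed handler exists */
#define GPE_DISPATCH_MASK    0x03

enum status { OK, NO_HANDLER };

static enum status enable_gpe(unsigned int flags)
{
        /* Reject GPEs that nothing can service */
        if (!(flags & GPE_DISPATCH_MASK))
                return NO_HANDLER;

        /* Real code would now update the enable masks, clear stale
         * status, and write the hardware enable register. */
        return OK;
}

int main(void)
{
        printf("no method/handler: %s\n",
               enable_gpe(GPE_DISPATCH_NONE) == NO_HANDLER ? "rejected" : "enabled");
        printf("with handler:      %s\n",
               enable_gpe(GPE_DISPATCH_HANDLER) == OK ? "enabled" : "rejected");
        return 0;
}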
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Disable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
+ *              regardless of the type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
  *
  ******************************************************************************/
 
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
         ACPI_FUNCTION_TRACE(ev_disable_gpe);
 
-        /* Make sure HW enable masks are updated */
+
+        /*
+         * Note: Always disable the GPE, even if we think that it is already
+         * disabled. It is possible that the AML or some other code has enabled
+         * the GPE behind our back.
+         */
+
+        /* Ensure the HW enable masks are current */
 
         status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
-        if (ACPI_FAILURE(status))
+        if (ACPI_FAILURE(status)) {
                 return_ACPI_STATUS(status);
+        }
 
         /*
-         * Even if we don't know the GPE type, make sure that we always
-         * disable it. low_disable_gpe will just clear the enable bit for this
-         * GPE and write it. It will not write out the current GPE enable mask,
-         * since this may inadvertently enable GPEs too early, if a rogue GPE has
-         * come in during ACPICA initialization - possibly as a result of AML or
-         * other code that has enabled the GPE.
+         * Always H/W disable this GPE, even if we don't know the GPE type.
+         * Simply clear the enable bit for this particular GPE, but do not
+         * write out the current GPE enable mask since this may inadvertently
+         * enable GPEs too early. An example is a rogue GPE that has arrived
+         * during ACPICA initialization - possibly because AML or other code
+         * has enabled the GPE.
          */
         status = acpi_hw_low_disable_gpe(gpe_event_info);
         return_ACPI_STATUS(status);
 }
 
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_low_get_gpe_info
+ *
+ * PARAMETERS:  gpe_number          - Raw GPE number
+ *              gpe_block           - A GPE info block
+ *
+ * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
+ *              is not within the specified GPE block)
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
+ *              the low-level implementation of ev_get_gpe_event_info.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+                                                     struct acpi_gpe_block_info
+                                                     *gpe_block)
+{
+        u32 gpe_index;
+
+        /*
+         * Validate that the gpe_number is within the specified gpe_block.
+         * (Two steps)
+         */
+        if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
+                return (NULL);
+        }
+
+        gpe_index = gpe_number - gpe_block->block_base_number;
+        if (gpe_index >= gpe_block->gpe_count) {
+                return (NULL);
+        }
+
+        return (&gpe_block->event_info[gpe_index]);
+}
+
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_get_gpe_event_info
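The new acpi_ev_low_get_gpe_info() boils the lookup down to a two-step range check: the GPE number must not be below the block's base, and the resulting index must not run past the block's GPE count. A self-contained sketch with simplified stand-in types (not the ACPICA structs) shows why the order of the two tests matters for the unsigned subtraction:

/* Stand-alone sketch of the two-step range check: reject numbers below the
 * block base first, then reject indices past the end of the block. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct gpe_block {                  /* stand-in for struct acpi_gpe_block_info */
        uint32_t block_base_number; /* first GPE number covered by this block */
        uint32_t gpe_count;         /* number of GPEs in this block */
        int event_info[32];         /* stand-in for the event_info array */
};

static int *low_get_gpe_info(uint32_t gpe_number, struct gpe_block *block)
{
        uint32_t gpe_index;

        /* Step 1: block must exist and the number must not be below its base */
        if (!block || gpe_number < block->block_base_number)
                return NULL;

        /* Step 2: the index must fall inside the block */
        gpe_index = gpe_number - block->block_base_number;
        if (gpe_index >= block->gpe_count)
                return NULL;

        return &block->event_info[gpe_index];
}

int main(void)
{
        struct gpe_block blk = { .block_base_number = 0x10, .gpe_count = 16 };

        printf("GPE 0x13 %s\n", low_get_gpe_info(0x13, &blk) ? "found" : "not found");
        printf("GPE 0x30 %s\n", low_get_gpe_info(0x30, &blk) ? "found" : "not found");
        return 0;
}

With the sample block covering GPEs 0x10-0x1f, the first lookup should succeed and the second should return NULL.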
@@ -184,7 +260,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                                                u32 gpe_number)
 {
         union acpi_operand_object *obj_desc;
-        struct acpi_gpe_block_info *gpe_block;
+        struct acpi_gpe_event_info *gpe_info;
         u32 i;
 
         ACPI_FUNCTION_ENTRY();
@@ -196,17 +272,11 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                 /* Examine GPE Block 0 and 1 (These blocks are permanent) */
 
                 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
-                        gpe_block = acpi_gbl_gpe_fadt_blocks[i];
-                        if (gpe_block) {
-                                if ((gpe_number >= gpe_block->block_base_number)
-                                    && (gpe_number <
-                                        gpe_block->block_base_number +
-                                        (gpe_block->register_count * 8))) {
-                                        return (&gpe_block->
-                                                event_info[gpe_number -
-                                                           gpe_block->
-                                                           block_base_number]);
-                                }
+                        gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
+                                                            acpi_gbl_gpe_fadt_blocks
+                                                            [i]);
+                        if (gpe_info) {
+                                return (gpe_info);
                         }
                 }
 
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                 return (NULL);
         }
 
-        gpe_block = obj_desc->device.gpe_block;
-
-        if ((gpe_number >= gpe_block->block_base_number) &&
-            (gpe_number <
-             gpe_block->block_base_number + (gpe_block->register_count * 8))) {
-                return (&gpe_block->
-                        event_info[gpe_number - gpe_block->block_base_number]);
-        }
-
-        return (NULL);
+        return (acpi_ev_low_get_gpe_info
+                (gpe_number, obj_desc->device.gpe_block));
 }
 
 /*******************************************************************************
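Both call sites above now follow the same wrapper pattern: acpi_ev_get_gpe_event_info() simply walks the permanent FADT blocks (or hands over the GPE device's own block) and lets the low-level helper decide whether the number falls inside a block. A compact stand-alone sketch of that shape, with hypothetical names and the helper from the earlier sketch repeated so it compiles on its own:

/* Sketch of the wrapper pattern: try each permanent block in turn and
 * delegate the range check to the low-level lookup helper. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_GPE_BLOCKS 2

struct gpe_block {
        uint32_t block_base_number;
        uint32_t gpe_count;
        int event_info[32];
};

/* The two permanent (FADT-defined) blocks; either pointer may be NULL */
static struct gpe_block *fadt_blocks[MAX_GPE_BLOCKS];

static int *low_get_gpe_info(uint32_t gpe_number, struct gpe_block *block)
{
        uint32_t index;

        if (!block || gpe_number < block->block_base_number)
                return NULL;
        index = gpe_number - block->block_base_number;
        if (index >= block->gpe_count)
                return NULL;
        return &block->event_info[index];
}

/* Wrapper: the first block that claims the GPE number wins */
static int *get_gpe_event_info(uint32_t gpe_number)
{
        unsigned int i;
        int *info;

        for (i = 0; i < MAX_GPE_BLOCKS; i++) {
                info = low_get_gpe_info(gpe_number, fadt_blocks[i]);
                if (info)
                        return info;
        }
        return NULL;
}

int main(void)
{
        static struct gpe_block blk0 = { .block_base_number = 0x00, .gpe_count = 16 };
        static struct gpe_block blk1 = { .block_base_number = 0x20, .gpe_count = 16 };

        fadt_blocks[0] = &blk0;
        fadt_blocks[1] = &blk1;

        printf("GPE 0x25 %s\n", get_gpe_event_info(0x25) ? "found" : "not found");
        printf("GPE 0x15 %s\n", get_gpe_event_info(0x15) ? "found" : "not found");
        return 0;
}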
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 return_VOID;
         }
 
-        /* Set the GPE flags for return to enabled state */
+        /* Update the GPE register masks for return to enabled state */
 
         (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
 
@@ -569,15 +631,18 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 
         default:
 
-                /* No handler or method to run! */
-
+                /*
+                 * No handler or method to run!
+                 * 03/2010: This case should no longer be possible. We will not allow
+                 * a GPE to be enabled if it has no handler or method.
+                 */
                 ACPI_ERROR((AE_INFO,
                             "No handler or method for GPE[0x%2X], disabling event",
                             gpe_number));
 
                 /*
-                 * Disable the GPE. The GPE will remain disabled until the ACPICA
-                 * Core Subsystem is restarted, or a handler is installed.
+                 * Disable the GPE. The GPE will remain disabled until a handler
+                 * is installed or ACPICA is restarted.
                  */
                 status = acpi_ev_disable_gpe(gpe_event_info);
                 if (ACPI_FAILURE(status)) {