diff options
Diffstat (limited to 'drivers/gpio/gpiolib-acpi.c')
-rw-r--r-- | drivers/gpio/gpiolib-acpi.c | 86 |
1 file changed, 50 insertions, 36 deletions
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c48ed9d89ff5..8b9d7e42c600 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | struct acpi_gpio_event { | 26 | struct acpi_gpio_event { |
27 | struct list_head node; | 27 | struct list_head node; |
28 | struct list_head initial_sync_list; | ||
29 | acpi_handle handle; | 28 | acpi_handle handle; |
30 | unsigned int pin; | 29 | unsigned int pin; |
31 | unsigned int irq; | 30 | unsigned int irq; |
@@ -49,10 +48,19 @@ struct acpi_gpio_chip { | |||
49 | struct mutex conn_lock; | 48 | struct mutex conn_lock; |
50 | struct gpio_chip *chip; | 49 | struct gpio_chip *chip; |
51 | struct list_head events; | 50 | struct list_head events; |
51 | struct list_head deferred_req_irqs_list_entry; | ||
52 | }; | 52 | }; |
53 | 53 | ||
54 | static LIST_HEAD(acpi_gpio_initial_sync_list); | 54 | /* |
55 | static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); | 55 | * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init |
56 | * (so builtin drivers) we register the ACPI GpioInt event handlers from a | ||
57 | * late_initcall_sync handler, so that other builtin drivers can register their | ||
58 | * OpRegions before the event handlers can run. This list contains gpiochips | ||
59 | * for which the acpi_gpiochip_request_interrupts() has been deferred. | ||
60 | */ | ||
61 | static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); | ||
62 | static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); | ||
63 | static bool acpi_gpio_deferred_req_irqs_done; | ||
56 | 64 | ||
57 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | 65 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) |
58 | { | 66 | { |
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) | |||
89 | return gpiochip_get_desc(chip, pin); | 97 | return gpiochip_get_desc(chip, pin); |
90 | } | 98 | } |
91 | 99 | ||
92 | static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) | ||
93 | { | ||
94 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
95 | list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); | ||
96 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
97 | } | ||
98 | |||
99 | static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) | ||
100 | { | ||
101 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
102 | if (!list_empty(&event->initial_sync_list)) | ||
103 | list_del_init(&event->initial_sync_list); | ||
104 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
105 | } | ||
106 | |||
107 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) | 100 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) |
108 | { | 101 | { |
109 | struct acpi_gpio_event *event = data; | 102 | struct acpi_gpio_event *event = data; |
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
186 | 179 | ||
187 | gpiod_direction_input(desc); | 180 | gpiod_direction_input(desc); |
188 | 181 | ||
189 | value = gpiod_get_value(desc); | 182 | value = gpiod_get_value_cansleep(desc); |
190 | 183 | ||
191 | ret = gpiochip_lock_as_irq(chip, pin); | 184 | ret = gpiochip_lock_as_irq(chip, pin); |
192 | if (ret) { | 185 | if (ret) { |
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
229 | event->irq = irq; | 222 | event->irq = irq; |
230 | event->pin = pin; | 223 | event->pin = pin; |
231 | event->desc = desc; | 224 | event->desc = desc; |
232 | INIT_LIST_HEAD(&event->initial_sync_list); | ||
233 | 225 | ||
234 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, | 226 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, |
235 | "ACPI:Event", event); | 227 | "ACPI:Event", event); |
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
251 | * may refer to OperationRegions from other (builtin) drivers which | 243 | * may refer to OperationRegions from other (builtin) drivers which |
252 | * may be probed after us. | 244 | * may be probed after us. |
253 | */ | 245 | */ |
254 | if (handler == acpi_gpio_irq_handler && | 246 | if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || |
255 | (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || | 247 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) |
256 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) | 248 | handler(event->irq, event); |
257 | acpi_gpio_add_to_initial_sync_list(event); | ||
258 | 249 | ||
259 | return AE_OK; | 250 | return AE_OK; |
260 | 251 | ||
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
283 | struct acpi_gpio_chip *acpi_gpio; | 274 | struct acpi_gpio_chip *acpi_gpio; |
284 | acpi_handle handle; | 275 | acpi_handle handle; |
285 | acpi_status status; | 276 | acpi_status status; |
277 | bool defer; | ||
286 | 278 | ||
287 | if (!chip->parent || !chip->to_irq) | 279 | if (!chip->parent || !chip->to_irq) |
288 | return; | 280 | return; |
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
295 | if (ACPI_FAILURE(status)) | 287 | if (ACPI_FAILURE(status)) |
296 | return; | 288 | return; |
297 | 289 | ||
290 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
291 | defer = !acpi_gpio_deferred_req_irqs_done; | ||
292 | if (defer) | ||
293 | list_add(&acpi_gpio->deferred_req_irqs_list_entry, | ||
294 | &acpi_gpio_deferred_req_irqs_list); | ||
295 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
296 | |||
297 | if (defer) | ||
298 | return; | ||
299 | |||
298 | acpi_walk_resources(handle, "_AEI", | 300 | acpi_walk_resources(handle, "_AEI", |
299 | acpi_gpiochip_request_interrupt, acpi_gpio); | 301 | acpi_gpiochip_request_interrupt, acpi_gpio); |
300 | } | 302 | } |
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
325 | if (ACPI_FAILURE(status)) | 327 | if (ACPI_FAILURE(status)) |
326 | return; | 328 | return; |
327 | 329 | ||
330 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
331 | if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) | ||
332 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); | ||
333 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
334 | |||
328 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { | 335 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { |
329 | struct gpio_desc *desc; | 336 | struct gpio_desc *desc; |
330 | 337 | ||
331 | acpi_gpio_del_from_initial_sync_list(event); | ||
332 | |||
333 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) | 338 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) |
334 | disable_irq_wake(event->irq); | 339 | disable_irq_wake(event->irq); |
335 | 340 | ||
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) | |||
1052 | 1057 | ||
1053 | acpi_gpio->chip = chip; | 1058 | acpi_gpio->chip = chip; |
1054 | INIT_LIST_HEAD(&acpi_gpio->events); | 1059 | INIT_LIST_HEAD(&acpi_gpio->events); |
1060 | INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); | ||
1055 | 1061 | ||
1056 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); | 1062 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); |
1057 | if (ACPI_FAILURE(status)) { | 1063 | if (ACPI_FAILURE(status)) { |
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) | |||
1198 | return con_id == NULL; | 1204 | return con_id == NULL; |
1199 | } | 1205 | } |
1200 | 1206 | ||
1201 | /* Sync the initial state of handlers after all builtin drivers have probed */ | 1207 | /* Run deferred acpi_gpiochip_request_interrupts() */ |
1202 | static int acpi_gpio_initial_sync(void) | 1208 | static int acpi_gpio_handle_deferred_request_interrupts(void) |
1203 | { | 1209 | { |
1204 | struct acpi_gpio_event *event, *ep; | 1210 | struct acpi_gpio_chip *acpi_gpio, *tmp; |
1211 | |||
1212 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
1213 | list_for_each_entry_safe(acpi_gpio, tmp, | ||
1214 | &acpi_gpio_deferred_req_irqs_list, | ||
1215 | deferred_req_irqs_list_entry) { | ||
1216 | acpi_handle handle; | ||
1205 | 1217 | ||
1206 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | 1218 | handle = ACPI_HANDLE(acpi_gpio->chip->parent); |
1207 | list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, | 1219 | acpi_walk_resources(handle, "_AEI", |
1208 | initial_sync_list) { | 1220 | acpi_gpiochip_request_interrupt, acpi_gpio); |
1209 | acpi_evaluate_object(event->handle, NULL, NULL, NULL); | 1221 | |
1210 | list_del_init(&event->initial_sync_list); | 1222 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); |
1211 | } | 1223 | } |
1212 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | 1224 | |
1225 | acpi_gpio_deferred_req_irqs_done = true; | ||
1226 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
1213 | 1227 | ||
1214 | return 0; | 1228 | return 0; |
1215 | } | 1229 | } |
1216 | /* We must use _sync so that this runs after the first deferred_probe run */ | 1230 | /* We must use _sync so that this runs after the first deferred_probe run */ |
1217 | late_initcall_sync(acpi_gpio_initial_sync); | 1231 | late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); |