author      Hans de Goede <hdegoede@redhat.com>          2018-11-28 11:57:55 -0500
committer   Linus Walleij <linus.walleij@linaro.org>     2018-12-07 05:01:29 -0500
commit      e59f5e08ece1060073d92c66ded52e1f2c43b5bb (patch)
tree        c4c4e055d2608f315452c9261c0f2730cb0f3169 /drivers/gpio/gpiolib-acpi.c
parent      2e6e902d185027f8e3cb8b7305238f7e35d6a436 (diff)
gpiolib-acpi: Only defer request_irq for GpioInt ACPI event handlers
Commit 78d3a92edbfb ("gpiolib-acpi: Register GpioInt ACPI event handlers
from a late_initcall") deferred the entire acpi_gpiochip_request_interrupt
call for each event resource.

This means it also delays the gpiochip_request_own_desc(..., "ACPI:Event")
call. This is a problem if some AML code reads the GPIO pin before we run
the deferred acpi_gpiochip_request_interrupt, because in that case
acpi_gpio_adr_space_handler() will already have called
gpiochip_request_own_desc(..., "ACPI:OpRegion"), causing the call from
acpi_gpiochip_request_interrupt to fail with -EBUSY, and we will fail to
register an event handler.

acpi_gpio_adr_space_handler is prepared for acpi_gpiochip_request_interrupt
already having claimed the pin, but the other way around does not work.

One example of a problem this causes is the event handler for the OTG ID
pin on a Prowise PT301 tablet not registering, keeping the port stuck in
whatever mode it was in during boot and e.g. only allowing charging after
a reboot.

This commit fixes this by only deferring the request_irq call and the
initial run of edge-triggered IRQs, instead of deferring all of
acpi_gpiochip_request_interrupt.

Cc: stable@vger.kernel.org
Fixes: 78d3a92edbfb ("gpiolib-acpi: Register GpioInt ACPI event ...")
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
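[Editor's note] To make the ordering problem concrete, here is a minimal userspace sketch (not kernel code) of the two-phase approach the commit describes. claim_pin(), opregion_read(), alloc_event() and request_event_irq() are illustrative stand-ins for gpiochip_request_own_desc(), acpi_gpio_adr_space_handler() and the new acpi_gpiochip_alloc_event()/acpi_gpiochip_request_irq() split in the diff below; descriptors, IRQ flags and error paths are omitted.

/* Simplified model: whoever claims the pin first owns it. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *pin_owner;   /* NULL means the pin is unclaimed */

/* Stand-in for gpiochip_request_own_desc(): first caller owns the pin. */
static bool claim_pin(const char *label)
{
        if (pin_owner && strcmp(pin_owner, label) != 0)
                return false;   /* the kernel would return -EBUSY here */
        pin_owner = label;
        return true;
}

/* AML reading the pin via the GPIO OpRegion handler (may run before late_init). */
static void opregion_read(void)
{
        claim_pin("ACPI:OpRegion");
        printf("OpRegion read, pin owned by %s\n", pin_owner);
}

/* Phase 1, runs when interrupts are requested: claim the pin, save the config. */
static bool alloc_event(void)
{
        return claim_pin("ACPI:Event");
}

/* Phase 2, deferred to late_initcall_sync: only the IRQ request happens here. */
static void request_event_irq(void)
{
        printf("request_irq done, pin owned by %s\n", pin_owner);
}

int main(void)
{
        if (!alloc_event())     /* with the fix this claim happens early... */
                printf("event registration failed (-EBUSY)\n");
        opregion_read();        /* ...so AML touching the pin cannot steal it */
        request_event_irq();    /* the deferred part still registers the IRQ */
        return 0;
}

The point is only the ordering: because the event path claims the pin before any AML can run, a later OpRegion access finds the pin already owned by "ACPI:Event" rather than the other way around.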
Diffstat (limited to 'drivers/gpio/gpiolib-acpi.c')
-rw-r--r--   drivers/gpio/gpiolib-acpi.c   144
1 file changed, 84 insertions(+), 60 deletions(-)
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 55b72fbe1631..7f93954c58ea 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -19,11 +19,28 @@
 
 #include "gpiolib.h"
 
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node: list-entry of the events list of the struct acpi_gpio_chip
+ * @handle: handle of ACPI method to execute when the IRQ triggers
+ * @handler: irq_handler to pass to request_irq when requesting the IRQ
+ * @pin: GPIO pin number on the gpio_chip
+ * @irq: Linux IRQ number for the event, for request_ / free_irq
+ * @irqflags: flags to pass to request_irq when requesting the IRQ
+ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
+ * @irq_requested: True if request_irq has been done
+ * @desc: gpio_desc for the GPIO pin for this event
+ */
 struct acpi_gpio_event {
 	struct list_head node;
 	acpi_handle handle;
+	irq_handler_t handler;
 	unsigned int pin;
 	unsigned int irq;
+	unsigned long irqflags;
+	bool irq_is_wake;
+	bool irq_requested;
 	struct gpio_desc *desc;
 };
 
@@ -49,10 +66,10 @@ struct acpi_gpio_chip {
 
 /*
  * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
  * late_initcall_sync handler, so that other builtin drivers can register their
  * OpRegions before the event handlers can run. This list contains gpiochips
- * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
  */
 static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
 static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
 }
 EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
 
-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
-						   void *context)
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+				      struct acpi_gpio_event *event)
+{
+	int ret, value;
+
+	ret = request_threaded_irq(event->irq, NULL, event->handler,
+				   event->irqflags, "ACPI:Event", event);
+	if (ret) {
+		dev_err(acpi_gpio->chip->parent,
+			"Failed to setup interrupt handler for %d\n",
+			event->irq);
+		return;
+	}
+
+	if (event->irq_is_wake)
+		enable_irq_wake(event->irq);
+
+	event->irq_requested = true;
+
+	/* Make sure we trigger the initial state of edge-triggered IRQs */
+	value = gpiod_get_raw_value_cansleep(event->desc);
+	if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+	    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+		event->handler(event->irq, event);
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+	struct acpi_gpio_event *event;
+
+	list_for_each_entry(event, &acpi_gpio->events, node)
+		acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+					     void *context)
 {
 	struct acpi_gpio_chip *acpi_gpio = context;
 	struct gpio_chip *chip = acpi_gpio->chip;
@@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	struct acpi_gpio_event *event;
 	irq_handler_t handler = NULL;
 	struct gpio_desc *desc;
-	unsigned long irqflags;
-	int ret, pin, irq, value;
+	int ret, pin, irq;
 
 	if (!acpi_gpio_get_irq_resource(ares, &agpio))
 		return AE_OK;
@@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
 	gpiod_direction_input(desc);
 
-	value = gpiod_get_value_cansleep(desc);
-
 	ret = gpiochip_lock_as_irq(chip, pin);
 	if (ret) {
 		dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 		goto fail_unlock_irq;
 	}
 
-	irqflags = IRQF_ONESHOT;
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (!event)
+		goto fail_unlock_irq;
+
+	event->irqflags = IRQF_ONESHOT;
 	if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
 		if (agpio->polarity == ACPI_ACTIVE_HIGH)
-			irqflags |= IRQF_TRIGGER_HIGH;
+			event->irqflags |= IRQF_TRIGGER_HIGH;
 		else
-			irqflags |= IRQF_TRIGGER_LOW;
+			event->irqflags |= IRQF_TRIGGER_LOW;
 	} else {
 		switch (agpio->polarity) {
 		case ACPI_ACTIVE_HIGH:
-			irqflags |= IRQF_TRIGGER_RISING;
+			event->irqflags |= IRQF_TRIGGER_RISING;
 			break;
 		case ACPI_ACTIVE_LOW:
-			irqflags |= IRQF_TRIGGER_FALLING;
+			event->irqflags |= IRQF_TRIGGER_FALLING;
 			break;
 		default:
-			irqflags |= IRQF_TRIGGER_RISING |
-				    IRQF_TRIGGER_FALLING;
+			event->irqflags |= IRQF_TRIGGER_RISING |
+					   IRQF_TRIGGER_FALLING;
 			break;
 		}
 	}
 
-	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (!event)
-		goto fail_unlock_irq;
-
 	event->handle = evt_handle;
+	event->handler = handler;
 	event->irq = irq;
+	event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
 	event->pin = pin;
 	event->desc = desc;
 
-	ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
-				   "ACPI:Event", event);
-	if (ret) {
-		dev_err(chip->parent,
-			"Failed to setup interrupt handler for %d\n",
-			event->irq);
-		goto fail_free_event;
-	}
-
-	if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
-		enable_irq_wake(irq);
-
 	list_add_tail(&event->node, &acpi_gpio->events);
 
-	/*
-	 * Make sure we trigger the initial state of the IRQ when using RISING
-	 * or FALLING. Note we run the handlers on late_init, the AML code
-	 * may refer to OperationRegions from other (builtin) drivers which
-	 * may be probed after us.
-	 */
-	if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	    ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-		handler(event->irq, event);
-
 	return AE_OK;
 
-fail_free_event:
-	kfree(event);
 fail_unlock_irq:
 	gpiochip_unlock_as_irq(chip, pin);
 fail_free_desc:
@@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
 	if (ACPI_FAILURE(status))
 		return;
 
+	acpi_walk_resources(handle, "_AEI",
+			    acpi_gpiochip_alloc_event, acpi_gpio);
+
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	defer = !acpi_gpio_deferred_req_irqs_done;
 	if (defer)
@@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
 	if (defer)
 		return;
 
-	acpi_walk_resources(handle, "_AEI",
-			    acpi_gpiochip_request_interrupt, acpi_gpio);
+	acpi_gpiochip_request_irqs(acpi_gpio);
 }
 EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
 
@@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
-		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
-			disable_irq_wake(event->irq);
+		if (event->irq_requested) {
+			if (event->irq_is_wake)
+				disable_irq_wake(event->irq);
+
+			free_irq(event->irq, event);
+		}
 
-		free_irq(event->irq, event);
 		desc = event->desc;
 		if (WARN_ON(IS_ERR(desc)))
 			continue;
@@ -1200,23 +1231,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 	return con_id == NULL;
 }
 
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
 {
 	struct acpi_gpio_chip *acpi_gpio, *tmp;
 
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	list_for_each_entry_safe(acpi_gpio, tmp,
 				 &acpi_gpio_deferred_req_irqs_list,
-				 deferred_req_irqs_list_entry) {
-		acpi_handle handle;
-
-		handle = ACPI_HANDLE(acpi_gpio->chip->parent);
-		acpi_walk_resources(handle, "_AEI",
-				    acpi_gpiochip_request_interrupt, acpi_gpio);
-
-		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
-	}
+				 deferred_req_irqs_list_entry)
+		acpi_gpiochip_request_irqs(acpi_gpio);
 
 	acpi_gpio_deferred_req_irqs_done = true;
 	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1224,4 +1248,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
 	return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);