author		Jonathan Cameron <jic23@cam.ac.uk>	2011-05-18 09:41:18 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-05-19 19:14:48 -0400
commit		d96d1337e339521a2bd56dc9d51fef140c1a49ee (patch)
tree		d3ad4b444c9d25b46f4183b59abee4d2003f4c54 /drivers/staging/iio/industrialio-trigger.c
parent		461be806744d0c83babcfa5d63993b43bd801c46 (diff)
staging:iio: Add infrastructure for irq_chip based triggers
V3: Get rid of the separate interrupt pool. This is handled well
by irq_alloc_descs and irq_free_descs, two functions I simply
wasn't aware of previously. Thus the allocation for a given
trigger is now handled by core code rather than us reinventing
the wheel.
V2: Stop the silly name duplication.
Move pool handling into industrialio-trigger as that is the only user.
Changed over to using irq_modify_status rather than the ARM-specific
set_irq_flags, as per Thomas Gleixner's suggestion.
Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
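
[Editor's note: a minimal sketch, not part of this commit, of how a hardware trigger driver of this era might sit on top of the new infrastructure. The trigger is allocated with iio_allocate_trigger_named(), so the core sets up the per-trigger irq_chip and its bank of CONFIG_IIO_CONSUMERS_PER_TRIGGER sub-irq descriptors; the driver's own interrupt handler then calls iio_trigger_poll(), which fans the event out to every enabled consumer sub-irq via generic_handle_irq(). The driver name, hw_irq wiring, my_hw_trigger_isr() and my_hw_trigger_setup() are hypothetical; only iio_allocate_trigger_named(), iio_trigger_poll(), iio_trigger_register(), iio_free_trigger() and iio_get_time_ns() come from the staging IIO code touched or used by this patch. Include paths assume the file lives under drivers/staging/iio/trigger/.]

/* Hypothetical example, not part of this patch. */
#include <linux/interrupt.h>
#include <linux/irq.h>

#include "../iio.h"
#include "../trigger.h"

static irqreturn_t my_hw_trigger_isr(int irq, void *private)
{
	struct iio_trigger *trig = private;

	/* Fans out to every consumer whose sub-irq is currently enabled. */
	iio_trigger_poll(trig, iio_get_time_ns());
	return IRQ_HANDLED;
}

static int my_hw_trigger_setup(int hw_irq)	/* hypothetical entry point */
{
	struct iio_trigger *trig;
	int ret;

	/*
	 * Named allocation gives the trigger its per-trigger irq_chip and
	 * sub-irq descriptors via irq_alloc_descs() in the core.
	 */
	trig = iio_allocate_trigger_named("my-hw-trigger");
	if (trig == NULL)
		return -ENOMEM;

	ret = request_irq(hw_irq, my_hw_trigger_isr, 0, "my-hw-trigger", trig);
	if (ret)
		goto error_free_trigger;

	ret = iio_trigger_register(trig);
	if (ret)
		goto error_free_irq;

	return 0;

error_free_irq:
	free_irq(hw_irq, trig);
error_free_trigger:
	iio_free_trigger(trig);
	return ret;
}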
Diffstat (limited to 'drivers/staging/iio/industrialio-trigger.c')
-rw-r--r--	drivers/staging/iio/industrialio-trigger.c	175
1 file changed, 145 insertions(+), 30 deletions(-)
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 083847c5c2d..5496ee272e2 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -163,6 +163,7 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
 
 void iio_trigger_poll(struct iio_trigger *trig, s64 time)
 {
+	int i;
 	struct iio_poll_func *pf_cursor;
 
 	list_for_each_entry(pf_cursor, &trig->pollfunc_list, list) {
@@ -178,6 +179,13 @@ void iio_trigger_poll(struct iio_trigger *trig, s64 time)
 			trig->use_count++;
 		}
 	}
+	if (!trig->use_count) {
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
+			if (trig->subirqs[i].enabled) {
+				trig->use_count++;
+				generic_handle_irq(trig->subirq_base + i);
+			}
+	}
 }
 EXPORT_SYMBOL(iio_trigger_poll);
 
@@ -219,16 +227,31 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
 	int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
-	list_add_tail(&pf->list, &trig->pollfunc_list);
-	spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
-
-	if (trig->set_trigger_state)
-		ret = trig->set_trigger_state(trig, true);
-	if (ret) {
-		printk(KERN_ERR "set trigger state failed\n");
-		list_del(&pf->list);
+	if (pf->thread) {
+		bool notinuse
+			= bitmap_empty(trig->pool,
+				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+		pf->irq = iio_trigger_get_irq(trig);
+		ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+					   pf->type, pf->name,
+					   pf);
+		if (trig->set_trigger_state && notinuse) {
+			ret = trig->set_trigger_state(trig, true);
+		} else {
+			spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+			list_add_tail(&pf->list, &trig->pollfunc_list);
+			spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
+
+			if (trig->set_trigger_state)
+				ret = trig->set_trigger_state(trig, true);
+		}
+		if (ret) {
+			printk(KERN_ERR "set trigger state failed\n");
+			list_del(&pf->list);
+		}
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(iio_trigger_attach_poll_func);
@@ -240,37 +263,63 @@ int iio_trigger_dettach_poll_func(struct iio_trigger *trig,
 	unsigned long flags;
 	int ret = -EINVAL;
 
-	spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
-	list_for_each_entry(pf_cursor, &trig->pollfunc_list, list)
-		if (pf_cursor == pf) {
-			ret = 0;
-			break;
-		}
-	if (!ret) {
-		if (list_is_singular(&trig->pollfunc_list)
-		    && trig->set_trigger_state) {
-			spin_unlock_irqrestore(&trig->pollfunc_list_lock,
-					       flags);
-			/* May sleep hence cannot hold the spin lock */
+	if (pf->thread) {
+		bool no_other_users
+			= (bitmap_weight(trig->pool,
+					 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
+			   == 1);
+		if (trig->set_trigger_state && no_other_users) {
 			ret = trig->set_trigger_state(trig, false);
 			if (ret)
 				goto error_ret;
-			spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+		} else
+			ret = 0;
+		iio_trigger_put_irq(trig, pf->irq);
+		free_irq(pf->irq, pf);
+	} else {
+		spin_lock_irqsave(&trig->pollfunc_list_lock, flags);
+		list_for_each_entry(pf_cursor, &trig->pollfunc_list, list)
+			if (pf_cursor == pf) {
+				ret = 0;
+				break;
+			}
+		if (!ret) {
+			if (list_is_singular(&trig->pollfunc_list)
+			    && trig->set_trigger_state) {
+				spin_unlock_irqrestore(&trig
+						       ->pollfunc_list_lock,
+						       flags);
+				/* May sleep hence cannot hold the spin lock */
+				ret = trig->set_trigger_state(trig, false);
+				if (ret)
+					goto error_ret;
+				spin_lock_irqsave(&trig->pollfunc_list_lock,
+						  flags);
+			}
+			/*
+			 * Now we can delete safe in the knowledge that, if
+			 * this is the last pollfunc then we have disabled
+			 * the trigger anyway and so nothing should be able
+			 * to call the pollfunc.
+			 */
+			list_del(&pf_cursor->list);
 		}
-		/*
-		 * Now we can delete safe in the knowledge that, if this is
-		 * the last pollfunc then we have disabled the trigger anyway
-		 * and so nothing should be able to call the pollfunc.
-		 */
-		list_del(&pf_cursor->list);
+		spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
 	}
-	spin_unlock_irqrestore(&trig->pollfunc_list_lock, flags);
 
 error_ret:
 	return ret;
 }
 EXPORT_SYMBOL(iio_trigger_dettach_poll_func);
 
+irqreturn_t iio_pollfunc_store_time(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	pf->timestamp = iio_get_time_ns();
+	return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL(iio_pollfunc_store_time);
+
 /**
  * iio_trigger_read_currrent() - trigger consumer sysfs query which trigger
  *
@@ -337,6 +386,22 @@ static const struct attribute_group iio_trigger_consumer_attr_group = {
 static void iio_trig_release(struct device *device)
 {
 	struct iio_trigger *trig = to_iio_trigger(device);
+	int i;
+
+	if (trig->subirq_base) {
+		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+			irq_modify_status(trig->subirq_base + i,
+					  IRQ_NOAUTOEN,
+					  IRQ_NOREQUEST | IRQ_NOPROBE);
+			irq_set_chip(trig->subirq_base + i,
+				     NULL);
+			irq_set_handler(trig->subirq_base + i,
+					NULL);
+		}
+
+		irq_free_descs(trig->subirq_base,
+			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+	}
 	kfree(trig);
 	iio_put();
 }
@@ -345,11 +410,30 @@ static struct device_type iio_trig_type = {
 	.release = iio_trig_release,
 };
 
-struct iio_trigger *iio_allocate_trigger(void)
+static void iio_trig_subirqmask(struct irq_data *d)
+{
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	struct iio_trigger *trig
+		= container_of(chip,
+			       struct iio_trigger, subirq_chip);
+	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
+}
+
+static void iio_trig_subirqunmask(struct irq_data *d)
+{
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	struct iio_trigger *trig
+		= container_of(chip,
+			       struct iio_trigger, subirq_chip);
+	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
+}
+
+struct iio_trigger *iio_allocate_trigger_named(const char *name)
 {
 	struct iio_trigger *trig;
 	trig = kzalloc(sizeof *trig, GFP_KERNEL);
 	if (trig) {
+		int i;
 		trig->dev.type = &iio_trig_type;
 		trig->dev.bus = &iio_bus_type;
 		device_initialize(&trig->dev);
@@ -357,10 +441,41 @@ struct iio_trigger *iio_allocate_trigger(void)
 		spin_lock_init(&trig->pollfunc_list_lock);
 		INIT_LIST_HEAD(&trig->list);
 		INIT_LIST_HEAD(&trig->pollfunc_list);
+
+		if (name) {
+			mutex_init(&trig->pool_lock);
+			trig->subirq_base
+				= irq_alloc_descs(-1, 0,
+						  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+						  0);
+			if (trig->subirq_base < 0) {
+				kfree(trig);
+				return NULL;
+			}
+			trig->name = name;
+			trig->subirq_chip.name = name;
+			trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
+			trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
+			for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+				irq_set_chip(trig->subirq_base + i,
+					     &trig->subirq_chip);
+				irq_set_handler(trig->subirq_base + i,
+						&handle_simple_irq);
+				irq_modify_status(trig->subirq_base + i,
+						  IRQ_NOREQUEST | IRQ_NOAUTOEN,
+						  IRQ_NOPROBE);
+			}
+		}
 		iio_get();
 	}
 	return trig;
 }
+EXPORT_SYMBOL(iio_allocate_trigger_named);
+
+struct iio_trigger *iio_allocate_trigger(void)
+{
+	return iio_allocate_trigger_named(NULL);
+}
 EXPORT_SYMBOL(iio_allocate_trigger);
 
 void iio_free_trigger(struct iio_trigger *trig)