diff options
author | Lars-Peter Clausen <lars@metafoo.de> | 2013-09-15 11:31:00 -0400 |
---|---|---|
committer | Jonathan Cameron <jic23@kernel.org> | 2013-09-15 12:35:31 -0400 |
commit | 705ee2c98a37130fd0aa914dc55b74a9174a6ff0 (patch) | |
tree | 995c7b810abd3915aef7f6a53b52b7364f117df1 /drivers/iio/industrialio-buffer.c | |
parent | a646fbf0fd11256c59ea6233eb243516086dbf15 (diff) |
iio:buffer: Simplify iio_buffer_is_active()
We can skip having to loop through all the device's buffers to see if a certain
buffer is active, if we let the buffer's list head point to itself when the
buffer is inactive.
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
-rw-r--r-- | drivers/iio/industrialio-buffer.c | 31 |
1 file changed, 11 insertions, 20 deletions
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index e73033f3839a..96e97ad2538c 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c | |||
@@ -31,16 +31,9 @@ static const char * const iio_endian_prefix[] = { | |||
31 | [IIO_LE] = "le", | 31 | [IIO_LE] = "le", |
32 | }; | 32 | }; |
33 | 33 | ||
34 | static bool iio_buffer_is_active(struct iio_dev *indio_dev, | 34 | static bool iio_buffer_is_active(struct iio_buffer *buf) |
35 | struct iio_buffer *buf) | ||
36 | { | 35 | { |
37 | struct list_head *p; | 36 | return !list_empty(&buf->buffer_list); |
38 | |||
39 | list_for_each(p, &indio_dev->buffer_list) | ||
40 | if (p == &buf->buffer_list) | ||
41 | return true; | ||
42 | |||
43 | return false; | ||
44 | } | 37 | } |
45 | 38 | ||
46 | /** | 39 | /** |
@@ -79,6 +72,7 @@ unsigned int iio_buffer_poll(struct file *filp, | |||
79 | void iio_buffer_init(struct iio_buffer *buffer) | 72 | void iio_buffer_init(struct iio_buffer *buffer) |
80 | { | 73 | { |
81 | INIT_LIST_HEAD(&buffer->demux_list); | 74 | INIT_LIST_HEAD(&buffer->demux_list); |
75 | INIT_LIST_HEAD(&buffer->buffer_list); | ||
82 | init_waitqueue_head(&buffer->pollq); | 76 | init_waitqueue_head(&buffer->pollq); |
83 | } | 77 | } |
84 | EXPORT_SYMBOL(iio_buffer_init); | 78 | EXPORT_SYMBOL(iio_buffer_init); |
@@ -146,7 +140,7 @@ static ssize_t iio_scan_el_store(struct device *dev, | |||
146 | if (ret < 0) | 140 | if (ret < 0) |
147 | return ret; | 141 | return ret; |
148 | mutex_lock(&indio_dev->mlock); | 142 | mutex_lock(&indio_dev->mlock); |
149 | if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { | 143 | if (iio_buffer_is_active(indio_dev->buffer)) { |
150 | ret = -EBUSY; | 144 | ret = -EBUSY; |
151 | goto error_ret; | 145 | goto error_ret; |
152 | } | 146 | } |
@@ -192,7 +186,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev, | |||
192 | return ret; | 186 | return ret; |
193 | 187 | ||
194 | mutex_lock(&indio_dev->mlock); | 188 | mutex_lock(&indio_dev->mlock); |
195 | if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { | 189 | if (iio_buffer_is_active(indio_dev->buffer)) { |
196 | ret = -EBUSY; | 190 | ret = -EBUSY; |
197 | goto error_ret; | 191 | goto error_ret; |
198 | } | 192 | } |
@@ -396,7 +390,7 @@ ssize_t iio_buffer_write_length(struct device *dev, | |||
396 | return len; | 390 | return len; |
397 | 391 | ||
398 | mutex_lock(&indio_dev->mlock); | 392 | mutex_lock(&indio_dev->mlock); |
399 | if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { | 393 | if (iio_buffer_is_active(indio_dev->buffer)) { |
400 | ret = -EBUSY; | 394 | ret = -EBUSY; |
401 | } else { | 395 | } else { |
402 | if (buffer->access->set_length) | 396 | if (buffer->access->set_length) |
@@ -414,9 +408,7 @@ ssize_t iio_buffer_show_enable(struct device *dev, | |||
414 | char *buf) | 408 | char *buf) |
415 | { | 409 | { |
416 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 410 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
417 | return sprintf(buf, "%d\n", | 411 | return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer)); |
418 | iio_buffer_is_active(indio_dev, | ||
419 | indio_dev->buffer)); | ||
420 | } | 412 | } |
421 | EXPORT_SYMBOL(iio_buffer_show_enable); | 413 | EXPORT_SYMBOL(iio_buffer_show_enable); |
422 | 414 | ||
@@ -490,7 +482,7 @@ int iio_update_buffers(struct iio_dev *indio_dev, | |||
490 | indio_dev->active_scan_mask = NULL; | 482 | indio_dev->active_scan_mask = NULL; |
491 | 483 | ||
492 | if (remove_buffer) | 484 | if (remove_buffer) |
493 | list_del(&remove_buffer->buffer_list); | 485 | list_del_init(&remove_buffer->buffer_list); |
494 | if (insert_buffer) | 486 | if (insert_buffer) |
495 | list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list); | 487 | list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list); |
496 | 488 | ||
@@ -527,7 +519,7 @@ int iio_update_buffers(struct iio_dev *indio_dev, | |||
527 | * Roll back. | 519 | * Roll back. |
528 | * Note can only occur when adding a buffer. | 520 | * Note can only occur when adding a buffer. |
529 | */ | 521 | */ |
530 | list_del(&insert_buffer->buffer_list); | 522 | list_del_init(&insert_buffer->buffer_list); |
531 | indio_dev->active_scan_mask = old_mask; | 523 | indio_dev->active_scan_mask = old_mask; |
532 | success = -EINVAL; | 524 | success = -EINVAL; |
533 | } | 525 | } |
@@ -611,7 +603,7 @@ error_run_postdisable: | |||
611 | error_remove_inserted: | 603 | error_remove_inserted: |
612 | 604 | ||
613 | if (insert_buffer) | 605 | if (insert_buffer) |
614 | list_del(&insert_buffer->buffer_list); | 606 | list_del_init(&insert_buffer->buffer_list); |
615 | indio_dev->active_scan_mask = old_mask; | 607 | indio_dev->active_scan_mask = old_mask; |
616 | kfree(compound_mask); | 608 | kfree(compound_mask); |
617 | error_ret: | 609 | error_ret: |
@@ -628,7 +620,6 @@ ssize_t iio_buffer_store_enable(struct device *dev, | |||
628 | int ret; | 620 | int ret; |
629 | bool requested_state; | 621 | bool requested_state; |
630 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 622 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
631 | struct iio_buffer *pbuf = indio_dev->buffer; | ||
632 | bool inlist; | 623 | bool inlist; |
633 | 624 | ||
634 | ret = strtobool(buf, &requested_state); | 625 | ret = strtobool(buf, &requested_state); |
@@ -638,7 +629,7 @@ ssize_t iio_buffer_store_enable(struct device *dev, | |||
638 | mutex_lock(&indio_dev->mlock); | 629 | mutex_lock(&indio_dev->mlock); |
639 | 630 | ||
640 | /* Find out if it is in the list */ | 631 | /* Find out if it is in the list */ |
641 | inlist = iio_buffer_is_active(indio_dev, pbuf); | 632 | inlist = iio_buffer_is_active(indio_dev->buffer); |
642 | /* Already in desired state */ | 633 | /* Already in desired state */ |
643 | if (inlist == requested_state) | 634 | if (inlist == requested_state) |
644 | goto done; | 635 | goto done; |