Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
 drivers/iio/industrialio-buffer.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 33 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2710f7245c3b..e9f389b9da69 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -31,16 +31,9 @@ static const char * const iio_endian_prefix[] = {
 	[IIO_LE] = "le",
 };
 
-static bool iio_buffer_is_active(struct iio_dev *indio_dev,
-				 struct iio_buffer *buf)
+static bool iio_buffer_is_active(struct iio_buffer *buf)
 {
-	struct list_head *p;
-
-	list_for_each(p, &indio_dev->buffer_list)
-		if (p == &buf->buffer_list)
-			return true;
-
-	return false;
+	return !list_empty(&buf->buffer_list);
 }
 
 /**
@@ -79,6 +72,7 @@ unsigned int iio_buffer_poll(struct file *filp,
 void iio_buffer_init(struct iio_buffer *buffer)
 {
 	INIT_LIST_HEAD(&buffer->demux_list);
+	INIT_LIST_HEAD(&buffer->buffer_list);
 	init_waitqueue_head(&buffer->pollq);
 }
 EXPORT_SYMBOL(iio_buffer_init);
@@ -146,7 +140,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+	if (iio_buffer_is_active(indio_dev->buffer)) {
 		ret = -EBUSY;
 		goto error_ret;
 	}
@@ -192,7 +186,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
 		return ret;
 
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+	if (iio_buffer_is_active(indio_dev->buffer)) {
 		ret = -EBUSY;
 		goto error_ret;
 	}
@@ -214,7 +208,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
 			     &iio_show_scan_index,
 			     NULL,
 			     0,
-			     0,
+			     IIO_SEPARATE,
 			     &indio_dev->dev,
 			     &buffer->scan_el_dev_attr_list);
 	if (ret)
@@ -249,6 +243,8 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
 					0,
 					&indio_dev->dev,
 					&buffer->scan_el_dev_attr_list);
+	if (ret)
+		goto error_ret;
 	attrcount++;
 	ret = attrcount;
 error_ret:
@@ -396,7 +392,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
 		return len;
 
 	mutex_lock(&indio_dev->mlock);
-	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
+	if (iio_buffer_is_active(indio_dev->buffer)) {
 		ret = -EBUSY;
 	} else {
 		if (buffer->access->set_length)
@@ -414,13 +410,11 @@ ssize_t iio_buffer_show_enable(struct device *dev,
 			      char *buf)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	return sprintf(buf, "%d\n",
-		       iio_buffer_is_active(indio_dev,
-					    indio_dev->buffer));
+	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
 }
 EXPORT_SYMBOL(iio_buffer_show_enable);
 
-/* note NULL used as error indicator as it doesn't make sense. */
+/* Note NULL used as error indicator as it doesn't make sense. */
 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 					  unsigned int masklength,
 					  const unsigned long *mask)
@@ -435,8 +429,8 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 	return NULL;
 }
 
-static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
-				  bool timestamp)
+static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
+				const unsigned long *mask, bool timestamp)
 {
 	const struct iio_chan_spec *ch;
 	unsigned bytes = 0;
@@ -509,7 +503,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 	indio_dev->active_scan_mask = NULL;
 
 	if (remove_buffer)
-		list_del(&remove_buffer->buffer_list);
+		list_del_init(&remove_buffer->buffer_list);
 	if (insert_buffer)
 		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
 
@@ -521,7 +515,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 		return 0;
 	}
 
-	/* What scan mask do we actually have ?*/
+	/* What scan mask do we actually have? */
 	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
 				sizeof(long), GFP_KERNEL);
 	if (compound_mask == NULL) {
@@ -546,7 +540,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 			 * Roll back.
 			 * Note can only occur when adding a buffer.
 			 */
-			list_del(&insert_buffer->buffer_list);
+			list_del_init(&insert_buffer->buffer_list);
 			if (old_mask) {
 				indio_dev->active_scan_mask = old_mask;
 				success = -EINVAL;
@@ -594,7 +588,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 			goto error_run_postdisable;
 		}
 	}
-	/* Definitely possible for devices to support both of these.*/
+	/* Definitely possible for devices to support both of these. */
 	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
 		if (!indio_dev->trig) {
 			printk(KERN_INFO "Buffer not started: no trigger\n");
@@ -605,7 +599,7 @@ int iio_update_buffers(struct iio_dev *indio_dev,
 		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
 	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
 		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
-	} else { /* should never be reached */
+	} else { /* Should never be reached */
 		ret = -EINVAL;
 		goto error_run_postdisable;
 	}
@@ -637,7 +631,7 @@ error_run_postdisable:
 error_remove_inserted:
 
 	if (insert_buffer)
-		list_del(&insert_buffer->buffer_list);
+		list_del_init(&insert_buffer->buffer_list);
 	indio_dev->active_scan_mask = old_mask;
 	kfree(compound_mask);
 error_ret:
@@ -654,7 +648,6 @@ ssize_t iio_buffer_store_enable(struct device *dev,
 	int ret;
 	bool requested_state;
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct iio_buffer *pbuf = indio_dev->buffer;
 	bool inlist;
 
 	ret = strtobool(buf, &requested_state);
@@ -664,7 +657,7 @@ ssize_t iio_buffer_store_enable(struct device *dev,
 	mutex_lock(&indio_dev->mlock);
 
 	/* Find out if it is in the list */
-	inlist = iio_buffer_is_active(indio_dev, pbuf);
+	inlist = iio_buffer_is_active(indio_dev->buffer);
 	/* Already in desired state */
 	if (inlist == requested_state)
 		goto done;
@@ -729,6 +722,7 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
 
 /**
  * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
  * @buffer: the buffer whose scan mask we are interested in
  * @bit: the bit to be set.
  *
@@ -749,7 +743,7 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
 	if (trialmask == NULL)
 		return -ENOMEM;
 	if (!indio_dev->masklength) {
-		WARN_ON("trying to set scanmask prior to registering buffer\n");
+		WARN_ON("Trying to set scanmask prior to registering buffer\n");
 		goto err_invalid_mask;
 	}
 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
@@ -804,8 +798,8 @@ struct iio_demux_table {
 	struct list_head l;
 };
 
-static unsigned char *iio_demux(struct iio_buffer *buffer,
-				 unsigned char *datain)
+static const void *iio_demux(struct iio_buffer *buffer,
+				const void *datain)
 {
 	struct iio_demux_table *t;
 
@@ -818,9 +812,9 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
 	return buffer->demux_bounce;
 }
 
-static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
 {
-	unsigned char *dataout = iio_demux(buffer, data);
+	const void *dataout = iio_demux(buffer, data);
 
 	return buffer->access->store_to(buffer, dataout);
 }
@@ -835,7 +829,7 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
 	}
 
 
-int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
 {
 	int ret;
 	struct iio_buffer *buf;
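The core of this diff is the new iio_buffer_is_active(): instead of walking indio_dev->buffer_list, it simply tests !list_empty(&buf->buffer_list). That only stays correct because the patch also initialises the node in iio_buffer_init() and converts every list_del() on buffer_list to list_del_init(), so a detached node points back at itself. The following is a minimal, self-contained userspace sketch of that pattern; the list helpers re-implement the <linux/list.h> semantics for illustration, and fake_buffer is a made-up stand-in for struct iio_buffer, not the kernel code itself.

/* Userspace sketch of the list_empty()/list_del_init() pattern (assumed names). */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;		/* a lone node points at itself */
	list->prev = list;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink and re-point the node at itself, so list_empty() stays meaningful. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct fake_buffer {			/* stand-in for struct iio_buffer */
	struct list_head buffer_list;
};

static bool buffer_is_active(struct fake_buffer *buf)
{
	return !list_empty(&buf->buffer_list);	/* same check as the patch */
}

int main(void)
{
	struct list_head dev_buffer_list;	/* stand-in for indio_dev->buffer_list */
	struct fake_buffer buf;

	INIT_LIST_HEAD(&dev_buffer_list);
	INIT_LIST_HEAD(&buf.buffer_list);	/* what the patch adds to iio_buffer_init() */

	printf("active after init:   %d\n", buffer_is_active(&buf));	/* 0 */
	list_add(&buf.buffer_list, &dev_buffer_list);
	printf("active after add:    %d\n", buffer_is_active(&buf));	/* 1 */
	list_del_init(&buf.buffer_list);	/* a plain list_del() would leave the test stale */
	printf("active after remove: %d\n", buffer_is_active(&buf));	/* 0 */
	return 0;
}

Built with any C compiler, the sketch prints 0, 1, 0: inactive after init, active while linked into the device's list, inactive again after list_del_init(). That is the invariant iio_buffer_show_enable() and the other iio_buffer_is_active() callers in this patch now rely on.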