author     Lars-Peter Clausen <lars@metafoo.de>    2013-10-04 07:07:00 -0400
committer  Jonathan Cameron <jic23@kernel.org>     2013-10-12 07:07:11 -0400
commit     a95194569f697a6cc10d00f9b9b3d21b0b820520 (patch)
tree       b314a9155207e34ca0964d4ac3f127dcf2c87fd8 /drivers/iio
parent     d2f0a48f36aea38e0a5c4b439d5d9c96aecabad9 (diff)
iio:buffer: Add proper locking for iio_update_buffers()
We need to make sure that in-kernel users of iio_update_buffers() do not race
against each other or against unregistration of the device. So we need to take
both the mlock and the info_exist_lock when calling iio_update_buffers() from
an in-kernel consumer.
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
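
As a rough illustration of what the new locking buys an in-kernel caller, here is a
minimal sketch of a hypothetical consumer attaching its own buffer. The function name
and the consumer_buffer argument are invented for the example; only iio_update_buffers()
itself is the exported helper reworked by this patch. The caller no longer takes mlock
or info_exist_lock around the call and simply handles the return value, with -ENODEV
now indicating that the device was unregistered before the update could run.

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

/*
 * Hypothetical in-kernel consumer, for illustration only: the function
 * name and the consumer_buffer argument are made up; iio_update_buffers()
 * is the exported helper reworked by this patch.
 */
static int example_consumer_attach(struct iio_dev *indio_dev,
				   struct iio_buffer *consumer_buffer)
{
	/*
	 * No explicit locking is needed here: iio_update_buffers() now takes
	 * info_exist_lock and mlock internally, so concurrent updaters are
	 * serialized and a device that has already been unregistered is
	 * reported as -ENODEV instead of being touched.
	 */
	return iio_update_buffers(indio_dev, consumer_buffer, NULL);
}

The sysfs path in iio_buffer_store_enable(), by contrast, is switched to the unlocked
__iio_update_buffers(), since it already runs under mlock and taking the lock again in
the wrapper would deadlock.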
Diffstat (limited to 'drivers/iio')
-rw-r--r--  drivers/iio/industrialio-buffer.c | 29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 5a46c57a038b..d6a5455ae51a 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -509,7 +509,7 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
 		indio_dev->setup_ops->postdisable(indio_dev);
 }
 
-int iio_update_buffers(struct iio_dev *indio_dev,
+static int __iio_update_buffers(struct iio_dev *indio_dev,
 		       struct iio_buffer *insert_buffer,
 		       struct iio_buffer *remove_buffer)
 {
@@ -674,6 +674,29 @@ error_ret:
 
 	return ret;
 }
+
+int iio_update_buffers(struct iio_dev *indio_dev,
+		       struct iio_buffer *insert_buffer,
+		       struct iio_buffer *remove_buffer)
+{
+	int ret;
+
+	mutex_lock(&indio_dev->info_exist_lock);
+	mutex_lock(&indio_dev->mlock);
+
+	if (indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
+
+out_unlock:
+	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&indio_dev->info_exist_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(iio_update_buffers);
 
 ssize_t iio_buffer_store_enable(struct device *dev,
@@ -699,10 +722,10 @@ ssize_t iio_buffer_store_enable(struct device *dev,
 		goto done;
 
 	if (requested_state)
-		ret = iio_update_buffers(indio_dev,
+		ret = __iio_update_buffers(indio_dev,
 					 indio_dev->buffer, NULL);
 	else
-		ret = iio_update_buffers(indio_dev,
+		ret = __iio_update_buffers(indio_dev,
 					 NULL, indio_dev->buffer);
 
 	if (ret < 0)