author     Jonathan Cameron <jic23@cam.ac.uk>    2011-05-18 09:42:34 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>   2011-05-19 19:15:05 -0400
commit     8d213f24f2291a3edc7f94ac2bec8c85015aed96 (patch)
tree       3bcefb4531f2d31edec8c694ae70e6735e86fa65 /drivers/staging
parent     c74b0de1666f8b8f6c65e1e944deff71fed0769a (diff)
staging:iio: ring core cleanups + check if read_last available in lis3l02dq
Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
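
The driver-visible part of the change is in lis3l02dq_read_accel_from_ring(): it now returns -EBUSY when the ring implementation has no read_last() callback, and it locates the requested channel inside the last captured scan with bitmap_weight() on the scan mask rather than the open-coded iio_scan_mask_count_to_right() helper that this patch deletes from ring_generic.h. A minimal, illustrative sketch of that offset calculation (not part of the patch; plain C with a popcount builtin standing in for the kernel's bitmap_weight()):

```c
#include <stdio.h>

/*
 * Offset of channel `index` inside a packed scan: the number of enabled
 * channels whose scan index is below `index`.  This mirrors what
 * data[bitmap_weight(&ring->scan_mask, index)] computes in the new code.
 */
static int scan_offset(unsigned long scan_mask, unsigned int index)
{
	unsigned long below = scan_mask & ((1UL << index) - 1UL);

	return __builtin_popcountl(below);
}

int main(void)
{
	/* channels 0, 2 and 3 enabled (mask 0xd): channel 3 is stored at offset 2 */
	printf("%d\n", scan_offset(0xdUL, 3));
	return 0;
}
```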
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/iio/accel/lis3l02dq_ring.c |   7
-rw-r--r--  drivers/staging/iio/industrialio-ring.c    | 237
-rw-r--r--  drivers/staging/iio/ring_generic.h         | 125
3 files changed, 136 insertions, 233 deletions
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 5029c51fe919..c8f29bc73f68 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -50,9 +50,13 @@ ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
 {
 	int ret;
 	s16 *data;
+
 	if (!iio_scan_mask_query(ring, index))
 		return -EINVAL;
 
+	if (!ring->access->read_last)
+		return -EBUSY;
+
 	data = kmalloc(ring->access->get_bytes_per_datum(ring),
 		       GFP_KERNEL);
 	if (data == NULL)
@@ -61,9 +65,10 @@ ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
 	ret = ring->access->read_last(ring, (u8 *)data);
 	if (ret)
 		goto error_free_data;
-	*val = data[iio_scan_mask_count_to_right(ring, index)];
+	*val = data[bitmap_weight(&ring->scan_mask, index)];
 error_free_data:
 	kfree(data);
+
 	return ret;
 }
 
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
index 050f9f94058a..843eb82a69ba 100644
--- a/drivers/staging/iio/industrialio-ring.c
+++ b/drivers/staging/iio/industrialio-ring.c
@@ -71,14 +71,10 @@ static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
 				      size_t n, loff_t *f_ps)
 {
 	struct iio_ring_buffer *rb = filp->private_data;
-	int ret;
 
-	/* rip lots must exist. */
 	if (!rb->access->read_first_n)
 		return -EINVAL;
-	ret = rb->access->read_first_n(rb, n, buf);
-
-	return ret;
+	return rb->access->read_first_n(rb, n, buf);
 }
 
 /**
@@ -88,13 +84,12 @@ static unsigned int iio_ring_poll(struct file *filp,
 				  struct poll_table_struct *wait)
 {
 	struct iio_ring_buffer *rb = filp->private_data;
-	int ret = 0;
 
 	poll_wait(filp, &rb->pollq, wait);
 	if (rb->stufftoread)
 		return POLLIN | POLLRDNORM;
 	/* need a way of knowing if there may be enough data... */
-	return ret;
+	return 0;
 }
 
 static const struct file_operations iio_ring_fileops = {
@@ -117,24 +112,23 @@ EXPORT_SYMBOL(iio_ring_access_release);
 
 static inline int
 __iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
-				 struct module *owner)
+				 struct module *owner,
+				 int id)
 {
-	int ret, minor;
+	int ret;
 
 	buf->access_handler.flags = 0;
-
 	buf->dev.bus = &iio_bus_type;
 	device_initialize(&buf->dev);
 
-	minor = iio_device_get_chrdev_minor();
-	if (minor < 0) {
-		ret = minor;
+	ret = iio_device_get_chrdev_minor();
+	if (ret < 0)
 		goto error_device_put;
-	}
-	buf->dev.devt = MKDEV(MAJOR(iio_devt), minor);
+
+	buf->dev.devt = MKDEV(MAJOR(iio_devt), ret);
 	dev_set_name(&buf->dev, "%s:buffer%d",
 		     dev_name(buf->dev.parent),
-		     buf->id);
+		     id);
 	ret = device_add(&buf->dev);
 	if (ret < 0) {
 		printk(KERN_ERR "failed to add the ring dev\n");
@@ -172,11 +166,10 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
 EXPORT_SYMBOL(iio_ring_buffer_init);
 
 static ssize_t iio_show_scan_index(struct device *dev,
 				   struct device_attribute *attr,
 				   char *buf)
 {
-	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-	return sprintf(buf, "%u\n", this_attr->c->scan_index);
+	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
 }
 
 static ssize_t iio_show_fixed_type(struct device *dev,
@@ -191,6 +184,95 @@ static ssize_t iio_show_fixed_type(struct device *dev,
 		       this_attr->c->scan_type.shift);
 }
 
+static ssize_t iio_scan_el_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	int ret;
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+
+	ret = iio_scan_mask_query(ring, to_iio_dev_attr(attr)->address);
+	if (ret < 0)
+		return ret;
+	return sprintf(buf, "%d\n", ret);
+}
+
+static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
+{
+	if (bit > IIO_MAX_SCAN_LENGTH)
+		return -EINVAL;
+	ring->scan_mask &= ~(1 << bit);
+	ring->scan_count--;
+	return 0;
+}
+
+static ssize_t iio_scan_el_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int ret = 0;
+	bool state;
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = ring->indio_dev;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	state = !(buf[0] == '0');
+	mutex_lock(&indio_dev->mlock);
+	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+		ret = -EBUSY;
+		goto error_ret;
+	}
+	ret = iio_scan_mask_query(ring, this_attr->address);
+	if (ret < 0)
+		goto error_ret;
+	if (!state && ret) {
+		ret = iio_scan_mask_clear(ring, this_attr->address);
+		if (ret)
+			goto error_ret;
+	} else if (state && !ret) {
+		ret = iio_scan_mask_set(ring, this_attr->address);
+		if (ret)
+			goto error_ret;
+	}
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+
+}
+
+static ssize_t iio_scan_el_ts_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", ring->scan_timestamp);
+}
+
+static ssize_t iio_scan_el_ts_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t len)
+{
+	int ret = 0;
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = ring->indio_dev;
+	bool state;
+	state = !(buf[0] == '0');
+	mutex_lock(&indio_dev->mlock);
+	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+		ret = -EBUSY;
+		goto error_ret;
+	}
+	ring->scan_timestamp = state;
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
 static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
 				      const struct iio_chan_spec *chan)
 {
@@ -215,7 +297,6 @@ static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
 			     0,
 			     &ring->dev,
 			     &ring->scan_el_dev_attr_list);
-
 	if (ret)
 		goto error_ret;
 
@@ -281,12 +362,10 @@ int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
 {
 	int ret, i;
 
-	ring->id = id;
-
-	ret = __iio_request_ring_buffer_chrdev(ring, ring->owner);
-
+	ret = __iio_request_ring_buffer_chrdev(ring, ring->owner, id);
 	if (ret)
 		goto error_ret;
+
 	if (ring->scan_el_attrs) {
 		ret = sysfs_create_group(&ring->dev.kobj,
 					 ring->scan_el_attrs);
@@ -322,12 +401,6 @@ error_ret:
 }
 EXPORT_SYMBOL(iio_ring_buffer_register_ex);
 
-int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
-{
-	return iio_ring_buffer_register_ex(ring, id, NULL, 0);
-}
-EXPORT_SYMBOL(iio_ring_buffer_register);
-
 void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
 {
 	__iio_ring_attr_cleanup(ring);
@@ -339,14 +412,13 @@ ssize_t iio_read_ring_length(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
 {
-	int len = 0;
 	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
 
 	if (ring->access->get_length)
-		len = sprintf(buf, "%d\n",
+		return sprintf(buf, "%d\n",
 			      ring->access->get_length(ring));
 
-	return len;
+	return 0;
 }
 EXPORT_SYMBOL(iio_read_ring_length);
 
@@ -358,6 +430,7 @@ ssize_t iio_write_ring_length(struct device *dev,
 	int ret;
 	ulong val;
 	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+
 	ret = strict_strtoul(buf, 10, &val);
 	if (ret)
 		return ret;
@@ -380,14 +453,13 @@ ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
 				      struct device_attribute *attr,
 				      char *buf)
 {
-	int len = 0;
 	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
 
 	if (ring->access->get_bytes_per_datum)
-		len = sprintf(buf, "%d\n",
+		return sprintf(buf, "%d\n",
 			      ring->access->get_bytes_per_datum(ring));
 
-	return len;
+	return 0;
 }
 EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
 
@@ -450,7 +522,6 @@ ssize_t iio_store_ring_enable(struct device *dev,
 	}
 
 	if (ring->setup_ops->postenable) {
-
 		ret = ring->setup_ops->postenable(dev_info);
 		if (ret) {
 			printk(KERN_INFO
@@ -488,6 +559,7 @@ error_ret:
 	return ret;
 }
 EXPORT_SYMBOL(iio_store_ring_enable);
+
 ssize_t iio_show_ring_enable(struct device *dev,
 			     struct device_attribute *attr,
 			     char *buf)
@@ -498,91 +570,6 @@ ssize_t iio_show_ring_enable(struct device *dev,
 }
 EXPORT_SYMBOL(iio_show_ring_enable);
 
-ssize_t iio_scan_el_show(struct device *dev,
-			 struct device_attribute *attr,
-			 char *buf)
-{
-	int ret;
-	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
-	ret = iio_scan_mask_query(ring, this_attr->address);
-	if (ret < 0)
-		return ret;
-	return sprintf(buf, "%d\n", ret);
-}
-EXPORT_SYMBOL(iio_scan_el_show);
-
-ssize_t iio_scan_el_store(struct device *dev,
-			  struct device_attribute *attr,
-			  const char *buf,
-			  size_t len)
-{
-	int ret = 0;
-	bool state;
-	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-	struct iio_dev *indio_dev = ring->indio_dev;
-	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
-	state = !(buf[0] == '0');
-	mutex_lock(&indio_dev->mlock);
-	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
-		ret = -EBUSY;
-		goto error_ret;
-	}
-	ret = iio_scan_mask_query(ring, this_attr->address);
-	if (ret < 0)
-		goto error_ret;
-	if (!state && ret) {
-		ret = iio_scan_mask_clear(ring, this_attr->address);
-		if (ret)
-			goto error_ret;
-	} else if (state && !ret) {
-		ret = iio_scan_mask_set(ring, this_attr->address);
-		if (ret)
-			goto error_ret;
-	}
-
-error_ret:
-	mutex_unlock(&indio_dev->mlock);
-
-	return ret ? ret : len;
-
-}
-EXPORT_SYMBOL(iio_scan_el_store);
-
-ssize_t iio_scan_el_ts_show(struct device *dev,
-			    struct device_attribute *attr,
-			    char *buf)
-{
-	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-	return sprintf(buf, "%d\n", ring->scan_timestamp);
-}
-EXPORT_SYMBOL(iio_scan_el_ts_show);
-
-ssize_t iio_scan_el_ts_store(struct device *dev,
-			     struct device_attribute *attr,
-			     const char *buf,
-			     size_t len)
-{
-	int ret = 0;
-	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
-	struct iio_dev *indio_dev = ring->indio_dev;
-	bool state;
-	state = !(buf[0] == '0');
-	mutex_lock(&indio_dev->mlock);
-	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
-		ret = -EBUSY;
-		goto error_ret;
-	}
-	ring->scan_timestamp = state;
-error_ret:
-	mutex_unlock(&indio_dev->mlock);
-
-	return ret ? ret : len;
-}
-EXPORT_SYMBOL(iio_scan_el_ts_store);
-
 int iio_sw_ring_preenable(struct iio_dev *indio_dev)
 {
 	struct iio_ring_buffer *ring = indio_dev->ring;
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
index 33496766863a..3f26f7175b6a 100644
--- a/drivers/staging/iio/ring_generic.h
+++ b/drivers/staging/iio/ring_generic.h
@@ -76,11 +76,9 @@ struct iio_ring_setup_ops {
  * @dev: ring buffer device struct
  * @indio_dev: industrial I/O device structure
  * @owner: module that owns the ring buffer (for ref counting)
- * @id: unique id number
  * @length: [DEVICE] number of datums in ring
  * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
  * @bpe: [DEVICE] size of individual channel value
- * @loopcount: [INTERN] number of times the ring has looped
 * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
 *			control method is used
 * @scan_count: [INTERN] the number of elements in the current scan mode
@@ -93,27 +91,25 @@ struct iio_ring_setup_ops {
  * @postenable: [DRIVER] function to run after marking ring enabled
  * @predisable: [DRIVER] function to run prior to marking ring disabled
  * @postdisable: [DRIVER] function to run after marking ring disabled
  **/
 struct iio_ring_buffer {
 	struct device dev;
 	struct iio_dev *indio_dev;
 	struct module *owner;
-	int id;
-	int length;
-	int bytes_per_datum;
-	int bpe;
-	int loopcount;
-	struct attribute_group *scan_el_attrs;
-	int scan_count;
-	u32 scan_mask;
-	bool scan_timestamp;
-	struct iio_handler access_handler;
+	int length;
+	int bytes_per_datum;
+	int bpe;
+	struct attribute_group *scan_el_attrs;
+	int scan_count;
+	unsigned long scan_mask;
+	bool scan_timestamp;
+	struct iio_handler access_handler;
 	const struct iio_ring_access_funcs *access;
 	const struct iio_ring_setup_ops *setup_ops;
 	struct list_head scan_el_dev_attr_list;
 
 	wait_queue_head_t pollq;
 	bool stufftoread;
 };
 
 /**
@@ -135,48 +131,8 @@ static inline void __iio_update_ring_buffer(struct iio_ring_buffer *ring,
 {
 	ring->bytes_per_datum = bytes_per_datum;
 	ring->length = length;
-	ring->loopcount = 0;
 }
 
-/**
- * iio_scan_el_store() - sysfs scan element selection interface
- * @dev: the target device
- * @attr: the device attribute that is being processed
- * @buf: input from userspace
- * @len: length of input
- *
- * A generic function used to enable various scan elements. In some
- * devices explicit read commands for each channel mean this is merely
- * a software switch. In others this must actively disable the channel.
- * Complexities occur when this interacts with data ready type triggers
- * which may not reset unless every channel that is enabled is explicitly
- * read.
- **/
-ssize_t iio_scan_el_store(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len);
-/**
- * iio_scan_el_show() - sysfs interface to query whether a scan element
- *			is enabled or not
- * @dev: the target device
- * @attr: the device attribute that is being processed
- * @buf: output buffer
- **/
-ssize_t iio_scan_el_show(struct device *dev, struct device_attribute *attr,
-			 char *buf);
-
-/**
- * iio_scan_el_ts_store() - sysfs interface to set whether a timestamp is included
- *			    in the scan.
- **/
-ssize_t iio_scan_el_ts_store(struct device *dev, struct device_attribute *attr,
-			     const char *buf, size_t len);
-/**
- * iio_scan_el_ts_show() - sysfs interface to query if a timestamp is included
- *			   in the scan.
- **/
-ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
-			    char *buf);
-
 /*
  * These are mainly provided to allow for a change of implementation if a device
  * has a large number of scan elements
@@ -243,41 +199,6 @@ static inline int iio_scan_mask_set(struct iio_ring_buffer *ring, int bit)
 };
 
 /**
- * iio_scan_mask_clear() - clear a particular element from the scan mask
- * @ring: the ring buffer whose scan mask we are interested in
- * @bit: the bit to clear
- **/
-static inline int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
-{
-	if (bit > IIO_MAX_SCAN_LENGTH)
-		return -EINVAL;
-	ring->scan_mask &= ~(1 << bit);
-	ring->scan_count--;
-	return 0;
-};
-
-/**
- * iio_scan_mask_count_to_right() - how many scan elements occur before here
- * @ring: the ring buffer whose scan mask we interested in
- * @bit: which number scan element is this
- **/
-static inline int iio_scan_mask_count_to_right(struct iio_ring_buffer *ring,
-						int bit)
-{
-	int count = 0;
-	int mask = (1 << bit);
-	if (bit > IIO_MAX_SCAN_LENGTH)
-		return -EINVAL;
-	while (mask) {
-		mask >>= 1;
-		if (mask & ring->scan_mask)
-			count++;
-	}
-
-	return count;
-}
-
-/**
  * iio_put_ring_buffer() - notify done with buffer
  * @ring: the buffer we are done with.
  **/
@@ -286,17 +207,11 @@ static inline void iio_put_ring_buffer(struct iio_ring_buffer *ring)
 	put_device(&ring->dev);
 };
 
 #define to_iio_ring_buffer(d) \
 	container_of(d, struct iio_ring_buffer, dev)
 
 /**
- * iio_ring_buffer_register() - register the buffer with IIO core
- * @ring: the buffer to be registered
- * @id: the id of the buffer (typically 0)
- **/
-int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id);
-
-/** iio_ring_buffer_register_ex() - register the buffer with IIO core
+ * iio_ring_buffer_register_ex() - register the buffer with IIO core
  * @ring: the buffer to be registered
  * @id: the id of the buffer (typically 0)
  **/
@@ -356,10 +271,6 @@ ssize_t iio_show_ring_enable(struct device *dev,
 int iio_sw_ring_preenable(struct iio_dev *indio_dev);
 
 #else /* CONFIG_IIO_RING_BUFFER */
-static inline int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
-{
-	return 0;
-};
 
 static inline int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring,
 					      int id,
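
With the iio_ring_buffer_register() wrapper (and its !CONFIG_IIO_RING_BUFFER stub) gone, callers have to use iio_ring_buffer_register_ex() directly. A hypothetical caller sketch, not taken from this commit (example_setup_ring() is made up for illustration); a driver with no iio_chan_spec table passes NULL/0, exactly as the deleted wrapper did:

```c
/* Hypothetical helper, for illustration only: registers the ring the way
 * the removed iio_ring_buffer_register(ring, id) wrapper used to, i.e.
 * by calling iio_ring_buffer_register_ex(ring, id, NULL, 0). */
static int example_setup_ring(struct iio_dev *indio_dev)
{
	return iio_ring_buffer_register_ex(indio_dev->ring, 0, NULL, 0);
}
```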