author    Jonathan Cameron <jic23@kernel.org>    2012-06-30 15:06:00 -0400
committer Jonathan Cameron <jic23@kernel.org>    2012-11-10 05:17:21 -0500
commit    84b36ce5f79c01f792c623f14e92ed86cdccb42f
tree      aa763089df10007bc42aa02b747e652e0b99003e /drivers/iio
parent    4eb3ccf157639a9d9c7829de94017c46c73d9cc4
staging:iio: Add support for multiple buffers

Route all buffer writes through the demux. Addition or removal of a
buffer results in tear down and setup of all the buffers for a given
device.

Signed-off-by: Jonathan Cameron <jic23@kernel.org>
Tested-by: srinivas pandruvada <srinivas.pandruvada@intel.com>
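For driver authors the visible change is the push path: trigger handlers no longer write into the single indio_dev->buffer, they hand one scan to the core, and iio_push_to_buffers() fans it out through the demux to every buffer on indio_dev->buffer_list. Below is a minimal sketch of the new call pattern, modelled on the ad7266/ad7476 handlers in this diff; the example_state layout, the handler name and the header locations are assumptions, only the iio_push_to_buffers() and iio_trigger_notify_done() calls come from this patch.

```c
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>		/* iio_push_to_buffers() - assumed location */
#include <linux/iio/trigger_consumer.h>

/* Hypothetical driver state: one scan worth of samples plus timestamp space. */
struct example_state {
	u8 data[16];
};

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct example_state *st = iio_priv(indio_dev);

	/* ... fill st->data with one scan of samples, append timestamp ... */

	/* Before this patch: iio_push_to_buffer(indio_dev->buffer, st->data);
	 * now the core demuxes the scan into every attached buffer. */
	iio_push_to_buffers(indio_dev, st->data);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
```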
Diffstat (limited to 'drivers/iio')
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c       |  15
-rw-r--r--  drivers/iio/adc/ad7266.c                      |   3
-rw-r--r--  drivers/iio/adc/ad7476.c                      |   2
-rw-r--r--  drivers/iio/adc/ad7887.c                      |   2
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c              |   2
-rw-r--r--  drivers/iio/adc/at91_adc.c                    |   3
-rw-r--r--  drivers/iio/gyro/hid-sensor-gyro-3d.c         |  15
-rw-r--r--  drivers/iio/industrialio-buffer.c             | 380
-rw-r--r--  drivers/iio/industrialio-core.c               |   1
-rw-r--r--  drivers/iio/light/adjd_s311.c                 |   3
-rw-r--r--  drivers/iio/light/hid-sensor-als.c            |  15
-rw-r--r--  drivers/iio/magnetometer/hid-sensor-magn-3d.c |  15
12 files changed, 274 insertions(+), 182 deletions(-)
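Most of the weight lands in industrialio-buffer.c: buffers now sit on indio_dev->buffer_list, and the new iio_update_buffers() tears down whatever is running, edits the list, recomputes the compound scan mask and demux tables, then re-enables the set. A hedged sketch of the attach/detach call, mirroring the rewritten iio_buffer_store_enable() in the diff below; the wrapper name is hypothetical and the header locations are assumed.

```c
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>	/* iio_update_buffers() - assumed location */

/* Hypothetical helper: attach or detach the device's own buffer.
 * iio_update_buffers(indio_dev, insert_buffer, remove_buffer) performs the
 * tear-down/set-up of all buffers; in this patch the sysfs enable path holds
 * indio_dev->mlock around the call, so the sketch does the same. */
static int example_set_buffer_state(struct iio_dev *indio_dev, bool enable)
{
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (enable)
		ret = iio_update_buffers(indio_dev, indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev, NULL, indio_dev->buffer);
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
```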
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 314a4057879e..a95cda0e387f 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -197,21 +197,8 @@ static const struct iio_info accel_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }
 
 /* Callback handler to send event after all samples are received and captured */
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index b11f214779a2..a6f4fc5f8201 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -91,7 +91,6 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
 {
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
-        struct iio_buffer *buffer = indio_dev->buffer;
         struct ad7266_state *st = iio_priv(indio_dev);
         int ret;
 
@@ -99,7 +98,7 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
         if (ret == 0) {
                 if (indio_dev->scan_timestamp)
                         ((s64 *)st->data)[1] = pf->timestamp;
-                iio_push_to_buffer(buffer, (u8 *)st->data);
+                iio_push_to_buffers(indio_dev, (u8 *)st->data);
         }
 
         iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 7f2f45a0a48d..330248bfebae 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -76,7 +76,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 ((s64 *)st->data)[1] = time_ns;
 
-        iio_push_to_buffer(indio_dev->buffer, st->data);
+        iio_push_to_buffers(indio_dev, st->data);
 done:
         iio_trigger_notify_done(indio_dev->trig);
 
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index fd62309b4d3d..81153fafac7a 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -134,7 +134,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
         memcpy(st->data + indio_dev->scan_bytes - sizeof(s64),
                &time_ns, sizeof(time_ns));
 
-        iio_push_to_buffer(indio_dev->buffer, st->data);
+        iio_push_to_buffers(indio_dev, st->data);
 done:
         iio_trigger_notify_done(indio_dev->trig);
 
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 67baa1363d7a..afe6d78c8ff0 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -391,7 +391,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
                 break;
         }
 
-        iio_push_to_buffer(indio_dev->buffer, (uint8_t *)data);
+        iio_push_to_buffers(indio_dev, (uint8_t *)data);
 
         iio_trigger_notify_done(indio_dev->trig);
         sigma_delta->irq_dis = false;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 2e2c9a80aa37..03b85940f4ba 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -65,7 +65,6 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *idev = pf->indio_dev;
         struct at91_adc_state *st = iio_priv(idev);
-        struct iio_buffer *buffer = idev->buffer;
         int i, j = 0;
 
         for (i = 0; i < idev->masklength; i++) {
@@ -81,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
                 *timestamp = pf->timestamp;
         }
 
-        iio_push_to_buffer(buffer, st->buffer);
+        iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
 
         iio_trigger_notify_done(idev->trig);
 
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 4c56ada51c39..02ef989b830d 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -197,21 +197,8 @@ static const struct iio_info gyro_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }
 
 /* Callback handler to send event after all samples are received and captured */
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 722a83fd8d85..aaadd32f9f0d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -31,6 +31,18 @@ static const char * const iio_endian_prefix[] = {
         [IIO_LE] = "le",
 };
 
+static bool iio_buffer_is_active(struct iio_dev *indio_dev,
+                                 struct iio_buffer *buf)
+{
+        struct list_head *p;
+
+        list_for_each(p, &indio_dev->buffer_list)
+                if (p == &buf->buffer_list)
+                        return true;
+
+        return false;
+}
+
 /**
  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
  *
@@ -134,7 +146,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
         if (ret < 0)
                 return ret;
         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
                 goto error_ret;
         }
@@ -180,12 +192,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
                 return ret;
 
         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
                 goto error_ret;
         }
         indio_dev->buffer->scan_timestamp = state;
-        indio_dev->scan_timestamp = state;
 error_ret:
         mutex_unlock(&indio_dev->mlock);
 
@@ -385,7 +396,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
                 return len;
 
         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
         } else {
                 if (buffer->access->set_length)
@@ -398,102 +409,14 @@ ssize_t iio_buffer_write_length(struct device *dev,
 }
 EXPORT_SYMBOL(iio_buffer_write_length);
 
-ssize_t iio_buffer_store_enable(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
-{
-        int ret;
-        bool requested_state, current_state;
-        int previous_mode;
-        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-        struct iio_buffer *buffer = indio_dev->buffer;
-
-        mutex_lock(&indio_dev->mlock);
-        previous_mode = indio_dev->currentmode;
-        requested_state = !(buf[0] == '0');
-        current_state = iio_buffer_enabled(indio_dev);
-        if (current_state == requested_state) {
-                printk(KERN_INFO "iio-buffer, current state requested again\n");
-                goto done;
-        }
-        if (requested_state) {
-                if (indio_dev->setup_ops->preenable) {
-                        ret = indio_dev->setup_ops->preenable(indio_dev);
-                        if (ret) {
-                                printk(KERN_ERR
-                                       "Buffer not started: "
-                                       "buffer preenable failed\n");
-                                goto error_ret;
-                        }
-                }
-                if (buffer->access->request_update) {
-                        ret = buffer->access->request_update(buffer);
-                        if (ret) {
-                                printk(KERN_INFO
-                                       "Buffer not started: "
-                                       "buffer parameter update failed\n");
-                                goto error_ret;
-                        }
-                }
-                /* Definitely possible for devices to support both of these. */
-                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
-                        if (!indio_dev->trig) {
-                                printk(KERN_INFO
-                                       "Buffer not started: no trigger\n");
-                                ret = -EINVAL;
-                                goto error_ret;
-                        }
-                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
-                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
-                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
-                else { /* should never be reached */
-                        ret = -EINVAL;
-                        goto error_ret;
-                }
-
-                if (indio_dev->setup_ops->postenable) {
-                        ret = indio_dev->setup_ops->postenable(indio_dev);
-                        if (ret) {
-                                printk(KERN_INFO
-                                       "Buffer not started: "
-                                       "postenable failed\n");
-                                indio_dev->currentmode = previous_mode;
-                                if (indio_dev->setup_ops->postdisable)
-                                        indio_dev->setup_ops->
-                                                postdisable(indio_dev);
-                                goto error_ret;
-                        }
-                }
-        } else {
-                if (indio_dev->setup_ops->predisable) {
-                        ret = indio_dev->setup_ops->predisable(indio_dev);
-                        if (ret)
-                                goto error_ret;
-                }
-                indio_dev->currentmode = INDIO_DIRECT_MODE;
-                if (indio_dev->setup_ops->postdisable) {
-                        ret = indio_dev->setup_ops->postdisable(indio_dev);
-                        if (ret)
-                                goto error_ret;
-                }
-        }
-done:
-        mutex_unlock(&indio_dev->mlock);
-        return len;
-
-error_ret:
-        mutex_unlock(&indio_dev->mlock);
-        return ret;
-}
-EXPORT_SYMBOL(iio_buffer_store_enable);
-
 ssize_t iio_buffer_show_enable(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-        return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
+        return sprintf(buf, "%d\n",
+                       iio_buffer_is_active(indio_dev,
+                                            indio_dev->buffer));
 }
 EXPORT_SYMBOL(iio_buffer_show_enable);
 
@@ -537,35 +460,220 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
         return bytes;
 }
 
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+int iio_update_buffers(struct iio_dev *indio_dev,
+                       struct iio_buffer *insert_buffer,
+                       struct iio_buffer *remove_buffer)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        dev_dbg(&indio_dev->dev, "%s\n", __func__);
+        int ret;
+        int success = 0;
+        struct iio_buffer *buffer;
+        unsigned long *compound_mask;
+        const unsigned long *old_mask;
 
-        /* How much space will the demuxed element take? */
-        indio_dev->scan_bytes =
-                iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
-                                       buffer->scan_timestamp);
-        buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);
+        /* Wind down existing buffers - iff there are any */
+        if (!list_empty(&indio_dev->buffer_list)) {
+                if (indio_dev->setup_ops->predisable) {
+                        ret = indio_dev->setup_ops->predisable(indio_dev);
+                        if (ret)
+                                goto error_ret;
+                }
+                indio_dev->currentmode = INDIO_DIRECT_MODE;
+                if (indio_dev->setup_ops->postdisable) {
+                        ret = indio_dev->setup_ops->postdisable(indio_dev);
+                        if (ret)
+                                goto error_ret;
+                }
+        }
+        /* Keep a copy of current setup to allow roll back */
+        old_mask = indio_dev->active_scan_mask;
+        if (!indio_dev->available_scan_masks)
+                indio_dev->active_scan_mask = NULL;
+
+        if (remove_buffer)
+                list_del(&remove_buffer->buffer_list);
+        if (insert_buffer)
+                list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
+
+        /* If no buffers in list, we are done */
+        if (list_empty(&indio_dev->buffer_list)) {
+                indio_dev->currentmode = INDIO_DIRECT_MODE;
+                if (indio_dev->available_scan_masks == NULL)
+                        kfree(old_mask);
+                return 0;
+        }
 
         /* What scan mask do we actually have ?*/
-        if (indio_dev->available_scan_masks)
+        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                                sizeof(long), GFP_KERNEL);
+        if (compound_mask == NULL) {
+                if (indio_dev->available_scan_masks == NULL)
+                        kfree(old_mask);
+                return -ENOMEM;
+        }
+        indio_dev->scan_timestamp = 0;
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
+                          indio_dev->masklength);
+                indio_dev->scan_timestamp |= buffer->scan_timestamp;
+        }
+        if (indio_dev->available_scan_masks) {
                 indio_dev->active_scan_mask =
                         iio_scan_mask_match(indio_dev->available_scan_masks,
                                             indio_dev->masklength,
-                                            buffer->scan_mask);
-        else
-                indio_dev->active_scan_mask = buffer->scan_mask;
-
-        if (indio_dev->active_scan_mask == NULL)
-                return -EINVAL;
+                                            compound_mask);
+                if (indio_dev->active_scan_mask == NULL) {
+                        /*
+                         * Roll back.
+                         * Note can only occur when adding a buffer.
+                         */
+                        list_del(&insert_buffer->buffer_list);
+                        indio_dev->active_scan_mask = old_mask;
+                        success = -EINVAL;
+                }
+        } else {
+                indio_dev->active_scan_mask = compound_mask;
+        }
 
         iio_update_demux(indio_dev);
 
-        if (indio_dev->info->update_scan_mode)
-                return indio_dev->info
+        /* Wind up again */
+        if (indio_dev->setup_ops->preenable) {
+                ret = indio_dev->setup_ops->preenable(indio_dev);
+                if (ret) {
+                        printk(KERN_ERR
+                               "Buffer not started:"
+                               "buffer preenable failed\n");
+                        goto error_remove_inserted;
+                }
+        }
+        indio_dev->scan_bytes =
+                iio_compute_scan_bytes(indio_dev,
+                                       indio_dev->active_scan_mask,
+                                       indio_dev->scan_timestamp);
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                if (buffer->access->request_update) {
+                        ret = buffer->access->request_update(buffer);
+                        if (ret) {
+                                printk(KERN_INFO
+                                       "Buffer not started:"
+                                       "buffer parameter update failed\n");
+                                goto error_run_postdisable;
+                        }
+                }
+        if (indio_dev->info->update_scan_mode) {
+                ret = indio_dev->info
                         ->update_scan_mode(indio_dev,
                                            indio_dev->active_scan_mask);
+                if (ret < 0) {
+                        printk(KERN_INFO "update scan mode failed\n");
+                        goto error_run_postdisable;
+                }
+        }
+        /* Definitely possible for devices to support both of these.*/
+        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+                if (!indio_dev->trig) {
+                        printk(KERN_INFO "Buffer not started: no trigger\n");
+                        ret = -EINVAL;
+                        /* Can only occur on first buffer */
+                        goto error_run_postdisable;
+                }
+                indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
+                indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+        } else { /* should never be reached */
+                ret = -EINVAL;
+                goto error_run_postdisable;
+        }
+
+        if (indio_dev->setup_ops->postenable) {
+                ret = indio_dev->setup_ops->postenable(indio_dev);
+                if (ret) {
+                        printk(KERN_INFO
+                               "Buffer not started: postenable failed\n");
+                        indio_dev->currentmode = INDIO_DIRECT_MODE;
+                        if (indio_dev->setup_ops->postdisable)
+                                indio_dev->setup_ops->postdisable(indio_dev);
+                        goto error_disable_all_buffers;
+                }
+        }
+
+        if (indio_dev->available_scan_masks)
+                kfree(compound_mask);
+        else
+                kfree(old_mask);
+
+        return success;
+
+error_disable_all_buffers:
+        indio_dev->currentmode = INDIO_DIRECT_MODE;
+error_run_postdisable:
+        if (indio_dev->setup_ops->postdisable)
+                indio_dev->setup_ops->postdisable(indio_dev);
+error_remove_inserted:
+
+        if (insert_buffer)
+                list_del(&insert_buffer->buffer_list);
+        indio_dev->active_scan_mask = old_mask;
+        kfree(compound_mask);
+error_ret:
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_buffers);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf,
+                                size_t len)
+{
+        int ret;
+        bool requested_state;
+        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+        struct iio_buffer *pbuf = indio_dev->buffer;
+        bool inlist;
+
+        ret = strtobool(buf, &requested_state);
+        if (ret < 0)
+                return ret;
+
+        mutex_lock(&indio_dev->mlock);
+
+        /* Find out if it is in the list */
+        inlist = iio_buffer_is_active(indio_dev, pbuf);
+        /* Already in desired state */
+        if (inlist == requested_state)
+                goto done;
+
+        if (requested_state)
+                ret = iio_update_buffers(indio_dev,
+                                         indio_dev->buffer, NULL);
+        else
+                ret = iio_update_buffers(indio_dev,
+                                         NULL, indio_dev->buffer);
+
+        if (ret < 0)
+                goto done;
+done:
+        mutex_unlock(&indio_dev->mlock);
+        return (ret < 0) ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+        struct iio_buffer *buffer;
+        unsigned bytes;
+        dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                if (buffer->access->set_bytes_per_datum) {
+                        bytes = iio_compute_scan_bytes(indio_dev,
+                                                       buffer->scan_mask,
+                                                       buffer->scan_timestamp);
+
+                        buffer->access->set_bytes_per_datum(buffer, bytes);
+                }
         return 0;
 }
 EXPORT_SYMBOL(iio_sw_buffer_preenable);
@@ -599,7 +707,11 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  * iio_scan_mask_set() - set particular bit in the scan mask
  * @buffer: the buffer whose scan mask we are interested in
  * @bit: the bit to be set.
- **/
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
 int iio_scan_mask_set(struct iio_dev *indio_dev,
                       struct iio_buffer *buffer, int bit)
 {
@@ -682,13 +794,12 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
         return buffer->demux_bounce;
 }
 
-int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
 {
         unsigned char *dataout = iio_demux(buffer, data);
 
         return buffer->access->store_to(buffer, dataout);
 }
-EXPORT_SYMBOL_GPL(iio_push_to_buffer);
 
 static void iio_buffer_demux_free(struct iio_buffer *buffer)
 {
@@ -699,10 +810,26 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
         }
 }
 
-int iio_update_demux(struct iio_dev *indio_dev)
+
+int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+{
+        int ret;
+        struct iio_buffer *buf;
+
+        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+                ret = iio_push_to_buffer(buf, data);
+                if (ret < 0)
+                        return ret;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers);
+
+static int iio_buffer_update_demux(struct iio_dev *indio_dev,
+                                   struct iio_buffer *buffer)
 {
         const struct iio_chan_spec *ch;
-        struct iio_buffer *buffer = indio_dev->buffer;
         int ret, in_ind = -1, out_ind, length;
         unsigned in_loc = 0, out_loc = 0;
         struct iio_demux_table *p;
@@ -787,4 +914,23 @@ error_clear_mux_table:
 
         return ret;
 }
+
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+        struct iio_buffer *buffer;
+        int ret;
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+                ret = iio_buffer_update_demux(indio_dev, buffer);
+                if (ret < 0)
+                        goto error_clear_mux_table;
+        }
+        return 0;
+
+error_clear_mux_table:
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                iio_buffer_demux_free(buffer);
+
+        return ret;
+}
 EXPORT_SYMBOL_GPL(iio_update_demux);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index cd700368eed0..060a4045be85 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -856,6 +856,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
                         return NULL;
                 }
                 dev_set_name(&dev->dev, "iio:device%d", dev->id);
+                INIT_LIST_HEAD(&dev->buffer_list);
         }
 
         return dev;
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 164b62b91a4b..36d210a06b28 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -164,7 +164,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
         struct adjd_s311_data *data = iio_priv(indio_dev);
-        struct iio_buffer *buffer = indio_dev->buffer;
         s64 time_ns = iio_get_time_ns();
         int len = 0;
         int i, j = 0;
@@ -187,7 +186,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *(s64 *)((u8 *)data->buffer + ALIGN(len, sizeof(s64)))
                         = time_ns;
-        iio_push_to_buffer(buffer, (u8 *)data->buffer);
+        iio_push_to_buffers(indio_dev, (u8 *)data->buffer);
 
 done:
         iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 96e3691e42c4..8e1f69844eea 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -176,21 +176,8 @@ static const struct iio_info als_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }
 
 /* Callback handler to send event after all samples are received and captured */
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index c4f0d274f577..d1b5fb74b9bf 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -198,21 +198,8 @@ static const struct iio_info magn_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }
 
 /* Callback handler to send event after all samples are received and captured */