author		Jonathan Cameron <jic23@cam.ac.uk>	2009-08-18 13:06:28 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-15 15:02:25 -0400
commit		14cd9a73d97e3c1063fa1b2b02ef32ac8a914e11 (patch)
tree		03e4dd0eb66d47f79d02bf6810151dd02c3da625 /drivers/staging
parent		2235acb21890cdd3bc189720b4e98fc1b5c3b268 (diff)
Staging: IIO: lis3l02dq ring buffer and data ready trigger support
Example of the relatively common case of a device that samples based on an internal clock and provides a data ready signal to indicate that new data is available to be read out. The generic trigger approach is used to allow other devices to be sampled 'at the same time' as this accelerometer, which is very useful in various motion estimation algorithms.

Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
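As an illustration of the 'other devices' use case, here is a minimal sketch (not part of the patch) of how a second driver's ring buffer could be driven by this data ready trigger. The foo_* names are hypothetical; the iio_trigger_attach_poll_func()/iio_trigger_dettach_poll_func() calls mirror the postenable/predisable handlers the patch adds below.

/* Hypothetical second driver: its ring fills whenever the selected
 * trigger (e.g. the lis3l02dq data ready trigger) fires, so both
 * devices are sampled together.  foo_* is illustrative only.
 */
static int foo_ring_postenable(struct iio_dev *indio_dev)
{
	/* Attach this device's poll function to the chosen trigger */
	return indio_dev->trig
		? iio_trigger_attach_poll_func(indio_dev->trig,
					       indio_dev->pollfunc)
		: 0;
}

static int foo_ring_predisable(struct iio_dev *indio_dev)
{
	/* Detach it again before the ring is disabled */
	return indio_dev->trig
		? iio_trigger_dettach_poll_func(indio_dev->trig,
						indio_dev->pollfunc)
		: 0;
}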
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/iio/accel/Makefile		|   1
-rw-r--r--	drivers/staging/iio/accel/lis3l02dq.h		|  22
-rw-r--r--	drivers/staging/iio/accel/lis3l02dq_ring.c	| 599
3 files changed, 621 insertions(+), 1 deletion(-)
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index d5432086f2f..d5335f9094a 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -4,6 +4,7 @@
 obj-$(CONFIG_KXSD9) += kxsd9.o
 
 lis3l02dq-y := lis3l02dq_core.o
+lis3l02dq-$(CONFIG_IIO_RING_BUFFER) += lis3l02dq_ring.o
 obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
 
 sca3000-y := sca3000_core.o sca3000_ring.o
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 18cb49ca7ab..91a5375408c 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -183,6 +183,26 @@ int lis3l02dq_spi_write_reg_8(struct device *dev,
 #define LIS3L02DQ_SCAN_ACC_Y 1
 #define LIS3L02DQ_SCAN_ACC_Z 2
 
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
+int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf);
+
+
+int lis3l02dq_configure_ring(struct iio_dev *indio_dev);
+void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev);
+
+int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring);
+void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
 static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev) {};
 static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
 {
@@ -208,5 +228,5 @@ static inline int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
 	return 0;
 };
 static inline void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring) {};
-
+#endif /* CONFIG_IIO_RING_BUFFER */
 #endif /* SPI_LIS3L02DQ_H_ */
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
new file mode 100644
index 00000000000..ba7452d2b65
--- /dev/null
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -0,0 +1,599 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/list.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../ring_sw.h"
#include "accel.h"
#include "../trigger.h"
#include "lis3l02dq.h"

/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
	u16 _lower = lower;
	u16 _upper = upper;
	return _lower | (_upper << 8);
}

/**
 * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
 * @scan_el:	associated iio scan element attribute
 * @indio_dev:	the device structure
 * @state:	desired state
 *
 * mlock already held when this is called.
 **/
static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
				       struct iio_dev *indio_dev,
				       bool state)
{
	u8 t, mask;
	int ret;

	ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;
	switch (scan_el->label) {
	case LIS3L02DQ_REG_OUT_X_L_ADDR:
		mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
		break;
	case LIS3L02DQ_REG_OUT_Y_L_ADDR:
		mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
		break;
	case LIS3L02DQ_REG_OUT_Z_L_ADDR:
		mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
		break;
	default:
		ret = -EINVAL;
		goto error_ret;
	}

	if (!(mask & t) == state) {
		if (state)
			t |= mask;
		else
			t &= ~mask;
		ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
						LIS3L02DQ_REG_CTRL_1_ADDR,
						&t);
	}
error_ret:
	return ret;
}

static IIO_SCAN_EL_C(accel_x, LIS3L02DQ_SCAN_ACC_X, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_X_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_y, LIS3L02DQ_SCAN_ACC_Y, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_Y_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_z, LIS3L02DQ_SCAN_ACC_Z, IIO_SIGNED(16),
		     LIS3L02DQ_REG_OUT_Z_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_TIMESTAMP;

static struct attribute *lis3l02dq_scan_el_attrs[] = {
	&iio_scan_el_accel_x.dev_attr.attr,
	&iio_scan_el_accel_y.dev_attr.attr,
	&iio_scan_el_accel_z.dev_attr.attr,
	&iio_scan_el_timestamp.dev_attr.attr,
	NULL,
};

static struct attribute_group lis3l02dq_scan_el_group = {
	.attrs = lis3l02dq_scan_el_attrs,
	.name = "scan_elements",
};

/**
 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
 * @private_data:	iio_dev
 **/
static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);
	st->last_timestamp = indio_dev->trig->timestamp;
	schedule_work(&st->work_trigger_to_ring);
	/* Indicate that this interrupt is being handled */

	/* Technically this is trigger related, but without this
	 * handler running there is currently no way for the interrupt
	 * to clear.
	 */
	st->inter = 1;
}

/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *dev_info,
					int index,
					s64 timestamp,
					int no_test)
{
	struct lis3l02dq_state *st = iio_dev_get_devdata(dev_info);
	struct iio_trigger *trig = st->trig;

	trig->timestamp = timestamp;
	iio_trigger_poll(trig);

	return IRQ_HANDLED;
}

/* This is an event as it is a response to a physical interrupt */
IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);

/**
 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
 **/
ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct iio_scan_el *el = NULL;
	int ret, len = 0, i = 0;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	s16 *data;

	while (dev_info->scan_el_attrs->attrs[i]) {
		el = to_iio_scan_el((struct device_attribute *)
				    (dev_info->scan_el_attrs->attrs[i]));
		/* label is in fact the address */
		if (el->label == this_attr->address)
			break;
		i++;
	}
	if (!dev_info->scan_el_attrs->attrs[i]) {
		ret = -EINVAL;
		goto error_ret;
	}
	/* If this element is in the scan mask */
	ret = iio_scan_mask_query(dev_info, el->number);
	if (ret < 0)
		goto error_ret;
	if (ret) {
		data = kmalloc(dev_info->ring->access.get_bpd(dev_info->ring),
			       GFP_KERNEL);
		if (data == NULL)
			return -ENOMEM;
		ret = dev_info->ring->access.read_last(dev_info->ring,
						       (u8 *)data);
		if (ret)
			goto error_free_data;
	} else {
		ret = -EINVAL;
		goto error_ret;
	}
	len = iio_scan_mask_count_to_right(dev_info, el->number);
	if (len < 0) {
		ret = len;
		goto error_free_data;
	}
	len = sprintf(buf, "ring %d\n", data[len]);
error_free_data:
	kfree(data);
error_ret:
	return ret ? ret : len;
}

static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};

/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @st:		device specific state
 * @rx_array:	(dma capable) receive array, must be at least
 *		4*number of channels
 **/
int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
{
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	xfers = kzalloc((st->indio_dev->scan_count) * 2
			* sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
		if (st->indio_dev->scan_mask & (1 << i)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}
	}
	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < st->indio_dev->scan_count * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}

/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
 * specific to be rolled into the core.
 */
static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct lis3l02dq_state *st
		= container_of(work_s, struct lis3l02dq_state,
			       work_trigger_to_ring);

	u8 *rx_array;
	int i = 0;
	u16 *data;
	size_t datasize = st->indio_dev
		->ring->access.get_bpd(st->indio_dev->ring);

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh");
		return;
	}
	/* Due to interleaved nature of transmission this buffer must be
	 * twice the number of bytes, or 4 times the number of channels
	 */
	rx_array = kmalloc(4 * (st->indio_dev->scan_count), GFP_KERNEL);
	if (rx_array == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh");
		kfree(data);
		return;
	}

	/* Whilst trigger specific, if this read does not occur the data
	 * ready interrupt will not be cleared.  Need to add a mechanism
	 * to provide a dummy read function if this is not triggering on
	 * the data ready function but something else is.
	 */
	st->inter = 0;

	if (st->indio_dev->scan_count)
		if (lis3l02dq_read_all(st, rx_array) >= 0)
			for (; i < st->indio_dev->scan_count; i++)
				data[i] = combine_8_to_16(rx_array[i*4+1],
							  rx_array[i*4+3]);
	/* Guaranteed to be aligned with 8 byte boundary */
	if (st->indio_dev->scan_timestamp)
		*((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;

	st->indio_dev->ring->access.store_to(st->indio_dev->ring,
					     (u8 *)data,
					     st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(rx_array);
	kfree(data);

	return;
}

/* In these circumstances is it better to go with unaligned packing and
 * deal with the cost?
 */
static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev *indio_dev)
{
	size_t size;
	/* Check if there are any scan elements enabled, if not fail */
	if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
		return -EINVAL;

	if (indio_dev->ring->access.set_bpd) {
		if (indio_dev->scan_timestamp) {
			if (indio_dev->scan_count) /* Timestamp and data */
				size = 2*sizeof(s64);
			else /* Timestamp only */
				size = sizeof(s64);
		} else { /* Data only */
			size = indio_dev->scan_count*sizeof(s16);
		}
		indio_dev->ring->access.set_bpd(indio_dev->ring, size);
	}

	return 0;
}

static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev *indio_dev)
{
	return indio_dev->trig
		? iio_trigger_attach_poll_func(indio_dev->trig,
					       indio_dev->pollfunc)
		: 0;
}

static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
{
	return indio_dev->trig
		? iio_trigger_dettach_poll_func(indio_dev->trig,
						indio_dev->pollfunc)
		: 0;
}

/* Caller responsible for locking as necessary. */
static int __lis3l02dq_write_data_ready_config(struct device *dev,
					       struct iio_event_handler_list *list,
					       bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {

		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;

		iio_remove_event_from_list(list,
					   &indio_dev->interrupts[0]
					   ->ev_list);

	/* Enable requested */
	} else if (state && !currentlyset) {
		/* If not set, enable requested */
		valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
		iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
		ret = lis3l02dq_spi_write_reg_8(dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						&valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}

/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * If disabling the interrupt, also do a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the ring is re-enabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct lis3l02dq_state *st = trig->private_data;
	int ret = 0;
	u8 t;
	__lis3l02dq_write_data_ready_config(&st->indio_dev->dev,
					    &iio_event_data_rdy_trig,
					    state);
	if (state == false) {
		/* Possible quirk with handler currently worked around
		 * by ensuring the work queue is empty */
		flush_scheduled_work();
		/* Clear any outstanding ready events */
		ret = lis3l02dq_read_all(st, NULL);
	}
	lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	return ret;
}

DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *lis3l02dq_trigger_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};

static const struct attribute_group lis3l02dq_trigger_attr_group = {
	.attrs = lis3l02dq_trigger_attrs,
};

/**
 * lis3l02dq_trig_try_reen() try re-enabling the irq for the data rdy trigger
 * @trig:	the datardy trigger
 *
 * As the trigger may occur on any data element being updated it is
 * really rather likely to occur during the read from the previous
 * trigger event.  The only way to discover if this has occurred on
 * boards not supporting level interrupts is to take a look at the line.
 * If it is indicating another interrupt and we don't seem to have a
 * handler looking at it, then we need to notify the core that we need
 * to tell the triggering core to try reading all these again.
 **/
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct lis3l02dq_state *st = trig->private_data;
	enable_irq(st->us->irq);
	/* If gpio still high (or high again) */
	if (gpio_get_value(irq_to_gpio(st->us->irq)))
		if (st->inter == 0) {
			/* already interrupt handler dealing with it */
			disable_irq_nosync(st->us->irq);
			if (st->inter == 1) {
				/* interrupt handler snuck in between test
				 * and disable */
				enable_irq(st->us->irq);
				return 0;
			}
			return -EAGAIN;
		}
	/* irq reenabled so success! */
	return 0;
}

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct lis3l02dq_state *state = indio_dev->dev_data;

	state->trig = iio_allocate_trigger();
	if (!state->trig)
		return -ENOMEM;
	state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
	if (!state->trig->name) {
		ret = -ENOMEM;
		goto error_free_trig;
	}
	snprintf((char *)state->trig->name,
		 IIO_TRIGGER_NAME_LENGTH,
		 "lis3l02dq-dev%d", indio_dev->id);
	state->trig->dev.parent = &state->us->dev;
	state->trig->owner = THIS_MODULE;
	state->trig->private_data = state;
	state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
	state->trig->try_reenable = &lis3l02dq_trig_try_reen;
	state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
	ret = iio_trigger_register(state->trig);
	if (ret)
		goto error_free_trig_name;

	return 0;

error_free_trig_name:
	kfree(state->trig->name);
error_free_trig:
	iio_free_trigger(state->trig);

	return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *state = indio_dev->dev_data;

	iio_trigger_unregister(state->trig);
	kfree(state->trig->name);
	iio_free_trigger(state->trig);
}

void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
	kfree(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->ring);
}

int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct lis3l02dq_state *st = indio_dev->dev_data;
	struct iio_ring_buffer *ring;
	INIT_WORK(&st->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);

	/* Set default scan mode */
	iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
	indio_dev->scan_timestamp = true;

	indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	iio_ring_sw_register_funcs(&ring->access);
	ring->preenable = &lis3l02dq_data_rdy_ring_preenable;
	ring->postenable = &lis3l02dq_data_rdy_ring_postenable;
	ring->predisable = &lis3l02dq_data_rdy_ring_predisable;
	ring->owner = THIS_MODULE;

	indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}
	indio_dev->pollfunc->poll_func_main = &lis3l02dq_poll_func_th;
	indio_dev->pollfunc->private_data = indio_dev;
	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->ring);
	return ret;
}

int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
	return iio_ring_buffer_register(ring);
}

void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
{
	iio_ring_buffer_unregister(ring);
}


int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
{
	/* Set sensible defaults for the ring buffer */
	if (indio_dev->ring->access.set_length)
		return indio_dev->ring->access.set_length(indio_dev->ring, 500);
	return 0;
}