author     Jonathan Cameron <jic23@cam.ac.uk>    2011-05-18 09:42:24 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>   2011-05-19 19:15:03 -0400
commit     5565a450248d827afa949aab157873d4b9be329e
tree       ef63d8173f83e685b00cdf41435575cfd34d84f2 /drivers
parent     38d15f06f942306050a063abd111467d39c5cc37
staging:iio: rationalization of different buffer implementation hooks.
1) Move a generic helper function out of ring_sw; it applies to other buffers as well.
2) Get rid of a lot of leftover function definitions.
3) Move all the access functions into static structures.
4) Introduce and use a static structure for the setup functions (preenable etc.).
Some driver conversions thanks to Michael Hennerich (pulled out of patches
that would otherwise sit after this).
Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
23 files changed, 363 insertions, 463 deletions
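
Every driver touched below makes the same two-part conversion: the individual callback pointers that used to live on each ring instance (preenable, postenable, predisable, postdisable) move into a shared const struct iio_ring_setup_ops hung off ring->setup_ops, and the call to iio_ring_sw_register_funcs(&ring->access) is replaced by pointing ring->access at the shared ring_sw_access_funcs table (or at a driver-specific access table, as sca3000 does for its hardware buffer). The standalone sketch below only models the shape of that refactor; the struct and function names are illustrative stand-ins, not the kernel's actual IIO definitions, which this diff shows only in part.

/*
 * Illustrative stand-in types only -- not the real IIO declarations.
 */
#include <stdio.h>

struct ring;

struct ring_setup_ops {                 /* plays the role of iio_ring_setup_ops */
	int (*preenable)(struct ring *r);
	int (*postenable)(struct ring *r);
	int (*predisable)(struct ring *r);
};

struct ring {
	/*
	 * Before the patch each of these hooks was its own member:
	 *   int (*preenable)(struct ring *r);  ...and so on.
	 * After the patch a single pointer to a shared const table remains.
	 */
	const struct ring_setup_ops *setup_ops;
};

static int sw_ring_preenable(struct ring *r)  { (void)r; puts("preenable");  return 0; }
static int trig_postenable(struct ring *r)    { (void)r; puts("postenable"); return 0; }
static int trig_predisable(struct ring *r)    { (void)r; puts("predisable"); return 0; }

/* One read-only table shared by every ring instance of a given driver. */
static const struct ring_setup_ops demo_setup_ops = {
	.preenable  = sw_ring_preenable,
	.postenable = trig_postenable,
	.predisable = trig_predisable,
};

/* Core code dereferences the table instead of per-instance members. */
static int ring_enable(struct ring *r)
{
	int ret = 0;

	if (r->setup_ops && r->setup_ops->preenable)
		ret = r->setup_ops->preenable(r);
	if (!ret && r->setup_ops && r->setup_ops->postenable)
		ret = r->setup_ops->postenable(r);
	return ret;
}

int main(void)
{
	struct ring r = { .setup_ops = &demo_setup_ops };

	return ring_enable(&r);
}

Sharing one const table per driver instead of one set of function pointers per instance shrinks the per-ring state and lets the tables live in read-only memory; the same reasoning applies to the access-function tables.
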
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 5405a38b2674..83b53365d8ed 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -68,7 +68,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p) | |||
68 | 68 | ||
69 | int i = 0; | 69 | int i = 0; |
70 | s16 *data; | 70 | s16 *data; |
71 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 71 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
72 | 72 | ||
73 | data = kmalloc(datasize, GFP_KERNEL); | 73 | data = kmalloc(datasize, GFP_KERNEL); |
74 | if (data == NULL) { | 74 | if (data == NULL) { |
@@ -86,7 +86,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p) | |||
86 | if (ring->scan_timestamp) | 86 | if (ring->scan_timestamp) |
87 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 87 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
88 | 88 | ||
89 | ring->access.store_to(ring, (u8 *)data, pf->timestamp); | 89 | ring->access->store_to(ring, (u8 *)data, pf->timestamp); |
90 | 90 | ||
91 | iio_trigger_notify_done(st->indio_dev->trig); | 91 | iio_trigger_notify_done(st->indio_dev->trig); |
92 | kfree(data); | 92 | kfree(data); |
@@ -101,6 +101,12 @@ void adis16201_unconfigure_ring(struct iio_dev *indio_dev) | |||
101 | iio_sw_rb_free(indio_dev->ring); | 101 | iio_sw_rb_free(indio_dev->ring); |
102 | } | 102 | } |
103 | 103 | ||
104 | static const struct iio_ring_setup_ops adis16201_ring_setup_ops = { | ||
105 | .preenable = &iio_sw_ring_preenable, | ||
106 | .postenable = &iio_triggered_ring_postenable, | ||
107 | .predisable = &iio_triggered_ring_predisable, | ||
108 | }; | ||
109 | |||
104 | int adis16201_configure_ring(struct iio_dev *indio_dev) | 110 | int adis16201_configure_ring(struct iio_dev *indio_dev) |
105 | { | 111 | { |
106 | int ret = 0; | 112 | int ret = 0; |
@@ -113,12 +119,10 @@ int adis16201_configure_ring(struct iio_dev *indio_dev) | |||
113 | } | 119 | } |
114 | indio_dev->ring = ring; | 120 | indio_dev->ring = ring; |
115 | /* Effectively select the ring buffer implementation */ | 121 | /* Effectively select the ring buffer implementation */ |
116 | iio_ring_sw_register_funcs(&ring->access); | ||
117 | ring->bpe = 2; | 122 | ring->bpe = 2; |
118 | ring->scan_timestamp = true; | 123 | ring->scan_timestamp = true; |
119 | ring->preenable = &iio_sw_ring_preenable; | 124 | ring->access = &ring_sw_access_funcs; |
120 | ring->postenable = &iio_triggered_ring_postenable; | 125 | ring->setup_ops = &adis16201_ring_setup_ops; |
121 | ring->predisable = &iio_triggered_ring_predisable; | ||
122 | ring->owner = THIS_MODULE; | 126 | ring->owner = THIS_MODULE; |
123 | 127 | ||
124 | /* Set default scan mode */ | 128 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index a21a71d583bd..1b8863d2ff09 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -73,7 +73,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p) | |||
73 | 73 | ||
74 | int i = 0; | 74 | int i = 0; |
75 | s16 *data; | 75 | s16 *data; |
76 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 76 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
77 | 77 | ||
78 | data = kmalloc(datasize, GFP_KERNEL); | 78 | data = kmalloc(datasize, GFP_KERNEL); |
79 | if (data == NULL) { | 79 | if (data == NULL) { |
@@ -91,7 +91,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p) | |||
91 | if (ring->scan_timestamp) | 91 | if (ring->scan_timestamp) |
92 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 92 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
93 | 93 | ||
94 | ring->access.store_to(ring, | 94 | ring->access->store_to(ring, |
95 | (u8 *)data, | 95 | (u8 *)data, |
96 | pf->timestamp); | 96 | pf->timestamp); |
97 | 97 | ||
@@ -108,6 +108,12 @@ void adis16203_unconfigure_ring(struct iio_dev *indio_dev) | |||
108 | iio_sw_rb_free(indio_dev->ring); | 108 | iio_sw_rb_free(indio_dev->ring); |
109 | } | 109 | } |
110 | 110 | ||
111 | static const struct iio_ring_setup_ops adis16203_ring_setup_ops = { | ||
112 | .preenable = &iio_sw_ring_preenable, | ||
113 | .postenable = &iio_triggered_ring_postenable, | ||
114 | .predisable = &iio_triggered_ring_predisable, | ||
115 | }; | ||
116 | |||
111 | int adis16203_configure_ring(struct iio_dev *indio_dev) | 117 | int adis16203_configure_ring(struct iio_dev *indio_dev) |
112 | { | 118 | { |
113 | int ret = 0; | 119 | int ret = 0; |
@@ -120,12 +126,10 @@ int adis16203_configure_ring(struct iio_dev *indio_dev) | |||
120 | } | 126 | } |
121 | indio_dev->ring = ring; | 127 | indio_dev->ring = ring; |
122 | /* Effectively select the ring buffer implementation */ | 128 | /* Effectively select the ring buffer implementation */ |
123 | iio_ring_sw_register_funcs(&ring->access); | ||
124 | ring->bpe = 2; | 129 | ring->bpe = 2; |
125 | ring->scan_timestamp = true; | 130 | ring->scan_timestamp = true; |
126 | ring->preenable = &iio_sw_ring_preenable; | 131 | ring->access = &ring_sw_access_funcs; |
127 | ring->postenable = &iio_triggered_ring_postenable; | 132 | ring->setup_ops = &adis16203_ring_setup_ops; |
128 | ring->predisable = &iio_triggered_ring_predisable; | ||
129 | ring->owner = THIS_MODULE; | 133 | ring->owner = THIS_MODULE; |
130 | 134 | ||
131 | /* Set default scan mode */ | 135 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 50cd073ca9a9..7d99b4884f94 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -70,7 +70,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p) | |||
70 | struct iio_ring_buffer *ring = indio_dev->ring; | 70 | struct iio_ring_buffer *ring = indio_dev->ring; |
71 | int i = 0; | 71 | int i = 0; |
72 | s16 *data; | 72 | s16 *data; |
73 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 73 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
74 | 74 | ||
75 | data = kmalloc(datasize, GFP_KERNEL); | 75 | data = kmalloc(datasize, GFP_KERNEL); |
76 | if (data == NULL) { | 76 | if (data == NULL) { |
@@ -88,7 +88,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p) | |||
88 | if (ring->scan_timestamp) | 88 | if (ring->scan_timestamp) |
89 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 89 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
90 | 90 | ||
91 | ring->access.store_to(ring, (u8 *)data, pf->timestamp); | 91 | ring->access->store_to(ring, (u8 *)data, pf->timestamp); |
92 | 92 | ||
93 | iio_trigger_notify_done(st->indio_dev->trig); | 93 | iio_trigger_notify_done(st->indio_dev->trig); |
94 | kfree(data); | 94 | kfree(data); |
@@ -103,6 +103,12 @@ void adis16204_unconfigure_ring(struct iio_dev *indio_dev) | |||
103 | iio_sw_rb_free(indio_dev->ring); | 103 | iio_sw_rb_free(indio_dev->ring); |
104 | } | 104 | } |
105 | 105 | ||
106 | static const struct iio_ring_setup_ops adis16204_ring_setup_ops = { | ||
107 | .preenable = &iio_sw_ring_preenable, | ||
108 | .postenable = &iio_triggered_ring_postenable, | ||
109 | .predisable = &iio_triggered_ring_predisable, | ||
110 | }; | ||
111 | |||
106 | int adis16204_configure_ring(struct iio_dev *indio_dev) | 112 | int adis16204_configure_ring(struct iio_dev *indio_dev) |
107 | { | 113 | { |
108 | int ret = 0; | 114 | int ret = 0; |
@@ -115,12 +121,10 @@ int adis16204_configure_ring(struct iio_dev *indio_dev) | |||
115 | } | 121 | } |
116 | indio_dev->ring = ring; | 122 | indio_dev->ring = ring; |
117 | /* Effectively select the ring buffer implementation */ | 123 | /* Effectively select the ring buffer implementation */ |
118 | iio_ring_sw_register_funcs(&ring->access); | 124 | ring->access = &ring_sw_access_funcs; |
119 | ring->bpe = 2; | 125 | ring->bpe = 2; |
120 | ring->scan_timestamp = true; | 126 | ring->scan_timestamp = true; |
121 | ring->preenable = &iio_sw_ring_preenable; | 127 | ring->setup_ops = &adis16204_ring_setup_ops; |
122 | ring->postenable = &iio_triggered_ring_postenable; | ||
123 | ring->predisable = &iio_triggered_ring_predisable; | ||
124 | ring->owner = THIS_MODULE; | 128 | ring->owner = THIS_MODULE; |
125 | 129 | ||
126 | /* Set default scan mode */ | 130 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 2c7be3bcc24d..26028e9a9056 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -71,7 +71,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p) | |||
71 | 71 | ||
72 | int i = 0; | 72 | int i = 0; |
73 | s16 *data; | 73 | s16 *data; |
74 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 74 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
75 | 75 | ||
76 | data = kmalloc(datasize , GFP_KERNEL); | 76 | data = kmalloc(datasize , GFP_KERNEL); |
77 | if (data == NULL) { | 77 | if (data == NULL) { |
@@ -88,7 +88,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p) | |||
88 | if (ring->scan_timestamp) | 88 | if (ring->scan_timestamp) |
89 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 89 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
90 | 90 | ||
91 | ring->access.store_to(ring, (u8 *)data, pf->timestamp); | 91 | ring->access->store_to(ring, (u8 *)data, pf->timestamp); |
92 | 92 | ||
93 | iio_trigger_notify_done(st->indio_dev->trig); | 93 | iio_trigger_notify_done(st->indio_dev->trig); |
94 | kfree(data); | 94 | kfree(data); |
@@ -103,6 +103,12 @@ void adis16209_unconfigure_ring(struct iio_dev *indio_dev) | |||
103 | iio_sw_rb_free(indio_dev->ring); | 103 | iio_sw_rb_free(indio_dev->ring); |
104 | } | 104 | } |
105 | 105 | ||
106 | static const struct iio_ring_setup_ops adis16209_ring_setup_ops = { | ||
107 | .preenable = &iio_sw_ring_preenable, | ||
108 | .postenable = &iio_triggered_ring_postenable, | ||
109 | .predisable = &iio_triggered_ring_predisable, | ||
110 | }; | ||
111 | |||
106 | int adis16209_configure_ring(struct iio_dev *indio_dev) | 112 | int adis16209_configure_ring(struct iio_dev *indio_dev) |
107 | { | 113 | { |
108 | int ret = 0; | 114 | int ret = 0; |
@@ -115,12 +121,10 @@ int adis16209_configure_ring(struct iio_dev *indio_dev) | |||
115 | } | 121 | } |
116 | indio_dev->ring = ring; | 122 | indio_dev->ring = ring; |
117 | /* Effectively select the ring buffer implementation */ | 123 | /* Effectively select the ring buffer implementation */ |
118 | iio_ring_sw_register_funcs(&ring->access); | 124 | ring->access = &ring_sw_access_funcs; |
119 | ring->bpe = 2; | 125 | ring->bpe = 2; |
120 | ring->scan_timestamp = true; | 126 | ring->scan_timestamp = true; |
121 | ring->preenable = &iio_sw_ring_preenable; | 127 | ring->setup_ops = &adis16209_ring_setup_ops; |
122 | ring->postenable = &iio_triggered_ring_postenable; | ||
123 | ring->predisable = &iio_triggered_ring_predisable; | ||
124 | ring->owner = THIS_MODULE; | 128 | ring->owner = THIS_MODULE; |
125 | 129 | ||
126 | /* Set default scan mode */ | 130 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index bddd6990b3db..8450f7f0e538 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -68,7 +68,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p) | |||
68 | 68 | ||
69 | int i = 0; | 69 | int i = 0; |
70 | s16 *data; | 70 | s16 *data; |
71 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 71 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
72 | 72 | ||
73 | data = kmalloc(datasize, GFP_KERNEL); | 73 | data = kmalloc(datasize, GFP_KERNEL); |
74 | if (data == NULL) { | 74 | if (data == NULL) { |
@@ -85,7 +85,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p) | |||
85 | if (ring->scan_timestamp) | 85 | if (ring->scan_timestamp) |
86 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 86 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
87 | 87 | ||
88 | ring->access.store_to(ring, (u8 *)data, pf->timestamp); | 88 | ring->access->store_to(ring, (u8 *)data, pf->timestamp); |
89 | 89 | ||
90 | iio_trigger_notify_done(st->indio_dev->trig); | 90 | iio_trigger_notify_done(st->indio_dev->trig); |
91 | kfree(data); | 91 | kfree(data); |
@@ -100,6 +100,12 @@ void adis16240_unconfigure_ring(struct iio_dev *indio_dev) | |||
100 | iio_sw_rb_free(indio_dev->ring); | 100 | iio_sw_rb_free(indio_dev->ring); |
101 | } | 101 | } |
102 | 102 | ||
103 | static const struct iio_ring_setup_ops adis16240_ring_setup_ops = { | ||
104 | .preenable = &iio_sw_ring_preenable, | ||
105 | .postenable = &iio_triggered_ring_postenable, | ||
106 | .predisable = &iio_triggered_ring_predisable, | ||
107 | }; | ||
108 | |||
103 | int adis16240_configure_ring(struct iio_dev *indio_dev) | 109 | int adis16240_configure_ring(struct iio_dev *indio_dev) |
104 | { | 110 | { |
105 | int ret = 0; | 111 | int ret = 0; |
@@ -112,12 +118,10 @@ int adis16240_configure_ring(struct iio_dev *indio_dev) | |||
112 | } | 118 | } |
113 | indio_dev->ring = ring; | 119 | indio_dev->ring = ring; |
114 | /* Effectively select the ring buffer implementation */ | 120 | /* Effectively select the ring buffer implementation */ |
115 | iio_ring_sw_register_funcs(&ring->access); | 121 | ring->access = &ring_sw_access_funcs; |
116 | ring->bpe = 2; | 122 | ring->bpe = 2; |
117 | ring->scan_timestamp = true; | 123 | ring->scan_timestamp = true; |
118 | ring->preenable = &iio_sw_ring_preenable; | 124 | ring->setup_ops = &adis16240_ring_setup_ops; |
119 | ring->postenable = &iio_triggered_ring_postenable; | ||
120 | ring->predisable = &iio_triggered_ring_predisable; | ||
121 | ring->owner = THIS_MODULE; | 125 | ring->owner = THIS_MODULE; |
122 | 126 | ||
123 | /* Set default scan mode */ | 127 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 3f1d7c678867..43277d1dd9ab 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -196,12 +196,12 @@ void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev); | |||
196 | #ifdef CONFIG_LIS3L02DQ_BUF_RING_SW | 196 | #ifdef CONFIG_LIS3L02DQ_BUF_RING_SW |
197 | #define lis3l02dq_free_buf iio_sw_rb_free | 197 | #define lis3l02dq_free_buf iio_sw_rb_free |
198 | #define lis3l02dq_alloc_buf iio_sw_rb_allocate | 198 | #define lis3l02dq_alloc_buf iio_sw_rb_allocate |
199 | #define lis3l02dq_register_buf_funcs iio_ring_sw_register_funcs | 199 | #define lis3l02dq_access_funcs ring_sw_access_funcs |
200 | #endif | 200 | #endif |
201 | #ifdef CONFIG_LIS3L02DQ_BUF_KFIFO | 201 | #ifdef CONFIG_LIS3L02DQ_BUF_KFIFO |
202 | #define lis3l02dq_free_buf iio_kfifo_free | 202 | #define lis3l02dq_free_buf iio_kfifo_free |
203 | #define lis3l02dq_alloc_buf iio_kfifo_allocate | 203 | #define lis3l02dq_alloc_buf iio_kfifo_allocate |
204 | #define lis3l02dq_register_buf_funcs iio_kfifo_register_funcs | 204 | #define lis3l02dq_access_funcs kfifo_access_funcs |
205 | #endif | 205 | #endif |
206 | irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private); | 206 | irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private); |
207 | #define lis3l02dq_th lis3l02dq_data_rdy_trig_poll | 207 | #define lis3l02dq_th lis3l02dq_data_rdy_trig_poll |
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 2b7219b75fbb..ab11e527a6aa 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -54,12 +54,12 @@ ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring, | |||
54 | if (!iio_scan_mask_query(ring, index)) | 54 | if (!iio_scan_mask_query(ring, index)) |
55 | return -EINVAL; | 55 | return -EINVAL; |
56 | 56 | ||
57 | data = kmalloc(ring->access.get_bytes_per_datum(ring), | 57 | data = kmalloc(ring->access->get_bytes_per_datum(ring), |
58 | GFP_KERNEL); | 58 | GFP_KERNEL); |
59 | if (data == NULL) | 59 | if (data == NULL) |
60 | return -ENOMEM; | 60 | return -ENOMEM; |
61 | 61 | ||
62 | ret = ring->access.read_last(ring, (u8 *)data); | 62 | ret = ring->access->read_last(ring, (u8 *)data); |
63 | if (ret) | 63 | if (ret) |
64 | goto error_free_data; | 64 | goto error_free_data; |
65 | *val = data[iio_scan_mask_count_to_right(ring, index)]; | 65 | *val = data[iio_scan_mask_count_to_right(ring, index)]; |
@@ -400,6 +400,11 @@ error_ret: | |||
400 | return ret; | 400 | return ret; |
401 | } | 401 | } |
402 | 402 | ||
403 | static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = { | ||
404 | .preenable = &iio_sw_ring_preenable, | ||
405 | .postenable = &lis3l02dq_ring_postenable, | ||
406 | .predisable = &lis3l02dq_ring_predisable, | ||
407 | }; | ||
403 | 408 | ||
404 | int lis3l02dq_configure_ring(struct iio_dev *indio_dev) | 409 | int lis3l02dq_configure_ring(struct iio_dev *indio_dev) |
405 | { | 410 | { |
@@ -415,13 +420,11 @@ int lis3l02dq_configure_ring(struct iio_dev *indio_dev) | |||
415 | 420 | ||
416 | indio_dev->ring = ring; | 421 | indio_dev->ring = ring; |
417 | /* Effectively select the ring buffer implementation */ | 422 | /* Effectively select the ring buffer implementation */ |
418 | lis3l02dq_register_buf_funcs(&ring->access); | 423 | indio_dev->ring->access = &lis3l02dq_access_funcs; |
419 | ring->bpe = 2; | 424 | ring->bpe = 2; |
420 | 425 | ||
421 | ring->scan_timestamp = true; | 426 | ring->scan_timestamp = true; |
422 | ring->preenable = &iio_sw_ring_preenable; | 427 | ring->setup_ops = &lis3l02dq_ring_setup_ops; |
423 | ring->postenable = &lis3l02dq_ring_postenable; | ||
424 | ring->predisable = &lis3l02dq_ring_predisable; | ||
425 | ring->owner = THIS_MODULE; | 428 | ring->owner = THIS_MODULE; |
426 | 429 | ||
427 | /* Set default scan mode */ | 430 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index d3c37899f387..7c4ff0b1df04 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -367,6 +367,12 @@ static inline void sca3000_rb_free(struct iio_ring_buffer *r) | |||
367 | iio_put_ring_buffer(r); | 367 | iio_put_ring_buffer(r); |
368 | } | 368 | } |
369 | 369 | ||
370 | static const struct iio_ring_access_funcs sca3000_ring_access_funcs = { | ||
371 | .read_first_n = &sca3000_read_first_n_hw_rb, | ||
372 | .get_length = &sca3000_ring_get_length, | ||
373 | .get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum, | ||
374 | }; | ||
375 | |||
370 | int sca3000_configure_ring(struct iio_dev *indio_dev) | 376 | int sca3000_configure_ring(struct iio_dev *indio_dev) |
371 | { | 377 | { |
372 | indio_dev->ring = sca3000_rb_allocate(indio_dev); | 378 | indio_dev->ring = sca3000_rb_allocate(indio_dev); |
@@ -374,10 +380,7 @@ int sca3000_configure_ring(struct iio_dev *indio_dev) | |||
374 | return -ENOMEM; | 380 | return -ENOMEM; |
375 | indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER; | 381 | indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER; |
376 | 382 | ||
377 | indio_dev->ring->access.read_first_n = &sca3000_read_first_n_hw_rb; | 383 | indio_dev->ring->access = &sca3000_ring_access_funcs; |
378 | indio_dev->ring->access.get_length = &sca3000_ring_get_length; | ||
379 | indio_dev->ring->access.get_bytes_per_datum = | ||
380 | &sca3000_ring_get_bytes_per_datum; | ||
381 | 384 | ||
382 | iio_scan_mask_set(indio_dev->ring, 0); | 385 | iio_scan_mask_set(indio_dev->ring, 0); |
383 | iio_scan_mask_set(indio_dev->ring, 1); | 386 | iio_scan_mask_set(indio_dev->ring, 1); |
@@ -432,10 +435,14 @@ static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev) | |||
432 | return __sca3000_hw_ring_state_set(indio_dev, 0); | 435 | return __sca3000_hw_ring_state_set(indio_dev, 0); |
433 | } | 436 | } |
434 | 437 | ||
438 | static const struct iio_ring_setup_ops sca3000_ring_setup_ops = { | ||
439 | .preenable = &sca3000_hw_ring_preenable, | ||
440 | .postdisable = &sca3000_hw_ring_postdisable, | ||
441 | }; | ||
442 | |||
435 | void sca3000_register_ring_funcs(struct iio_dev *indio_dev) | 443 | void sca3000_register_ring_funcs(struct iio_dev *indio_dev) |
436 | { | 444 | { |
437 | indio_dev->ring->preenable = &sca3000_hw_ring_preenable; | 445 | indio_dev->ring->setup_ops = &sca3000_ring_setup_ops; |
438 | indio_dev->ring->postdisable = &sca3000_hw_ring_postdisable; | ||
439 | } | 446 | } |
440 | 447 | ||
441 | /** | 448 | /** |
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index d3251f7d0ab1..09b1477c09af 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -32,13 +32,13 @@ int ad7298_scan_from_ring(struct ad7298_state *st, long ch) | |||
32 | goto error_ret; | 32 | goto error_ret; |
33 | } | 33 | } |
34 | 34 | ||
35 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), | 35 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
36 | GFP_KERNEL); | 36 | GFP_KERNEL); |
37 | if (ring_data == NULL) { | 37 | if (ring_data == NULL) { |
38 | ret = -ENOMEM; | 38 | ret = -ENOMEM; |
39 | goto error_ret; | 39 | goto error_ret; |
40 | } | 40 | } |
41 | ret = ring->access.read_last(ring, (u8 *) ring_data); | 41 | ret = ring->access->read_last(ring, (u8 *) ring_data); |
42 | if (ret) | 42 | if (ret) |
43 | goto error_free_ring_data; | 43 | goto error_free_ring_data; |
44 | 44 | ||
@@ -74,8 +74,8 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev) | |||
74 | d_size += sizeof(s64) - (d_size % sizeof(s64)); | 74 | d_size += sizeof(s64) - (d_size % sizeof(s64)); |
75 | } | 75 | } |
76 | 76 | ||
77 | if (ring->access.set_bytes_per_datum) | 77 | if (ring->access->set_bytes_per_datum) |
78 | ring->access.set_bytes_per_datum(ring, d_size); | 78 | ring->access->set_bytes_per_datum(ring, d_size); |
79 | 79 | ||
80 | st->d_size = d_size; | 80 | st->d_size = d_size; |
81 | 81 | ||
@@ -140,12 +140,18 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p) | |||
140 | for (i = 0; i < ring->scan_count; i++) | 140 | for (i = 0; i < ring->scan_count; i++) |
141 | buf[i] = be16_to_cpu(st->rx_buf[i]); | 141 | buf[i] = be16_to_cpu(st->rx_buf[i]); |
142 | 142 | ||
143 | indio_dev->ring->access.store_to(ring, (u8 *)buf, time_ns); | 143 | indio_dev->ring->access->store_to(ring, (u8 *)buf, time_ns); |
144 | iio_trigger_notify_done(indio_dev->trig); | 144 | iio_trigger_notify_done(indio_dev->trig); |
145 | 145 | ||
146 | return IRQ_HANDLED; | 146 | return IRQ_HANDLED; |
147 | } | 147 | } |
148 | 148 | ||
149 | static const struct iio_ring_setup_ops ad7298_ring_setup_ops = { | ||
150 | .preenable = &ad7298_ring_preenable, | ||
151 | .postenable = &iio_triggered_ring_postenable, | ||
152 | .predisable = &iio_triggered_ring_predisable, | ||
153 | }; | ||
154 | |||
149 | int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 155 | int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
150 | { | 156 | { |
151 | int ret; | 157 | int ret; |
@@ -156,7 +162,7 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
156 | goto error_ret; | 162 | goto error_ret; |
157 | } | 163 | } |
158 | /* Effectively select the ring buffer implementation */ | 164 | /* Effectively select the ring buffer implementation */ |
159 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 165 | indio_dev->ring->access = &ring_sw_access_funcs; |
160 | 166 | ||
161 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 167 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
162 | if (indio_dev->pollfunc == NULL) { | 168 | if (indio_dev->pollfunc == NULL) { |
@@ -173,10 +179,7 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
173 | goto error_free_poll_func; | 179 | goto error_free_poll_func; |
174 | } | 180 | } |
175 | /* Ring buffer functions - here trigger setup related */ | 181 | /* Ring buffer functions - here trigger setup related */ |
176 | indio_dev->ring->preenable = &ad7298_ring_preenable; | 182 | indio_dev->ring->setup_ops = &ad7298_ring_setup_ops; |
177 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
178 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
179 | |||
180 | indio_dev->ring->scan_timestamp = true; | 183 | indio_dev->ring->scan_timestamp = true; |
181 | 184 | ||
182 | /* Flag that polled ring buffering is possible */ | 185 | /* Flag that polled ring buffering is possible */ |
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index ec1fa14d86ab..1d696ef5f4de 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -28,12 +28,13 @@ int ad7476_scan_from_ring(struct ad7476_state *st) | |||
28 | int ret; | 28 | int ret; |
29 | u8 *ring_data; | 29 | u8 *ring_data; |
30 | 30 | ||
31 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL); | 31 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
32 | GFP_KERNEL); | ||
32 | if (ring_data == NULL) { | 33 | if (ring_data == NULL) { |
33 | ret = -ENOMEM; | 34 | ret = -ENOMEM; |
34 | goto error_ret; | 35 | goto error_ret; |
35 | } | 36 | } |
36 | ret = ring->access.read_last(ring, ring_data); | 37 | ret = ring->access->read_last(ring, ring_data); |
37 | if (ret) | 38 | if (ret) |
38 | goto error_free_ring_data; | 39 | goto error_free_ring_data; |
39 | 40 | ||
@@ -67,8 +68,8 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev) | |||
67 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); | 68 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); |
68 | } | 69 | } |
69 | 70 | ||
70 | if (indio_dev->ring->access.set_bytes_per_datum) | 71 | if (indio_dev->ring->access->set_bytes_per_datum) |
71 | indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring, | 72 | indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, |
72 | st->d_size); | 73 | st->d_size); |
73 | 74 | ||
74 | return 0; | 75 | return 0; |
@@ -79,7 +80,6 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p) | |||
79 | struct iio_poll_func *pf = p; | 80 | struct iio_poll_func *pf = p; |
80 | struct iio_dev *indio_dev = pf->private_data; | 81 | struct iio_dev *indio_dev = pf->private_data; |
81 | struct ad7476_state *st = iio_dev_get_devdata(indio_dev); | 82 | struct ad7476_state *st = iio_dev_get_devdata(indio_dev); |
82 | struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring); | ||
83 | s64 time_ns; | 83 | s64 time_ns; |
84 | __u8 *rxbuf; | 84 | __u8 *rxbuf; |
85 | int b_sent; | 85 | int b_sent; |
@@ -99,7 +99,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p) | |||
99 | memcpy(rxbuf + st->d_size - sizeof(s64), | 99 | memcpy(rxbuf + st->d_size - sizeof(s64), |
100 | &time_ns, sizeof(time_ns)); | 100 | &time_ns, sizeof(time_ns)); |
101 | 101 | ||
102 | indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns); | 102 | indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns); |
103 | done: | 103 | done: |
104 | iio_trigger_notify_done(indio_dev->trig); | 104 | iio_trigger_notify_done(indio_dev->trig); |
105 | kfree(rxbuf); | 105 | kfree(rxbuf); |
@@ -107,6 +107,12 @@ done: | |||
107 | return IRQ_HANDLED; | 107 | return IRQ_HANDLED; |
108 | } | 108 | } |
109 | 109 | ||
110 | static const struct iio_ring_setup_ops ad7476_ring_setup_ops = { | ||
111 | .preenable = &ad7476_ring_preenable, | ||
112 | .postenable = &iio_triggered_ring_postenable, | ||
113 | .predisable = &iio_triggered_ring_predisable, | ||
114 | }; | ||
115 | |||
110 | int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 116 | int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
111 | { | 117 | { |
112 | struct ad7476_state *st = indio_dev->dev_data; | 118 | struct ad7476_state *st = indio_dev->dev_data; |
@@ -118,7 +124,7 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
118 | goto error_ret; | 124 | goto error_ret; |
119 | } | 125 | } |
120 | /* Effectively select the ring buffer implementation */ | 126 | /* Effectively select the ring buffer implementation */ |
121 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 127 | indio_dev->ring->access = &ring_sw_access_funcs; |
122 | indio_dev->pollfunc = kzalloc(sizeof(indio_dev->pollfunc), GFP_KERNEL); | 128 | indio_dev->pollfunc = kzalloc(sizeof(indio_dev->pollfunc), GFP_KERNEL); |
123 | if (indio_dev->pollfunc == NULL) { | 129 | if (indio_dev->pollfunc == NULL) { |
124 | ret = -ENOMEM; | 130 | ret = -ENOMEM; |
@@ -137,10 +143,7 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
137 | } | 143 | } |
138 | 144 | ||
139 | /* Ring buffer functions - here trigger setup related */ | 145 | /* Ring buffer functions - here trigger setup related */ |
140 | 146 | indio_dev->ring->setup_ops = &ad7476_ring_setup_ops; | |
141 | indio_dev->ring->preenable = &ad7476_ring_preenable; | ||
142 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
143 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
144 | indio_dev->ring->scan_timestamp = true; | 147 | indio_dev->ring->scan_timestamp = true; |
145 | 148 | ||
146 | /* Flag that polled ring buffering is possible */ | 149 | /* Flag that polled ring buffering is possible */ |
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index 351d58eb9082..925806c9cd53 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -27,13 +27,13 @@ int ad7606_scan_from_ring(struct ad7606_state *st, unsigned ch) | |||
27 | int ret; | 27 | int ret; |
28 | u16 *ring_data; | 28 | u16 *ring_data; |
29 | 29 | ||
30 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), | 30 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
31 | GFP_KERNEL); | 31 | GFP_KERNEL); |
32 | if (ring_data == NULL) { | 32 | if (ring_data == NULL) { |
33 | ret = -ENOMEM; | 33 | ret = -ENOMEM; |
34 | goto error_ret; | 34 | goto error_ret; |
35 | } | 35 | } |
36 | ret = ring->access.read_last(ring, (u8 *) ring_data); | 36 | ret = ring->access->read_last(ring, (u8 *) ring_data); |
37 | if (ret) | 37 | if (ret) |
38 | goto error_free_ring_data; | 38 | goto error_free_ring_data; |
39 | 39 | ||
@@ -68,8 +68,8 @@ static int ad7606_ring_preenable(struct iio_dev *indio_dev) | |||
68 | d_size += sizeof(s64) - (d_size % sizeof(s64)); | 68 | d_size += sizeof(s64) - (d_size % sizeof(s64)); |
69 | } | 69 | } |
70 | 70 | ||
71 | if (ring->access.set_bytes_per_datum) | 71 | if (ring->access->set_bytes_per_datum) |
72 | ring->access.set_bytes_per_datum(ring, d_size); | 72 | ring->access->set_bytes_per_datum(ring, d_size); |
73 | 73 | ||
74 | st->d_size = d_size; | 74 | st->d_size = d_size; |
75 | 75 | ||
@@ -105,7 +105,6 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s) | |||
105 | struct ad7606_state *st = container_of(work_s, struct ad7606_state, | 105 | struct ad7606_state *st = container_of(work_s, struct ad7606_state, |
106 | poll_work); | 106 | poll_work); |
107 | struct iio_dev *indio_dev = iio_priv_to_dev(st); | 107 | struct iio_dev *indio_dev = iio_priv_to_dev(st); |
108 | struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring); | ||
109 | struct iio_ring_buffer *ring = indio_dev->ring; | 108 | struct iio_ring_buffer *ring = indio_dev->ring; |
110 | s64 time_ns; | 109 | s64 time_ns; |
111 | __u8 *buf; | 110 | __u8 *buf; |
@@ -145,13 +144,19 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s) | |||
145 | memcpy(buf + st->d_size - sizeof(s64), | 144 | memcpy(buf + st->d_size - sizeof(s64), |
146 | &time_ns, sizeof(time_ns)); | 145 | &time_ns, sizeof(time_ns)); |
147 | 146 | ||
148 | ring->access.store_to(&sw_ring->buf, buf, time_ns); | 147 | ring->access->store_to(indio_dev->ring, buf, time_ns); |
149 | done: | 148 | done: |
150 | gpio_set_value(st->pdata->gpio_convst, 0); | 149 | gpio_set_value(st->pdata->gpio_convst, 0); |
151 | iio_trigger_notify_done(indio_dev->trig); | 150 | iio_trigger_notify_done(indio_dev->trig); |
152 | kfree(buf); | 151 | kfree(buf); |
153 | } | 152 | } |
154 | 153 | ||
154 | static const struct iio_ring_setup_ops ad7606_ring_setup_ops = { | ||
155 | .preenable = &ad7606_ring_preenable, | ||
156 | .postenable = &iio_triggered_ring_postenable, | ||
157 | .predisable = &iio_triggered_ring_predisable, | ||
158 | }; | ||
159 | |||
155 | int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 160 | int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
156 | { | 161 | { |
157 | struct ad7606_state *st = indio_dev->dev_data; | 162 | struct ad7606_state *st = indio_dev->dev_data; |
@@ -164,7 +169,7 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
164 | } | 169 | } |
165 | 170 | ||
166 | /* Effectively select the ring buffer implementation */ | 171 | /* Effectively select the ring buffer implementation */ |
167 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 172 | indio_dev->ring->access = &ring_sw_access_funcs; |
168 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 173 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
169 | if (indio_dev->pollfunc == NULL) { | 174 | if (indio_dev->pollfunc == NULL) { |
170 | ret = -ENOMEM; | 175 | ret = -ENOMEM; |
@@ -183,9 +188,7 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
183 | } | 188 | } |
184 | /* Ring buffer functions - here trigger setup related */ | 189 | /* Ring buffer functions - here trigger setup related */ |
185 | 190 | ||
186 | indio_dev->ring->preenable = &ad7606_ring_preenable; | 191 | indio_dev->ring->setup_ops = &ad7606_ring_setup_ops; |
187 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
188 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
189 | indio_dev->ring->scan_timestamp = true ; | 192 | indio_dev->ring->scan_timestamp = true ; |
190 | 193 | ||
191 | INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring); | 194 | INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring); |
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 113e97ea1c28..f3485b372171 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -33,12 +33,13 @@ int ad7887_scan_from_ring(struct ad7887_state *st, long mask) | |||
33 | goto error_ret; | 33 | goto error_ret; |
34 | } | 34 | } |
35 | 35 | ||
36 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL); | 36 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
37 | GFP_KERNEL); | ||
37 | if (ring_data == NULL) { | 38 | if (ring_data == NULL) { |
38 | ret = -ENOMEM; | 39 | ret = -ENOMEM; |
39 | goto error_ret; | 40 | goto error_ret; |
40 | } | 41 | } |
41 | ret = ring->access.read_last(ring, (u8 *) ring_data); | 42 | ret = ring->access->read_last(ring, (u8 *) ring_data); |
42 | if (ret) | 43 | if (ret) |
43 | goto error_free_ring_data; | 44 | goto error_free_ring_data; |
44 | 45 | ||
@@ -76,8 +77,8 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev) | |||
76 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); | 77 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); |
77 | } | 78 | } |
78 | 79 | ||
79 | if (indio_dev->ring->access.set_bytes_per_datum) | 80 | if (indio_dev->ring->access->set_bytes_per_datum) |
80 | indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring, | 81 | indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, |
81 | st->d_size); | 82 | st->d_size); |
82 | 83 | ||
83 | switch (ring->scan_mask) { | 84 | switch (ring->scan_mask) { |
@@ -117,7 +118,6 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p) | |||
117 | struct iio_dev *indio_dev = pf->private_data; | 118 | struct iio_dev *indio_dev = pf->private_data; |
118 | struct ad7887_state *st = iio_dev_get_devdata(indio_dev); | 119 | struct ad7887_state *st = iio_dev_get_devdata(indio_dev); |
119 | struct iio_ring_buffer *ring = indio_dev->ring; | 120 | struct iio_ring_buffer *ring = indio_dev->ring; |
120 | struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring); | ||
121 | s64 time_ns; | 121 | s64 time_ns; |
122 | __u8 *buf; | 122 | __u8 *buf; |
123 | int b_sent; | 123 | int b_sent; |
@@ -140,7 +140,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p) | |||
140 | memcpy(buf + st->d_size - sizeof(s64), | 140 | memcpy(buf + st->d_size - sizeof(s64), |
141 | &time_ns, sizeof(time_ns)); | 141 | &time_ns, sizeof(time_ns)); |
142 | 142 | ||
143 | indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns); | 143 | indio_dev->ring->access->store_to(indio_dev->ring, buf, time_ns); |
144 | done: | 144 | done: |
145 | kfree(buf); | 145 | kfree(buf); |
146 | iio_trigger_notify_done(indio_dev->trig); | 146 | iio_trigger_notify_done(indio_dev->trig); |
@@ -148,6 +148,13 @@ done: | |||
148 | return IRQ_HANDLED; | 148 | return IRQ_HANDLED; |
149 | } | 149 | } |
150 | 150 | ||
151 | static const struct iio_ring_setup_ops ad7887_ring_setup_ops = { | ||
152 | .preenable = &ad7887_ring_preenable, | ||
153 | .postenable = &iio_triggered_ring_postenable, | ||
154 | .predisable = &iio_triggered_ring_predisable, | ||
155 | .postdisable = &ad7887_ring_postdisable, | ||
156 | }; | ||
157 | |||
151 | int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 158 | int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
152 | { | 159 | { |
153 | int ret; | 160 | int ret; |
@@ -158,7 +165,7 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
158 | goto error_ret; | 165 | goto error_ret; |
159 | } | 166 | } |
160 | /* Effectively select the ring buffer implementation */ | 167 | /* Effectively select the ring buffer implementation */ |
161 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 168 | indio_dev->ring->access = &ring_sw_access_funcs; |
162 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 169 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
163 | if (indio_dev->pollfunc == NULL) { | 170 | if (indio_dev->pollfunc == NULL) { |
164 | ret = -ENOMEM; | 171 | ret = -ENOMEM; |
@@ -176,11 +183,7 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
176 | goto error_free_pollfunc; | 183 | goto error_free_pollfunc; |
177 | } | 184 | } |
178 | /* Ring buffer functions - here trigger setup related */ | 185 | /* Ring buffer functions - here trigger setup related */ |
179 | 186 | indio_dev->ring->setup_ops = &ad7887_ring_setup_ops; | |
180 | indio_dev->ring->preenable = &ad7887_ring_preenable; | ||
181 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
182 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
183 | indio_dev->ring->postdisable = &ad7887_ring_postdisable; | ||
184 | 187 | ||
185 | /* Flag that polled ring buffering is possible */ | 188 | /* Flag that polled ring buffering is possible */ |
186 | indio_dev->modes |= INDIO_RING_TRIGGERED; | 189 | indio_dev->modes |= INDIO_RING_TRIGGERED; |
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 69065683d310..57dca2075487 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -37,12 +37,13 @@ int ad799x_single_channel_from_ring(struct ad799x_state *st, long mask) | |||
37 | goto error_ret; | 37 | goto error_ret; |
38 | } | 38 | } |
39 | 39 | ||
40 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL); | 40 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
41 | GFP_KERNEL); | ||
41 | if (ring_data == NULL) { | 42 | if (ring_data == NULL) { |
42 | ret = -ENOMEM; | 43 | ret = -ENOMEM; |
43 | goto error_ret; | 44 | goto error_ret; |
44 | } | 45 | } |
45 | ret = ring->access.read_last(ring, (u8 *) ring_data); | 46 | ret = ring->access->read_last(ring, (u8 *) ring_data); |
46 | if (ret) | 47 | if (ret) |
47 | goto error_free_ring_data; | 48 | goto error_free_ring_data; |
48 | /* Need a count of channels prior to this one */ | 49 | /* Need a count of channels prior to this one */ |
@@ -90,8 +91,8 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev) | |||
90 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); | 91 | st->d_size += sizeof(s64) - (st->d_size % sizeof(s64)); |
91 | } | 92 | } |
92 | 93 | ||
93 | if (indio_dev->ring->access.set_bytes_per_datum) | 94 | if (indio_dev->ring->access->set_bytes_per_datum) |
94 | indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring, | 95 | indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, |
95 | st->d_size); | 96 | st->d_size); |
96 | 97 | ||
97 | return 0; | 98 | return 0; |
@@ -110,7 +111,6 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p) | |||
110 | struct iio_dev *indio_dev = pf->private_data; | 111 | struct iio_dev *indio_dev = pf->private_data; |
111 | struct ad799x_state *st = iio_dev_get_devdata(indio_dev); | 112 | struct ad799x_state *st = iio_dev_get_devdata(indio_dev); |
112 | struct iio_ring_buffer *ring = indio_dev->ring; | 113 | struct iio_ring_buffer *ring = indio_dev->ring; |
113 | struct iio_sw_ring_buffer *ring_sw = iio_to_sw_ring(indio_dev->ring); | ||
114 | s64 time_ns; | 114 | s64 time_ns; |
115 | __u8 *rxbuf; | 115 | __u8 *rxbuf; |
116 | int b_sent; | 116 | int b_sent; |
@@ -151,7 +151,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p) | |||
151 | memcpy(rxbuf + st->d_size - sizeof(s64), | 151 | memcpy(rxbuf + st->d_size - sizeof(s64), |
152 | &time_ns, sizeof(time_ns)); | 152 | &time_ns, sizeof(time_ns)); |
153 | 153 | ||
154 | ring->access.store_to(&ring_sw->buf, rxbuf, time_ns); | 154 | ring->access->store_to(indio_dev->ring, rxbuf, time_ns); |
155 | done: | 155 | done: |
156 | kfree(rxbuf); | 156 | kfree(rxbuf); |
157 | if (b_sent < 0) | 157 | if (b_sent < 0) |
@@ -162,6 +162,11 @@ out: | |||
162 | return IRQ_HANDLED; | 162 | return IRQ_HANDLED; |
163 | } | 163 | } |
164 | 164 | ||
165 | static const struct iio_ring_setup_ops ad799x_buf_setup_ops = { | ||
166 | .preenable = &ad799x_ring_preenable, | ||
167 | .postenable = &iio_triggered_ring_postenable, | ||
168 | .predisable = &iio_triggered_ring_predisable, | ||
169 | }; | ||
165 | 170 | ||
166 | int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 171 | int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
167 | { | 172 | { |
@@ -173,7 +178,7 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
173 | goto error_ret; | 178 | goto error_ret; |
174 | } | 179 | } |
175 | /* Effectively select the ring buffer implementation */ | 180 | /* Effectively select the ring buffer implementation */ |
176 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 181 | indio_dev->ring->access = &ring_sw_access_funcs; |
177 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 182 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
178 | if (indio_dev->pollfunc == NULL) { | 183 | if (indio_dev->pollfunc == NULL) { |
179 | ret = -ENOMEM; | 184 | ret = -ENOMEM; |
@@ -190,10 +195,7 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
190 | goto error_free_poll_func; | 195 | goto error_free_poll_func; |
191 | } | 196 | } |
192 | /* Ring buffer functions - here trigger setup related */ | 197 | /* Ring buffer functions - here trigger setup related */ |
193 | 198 | indio_dev->ring->setup_ops = &ad799x_buf_setup_ops; | |
194 | indio_dev->ring->preenable = &ad799x_ring_preenable; | ||
195 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
196 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
197 | indio_dev->ring->scan_timestamp = true; | 199 | indio_dev->ring->scan_timestamp = true; |
198 | 200 | ||
199 | /* Flag that polled ring buffering is possible */ | 201 | /* Flag that polled ring buffering is possible */ |
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index a387a9986388..963890933a2e 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -35,12 +35,13 @@ int max1363_single_channel_from_ring(long mask, struct max1363_state *st) | |||
35 | goto error_ret; | 35 | goto error_ret; |
36 | } | 36 | } |
37 | 37 | ||
38 | ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL); | 38 | ring_data = kmalloc(ring->access->get_bytes_per_datum(ring), |
39 | GFP_KERNEL); | ||
39 | if (ring_data == NULL) { | 40 | if (ring_data == NULL) { |
40 | ret = -ENOMEM; | 41 | ret = -ENOMEM; |
41 | goto error_ret; | 42 | goto error_ret; |
42 | } | 43 | } |
43 | ret = ring->access.read_last(ring, ring_data); | 44 | ret = ring->access->read_last(ring, ring_data); |
44 | if (ret) | 45 | if (ret) |
45 | goto error_free_ring_data; | 46 | goto error_free_ring_data; |
46 | /* Need a count of channels prior to this one */ | 47 | /* Need a count of channels prior to this one */ |
@@ -88,7 +89,7 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev) | |||
88 | max1363_set_scan_mode(st); | 89 | max1363_set_scan_mode(st); |
89 | 90 | ||
90 | numvals = hweight_long(st->current_mode->modemask); | 91 | numvals = hweight_long(st->current_mode->modemask); |
91 | if (ring->access.set_bytes_per_datum) { | 92 | if (ring->access->set_bytes_per_datum) { |
92 | if (ring->scan_timestamp) | 93 | if (ring->scan_timestamp) |
93 | d_size += sizeof(s64); | 94 | d_size += sizeof(s64); |
94 | if (st->chip_info->bits != 8) | 95 | if (st->chip_info->bits != 8) |
@@ -97,7 +98,7 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev) | |||
97 | d_size += numvals; | 98 | d_size += numvals; |
98 | if (ring->scan_timestamp && (d_size % 8)) | 99 | if (ring->scan_timestamp && (d_size % 8)) |
99 | d_size += 8 - (d_size % 8); | 100 | d_size += 8 - (d_size % 8); |
100 | ring->access.set_bytes_per_datum(ring, d_size); | 101 | ring->access->set_bytes_per_datum(ring, d_size); |
101 | } | 102 | } |
102 | 103 | ||
103 | return 0; | 104 | return 0; |
@@ -108,7 +109,6 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p) | |||
108 | struct iio_poll_func *pf = p; | 109 | struct iio_poll_func *pf = p; |
109 | struct iio_dev *indio_dev = pf->private_data; | 110 | struct iio_dev *indio_dev = pf->private_data; |
110 | struct max1363_state *st = iio_priv(indio_dev); | 111 | struct max1363_state *st = iio_priv(indio_dev); |
111 | struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring); | ||
112 | s64 time_ns; | 112 | s64 time_ns; |
113 | __u8 *rxbuf; | 113 | __u8 *rxbuf; |
114 | int b_sent; | 114 | int b_sent; |
@@ -144,7 +144,7 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p) | |||
144 | 144 | ||
145 | memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns)); | 145 | memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns)); |
146 | 146 | ||
147 | indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns); | 147 | indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns); |
148 | done: | 148 | done: |
149 | iio_trigger_notify_done(indio_dev->trig); | 149 | iio_trigger_notify_done(indio_dev->trig); |
150 | kfree(rxbuf); | 150 | kfree(rxbuf); |
@@ -152,6 +152,11 @@ done: | |||
152 | return IRQ_HANDLED; | 152 | return IRQ_HANDLED; |
153 | } | 153 | } |
154 | 154 | ||
155 | static const struct iio_ring_setup_ops max1363_ring_setup_ops = { | ||
156 | .postenable = &iio_triggered_ring_postenable, | ||
157 | .preenable = &max1363_ring_preenable, | ||
158 | .predisable = &iio_triggered_ring_predisable, | ||
159 | }; | ||
155 | 160 | ||
156 | int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) | 161 | int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) |
157 | { | 162 | { |
@@ -163,8 +168,6 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
163 | ret = -ENOMEM; | 168 | ret = -ENOMEM; |
164 | goto error_ret; | 169 | goto error_ret; |
165 | } | 170 | } |
166 | /* Effectively select the ring buffer implementation */ | ||
167 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | ||
168 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 171 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
169 | if (indio_dev->pollfunc == NULL) { | 172 | if (indio_dev->pollfunc == NULL) { |
170 | ret = -ENOMEM; | 173 | ret = -ENOMEM; |
@@ -180,11 +183,10 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
180 | ret = -ENOMEM; | 183 | ret = -ENOMEM; |
181 | goto error_free_pollfunc; | 184 | goto error_free_pollfunc; |
182 | } | 185 | } |
183 | 186 | /* Effectively select the ring buffer implementation */ | |
187 | indio_dev->ring->access = &ring_sw_access_funcs; | ||
184 | /* Ring buffer functions - here trigger setup related */ | 188 | /* Ring buffer functions - here trigger setup related */ |
185 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | 189 | indio_dev->ring->setup_ops = &max1363_ring_setup_ops; |
186 | indio_dev->ring->preenable = &max1363_ring_preenable; | ||
187 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
188 | 190 | ||
189 | /* Flag that polled ring buffering is possible */ | 191 | /* Flag that polled ring buffering is possible */ |
190 | indio_dev->modes |= INDIO_RING_TRIGGERED; | 192 | indio_dev->modes |= INDIO_RING_TRIGGERED; |
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 12fbbf2da48c..85586e42380c 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -74,7 +74,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p) | |||
74 | struct iio_ring_buffer *ring = indio_dev->ring; | 74 | struct iio_ring_buffer *ring = indio_dev->ring; |
75 | int i = 0; | 75 | int i = 0; |
76 | s16 *data; | 76 | s16 *data; |
77 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 77 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
78 | 78 | ||
79 | data = kmalloc(datasize , GFP_KERNEL); | 79 | data = kmalloc(datasize , GFP_KERNEL); |
80 | if (data == NULL) { | 80 | if (data == NULL) { |
@@ -91,7 +91,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p) | |||
91 | if (ring->scan_timestamp) | 91 | if (ring->scan_timestamp) |
92 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 92 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
93 | 93 | ||
94 | ring->access.store_to(ring, (u8 *)data, pf->timestamp); | 94 | ring->access->store_to(ring, (u8 *)data, pf->timestamp); |
95 | 95 | ||
96 | iio_trigger_notify_done(st->indio_dev->trig); | 96 | iio_trigger_notify_done(st->indio_dev->trig); |
97 | kfree(data); | 97 | kfree(data); |
@@ -106,6 +106,12 @@ void adis16260_unconfigure_ring(struct iio_dev *indio_dev) | |||
106 | iio_sw_rb_free(indio_dev->ring); | 106 | iio_sw_rb_free(indio_dev->ring); |
107 | } | 107 | } |
108 | 108 | ||
109 | static const struct iio_ring_setup_ops adis16260_ring_setup_ops = { | ||
110 | .preenable = &iio_sw_ring_preenable, | ||
111 | .postenable = &iio_triggered_ring_postenable, | ||
112 | .predisable = &iio_triggered_ring_predisable, | ||
113 | }; | ||
114 | |||
109 | int adis16260_configure_ring(struct iio_dev *indio_dev) | 115 | int adis16260_configure_ring(struct iio_dev *indio_dev) |
110 | { | 116 | { |
111 | int ret = 0; | 117 | int ret = 0; |
@@ -118,12 +124,10 @@ int adis16260_configure_ring(struct iio_dev *indio_dev) | |||
118 | } | 124 | } |
119 | indio_dev->ring = ring; | 125 | indio_dev->ring = ring; |
120 | /* Effectively select the ring buffer implementation */ | 126 | /* Effectively select the ring buffer implementation */ |
121 | iio_ring_sw_register_funcs(&ring->access); | 127 | ring->access = &ring_sw_access_funcs; |
122 | ring->bpe = 2; | 128 | ring->bpe = 2; |
123 | ring->scan_timestamp = true; | 129 | ring->scan_timestamp = true; |
124 | ring->preenable = &iio_sw_ring_preenable; | 130 | ring->setup_ops = &adis16260_ring_setup_ops; |
125 | ring->postenable = &iio_triggered_ring_postenable; | ||
126 | ring->predisable = &iio_triggered_ring_predisable; | ||
127 | ring->owner = THIS_MODULE; | 131 | ring->owner = THIS_MODULE; |
128 | 132 | ||
129 | /* Set default scan mode */ | 133 | /* Set default scan mode */ |
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 271fe1d7c52d..5d99fba8cc29 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -124,7 +124,7 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p) | |||
124 | struct iio_ring_buffer *ring = indio_dev->ring; | 124 | struct iio_ring_buffer *ring = indio_dev->ring; |
125 | int i = 0, j, ret = 0; | 125 | int i = 0, j, ret = 0; |
126 | s16 *data; | 126 | s16 *data; |
127 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 127 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
128 | unsigned long mask = ring->scan_mask; | 128 | unsigned long mask = ring->scan_mask; |
129 | 129 | ||
130 | data = kmalloc(datasize , GFP_KERNEL); | 130 | data = kmalloc(datasize , GFP_KERNEL); |
@@ -155,7 +155,7 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p) | |||
155 | /* Guaranteed to be aligned with 8 byte boundary */ | 155 | /* Guaranteed to be aligned with 8 byte boundary */ |
156 | if (ring->scan_timestamp) | 156 | if (ring->scan_timestamp) |
157 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; | 157 | *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp; |
158 | ring->access.store_to(indio_dev->ring, (u8 *) data, pf->timestamp); | 158 | ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp); |
159 | 159 | ||
160 | iio_trigger_notify_done(indio_dev->trig); | 160 | iio_trigger_notify_done(indio_dev->trig); |
161 | kfree(data); | 161 | kfree(data); |
@@ -170,6 +170,12 @@ void adis16400_unconfigure_ring(struct iio_dev *indio_dev) | |||
170 | iio_sw_rb_free(indio_dev->ring); | 170 | iio_sw_rb_free(indio_dev->ring); |
171 | } | 171 | } |
172 | 172 | ||
173 | static const struct iio_ring_setup_ops adis16400_ring_setup_ops = { | ||
174 | .preenable = &iio_sw_ring_preenable, | ||
175 | .postenable = &iio_triggered_ring_postenable, | ||
176 | .predisable = &iio_triggered_ring_predisable, | ||
177 | }; | ||
178 | |||
173 | int adis16400_configure_ring(struct iio_dev *indio_dev) | 179 | int adis16400_configure_ring(struct iio_dev *indio_dev) |
174 | { | 180 | { |
175 | int ret = 0; | 181 | int ret = 0; |
@@ -183,12 +189,10 @@ int adis16400_configure_ring(struct iio_dev *indio_dev) | |||
183 | } | 189 | } |
184 | indio_dev->ring = ring; | 190 | indio_dev->ring = ring; |
185 | /* Effectively select the ring buffer implementation */ | 191 | /* Effectively select the ring buffer implementation */ |
186 | iio_ring_sw_register_funcs(&ring->access); | 192 | ring->access = &ring_sw_access_funcs; |
187 | ring->bpe = 2; | 193 | ring->bpe = 2; |
188 | ring->scan_timestamp = true; | 194 | ring->scan_timestamp = true; |
189 | ring->preenable = &iio_sw_ring_preenable; | 195 | ring->setup_ops = &adis16400_ring_setup_ops; |
190 | ring->postenable = &iio_triggered_ring_postenable; | ||
191 | ring->predisable = &iio_triggered_ring_predisable; | ||
192 | ring->owner = THIS_MODULE; | 196 | ring->owner = THIS_MODULE; |
193 | ring->scan_mask = st->variant->default_scan_mask; | 197 | ring->scan_mask = st->variant->default_scan_mask; |
194 | ring->scan_count = hweight_long(st->variant->default_scan_mask); | 198 | ring->scan_count = hweight_long(st->variant->default_scan_mask); |
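The driver conversions above (adis16260, adis16400 and the others in this patch) all follow the same shape: the per-ring copy of the access functions made by iio_ring_sw_register_funcs() and the individual preenable/postenable/predisable pointers are replaced by two const pointers, one to the shared ring_sw_access_funcs table and one to a driver-local iio_ring_setup_ops. A minimal sketch of the resulting configure routine, assuming the usual staging IIO headers ("ring_generic.h", "ring_sw.h"); the mydrv_* names are hypothetical, the remaining identifiers are taken from this patch:

/*
 * Minimal sketch of the converted driver pattern; mydrv_* is
 * hypothetical, everything else comes from this patch.
 */
static const struct iio_ring_setup_ops mydrv_ring_setup_ops = {
	.preenable = &iio_sw_ring_preenable,
	.postenable = &iio_triggered_ring_postenable,
	.predisable = &iio_triggered_ring_predisable,
};

int mydrv_configure_ring(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = iio_sw_rb_allocate(indio_dev);

	if (!ring)
		return -ENOMEM;
	indio_dev->ring = ring;
	/* Select the sw ring implementation and the shared setup hooks. */
	ring->access = &ring_sw_access_funcs;
	ring->setup_ops = &mydrv_ring_setup_ops;
	ring->bpe = 2;
	ring->scan_timestamp = true;
	ring->owner = THIS_MODULE;
	return 0;
}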
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c index 853ebe91a8b5..050f9f94058a 100644 --- a/drivers/staging/iio/industrialio-ring.c +++ b/drivers/staging/iio/industrialio-ring.c | |||
@@ -36,8 +36,8 @@ static int iio_ring_open(struct inode *inode, struct file *filp) | |||
36 | struct iio_ring_buffer *rb = hand->private; | 36 | struct iio_ring_buffer *rb = hand->private; |
37 | 37 | ||
38 | filp->private_data = hand->private; | 38 | filp->private_data = hand->private; |
39 | if (rb->access.mark_in_use) | 39 | if (rb->access->mark_in_use) |
40 | rb->access.mark_in_use(rb); | 40 | rb->access->mark_in_use(rb); |
41 | 41 | ||
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
@@ -55,8 +55,8 @@ static int iio_ring_release(struct inode *inode, struct file *filp) | |||
55 | struct iio_ring_buffer *rb = hand->private; | 55 | struct iio_ring_buffer *rb = hand->private; |
56 | 56 | ||
57 | clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags); | 57 | clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags); |
58 | if (rb->access.unmark_in_use) | 58 | if (rb->access->unmark_in_use) |
59 | rb->access.unmark_in_use(rb); | 59 | rb->access->unmark_in_use(rb); |
60 | 60 | ||
61 | return 0; | 61 | return 0; |
62 | } | 62 | } |
@@ -74,9 +74,9 @@ static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf, | |||
74 | int ret; | 74 | int ret; |
75 | 75 | ||
76 | /* rip lots must exist. */ | 76 | /* rip lots must exist. */ |
77 | if (!rb->access.read_first_n) | 77 | if (!rb->access->read_first_n) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | ret = rb->access.read_first_n(rb, n, buf); | 79 | ret = rb->access->read_first_n(rb, n, buf); |
80 | 80 | ||
81 | return ret; | 81 | return ret; |
82 | } | 82 | } |
@@ -165,8 +165,6 @@ static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf) | |||
165 | void iio_ring_buffer_init(struct iio_ring_buffer *ring, | 165 | void iio_ring_buffer_init(struct iio_ring_buffer *ring, |
166 | struct iio_dev *dev_info) | 166 | struct iio_dev *dev_info) |
167 | { | 167 | { |
168 | if (ring->access.mark_param_change) | ||
169 | ring->access.mark_param_change(ring); | ||
170 | ring->indio_dev = dev_info; | 168 | ring->indio_dev = dev_info; |
171 | ring->access_handler.private = ring; | 169 | ring->access_handler.private = ring; |
172 | init_waitqueue_head(&ring->pollq); | 170 | init_waitqueue_head(&ring->pollq); |
@@ -344,9 +342,9 @@ ssize_t iio_read_ring_length(struct device *dev, | |||
344 | int len = 0; | 342 | int len = 0; |
345 | struct iio_ring_buffer *ring = dev_get_drvdata(dev); | 343 | struct iio_ring_buffer *ring = dev_get_drvdata(dev); |
346 | 344 | ||
347 | if (ring->access.get_length) | 345 | if (ring->access->get_length) |
348 | len = sprintf(buf, "%d\n", | 346 | len = sprintf(buf, "%d\n", |
349 | ring->access.get_length(ring)); | 347 | ring->access->get_length(ring)); |
350 | 348 | ||
351 | return len; | 349 | return len; |
352 | } | 350 | } |
@@ -364,14 +362,14 @@ ssize_t iio_write_ring_length(struct device *dev, | |||
364 | if (ret) | 362 | if (ret) |
365 | return ret; | 363 | return ret; |
366 | 364 | ||
367 | if (ring->access.get_length) | 365 | if (ring->access->get_length) |
368 | if (val == ring->access.get_length(ring)) | 366 | if (val == ring->access->get_length(ring)) |
369 | return len; | 367 | return len; |
370 | 368 | ||
371 | if (ring->access.set_length) { | 369 | if (ring->access->set_length) { |
372 | ring->access.set_length(ring, val); | 370 | ring->access->set_length(ring, val); |
373 | if (ring->access.mark_param_change) | 371 | if (ring->access->mark_param_change) |
374 | ring->access.mark_param_change(ring); | 372 | ring->access->mark_param_change(ring); |
375 | } | 373 | } |
376 | 374 | ||
377 | return len; | 375 | return len; |
@@ -385,9 +383,9 @@ ssize_t iio_read_ring_bytes_per_datum(struct device *dev, | |||
385 | int len = 0; | 383 | int len = 0; |
386 | struct iio_ring_buffer *ring = dev_get_drvdata(dev); | 384 | struct iio_ring_buffer *ring = dev_get_drvdata(dev); |
387 | 385 | ||
388 | if (ring->access.get_bytes_per_datum) | 386 | if (ring->access->get_bytes_per_datum) |
389 | len = sprintf(buf, "%d\n", | 387 | len = sprintf(buf, "%d\n", |
390 | ring->access.get_bytes_per_datum(ring)); | 388 | ring->access->get_bytes_per_datum(ring)); |
391 | 389 | ||
392 | return len; | 390 | return len; |
393 | } | 391 | } |
@@ -413,8 +411,8 @@ ssize_t iio_store_ring_enable(struct device *dev, | |||
413 | goto done; | 411 | goto done; |
414 | } | 412 | } |
415 | if (requested_state) { | 413 | if (requested_state) { |
416 | if (ring->preenable) { | 414 | if (ring->setup_ops->preenable) { |
417 | ret = ring->preenable(dev_info); | 415 | ret = ring->setup_ops->preenable(dev_info); |
418 | if (ret) { | 416 | if (ret) { |
419 | printk(KERN_ERR | 417 | printk(KERN_ERR |
420 | "Buffer not started:" | 418 | "Buffer not started:" |
@@ -422,8 +420,8 @@ ssize_t iio_store_ring_enable(struct device *dev, | |||
422 | goto error_ret; | 420 | goto error_ret; |
423 | } | 421 | } |
424 | } | 422 | } |
425 | if (ring->access.request_update) { | 423 | if (ring->access->request_update) { |
426 | ret = ring->access.request_update(ring); | 424 | ret = ring->access->request_update(ring); |
427 | if (ret) { | 425 | if (ret) { |
428 | printk(KERN_INFO | 426 | printk(KERN_INFO |
429 | "Buffer not started:" | 427 | "Buffer not started:" |
@@ -431,16 +429,16 @@ ssize_t iio_store_ring_enable(struct device *dev, | |||
431 | goto error_ret; | 429 | goto error_ret; |
432 | } | 430 | } |
433 | } | 431 | } |
434 | if (ring->access.mark_in_use) | 432 | if (ring->access->mark_in_use) |
435 | ring->access.mark_in_use(ring); | 433 | ring->access->mark_in_use(ring); |
436 | /* Definitely possible for devices to support both of these.*/ | 434 | /* Definitely possible for devices to support both of these.*/ |
437 | if (dev_info->modes & INDIO_RING_TRIGGERED) { | 435 | if (dev_info->modes & INDIO_RING_TRIGGERED) { |
438 | if (!dev_info->trig) { | 436 | if (!dev_info->trig) { |
439 | printk(KERN_INFO | 437 | printk(KERN_INFO |
440 | "Buffer not started: no trigger\n"); | 438 | "Buffer not started: no trigger\n"); |
441 | ret = -EINVAL; | 439 | ret = -EINVAL; |
442 | if (ring->access.unmark_in_use) | 440 | if (ring->access->unmark_in_use) |
443 | ring->access.unmark_in_use(ring); | 441 | ring->access->unmark_in_use(ring); |
444 | goto error_ret; | 442 | goto error_ret; |
445 | } | 443 | } |
446 | dev_info->currentmode = INDIO_RING_TRIGGERED; | 444 | dev_info->currentmode = INDIO_RING_TRIGGERED; |
@@ -451,32 +449,32 @@ ssize_t iio_store_ring_enable(struct device *dev, | |||
451 | goto error_ret; | 449 | goto error_ret; |
452 | } | 450 | } |
453 | 451 | ||
454 | if (ring->postenable) { | 452 | if (ring->setup_ops->postenable) { |
455 | 453 | ||
456 | ret = ring->postenable(dev_info); | 454 | ret = ring->setup_ops->postenable(dev_info); |
457 | if (ret) { | 455 | if (ret) { |
458 | printk(KERN_INFO | 456 | printk(KERN_INFO |
459 | "Buffer not started:" | 457 | "Buffer not started:" |
460 | "postenable failed\n"); | 458 | "postenable failed\n"); |
461 | if (ring->access.unmark_in_use) | 459 | if (ring->access->unmark_in_use) |
462 | ring->access.unmark_in_use(ring); | 460 | ring->access->unmark_in_use(ring); |
463 | dev_info->currentmode = previous_mode; | 461 | dev_info->currentmode = previous_mode; |
464 | if (ring->postdisable) | 462 | if (ring->setup_ops->postdisable) |
465 | ring->postdisable(dev_info); | 463 | ring->setup_ops->postdisable(dev_info); |
466 | goto error_ret; | 464 | goto error_ret; |
467 | } | 465 | } |
468 | } | 466 | } |
469 | } else { | 467 | } else { |
470 | if (ring->predisable) { | 468 | if (ring->setup_ops->predisable) { |
471 | ret = ring->predisable(dev_info); | 469 | ret = ring->setup_ops->predisable(dev_info); |
472 | if (ret) | 470 | if (ret) |
473 | goto error_ret; | 471 | goto error_ret; |
474 | } | 472 | } |
475 | if (ring->access.unmark_in_use) | 473 | if (ring->access->unmark_in_use) |
476 | ring->access.unmark_in_use(ring); | 474 | ring->access->unmark_in_use(ring); |
477 | dev_info->currentmode = INDIO_DIRECT_MODE; | 475 | dev_info->currentmode = INDIO_DIRECT_MODE; |
478 | if (ring->postdisable) { | 476 | if (ring->setup_ops->postdisable) { |
479 | ret = ring->postdisable(dev_info); | 477 | ret = ring->setup_ops->postdisable(dev_info); |
480 | if (ret) | 478 | if (ret) |
481 | goto error_ret; | 479 | goto error_ret; |
482 | } | 480 | } |
@@ -584,3 +582,28 @@ error_ret: | |||
584 | return ret ? ret : len; | 582 | return ret ? ret : len; |
585 | } | 583 | } |
586 | EXPORT_SYMBOL(iio_scan_el_ts_store); | 584 | EXPORT_SYMBOL(iio_scan_el_ts_store); |
585 | |||
586 | int iio_sw_ring_preenable(struct iio_dev *indio_dev) | ||
587 | { | ||
588 | struct iio_ring_buffer *ring = indio_dev->ring; | ||
589 | size_t size; | ||
590 | dev_dbg(&indio_dev->dev, "%s\n", __func__); | ||
591 | /* Check if there are any scan elements enabled, if not fail*/ | ||
592 | if (!(ring->scan_count || ring->scan_timestamp)) | ||
593 | return -EINVAL; | ||
594 | if (ring->scan_timestamp) | ||
595 | if (ring->scan_count) | ||
596 | /* Timestamp (aligned to s64) and data */ | ||
597 | size = (((ring->scan_count * ring->bpe) | ||
598 | + sizeof(s64) - 1) | ||
599 | & ~(sizeof(s64) - 1)) | ||
600 | + sizeof(s64); | ||
601 | else /* Timestamp only */ | ||
602 | size = sizeof(s64); | ||
603 | else /* Data only */ | ||
604 | size = ring->scan_count * ring->bpe; | ||
605 | ring->access->set_bytes_per_datum(ring, size); | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | EXPORT_SYMBOL(iio_sw_ring_preenable); | ||
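For reference, the buffer size that the relocated iio_sw_ring_preenable() hands to set_bytes_per_datum() is the scan data rounded up to an s64 boundary plus the timestamp: with, say, three 2-byte scan elements and the timestamp enabled, 6 bytes of data are padded to 8, plus 8 for the s64 timestamp, giving 16 bytes per datum. A small self-contained userspace sketch of the same arithmetic (the values are illustrative):

/* Userspace sketch mirroring the bytes-per-datum calculation above;
 * scan_count, bpe and scan_timestamp hold illustrative values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t scan_count = 3, bpe = 2;
	int scan_timestamp = 1;
	size_t size;

	if (scan_timestamp)
		size = scan_count ?
			((scan_count * bpe + sizeof(int64_t) - 1) &
			 ~(sizeof(int64_t) - 1)) + sizeof(int64_t) :
			sizeof(int64_t);
	else
		size = scan_count * bpe;

	printf("%zu\n", size);	/* prints 16: 6 bytes padded to 8, plus 8 */
	return 0;
}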
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c index fdd5d9e77a97..cc14b96d814c 100644 --- a/drivers/staging/iio/kfifo_buf.c +++ b/drivers/staging/iio/kfifo_buf.c | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include "kfifo_buf.h" | 9 | #include "kfifo_buf.h" |
10 | 10 | ||
11 | #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, ring) | ||
12 | |||
11 | static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, | 13 | static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, |
12 | int bytes_per_datum, int length) | 14 | int bytes_per_datum, int length) |
13 | { | 15 | { |
@@ -18,7 +20,7 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, | |||
18 | return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL); | 20 | return kfifo_alloc(&buf->kf, bytes_per_datum*length, GFP_KERNEL); |
19 | } | 21 | } |
20 | 22 | ||
21 | int iio_request_update_kfifo(struct iio_ring_buffer *r) | 23 | static int iio_request_update_kfifo(struct iio_ring_buffer *r) |
22 | { | 24 | { |
23 | int ret = 0; | 25 | int ret = 0; |
24 | struct iio_kfifo *buf = iio_to_kfifo(r); | 26 | struct iio_kfifo *buf = iio_to_kfifo(r); |
@@ -37,31 +39,27 @@ error_ret: | |||
37 | mutex_unlock(&buf->use_lock); | 39 | mutex_unlock(&buf->use_lock); |
38 | return ret; | 40 | return ret; |
39 | } | 41 | } |
40 | EXPORT_SYMBOL(iio_request_update_kfifo); | ||
41 | 42 | ||
42 | void iio_mark_kfifo_in_use(struct iio_ring_buffer *r) | 43 | static void iio_mark_kfifo_in_use(struct iio_ring_buffer *r) |
43 | { | 44 | { |
44 | struct iio_kfifo *buf = iio_to_kfifo(r); | 45 | struct iio_kfifo *buf = iio_to_kfifo(r); |
45 | mutex_lock(&buf->use_lock); | 46 | mutex_lock(&buf->use_lock); |
46 | buf->use_count++; | 47 | buf->use_count++; |
47 | mutex_unlock(&buf->use_lock); | 48 | mutex_unlock(&buf->use_lock); |
48 | } | 49 | } |
49 | EXPORT_SYMBOL(iio_mark_kfifo_in_use); | ||
50 | 50 | ||
51 | void iio_unmark_kfifo_in_use(struct iio_ring_buffer *r) | 51 | static void iio_unmark_kfifo_in_use(struct iio_ring_buffer *r) |
52 | { | 52 | { |
53 | struct iio_kfifo *buf = iio_to_kfifo(r); | 53 | struct iio_kfifo *buf = iio_to_kfifo(r); |
54 | mutex_lock(&buf->use_lock); | 54 | mutex_lock(&buf->use_lock); |
55 | buf->use_count--; | 55 | buf->use_count--; |
56 | mutex_unlock(&buf->use_lock); | 56 | mutex_unlock(&buf->use_lock); |
57 | } | 57 | } |
58 | EXPORT_SYMBOL(iio_unmark_kfifo_in_use); | ||
59 | 58 | ||
60 | int iio_get_length_kfifo(struct iio_ring_buffer *r) | 59 | static int iio_get_length_kfifo(struct iio_ring_buffer *r) |
61 | { | 60 | { |
62 | return r->length; | 61 | return r->length; |
63 | } | 62 | } |
64 | EXPORT_SYMBOL(iio_get_length_kfifo); | ||
65 | 63 | ||
66 | static inline void __iio_init_kfifo(struct iio_kfifo *kf) | 64 | static inline void __iio_init_kfifo(struct iio_kfifo *kf) |
67 | { | 65 | { |
@@ -108,6 +106,7 @@ struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev) | |||
108 | kf = kzalloc(sizeof *kf, GFP_KERNEL); | 106 | kf = kzalloc(sizeof *kf, GFP_KERNEL); |
109 | if (!kf) | 107 | if (!kf) |
110 | return NULL; | 108 | return NULL; |
109 | kf->update_needed = true; | ||
111 | iio_ring_buffer_init(&kf->ring, indio_dev); | 110 | iio_ring_buffer_init(&kf->ring, indio_dev); |
112 | __iio_init_kfifo(kf); | 111 | __iio_init_kfifo(kf); |
113 | kf->ring.dev.type = &iio_kfifo_type; | 112 | kf->ring.dev.type = &iio_kfifo_type; |
@@ -120,41 +119,37 @@ struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev) | |||
120 | } | 119 | } |
121 | EXPORT_SYMBOL(iio_kfifo_allocate); | 120 | EXPORT_SYMBOL(iio_kfifo_allocate); |
122 | 121 | ||
123 | int iio_get_bytes_per_datum_kfifo(struct iio_ring_buffer *r) | 122 | static int iio_get_bytes_per_datum_kfifo(struct iio_ring_buffer *r) |
124 | { | 123 | { |
125 | return r->bytes_per_datum; | 124 | return r->bytes_per_datum; |
126 | } | 125 | } |
127 | EXPORT_SYMBOL(iio_get_bytes_per_datum_kfifo); | ||
128 | 126 | ||
129 | int iio_set_bytes_per_datum_kfifo(struct iio_ring_buffer *r, size_t bpd) | 127 | static int iio_set_bytes_per_datum_kfifo(struct iio_ring_buffer *r, size_t bpd) |
130 | { | 128 | { |
131 | if (r->bytes_per_datum != bpd) { | 129 | if (r->bytes_per_datum != bpd) { |
132 | r->bytes_per_datum = bpd; | 130 | r->bytes_per_datum = bpd; |
133 | if (r->access.mark_param_change) | 131 | if (r->access->mark_param_change) |
134 | r->access.mark_param_change(r); | 132 | r->access->mark_param_change(r); |
135 | } | 133 | } |
136 | return 0; | 134 | return 0; |
137 | } | 135 | } |
138 | EXPORT_SYMBOL(iio_set_bytes_per_datum_kfifo); | ||
139 | 136 | ||
140 | int iio_mark_update_needed_kfifo(struct iio_ring_buffer *r) | 137 | static int iio_mark_update_needed_kfifo(struct iio_ring_buffer *r) |
141 | { | 138 | { |
142 | struct iio_kfifo *kf = iio_to_kfifo(r); | 139 | struct iio_kfifo *kf = iio_to_kfifo(r); |
143 | kf->update_needed = true; | 140 | kf->update_needed = true; |
144 | return 0; | 141 | return 0; |
145 | } | 142 | } |
146 | EXPORT_SYMBOL(iio_mark_update_needed_kfifo); | ||
147 | 143 | ||
148 | int iio_set_length_kfifo(struct iio_ring_buffer *r, int length) | 144 | static int iio_set_length_kfifo(struct iio_ring_buffer *r, int length) |
149 | { | 145 | { |
150 | if (r->length != length) { | 146 | if (r->length != length) { |
151 | r->length = length; | 147 | r->length = length; |
152 | if (r->access.mark_param_change) | 148 | if (r->access->mark_param_change) |
153 | r->access.mark_param_change(r); | 149 | r->access->mark_param_change(r); |
154 | } | 150 | } |
155 | return 0; | 151 | return 0; |
156 | } | 152 | } |
157 | EXPORT_SYMBOL(iio_set_length_kfifo); | ||
158 | 153 | ||
159 | void iio_kfifo_free(struct iio_ring_buffer *r) | 154 | void iio_kfifo_free(struct iio_ring_buffer *r) |
160 | { | 155 | { |
@@ -163,7 +158,9 @@ void iio_kfifo_free(struct iio_ring_buffer *r) | |||
163 | } | 158 | } |
164 | EXPORT_SYMBOL(iio_kfifo_free); | 159 | EXPORT_SYMBOL(iio_kfifo_free); |
165 | 160 | ||
166 | int iio_store_to_kfifo(struct iio_ring_buffer *r, u8 *data, s64 timestamp) | 161 | static int iio_store_to_kfifo(struct iio_ring_buffer *r, |
162 | u8 *data, | ||
163 | s64 timestamp) | ||
167 | { | 164 | { |
168 | int ret; | 165 | int ret; |
169 | struct iio_kfifo *kf = iio_to_kfifo(r); | 166 | struct iio_kfifo *kf = iio_to_kfifo(r); |
@@ -179,9 +176,8 @@ int iio_store_to_kfifo(struct iio_ring_buffer *r, u8 *data, s64 timestamp) | |||
179 | kfree(datal); | 176 | kfree(datal); |
180 | return 0; | 177 | return 0; |
181 | } | 178 | } |
182 | EXPORT_SYMBOL(iio_store_to_kfifo); | ||
183 | 179 | ||
184 | int iio_read_first_n_kfifo(struct iio_ring_buffer *r, | 180 | static int iio_read_first_n_kfifo(struct iio_ring_buffer *r, |
185 | size_t n, char __user *buf) | 181 | size_t n, char __user *buf) |
186 | { | 182 | { |
187 | int ret, copied; | 183 | int ret, copied; |
@@ -191,5 +187,19 @@ int iio_read_first_n_kfifo(struct iio_ring_buffer *r, | |||
191 | 187 | ||
192 | return copied; | 188 | return copied; |
193 | } | 189 | } |
194 | EXPORT_SYMBOL(iio_read_first_n_kfifo); | 190 | |
191 | const struct iio_ring_access_funcs kfifo_access_funcs = { | ||
192 | .mark_in_use = &iio_mark_kfifo_in_use, | ||
193 | .unmark_in_use = &iio_unmark_kfifo_in_use, | ||
194 | .store_to = &iio_store_to_kfifo, | ||
195 | .read_first_n = &iio_read_first_n_kfifo, | ||
196 | .mark_param_change = &iio_mark_update_needed_kfifo, | ||
197 | .request_update = &iio_request_update_kfifo, | ||
198 | .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo, | ||
199 | .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo, | ||
200 | .get_length = &iio_get_length_kfifo, | ||
201 | .set_length = &iio_set_length_kfifo, | ||
202 | }; | ||
203 | EXPORT_SYMBOL(kfifo_access_funcs); | ||
204 | |||
195 | MODULE_LICENSE("GPL"); | 205 | MODULE_LICENSE("GPL"); |
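The kfifo backend gets the same treatment as the software ring: its helpers become static and are published only through the exported const kfifo_access_funcs table, replacing the iio_kfifo_register_funcs() inline helper removed from kfifo_buf.h below. A hedged sketch of how a driver would now select the kfifo implementation (mydrv_configure_ring() and mydrv_ring_setup_ops are hypothetical; iio_kfifo_allocate() and kfifo_access_funcs are from this patch):

/* Hypothetical driver selecting the kfifo backend after this patch. */
int mydrv_configure_ring(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = iio_kfifo_allocate(indio_dev);

	if (!ring)
		return -ENOMEM;
	indio_dev->ring = ring;
	/* Previously: iio_kfifo_register_funcs(&ring->access) */
	ring->access = &kfifo_access_funcs;
	ring->setup_ops = &mydrv_ring_setup_ops;	/* as in the earlier sketch */
	ring->owner = THIS_MODULE;
	return 0;
}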
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h index eb337a47dd64..aac30539b2c6 100644 --- a/drivers/staging/iio/kfifo_buf.h +++ b/drivers/staging/iio/kfifo_buf.h | |||
@@ -11,44 +11,7 @@ struct iio_kfifo { | |||
11 | struct mutex use_lock; | 11 | struct mutex use_lock; |
12 | }; | 12 | }; |
13 | 13 | ||
14 | #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, ring) | 14 | extern const struct iio_ring_access_funcs kfifo_access_funcs; |
15 | |||
16 | int iio_create_kfifo(struct iio_ring_buffer **r); | ||
17 | int iio_init_kfifo(struct iio_ring_buffer *r, struct iio_dev *indio_dev); | ||
18 | void iio_exit_kfifo(struct iio_ring_buffer *r); | ||
19 | void iio_free_kfifo(struct iio_ring_buffer *r); | ||
20 | void iio_mark_kfifo_in_use(struct iio_ring_buffer *r); | ||
21 | void iio_unmark_kfifo_in_use(struct iio_ring_buffer *r); | ||
22 | |||
23 | int iio_store_to_kfifo(struct iio_ring_buffer *r, u8 *data, s64 timestamp); | ||
24 | int iio_read_first_n_kfifo(struct iio_ring_buffer *r, | ||
25 | size_t n, | ||
26 | char __user *buf); | ||
27 | |||
28 | int iio_request_update_kfifo(struct iio_ring_buffer *r); | ||
29 | int iio_mark_update_needed_kfifo(struct iio_ring_buffer *r); | ||
30 | |||
31 | int iio_get_bytes_per_datum_kfifo(struct iio_ring_buffer *r); | ||
32 | int iio_set_bytes_per_datum_kfifo(struct iio_ring_buffer *r, size_t bpd); | ||
33 | int iio_get_length_kfifo(struct iio_ring_buffer *r); | ||
34 | int iio_set_length_kfifo(struct iio_ring_buffer *r, int length); | ||
35 | |||
36 | static inline void iio_kfifo_register_funcs(struct iio_ring_access_funcs *ra) | ||
37 | { | ||
38 | ra->mark_in_use = &iio_mark_kfifo_in_use; | ||
39 | ra->unmark_in_use = &iio_unmark_kfifo_in_use; | ||
40 | |||
41 | ra->store_to = &iio_store_to_kfifo; | ||
42 | ra->read_first_n = &iio_read_first_n_kfifo; | ||
43 | |||
44 | ra->mark_param_change = &iio_mark_update_needed_kfifo; | ||
45 | ra->request_update = &iio_request_update_kfifo; | ||
46 | |||
47 | ra->get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo; | ||
48 | ra->set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo; | ||
49 | ra->get_length = &iio_get_length_kfifo; | ||
50 | ra->set_length = &iio_set_length_kfifo; | ||
51 | }; | ||
52 | 15 | ||
53 | struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev); | 16 | struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev); |
54 | void iio_kfifo_free(struct iio_ring_buffer *r); | 17 | void iio_kfifo_free(struct iio_ring_buffer *r); |
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c index c8ebfd2fd9e3..564555a81191 100644 --- a/drivers/staging/iio/meter/ade7758_ring.c +++ b/drivers/staging/iio/meter/ade7758_ring.c | |||
@@ -83,7 +83,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p) | |||
83 | if (ring->scan_timestamp) | 83 | if (ring->scan_timestamp) |
84 | dat64[1] = pf->timestamp; | 84 | dat64[1] = pf->timestamp; |
85 | 85 | ||
86 | ring->access.store_to(ring, (u8 *)dat64, pf->timestamp); | 86 | ring->access->store_to(ring, (u8 *)dat64, pf->timestamp); |
87 | 87 | ||
88 | iio_trigger_notify_done(st->indio_dev->trig); | 88 | iio_trigger_notify_done(st->indio_dev->trig); |
89 | 89 | ||
@@ -118,8 +118,8 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev) | |||
118 | d_size += sizeof(s64) - (d_size % sizeof(s64)); | 118 | d_size += sizeof(s64) - (d_size % sizeof(s64)); |
119 | } | 119 | } |
120 | 120 | ||
121 | if (indio_dev->ring->access.set_bytes_per_datum) | 121 | if (indio_dev->ring->access->set_bytes_per_datum) |
122 | indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring, | 122 | indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring, |
123 | d_size); | 123 | d_size); |
124 | 124 | ||
125 | ade7758_write_waveform_type(&indio_dev->dev, | 125 | ade7758_write_waveform_type(&indio_dev->dev, |
@@ -128,6 +128,12 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev) | |||
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static const struct iio_ring_setup_ops ade7758_ring_setup_ops = { | ||
132 | .preenable = &ade7758_ring_preenable, | ||
133 | .postenable = &iio_triggered_ring_postenable, | ||
134 | .predisable = &iio_triggered_ring_predisable, | ||
135 | }; | ||
136 | |||
131 | void ade7758_unconfigure_ring(struct iio_dev *indio_dev) | 137 | void ade7758_unconfigure_ring(struct iio_dev *indio_dev) |
132 | { | 138 | { |
133 | /* ensure that the trigger has been detached */ | 139 | /* ensure that the trigger has been detached */ |
@@ -153,10 +159,8 @@ int ade7758_configure_ring(struct iio_dev *indio_dev) | |||
153 | } | 159 | } |
154 | 160 | ||
155 | /* Effectively select the ring buffer implementation */ | 161 | /* Effectively select the ring buffer implementation */ |
156 | iio_ring_sw_register_funcs(&indio_dev->ring->access); | 162 | indio_dev->ring->access = &ring_sw_access_funcs; |
157 | indio_dev->ring->preenable = &ade7758_ring_preenable; | 163 | indio_dev->ring->setup_ops = &ade7758_ring_setup_ops; |
158 | indio_dev->ring->postenable = &iio_triggered_ring_postenable; | ||
159 | indio_dev->ring->predisable = &iio_triggered_ring_predisable; | ||
160 | indio_dev->ring->owner = THIS_MODULE; | 164 | indio_dev->ring->owner = THIS_MODULE; |
161 | 165 | ||
162 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); | 166 | indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL); |
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h index 15d15a46ee22..33496766863a 100644 --- a/drivers/staging/iio/ring_generic.h +++ b/drivers/staging/iio/ring_generic.h | |||
@@ -64,6 +64,13 @@ struct iio_ring_access_funcs { | |||
64 | int (*enable)(struct iio_ring_buffer *ring); | 64 | int (*enable)(struct iio_ring_buffer *ring); |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct iio_ring_setup_ops { | ||
68 | int (*preenable)(struct iio_dev *); | ||
69 | int (*postenable)(struct iio_dev *); | ||
70 | int (*predisable)(struct iio_dev *); | ||
71 | int (*postdisable)(struct iio_dev *); | ||
72 | }; | ||
73 | |||
67 | /** | 74 | /** |
68 | * struct iio_ring_buffer - general ring buffer structure | 75 | * struct iio_ring_buffer - general ring buffer structure |
69 | * @dev: ring buffer device struct | 76 | * @dev: ring buffer device struct |
@@ -101,12 +108,8 @@ struct iio_ring_buffer { | |||
101 | u32 scan_mask; | 108 | u32 scan_mask; |
102 | bool scan_timestamp; | 109 | bool scan_timestamp; |
103 | struct iio_handler access_handler; | 110 | struct iio_handler access_handler; |
104 | struct iio_ring_access_funcs access; | 111 | const struct iio_ring_access_funcs *access; |
105 | int (*preenable)(struct iio_dev *); | 112 | const struct iio_ring_setup_ops *setup_ops; |
106 | int (*postenable)(struct iio_dev *); | ||
107 | int (*predisable)(struct iio_dev *); | ||
108 | int (*postdisable)(struct iio_dev *); | ||
109 | |||
110 | struct list_head scan_el_dev_attr_list; | 113 | struct list_head scan_el_dev_attr_list; |
111 | 114 | ||
112 | wait_queue_head_t pollq; | 115 | wait_queue_head_t pollq; |
@@ -349,6 +352,9 @@ ssize_t iio_show_ring_enable(struct device *dev, | |||
349 | #define IIO_RING_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \ | 352 | #define IIO_RING_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \ |
350 | iio_show_ring_enable, \ | 353 | iio_show_ring_enable, \ |
351 | iio_store_ring_enable) | 354 | iio_store_ring_enable) |
355 | |||
356 | int iio_sw_ring_preenable(struct iio_dev *indio_dev); | ||
357 | |||
352 | #else /* CONFIG_IIO_RING_BUFFER */ | 358 | #else /* CONFIG_IIO_RING_BUFFER */ |
353 | static inline int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id) | 359 | static inline int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id) |
354 | { | 360 | { |
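One consequence of the new layout, visible in the industrialio-ring.c hunks above, is that each individual hook is NULL-checked before it is called, while ring->setup_ops itself is dereferenced unconditionally in the enable path; a buffer should therefore always be given a setup_ops table, even an empty one. A hypothetical sketch (mydrv_* names are illustrative):

/* Hypothetical buffer with no enable/disable work: the hooks may all be
 * NULL (the core checks each one before calling it), but the table
 * itself must be provided. */
static const struct iio_ring_setup_ops mydrv_noop_setup_ops = { };

static void mydrv_attach_buffer(struct iio_ring_buffer *ring)
{
	ring->setup_ops = &mydrv_noop_setup_ops;
}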
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c index d55757bb5fd9..35896cbb07d9 100644 --- a/drivers/staging/iio/ring_sw.c +++ b/drivers/staging/iio/ring_sw.c | |||
@@ -17,6 +17,36 @@ | |||
17 | #include "ring_sw.h" | 17 | #include "ring_sw.h" |
18 | #include "trigger.h" | 18 | #include "trigger.h" |
19 | 19 | ||
20 | /** | ||
21 | * struct iio_sw_ring_buffer - software ring buffer | ||
22 | * @buf: generic ring buffer elements | ||
23 | * @data: the ring buffer memory | ||
24 | * @read_p: read pointer (oldest available) | ||
25 | * @write_p: write pointer | ||
26 | * @last_written_p: read pointer (newest available) | ||
27 | * @half_p: half buffer length behind write_p (event generation) | ||
28 | * @use_count: reference count to prevent resizing when in use | ||
29 | * @update_needed: flag to indicated change in size requested | ||
30 | * @use_lock: lock to prevent change in size when in use | ||
31 | * | ||
32 | * Note that the first element of all ring buffers must be a | ||
33 | * struct iio_ring_buffer. | ||
34 | **/ | ||
35 | struct iio_sw_ring_buffer { | ||
36 | struct iio_ring_buffer buf; | ||
37 | unsigned char *data; | ||
38 | unsigned char *read_p; | ||
39 | unsigned char *write_p; | ||
40 | unsigned char *last_written_p; | ||
41 | /* used to act as a point at which to signal an event */ | ||
42 | unsigned char *half_p; | ||
43 | int use_count; | ||
44 | int update_needed; | ||
45 | spinlock_t use_lock; | ||
46 | }; | ||
47 | |||
48 | #define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf) | ||
49 | |||
20 | static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring, | 50 | static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring, |
21 | int bytes_per_datum, int length) | 51 | int bytes_per_datum, int length) |
22 | { | 52 | { |
@@ -41,23 +71,21 @@ static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring) | |||
41 | kfree(ring->data); | 71 | kfree(ring->data); |
42 | } | 72 | } |
43 | 73 | ||
44 | void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r) | 74 | static void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r) |
45 | { | 75 | { |
46 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 76 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
47 | spin_lock(&ring->use_lock); | 77 | spin_lock(&ring->use_lock); |
48 | ring->use_count++; | 78 | ring->use_count++; |
49 | spin_unlock(&ring->use_lock); | 79 | spin_unlock(&ring->use_lock); |
50 | } | 80 | } |
51 | EXPORT_SYMBOL(iio_mark_sw_rb_in_use); | ||
52 | 81 | ||
53 | void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r) | 82 | static void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r) |
54 | { | 83 | { |
55 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 84 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
56 | spin_lock(&ring->use_lock); | 85 | spin_lock(&ring->use_lock); |
57 | ring->use_count--; | 86 | ring->use_count--; |
58 | spin_unlock(&ring->use_lock); | 87 | spin_unlock(&ring->use_lock); |
59 | } | 88 | } |
60 | EXPORT_SYMBOL(iio_unmark_sw_rb_in_use); | ||
61 | 89 | ||
62 | 90 | ||
63 | /* Ring buffer related functionality */ | 91 | /* Ring buffer related functionality */ |
@@ -138,8 +166,8 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring, | |||
138 | return ret; | 166 | return ret; |
139 | } | 167 | } |
140 | 168 | ||
141 | int iio_read_first_n_sw_rb(struct iio_ring_buffer *r, | 169 | static int iio_read_first_n_sw_rb(struct iio_ring_buffer *r, |
142 | size_t n, char __user *buf) | 170 | size_t n, char __user *buf) |
143 | { | 171 | { |
144 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 172 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
145 | 173 | ||
@@ -268,14 +296,14 @@ error_ret: | |||
268 | 296 | ||
269 | return ret; | 297 | return ret; |
270 | } | 298 | } |
271 | EXPORT_SYMBOL(iio_read_first_n_sw_rb); | ||
272 | 299 | ||
273 | int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp) | 300 | static int iio_store_to_sw_rb(struct iio_ring_buffer *r, |
301 | u8 *data, | ||
302 | s64 timestamp) | ||
274 | { | 303 | { |
275 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 304 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
276 | return iio_store_to_sw_ring(ring, data, timestamp); | 305 | return iio_store_to_sw_ring(ring, data, timestamp); |
277 | } | 306 | } |
278 | EXPORT_SYMBOL(iio_store_to_sw_rb); | ||
279 | 307 | ||
280 | static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring, | 308 | static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring, |
281 | unsigned char *data) | 309 | unsigned char *data) |
@@ -299,14 +327,13 @@ again: | |||
299 | return 0; | 327 | return 0; |
300 | } | 328 | } |
301 | 329 | ||
302 | int iio_read_last_from_sw_rb(struct iio_ring_buffer *r, | 330 | static int iio_read_last_from_sw_rb(struct iio_ring_buffer *r, |
303 | unsigned char *data) | 331 | unsigned char *data) |
304 | { | 332 | { |
305 | return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data); | 333 | return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data); |
306 | } | 334 | } |
307 | EXPORT_SYMBOL(iio_read_last_from_sw_rb); | ||
308 | 335 | ||
309 | int iio_request_update_sw_rb(struct iio_ring_buffer *r) | 336 | static int iio_request_update_sw_rb(struct iio_ring_buffer *r) |
310 | { | 337 | { |
311 | int ret = 0; | 338 | int ret = 0; |
312 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 339 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
@@ -326,50 +353,44 @@ error_ret: | |||
326 | spin_unlock(&ring->use_lock); | 353 | spin_unlock(&ring->use_lock); |
327 | return ret; | 354 | return ret; |
328 | } | 355 | } |
329 | EXPORT_SYMBOL(iio_request_update_sw_rb); | ||
330 | 356 | ||
331 | int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r) | 357 | static int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r) |
332 | { | 358 | { |
333 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 359 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
334 | return ring->buf.bytes_per_datum; | 360 | return ring->buf.bytes_per_datum; |
335 | } | 361 | } |
336 | EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb); | ||
337 | 362 | ||
338 | int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd) | 363 | static int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd) |
339 | { | 364 | { |
340 | if (r->bytes_per_datum != bpd) { | 365 | if (r->bytes_per_datum != bpd) { |
341 | r->bytes_per_datum = bpd; | 366 | r->bytes_per_datum = bpd; |
342 | if (r->access.mark_param_change) | 367 | if (r->access->mark_param_change) |
343 | r->access.mark_param_change(r); | 368 | r->access->mark_param_change(r); |
344 | } | 369 | } |
345 | return 0; | 370 | return 0; |
346 | } | 371 | } |
347 | EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb); | ||
348 | 372 | ||
349 | int iio_get_length_sw_rb(struct iio_ring_buffer *r) | 373 | static int iio_get_length_sw_rb(struct iio_ring_buffer *r) |
350 | { | 374 | { |
351 | return r->length; | 375 | return r->length; |
352 | } | 376 | } |
353 | EXPORT_SYMBOL(iio_get_length_sw_rb); | ||
354 | 377 | ||
355 | int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length) | 378 | static int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length) |
356 | { | 379 | { |
357 | if (r->length != length) { | 380 | if (r->length != length) { |
358 | r->length = length; | 381 | r->length = length; |
359 | if (r->access.mark_param_change) | 382 | if (r->access->mark_param_change) |
360 | r->access.mark_param_change(r); | 383 | r->access->mark_param_change(r); |
361 | } | 384 | } |
362 | return 0; | 385 | return 0; |
363 | } | 386 | } |
364 | EXPORT_SYMBOL(iio_set_length_sw_rb); | ||
365 | 387 | ||
366 | int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r) | 388 | static int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r) |
367 | { | 389 | { |
368 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); | 390 | struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r); |
369 | ring->update_needed = true; | 391 | ring->update_needed = true; |
370 | return 0; | 392 | return 0; |
371 | } | 393 | } |
372 | EXPORT_SYMBOL(iio_mark_update_needed_sw_rb); | ||
373 | 394 | ||
374 | static void iio_sw_rb_release(struct device *dev) | 395 | static void iio_sw_rb_release(struct device *dev) |
375 | { | 396 | { |
@@ -412,6 +433,7 @@ struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev) | |||
412 | ring = kzalloc(sizeof *ring, GFP_KERNEL); | 433 | ring = kzalloc(sizeof *ring, GFP_KERNEL); |
413 | if (!ring) | 434 | if (!ring) |
414 | return NULL; | 435 | return NULL; |
436 | ring->update_needed = true; | ||
415 | buf = &ring->buf; | 437 | buf = &ring->buf; |
416 | iio_ring_buffer_init(buf, indio_dev); | 438 | iio_ring_buffer_init(buf, indio_dev); |
417 | __iio_init_sw_ring_buffer(ring); | 439 | __iio_init_sw_ring_buffer(ring); |
@@ -430,36 +452,11 @@ void iio_sw_rb_free(struct iio_ring_buffer *r) | |||
430 | } | 452 | } |
431 | EXPORT_SYMBOL(iio_sw_rb_free); | 453 | EXPORT_SYMBOL(iio_sw_rb_free); |
432 | 454 | ||
433 | int iio_sw_ring_preenable(struct iio_dev *indio_dev) | ||
434 | { | ||
435 | struct iio_ring_buffer *ring = indio_dev->ring; | ||
436 | size_t size; | ||
437 | dev_dbg(&indio_dev->dev, "%s\n", __func__); | ||
438 | /* Check if there are any scan elements enabled, if not fail*/ | ||
439 | if (!(ring->scan_count || ring->scan_timestamp)) | ||
440 | return -EINVAL; | ||
441 | if (ring->scan_timestamp) | ||
442 | if (ring->scan_count) | ||
443 | /* Timestamp (aligned to s64) and data */ | ||
444 | size = (((ring->scan_count * ring->bpe) | ||
445 | + sizeof(s64) - 1) | ||
446 | & ~(sizeof(s64) - 1)) | ||
447 | + sizeof(s64); | ||
448 | else /* Timestamp only */ | ||
449 | size = sizeof(s64); | ||
450 | else /* Data only */ | ||
451 | size = ring->scan_count * ring->bpe; | ||
452 | ring->access.set_bytes_per_datum(ring, size); | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | EXPORT_SYMBOL(iio_sw_ring_preenable); | ||
457 | |||
458 | void iio_sw_trigger_to_ring(struct iio_sw_ring_helper_state *st) | 455 | void iio_sw_trigger_to_ring(struct iio_sw_ring_helper_state *st) |
459 | { | 456 | { |
460 | struct iio_ring_buffer *ring = st->indio_dev->ring; | 457 | struct iio_ring_buffer *ring = st->indio_dev->ring; |
461 | int len = 0; | 458 | int len = 0; |
462 | size_t datasize = ring->access.get_bytes_per_datum(ring); | 459 | size_t datasize = ring->access->get_bytes_per_datum(ring); |
463 | char *data = kmalloc(datasize, GFP_KERNEL); | 460 | char *data = kmalloc(datasize, GFP_KERNEL); |
464 | 461 | ||
465 | if (data == NULL) { | 462 | if (data == NULL) { |
@@ -476,7 +473,7 @@ void iio_sw_trigger_to_ring(struct iio_sw_ring_helper_state *st) | |||
476 | *(s64 *)(((phys_addr_t)data + len | 473 | *(s64 *)(((phys_addr_t)data + len |
477 | + sizeof(s64) - 1) & ~(sizeof(s64) - 1)) | 474 | + sizeof(s64) - 1) & ~(sizeof(s64) - 1)) |
478 | = st->last_timestamp; | 475 | = st->last_timestamp; |
479 | ring->access.store_to(ring, | 476 | ring->access->store_to(ring, |
480 | (u8 *)data, | 477 | (u8 *)data, |
481 | st->last_timestamp); | 478 | st->last_timestamp); |
482 | 479 | ||
@@ -504,5 +501,20 @@ void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time) | |||
504 | } | 501 | } |
505 | EXPORT_SYMBOL(iio_sw_poll_func_th); | 502 | EXPORT_SYMBOL(iio_sw_poll_func_th); |
506 | 503 | ||
504 | const struct iio_ring_access_funcs ring_sw_access_funcs = { | ||
505 | .mark_in_use = &iio_mark_sw_rb_in_use, | ||
506 | .unmark_in_use = &iio_unmark_sw_rb_in_use, | ||
507 | .store_to = &iio_store_to_sw_rb, | ||
508 | .read_last = &iio_read_last_from_sw_rb, | ||
509 | .read_first_n = &iio_read_first_n_sw_rb, | ||
510 | .mark_param_change = &iio_mark_update_needed_sw_rb, | ||
511 | .request_update = &iio_request_update_sw_rb, | ||
512 | .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb, | ||
513 | .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb, | ||
514 | .get_length = &iio_get_length_sw_rb, | ||
515 | .set_length = &iio_set_length_sw_rb, | ||
516 | }; | ||
517 | EXPORT_SYMBOL(ring_sw_access_funcs); | ||
518 | |||
507 | MODULE_DESCRIPTION("Industrialio I/O software ring buffer"); | 519 | MODULE_DESCRIPTION("Industrialio I/O software ring buffer"); |
508 | MODULE_LICENSE("GPL"); | 520 | MODULE_LICENSE("GPL"); |
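ring_sw.c now keeps struct iio_sw_ring_buffer and the iio_to_sw_ring() helper private to the implementation; the rule that struct iio_ring_buffer must be the first member is what lets container_of() recover the outer structure from the generic pointer the core passes around. A self-contained userspace illustration of that pattern (the struct names here are illustrative, not the kernel ones):

/* Userspace illustration of the container_of() pattern used by
 * iio_to_sw_ring() above; types and names are illustrative. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ring { int length; };

struct sw_ring {
	struct ring buf;	/* must remain the first member */
	int use_count;
};

int main(void)
{
	struct sw_ring sw = { .buf = { .length = 64 } };
	struct ring *r = &sw.buf;	/* the generic handle the core sees */
	struct sw_ring *back = container_of(r, struct sw_ring, buf);

	printf("recovered outer struct: %s\n", back == &sw ? "yes" : "no");
	return 0;
}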
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h index 84b7c5a83214..93449890eeaa 100644 --- a/drivers/staging/iio/ring_sw.h +++ b/drivers/staging/iio/ring_sw.h | |||
@@ -23,190 +23,18 @@ | |||
23 | 23 | ||
24 | #ifndef _IIO_RING_SW_H_ | 24 | #ifndef _IIO_RING_SW_H_ |
25 | #define _IIO_RING_SW_H_ | 25 | #define _IIO_RING_SW_H_ |
26 | /* NEEDS COMMENTS */ | ||
27 | /* The intention is that this should be a separate module from the iio core. | ||
28 | * This is a bit like supporting algorithms dependent on what the device | ||
29 | * driver requests - some may support multiple options */ | ||
30 | |||
31 | |||
32 | #include "iio.h" | 26 | #include "iio.h" |
33 | #include "ring_generic.h" | 27 | #include "ring_generic.h" |
34 | 28 | ||
35 | #if defined CONFIG_IIO_SW_RING || defined CONFIG_IIO_SW_RING_MODULE | 29 | #if defined CONFIG_IIO_SW_RING || defined CONFIG_IIO_SW_RING_MODULE |
36 | |||
37 | /** | ||
38 | * iio_create_sw_rb() - software ring buffer allocation | ||
39 | * @r: pointer to ring buffer pointer | ||
40 | **/ | ||
41 | int iio_create_sw_rb(struct iio_ring_buffer **r); | ||
42 | |||
43 | /** | ||
44 | * iio_init_sw_rb() - initialize the software ring buffer | ||
45 | * @r: pointer to a software ring buffer created by an | ||
46 | * iio_create_sw_rb call | ||
47 | * @indio_dev: industrial I/O device structure | ||
48 | **/ | ||
49 | int iio_init_sw_rb(struct iio_ring_buffer *r, struct iio_dev *indio_dev); | ||
50 | |||
51 | /** | ||
52 | * iio_exit_sw_rb() - reverse what was done in iio_init_sw_rb | ||
53 | * @r: pointer to a software ring buffer created by an | ||
54 | * iio_create_sw_rb call | ||
55 | **/ | ||
56 | void iio_exit_sw_rb(struct iio_ring_buffer *r); | ||
57 | |||
58 | /** | 30 | /** |
59 | * iio_free_sw_rb() - free memory occupied by the core ring buffer struct | 31 | * ring_sw_access_funcs - access functions for a software ring buffer |
60 | * @r: pointer to a software ring buffer created by an | ||
61 | * iio_create_sw_rb call | ||
62 | **/ | 32 | **/ |
63 | void iio_free_sw_rb(struct iio_ring_buffer *r); | 33 | extern const struct iio_ring_access_funcs ring_sw_access_funcs; |
64 | |||
65 | /** | ||
66 | * iio_mark_sw_rb_in_use() - reference counting to prevent incorrect chances | ||
67 | * @r: pointer to a software ring buffer created by an | ||
68 | * iio_create_sw_rb call | ||
69 | **/ | ||
70 | void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r); | ||
71 | |||
72 | /** | ||
73 | * iio_unmark_sw_rb_in_use() - notify the ring buffer that we don't care anymore | ||
74 | * @r: pointer to a software ring buffer created by an | ||
75 | * iio_create_sw_rb call | ||
76 | **/ | ||
77 | void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r); | ||
78 | |||
79 | /** | ||
80 | * iio_read_last_from_sw_rb() - attempt to read the last stored datum from the rb | ||
81 | * @r: pointer to a software ring buffer created by an | ||
82 | * iio_create_sw_rb call | ||
83 | * @data: where to store the last datum | ||
84 | **/ | ||
85 | int iio_read_last_from_sw_rb(struct iio_ring_buffer *r, u8 *data); | ||
86 | |||
87 | /** | ||
88 | * iio_store_to_sw_rb() - store a new datum to the ring buffer | ||
89 | * @r: pointer to ring buffer instance | ||
90 | * @data: the datum to be stored including timestamp if relevant | ||
91 | * @timestamp: timestamp which will be attached to buffer events if relevant | ||
92 | **/ | ||
93 | int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp); | ||
94 | |||
95 | /** | ||
96 | * iio_read_first_n_sw_rb() - attempt to read data from the ring buffer | ||
97 | * @r: ring buffer instance | ||
98 | * @n: number of datum's to try and read | ||
99 | * @buf: userspace buffer into which data is copied | ||
100 | * the end of the copy. | ||
101 | **/ | ||
102 | int iio_read_first_n_sw_rb(struct iio_ring_buffer *r, | ||
103 | size_t n, | ||
104 | char __user *buf); | ||
105 | |||
106 | /** | ||
107 | * iio_request_update_sw_rb() - update params if update needed | ||
108 | * @r: pointer to a software ring buffer created by an | ||
109 | * iio_create_sw_rb call | ||
110 | **/ | ||
111 | int iio_request_update_sw_rb(struct iio_ring_buffer *r); | ||
112 | |||
113 | /** | ||
114 | * iio_mark_update_needed_sw_rb() - tell the ring buffer it needs a param update | ||
115 | * @r: pointer to a software ring buffer created by an | ||
116 | * iio_create_sw_rb call | ||
117 | **/ | ||
118 | int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r); | ||
119 | |||
120 | |||
121 | /** | ||
122 | * iio_get_bytes_per_datum_sw_rb() - get the datum size in bytes | ||
123 | * @r: pointer to a software ring buffer created by an | ||
124 | * iio_create_sw_rb call | ||
125 | **/ | ||
126 | int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r); | ||
127 | |||
128 | /** | ||
129 | * iio_set_bytes_per_datum_sw_rb() - set the datum size in bytes | ||
130 | * @r: pointer to a software ring buffer created by an | ||
131 | * iio_create_sw_rb call | ||
132 | * @bpd: bytes per datum value | ||
133 | **/ | ||
134 | int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd); | ||
135 | |||
136 | /** | ||
137 | * iio_get_length_sw_rb() - get how many datums the rb may contain | ||
138 | * @r: pointer to a software ring buffer created by an | ||
139 | * iio_create_sw_rb call | ||
140 | **/ | ||
141 | int iio_get_length_sw_rb(struct iio_ring_buffer *r); | ||
142 | |||
143 | /** | ||
144 | * iio_set_length_sw_rb() - set how many datums the rb may contain | ||
145 | * @r: pointer to a software ring buffer created by an | ||
146 | * iio_create_sw_rb call | ||
147 | * @length: max number of data items for the ring buffer | ||
148 | **/ | ||
149 | int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length); | ||
150 | |||
151 | /** | ||
152 | * iio_ring_sw_register_funcs() - helper function to set up rb access | ||
153 | * @ra: pointer to @iio_ring_access_funcs | ||
154 | **/ | ||
155 | static inline void iio_ring_sw_register_funcs(struct iio_ring_access_funcs *ra) | ||
156 | { | ||
157 | ra->mark_in_use = &iio_mark_sw_rb_in_use; | ||
158 | ra->unmark_in_use = &iio_unmark_sw_rb_in_use; | ||
159 | |||
160 | ra->store_to = &iio_store_to_sw_rb; | ||
161 | ra->read_last = &iio_read_last_from_sw_rb; | ||
162 | ra->read_first_n = &iio_read_first_n_sw_rb; | ||
163 | |||
164 | ra->mark_param_change = &iio_mark_update_needed_sw_rb; | ||
165 | ra->request_update = &iio_request_update_sw_rb; | ||
166 | |||
167 | ra->get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb; | ||
168 | ra->set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb; | ||
169 | |||
170 | ra->get_length = &iio_get_length_sw_rb; | ||
171 | ra->set_length = &iio_set_length_sw_rb; | ||
172 | }; | ||
173 | |||
174 | /** | ||
175 | * struct iio_sw_ring_buffer - software ring buffer | ||
176 | * @buf: generic ring buffer elements | ||
177 | * @data: the ring buffer memory | ||
178 | * @read_p: read pointer (oldest available) | ||
179 | * @write_p: write pointer | ||
180 | * @last_written_p: read pointer (newest available) | ||
181 | * @half_p: half buffer length behind write_p (event generation) | ||
182 | * @use_count: reference count to prevent resizing when in use | ||
183 | * @update_needed: flag to indicated change in size requested | ||
184 | * @use_lock: lock to prevent change in size when in use | ||
185 | * | ||
186 | * Note that the first element of all ring buffers must be a | ||
187 | * struct iio_ring_buffer. | ||
188 | **/ | ||
189 | |||
190 | struct iio_sw_ring_buffer { | ||
191 | struct iio_ring_buffer buf; | ||
192 | unsigned char *data; | ||
193 | unsigned char *read_p; | ||
194 | unsigned char *write_p; | ||
195 | unsigned char *last_written_p; | ||
196 | /* used to act as a point at which to signal an event */ | ||
197 | unsigned char *half_p; | ||
198 | int use_count; | ||
199 | int update_needed; | ||
200 | spinlock_t use_lock; | ||
201 | }; | ||
202 | |||
203 | #define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf) | ||
204 | 34 | ||
205 | struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev); | 35 | struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev); |
206 | void iio_sw_rb_free(struct iio_ring_buffer *ring); | 36 | void iio_sw_rb_free(struct iio_ring_buffer *ring); |
207 | 37 | ||
208 | int iio_sw_ring_preenable(struct iio_dev *indio_dev); | ||
209 | |||
210 | struct iio_sw_ring_helper_state { | 38 | struct iio_sw_ring_helper_state { |
211 | struct work_struct work_trigger_to_ring; | 39 | struct work_struct work_trigger_to_ring; |
212 | struct iio_dev *indio_dev; | 40 | struct iio_dev *indio_dev; |