aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iio/inkern.c
diff options
context:
space:
mode:
authorJonathan Cameron <jic23@kernel.org>2012-04-25 10:54:59 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2012-04-25 14:11:38 -0400
commita980e046098b0a40eaff5e4e7fcde6cf035b7c06 (patch)
tree23375fc2bba39f088974cf621f7abf006d43d087 /drivers/iio/inkern.c
parent06458e277eac2b8761b0a04d3c808d57be281a2e (diff)
IIO: Move the core files to drivers/iio
Take the core support + the kfifo buffer implementation out of staging. Whilst we are far from done in improving this subsystem it is now at a stage where the userspace interfaces (provided by the core) can be considered stable. Drivers will follow over a longer time scale. Signed-off-by: Jonathan Cameron <jic23@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/iio/inkern.c')
-rw-r--r--drivers/iio/inkern.c292
1 files changed, 292 insertions, 0 deletions
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
new file mode 100644
index 000000000000..22ddf62b107c
--- /dev/null
+++ b/drivers/iio/inkern.c
@@ -0,0 +1,292 @@
1/* The industrial I/O core in kernel channel mapping
2 *
3 * Copyright (c) 2011 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9#include <linux/err.h>
10#include <linux/export.h>
11#include <linux/slab.h>
12#include <linux/mutex.h>
13
14#include <linux/iio/iio.h>
15#include "iio_core.h"
16#include <linux/iio/machine.h>
17#include <linux/iio/driver.h>
18#include <linux/iio/consumer.h>
19
/* Internal bookkeeping entry pairing one consumer map with the IIO device
 * that registered it.  Instances live on the global iio_map_list. */
struct iio_map_internal {
	struct iio_dev *indio_dev;	/* device providing the mapped channel */
	struct iio_map *map;		/* caller-owned map entry (not copied) */
	struct list_head l;		/* link in iio_map_list */
};
25
/* Global registry of consumer channel mappings; all traversal and
 * modification must hold iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
28
29int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
30{
31 int i = 0, ret = 0;
32 struct iio_map_internal *mapi;
33
34 if (maps == NULL)
35 return 0;
36
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
40 if (mapi == NULL) {
41 ret = -ENOMEM;
42 goto error_ret;
43 }
44 mapi->map = &maps[i];
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
47 i++;
48 }
49error_ret:
50 mutex_unlock(&iio_map_list_lock);
51
52 return ret;
53}
54EXPORT_SYMBOL_GPL(iio_map_array_register);
55
56
57/* Assumes the exact same array (e.g. memory locations)
58 * used at unregistration as used at registration rather than
59 * more complex checking of contents.
60 */
61int iio_map_array_unregister(struct iio_dev *indio_dev,
62 struct iio_map *maps)
63{
64 int i = 0, ret = 0;
65 bool found_it;
66 struct iio_map_internal *mapi;
67
68 if (maps == NULL)
69 return 0;
70
71 mutex_lock(&iio_map_list_lock);
72 while (maps[i].consumer_dev_name != NULL) {
73 found_it = false;
74 list_for_each_entry(mapi, &iio_map_list, l)
75 if (&maps[i] == mapi->map) {
76 list_del(&mapi->l);
77 kfree(mapi);
78 found_it = true;
79 break;
80 }
81 if (found_it == false) {
82 ret = -ENODEV;
83 goto error_ret;
84 }
85 }
86error_ret:
87 mutex_unlock(&iio_map_list_lock);
88
89 return ret;
90}
91EXPORT_SYMBOL_GPL(iio_map_array_unregister);
92
93static const struct iio_chan_spec
94*iio_chan_spec_from_name(const struct iio_dev *indio_dev,
95 const char *name)
96{
97 int i;
98 const struct iio_chan_spec *chan = NULL;
99
100 for (i = 0; i < indio_dev->num_channels; i++)
101 if (indio_dev->channels[i].datasheet_name &&
102 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
103 chan = &indio_dev->channels[i];
104 break;
105 }
106 return chan;
107}
108
109
110struct iio_channel *iio_st_channel_get(const char *name,
111 const char *channel_name)
112{
113 struct iio_map_internal *c_i = NULL, *c = NULL;
114 struct iio_channel *channel;
115
116 if (name == NULL && channel_name == NULL)
117 return ERR_PTR(-ENODEV);
118
119 /* first find matching entry the channel map */
120 mutex_lock(&iio_map_list_lock);
121 list_for_each_entry(c_i, &iio_map_list, l) {
122 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
123 (channel_name &&
124 strcmp(channel_name, c_i->map->consumer_channel) != 0))
125 continue;
126 c = c_i;
127 get_device(&c->indio_dev->dev);
128 break;
129 }
130 mutex_unlock(&iio_map_list_lock);
131 if (c == NULL)
132 return ERR_PTR(-ENODEV);
133
134 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
135 if (channel == NULL)
136 return ERR_PTR(-ENOMEM);
137
138 channel->indio_dev = c->indio_dev;
139
140 if (c->map->adc_channel_label)
141 channel->channel =
142 iio_chan_spec_from_name(channel->indio_dev,
143 c->map->adc_channel_label);
144
145 return channel;
146}
147EXPORT_SYMBOL_GPL(iio_st_channel_get);
148
/* Release a channel obtained via iio_st_channel_get(): drop the device
 * reference taken at get time, then free the consumer handle. */
void iio_st_channel_release(struct iio_channel *channel)
{
	put_device(&channel->indio_dev->dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_st_channel_release);
155
156struct iio_channel *iio_st_channel_get_all(const char *name)
157{
158 struct iio_channel *chans;
159 struct iio_map_internal *c = NULL;
160 int nummaps = 0;
161 int mapind = 0;
162 int i, ret;
163
164 if (name == NULL)
165 return ERR_PTR(-EINVAL);
166
167 mutex_lock(&iio_map_list_lock);
168 /* first count the matching maps */
169 list_for_each_entry(c, &iio_map_list, l)
170 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
171 continue;
172 else
173 nummaps++;
174
175 if (nummaps == 0) {
176 ret = -ENODEV;
177 goto error_ret;
178 }
179
180 /* NULL terminated array to save passing size */
181 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
182 if (chans == NULL) {
183 ret = -ENOMEM;
184 goto error_ret;
185 }
186
187 /* for each map fill in the chans element */
188 list_for_each_entry(c, &iio_map_list, l) {
189 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
190 continue;
191 chans[mapind].indio_dev = c->indio_dev;
192 chans[mapind].channel =
193 iio_chan_spec_from_name(chans[mapind].indio_dev,
194 c->map->adc_channel_label);
195 if (chans[mapind].channel == NULL) {
196 ret = -EINVAL;
197 put_device(&chans[mapind].indio_dev->dev);
198 goto error_free_chans;
199 }
200 get_device(&chans[mapind].indio_dev->dev);
201 mapind++;
202 }
203 mutex_unlock(&iio_map_list_lock);
204 if (mapind == 0) {
205 ret = -ENODEV;
206 goto error_free_chans;
207 }
208 return chans;
209
210error_free_chans:
211 for (i = 0; i < nummaps; i++)
212 if (chans[i].indio_dev)
213 put_device(&chans[i].indio_dev->dev);
214 kfree(chans);
215error_ret:
216 mutex_unlock(&iio_map_list_lock);
217
218 return ERR_PTR(ret);
219}
220EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
221
222void iio_st_channel_release_all(struct iio_channel *channels)
223{
224 struct iio_channel *chan = &channels[0];
225
226 while (chan->indio_dev) {
227 put_device(&chan->indio_dev->dev);
228 chan++;
229 }
230 kfree(channels);
231}
232EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
233
234int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
235{
236 int val2, ret;
237
238 mutex_lock(&chan->indio_dev->info_exist_lock);
239 if (chan->indio_dev->info == NULL) {
240 ret = -ENODEV;
241 goto err_unlock;
242 }
243
244 ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
245 val, &val2, 0);
246err_unlock:
247 mutex_unlock(&chan->indio_dev->info_exist_lock);
248
249 return ret;
250}
251EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
252
253int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
254{
255 int ret;
256
257 mutex_lock(&chan->indio_dev->info_exist_lock);
258 if (chan->indio_dev->info == NULL) {
259 ret = -ENODEV;
260 goto err_unlock;
261 }
262
263 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
264 chan->channel,
265 val, val2,
266 IIO_CHAN_INFO_SCALE);
267err_unlock:
268 mutex_unlock(&chan->indio_dev->info_exist_lock);
269
270 return ret;
271}
272EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
273
274int iio_st_get_channel_type(struct iio_channel *chan,
275 enum iio_chan_type *type)
276{
277 int ret = 0;
278 /* Need to verify underlying driver has not gone away */
279
280 mutex_lock(&chan->indio_dev->info_exist_lock);
281 if (chan->indio_dev->info == NULL) {
282 ret = -ENODEV;
283 goto err_unlock;
284 }
285
286 *type = chan->channel->type;
287err_unlock:
288 mutex_unlock(&chan->indio_dev->info_exist_lock);
289
290 return ret;
291}
292EXPORT_SYMBOL_GPL(iio_st_get_channel_type);