author		Lars-Peter Clausen <lars@metafoo.de>	2015-10-13 12:10:28 -0400
committer	Jonathan Cameron <jic23@kernel.org>	2015-10-25 09:54:34 -0400
commit		670b19ae9bfdbcb4ce2c2ffb2ec1659a7f4a2074 (patch)
tree		47118d99269dc31aee91c8a619f9f12c55c223b6
parent		e18a2ad45caeb11226e49c25068d0f2efe2adf6c (diff)
iio: Add generic DMA buffer infrastructure
The traditional approach used in IIO to implement buffered capture requires the generation of at least one interrupt per sample. In the interrupt handler the driver reads the sample from the device and copies it to a software buffer. This approach has a rather large per-sample overhead. While it works fine for sample rates up to about 1000 samples per second, beyond that it starts to consume a rather large share of the available CPU processing time; this is especially true on embedded systems with limited processing power. The regular interrupt also increases power consumption by not allowing the hardware into deeper sleep states, which becomes more and more important on mobile, battery-powered devices.

The recently added watermark support mitigates some of these issues by allowing the device to generate interrupts at a rate lower than the data output rate, but it still requires a storage buffer inside the device, and even where such a buffer exists it is only a few hundred samples deep at most. DMA support, on the other hand, makes it possible to capture millions of samples or more without any CPU interaction. This allows the CPU to either sleep for longer periods or focus on other tasks, which improves overall system performance and reduces power consumption. In addition, some devices might not even offer a way to read the data other than using DMA, which makes DMA support mandatory for them.

The tasks involved in implementing a DMA buffer can be divided into two categories. The first category is memory buffer management (allocation, mapping, etc.) and hooking this up to the IIO buffer callbacks like read(), enable(), disable(), etc. The second category is setting up the DMA hardware and managing the DMA transfers. Tasks from the first category will be very similar for all IIO drivers supporting DMA buffers, while tasks from the second category are hardware specific.

This patch implements a generic infrastructure that takes care of the former tasks. It provides a set of functions that implement the standard IIO buffer iio_buffer_access_funcs callbacks. These can either be used as-is or be overloaded and augmented with driver-specific code where necessary.

For the DMA buffer support infrastructure introduced in this series, sample data is grouped into so-called blocks. A block is the basic unit at which data is exchanged between the application and the hardware. The application is responsible for allocating the memory associated with a block and then passes the block to the hardware. When the hardware has captured a number of samples equal to the size of the block it notifies the application, which can then read the data from the block and process it.

The block size can be freely chosen (within the constraints of the hardware), which allows a trade-off to be made between latency and management overhead. The larger the block size, the lower the per-sample overhead, but the higher the latency between when the data is captured and when the application can access it; similarly, smaller block sizes have a larger per-sample management overhead but a lower latency. The ideal block size thus depends on system and application requirements.

For the time being the infrastructure only implements a simple double-buffered scheme which allocates two blocks, each with half the size of the configured buffer. This provides basic support for capturing continuous, uninterrupted data over the existing file-IO ABI.
Future extensions to the DMA buffer infrastructure will give applications more fine-grained control over how many blocks are allocated and the size of each block. This requires userspace ABI additions, which are intentionally not part of this patch and will be added separately.

Tasks of the second category need to be implemented by a device-specific driver. They can be hooked up to the generic infrastructure using two simple callbacks, submit() and abort(). The submit() callback is used to schedule DMA transfers for blocks. Once a DMA transfer has completed, the buffer driver is expected to call iio_dma_buffer_block_done() to notify the infrastructure. The abort() callback is used to stop all pending and active DMA transfers when the buffer is disabled.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
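To make the driver-side half described above concrete, here is a rough sketch of how a buffer driver sitting on top of a dmaengine channel might implement the submit() and abort() callbacks and hand completed blocks back with iio_dma_buffer_block_done(). It is not part of this patch; the foo_* names, the embedded dmaengine channel and the driver-private active list are assumptions made purely for illustration.

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/iio/buffer-dma.h>

struct foo_dma_buffer {
	struct iio_dma_buffer_queue queue;	/* generic DMA buffer queue, embedded */
	struct dma_chan *chan;			/* hypothetical dmaengine channel */
	struct list_head active;		/* blocks currently owned by the DMA controller */
};

static void foo_dma_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	/* Remove the block from the driver's active list under list_lock ... */
	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);

	/* ... and hand ownership of the completed block back to the core. */
	iio_dma_buffer_block_done(block);
}

static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct foo_dma_buffer *foo = container_of(queue, struct foo_dma_buffer, queue);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Capture one full block per transfer. Real hardware may have to round
	 * this down to its alignment requirements, but bytes_used must stay a
	 * multiple of the bytes per datum.
	 */
	block->bytes_used = block->size;

	desc = dmaengine_prep_slave_single(foo->chan, block->phys_addr,
		block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = foo_dma_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&queue->list_lock);
	list_add_tail(&block->head, &foo->active);
	spin_unlock_irq(&queue->list_lock);

	dma_async_issue_pending(foo->chan);

	return 0;
}

static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
{
	struct foo_dma_buffer *foo = container_of(queue, struct foo_dma_buffer, queue);

	dmaengine_terminate_all(foo->chan);
	/* Blocks still on the active list never completed; return them with bytes_used = 0. */
	iio_dma_buffer_block_list_abort(queue, &foo->active);
}

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_dma_submit,
	.abort = foo_dma_abort,
};

The block's head list entry is free for the driver to use while the block is in the ACTIVE state, since the core only uses it for the incoming and outgoing queues.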
-rw-r--r--	drivers/iio/buffer/Kconfig			9
-rw-r--r--	drivers/iio/buffer/Makefile			1
-rw-r--r--	drivers/iio/buffer/industrialio-buffer-dma.c	683
-rw-r--r--	include/linux/iio/buffer-dma.h			152
4 files changed, 845 insertions, 0 deletions
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 0a7b2fd3699b..b2fda1afc03e 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -9,6 +9,15 @@ config IIO_BUFFER_CB
 	  Should be selected by any drivers that do in-kernel push
 	  usage. That is, those where the data is pushed to the consumer.
 
+config IIO_BUFFER_DMA
+	tristate
+	help
+	  Provides the generic IIO DMA buffer infrastructure that can be used by
+	  drivers for devices with DMA support to implement the IIO buffer.
+
+	  Should be selected by drivers that want to use the generic DMA buffer
+	  infrastructure.
+
 config IIO_KFIFO_BUF
 	tristate "Industrial I/O buffering based on kfifo"
 	help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 4d193b9a9123..bda3f1143e72 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -4,5 +4,6 @@
 
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
+obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
 obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
 obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
new file mode 100644
index 000000000000..212cbedc7abb
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -0,0 +1,683 @@
1/*
2 * Copyright 2013-2015 Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * Licensed under the GPL-2.
6 */
7
8#include <linux/slab.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/workqueue.h>
13#include <linux/mutex.h>
14#include <linux/sched.h>
15#include <linux/poll.h>
16#include <linux/iio/buffer.h>
17#include <linux/iio/buffer-dma.h>
18#include <linux/dma-mapping.h>
19#include <linux/sizes.h>
20
21/*
22 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
23 * has its own memory buffer. The size of the block is the granularity at which
24 * memory is exchanged between the hardware and the application. Increasing the
25 * basic unit of data exchange from one sample to one block decreases the
26 * management overhead that is associated with each sample. E.g. if we say the
27 * management overhead for one exchange is x and the unit of exchange is one
28 * sample the overhead will be x for each sample. Whereas when using a block
29 * which contains n samples the overhead per sample is reduced to x/n. This
30 * makes it possible to achieve much higher sample rates than can be sustained
31 * with the one-sample approach.
32 *
33 * Blocks are exchanged between the DMA controller and the application via the
34 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
35 * incoming queue are waiting for the DMA controller to pick them up and fill
36 * them with data. Blocks on the outgoing queue have been filled with data and
37 * are waiting for the application to dequeue them and read the data.
38 *
39 * A block can be in one of the following states:
40 * * Owned by the application. In this state the application can read data from
41 * the block.
42 * * On the incoming list: Blocks on the incoming list are queued up to be
43 * processed by the DMA controller.
44 * * Owned by the DMA controller: The DMA controller is processing the block
45 * and filling it with data.
46 * * On the outgoing list: Blocks on the outgoing list have been successfully
47 * processed by the DMA controller and contain data. They can be dequeued by
48 * the application.
49 * * Dead: A dead block has been marked to be freed. It might still be owned
50 * by either the application or the DMA controller at the moment, but once
51 * that owner is done processing it the block will be freed instead of being
52 * put back on the incoming or outgoing queue.
53 *
54 * In addition to this, blocks are reference counted. Both the block structure
55 * and the storage memory for the block are freed when the last reference to
56 * the block is dropped. This means a block must not be accessed without
57 * holding a reference.
58 *
59 * The iio_dma_buffer implementation provides a generic infrastructure for
60 * managing the blocks.
61 *
62 * A driver for a specific piece of hardware that has DMA capabilities needs to
63 * implement the submit() callback from the iio_dma_buffer_ops structure. This
64 * callback is supposed to initiate the DMA transfer copying data from the
65 * converter to the memory region of the block. Once the DMA transfer has been
66 * completed the driver must call iio_dma_buffer_block_done() for the completed
67 * block.
68 *
69 * Prior to this it must set the bytes_used field of the block so that it contains
70 * the actual number of bytes in the buffer. Typically this will be equal to the
71 * size of the block, but if the DMA hardware has certain alignment requirements
72 * for the transfer length it might choose to use less than the full size. In
73 * either case it is expected that bytes_used is a multiple of the bytes per
74 * datum, i.e. the block must not contain partial samples.
75 *
76 * The driver must call iio_dma_buffer_block_done() for each block it has
77 * received through its submit() callback, even if it does not actually
78 * perform a DMA transfer for the block, e.g. because the buffer was disabled
79 * before the block transfer was started. In this case it should set bytes_used
80 * to 0.
81 *
82 * In addition it is recommended that a driver implements the abort() callback.
83 * It will be called when the buffer is disabled and can be used to cancel
84 * pending and stop active transfers.
85 *
86 * The specific driver implementation should use the default callback
87 * implementations provided by this module for the iio_buffer_access_funcs
88 * struct. It may overload some callbacks with custom variants if the hardware
89 * has special requirements that are not handled by the generic functions. If a
90 * driver chooses to overload a callback it has to ensure that the generic
91 * callback is called from within the custom callback.
92 */
93
94static void iio_buffer_block_release(struct kref *kref)
95{
96 struct iio_dma_buffer_block *block = container_of(kref,
97 struct iio_dma_buffer_block, kref);
98
99 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
100
101 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
102 block->vaddr, block->phys_addr);
103
104 iio_buffer_put(&block->queue->buffer);
105 kfree(block);
106}
107
108static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
109{
110 kref_get(&block->kref);
111}
112
113static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
114{
115 kref_put(&block->kref, iio_buffer_block_release);
116}
117
118/*
119 * dma_free_coherent can sleep, hence we need to take some special care to be
120 * able to drop a reference from an atomic context.
121 */
122static LIST_HEAD(iio_dma_buffer_dead_blocks);
123static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
124
125static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
126{
127 struct iio_dma_buffer_block *block, *_block;
128 LIST_HEAD(block_list);
129
130 spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
131 list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
132 spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);
133
134 list_for_each_entry_safe(block, _block, &block_list, head)
135 iio_buffer_block_release(&block->kref);
136}
137static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);
138
139static void iio_buffer_block_release_atomic(struct kref *kref)
140{
141 struct iio_dma_buffer_block *block;
142 unsigned long flags;
143
144 block = container_of(kref, struct iio_dma_buffer_block, kref);
145
146 spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
147 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
148 spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);
149
150 schedule_work(&iio_dma_buffer_cleanup_work);
151}
152
153/*
154 * Version of iio_buffer_block_put() that can be called from atomic context
155 */
156static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
157{
158 kref_put(&block->kref, iio_buffer_block_release_atomic);
159}
160
161static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
162{
163 return container_of(buf, struct iio_dma_buffer_queue, buffer);
164}
165
166static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
167 struct iio_dma_buffer_queue *queue, size_t size)
168{
169 struct iio_dma_buffer_block *block;
170
171 block = kzalloc(sizeof(*block), GFP_KERNEL);
172 if (!block)
173 return NULL;
174
175 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
176 &block->phys_addr, GFP_KERNEL);
177 if (!block->vaddr) {
178 kfree(block);
179 return NULL;
180 }
181
182 block->size = size;
183 block->state = IIO_BLOCK_STATE_DEQUEUED;
184 block->queue = queue;
185 INIT_LIST_HEAD(&block->head);
186 kref_init(&block->kref);
187
188 iio_buffer_get(&queue->buffer);
189
190 return block;
191}
192
193static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
194{
195 struct iio_dma_buffer_queue *queue = block->queue;
196
197 /*
198 * The buffer has already been freed by the application, just drop the
199 * reference.
200 */
201 if (block->state != IIO_BLOCK_STATE_DEAD) {
202 block->state = IIO_BLOCK_STATE_DONE;
203 list_add_tail(&block->head, &queue->outgoing);
204 }
205}
206
207/**
208 * iio_dma_buffer_block_done() - Indicate that a block has been completed
209 * @block: The completed block
210 *
211 * Should be called when the DMA controller has finished handling the block to
212 * pass back ownership of the block to the queue.
213 */
214void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
215{
216 struct iio_dma_buffer_queue *queue = block->queue;
217 unsigned long flags;
218
219 spin_lock_irqsave(&queue->list_lock, flags);
220 _iio_dma_buffer_block_done(block);
221 spin_unlock_irqrestore(&queue->list_lock, flags);
222
223 iio_buffer_block_put_atomic(block);
224 wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM);
225}
226EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
227
228/**
229 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
230 * aborted
231 * @queue: Queue for which to complete blocks.
232 * @list: List of aborted blocks. All blocks in this list must be from @queue.
233 *
234 * Typically called from the abort() callback after the DMA controller has been
235 * stopped. This will set bytes_used to 0 for each block in the list and then
236 * hand the blocks back to the queue.
237 */
238void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
239 struct list_head *list)
240{
241 struct iio_dma_buffer_block *block, *_block;
242 unsigned long flags;
243
244 spin_lock_irqsave(&queue->list_lock, flags);
245 list_for_each_entry_safe(block, _block, list, head) {
246 list_del(&block->head);
247 block->bytes_used = 0;
248 _iio_dma_buffer_block_done(block);
249 iio_buffer_block_put_atomic(block);
250 }
251 spin_unlock_irqrestore(&queue->list_lock, flags);
252
253 wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM);
254}
255EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
256
257static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
258{
259 /*
260 * If the core owns the block it can be re-used. This should be the
261 * default case when enabling the buffer, unless the DMA controller does
262 * not support abort and has not given back the block yet.
263 */
264 switch (block->state) {
265 case IIO_BLOCK_STATE_DEQUEUED:
266 case IIO_BLOCK_STATE_QUEUED:
267 case IIO_BLOCK_STATE_DONE:
268 return true;
269 default:
270 return false;
271 }
272}
273
274/**
275 * iio_dma_buffer_request_update() - DMA buffer request_update callback
276 * @buffer: The buffer which to request an update
277 *
278 * Should be used as the request_update callback for the
279 * iio_buffer_access_funcs struct for DMA buffers.
280 */
281int iio_dma_buffer_request_update(struct iio_buffer *buffer)
282{
283 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
284 struct iio_dma_buffer_block *block;
285 bool try_reuse = false;
286 size_t size;
287 int ret = 0;
288 int i;
289
290 /*
291 * Split the buffer into two even parts. This is used as a double
292 * buffering scheme with usually one block at a time being used by the
293 * DMA and the other one by the application.
294 */
295 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
296 queue->buffer.length, 2);
297
298 mutex_lock(&queue->lock);
299
300 /* Allocations are page aligned */
301 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
302 try_reuse = true;
303
304 queue->fileio.block_size = size;
305 queue->fileio.active_block = NULL;
306
307 spin_lock_irq(&queue->list_lock);
308 for (i = 0; i < 2; i++) {
309 block = queue->fileio.blocks[i];
310
311 /* If we can't re-use it free it */
312 if (block && (!iio_dma_block_reusable(block) || !try_reuse))
313 block->state = IIO_BLOCK_STATE_DEAD;
314 }
315
316 /*
317 * At this point all blocks are either owned by the core or marked as
318 * dead. This means we can reset the lists without having to fear
319 * corruption.
320 */
321 INIT_LIST_HEAD(&queue->outgoing);
322 spin_unlock_irq(&queue->list_lock);
323
324 INIT_LIST_HEAD(&queue->incoming);
325
326 for (i = 0; i < 2; i++) {
327 if (queue->fileio.blocks[i]) {
328 block = queue->fileio.blocks[i];
329 if (block->state == IIO_BLOCK_STATE_DEAD) {
330 /* Could not reuse it */
331 iio_buffer_block_put(block);
332 block = NULL;
333 } else {
334 block->size = size;
335 }
336 } else {
337 block = NULL;
338 }
339
340 if (!block) {
341 block = iio_dma_buffer_alloc_block(queue, size);
342 if (!block) {
343 ret = -ENOMEM;
344 goto out_unlock;
345 }
346 queue->fileio.blocks[i] = block;
347 }
348
349 block->state = IIO_BLOCK_STATE_QUEUED;
350 list_add_tail(&block->head, &queue->incoming);
351 }
352
353out_unlock:
354 mutex_unlock(&queue->lock);
355
356 return ret;
357}
358EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
359
360static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
361 struct iio_dma_buffer_block *block)
362{
363 int ret;
364
365 /*
366 * If the hardware has already been removed we put the block into
367 * limbo. It will neither be on the incoming nor outgoing list, nor will
368 * it ever complete. It will just wait to be freed eventually.
369 */
370 if (!queue->ops)
371 return;
372
373 block->state = IIO_BLOCK_STATE_ACTIVE;
374 iio_buffer_block_get(block);
375 ret = queue->ops->submit(queue, block);
376 if (ret) {
377 /*
378 * This is a bit of a problem and there is not much we can do
378 * other than wait for the buffer to be disabled and re-enabled
380 * and try again. But it should not really happen unless we run
381 * out of memory or something similar.
382 *
383 * TODO: Implement support in the IIO core to allow buffers to
384 * notify consumers that something went wrong and the buffer
385 * should be disabled.
386 */
387 iio_buffer_block_put(block);
388 }
389}
390
391/**
392 * iio_dma_buffer_enable() - Enable DMA buffer
393 * @buffer: IIO buffer to enable
394 * @indio_dev: IIO device the buffer is attached to
395 *
396 * Needs to be called when the device that the buffer is attached to starts
397 * sampling. Typically should be used as the iio_buffer_access_funcs enable callback.
398 *
399 * This will allocate the DMA buffers and start the DMA transfers.
400 */
401int iio_dma_buffer_enable(struct iio_buffer *buffer,
402 struct iio_dev *indio_dev)
403{
404 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
405 struct iio_dma_buffer_block *block, *_block;
406
407 mutex_lock(&queue->lock);
408 queue->active = true;
409 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
410 list_del(&block->head);
411 iio_dma_buffer_submit_block(queue, block);
412 }
413 mutex_unlock(&queue->lock);
414
415 return 0;
416}
417EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
418
419/**
420 * iio_dma_buffer_disable() - Disable DMA buffer
421 * @buffer: IIO DMA buffer to disable
422 * @indio_dev: IIO device the buffer is attached to
423 *
424 * Needs to be called when the device that the buffer is attached to stops
425 * sampling. Typically should be used as the iio_buffer_access_funcs disable callback.
426 */
427int iio_dma_buffer_disable(struct iio_buffer *buffer,
428 struct iio_dev *indio_dev)
429{
430 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
431
432 mutex_lock(&queue->lock);
433 queue->active = false;
434
435 if (queue->ops && queue->ops->abort)
436 queue->ops->abort(queue);
437 mutex_unlock(&queue->lock);
438
439 return 0;
440}
441EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
442
443static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
444 struct iio_dma_buffer_block *block)
445{
446 if (block->state == IIO_BLOCK_STATE_DEAD) {
447 iio_buffer_block_put(block);
448 } else if (queue->active) {
449 iio_dma_buffer_submit_block(queue, block);
450 } else {
451 block->state = IIO_BLOCK_STATE_QUEUED;
452 list_add_tail(&block->head, &queue->incoming);
453 }
454}
455
456static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
457 struct iio_dma_buffer_queue *queue)
458{
459 struct iio_dma_buffer_block *block;
460
461 spin_lock_irq(&queue->list_lock);
462 block = list_first_entry_or_null(&queue->outgoing, struct
463 iio_dma_buffer_block, head);
464 if (block != NULL) {
465 list_del(&block->head);
466 block->state = IIO_BLOCK_STATE_DEQUEUED;
467 }
468 spin_unlock_irq(&queue->list_lock);
469
470 return block;
471}
472
473/**
474 * iio_dma_buffer_read() - DMA buffer read callback
475 * @buffer: Buffer to read from
476 * @n: Number of bytes to read
477 * @user_buffer: Userspace buffer to copy the data to
478 *
479 * Should be used as the read_first_n callback for the iio_buffer_access_funcs
480 * struct for DMA buffers.
481 */
482int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
483 char __user *user_buffer)
484{
485 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
486 struct iio_dma_buffer_block *block;
487 int ret;
488
489 if (n < buffer->bytes_per_datum)
490 return -EINVAL;
491
492 mutex_lock(&queue->lock);
493
494 if (!queue->fileio.active_block) {
495 block = iio_dma_buffer_dequeue(queue);
496 if (block == NULL) {
497 ret = 0;
498 goto out_unlock;
499 }
500 queue->fileio.pos = 0;
501 queue->fileio.active_block = block;
502 } else {
503 block = queue->fileio.active_block;
504 }
505
506 n = rounddown(n, buffer->bytes_per_datum);
507 if (n > block->bytes_used - queue->fileio.pos)
508 n = block->bytes_used - queue->fileio.pos;
509
510 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
511 ret = -EFAULT;
512 goto out_unlock;
513 }
514
515 queue->fileio.pos += n;
516
517 if (queue->fileio.pos == block->bytes_used) {
518 queue->fileio.active_block = NULL;
519 iio_dma_buffer_enqueue(queue, block);
520 }
521
522 ret = n;
523
524out_unlock:
525 mutex_unlock(&queue->lock);
526
527 return ret;
528}
529EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
530
531/**
532 * iio_dma_buffer_data_available() - DMA buffer data_available callback
533 * @buf: Buffer to check for data availability
534 *
535 * Should be used as the data_available callback for the iio_buffer_access_funcs
536 * struct for DMA buffers.
537 */
538size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
539{
540 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
541 struct iio_dma_buffer_block *block;
542 size_t data_available = 0;
543
544 /*
545 * For counting the available bytes we'll use the size of the block not
546 * the number of actual bytes available in the block. Otherwise it is
547 * possible that we end up with a value that is lower than the watermark
548 * but won't increase since all blocks are in use.
549 */
550
551 mutex_lock(&queue->lock);
552 if (queue->fileio.active_block)
553 data_available += queue->fileio.active_block->size;
554
555 spin_lock_irq(&queue->list_lock);
556 list_for_each_entry(block, &queue->outgoing, head)
557 data_available += block->size;
558 spin_unlock_irq(&queue->list_lock);
559 mutex_unlock(&queue->lock);
560
561 return data_available;
562}
563EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
564
565/**
566 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
567 * @buffer: Buffer to set the bytes-per-datum for
568 * @bpd: The new bytes-per-datum value
569 *
570 * Should be used as the set_bytes_per_datum callback for the iio_buffer_access_funcs
571 * struct for DMA buffers.
572 */
573int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
574{
575 buffer->bytes_per_datum = bpd;
576
577 return 0;
578}
579EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
580
581/**
582 * iio_dma_buffer_set_length - DMA buffer set_length callback
583 * @buffer: Buffer to set the length for
584 * @length: The new buffer length
585 *
586 * Should be used as the set_length callback for the iio_buffer_access_funcs
587 * struct for DMA buffers.
588 */
589int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
590{
591 /* Avoid an invalid state */
592 if (length < 2)
593 length = 2;
594 buffer->length = length;
595 buffer->watermark = length / 2;
596
597 return 0;
598}
599EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
600
601/**
602 * iio_dma_buffer_init() - Initialize DMA buffer queue
603 * @queue: Buffer to initialize
604 * @dev: DMA device
605 * @ops: DMA buffer queue callback operations
606 *
607 * The DMA device will be used by the queue to do DMA memory allocations. So it
608 * should refer to the device that will perform the DMA to ensure that
609 * allocations are done from a memory region that can be accessed by the device.
610 */
611int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
612 struct device *dev, const struct iio_dma_buffer_ops *ops)
613{
614 iio_buffer_init(&queue->buffer);
615 queue->buffer.length = PAGE_SIZE;
616 queue->buffer.watermark = queue->buffer.length / 2;
617 queue->dev = dev;
618 queue->ops = ops;
619
620 INIT_LIST_HEAD(&queue->incoming);
621 INIT_LIST_HEAD(&queue->outgoing);
622
623 mutex_init(&queue->lock);
624 spin_lock_init(&queue->list_lock);
625
626 return 0;
627}
628EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
629
630/**
631 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
632 * @queue: Buffer to cleanup
633 *
634 * After this function has completed it is safe to free any resources that are
635 * associated with the buffer and are accessed inside the callback operations.
636 */
637void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
638{
639 unsigned int i;
640
641 mutex_lock(&queue->lock);
642
643 spin_lock_irq(&queue->list_lock);
644 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
645 if (!queue->fileio.blocks[i])
646 continue;
647 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
648 }
649 INIT_LIST_HEAD(&queue->outgoing);
650 spin_unlock_irq(&queue->list_lock);
651
652 INIT_LIST_HEAD(&queue->incoming);
653
654 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
655 if (!queue->fileio.blocks[i])
656 continue;
657 iio_buffer_block_put(queue->fileio.blocks[i]);
658 queue->fileio.blocks[i] = NULL;
659 }
660 queue->fileio.active_block = NULL;
661 queue->ops = NULL;
662
663 mutex_unlock(&queue->lock);
664}
665EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
666
667/**
668 * iio_dma_buffer_release() - Release final buffer resources
669 * @queue: Buffer to release
670 *
671 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
672 * called in the buffer's release callback implementation right before freeing
673 * the memory associated with the buffer.
674 */
675void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
676{
677 mutex_destroy(&queue->lock);
678}
679EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
680
681MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
682MODULE_DESCRIPTION("DMA buffer for the IIO framework");
683MODULE_LICENSE("GPL v2");
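A driver then wires the exported generic implementations into its iio_buffer_access_funcs, overloading individual callbacks only where needed. The sketch below continues the hypothetical foo_* example from the commit message; version-specific fields of iio_buffer_access_funcs (such as modes and flags) are left out, and the enable/disable callbacks are assumed to be available in the buffer core. The release callback illustrates the overloading rule from the comment at the top of this file: the driver-specific variant still calls the generic iio_dma_buffer_release() from within.

#include <linux/slab.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dma.h>

static void foo_dma_buffer_release(struct iio_buffer *buffer)
{
	struct foo_dma_buffer *foo =
		container_of(buffer, struct foo_dma_buffer, queue.buffer);

	/* Driver-specific cleanup, but the generic callback is still called. */
	iio_dma_buffer_release(&foo->queue);
	kfree(foo);
}

static const struct iio_buffer_access_funcs foo_dma_buffer_access_funcs = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = foo_dma_buffer_release,
};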
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
new file mode 100644
index 000000000000..767467d886de
--- /dev/null
+++ b/include/linux/iio/buffer-dma.h
@@ -0,0 +1,152 @@
1/*
2 * Copyright 2013-2015 Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * Licensed under the GPL-2.
6 */
7
8#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
9#define __INDUSTRIALIO_DMA_BUFFER_H__
10
11#include <linux/list.h>
12#include <linux/kref.h>
13#include <linux/spinlock.h>
14#include <linux/mutex.h>
15#include <linux/iio/buffer.h>
16
17struct iio_dma_buffer_queue;
18struct iio_dma_buffer_ops;
19struct device;
20
21struct iio_buffer_block {
22 u32 size;
23 u32 bytes_used;
24};
25
26/**
27 * enum iio_block_state - State of a struct iio_dma_buffer_block
28 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
29 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
30 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
31 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
32 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
33 */
34enum iio_block_state {
35 IIO_BLOCK_STATE_DEQUEUED,
36 IIO_BLOCK_STATE_QUEUED,
37 IIO_BLOCK_STATE_ACTIVE,
38 IIO_BLOCK_STATE_DONE,
39 IIO_BLOCK_STATE_DEAD,
40};
41
42/**
43 * struct iio_dma_buffer_block - IIO buffer block
44 * @head: List head
45 * @size: Total size of the block in bytes
46 * @bytes_used: Number of bytes that contain valid data
47 * @vaddr: Virtual address of the block's memory
48 * @phys_addr: Physical address of the block's memory
49 * @queue: Parent DMA buffer queue
50 * @kref: kref used to manage the lifetime of the block
51 * @state: Current state of the block
52 */
53struct iio_dma_buffer_block {
54 /* May only be accessed by the owner of the block */
55 struct list_head head;
56 size_t bytes_used;
57
58 /*
59 * Set during allocation, constant thereafter. May be accessed read-only
60 * by anybody holding a reference to the block.
61 */
62 void *vaddr;
63 dma_addr_t phys_addr;
64 size_t size;
65 struct iio_dma_buffer_queue *queue;
66
67 /* Must not be accessed outside the core. */
68 struct kref kref;
69 /*
70 * Must not be accessed outside the core. Access needs to hold
71 * queue->list_lock if the block is not owned by the core.
72 */
73 enum iio_block_state state;
74};
75
76/**
77 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
78 * @blocks: Buffer blocks used for fileio
79 * @active_block: Block being used in read()
80 * @pos: Read offset in the active block
81 * @block_size: Size of each block
82 */
83struct iio_dma_buffer_queue_fileio {
84 struct iio_dma_buffer_block *blocks[2];
85 struct iio_dma_buffer_block *active_block;
86 size_t pos;
87 size_t block_size;
88};
89
90/**
91 * struct iio_dma_buffer_queue - DMA buffer base structure
92 * @buffer: IIO buffer base structure
93 * @dev: Parent device
94 * @ops: DMA buffer callbacks
95 * @lock: Protects the incoming list, active and the fields in the fileio
96 * substruct
97 * @list_lock: Protects lists that contain blocks which can be modified in
98 * atomic context as well as blocks on those lists. This is the outgoing queue
99 * list and typically also a list of active blocks in the part that handles
100 * the DMA controller
101 * @incoming: List of buffers on the incoming queue
102 * @outgoing: List of buffers on the outgoing queue
103 * @active: Whether the buffer is currently active
104 * @fileio: FileIO state
105 */
106struct iio_dma_buffer_queue {
107 struct iio_buffer buffer;
108 struct device *dev;
109 const struct iio_dma_buffer_ops *ops;
110
111 struct mutex lock;
112 spinlock_t list_lock;
113 struct list_head incoming;
114 struct list_head outgoing;
115
116 bool active;
117
118 struct iio_dma_buffer_queue_fileio fileio;
119};
120
121/**
122 * struct iio_dma_buffer_ops - DMA buffer callback operations
123 * @submit: Called when a block is submitted to the DMA controller
124 * @abort: Should abort all pending transfers
125 */
126struct iio_dma_buffer_ops {
127 int (*submit)(struct iio_dma_buffer_queue *queue,
128 struct iio_dma_buffer_block *block);
129 void (*abort)(struct iio_dma_buffer_queue *queue);
130};
131
132void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
133void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
134 struct list_head *list);
135
136int iio_dma_buffer_enable(struct iio_buffer *buffer,
137 struct iio_dev *indio_dev);
138int iio_dma_buffer_disable(struct iio_buffer *buffer,
139 struct iio_dev *indio_dev);
140int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
141 char __user *user_buffer);
142size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
143int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
144int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
145int iio_dma_buffer_request_update(struct iio_buffer *buffer);
146
147int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
148 struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
149void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
150void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
151
152#endif
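Putting the pieces together, a device driver would allocate its buffer state, initialize the queue with iio_dma_buffer_init(), install the access functions, and later tear everything down via iio_dma_buffer_exit() and a final iio_buffer_put(). Again a hedged sketch built on the hypothetical foo_* helpers from the earlier examples, with error handling trimmed:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dma.h>

static struct iio_buffer *foo_dma_buffer_alloc(struct device *dma_dev,
	struct dma_chan *chan)
{
	struct foo_dma_buffer *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	foo->chan = chan;
	INIT_LIST_HEAD(&foo->active);

	/*
	 * dma_dev must be the device that actually performs the DMA so that
	 * the coherent allocations end up in memory it can access.
	 */
	iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);
	foo->queue.buffer.access = &foo_dma_buffer_access_funcs;

	return &foo->queue.buffer;
}

static void foo_dma_buffer_free(struct iio_buffer *buffer)
{
	struct foo_dma_buffer *foo =
		container_of(buffer, struct foo_dma_buffer, queue.buffer);

	/* Marks all blocks as dead and detaches the queue from its ops ... */
	iio_dma_buffer_exit(&foo->queue);
	/* ... dropping the last reference then ends up in the release callback. */
	iio_buffer_put(buffer);
}

The returned iio_buffer would typically be attached to the IIO device with iio_device_attach_buffer() before the device is registered.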