aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/iio
diff options
context:
space:
mode:
authorLars-Peter Clausen <lars@metafoo.de>2015-10-13 12:10:28 -0400
committerJonathan Cameron <jic23@kernel.org>2015-10-25 09:54:34 -0400
commit670b19ae9bfdbcb4ce2c2ffb2ec1659a7f4a2074 (patch)
tree47118d99269dc31aee91c8a619f9f12c55c223b6 /include/linux/iio
parente18a2ad45caeb11226e49c25068d0f2efe2adf6c (diff)
iio: Add generic DMA buffer infrastructure
The traditional approach used in IIO to implement buffered capture requires the generation of at least one interrupt per sample. In the interrupt handler the driver reads the sample from the device and copies it to a software buffer. This approach has a rather large per sample overhead associated with it. And while it works fine for sample rates in the range of up to 1000 samples per second it starts to consume a rather large share of the available CPU processing time once we go beyond that; this is especially true on an embedded system with limited processing power. The regular interrupt also causes increased power consumption by not allowing the hardware into deeper sleep states, which is something that becomes more and more important on mobile battery powered devices. And while the recently added watermark support mitigates some of the issues by allowing the device to generate interrupts at a rate lower than the data output rate, this still requires a storage buffer inside the device and even if it exists it is only a few hundred samples deep at most. DMA support on the other hand allows capturing multiple millions or even more samples without any CPU interaction. This allows the CPU to either go to sleep for longer periods or focus on other tasks, which increases overall system performance and reduces power consumption. In addition to that some devices might not even offer a way to read the data other than using DMA, which makes DMA mandatory to use for them. The tasks involved in implementing a DMA buffer can be divided into two categories. The first category is memory buffer management (allocation, mapping, etc.) and hooking this up to the IIO buffer callbacks like read(), enable(), disable(), etc. The second category of tasks is to set up the DMA hardware and manage the DMA transfers. Tasks from the first category will be very similar for all IIO drivers supporting DMA buffers, while the tasks from the second category will be hardware specific.
This patch implements a generic infrastructure that takes care of the former tasks. It provides a set of functions that implement the standard IIO buffer iio_buffer_access_funcs callbacks. These can either be used as is or be overloaded and augmented with driver specific code where necessary. For the DMA buffer support infrastructure that is introduced in this series sample data is grouped into so-called blocks. A block is the basic unit at which data is exchanged between the application and the hardware. The application is responsible for allocating the memory associated with the block and then passes the block to the hardware. When the hardware has captured a number of samples equal to the size of a block it will notify the application, which can then read the data from the block and process it. The block size can be freely chosen (within the constraints of the hardware). This allows making a trade-off between latency and management overhead. The larger the block size the lower the per sample overhead, but the latency between when the data was captured and when the application will be able to access it increases; in a similar way smaller block sizes have a larger per sample management overhead but a lower latency. The ideal block size thus depends on system and application requirements. For the time being the infrastructure only implements a simple double buffered scheme which allocates two blocks, each with half the size of the configured buffer size. This provides basic support for capturing continuous uninterrupted data over the existing file-IO ABI. Future extensions to the DMA buffer infrastructure will give applications more fine-grained control over how many blocks are allocated and the size of each block. But this requires userspace ABI additions which are intentionally not part of this patch and will be added separately. Tasks of the second category need to be implemented by a device specific driver.
They can be hooked up into the generic infrastructure using two simple callbacks, submit() and abort(). The submit() callback is used to schedule DMA transfers for blocks. Once a DMA transfer has been completed it is expected that the buffer driver calls iio_dma_buffer_block_done() to notify. The abort() callback is used for stopping all pending and active DMA transfers when the buffer is disabled. Signed-off-by: Lars-Peter Clausen <lars@metafoo.de> Signed-off-by: Jonathan Cameron <jic23@kernel.org>
Diffstat (limited to 'include/linux/iio')
-rw-r--r--include/linux/iio/buffer-dma.h152
1 files changed, 152 insertions, 0 deletions
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
new file mode 100644
index 000000000000..767467d886de
--- /dev/null
+++ b/include/linux/iio/buffer-dma.h
@@ -0,0 +1,152 @@
1/*
2 * Copyright 2013-2015 Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * Licensed under the GPL-2.
6 */
7
8#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
9#define __INDUSTRIALIO_DMA_BUFFER_H__
10
11#include <linux/list.h>
12#include <linux/kref.h>
13#include <linux/spinlock.h>
14#include <linux/mutex.h>
15#include <linux/iio/buffer.h>
16
17struct iio_dma_buffer_queue;
18struct iio_dma_buffer_ops;
19struct device;
20
21struct iio_buffer_block {
22 u32 size;
23 u32 bytes_used;
24};
25
/**
 * enum iio_block_state - Lifecycle state of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is owned by the application and not on
 *	any queue
 * @IIO_BLOCK_STATE_QUEUED: Block has been placed on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block has completed and is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked for freeing
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED = 0,
	IIO_BLOCK_STATE_QUEUED = 1,
	IIO_BLOCK_STATE_ACTIVE = 2,
	IIO_BLOCK_STATE_DONE = 3,
	IIO_BLOCK_STATE_DEAD = 4,
};
41
42/**
43 * struct iio_dma_buffer_block - IIO buffer block
44 * @head: List head
45 * @size: Total size of the block in bytes
46 * @bytes_used: Number of bytes that contain valid data
47 * @vaddr: Virutal address of the blocks memory
48 * @phys_addr: Physical address of the blocks memory
49 * @queue: Parent DMA buffer queue
50 * @kref: kref used to manage the lifetime of block
51 * @state: Current state of the block
52 */
53struct iio_dma_buffer_block {
54 /* May only be accessed by the owner of the block */
55 struct list_head head;
56 size_t bytes_used;
57
58 /*
59 * Set during allocation, constant thereafter. May be accessed read-only
60 * by anybody holding a reference to the block.
61 */
62 void *vaddr;
63 dma_addr_t phys_addr;
64 size_t size;
65 struct iio_dma_buffer_queue *queue;
66
67 /* Must not be accessed outside the core. */
68 struct kref kref;
69 /*
70 * Must not be accessed outside the core. Access needs to hold
71 * queue->list_lock if the block is not owned by the core.
72 */
73 enum iio_block_state state;
74};
75
/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio (simple double-buffering scheme,
 *	hence exactly two entries)
 * @active_block: Block currently being read from in read()
 * @pos: Read offset within the active block, in bytes
 * @block_size: Size of each block, in bytes
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;
};
89
/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, active and the fields in the fileio
 *   substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context as well as blocks on those lists. This is the outgoing queue
 *   list and typically also a list of active blocks in the part that handles
 *   the DMA controller
 * @incoming: List of buffers on the incoming queue
 * @outgoing: List of buffers on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	/* Protects @incoming, @active and @fileio (see kernel-doc above) */
	struct mutex lock;
	/* Protects @outgoing and driver-side active-block lists; atomic-safe */
	spinlock_t list_lock;
	struct list_head incoming;
	struct list_head outgoing;

	bool active;

	struct iio_dma_buffer_queue_fileio fileio;
};
120
/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller. Should
 *	schedule a DMA transfer for the block; once the transfer has completed
 *	the driver is expected to call iio_dma_buffer_block_done() for the
 *	block. Returns 0 on success or a negative error code.
 * @abort: Should abort all pending and active transfers when the buffer is
 *	disabled.
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
131
/*
 * Called by the hardware-specific driver to notify the core that a DMA
 * transfer has finished (block_done) or that a list of blocks has been
 * aborted (list_abort).
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

/*
 * Generic implementations of the standard iio_buffer_access_funcs callbacks.
 * Drivers can use these as-is or wrap them with driver-specific code.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);

/* Queue lifetime management for hardware-specific drivers. */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);

#endif