path: root/include/linux/dmaengine.h
author     Dan Williams <dan.j.williams@intel.com>  2007-07-09 14:56:42 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2007-07-13 11:06:13 -0400
commit     d379b01e9087a582d58f4b678208a4f8d8376fe7 (patch)
tree       155920bca93c18afba66b9d5acfecd359d5bec65 /include/linux/dmaengine.h
parent     7405f74badf46b5d023c5d2b670b4471525f6c91 (diff)
dmaengine: make clients responsible for managing channels
The current implementation assumes that a channel will only be used by one
client at a time. In order to enable channel sharing the dmaengine core is
changed to a model where clients subscribe to channel-available-events.
Instead of tracking how many channels a client wants and how many it has
received the core just broadcasts the available channels and lets the clients
optionally take a reference. The core learns about the clients' needs at
dma_event_callback time.

In support of multiple operation types, clients can specify a capability mask
to only be notified of channels that satisfy a certain set of capabilities.

Changelog:
* removed DMA_TX_ARRAY_INIT, no longer needed
* dma_client_chan_free -> dma_chan_release: switch to global reference
  counting only at device unregistration time, before it was also happening
  at client unregistration time
* clients now return dma_state_client to dmaengine (ack, dup, nak)
* checkpatch.pl fixes
* fixup merge with git-ioat

Cc: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
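To make the new subscription model concrete, here is a minimal, hypothetical client skeleton written against the interface as it stands after this patch. The example_* identifiers are invented for illustration only; dma_cap_zero(), dma_cap_set() and DMA_MEMCPY are assumed to come from the capability-mask infrastructure introduced by the parent commit and are not part of this diff. The callback body itself is sketched after the dma_event_callback hunk further down.

/*
 * Hypothetical sketch, not part of the patch: a client that only wants
 * memcpy-capable channels.
 */
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>

/* forward declaration; the callback body is sketched further down */
static enum dma_state_client example_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

/* the client structure is now allocated by its owner, not by the core */
static struct dma_client example_client = {
	.event_callback = example_event,
};

static int __init example_init(void)
{
	/* assumed capability helpers: restrict notifications to memcpy channels */
	dma_cap_zero(example_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);

	/* subscribe, then ask the core to broadcast the channels it knows about */
	dma_async_client_register(&example_client);
	dma_async_client_chan_request(&example_client);
	return 0;
}

static void __exit example_exit(void)
{
	/* stop receiving channel events */
	dma_async_client_unregister(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");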
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h |  58
1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3de1cf71031a..a3b6035b6c86 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,20 +29,32 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_event - resource PNP/power managment events
+ * enum dma_state - resource PNP/power managment state
  * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
  * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_ADDED: DMA device added to the system
+ * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
  * @DMA_RESOURCE_REMOVED: DMA device removed from the system
  */
-enum dma_event {
+enum dma_state {
 	DMA_RESOURCE_SUSPEND,
 	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_ADDED,
+	DMA_RESOURCE_AVAILABLE,
 	DMA_RESOURCE_REMOVED,
 };
 
 /**
+ * enum dma_state_client - state of the channel in the client
+ * @DMA_ACK: client would like to use, or was using this channel
+ * @DMA_DUP: client has already seen this channel, or is not using this channel
+ * @DMA_NAK: client does not want to see any more channels
+ */
+enum dma_state_client {
+	DMA_ACK,
+	DMA_DUP,
+	DMA_NAK,
+};
+
+/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -104,7 +116,6 @@ struct dma_chan_percpu {
 
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be %NULL when unused
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
@@ -112,12 +123,10 @@ struct dma_chan_percpu {
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
- * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  */
 struct dma_chan {
-	struct dma_client *client;
 	struct dma_device *device;
 	dma_cookie_t cookie;
 
@@ -129,11 +138,11 @@ struct dma_chan {
 	int slow_ref;
 	struct rcu_head rcu;
 
-	struct list_head client_node;
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 };
 
+
 void dma_chan_cleanup(struct kref *kref);
 
 static inline void dma_chan_get(struct dma_chan *chan)
@@ -158,26 +167,31 @@ static inline void dma_chan_put(struct dma_chan *chan)
 
 /*
  * typedef dma_event_callback - function pointer to a DMA event callback
+ * For each channel added to the system this routine is called for each client.
+ * If the client would like to use the channel it returns '1' to signal (ack)
+ * the dmaengine core to take out a reference on the channel and its
+ * corresponding device. A client must not 'ack' an available channel more
+ * than once. When a channel is removed all clients are notified. If a client
+ * is using the channel it must 'ack' the removal. A client must not 'ack' a
+ * removed channel more than once.
+ * @client - 'this' pointer for the client context
+ * @chan - channel to be acted upon
+ * @state - available or removed
  */
-typedef void (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_event event);
+struct dma_client;
+typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
+		struct dma_chan *chan, enum dma_state state);
 
 /**
  * struct dma_client - info on the entity making use of DMA services
  * @event_callback: func ptr to call when something happens
- * @chan_count: number of chans allocated
- * @chans_desired: number of chans requested. Can be +/- chan_count
- * @lock: protects access to the channels list
- * @channels: the list of DMA channels allocated
+ * @cap_mask: only return channels that satisfy the requested capabilities
+ *  a value of zero corresponds to any capability
  * @global_node: list_head for global dma_client_list
  */
 struct dma_client {
 	dma_event_callback event_callback;
-	unsigned int chan_count;
-	unsigned int chans_desired;
-
-	spinlock_t lock;
-	struct list_head channels;
+	dma_cap_mask_t cap_mask;
 	struct list_head global_node;
 };
 
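Continuing the hypothetical example_* client sketched near the top of this page, a callback that follows the contract documented in the hunk above might look roughly like this: it acks the first available channel it can use, acks the removal of that channel, and returns DMA_DUP for everything else (it relies on <linux/spinlock.h> from the earlier sketch).

static struct dma_chan *example_chan;	/* the one channel we acked, if any */
static DEFINE_SPINLOCK(example_lock);

static enum dma_state_client example_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	enum dma_state_client ack = DMA_DUP;	/* default: not interested */

	spin_lock(&example_lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		if (!example_chan) {
			example_chan = chan;
			ack = DMA_ACK;	/* core takes a reference on chan */
		}
		break;
	case DMA_RESOURCE_REMOVED:
		if (example_chan == chan) {
			example_chan = NULL;
			ack = DMA_ACK;	/* we were using it, so ack the removal */
		}
		break;
	default:
		/* suspend/resume: nothing special to do in this sketch */
		break;
	}
	spin_unlock(&example_lock);

	return ack;
}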
@@ -285,10 +299,9 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-struct dma_client *dma_async_client_register(dma_event_callback event_callback);
+void dma_async_client_register(struct dma_client *client);
 void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number);
+void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
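Once a channel has been acked it is driven through the unchanged memcpy entry points declared above. A rough, illustrative polling helper is sketched below; example_chan is the hypothetical pointer saved by the callback sketch, and dma_async_memcpy_issue_pending()/dma_async_memcpy_complete() are assumed to be the completion helpers defined elsewhere in this header, outside the hunks of this diff.

/* Illustrative only: synchronous copy on the channel the client acked. */
static int example_copy(void *dest, void *src, size_t len)
{
	struct dma_chan *chan = example_chan;
	dma_cookie_t cookie;

	if (!chan)
		return -ENODEV;	/* no channel acked yet */

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0)
		return -ENOMEM;	/* descriptor allocation failed */

	/* kick the engine and busy-wait for completion (simplistic) */
	dma_async_memcpy_issue_pending(chan);
	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) ==
			DMA_IN_PROGRESS)
		cpu_relax();

	return 0;
}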
@@ -299,7 +312,6 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan);
 
-
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {