author     Dan Williams <dan.j.williams@intel.com>   2007-01-02 13:10:43 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2007-07-13 11:06:11 -0400
commit     7405f74badf46b5d023c5d2b670b4471525f6c91 (patch)
tree       20dd20571637dba1c2b04c7b13ac208c33b5706b /include/linux/dmaengine.h
parent     428ed6024fa74a271142f3257966e9b5e1cb37a1 (diff)
dmaengine: refactor dmaengine around dma_async_tx_descriptor
The current dmaengine interface defines multiple routines per operation,
e.g. dma_async_memcpy_buf_to_buf, dma_async_memcpy_buf_to_page, etc.  Adding
more operation types (xor, crc, etc.) to this model would result in an
unmanageable number of method permutations.
Are we really going to add a set of hooks for each DMA engine
whizbang feature?
- Jeff Garzik
The descriptor creation process is refactored using the new common
dma_async_tx_descriptor structure.  Instead of per-driver
do_<operation>_<dest>_to_<src> methods, drivers integrate
dma_async_tx_descriptor into their private software descriptor and then
define a 'prep' routine per operation.  The prep routine allocates a
descriptor and ensures that the tx_set_src, tx_set_dest, and tx_submit
routines are valid.  Descriptor creation and submission becomes:
struct dma_device *dev;
struct dma_chan *chan;
struct dma_async_tx_descriptor *tx;
tx = dev->device_prep_dma_<operation>(chan, len, int_flag)
tx->tx_set_src(dma_addr_t, tx, index /* for multi-source ops */)
tx->tx_set_dest(dma_addr_t, tx, index)
tx->tx_submit(tx)
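
As a concrete illustration, a client copy through the new interface might
look like the sketch below.  This is not code from the patch: the function
name is made up, src_dma/dest_dma are assumed to be already-mapped bus
addresses, and error handling is reduced to the minimum.

static dma_cookie_t example_issue_copy(struct dma_chan *chan,
                dma_addr_t dest_dma, dma_addr_t src_dma, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, len, 0 /* no irq */);
        if (!tx)
                return -ENOMEM;

        tx->tx_set_src(src_dma, tx, 0);         /* index 0: single source */
        tx->tx_set_dest(dest_dma, tx, 0);
        tx->callback = NULL;                    /* or a dma_async_tx_callback */
        tx->callback_param = NULL;

        cookie = tx->tx_submit(tx);             /* queue on the channel */
        dma_async_issue_pending(chan);          /* flush to hardware */

        return cookie;
}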
In addition to the refactoring, dma_async_tx_descriptor also lays the
groundwork for defining cross-channel-operation dependencies and a
callback facility for asynchronous notification of operation completion.
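
On the driver side, "integrate dma_async_tx_descriptor into their private
software descriptor" amounts to embedding the common structure and wiring up
the three methods from the prep routine.  A rough sketch follows; the foo_*
names are made up stand-ins for a driver's own hardware-descriptor handling
(ioat follows this pattern in the rest of the series).

struct foo_desc {
        void *hw;                               /* hardware descriptor */
        struct dma_async_tx_descriptor txd;     /* common offload fields */
};

static struct dma_async_tx_descriptor *
foo_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
        struct foo_desc *desc = foo_alloc_descriptor(chan, len);

        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_set_src = foo_tx_set_src;  /* writes the hw src address */
        desc->txd.tx_set_dest = foo_tx_set_dest;
        desc->txd.tx_submit = foo_tx_submit;    /* appends to the hw ring */

        return &desc->txd;
}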
Changelog:
* drop dma mapping methods, suggested by Chris Leech
* fix ioat_dma_dependency_added, also caught by Andrew Morton
* fix dma_sync_wait, change from Andrew Morton
* uninline large functions, change from Andrew Morton
* add tx->callback = NULL to dmaengine calls to interoperate with async_tx
calls
* hookup ioat_tx_submit
* convert channel capabilities to a 'cpumask_t like' bitmap (see the sketch
after this list)
* removed DMA_TX_ARRAY_INIT, no longer needed
* checkpatch.pl fixes
* make set_src, set_dest, and tx_submit descriptor specific methods
* fixup git-ioat merge
* move group_list and phys to dma_async_tx_descriptor
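
The 'cpumask_t like' bitmap mentioned above is the new dma_cap_mask_t: a
provider sets bits in its struct dma_device cap_mask, and clients test them
with the helpers this patch adds.  A minimal sketch, assuming device, chan,
and issue_offloaded_copy exist in the surrounding driver/client code:

        enum dma_transaction_type cap;

        /* provider: advertise the operations this device implements */
        dma_cap_set(DMA_MEMCPY, device->cap_mask);
        dma_cap_set(DMA_INTERRUPT, device->cap_mask);

        /* client: check a channel's capabilities before using it */
        if (dma_has_cap(DMA_MEMCPY, chan->device->cap_mask))
                issue_offloaded_copy(chan);

        /* or walk every capability the device exposes */
        for_each_dma_cap_mask(cap, chan->device->cap_mask)
                printk("dma%d supports transaction type %d\n",
                        chan->device->dev_id, cap);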
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--   include/linux/dmaengine.h   237
1 files changed, 149 insertions, 88 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c94d8f1d62e5..3de1cf71031a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -21,13 +21,12 @@
 #ifndef DMAENGINE_H
 #define DMAENGINE_H
 
-#ifdef CONFIG_DMA_ENGINE
-
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/kref.h>
 #include <linux/completion.h>
 #include <linux/rcupdate.h>
+#include <linux/dma-mapping.h>
 
 /**
  * enum dma_event - resource PNP/power managment events
@@ -65,6 +64,31 @@ enum dma_status {
 };
 
 /**
+ * enum dma_transaction_type - DMA transaction types/indexes
+ */
+enum dma_transaction_type {
+        DMA_MEMCPY,
+        DMA_XOR,
+        DMA_PQ_XOR,
+        DMA_DUAL_XOR,
+        DMA_PQ_UPDATE,
+        DMA_ZERO_SUM,
+        DMA_PQ_ZERO_SUM,
+        DMA_MEMSET,
+        DMA_MEMCPY_CRC32C,
+        DMA_INTERRUPT,
+};
+
+/* last transaction type for creation of the capabilities mask */
+#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
+
+/**
+ * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
+ *      See linux/cpumask.h
+ */
+typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+
+/**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
@@ -157,48 +181,106 @@ struct dma_client {
         struct list_head global_node;
 };
 
+typedef void (*dma_async_tx_callback)(void *dma_async_param);
+/**
+ * struct dma_async_tx_descriptor - async transaction descriptor
+ * ---dma generic offload fields---
+ * @cookie: tracking cookie for this transaction, set to -EBUSY if
+ *      this tx is sitting on a dependency list
+ * @ack: the descriptor can not be reused until the client acknowledges
+ *      receipt, i.e. has has a chance to establish any dependency chains
+ * @phys: physical address of the descriptor
+ * @tx_list: driver common field for operations that require multiple
+ *      descriptors
+ * @chan: target channel for this operation
+ * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @tx_set_dest: set a destination address in a hardware descriptor
+ * @tx_set_src: set a source address in a hardware descriptor
+ * @callback: routine to call after this operation is complete
+ * @callback_param: general parameter to pass to the callback routine
+ * ---async_tx api specific fields---
+ * @depend_list: at completion this list of transactions are submitted
+ * @depend_node: allow this transaction to be executed after another
+ *      transaction has completed, possibly on another channel
+ * @parent: pointer to the next level up in the dependency chain
+ * @lock: protect the dependency list
+ */
+struct dma_async_tx_descriptor {
+        dma_cookie_t cookie;
+        int ack;
+        dma_addr_t phys;
+        struct list_head tx_list;
+        struct dma_chan *chan;
+        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+        void (*tx_set_dest)(dma_addr_t addr,
+                struct dma_async_tx_descriptor *tx, int index);
+        void (*tx_set_src)(dma_addr_t addr,
+                struct dma_async_tx_descriptor *tx, int index);
+        dma_async_tx_callback callback;
+        void *callback_param;
+        struct list_head depend_list;
+        struct list_head depend_node;
+        struct dma_async_tx_descriptor *parent;
+        spinlock_t lock;
+};
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @cap_mask: one or more dma_capability flags
+ * @max_xor: maximum number of xor sources, 0 if no capability
  * @refcount: reference count
  * @done: IO completion struct
  * @dev_id: unique device ID
+ * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
  *      number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
- * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
- * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
- * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
- * @device_memcpy_complete: poll the status of an IOAT DMA transaction
- * @device_memcpy_issue_pending: push appended descriptors to hardware
+ * @device_prep_dma_memcpy: prepares a memcpy operation
+ * @device_prep_dma_xor: prepares a xor operation
+ * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_dependency_added: async_tx notifies the channel about new deps
+ * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
 
         unsigned int chancnt;
         struct list_head channels;
         struct list_head global_node;
+        dma_cap_mask_t cap_mask;
+        int max_xor;
 
         struct kref refcount;
         struct completion done;
 
         int dev_id;
+        struct device *dev;
 
         int (*device_alloc_chan_resources)(struct dma_chan *chan);
         void (*device_free_chan_resources)(struct dma_chan *chan);
-        dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
-                        void *dest, void *src, size_t len);
-        dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
-                        struct page *page, unsigned int offset, void *kdata,
-                        size_t len);
-        dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
-                        struct page *dest_pg, unsigned int dest_off,
-                        struct page *src_pg, unsigned int src_off, size_t len);
-        enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
+
+        struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+                struct dma_chan *chan, size_t len, int int_en);
+        struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+                struct dma_chan *chan, unsigned int src_cnt, size_t len,
+                int int_en);
+        struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+                struct dma_chan *chan, unsigned int src_cnt, size_t len,
+                u32 *result, int int_en);
+        struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+                struct dma_chan *chan, int value, size_t len, int int_en);
+        struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+                struct dma_chan *chan);
+
+        void (*device_dependency_added)(struct dma_chan *chan);
+        enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
                         dma_cookie_t cookie, dma_cookie_t *last,
                         dma_cookie_t *used);
-        void (*device_memcpy_issue_pending)(struct dma_chan *chan);
+        void (*device_issue_pending)(struct dma_chan *chan);
 };
 
 /* --- public DMA engine API --- */
@@ -207,96 +289,72 @@ struct dma_client *dma_async_client_register(dma_event_callback event_callback);
 void dma_async_client_unregister(struct dma_client *client);
 void dma_async_client_chan_request(struct dma_client *client,
                 unsigned int number);
+dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
+        void *dest, void *src, size_t len);
+dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
+        struct page *page, unsigned int offset, void *kdata, size_t len);
+dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
+        struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
+        unsigned int src_off, size_t len);
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+        struct dma_chan *chan);
 
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-        void *dest, void *src, size_t len)
-{
-        int cpu = get_cpu();
-        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-        put_cpu();
 
-        return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
+static inline void
+async_tx_ack(struct dma_async_tx_descriptor *tx)
+{
+        tx->ack = 1;
 }
 
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-        struct page *page, unsigned int offset, void *kdata, size_t len)
+#define first_dma_cap(mask) __first_dma_cap(&(mask))
+static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
 {
-        int cpu = get_cpu();
-        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-        put_cpu();
+        return min_t(int, DMA_TX_TYPE_END,
+                find_first_bit(srcp->bits, DMA_TX_TYPE_END));
+}
 
-        return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
-                        kdata, len);
+#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
+static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
+{
+        return min_t(int, DMA_TX_TYPE_END,
+                find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
 }
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-        struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-        unsigned int src_off, size_t len)
+#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+static inline void
+__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 {
-        int cpu = get_cpu();
-        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-        put_cpu();
+        set_bit(tx_type, dstp->bits);
+}
 
-        return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
-                src_pg, src_off, len);
+#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+static inline int
+__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+{
+        return test_bit(tx_type, srcp->bits);
 }
 
+#define for_each_dma_cap_mask(cap, mask) \
+        for ((cap) = first_dma_cap(mask); \
+                (cap) < DMA_TX_TYPE_END; \
+                (cap) = next_dma_cap((cap), (mask)))
+
 /**
- * dma_async_memcpy_issue_pending - flush pending copies to HW
+ * dma_async_issue_pending - flush pending transactions to HW
  * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
  * reducing MMIO writes where possible.
  */
-static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
+static inline void dma_async_issue_pending(struct dma_chan *chan)
 {
-        return chan->device->device_memcpy_issue_pending(chan);
+        return chan->device->device_issue_pending(chan);
 }
 
+#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
+
 /**
- * dma_async_memcpy_complete - poll for transaction completion
+ * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
  * @last: returns last completed cookie, can be NULL
@@ -306,12 +364,15 @@ static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
-static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
+static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
         dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-        return chan->device->device_memcpy_complete(chan, cookie, last, used);
+        return chan->device->device_is_tx_complete(chan, cookie, last, used);
 }
 
+#define dma_async_memcpy_complete(chan, cookie, last, used)\
+        dma_async_is_tx_complete(chan, cookie, last, used)
+
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
@@ -334,6 +395,7 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
         return DMA_IN_PROGRESS;
 }
 
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 
 /* --- DMA device --- */
 
@@ -362,5 +424,4 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
         struct dma_pinned_list *pinned_list, struct page *page,
         unsigned int offset, size_t len);
 
-#endif /* CONFIG_DMA_ENGINE */
 #endif /* DMAENGINE_H */
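
For completeness, the renamed status helpers at the bottom of the diff are
used roughly as follows.  This is a sketch, not code from the patch: the
function name is made up, cookie comes from a prior tx_submit, and
DMA_SUCCESS is the existing success value in enum dma_status.

static int example_poll_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
        dma_cookie_t last, used;

        if (dma_async_is_tx_complete(chan, cookie, &last, &used) == DMA_SUCCESS)
                return 0;

        /* or block until the transaction finishes (declared by this patch) */
        return dma_sync_wait(chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
}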