author     Dan Williams <dan.j.williams@intel.com>  2008-04-17 23:17:26 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2008-04-17 16:25:54 -0400
commit     636bdeaa1243327501edfd2a597ed7443eb4239a (patch)
tree       59b894f124e3664ea4a537d7c07c527abdb9c8da /include/linux/dmaengine.h
parent     c4fe15541d0ef5cc8cc1ce43057663851f8fc387 (diff)
dmaengine: ack to flags: make use of the unused bits in the 'ack' field
'ack' is currently a simple integer that flags whether or not a client is done
touching fields in the given descriptor.  It is effectively just a single bit
of information.  Converting this to a flags parameter allows the other bits to
be put to use to control completion actions, like dma-unmap, and capture
results, like xor-zero-sum == 0.

Changes are one of:
1/ convert all open-coded ->ack manipulations to use async_tx_ack and
   async_tx_test_ack.
2/ set the ack bit at prep time where possible
3/ make drivers store the flags at prep time
4/ add flags to the device_prep_dma_interrupt prototype

Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
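[Editor's note] As a rough sketch of what changes 1/ and 4/ look like on the client side; the tx, chan, dev, and free_desc() names below are illustrative only, while async_tx_ack(), async_tx_test_ack(), and DMA_CTRL_ACK come from this patch:

	/* before: open-coded manipulation of the old ->ack integer */
	tx->ack = 1;
	if (tx->ack)
		free_desc(tx);

	/* after: the bit lives in tx->flags and is reached via helpers */
	async_tx_ack(tx);			/* sets DMA_CTRL_ACK */
	if (async_tx_test_ack(tx))		/* tests DMA_CTRL_ACK */
		free_desc(tx);

	/* change 4/: interrupt descriptors can now be acked at prep time */
	tx = dev->device_prep_dma_interrupt(chan, DMA_CTRL_ACK);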
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h  |  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cd34df78c6aa..b4d84ed6187d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,12 +95,17 @@ enum dma_transaction_type {
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
 /**
- * enum dma_prep_flags - DMA flags to augment operation preparation
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
+ * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ *  acknowledges receipt, i.e. has has a chance to establish any
+ *  dependency chains
  */
-enum dma_prep_flags {
+enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
+	DMA_CTRL_ACK = (1 << 1),
 };
 
 /**
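[Editor's note] Since DMA_PREP_INTERRUPT and DMA_CTRL_ACK now share one flags word, a client can request both behaviours in a single prep call; a hedged example (variable names are illustrative, device_prep_dma_memcpy is the existing memcpy prep hook):

	struct dma_async_tx_descriptor *tx;

	tx = dev->device_prep_dma_memcpy(chan, dest, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);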
@@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *	this tx is sitting on a dependency list
- * @ack: the descriptor can not be reused until the client acknowledges
- *	receipt, i.e. has has a chance to establish any dependency chains
+ * @flags: flags to augment operation preparation, control completion, and
+ *	communicate status
  * @phys: physical address of the descriptor
  * @tx_list: driver common field for operations that require multiple
  *	descriptors
@@ -227,7 +232,7 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
-	int ack;
+	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
 	struct list_head tx_list;
 	struct dma_chan *chan;
@@ -290,7 +295,7 @@ struct dma_device {
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
-		struct dma_chan *chan);
+		struct dma_chan *chan, unsigned long flags);
 
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
@@ -316,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
-	tx->ack = 1;
+	tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline int
+async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+	return tx->flags & DMA_CTRL_ACK;
 }
 
 #define first_dma_cap(mask) __first_dma_cap(&(mask))
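[Editor's note] With the two inlines above, neither clients nor drivers touch an 'ack' member directly any more; a driver's descriptor clean-up path would test the bit roughly as sketched here (desc, pool, and the embedded txd field are purely illustrative):

	/* only recycle descriptors the client has acknowledged */
	if (async_tx_test_ack(&desc->txd))
		dma_pool_free(pool, desc, desc->txd.phys);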