author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:38:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:38:55 -0400
commit	07fe944e87d79f8d7e1b090913fe9f2ace78f41d (patch)
tree	b20a3576d61d4dc3d89cf47515a786b31fd997d6 /include
parent	8019aa946af5218bc4446c21e43cc19c9401ac68 (diff)
parent	636bdeaa1243327501edfd2a597ed7443eb4239a (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dmaengine: ack to flags: make use of the unused bits in the 'ack' field
  iop-adma: remove the workaround for missed interrupts on iop3xx
  async_tx: kill ->device_dependency_added
  async_tx: fix multiple dependency submission
  fsldma: Split the MPC83xx event from MPC85xx and refine irq codes.
  fsldma: Remove CONFIG_FSL_DMA_SELFTEST, keep fsl_dma_self_test() running always.
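The "ack to flags" change folds the old integer ack field into a bit inside the descriptor's flags word, so clients stop poking tx->ack directly and use the helpers instead. A minimal before/after sketch from a client's point of view (free_descriptor() is a hypothetical reclaim routine, not part of this merge):

	/* before this merge: the descriptor carried a bare 'int ack' */
	if (tx->ack)
		free_descriptor(tx);		/* hypothetical reclaim path */

	/* after: the bit lives in tx->flags and is set/tested via helpers */
	async_tx_ack(tx);			/* equivalent of the old tx->ack = 1 */
	if (async_tx_test_ack(tx))
		free_descriptor(tx);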
Diffstat (limited to 'include')
-rw-r--r--	include/asm-arm/arch-iop13xx/adma.h	5
-rw-r--r--	include/asm-arm/hardware/iop3xx-adma.h	8
-rw-r--r--	include/asm-arm/hardware/iop_adma.h	2
-rw-r--r--	include/linux/dmaengine.h	36
4 files changed, 21 insertions(+), 30 deletions(-)
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index efd9a5eb1008..90d14ee564f5 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -454,11 +454,6 @@ static inline void iop_chan_append(struct iop_adma_chan *chan)
 	__raw_writel(adma_accr, ADMA_ACCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	do { } while (0);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(ADMA_ACSR(chan));
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 5c529e6a5e3b..84d635b0a71a 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -767,20 +767,12 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 static inline void iop_chan_append(struct iop_adma_chan *chan)
 {
 	u32 dma_chan_ctrl;
-	/* workaround dropped interrupts on 3xx */
-	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
 
 	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
 	dma_chan_ctrl |= 0x2;
 	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	if (!busy)
-		del_timer(&chan->cleanup_watchdog);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(DMA_CSR(chan));
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
index ca8e71f44346..cb7e3611bcba 100644
--- a/include/asm-arm/hardware/iop_adma.h
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -51,7 +51,6 @@ struct iop_adma_device {
  * @common: common dmaengine channel object members
  * @last_used: place holder for allocation to continue from where it left off
  * @all_slots: complete domain of slots usable by the channel
- * @cleanup_watchdog: workaround missed interrupts on iop3xx
  * @slots_allocated: records the actual size of the descriptor slot pool
  * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
  */
@@ -65,7 +64,6 @@ struct iop_adma_chan {
 	struct dma_chan common;
 	struct iop_adma_desc_slot *last_used;
 	struct list_head all_slots;
-	struct timer_list cleanup_watchdog;
 	int slots_allocated;
 	struct tasklet_struct irq_tasklet;
 };
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 34d440698293..b4d84ed6187d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,12 +95,17 @@ enum dma_transaction_type {
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
 /**
- * enum dma_prep_flags - DMA flags to augment operation preparation
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
+ * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ *  acknowledges receipt, i.e. has has a chance to establish any
+ *  dependency chains
  */
-enum dma_prep_flags {
+enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
+	DMA_CTRL_ACK = (1 << 1),
 };
 
 /**
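Because the new DMA_CTRL_ACK bit travels in the same flags word that is passed at prepare time, a client can request a completion interrupt and pre-acknowledge the descriptor in one call. A hedged sketch, assuming 'dev' and 'chan' are an already-bound dma_device/dma_chan pair and that device_prep_dma_memcpy takes the same unsigned long flags as the other prepare hooks shown later in this patch:

	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_async_tx_descriptor *tx;

	/* prepare a memcpy descriptor that interrupts on completion and
	 * may be recycled by the driver as soon as it completes */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
	if (!tx)
		return -ENOMEM;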
@@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *	this tx is sitting on a dependency list
- * @ack: the descriptor can not be reused until the client acknowledges
- *	receipt, i.e. has has a chance to establish any dependency chains
+ * @flags: flags to augment operation preparation, control completion, and
+ *	communicate status
  * @phys: physical address of the descriptor
  * @tx_list: driver common field for operations that require multiple
  *	descriptors
@@ -221,23 +226,20 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
- * @depend_list: at completion this list of transactions are submitted
- * @depend_node: allow this transaction to be executed after another
- *	transaction has completed, possibly on another channel
+ * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
- * @lock: protect the dependency list
+ * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
-	int ack;
+	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
 	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-	struct list_head depend_list;
-	struct list_head depend_node;
+	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 };
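With the dependency list gone, a descriptor that cannot run yet is parked on its parent's ->next pointer and resubmitted when the parent completes, and ->lock now guards only those two pointers. A hedged sketch of how a completion path might walk the chain (illustrative only, not quoted from the async_tx core):

	/* on completion of 'tx', hand each parked dependent to its channel */
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;

	while (dep) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;		/* dependency satisfied */
		dep_next = dep->next;
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
		dep = dep_next;
	}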
@@ -261,7 +263,6 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_zero_sum: prepares a zero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
- * @device_dependency_added: async_tx notifies the channel about new deps
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -294,9 +295,8 @@ struct dma_device {
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
-		struct dma_chan *chan);
+		struct dma_chan *chan, unsigned long flags);
 
-	void (*device_dependency_added)(struct dma_chan *chan);
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 		dma_cookie_t cookie, dma_cookie_t *last,
 		dma_cookie_t *used);
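Since device_prep_dma_interrupt() now takes flags, the async_tx core (or a client) can mark a standalone interrupt descriptor as pre-acknowledged when asking for one. A minimal sketch, assuming 'dev'/'chan' as above and a hypothetical completion callback:

	struct dma_async_tx_descriptor *tx;

	/* end-of-chain interrupt descriptor, already acked for reuse */
	tx = dev->device_prep_dma_interrupt(chan, DMA_CTRL_ACK);
	if (tx) {
		tx->callback = my_irq_done;	/* hypothetical callback */
		tx->callback_param = my_ctx;	/* hypothetical context */
		tx->tx_submit(tx);
	}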
@@ -321,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 static inline void
 async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
-	tx->ack = 1;
+	tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline int
+async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+	return tx->flags & DMA_CTRL_ACK;
 }
 
 #define first_dma_cap(mask) __first_dma_cap(&(mask))
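The new async_tx_test_ack() helper gives drivers one place to check the bit before recycling a descriptor, instead of reading the removed tx->ack field. A hedged sketch of a cleanup loop, loosely modeled on slot-pool drivers such as iop-adma (struct my_desc_slot and the two lists are hypothetical):

	struct my_desc_slot *slot, *tmp;

	list_for_each_entry_safe(slot, tmp, &chan->used_slots, node) {
		/* only recycle descriptors the client has acknowledged */
		if (!async_tx_test_ack(&slot->async_tx))
			continue;
		list_move_tail(&slot->node, &chan->free_slots);
	}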