about summary refs log tree commit diff stats
path: root/include/linux/dmaengine.h
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2010-05-17 19:24:16 -0400
committerDan Williams <dan.j.williams@intel.com>2010-05-17 19:24:16 -0400
commitcaa20d974c86af496b419eef70010e63b7fab7ac (patch)
treea38165bd839a398528a4ef4c7fa8481fb0fefed3 /include/linux/dmaengine.h
parentc86e1401c9f2ba8d989fa1c4b33d0f0ec3ba8aaf (diff)
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
Saves 24 bytes per descriptor (64-bit) when the channel-switching capabilities of async_tx are not required.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--include/linux/dmaengine.h60
1 file changed, 60 insertions, 0 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..cb234979fc6b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -230,11 +230,71 @@ struct dma_async_tx_descriptor {
230 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 230 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
231 dma_async_tx_callback callback; 231 dma_async_tx_callback callback;
232 void *callback_param; 232 void *callback_param;
233#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
233 struct dma_async_tx_descriptor *next; 234 struct dma_async_tx_descriptor *next;
234 struct dma_async_tx_descriptor *parent; 235 struct dma_async_tx_descriptor *parent;
235 spinlock_t lock; 236 spinlock_t lock;
237#endif
236}; 238};
237 239
240#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
241static inline void txd_lock(struct dma_async_tx_descriptor *txd)
242{
243}
244static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
245{
246}
247static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
248{
249 BUG();
250}
251static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
252{
253}
254static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
255{
256}
257static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
258{
259 return NULL;
260}
261static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
262{
263 return NULL;
264}
265
266#else
267static inline void txd_lock(struct dma_async_tx_descriptor *txd)
268{
269 spin_lock_bh(&txd->lock);
270}
271static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
272{
273 spin_unlock_bh(&txd->lock);
274}
275static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
276{
277 txd->next = next;
278 next->parent = txd;
279}
280static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
281{
282 txd->parent = NULL;
283}
284static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
285{
286 txd->next = NULL;
287}
288static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
289{
290 return txd->parent;
291}
292static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
293{
294 return txd->next;
295}
296#endif
297
238/** 298/**
239 * struct dma_device - info on the entity supplying DMA services 299 * struct dma_device - info on the entity supplying DMA services
240 * @chancnt: how many DMA channels are supported 300 * @chancnt: how many DMA channels are supported