diff options
author | Dan Williams <dan.j.williams@intel.com> | 2010-05-17 19:24:16 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2010-05-17 19:24:16 -0400 |
commit | caa20d974c86af496b419eef70010e63b7fab7ac (patch) | |
tree | a38165bd839a398528a4ef4c7fa8481fb0fefed3 /include/linux/dmaengine.h | |
parent | c86e1401c9f2ba8d989fa1c4b33d0f0ec3ba8aaf (diff) |
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
Saves 24 bytes per descriptor (64-bit) when the channel-switching
capabilities of async_tx are not required.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 60 |
1 file changed, 60 insertions, 0 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 20ea12c86fd0..cb234979fc6b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -230,11 +230,71 @@ struct dma_async_tx_descriptor { | |||
230 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 230 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
231 | dma_async_tx_callback callback; | 231 | dma_async_tx_callback callback; |
232 | void *callback_param; | 232 | void *callback_param; |
233 | #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
233 | struct dma_async_tx_descriptor *next; | 234 | struct dma_async_tx_descriptor *next; |
234 | struct dma_async_tx_descriptor *parent; | 235 | struct dma_async_tx_descriptor *parent; |
235 | spinlock_t lock; | 236 | spinlock_t lock; |
237 | #endif | ||
236 | }; | 238 | }; |
237 | 239 | ||
240 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
241 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) | ||
242 | { | ||
243 | } | ||
244 | static inline void txd_unlock(struct dma_async_tx_descriptor *txd) | ||
245 | { | ||
246 | } | ||
247 | static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) | ||
248 | { | ||
249 | BUG(); | ||
250 | } | ||
251 | static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) | ||
252 | { | ||
253 | } | ||
254 | static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) | ||
255 | { | ||
256 | } | ||
257 | static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) | ||
258 | { | ||
259 | return NULL; | ||
260 | } | ||
261 | static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) | ||
262 | { | ||
263 | return NULL; | ||
264 | } | ||
265 | |||
266 | #else | ||
267 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) | ||
268 | { | ||
269 | spin_lock_bh(&txd->lock); | ||
270 | } | ||
271 | static inline void txd_unlock(struct dma_async_tx_descriptor *txd) | ||
272 | { | ||
273 | spin_unlock_bh(&txd->lock); | ||
274 | } | ||
275 | static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) | ||
276 | { | ||
277 | txd->next = next; | ||
278 | next->parent = txd; | ||
279 | } | ||
280 | static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) | ||
281 | { | ||
282 | txd->parent = NULL; | ||
283 | } | ||
284 | static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) | ||
285 | { | ||
286 | txd->next = NULL; | ||
287 | } | ||
288 | static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) | ||
289 | { | ||
290 | return txd->parent; | ||
291 | } | ||
292 | static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) | ||
293 | { | ||
294 | return txd->next; | ||
295 | } | ||
296 | #endif | ||
297 | |||
238 | /** | 298 | /** |
239 | * struct dma_device - info on the entity supplying DMA services | 299 | * struct dma_device - info on the entity supplying DMA services |
240 | * @chancnt: how many DMA channels are supported | 300 | * @chancnt: how many DMA channels are supported |