| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 20:05:46 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 20:05:46 -0400 |
| commit | 6f68fbaafbaa033205cd131d3e1f3c4b914e9b78 | |
| tree | 56b434496064ed170f94381e3ec4c6c340b71376 | /include/linux |
| parent | 6e4513972a5ad28517477d21f301a02ac7a0df76 | |
| parent | 0b28330e39bbe0ffee4c56b09fc415fcec595ea3 | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
DMAENGINE: extend the control command to include an arg
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
DMAENGINE: DMA40 fix for allocation of logical channel 0
DMAENGINE: DMA40 support paused channel status
dmaengine: mpc512x: Use resource_size
DMA ENGINE: Do not reset 'private' of channel
ioat: Remove duplicated devm_kzalloc() calls for ioatdma_device
ioat3: disable cacheline-unaligned transfers for raid operations
ioat2,3: convert to producer/consumer locking
ioat: convert to circ_buf
DMAENGINE: Support for ST-Ericssons DMA40 block v3
async_tx: use of kzalloc/kfree requires the include of slab.h
dmaengine: provide helper for setting txstate
DMAENGINE: generic channel status v2
DMAENGINE: generic slave control v2
dma: timb-dma: Update comment and fix compiler warning
dma: Add timb-dma
DMAENGINE: COH 901 318 fix bytesleft
DMAENGINE: COH 901 318 rename confusing vars
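
The "generic slave control v2" and "generic channel status v2" entries above replace the old device_terminate_all and device_is_tx_complete hooks with a command-based control call and a richer status call (the full dmaengine.h diff is below). As a rough, hypothetical illustration of what a client can now do with a channel, assuming `chan` and `cookie` were obtained earlier via dma_request_channel() and tx_submit():

```c
#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Illustrative sketch only -- not part of this merge.  Pause a slave
 * channel, query how much data is left, then resume it. */
static void example_pause_and_query(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	/* New generic control hook: a command plus a driver-specific arg
	 * (unused here, hence 0). */
	if (chan->device->device_control(chan, DMA_PAUSE, 0))
		return;

	/* New status hook: fills in last/used cookies and, if the driver
	 * implements it, the residue in bytes. */
	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("transfer paused, %u bytes left\n", state.residue);

	chan->device->device_control(chan, DMA_RESUME, 0);
}
```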
Diffstat (limited to 'include/linux')

| -rw-r--r-- | include/linux/dmaengine.h | 127 |
| -rw-r--r-- | include/linux/timb_dma.h | 55 |

2 files changed, 175 insertions(+), 7 deletions(-)
```diff
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..5204f018931b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -40,11 +40,13 @@ typedef s32 dma_cookie_t;
  * enum dma_status - DMA transaction status
  * @DMA_SUCCESS: transaction completed successfully
  * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
 	DMA_SUCCESS,
 	DMA_IN_PROGRESS,
+	DMA_PAUSED,
 	DMA_ERROR,
 };
 
@@ -107,6 +109,19 @@ enum dma_ctrl_flags {
 };
 
 /**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+	DMA_TERMINATE_ALL,
+	DMA_PAUSE,
+	DMA_RESUME,
+};
+
+/**
  * enum sum_check_bits - bit position of pq_check_flags
  */
 enum sum_check_bits {
@@ -230,9 +245,84 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+	spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+	spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	txd->next = next;
+	next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+	txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+	txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ *	on the selected transfer for states DMA_IN_PROGRESS and
+ *	DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+	dma_cookie_t last;
+	dma_cookie_t used;
+	u32 residue;
 };
 
 /**
@@ -261,8 +351,12 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel, returns
+ *	zero or error code
+ * @device_tx_status: poll for transaction completion, the optional
+ *	txstate parameter can be supplied with a pointer to get a
+ *	struct with auxilary transfer status information, otherwise the call
+ *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -313,11 +407,12 @@ struct dma_device {
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags);
-	void (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg);
 
-	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
@@ -558,7 +653,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_is_tx_complete(chan, cookie, last, used);
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, cookie, &state);
+	if (last)
+		*last = state.last;
+	if (used)
+		*used = state.used;
+	return status;
 }
 
 #define dma_async_memcpy_complete(chan, cookie, last, used)\
@@ -586,6 +689,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 	return DMA_IN_PROGRESS;
 }
 
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+	if (st) {
+		st->last = last;
+		st->used = used;
+		st->residue = residue;
+	}
+}
+
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
```
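
On the provider side, a driver now implements device_tx_status and device_control instead of the removed hooks. The following is only a sketch under invented names (everything prefixed foo_ is hypothetical, not from this merge), showing how the dma_set_tx_state() helper and the dma_ctrl_cmd commands introduced above fit together:

```c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dmaengine.h>

/* Hypothetical per-channel bookkeeping; all foo_* names are invented. */
struct foo_chan {
	struct dma_chan chan;
	dma_cookie_t completed_cookie;	/* last cookie the hardware finished */
	dma_cookie_t last_cookie;	/* last cookie returned by tx_submit */
	u32 bytes_left;			/* residue tracked by the driver */
	bool paused;
};

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, chan);
	enum dma_status ret;

	ret = dma_async_is_complete(cookie, fc->completed_cookie, fc->last_cookie);
	if (ret == DMA_IN_PROGRESS && fc->paused)
		ret = DMA_PAUSED;

	/* txstate is optional; dma_set_tx_state() tolerates a NULL pointer. */
	dma_set_tx_state(txstate, fc->completed_cookie, fc->last_cookie,
			 fc->bytes_left);
	return ret;
}

static int foo_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		fc->bytes_left = 0;	/* a real driver stops the hardware
					 * and frees queued descriptors here */
		return 0;
	case DMA_PAUSE:
		fc->paused = true;	/* a real driver halts the channel */
		return 0;
	case DMA_RESUME:
		fc->paused = false;	/* a real driver restarts the channel */
		return 0;
	default:
		return -ENXIO;		/* unknown command */
	}
}
```

A driver would then set dma_dev->device_tx_status = foo_tx_status and dma_dev->device_control = foo_control before calling dma_async_device_register(); drivers that cannot report a residue can simply pass 0 to dma_set_tx_state(), as the @residue kerneldoc above allows.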
```diff
diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h
new file mode 100644
index 000000000000..bb043e970b96
--- /dev/null
+++ b/include/linux/timb_dma.h
@@ -0,0 +1,55 @@
+/*
+ * timb_dma.h timberdale FPGA DMA driver defines
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#ifndef _LINUX_TIMB_DMA_H
+#define _LINUX_TIMB_DMA_H
+
+/**
+ * struct timb_dma_platform_data_channel - Description of each individual
+ *	DMA channel for the timberdale DMA driver
+ * @rx: true if this channel handles data in the direction to
+ *	the CPU.
+ * @bytes_per_line: Number of bytes per line, this is specific for channels
+ *	handling video data. For other channels this shall be left to 0.
+ * @descriptors: Number of descriptors to allocate for this channel.
+ * @descriptor_elements: Number of elements in each descriptor.
+ *
+ */
+struct timb_dma_platform_data_channel {
+	bool rx;
+	unsigned int bytes_per_line;
+	unsigned int descriptors;
+	unsigned int descriptor_elements;
+};
+
+/**
+ * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
+ * @nr_channels: Number of defined channels in the channels array.
+ * @channels: Definition of the each channel.
+ *
+ */
+struct timb_dma_platform_data {
+	unsigned nr_channels;
+	struct timb_dma_platform_data_channel channels[32];
+};
+
+#endif
```
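
To show how the new header is meant to be consumed, here is a purely hypothetical board-support sketch (the channel layout and all values are invented, not taken from the timb-dma driver) that fills in the platform data for two channels:

```c
#include <linux/timb_dma.h>

/* Illustrative only: one RX video channel and one ordinary TX channel. */
static struct timb_dma_platform_data example_timb_dma_pdata = {
	.nr_channels = 2,
	.channels = {
		{
			.rx = true,		/* data flows toward the CPU */
			.bytes_per_line = 1440,	/* video channel: bytes per scan line */
			.descriptors = 2,
			.descriptor_elements = 16,
		},
		{
			.rx = false,		/* transmit channel */
			.bytes_per_line = 0,	/* not video, so left at 0 */
			.descriptors = 8,
			.descriptor_elements = 4,
		},
	},
};
```

The structure would typically be handed to the driver as platform_data on the platform device that backs the DMA block; the remaining entries of the channels[32] array are simply left zeroed.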
