| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 22:04:36 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 22:04:36 -0400 |
| commit | e3e1288e86a07cdeb0aee5860a2dff111c6eff79 (patch) | |
| tree | cd22f8051a456c9d2b95698b6fe402776a67469b /include/linux | |
| parent | 9ae6d039224def926656206725ae6e89d1331417 (diff) | |
| parent | 964dc256bb91e990277010a3f6dc66daa130be8b (diff) | |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (48 commits)
DMAENGINE: move COH901318 to arch_initcall
dma: imx-dma: fix signedness bug
dma/timberdale: simplify conditional
ste_dma40: remove channel_type
ste_dma40: remove enum for endianess
ste_dma40: remove TIM_FOR_LINK option
ste_dma40: move mode_opt to separate config
ste_dma40: move channel mode to a separate field
ste_dma40: move priority to separate field
ste_dma40: add variable to indicate valid dma_cfg
async_tx: make async_tx channel switching opt-in
move async raid6 test to lib/Kconfig.debug
dmaengine: Add Freescale i.MX1/21/27 DMA driver
intel_mid_dma: change the slave interface
intel_mid_dma: fix the WARN_ONs
intel_mid_dma: Add sg list support to DMA driver
intel_mid_dma: Allow DMAC2 to share interrupt
intel_mid_dma: Allow IRQ sharing
intel_mid_dma: Add runtime PM support
DMAENGINE: define a dummy filter function for ste_dma40
...
Diffstat (limited to 'include/linux')
| mode | path | changed lines |
|---|---|---|
| -rw-r--r-- | include/linux/amba/pl08x.h | 222 |
| -rw-r--r-- | include/linux/dmaengine.h | 60 |
| -rw-r--r-- | include/linux/intel_mid_dma.h | 16 |
3 files changed, 280 insertions, 18 deletions
`diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h` — new file, mode 100644, index 000000000000..521a0f8974ac (222 lines added):

```c
/*
 * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
 *
 * Copyright (C) 2005 ARM Ltd
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * pl08x information required by platform code
 *
 * Please credit ARM.com
 * Documentation: ARM DDI 0196D
 *
 */

#ifndef AMBA_PL08X_H
#define AMBA_PL08X_H

/* We need sizes of structs from this header */
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

/**
 * struct pl08x_channel_data - data structure to pass info between
 * platform and PL08x driver regarding channel configuration
 * @bus_id: name of this device channel, not just a device name since
 * devices may have more than one channel e.g. "foo_tx"
 * @min_signal: the minimum DMA signal number to be muxed in for this
 * channel (for platforms supporting muxed signals). If you have
 * static assignments, make sure this is set to the assigned signal
 * number. The PL08x has 16 possible signals, numbered 0 thru 15, so
 * when these are not enough they often get muxed (in hardware),
 * preventing simultaneous use of the same channel for two devices.
 * @max_signal: the maximum DMA signal number to be muxed in for
 * the channel. Set to the same as min_signal for
 * devices with static assignments
 * @muxval: a number usually used to poke into some mux register to
 * mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
 * @addr: source/target address in physical memory for this DMA channel,
 * can be the address of a FIFO register for burst requests for example.
 * This can be left undefined if the PrimeCell API is used for configuring
 * this.
 * @circular_buffer: whether the buffer passed in is circular and
 * shall simply be looped round round (like a record baby round
 * round round round)
 * @single: the device connected to this channel will request single
 * DMA transfers, not bursts. (Bursts are default.)
 */
struct pl08x_channel_data {
	char *bus_id;
	int min_signal;
	int max_signal;
	u32 muxval;
	u32 cctl;
	u32 ccfg;
	dma_addr_t addr;
	bool circular_buffer;
	bool single;
};

/**
 * struct pl08x_bus_data - information on source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 * @fill_bytes: bytes required to fill to the next bus memory
 * boundary
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
	u32 fill_bytes;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @signal: the physical signal (aka channel) serving this
 * physical channel right now
 * @serving: the virtual channel currently being served by this
 * physical channel
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	int signal;
	struct pl08x_dma_chan *serving;
	u32 csrc;
	u32 cdst;
	u32 clli;
	u32 cctl;
	u32 ccfg;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	enum dma_data_direction direction;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	int len;
	dma_addr_t llis_bus;
	void *llis_va;
	struct pl08x_channel_data *cd;
	bool active;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd
	 */
	u32 csrc;
	u32 cdst;
	u32 clli;
	u32 cctl;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual
 * channel states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrapped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @runtime_direction: current direction of this channel according to
 * runtime config
 * @lc: last completed transaction on this channel
 * @desc_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lockflags: sometimes we let a lock last between two function calls,
 * especially prep/submit, and then we need to store the IRQ flags
 * in the channel state, here
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @waiting: a TX descriptor on this channel which is waiting for
 * a physical channel to become available
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	struct tasklet_struct tasklet;
	char *name;
	struct pl08x_channel_data *cd;
	dma_addr_t runtime_addr;
	enum dma_data_direction runtime_direction;
	atomic_t last_issued;
	dma_cookie_t lc;
	struct list_head desc_list;
	struct pl08x_txd *at;
	unsigned long lockflags;
	spinlock_t lock;
	void *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	struct pl08x_txd *waiting;
};

/**
 * struct pl08x_platform_data - the platform configuration for the
 * PL08x PrimeCells.
 * @slave_channels: the channels defined for the different devices on the
 * platform, all inclusive, including multiplexed channels. The available
 * physical channels will be multiplexed around these signals as they
 * are requested, just enumerate all possible channels.
 * @get_signal: request a physical signal to be used for a DMA
 * transfer immediately: if there is some multiplexing or similar blocking
 * the use of the channel the transfer can be denied by returning
 * less than zero, else it returns the allocated signal number
 * @put_signal: indicate to the platform that this physical signal is not
 * running any DMA transfer and the multiplexing can be recycled
 * @bus_bit_lli: Bit[0] of the address indicates which AHB bus master the
 * LLI addresses are on: 0/1 for master 1/2.
 */
struct pl08x_platform_data {
	struct pl08x_channel_data *slave_channels;
	unsigned int num_slave_channels;
	struct pl08x_channel_data memcpy_channel;
	int (*get_signal)(struct pl08x_dma_chan *);
	void (*put_signal)(struct pl08x_dma_chan *);
};

#ifdef CONFIG_AMBA_PL08X
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
#else
static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	return false;
}
#endif

#endif /* AMBA_PL08X_H */
```
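The platform-data contract above is easiest to see from the board side. The following is a minimal sketch, not part of this commit, of how platform code might describe one slave channel and register the callbacks; the channel name, signal number and FIFO address are invented for illustration:

```c
/* Hypothetical board support code for a PL08x user (illustrative only). */
static struct pl08x_channel_data board_slave_channels[] = {
	{
		.bus_id = "uart0_tx",		/* name a peripheral driver will ask for */
		.min_signal = 0,		/* statically assigned DMA request signal */
		.max_signal = 0,		/* same as min_signal: no muxing needed */
		.muxval = 0,
		.addr = 0x80120000,		/* made-up peripheral FIFO address */
		.circular_buffer = false,
		.single = false,		/* use burst transfers (the default) */
	},
};

/* With static assignments the signal is simply taken from the channel data. */
static int board_get_signal(struct pl08x_dma_chan *ch)
{
	return ch->cd->min_signal;
}

static void board_put_signal(struct pl08x_dma_chan *ch)
{
	/* nothing to release on this hypothetical board */
}

static struct pl08x_platform_data pl08x_plat_data = {
	.slave_channels		= board_slave_channels,
	.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
	.get_signal		= board_get_signal,
	.put_signal		= board_put_signal,
};
```

A peripheral driver would then obtain such a channel through the usual dmaengine filter mechanism, e.g. `dma_request_channel(mask, pl08x_filter_id, "uart0_tx")` with `DMA_SLAVE` set in the capability mask; the `chan_id` argument is intended to be matched against the channel's `bus_id` name.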
```diff
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2106495cc11..9d8688b92d8b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -64,13 +64,15 @@ enum dma_transaction_type {
 	DMA_PQ_VAL,
 	DMA_MEMSET,
 	DMA_INTERRUPT,
+	DMA_SG,
 	DMA_PRIVATE,
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
+	DMA_CYCLIC,
 };
 
 /* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
+#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
 
 
 /**
@@ -119,12 +121,15 @@ enum dma_ctrl_flags {
  * configuration data in statically from the platform). An additional
  * argument of struct dma_slave_config must be passed in with this
  * command.
+ * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
+ * into external start mode.
  */
 enum dma_ctrl_cmd {
 	DMA_TERMINATE_ALL,
 	DMA_PAUSE,
 	DMA_RESUME,
 	DMA_SLAVE_CONFIG,
+	FSLDMA_EXTERNAL_START,
 };
 
 /**
@@ -316,14 +321,14 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 #endif
 };
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
 }
@@ -422,6 +427,9 @@ struct dma_tx_state {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ *	The function takes a buffer of size buf_len. The callback function will
+ *	be called after period_len bytes have been transferred.
  * @device_control: manipulate all pending operations on a channel, returns
  * zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -473,11 +481,19 @@ struct dma_device {
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags);
 
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction);
 	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		unsigned long arg);
 
@@ -487,6 +503,40 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline int dmaengine_device_control(struct dma_chan *chan,
+					   enum dma_ctrl_cmd cmd,
+					   unsigned long arg)
+{
+	return chan->device->device_control(chan, cmd, arg);
+}
+
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+					 struct dma_slave_config *config)
+{
+	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
+			(unsigned long)config);
+}
+
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+}
+
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+	return dmaengine_device_control(chan, DMA_PAUSE, 0);
+}
+
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+	return dmaengine_device_control(chan, DMA_RESUME, 0);
+}
+
+static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+	return desc->tx_submit(desc);
+}
+
 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
 {
 	size_t mask;
@@ -606,11 +656,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
 #else
 #define async_dma_find_channel(type) dma_find_channel(type)
-#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
```
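The block of new `dmaengine_*` inlines gives slave drivers a uniform way to reach `device_control` and `tx_submit` without dereferencing `chan->device` by hand. Below is a minimal sketch of the intended call sequence; the FIFO address, burst size and scatterlist belong to a hypothetical calling driver and are not part of this patch:

```c
/* Illustrative slave-DMA submit path using the new helpers (sketch only). */
static int start_tx(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo_addr,			/* peripheral FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* issues DMA_SLAVE_CONFIG */
	if (ret)
		return ret;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* start the hardware */
	return 0;
}
```

On error or shutdown the same driver would call `dmaengine_terminate_all(chan)`, and controllers that implement `DMA_PAUSE`/`DMA_RESUME` can be driven through `dmaengine_pause()`/`dmaengine_resume()`. For audio-style ring buffers, the new `device_prep_dma_cyclic` hook covers the circular case instead of the scatterlist preparation shown above.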
```diff
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
index d9d08b6269b6..10496bd24c5c 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -27,14 +27,7 @@
 
 #include <linux/dmaengine.h>
 
-/*DMA transaction width, src and dstn width would be same
-The DMA length must be width aligned,
-for 32 bit width the length must be 32 bit (4bytes) aligned only*/
-enum intel_mid_dma_width {
-	LNW_DMA_WIDTH_8BIT = 0x0,
-	LNW_DMA_WIDTH_16BIT = 0x1,
-	LNW_DMA_WIDTH_32BIT = 0x2,
-};
+#define DMA_PREP_CIRCULAR_LIST		(1 << 10)
 
 /*DMA mode configurations*/
 enum intel_mid_dma_mode {
@@ -69,18 +62,15 @@ enum intel_mid_dma_msize {
  * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
  * @src_msize: Source DMA burst size
  * @dst_msize: Dst DMA burst size
+ * @per_addr: Peripheral address
  * @device_instance: DMA peripheral device instance, we can have multiple
  *		peripheral device connected to single DMAC
  */
 struct intel_mid_dma_slave {
-	enum dma_data_direction	dirn;
-	enum intel_mid_dma_width	src_width; /*width of DMA src txn*/
-	enum intel_mid_dma_width	dst_width; /*width of DMA dst txn*/
 	enum intel_mid_dma_hs_mode	hs_mode;  /*handshaking*/
 	enum intel_mid_dma_mode		cfg_mode; /*mode configuration*/
-	enum intel_mid_dma_msize	src_msize; /*size if src burst*/
-	enum intel_mid_dma_msize	dst_msize; /*size of dst burst*/
 	unsigned int		device_instance; /*0, 1 for periphral instance*/
+	struct dma_slave_config		dma_slave;
 };
 
 #endif /*__INTEL_MID_DMA_H__*/
```
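With the driver-private width and burst-size enums removed, an `intel_mid_dma_slave` description is expected to carry the generic `struct dma_slave_config` instead. A hedged sketch of what such an initialization might look like follows; the handshaking/mode enumerators are omitted because their values are not shown in this hunk, and the address and burst figures are placeholders:

```c
/* Illustrative only: slave parameters for the reworked intel_mid_dma API. */
static struct intel_mid_dma_slave mid_slave = {
	.device_instance = 0,			/* first peripheral instance */
	.dma_slave = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= 0xff2d0000,	/* placeholder peripheral FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	},
};
```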
