Diffstat (limited to 'include/linux/dmaengine.h')

 include/linux/dmaengine.h | 204
 1 file changed, 195 insertions(+), 9 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78784982b33e..e2106495cc11 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -31,6 +31,8 @@
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
 typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE	1
+#define DMA_MAX_COOKIE	INT_MAX
 
 #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
 
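For orientation: a cookie is what a client gets back when submitting a prepared
descriptor, and dma_submit_error() is how it is checked. A minimal client-side
sketch, assuming "desc" was returned by one of the device_prep_* hooks shown
further down in this header:

	/* Sketch only: submit a prepared descriptor and validate the cookie */
	dma_cookie_t cookie = desc->tx_submit(desc);

	if (dma_submit_error(cookie))
		return -EIO;	/* a negative cookie is an error code, not a cookie */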
@@ -38,11 +40,13 @@ typedef s32 dma_cookie_t;
  * enum dma_status - DMA transaction status
  * @DMA_SUCCESS: transaction completed successfully
  * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
 	DMA_SUCCESS,
 	DMA_IN_PROGRESS,
+	DMA_PAUSED,
 	DMA_ERROR,
 };
 
@@ -105,6 +109,25 @@ enum dma_ctrl_flags {
 };
 
 /**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume a paused transfer
+ * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
+ * that need to reconfigure the slave channels at runtime (as opposed to
+ * passing configuration data in statically from the platform). An
+ * additional argument of struct dma_slave_config must be passed in with
+ * this command.
+ */
+enum dma_ctrl_cmd {
+	DMA_TERMINATE_ALL,
+	DMA_PAUSE,
+	DMA_RESUME,
+	DMA_SLAVE_CONFIG,
+};
+
+/**
  * enum sum_check_bits - bit position of pq_check_flags
  */
 enum sum_check_bits {
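The commands above are exercised through the device_control() hook that this
patch adds to struct dma_device further down. A hedged sketch of pausing and
resuming a channel; not every controller implements every command, so the
return value must be checked:

	/* Sketch only: pause a channel, service the peripheral, then resume */
	int ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return ret;	/* e.g. pause not supported by this driver */
	/* ... drain or inspect the peripheral here ... */
	ret = chan->device->device_control(chan, DMA_RESUME, 0);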
@@ -162,7 +185,7 @@ struct dma_chan {
 	struct dma_chan_dev *dev;
 
 	struct list_head device_node;
-	struct dma_chan_percpu *local;
+	struct dma_chan_percpu __percpu *local;
 	int client_count;
 	int table_count;
 	void *private;
@@ -182,6 +205,71 @@ struct dma_chan_dev {
 	atomic_t *idr_ref;
 };
 
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
+ * legal values, DMA_BIDIRECTIONAL is not acceptable since we
+ * need to differentiate source and target addresses.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX); if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX); if the destination is memory this
+ * argument is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 4, 8.
+ * @dst_addr_width: same as src_addr_width but for the destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for the destination
+ * target mutatis mutandis.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * command in the channel config interface, DMA_SLAVE_CONFIG,
+ * and this struct will then be passed in as an argument to the
+ * DMA engine device_control() function.
+ *
+ * The rationale for adding configuration information to this struct
+ * is as follows: if it is likely that most DMA slave controllers in
+ * the world will support the configuration option, then make it
+ * generic. If not: if it is fixed, so that it can be sent in
+ * statically from the platform data, then prefer to do that. Else,
+ * if it is neither fixed at runtime, nor generic enough (such as
+ * bus mastership on some CPU family and whatnot), then create a
+ * custom slave config struct and pass that, and make this config
+ * a member of that struct, if applicable.
+ */
+struct dma_slave_config {
+	enum dma_data_direction direction;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	enum dma_slave_buswidth src_addr_width;
+	enum dma_slave_buswidth dst_addr_width;
+	u32 src_maxburst;
+	u32 dst_maxburst;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
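A hedged sketch of the intended usage: fill in a dma_slave_config for a
device-to-memory (RX) channel and hand it to the controller through the
DMA_SLAVE_CONFIG command. UART_RX_FIFO_ADDR and the width/burst values are
made-up, peripheral-specific numbers:

	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= UART_RX_FIFO_ADDR,	/* hypothetical FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 8,	/* e.g. half of a 16-entry FIFO */
	};
	int ret;

	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					   (unsigned long)&cfg);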
@@ -228,9 +316,84 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+	spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+	spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	txd->next = next;
+	next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+	txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+	txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ *	on the selected transfer for states DMA_IN_PROGRESS and
+ *	DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+	dma_cookie_t last;
+	dma_cookie_t used;
+	u32 residue;
 };
 
 /**
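The txd_*() accessors exist so that core code can manipulate the next/parent
chain without scattering #ifdefs: with CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
set they compile to no-ops, and txd_chain() must never be reached (hence the
BUG()). A sketch of linking a dependent descriptor with them:

	/* Sketch only: make "second" depend on "first" */
	txd_lock(first);
	txd_chain(first, second);	/* first->next = second, second->parent = first */
	txd_unlock(first);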
@@ -259,8 +422,12 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel, returns
+ *	zero or an error code
+ * @device_tx_status: poll for transaction completion, the optional
+ *	txstate parameter can be supplied with a pointer to get a
+ *	struct with auxiliary transfer status information, otherwise the call
+ *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
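A hedged consumer-side sketch of the new status call: the txstate argument is
optional, so callers that only want the bare status can pass NULL, while
callers interested in the residue pass a struct dma_tx_state:

	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_debug("%u bytes left on this transfer\n", state.residue);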
@@ -311,11 +478,12 @@ struct dma_device {
 			struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_data_direction direction,
 			unsigned long flags);
-	void (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg);
 
-	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
@@ -380,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
 	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }
 
-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
 	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }
@@ -556,7 +724,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_is_tx_complete(chan, cookie, last, used);
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, cookie, &state);
+	if (last)
+		*last = state.last;
+	if (used)
+		*used = state.used;
+	return status;
 }
 
 #define dma_async_memcpy_complete(chan, cookie, last, used)\
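Since the wrapper above preserves the old signature, the legacy polling idiom
keeps working unchanged on top of device_tx_status(); a sketch:

	dma_cookie_t last, used;

	while (dma_async_is_tx_complete(chan, cookie, &last, &used) ==
	       DMA_IN_PROGRESS)
		cpu_relax();	/* busy-wait shown for brevity only */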
@@ -584,6 +760,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 	return DMA_IN_PROGRESS;
 }
 
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+	if (st) {
+		st->last = last;
+		st->used = used;
+		st->residue = residue;
+	}
+}
+
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
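dma_set_tx_state() is the driver-side counterpart: because it tolerates a NULL
state pointer, a driver's device_tx_status() can call it unconditionally. A
sketch with hypothetical foo_ helpers (to_foo_chan(), foo_bytes_left() and the
cookie fields stand in for a driver's own bookkeeping):

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		struct foo_chan *fc = to_foo_chan(chan);
		enum dma_status ret;

		ret = dma_async_is_complete(cookie, fc->completed_cookie,
					    fc->last_used);
		dma_set_tx_state(txstate, fc->completed_cookie, fc->last_used,
				 foo_bytes_left(fc));
		return ret;
	}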
