diff options
| author | David S. Miller <davem@davemloft.net> | 2015-03-03 21:16:48 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-03-03 21:16:48 -0500 |
| commit | 71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch) | |
| tree | f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /include/linux/dmaengine.h | |
| parent | b97526f3ff95f92b107f0fb52cbb8627e395429b (diff) | |
| parent | a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/rocker/rocker.c
The rocker commit was two overlapping changes, one to rename
the ->vport member to ->pport, and another making the bitmask
expression use '1ULL' instead of plain '1'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/dmaengine.h')
| -rw-r--r-- | include/linux/dmaengine.h | 120 |
1 file changed, 58 insertions(+), 62 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 40cd75e21ea2..b6997a0cb528 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -189,25 +189,6 @@ enum dma_ctrl_flags { | |||
| 189 | }; | 189 | }; |
| 190 | 190 | ||
| 191 | /** | 191 | /** |
| 192 | * enum dma_ctrl_cmd - DMA operations that can optionally be exercised | ||
| 193 | * on a running channel. | ||
| 194 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers | ||
| 195 | * @DMA_PAUSE: pause ongoing transfers | ||
| 196 | * @DMA_RESUME: resume paused transfer | ||
| 197 | * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers | ||
| 198 | * that need to runtime reconfigure the slave channels (as opposed to passing | ||
| 199 | * configuration data in statically from the platform). An additional | ||
| 200 | * argument of struct dma_slave_config must be passed in with this | ||
| 201 | * command. | ||
| 202 | */ | ||
| 203 | enum dma_ctrl_cmd { | ||
| 204 | DMA_TERMINATE_ALL, | ||
| 205 | DMA_PAUSE, | ||
| 206 | DMA_RESUME, | ||
| 207 | DMA_SLAVE_CONFIG, | ||
| 208 | }; | ||
| 209 | |||
| 210 | /** | ||
| 211 | * enum sum_check_bits - bit position of pq_check_flags | 192 | * enum sum_check_bits - bit position of pq_check_flags |
| 212 | */ | 193 | */ |
| 213 | enum sum_check_bits { | 194 | enum sum_check_bits { |
| @@ -298,6 +279,9 @@ enum dma_slave_buswidth { | |||
| 298 | DMA_SLAVE_BUSWIDTH_3_BYTES = 3, | 279 | DMA_SLAVE_BUSWIDTH_3_BYTES = 3, |
| 299 | DMA_SLAVE_BUSWIDTH_4_BYTES = 4, | 280 | DMA_SLAVE_BUSWIDTH_4_BYTES = 4, |
| 300 | DMA_SLAVE_BUSWIDTH_8_BYTES = 8, | 281 | DMA_SLAVE_BUSWIDTH_8_BYTES = 8, |
| 282 | DMA_SLAVE_BUSWIDTH_16_BYTES = 16, | ||
| 283 | DMA_SLAVE_BUSWIDTH_32_BYTES = 32, | ||
| 284 | DMA_SLAVE_BUSWIDTH_64_BYTES = 64, | ||
| 301 | }; | 285 | }; |
| 302 | 286 | ||
| 303 | /** | 287 | /** |
| @@ -336,9 +320,8 @@ enum dma_slave_buswidth { | |||
| 336 | * This struct is passed in as configuration data to a DMA engine | 320 | * This struct is passed in as configuration data to a DMA engine |
| 337 | * in order to set up a certain channel for DMA transport at runtime. | 321 | * in order to set up a certain channel for DMA transport at runtime. |
| 338 | * The DMA device/engine has to provide support for an additional | 322 | * The DMA device/engine has to provide support for an additional |
| 339 | * command in the channel config interface, DMA_SLAVE_CONFIG | 323 | * callback in the dma_device structure, device_config and this struct |
| 340 | * and this struct will then be passed in as an argument to the | 324 | * will then be passed in as an argument to the function. |
| 341 | * DMA engine device_control() function. | ||
| 342 | * | 325 | * |
| 343 | * The rationale for adding configuration information to this struct is as | 326 | * The rationale for adding configuration information to this struct is as |
| 344 | * follows: if it is likely that more than one DMA slave controllers in | 327 | * follows: if it is likely that more than one DMA slave controllers in |
| @@ -387,7 +370,7 @@ enum dma_residue_granularity { | |||
| 387 | /* struct dma_slave_caps - expose capabilities of a slave channel only | 370 | /* struct dma_slave_caps - expose capabilities of a slave channel only |
| 388 | * | 371 | * |
| 389 | * @src_addr_widths: bit mask of src addr widths the channel supports | 372 | * @src_addr_widths: bit mask of src addr widths the channel supports |
| 390 | * @dstn_addr_widths: bit mask of dstn addr widths the channel supports | 373 | * @dst_addr_widths: bit mask of dstn addr widths the channel supports |
| 391 | * @directions: bit mask of slave direction the channel supported | 374 | * @directions: bit mask of slave direction the channel supported |
| 392 | * since the enum dma_transfer_direction is not defined as bits for each | 375 | * since the enum dma_transfer_direction is not defined as bits for each |
| 393 | * type of direction, the dma controller should fill (1 << <TYPE>) and same | 376 | * type of direction, the dma controller should fill (1 << <TYPE>) and same |
| @@ -398,7 +381,7 @@ enum dma_residue_granularity { | |||
| 398 | */ | 381 | */ |
| 399 | struct dma_slave_caps { | 382 | struct dma_slave_caps { |
| 400 | u32 src_addr_widths; | 383 | u32 src_addr_widths; |
| 401 | u32 dstn_addr_widths; | 384 | u32 dst_addr_widths; |
| 402 | u32 directions; | 385 | u32 directions; |
| 403 | bool cmd_pause; | 386 | bool cmd_pause; |
| 404 | bool cmd_terminate; | 387 | bool cmd_terminate; |
| @@ -594,6 +577,14 @@ struct dma_tx_state { | |||
| 594 | * @fill_align: alignment shift for memset operations | 577 | * @fill_align: alignment shift for memset operations |
| 595 | * @dev_id: unique device ID | 578 | * @dev_id: unique device ID |
| 596 | * @dev: struct device reference for dma mapping api | 579 | * @dev: struct device reference for dma mapping api |
| 580 | * @src_addr_widths: bit mask of src addr widths the device supports | ||
| 581 | * @dst_addr_widths: bit mask of dst addr widths the device supports | ||
| 582 | * @directions: bit mask of slave direction the device supports since | ||
| 583 | * the enum dma_transfer_direction is not defined as bits for | ||
| 584 | * each type of direction, the dma controller should fill (1 << | ||
| 585 | * <TYPE>) and same should be checked by controller as well | ||
| 586 | * @residue_granularity: granularity of the transfer residue reported | ||
| 587 | * by tx_status | ||
| 597 | * @device_alloc_chan_resources: allocate resources and return the | 588 | * @device_alloc_chan_resources: allocate resources and return the |
| 598 | * number of allocated descriptors | 589 | * number of allocated descriptors |
| 599 | * @device_free_chan_resources: release DMA channel's resources | 590 | * @device_free_chan_resources: release DMA channel's resources |
| @@ -608,14 +599,19 @@ struct dma_tx_state { | |||
| 608 | * The function takes a buffer of size buf_len. The callback function will | 599 | * The function takes a buffer of size buf_len. The callback function will |
| 609 | * be called after period_len bytes have been transferred. | 600 | * be called after period_len bytes have been transferred. |
| 610 | * @device_prep_interleaved_dma: Transfer expression in a generic way. | 601 | * @device_prep_interleaved_dma: Transfer expression in a generic way. |
| 611 | * @device_control: manipulate all pending operations on a channel, returns | 602 | * @device_config: Pushes a new configuration to a channel, return 0 or an error |
| 612 | * zero or error code | 603 | * code |
| 604 | * @device_pause: Pauses any transfer happening on a channel. Returns | ||
| 605 | * 0 or an error code | ||
| 606 | * @device_resume: Resumes any transfer on a channel previously | ||
| 607 | * paused. Returns 0 or an error code | ||
| 608 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 | ||
| 609 | * or an error code | ||
| 613 | * @device_tx_status: poll for transaction completion, the optional | 610 | * @device_tx_status: poll for transaction completion, the optional |
| 614 | * txstate parameter can be supplied with a pointer to get a | 611 | * txstate parameter can be supplied with a pointer to get a |
| 615 | * struct with auxiliary transfer status information, otherwise the call | 612 | * struct with auxiliary transfer status information, otherwise the call |
| 616 | * will just return a simple status code | 613 | * will just return a simple status code |
| 617 | * @device_issue_pending: push pending transactions to hardware | 614 | * @device_issue_pending: push pending transactions to hardware |
| 618 | * @device_slave_caps: return the slave channel capabilities | ||
| 619 | */ | 615 | */ |
| 620 | struct dma_device { | 616 | struct dma_device { |
| 621 | 617 | ||
| @@ -635,14 +631,19 @@ struct dma_device { | |||
| 635 | int dev_id; | 631 | int dev_id; |
| 636 | struct device *dev; | 632 | struct device *dev; |
| 637 | 633 | ||
| 634 | u32 src_addr_widths; | ||
| 635 | u32 dst_addr_widths; | ||
| 636 | u32 directions; | ||
| 637 | enum dma_residue_granularity residue_granularity; | ||
| 638 | |||
| 638 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 639 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
| 639 | void (*device_free_chan_resources)(struct dma_chan *chan); | 640 | void (*device_free_chan_resources)(struct dma_chan *chan); |
| 640 | 641 | ||
| 641 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 642 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
| 642 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 643 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
| 643 | size_t len, unsigned long flags); | 644 | size_t len, unsigned long flags); |
| 644 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( | 645 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( |
| 645 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 646 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
| 646 | unsigned int src_cnt, size_t len, unsigned long flags); | 647 | unsigned int src_cnt, size_t len, unsigned long flags); |
| 647 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( | 648 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( |
| 648 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, | 649 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, |
| @@ -674,31 +675,26 @@ struct dma_device { | |||
| 674 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | 675 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( |
| 675 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 676 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
| 676 | unsigned long flags); | 677 | unsigned long flags); |
| 677 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 678 | |
| 678 | unsigned long arg); | 679 | int (*device_config)(struct dma_chan *chan, |
| 680 | struct dma_slave_config *config); | ||
| 681 | int (*device_pause)(struct dma_chan *chan); | ||
| 682 | int (*device_resume)(struct dma_chan *chan); | ||
| 683 | int (*device_terminate_all)(struct dma_chan *chan); | ||
| 679 | 684 | ||
| 680 | enum dma_status (*device_tx_status)(struct dma_chan *chan, | 685 | enum dma_status (*device_tx_status)(struct dma_chan *chan, |
| 681 | dma_cookie_t cookie, | 686 | dma_cookie_t cookie, |
| 682 | struct dma_tx_state *txstate); | 687 | struct dma_tx_state *txstate); |
| 683 | void (*device_issue_pending)(struct dma_chan *chan); | 688 | void (*device_issue_pending)(struct dma_chan *chan); |
| 684 | int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); | ||
| 685 | }; | 689 | }; |
| 686 | 690 | ||
| 687 | static inline int dmaengine_device_control(struct dma_chan *chan, | ||
| 688 | enum dma_ctrl_cmd cmd, | ||
| 689 | unsigned long arg) | ||
| 690 | { | ||
| 691 | if (chan->device->device_control) | ||
| 692 | return chan->device->device_control(chan, cmd, arg); | ||
| 693 | |||
| 694 | return -ENOSYS; | ||
| 695 | } | ||
| 696 | |||
| 697 | static inline int dmaengine_slave_config(struct dma_chan *chan, | 691 | static inline int dmaengine_slave_config(struct dma_chan *chan, |
| 698 | struct dma_slave_config *config) | 692 | struct dma_slave_config *config) |
| 699 | { | 693 | { |
| 700 | return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, | 694 | if (chan->device->device_config) |
| 701 | (unsigned long)config); | 695 | return chan->device->device_config(chan, config); |
| 696 | |||
| 697 | return -ENOSYS; | ||
| 702 | } | 698 | } |
| 703 | 699 | ||
| 704 | static inline bool is_slave_direction(enum dma_transfer_direction direction) | 700 | static inline bool is_slave_direction(enum dma_transfer_direction direction) |
| @@ -765,34 +761,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
| 765 | src_sg, src_nents, flags); | 761 | src_sg, src_nents, flags); |
| 766 | } | 762 | } |
| 767 | 763 | ||
| 768 | static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | ||
| 769 | { | ||
| 770 | if (!chan || !caps) | ||
| 771 | return -EINVAL; | ||
| 772 | |||
| 773 | /* check if the channel supports slave transactions */ | ||
| 774 | if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits)) | ||
| 775 | return -ENXIO; | ||
| 776 | |||
| 777 | if (chan->device->device_slave_caps) | ||
| 778 | return chan->device->device_slave_caps(chan, caps); | ||
| 779 | |||
| 780 | return -ENXIO; | ||
| 781 | } | ||
| 782 | |||
| 783 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 764 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
| 784 | { | 765 | { |
| 785 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 766 | if (chan->device->device_terminate_all) |
| 767 | return chan->device->device_terminate_all(chan); | ||
| 768 | |||
| 769 | return -ENOSYS; | ||
| 786 | } | 770 | } |
| 787 | 771 | ||
| 788 | static inline int dmaengine_pause(struct dma_chan *chan) | 772 | static inline int dmaengine_pause(struct dma_chan *chan) |
| 789 | { | 773 | { |
| 790 | return dmaengine_device_control(chan, DMA_PAUSE, 0); | 774 | if (chan->device->device_pause) |
| 775 | return chan->device->device_pause(chan); | ||
| 776 | |||
| 777 | return -ENOSYS; | ||
| 791 | } | 778 | } |
| 792 | 779 | ||
| 793 | static inline int dmaengine_resume(struct dma_chan *chan) | 780 | static inline int dmaengine_resume(struct dma_chan *chan) |
| 794 | { | 781 | { |
| 795 | return dmaengine_device_control(chan, DMA_RESUME, 0); | 782 | if (chan->device->device_resume) |
| 783 | return chan->device->device_resume(chan); | ||
| 784 | |||
| 785 | return -ENOSYS; | ||
| 796 | } | 786 | } |
| 797 | 787 | ||
| 798 | static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, | 788 | static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, |
| @@ -1059,6 +1049,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev, | |||
| 1059 | const char *name); | 1049 | const char *name); |
| 1060 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | 1050 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
| 1061 | void dma_release_channel(struct dma_chan *chan); | 1051 | void dma_release_channel(struct dma_chan *chan); |
| 1052 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); | ||
| 1062 | #else | 1053 | #else |
| 1063 | static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | 1054 | static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) |
| 1064 | { | 1055 | { |
| @@ -1093,6 +1084,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | |||
| 1093 | static inline void dma_release_channel(struct dma_chan *chan) | 1084 | static inline void dma_release_channel(struct dma_chan *chan) |
| 1094 | { | 1085 | { |
| 1095 | } | 1086 | } |
| 1087 | static inline int dma_get_slave_caps(struct dma_chan *chan, | ||
| 1088 | struct dma_slave_caps *caps) | ||
| 1089 | { | ||
| 1090 | return -ENXIO; | ||
| 1091 | } | ||
| 1096 | #endif | 1092 | #endif |
| 1097 | 1093 | ||
| 1098 | /* --- DMA device --- */ | 1094 | /* --- DMA device --- */ |
