Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/async_tx.h	129
-rw-r--r--	include/linux/dmaengine.h	176
-rw-r--r--	include/linux/pci_ids.h	10
3 files changed, 268 insertions, 47 deletions
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97fa..a1c486a88e88 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,60 @@ struct dma_chan_ref {
  * array.
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  * dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
  */
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST = (1 << 1),
-	ASYNC_TX_ACK = (1 << 3),
-	ASYNC_TX_DEP_ACK = (1 << 4),
+	ASYNC_TX_ACK = (1 << 2),
+	ASYNC_TX_FENCE = (1 << 3),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
 };
 
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	if (likely(tx)) {
+		struct dma_chan *chan = tx->chan;
+		struct dma_device *dma = chan->device;
+
+		dma->device_issue_pending(chan);
+	}
+}
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
 	__async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type);
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
 }
 
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
+{
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	  int src_cnt, size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, enum sum_check_flags *result,
+	      struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memset(struct page *dest, int val, unsigned int offset,
-	size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, enum sum_check_flags *pqres, struct page *spare,
+		   struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
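For context, a minimal sketch of how a client might drive the reworked submission interface, with one async_submit_ctl reused across a two-operation chain. This is illustrative only and not part of the patch: the names build_chain, stripe_done, NDISKS and STRIPE_SIZE are invented, and error handling is omitted.

	#include <linux/async_tx.h>

	#define NDISKS 4
	#define STRIPE_SIZE 4096

	static void stripe_done(void *ref)
	{
		/* runs once the last operation in the chain completes */
	}

	static struct dma_async_tx_descriptor *
	build_chain(struct page *dest, struct page **srcs)
	{
		struct async_submit_ctl submit;
		addr_conv_t addr_conv[NDISKS];	/* scribble for dma/page conversions */
		struct dma_async_tx_descriptor *tx;

		/* copy the first disk; fence it because the xor consumes its result */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, addr_conv);
		tx = async_memcpy(dest, srcs[0], 0, 0, STRIPE_SIZE, &submit);

		/* xor the remaining disks into dest behind the copy; without
		 * ASYNC_TX_XOR_ZERO_DST the current contents of dest are
		 * implicitly included as a source
		 */
		init_async_submit(&submit, ASYNC_TX_ACK, tx, stripe_done, NULL,
				  addr_conv);
		tx = async_xor(dest, &srcs[1], 0, NDISKS - 1, STRIPE_SIZE, &submit);

		async_tx_issue_pending(tx);
		return tx;
	}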
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f114bc7790bc..2b9f2ac7ed60 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,19 +48,20 @@ enum dma_status {
 
 /**
  * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
+ * automatically set as dma devices are registered.
  */
 enum dma_transaction_type {
 	DMA_MEMCPY,
 	DMA_XOR,
-	DMA_PQ_XOR,
-	DMA_DUAL_XOR,
-	DMA_PQ_UPDATE,
-	DMA_ZERO_SUM,
-	DMA_PQ_ZERO_SUM,
+	DMA_PQ,
+	DMA_XOR_VAL,
+	DMA_PQ_VAL,
 	DMA_MEMSET,
-	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
 	DMA_PRIVATE,
+	DMA_ASYNC_TX,
 	DMA_SLAVE,
 };
 
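The renamed types above are what drivers advertise in their capability mask. A hedged sketch of a registration path follows; "my_dma" is hypothetical, while dma_cap_set() and dma_async_device_register() are the existing dmaengine interfaces:

	dma_cap_set(DMA_XOR, my_dma->cap_mask);
	dma_cap_set(DMA_XOR_VAL, my_dma->cap_mask);
	dma_cap_set(DMA_PQ, my_dma->cap_mask);
	dma_cap_set(DMA_PQ_VAL, my_dma->cap_mask);
	/* DMA_ASYNC_TX is added by the core during registration */
	dma_async_device_register(my_dma);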
@@ -70,18 +71,25 @@ enum dma_transaction_type {
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
  * control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  * this transaction
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any
- * dependency chains
+ * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
  * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
  * (if not set, do the source dma-unmapping as page)
  * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
  * (if not set, do the destination dma-unmapping as page)
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ * sources that were the result of a previous operation, in the case of a PQ
+ * operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ * on the result of this operation
 */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
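The three new prep flags are designed to be combined. For instance (an illustrative value, not from the patch), a continued, Q-only syndrome update whose output feeds a dependent operation would be prepared with:

	unsigned long flags = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P |
			      DMA_PREP_FENCE;

Under exactly this combination, the dma_maxpq() helper introduced later in this diff reserves a single extra source slot.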
@@ -90,9 +98,32 @@ enum dma_ctrl_flags {
 	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
 	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+	DMA_PREP_PQ_DISABLE_P = (1 << 6),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+	DMA_PREP_CONTINUE = (1 << 8),
+	DMA_PREP_FENCE = (1 << 9),
 };
 
 /**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+	SUM_CHECK_P = 0,
+	SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
  */
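A sketch of consuming the new result type once a validation operation completes; this is a hypothetical fragment, with submission and completion synchronization elided:

	enum sum_check_flags result = 0;

	/* ... pass &result to async_xor_val()/async_syndrome_val() and
	 * wait for the operation to complete; result is undefined until
	 * then ...
	 */
	if (result & SUM_CHECK_P_RESULT)
		pr_debug("P (xor) parity mismatch\n");
	if (result & SUM_CHECK_Q_RESULT)
		pr_debug("Q (reed-solomon) syndrome mismatch\n");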
@@ -210,6 +241,11 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -217,7 +253,9 @@ struct dma_async_tx_descriptor {
  * @device_free_chan_resources: release DMA channel's resources
  * @device_prep_dma_memcpy: prepares a memcpy operation
  * @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
@@ -232,7 +270,13 @@ struct dma_device {
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t cap_mask;
-	int max_xor;
+	unsigned short max_xor;
+	unsigned short max_pq;
+	u8 copy_align;
+	u8 xor_align;
+	u8 pq_align;
+	u8 fill_align;
+	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
 	struct device *dev;
@@ -246,9 +290,17 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
-		size_t len, u32 *result, unsigned long flags);
+		size_t len, enum sum_check_flags *result, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf, size_t len,
+		enum sum_check_flags *pqres, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
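The pq hooks take a two-entry destination array, P then Q, plus one GF(256) coefficient per source. A hypothetical direct invocation, assuming chan, srcs[], p_dma, q_dma and len are already set up (clients would normally reach this hook through async_gen_syndrome() rather than calling it themselves):

	dma_addr_t pq[2] = { p_dma, q_dma };	/* pq[0] = P, pq[1] = Q */
	unsigned char scf[2] = { 1, 2 };	/* per-source coefficients */
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_pq(chan, pq, srcs, 2, scf, len,
					      DMA_PREP_INTERRUPT);

With DMA_PREP_PQ_DISABLE_P set, pq[0] is ignored and only the Q destination is written.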
@@ -267,6 +319,96 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+	size_t mask;
+
+	if (!align)
+		return true;
+	mask = (1 << align) - 1;
+	if (mask & (off1 | off2 | len))
+		return false;
+	return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+				      size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+				     size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+	dma->max_pq = maxpq;
+	if (has_pq_continue)
+		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+	return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma);
+	else if (dmaf_p_disabled_continue(flags))
+		return dma_dev_to_maxpq(dma) - 1;
+	else if (dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma) - 3;
+	BUG();
+}
+
 /* --- public DMA engine API --- */
 
 #ifdef CONFIG_DMA_ENGINE
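A worked example of the budget these helpers encode, for a hypothetical engine that advertises 8 pq sources but lacks native continuation support:

	dma_set_maxpq(dma, 8, 0);	/* bit 15 (DMA_HAS_PQ_CONTINUE) stays clear */

	dma_maxpq(dma, 0);			/* 8: not a continuation */
	dma_maxpq(dma, DMA_PREP_CONTINUE);	/* 5: 8 - 3 reused slots */
	dma_maxpq(dma, DMA_PREP_CONTINUE |
		       DMA_PREP_PQ_DISABLE_P);	/* 7: 8 - 1 reused slot */

The *_align fields are shifts, so pq_align = 6 requires 64-byte aligned offsets and length: is_dma_pq_aligned(dma, off1, off2, len) returns false if any of the three breaks that alignment.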
@@ -296,7 +438,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get() dmaengine_get()
 #define async_dmaengine_put() dmaengine_put()
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
 #define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
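With CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH enabled, every operation type resolves to the single channel registered under the DMA_ASYNC_TX pseudo-capability, so a caller observes (illustrative fragment):

	/* both map to dma_find_channel(DMA_ASYNC_TX) in that configuration */
	struct dma_chan *xor_chan = async_dma_find_channel(DMA_XOR);
	struct dma_chan *pq_chan = async_dma_find_channel(DMA_PQ);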
@@ -309,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type)
 {
 	return NULL;
 }
-#endif
+#endif /* CONFIG_ASYNC_TX_DMA */
 
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a3b000365795..bbeb13ceb8e8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2515,6 +2515,16 @@
 #define PCI_DEVICE_ID_INTEL_E7525_MCH	0x359e
 #define PCI_DEVICE_ID_INTEL_IOAT_CNB	0x360b
 #define PCI_DEVICE_ID_INTEL_FBD_CNB	0x360c
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF0	0x3710
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF1	0x3711
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF2	0x3712
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF3	0x3713
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF4	0x3714
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF5	0x3715
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF6	0x3716
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF7	0x3717
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF8	0x3718
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF9	0x3719
 #define PCI_DEVICE_ID_INTEL_ICH10_0	0x3a14
 #define PCI_DEVICE_ID_INTEL_ICH10_1	0x3a16
 #define PCI_DEVICE_ID_INTEL_ICH10_2	0x3a18
