author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:29 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:29 -0400
commit    f9dd2134374c8de6b911e2b8652c6c9622eaa658
tree      c1b8f8d622941606b9e7247ab31d811ba4295011
parent    4b652f0db3be891c7b76b109c3b55003b920fc96
parent    07a3b417dc3d00802bd7b4874c3e811f0b015a7d
Merge branch 'md-raid6-accel' into ioat3.2

Conflicts:
	include/linux/dmaengine.h
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/async_tx.h	126
-rw-r--r--	include/linux/dmaengine.h	116
2 files changed, 199 insertions(+), 43 deletions(-)
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97f..866e61c4e2e 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,57 @@ struct dma_chan_ref {
  * array.
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  * dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
  */
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ACK		 = (1 << 3),
-	ASYNC_TX_DEP_ACK	 = (1 << 4),
+	ASYNC_TX_ACK		 = (1 << 2),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
 };
 
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	if (likely(tx)) {
+		struct dma_chan *chan = tx->chan;
+		struct dma_device *dma = chan->device;
+
+		dma->device_issue_pending(chan);
+	}
+}
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
 	__async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type);
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -84,10 +116,16 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -99,46 +137,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
 }
 
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
+{
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	  int src_cnt, size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, enum sum_check_flags *result,
+	      struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memset(struct page *dest, int val, unsigned int offset,
-	size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, enum sum_check_flags *pqres, struct page *spare,
+		   struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
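
The net effect of this hunk is that every async_tx helper drops its trailing flags/depend_tx/cb_fn/cb_param arguments in favor of one struct async_submit_ctl. As a caller-side illustration, here is a minimal sketch of submitting an XOR under the new convention; it is not part of the commit, and the example_* names, buffer setup, and completion plumbing are hypothetical:

#include <linux/async_tx.h>
#include <linux/completion.h>

/* hypothetical callback: signal a waiter when the XOR completes */
static void example_xor_done(void *param)
{
	complete(param);
}

static struct dma_async_tx_descriptor *
example_submit_xor(struct page *dest, struct page **srcs, int src_cnt,
		   size_t len, addr_conv_t *scribble,
		   struct completion *done)
{
	struct async_submit_ctl submit;

	/* flags, dependency, callback, and scribble space are packed
	 * once instead of being threaded through every helper */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, example_xor_done, done, scribble);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}

Chaining works the same way: the descriptor returned by one helper is passed as the depend_tx argument of the next init_async_submit() call, which takes over the role of the removed ASYNC_TX_DEP_ACK convention for acknowledging parent descriptors.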
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ffefba81c81..1012f1abcb5 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -52,11 +52,11 @@ enum dma_status {
 enum dma_transaction_type {
 	DMA_MEMCPY,
 	DMA_XOR,
-	DMA_PQ_XOR,
+	DMA_PQ,
 	DMA_DUAL_XOR,
 	DMA_PQ_UPDATE,
-	DMA_ZERO_SUM,
-	DMA_PQ_ZERO_SUM,
+	DMA_XOR_VAL,
+	DMA_PQ_VAL,
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
@@ -70,18 +70,23 @@ enum dma_transaction_type {
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
  *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
- *  acknowledges receipt, i.e. has has a chance to establish any
- *  dependency chains
+ *  acknowledges receipt, i.e. has has a chance to establish any dependency
+ *  chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
  * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
  *  (if not set, do the source dma-unmapping as page)
  * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
  *  (if not set, do the destination dma-unmapping as page)
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ *  sources that were the result of a previous operation, in the case of a PQ
+ *  operation it continues the calculation with new sources
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -90,9 +95,31 @@ enum dma_ctrl_flags {
 	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
 	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+	DMA_PREP_PQ_DISABLE_P = (1 << 6),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+	DMA_PREP_CONTINUE = (1 << 8),
 };
 
 /**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+	SUM_CHECK_P = 0,
+	SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
  */
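
Since the validation result type changes from a raw u32 to the enum sum_check_flags above, a caller can now distinguish a P (xor) check failure from a Q (Reed-Solomon) check failure in one word. A minimal consumer sketch, assuming the async_xor_val() and async_tx_quiesce() declarations from the async_tx.h hunks earlier in this diff (the example_* naming is hypothetical):

#include <linux/async_tx.h>
#include <linux/errno.h>

static int example_validate_parity(struct page *p, struct page **srcs,
				   int src_cnt, size_t len,
				   addr_conv_t *scribble)
{
	enum sum_check_flags result = 0;
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_xor_val(p, srcs, 0, src_cnt, len, &result, &submit);
	async_tx_quiesce(&tx);	/* wait for the offload (or sync fallback) */

	/* SUM_CHECK_P_RESULT set means the xor across srcs was not zero */
	return (result & SUM_CHECK_P_RESULT) ? -EILSEQ : 0;
}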
@@ -213,6 +240,7 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -220,7 +248,9 @@ struct dma_async_tx_descriptor {
  * @device_free_chan_resources: release DMA channel's resources
  * @device_prep_dma_memcpy: prepares a memcpy operation
  * @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
@@ -235,7 +265,9 @@ struct dma_device {
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t cap_mask;
-	int max_xor;
+	unsigned short max_xor;
+	unsigned short max_pq;
+	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
 	struct device *dev;
@@ -249,9 +281,17 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
-		size_t len, u32 *result, unsigned long flags);
+		size_t len, enum sum_check_flags *result, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf, size_t len,
+		enum sum_check_flags *pqres, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
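
The new device_prep_dma_pq() and device_prep_dma_pq_val() hooks take a per-source coefficient array, scf. For the standard RAID6 Q syndrome each source i is weighted by the GF(2^8) power g^i, so a caller might fill the array from the raid6_gfexp table exported by lib/raid6. A hedged sketch (the helper name is hypothetical; only scf and the prep hooks come from this patch):

#include <linux/raid/pq.h>

/* hypothetical helper: coefficient i weights src[i] by g^i in GF(2^8),
 * which is the RAID6 Q-syndrome definition Q = sum(g^i * D_i) */
static void example_fill_q_coefs(unsigned char *scf, int src_cnt)
{
	int i;

	for (i = 0; i < src_cnt; i++)
		scf[i] = raid6_gfexp[i];
}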
@@ -270,6 +310,60 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+	dma->max_pq = maxpq;
+	if (has_pq_continue)
+		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+	return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma);
+	else if (dmaf_p_disabled_continue(flags))
+		return dma_dev_to_maxpq(dma) - 1;
+	else if (dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma) - 3;
+	BUG();
+}
+
 /* --- public DMA engine API --- */
 
 #ifdef CONFIG_DMA_ENGINE
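
To make the dma_maxpq() accounting concrete: an engine that advertises 8 PQ sources without native continuation support can accept only 8 - 3 = 5 new sources on a DMA_PREP_CONTINUE operation, or 8 - 1 = 7 when P is disabled, while an engine flagged with DMA_HAS_PQ_CONTINUE keeps all 8. The standalone userspace mock below mirrors that logic; the constants copy the definitions above, but this is illustrative code, not part of the commit:

#include <assert.h>
#include <stdio.h>

#define DMA_HAS_PQ_CONTINUE	(1 << 15)
#define DMA_PREP_PQ_DISABLE_P	(1 << 6)
#define DMA_PREP_CONTINUE	(1 << 8)

static int maxpq(unsigned short max_pq, unsigned long flags)
{
	unsigned short raw = max_pq & ~DMA_HAS_PQ_CONTINUE;

	if ((max_pq & DMA_HAS_PQ_CONTINUE) || !(flags & DMA_PREP_CONTINUE))
		return raw;		/* native continuation, or none requested */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		return raw - 1;		/* only {01}*Q must be replayed */
	return raw - 3;			/* {00}*P, {01}*Q, {00}*Q take 3 slots */
}

int main(void)
{
	unsigned short dev = 8;	/* engine with 8 PQ sources, no native continue */

	assert(maxpq(dev, 0) == 8);
	assert(maxpq(dev, DMA_PREP_CONTINUE) == 5);
	assert(maxpq(dev, DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P) == 7);
	assert(maxpq(dev | DMA_HAS_PQ_CONTINUE, DMA_PREP_CONTINUE) == 8);
	printf("dma_maxpq() source accounting verified\n");
	return 0;
}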