author     NeilBrown <neilb@suse.de>   2009-09-23 04:31:11 -0400
committer  NeilBrown <neilb@suse.de>   2009-09-23 04:31:11 -0400
commit     4b3df5668c8ebaebd8d66a5a94374be3e3b2ef0c (patch)
tree       51a231742e211143f5845edf4b09d1712dcd2771 /include/linux
parent     1ef04fefe2241087d9db7e9615c3f11b516e36cf (diff)
parent     1f6672d44c1ae7408b43c06170ec34eb0a0e9b9f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx into for-linus
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/async_tx.h   | 129
-rw-r--r--  include/linux/dca.h        |  11
-rw-r--r--  include/linux/dmaengine.h  | 179
-rw-r--r--  include/linux/pci_ids.h    |  10
4 files changed, 278 insertions(+), 51 deletions(-)
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97fa..a1c486a88e88 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,60 @@ struct dma_chan_ref {
  * array.
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  *   dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ *   chain uses this operation's result as an input
  */
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ACK		 = (1 << 3),
-	ASYNC_TX_DEP_ACK	 = (1 << 4),
+	ASYNC_TX_ACK		 = (1 << 2),
+	ASYNC_TX_FENCE		 = (1 << 3),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
 };
 
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	if (likely(tx)) {
+		struct dma_chan *chan = tx->chan;
+		struct dma_device *dma = chan->device;
+
+		dma->device_issue_pending(chan);
+	}
+}
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
 	__async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct async_submit_ctl *submit,
 			enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
 }
 
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
+{
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	  int src_cnt, size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, enum sum_check_flags *result,
+	      struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memset(struct page *dest, int val, unsigned int offset,
-	size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, enum sum_check_flags *pqres, struct page *spare,
+		   struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+			struct page **ptrs, struct async_submit_ctl *submit);
 
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
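
The async_tx.h changes above replace the long tail of per-call arguments (flags, depend_tx, cb_fn, cb_fn_param) with a single struct async_submit_ctl that callers fill in via init_async_submit(). A minimal sketch of a caller on the new interface; raid_xor_done, xor_blocks_example and the stripe/scribble arguments are hypothetical names for illustration, not part of this patch:

#include <linux/async_tx.h>

/* hypothetical completion callback */
static void raid_xor_done(void *ref)
{
	pr_debug("xor complete for stripe %p\n", ref);
}

/* hypothetical wrapper: xor src_cnt source pages into dest */
static struct dma_async_tx_descriptor *
xor_blocks_example(struct page *dest, struct page **srcs, int src_cnt,
		   size_t len, void *stripe, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	/* ASYNC_TX_ACK: nothing will chain off this descriptor;
	 * ASYNC_TX_XOR_DROP_DST: dest is not itself an xor source */
	init_async_submit(&submit, ASYNC_TX_ACK | ASYNC_TX_XOR_DROP_DST,
			  NULL /* no dependency */, raid_xor_done, stripe,
			  scribble);

	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}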
diff --git a/include/linux/dca.h b/include/linux/dca.h
index 9c20c7e87d0a..d27a7a05718d 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -20,6 +20,9 @@
  */
 #ifndef DCA_H
 #define DCA_H
+
+#include <linux/pci.h>
+
 /* DCA Provider API */
 
 /* DCA Notifier Interface */
@@ -36,6 +39,12 @@ struct dca_provider {
 	int			 id;
 };
 
+struct dca_domain {
+	struct list_head	node;
+	struct list_head	dca_providers;
+	struct pci_bus		*pci_rc;
+};
+
 struct dca_ops {
 	int	(*add_requester)    (struct dca_provider *, struct device *);
 	int	(*remove_requester) (struct dca_provider *, struct device *);
@@ -47,7 +56,7 @@ struct dca_ops {
 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
 void free_dca_provider(struct dca_provider *dca);
 int register_dca_provider(struct dca_provider *dca, struct device *dev);
-void unregister_dca_provider(struct dca_provider *dca);
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
 
 static inline void *dca_priv(struct dca_provider *dca)
 {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ffefba81c818..2b9f2ac7ed60 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,19 +48,20 @@ enum dma_status {
 
 /**
  * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
+ * automatically set as dma devices are registered.
  */
 enum dma_transaction_type {
 	DMA_MEMCPY,
 	DMA_XOR,
-	DMA_PQ_XOR,
-	DMA_DUAL_XOR,
-	DMA_PQ_UPDATE,
-	DMA_ZERO_SUM,
-	DMA_PQ_ZERO_SUM,
+	DMA_PQ,
+	DMA_XOR_VAL,
+	DMA_PQ_VAL,
 	DMA_MEMSET,
-	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
 	DMA_PRIVATE,
+	DMA_ASYNC_TX,
 	DMA_SLAVE,
 };
 
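
The reworked transaction types are what drivers advertise in their capability mask at registration time; DMA_ASYNC_TX, per the note above, is set by the core rather than by drivers. A sketch of a driver-probe fragment, with example_set_caps and the particular capability selection assumed for illustration rather than taken from this patch:

/* illustrative: advertise the new PQ/validation capabilities on a device */
static void example_set_caps(struct dma_device *device)
{
	dma_cap_set(DMA_MEMCPY, device->cap_mask);
	dma_cap_set(DMA_XOR, device->cap_mask);
	dma_cap_set(DMA_XOR_VAL, device->cap_mask);
	dma_cap_set(DMA_PQ, device->cap_mask);
	dma_cap_set(DMA_PQ_VAL, device->cap_mask);
	/* DMA_ASYNC_TX is set automatically when the device is registered */
}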
@@ -70,18 +71,25 @@ enum dma_transaction_type {
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
  *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
- *  acknowledges receipt, i.e. has has a chance to establish any
- *  dependency chains
+ *  acknowledges receipt, i.e. has has a chance to establish any dependency
+ *  chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
  * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
  *  (if not set, do the source dma-unmapping as page)
  * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
  *  (if not set, do the destination dma-unmapping as page)
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ *  sources that were the result of a previous operation, in the case of a PQ
+ *  operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ *  on the result of this operation
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -90,9 +98,32 @@ enum dma_ctrl_flags {
 	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
 	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+	DMA_PREP_PQ_DISABLE_P = (1 << 6),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+	DMA_PREP_CONTINUE = (1 << 8),
+	DMA_PREP_FENCE = (1 << 9),
 };
 
+/**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+	SUM_CHECK_P = 0,
+	SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
 /**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
  */
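
The sum_check_flags bits added above are how validation results come back to the caller, with P and Q failures reported independently. A sketch of checking the P result after an xor validation, using the async_xor_val() prototype from the async_tx.h hunk; check_parity and its arguments are hypothetical:

#include <linux/async_tx.h>

/* illustrative: synchronously validate a set of blocks; returns 1 on error */
static int check_parity(struct page *dest, struct page **srcs, int src_cnt,
			size_t len, addr_conv_t *scribble)
{
	enum sum_check_flags result = 0;
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_xor_val(dest, srcs, 0, src_cnt, len, &result, &submit);
	async_tx_quiesce(&tx);		/* wait for the validation to finish */

	/* SUM_CHECK_P_RESULT is 1 on an xor zero-sum error, 0 otherwise */
	return !!(result & SUM_CHECK_P_RESULT);
}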
@@ -180,8 +211,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @flags: flags to augment operation preparation, control completion, and
  *	communicate status
  * @phys: physical address of the descriptor
- * @tx_list: driver common field for operations that require multiple
- *	descriptors
  * @chan: target channel for this operation
  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
  * @callback: routine to call after this operation is complete
@@ -195,7 +224,6 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
 	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
 	dma_addr_t phys;
-	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
@@ -213,6 +241,11 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -220,7 +253,9 @@ struct dma_async_tx_descriptor {
  * @device_free_chan_resources: release DMA channel's resources
  * @device_prep_dma_memcpy: prepares a memcpy operation
  * @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
@@ -235,7 +270,13 @@ struct dma_device {
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t  cap_mask;
-	int max_xor;
+	unsigned short max_xor;
+	unsigned short max_pq;
+	u8 copy_align;
+	u8 xor_align;
+	u8 pq_align;
+	u8 fill_align;
+	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
 	struct device *dev;
@@ -249,9 +290,17 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
-		size_t len, u32 *result, unsigned long flags);
+		size_t len, enum sum_check_flags *result, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		unsigned int src_cnt, const unsigned char *scf, size_t len,
+		enum sum_check_flags *pqres, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
@@ -270,6 +319,96 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+	size_t mask;
+
+	if (!align)
+		return true;
+	mask = (1 << align) - 1;
+	if (mask & (off1 | off2 | len))
+		return false;
+	return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+				      size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+				     size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+	dma->max_pq = maxpq;
+	if (has_pq_continue)
+		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+	return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma);
+	else if (dmaf_p_disabled_continue(flags))
+		return dma_dev_to_maxpq(dma) - 1;
+	else if (dmaf_continue(flags))
+		return dma_dev_to_maxpq(dma) - 3;
+	BUG();
+}
+
 /* --- public DMA engine API --- */
 
 #ifdef CONFIG_DMA_ENGINE
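
dma_maxpq() above turns the source-slot accounting in its comment into arithmetic on the max_pq field packed by dma_set_maxpq(). A worked example under assumed values (the 8-source device and pq_slot_example are hypothetical): without native continuation support, a DMA_PREP_CONTINUE operation loses 3 slots to the reused P and Q, and only 1 slot when P generation is disabled:

/* illustrative only: exercises the arithmetic on a caller-provided device */
static void pq_slot_example(struct dma_device *dma)
{
	dma_set_maxpq(dma, 8, 0);	/* 8 PQ sources, no PQ-continue support */

	WARN_ON(dma_maxpq(dma, 0) != 8);		    /* fresh operation */
	WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE) != 5);    /* P and Q reused */
	WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE |
			       DMA_PREP_PQ_DISABLE_P) != 7); /* only Q reused */
}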
@@ -299,7 +438,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
 #define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
@@ -312,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type)
 {
 	return NULL;
 }
-#endif
+#endif /* CONFIG_ASYNC_TX_DMA */
 
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
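
The *_align fields and is_dma_*_aligned() helpers added earlier in this file express alignment as a shift: an offset/length combination is usable only if its low (1 << align) - 1 bits are all clear. A brief sketch of guarding an offload path with the copy helper; can_offload_memcpy is a hypothetical wrapper, not part of this patch:

/* illustrative: take the dma path only when offsets and length are aligned */
static bool can_offload_memcpy(struct dma_chan *chan, unsigned int dest_off,
			       unsigned int src_off, size_t len)
{
	struct dma_device *dev = chan ? chan->device : NULL;

	/* e.g. copy_align == 2 requires 4-byte aligned offsets and length */
	return dev && is_dma_copy_aligned(dev, dest_off, src_off, len);
}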
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3b6b788fe2b5..6f9623710f28 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2526,6 +2526,16 @@
 #define PCI_DEVICE_ID_INTEL_E7525_MCH	0x359e
 #define PCI_DEVICE_ID_INTEL_IOAT_CNB	0x360b
 #define PCI_DEVICE_ID_INTEL_FBD_CNB	0x360c
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF0	0x3710
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF1	0x3711
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF2	0x3712
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF3	0x3713
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF4	0x3714
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF5	0x3715
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF6	0x3716
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF7	0x3717
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF8	0x3718
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF9	0x3719
 #define PCI_DEVICE_ID_INTEL_ICH10_0	0x3a14
 #define PCI_DEVICE_ID_INTEL_ICH10_1	0x3a16
 #define PCI_DEVICE_ID_INTEL_ICH10_2	0x3a18