author     Gary R Hook <gary.hook@amd.com>           2016-04-18 10:21:44 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2016-04-20 05:50:06 -0400
commit     58ea8abf490415c390e0cc671e875510c9b66318 (patch)
tree       20a3e546b2fcf9e65410dd4ce9b088e7b2ceaecb
parent     5343e674f32fb82b7a80a24b5a84eee62d3fe624 (diff)
crypto: ccp - Register the CCP as a DMA resource
The CCP can provide DMA services to the kernel using the
device's pass-through mode. Register these services as
general-purpose DMA channels.
Changes since v2:
- Add a Signed-off-by
Changes since v1:
- Allocate memory for a string in ccp_dmaengine_register
- Ensure register/unregister calls are properly ordered
- Verified all changed files are listed in the diffstat
- Undo some superfluous changes
- Added a cc:
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
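
For context (not part of this patch): once the CCP's queues are registered as
general-purpose DMA channels, any in-kernel client can reach them through the
standard dmaengine API. The sketch below shows one hypothetical way a client
might perform a memcpy over the newly exposed capability. The function and
variable names (example_ccp_memcpy, example_dma_done) are illustrative only;
the dmaengine calls are real kernel interfaces, and src/dst are assumed to be
already DMA-mapped by the caller.

/* Hypothetical dmaengine client -- illustration only, not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/errno.h>

static void example_dma_done(void *arg)
{
	complete(arg);		/* wake the submitter below */
}

/* Copy @len bytes from @src to @dst, both already DMA-mapped by the caller. */
static int example_ccp_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	/* Any memcpy-capable channel will do; with this patch applied,
	 * the dmaengine core may hand back one of the CCP's queues.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* DMA_PREP_INTERRUPT requests the completion callback;
	 * DMA_CTRL_ACK lets the driver reclaim the descriptor afterwards.
	 */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	tx->callback = example_dma_done;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		ret = -EIO;

out:
	dma_release_channel(chan);
	return ret;
}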
-rw-r--r--   drivers/crypto/ccp/Kconfig          |   1
-rw-r--r--   drivers/crypto/ccp/Makefile         |   6
-rw-r--r--   drivers/crypto/ccp/ccp-dev-v3.c     |  11
-rw-r--r--   drivers/crypto/ccp/ccp-dev.h        |  47
-rw-r--r--   drivers/crypto/ccp/ccp-dmaengine.c  | 727
-rw-r--r--   drivers/crypto/ccp/ccp-ops.c        |  69
-rw-r--r--   include/linux/ccp.h                 |  36
7 files changed, 893 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6e37845abf8f..79cabfba2a2a 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_CCP_DD
3 | depends on CRYPTO_DEV_CCP | 3 | depends on CRYPTO_DEV_CCP |
4 | default m | 4 | default m |
5 | select HW_RANDOM | 5 | select HW_RANDOM |
6 | select DMA_ENGINE | ||
6 | select CRYPTO_SHA1 | 7 | select CRYPTO_SHA1 |
7 | select CRYPTO_SHA256 | 8 | select CRYPTO_SHA256 |
8 | help | 9 | help |
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index b750592cc936..ee4d2741b3ab 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,9 @@
1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o | 1 | obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o |
2 | ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o | 2 | ccp-objs := ccp-dev.o \ |
3 | ccp-ops.o \ | ||
4 | ccp-dev-v3.o \ | ||
5 | ccp-platform.o \ | ||
6 | ccp-dmaengine.o | ||
3 | ccp-$(CONFIG_PCI) += ccp-pci.o | 7 | ccp-$(CONFIG_PCI) += ccp-pci.o |
4 | 8 | ||
5 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o | 9 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o |
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7d5eab49179e..597fc50bdaa6 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -406,6 +406,11 @@ static int ccp_init(struct ccp_device *ccp)
406 | goto e_kthread; | 406 | goto e_kthread; |
407 | } | 407 | } |
408 | 408 | ||
409 | /* Register the DMA engine support */ | ||
410 | ret = ccp_dmaengine_register(ccp); | ||
411 | if (ret) | ||
412 | goto e_hwrng; | ||
413 | |||
409 | ccp_add_device(ccp); | 414 | ccp_add_device(ccp); |
410 | 415 | ||
411 | /* Enable interrupts */ | 416 | /* Enable interrupts */ |
@@ -413,6 +418,9 @@ static int ccp_init(struct ccp_device *ccp)
413 | 418 | ||
414 | return 0; | 419 | return 0; |
415 | 420 | ||
421 | e_hwrng: | ||
422 | hwrng_unregister(&ccp->hwrng); | ||
423 | |||
416 | e_kthread: | 424 | e_kthread: |
417 | for (i = 0; i < ccp->cmd_q_count; i++) | 425 | for (i = 0; i < ccp->cmd_q_count; i++) |
418 | if (ccp->cmd_q[i].kthread) | 426 | if (ccp->cmd_q[i].kthread) |
@@ -436,6 +444,9 @@ static void ccp_destroy(struct ccp_device *ccp)
436 | /* Remove this device from the list of available units first */ | 444 | /* Remove this device from the list of available units first */ |
437 | ccp_del_device(ccp); | 445 | ccp_del_device(ccp); |
438 | 446 | ||
447 | /* Unregister the DMA engine */ | ||
448 | ccp_dmaengine_unregister(ccp); | ||
449 | |||
439 | /* Unregister the RNG */ | 450 | /* Unregister the RNG */ |
440 | hwrng_unregister(&ccp->hwrng); | 451 | hwrng_unregister(&ccp->hwrng); |
441 | 452 | ||
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7745d0be491d..5d986c9d72eb 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -22,6 +22,9 @@
22 | #include <linux/dmapool.h> | 22 | #include <linux/dmapool.h> |
23 | #include <linux/hw_random.h> | 23 | #include <linux/hw_random.h> |
24 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/irqreturn.h> | ||
27 | #include <linux/dmaengine.h> | ||
25 | 28 | ||
26 | #define MAX_CCP_NAME_LEN 16 | 29 | #define MAX_CCP_NAME_LEN 16 |
27 | #define MAX_DMAPOOL_NAME_LEN 32 | 30 | #define MAX_DMAPOOL_NAME_LEN 32 |
@@ -167,6 +170,39 @@ extern struct ccp_vdata ccpv3;
167 | struct ccp_device; | 170 | struct ccp_device; |
168 | struct ccp_cmd; | 171 | struct ccp_cmd; |
169 | 172 | ||
173 | struct ccp_dma_cmd { | ||
174 | struct list_head entry; | ||
175 | |||
176 | struct ccp_cmd ccp_cmd; | ||
177 | }; | ||
178 | |||
179 | struct ccp_dma_desc { | ||
180 | struct list_head entry; | ||
181 | |||
182 | struct ccp_device *ccp; | ||
183 | |||
184 | struct list_head pending; | ||
185 | struct list_head active; | ||
186 | |||
187 | enum dma_status status; | ||
188 | struct dma_async_tx_descriptor tx_desc; | ||
189 | size_t len; | ||
190 | }; | ||
191 | |||
192 | struct ccp_dma_chan { | ||
193 | struct ccp_device *ccp; | ||
194 | |||
195 | spinlock_t lock; | ||
196 | struct list_head pending; | ||
197 | struct list_head active; | ||
198 | struct list_head complete; | ||
199 | |||
200 | struct tasklet_struct cleanup_tasklet; | ||
201 | |||
202 | enum dma_status status; | ||
203 | struct dma_chan dma_chan; | ||
204 | }; | ||
205 | |||
170 | struct ccp_cmd_queue { | 206 | struct ccp_cmd_queue { |
171 | struct ccp_device *ccp; | 207 | struct ccp_device *ccp; |
172 | 208 | ||
@@ -261,6 +297,14 @@ struct ccp_device {
261 | unsigned int hwrng_retries; | 297 | unsigned int hwrng_retries; |
262 | 298 | ||
263 | /* | 299 | /* |
300 | * Support for the CCP DMA capabilities | ||
301 | */ | ||
302 | struct dma_device dma_dev; | ||
303 | struct ccp_dma_chan *ccp_dma_chan; | ||
304 | struct kmem_cache *dma_cmd_cache; | ||
305 | struct kmem_cache *dma_desc_cache; | ||
306 | |||
307 | /* | ||
264 | * A counter used to generate job-ids for cmds submitted to the CCP | 308 | * A counter used to generate job-ids for cmds submitted to the CCP |
265 | */ | 309 | */ |
266 | atomic_t current_id ____cacheline_aligned; | 310 | atomic_t current_id ____cacheline_aligned; |
@@ -418,4 +462,7 @@ int ccp_cmd_queue_thread(void *data);
418 | 462 | ||
419 | int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); | 463 | int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); |
420 | 464 | ||
465 | int ccp_dmaengine_register(struct ccp_device *ccp); | ||
466 | void ccp_dmaengine_unregister(struct ccp_device *ccp); | ||
467 | |||
421 | #endif | 468 | #endif |
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
new file mode 100644
index 000000000000..94f77b0f9ae7
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -0,0 +1,727 @@
1 | /* | ||
2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
3 | * | ||
4 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
5 | * | ||
6 | * Author: Gary R Hook <gary.hook@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <linux/ccp.h> | ||
18 | |||
19 | #include "ccp-dev.h" | ||
20 | #include "../../dma/dmaengine.h" | ||
21 | |||
22 | #define CCP_DMA_WIDTH(_mask) \ | ||
23 | ({ \ | ||
24 | u64 mask = _mask + 1; \ | ||
25 | (mask == 0) ? 64 : fls64(mask); \ | ||
26 | }) | ||
27 | |||
28 | static void ccp_free_cmd_resources(struct ccp_device *ccp, | ||
29 | struct list_head *list) | ||
30 | { | ||
31 | struct ccp_dma_cmd *cmd, *ctmp; | ||
32 | |||
33 | list_for_each_entry_safe(cmd, ctmp, list, entry) { | ||
34 | list_del(&cmd->entry); | ||
35 | kmem_cache_free(ccp->dma_cmd_cache, cmd); | ||
36 | } | ||
37 | } | ||
38 | |||
39 | static void ccp_free_desc_resources(struct ccp_device *ccp, | ||
40 | struct list_head *list) | ||
41 | { | ||
42 | struct ccp_dma_desc *desc, *dtmp; | ||
43 | |||
44 | list_for_each_entry_safe(desc, dtmp, list, entry) { | ||
45 | ccp_free_cmd_resources(ccp, &desc->active); | ||
46 | ccp_free_cmd_resources(ccp, &desc->pending); | ||
47 | |||
48 | list_del(&desc->entry); | ||
49 | kmem_cache_free(ccp->dma_desc_cache, desc); | ||
50 | } | ||
51 | } | ||
52 | |||
53 | static void ccp_free_chan_resources(struct dma_chan *dma_chan) | ||
54 | { | ||
55 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
56 | dma_chan); | ||
57 | unsigned long flags; | ||
58 | |||
59 | dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan); | ||
60 | |||
61 | spin_lock_irqsave(&chan->lock, flags); | ||
62 | |||
63 | ccp_free_desc_resources(chan->ccp, &chan->complete); | ||
64 | ccp_free_desc_resources(chan->ccp, &chan->active); | ||
65 | ccp_free_desc_resources(chan->ccp, &chan->pending); | ||
66 | |||
67 | spin_unlock_irqrestore(&chan->lock, flags); | ||
68 | } | ||
69 | |||
70 | static void ccp_cleanup_desc_resources(struct ccp_device *ccp, | ||
71 | struct list_head *list) | ||
72 | { | ||
73 | struct ccp_dma_desc *desc, *dtmp; | ||
74 | |||
75 | list_for_each_entry_safe_reverse(desc, dtmp, list, entry) { | ||
76 | if (!async_tx_test_ack(&desc->tx_desc)) | ||
77 | continue; | ||
78 | |||
79 | dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc); | ||
80 | |||
81 | ccp_free_cmd_resources(ccp, &desc->active); | ||
82 | ccp_free_cmd_resources(ccp, &desc->pending); | ||
83 | |||
84 | list_del(&desc->entry); | ||
85 | kmem_cache_free(ccp->dma_desc_cache, desc); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static void ccp_do_cleanup(unsigned long data) | ||
90 | { | ||
91 | struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data; | ||
92 | unsigned long flags; | ||
93 | |||
94 | dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__, | ||
95 | dma_chan_name(&chan->dma_chan)); | ||
96 | |||
97 | spin_lock_irqsave(&chan->lock, flags); | ||
98 | |||
99 | ccp_cleanup_desc_resources(chan->ccp, &chan->complete); | ||
100 | |||
101 | spin_unlock_irqrestore(&chan->lock, flags); | ||
102 | } | ||
103 | |||
104 | static int ccp_issue_next_cmd(struct ccp_dma_desc *desc) | ||
105 | { | ||
106 | struct ccp_dma_cmd *cmd; | ||
107 | int ret; | ||
108 | |||
109 | cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry); | ||
110 | list_move(&cmd->entry, &desc->active); | ||
111 | |||
112 | dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__, | ||
113 | desc->tx_desc.cookie, cmd); | ||
114 | |||
115 | ret = ccp_enqueue_cmd(&cmd->ccp_cmd); | ||
116 | if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY)) | ||
117 | return 0; | ||
118 | |||
119 | dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__, | ||
120 | ret, desc->tx_desc.cookie, cmd); | ||
121 | |||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | static void ccp_free_active_cmd(struct ccp_dma_desc *desc) | ||
126 | { | ||
127 | struct ccp_dma_cmd *cmd; | ||
128 | |||
129 | cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd, | ||
130 | entry); | ||
131 | if (!cmd) | ||
132 | return; | ||
133 | |||
134 | dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n", | ||
135 | __func__, desc->tx_desc.cookie, cmd); | ||
136 | |||
137 | list_del(&cmd->entry); | ||
138 | kmem_cache_free(desc->ccp->dma_cmd_cache, cmd); | ||
139 | } | ||
140 | |||
141 | static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan, | ||
142 | struct ccp_dma_desc *desc) | ||
143 | { | ||
144 | /* Move current DMA descriptor to the complete list */ | ||
145 | if (desc) | ||
146 | list_move(&desc->entry, &chan->complete); | ||
147 | |||
148 | /* Get the next DMA descriptor on the active list */ | ||
149 | desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc, | ||
150 | entry); | ||
151 | |||
152 | return desc; | ||
153 | } | ||
154 | |||
155 | static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan, | ||
156 | struct ccp_dma_desc *desc) | ||
157 | { | ||
158 | struct dma_async_tx_descriptor *tx_desc; | ||
159 | unsigned long flags; | ||
160 | |||
161 | /* Loop over descriptors until one is found with commands */ | ||
162 | do { | ||
163 | if (desc) { | ||
164 | /* Remove the DMA command from the list and free it */ | ||
165 | ccp_free_active_cmd(desc); | ||
166 | |||
167 | if (!list_empty(&desc->pending)) { | ||
168 | /* No errors, keep going */ | ||
169 | if (desc->status != DMA_ERROR) | ||
170 | return desc; | ||
171 | |||
172 | /* Error, free remaining commands and move on */ | ||
173 | ccp_free_cmd_resources(desc->ccp, | ||
174 | &desc->pending); | ||
175 | } | ||
176 | |||
177 | tx_desc = &desc->tx_desc; | ||
178 | } else { | ||
179 | tx_desc = NULL; | ||
180 | } | ||
181 | |||
182 | spin_lock_irqsave(&chan->lock, flags); | ||
183 | |||
184 | if (desc) { | ||
185 | if (desc->status != DMA_ERROR) | ||
186 | desc->status = DMA_COMPLETE; | ||
187 | |||
188 | dev_dbg(desc->ccp->dev, | ||
189 | "%s - tx %d complete, status=%u\n", __func__, | ||
190 | desc->tx_desc.cookie, desc->status); | ||
191 | |||
192 | dma_cookie_complete(tx_desc); | ||
193 | } | ||
194 | |||
195 | desc = __ccp_next_dma_desc(chan, desc); | ||
196 | |||
197 | spin_unlock_irqrestore(&chan->lock, flags); | ||
198 | |||
199 | if (tx_desc) { | ||
200 | if (tx_desc->callback && | ||
201 | (tx_desc->flags & DMA_PREP_INTERRUPT)) | ||
202 | tx_desc->callback(tx_desc->callback_param); | ||
203 | |||
204 | dma_run_dependencies(tx_desc); | ||
205 | } | ||
206 | } while (desc); | ||
207 | |||
208 | return NULL; | ||
209 | } | ||
210 | |||
211 | static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan) | ||
212 | { | ||
213 | struct ccp_dma_desc *desc; | ||
214 | |||
215 | if (list_empty(&chan->pending)) | ||
216 | return NULL; | ||
217 | |||
218 | desc = list_empty(&chan->active) | ||
219 | ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry) | ||
220 | : NULL; | ||
221 | |||
222 | list_splice_tail_init(&chan->pending, &chan->active); | ||
223 | |||
224 | return desc; | ||
225 | } | ||
226 | |||
227 | static void ccp_cmd_callback(void *data, int err) | ||
228 | { | ||
229 | struct ccp_dma_desc *desc = data; | ||
230 | struct ccp_dma_chan *chan; | ||
231 | int ret; | ||
232 | |||
233 | if (err == -EINPROGRESS) | ||
234 | return; | ||
235 | |||
236 | chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan, | ||
237 | dma_chan); | ||
238 | |||
239 | dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n", | ||
240 | __func__, desc->tx_desc.cookie, err); | ||
241 | |||
242 | if (err) | ||
243 | desc->status = DMA_ERROR; | ||
244 | |||
245 | while (true) { | ||
246 | /* Check for DMA descriptor completion */ | ||
247 | desc = ccp_handle_active_desc(chan, desc); | ||
248 | |||
249 | /* Don't submit cmd if no descriptor or DMA is paused */ | ||
250 | if (!desc || (chan->status == DMA_PAUSED)) | ||
251 | break; | ||
252 | |||
253 | ret = ccp_issue_next_cmd(desc); | ||
254 | if (!ret) | ||
255 | break; | ||
256 | |||
257 | desc->status = DMA_ERROR; | ||
258 | } | ||
259 | |||
260 | tasklet_schedule(&chan->cleanup_tasklet); | ||
261 | } | ||
262 | |||
263 | static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc) | ||
264 | { | ||
265 | struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc, | ||
266 | tx_desc); | ||
267 | struct ccp_dma_chan *chan; | ||
268 | dma_cookie_t cookie; | ||
269 | unsigned long flags; | ||
270 | |||
271 | chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan); | ||
272 | |||
273 | spin_lock_irqsave(&chan->lock, flags); | ||
274 | |||
275 | cookie = dma_cookie_assign(tx_desc); | ||
276 | list_add_tail(&desc->entry, &chan->pending); | ||
277 | |||
278 | spin_unlock_irqrestore(&chan->lock, flags); | ||
279 | |||
280 | dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n", | ||
281 | __func__, cookie); | ||
282 | |||
283 | return cookie; | ||
284 | } | ||
285 | |||
286 | static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan) | ||
287 | { | ||
288 | struct ccp_dma_cmd *cmd; | ||
289 | |||
290 | cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT); | ||
291 | if (cmd) | ||
292 | memset(cmd, 0, sizeof(*cmd)); | ||
293 | |||
294 | return cmd; | ||
295 | } | ||
296 | |||
297 | static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, | ||
298 | unsigned long flags) | ||
299 | { | ||
300 | struct ccp_dma_desc *desc; | ||
301 | |||
302 | desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); | ||
303 | if (!desc) | ||
304 | return NULL; | ||
305 | |||
306 | memset(desc, 0, sizeof(*desc)); | ||
307 | |||
308 | dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan); | ||
309 | desc->tx_desc.flags = flags; | ||
310 | desc->tx_desc.tx_submit = ccp_tx_submit; | ||
311 | desc->ccp = chan->ccp; | ||
312 | INIT_LIST_HEAD(&desc->pending); | ||
313 | INIT_LIST_HEAD(&desc->active); | ||
314 | desc->status = DMA_IN_PROGRESS; | ||
315 | |||
316 | return desc; | ||
317 | } | ||
318 | |||
319 | static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, | ||
320 | struct scatterlist *dst_sg, | ||
321 | unsigned int dst_nents, | ||
322 | struct scatterlist *src_sg, | ||
323 | unsigned int src_nents, | ||
324 | unsigned long flags) | ||
325 | { | ||
326 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
327 | dma_chan); | ||
328 | struct ccp_device *ccp = chan->ccp; | ||
329 | struct ccp_dma_desc *desc; | ||
330 | struct ccp_dma_cmd *cmd; | ||
331 | struct ccp_cmd *ccp_cmd; | ||
332 | struct ccp_passthru_nomap_engine *ccp_pt; | ||
333 | unsigned int src_offset, src_len; | ||
334 | unsigned int dst_offset, dst_len; | ||
335 | unsigned int len; | ||
336 | unsigned long sflags; | ||
337 | size_t total_len; | ||
338 | |||
339 | if (!dst_sg || !src_sg) | ||
340 | return NULL; | ||
341 | |||
342 | if (!dst_nents || !src_nents) | ||
343 | return NULL; | ||
344 | |||
345 | desc = ccp_alloc_dma_desc(chan, flags); | ||
346 | if (!desc) | ||
347 | return NULL; | ||
348 | |||
349 | total_len = 0; | ||
350 | |||
351 | src_len = sg_dma_len(src_sg); | ||
352 | src_offset = 0; | ||
353 | |||
354 | dst_len = sg_dma_len(dst_sg); | ||
355 | dst_offset = 0; | ||
356 | |||
357 | while (true) { | ||
358 | if (!src_len) { | ||
359 | src_nents--; | ||
360 | if (!src_nents) | ||
361 | break; | ||
362 | |||
363 | src_sg = sg_next(src_sg); | ||
364 | if (!src_sg) | ||
365 | break; | ||
366 | |||
367 | src_len = sg_dma_len(src_sg); | ||
368 | src_offset = 0; | ||
369 | continue; | ||
370 | } | ||
371 | |||
372 | if (!dst_len) { | ||
373 | dst_nents--; | ||
374 | if (!dst_nents) | ||
375 | break; | ||
376 | |||
377 | dst_sg = sg_next(dst_sg); | ||
378 | if (!dst_sg) | ||
379 | break; | ||
380 | |||
381 | dst_len = sg_dma_len(dst_sg); | ||
382 | dst_offset = 0; | ||
383 | continue; | ||
384 | } | ||
385 | |||
386 | len = min(dst_len, src_len); | ||
387 | |||
388 | cmd = ccp_alloc_dma_cmd(chan); | ||
389 | if (!cmd) | ||
390 | goto err; | ||
391 | |||
392 | ccp_cmd = &cmd->ccp_cmd; | ||
393 | ccp_pt = &ccp_cmd->u.passthru_nomap; | ||
394 | ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; | ||
395 | ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; | ||
396 | ccp_cmd->engine = CCP_ENGINE_PASSTHRU; | ||
397 | ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP; | ||
398 | ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; | ||
399 | ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset; | ||
400 | ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset; | ||
401 | ccp_pt->src_len = len; | ||
402 | ccp_pt->final = 1; | ||
403 | ccp_cmd->callback = ccp_cmd_callback; | ||
404 | ccp_cmd->data = desc; | ||
405 | |||
406 | list_add_tail(&cmd->entry, &desc->pending); | ||
407 | |||
408 | dev_dbg(ccp->dev, | ||
409 | "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__, | ||
410 | cmd, &ccp_pt->src_dma, | ||
411 | &ccp_pt->dst_dma, ccp_pt->src_len); | ||
412 | |||
413 | total_len += len; | ||
414 | |||
415 | src_len -= len; | ||
416 | src_offset += len; | ||
417 | |||
418 | dst_len -= len; | ||
419 | dst_offset += len; | ||
420 | } | ||
421 | |||
422 | desc->len = total_len; | ||
423 | |||
424 | if (list_empty(&desc->pending)) | ||
425 | goto err; | ||
426 | |||
427 | dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc); | ||
428 | |||
429 | spin_lock_irqsave(&chan->lock, sflags); | ||
430 | |||
431 | list_add_tail(&desc->entry, &chan->pending); | ||
432 | |||
433 | spin_unlock_irqrestore(&chan->lock, sflags); | ||
434 | |||
435 | return desc; | ||
436 | |||
437 | err: | ||
438 | ccp_free_cmd_resources(ccp, &desc->pending); | ||
439 | kmem_cache_free(ccp->dma_desc_cache, desc); | ||
440 | |||
441 | return NULL; | ||
442 | } | ||
443 | |||
444 | static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy( | ||
445 | struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len, | ||
446 | unsigned long flags) | ||
447 | { | ||
448 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
449 | dma_chan); | ||
450 | struct ccp_dma_desc *desc; | ||
451 | struct scatterlist dst_sg, src_sg; | ||
452 | |||
453 | dev_dbg(chan->ccp->dev, | ||
454 | "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n", | ||
455 | __func__, &src, &dst, len, flags); | ||
456 | |||
457 | sg_init_table(&dst_sg, 1); | ||
458 | sg_dma_address(&dst_sg) = dst; | ||
459 | sg_dma_len(&dst_sg) = len; | ||
460 | |||
461 | sg_init_table(&src_sg, 1); | ||
462 | sg_dma_address(&src_sg) = src; | ||
463 | sg_dma_len(&src_sg) = len; | ||
464 | |||
465 | desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags); | ||
466 | if (!desc) | ||
467 | return NULL; | ||
468 | |||
469 | return &desc->tx_desc; | ||
470 | } | ||
471 | |||
472 | static struct dma_async_tx_descriptor *ccp_prep_dma_sg( | ||
473 | struct dma_chan *dma_chan, struct scatterlist *dst_sg, | ||
474 | unsigned int dst_nents, struct scatterlist *src_sg, | ||
475 | unsigned int src_nents, unsigned long flags) | ||
476 | { | ||
477 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
478 | dma_chan); | ||
479 | struct ccp_dma_desc *desc; | ||
480 | |||
481 | dev_dbg(chan->ccp->dev, | ||
482 | "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n", | ||
483 | __func__, src_sg, src_nents, dst_sg, dst_nents, flags); | ||
484 | |||
485 | desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents, | ||
486 | flags); | ||
487 | if (!desc) | ||
488 | return NULL; | ||
489 | |||
490 | return &desc->tx_desc; | ||
491 | } | ||
492 | |||
493 | static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt( | ||
494 | struct dma_chan *dma_chan, unsigned long flags) | ||
495 | { | ||
496 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
497 | dma_chan); | ||
498 | struct ccp_dma_desc *desc; | ||
499 | |||
500 | desc = ccp_alloc_dma_desc(chan, flags); | ||
501 | if (!desc) | ||
502 | return NULL; | ||
503 | |||
504 | return &desc->tx_desc; | ||
505 | } | ||
506 | |||
507 | static void ccp_issue_pending(struct dma_chan *dma_chan) | ||
508 | { | ||
509 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
510 | dma_chan); | ||
511 | struct ccp_dma_desc *desc; | ||
512 | unsigned long flags; | ||
513 | |||
514 | dev_dbg(chan->ccp->dev, "%s\n", __func__); | ||
515 | |||
516 | spin_lock_irqsave(&chan->lock, flags); | ||
517 | |||
518 | desc = __ccp_pending_to_active(chan); | ||
519 | |||
520 | spin_unlock_irqrestore(&chan->lock, flags); | ||
521 | |||
522 | /* If there was nothing active, start processing */ | ||
523 | if (desc) | ||
524 | ccp_cmd_callback(desc, 0); | ||
525 | } | ||
526 | |||
527 | static enum dma_status ccp_tx_status(struct dma_chan *dma_chan, | ||
528 | dma_cookie_t cookie, | ||
529 | struct dma_tx_state *state) | ||
530 | { | ||
531 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
532 | dma_chan); | ||
533 | struct ccp_dma_desc *desc; | ||
534 | enum dma_status ret; | ||
535 | unsigned long flags; | ||
536 | |||
537 | if (chan->status == DMA_PAUSED) { | ||
538 | ret = DMA_PAUSED; | ||
539 | goto out; | ||
540 | } | ||
541 | |||
542 | ret = dma_cookie_status(dma_chan, cookie, state); | ||
543 | if (ret == DMA_COMPLETE) { | ||
544 | spin_lock_irqsave(&chan->lock, flags); | ||
545 | |||
546 | /* Get status from complete chain, if still there */ | ||
547 | list_for_each_entry(desc, &chan->complete, entry) { | ||
548 | if (desc->tx_desc.cookie != cookie) | ||
549 | continue; | ||
550 | |||
551 | ret = desc->status; | ||
552 | break; | ||
553 | } | ||
554 | |||
555 | spin_unlock_irqrestore(&chan->lock, flags); | ||
556 | } | ||
557 | |||
558 | out: | ||
559 | dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret); | ||
560 | |||
561 | return ret; | ||
562 | } | ||
563 | |||
564 | static int ccp_pause(struct dma_chan *dma_chan) | ||
565 | { | ||
566 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
567 | dma_chan); | ||
568 | |||
569 | chan->status = DMA_PAUSED; | ||
570 | |||
571 | /*TODO: Wait for active DMA to complete before returning? */ | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | static int ccp_resume(struct dma_chan *dma_chan) | ||
577 | { | ||
578 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
579 | dma_chan); | ||
580 | struct ccp_dma_desc *desc; | ||
581 | unsigned long flags; | ||
582 | |||
583 | spin_lock_irqsave(&chan->lock, flags); | ||
584 | |||
585 | desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc, | ||
586 | entry); | ||
587 | |||
588 | spin_unlock_irqrestore(&chan->lock, flags); | ||
589 | |||
590 | /* Indicate the channel is running again */ | ||
591 | chan->status = DMA_IN_PROGRESS; | ||
592 | |||
593 | /* If there was something active, re-start */ | ||
594 | if (desc) | ||
595 | ccp_cmd_callback(desc, 0); | ||
596 | |||
597 | return 0; | ||
598 | } | ||
599 | |||
600 | static int ccp_terminate_all(struct dma_chan *dma_chan) | ||
601 | { | ||
602 | struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, | ||
603 | dma_chan); | ||
604 | unsigned long flags; | ||
605 | |||
606 | dev_dbg(chan->ccp->dev, "%s\n", __func__); | ||
607 | |||
608 | /*TODO: Wait for active DMA to complete before continuing */ | ||
609 | |||
610 | spin_lock_irqsave(&chan->lock, flags); | ||
611 | |||
612 | /*TODO: Purge the complete list? */ | ||
613 | ccp_free_desc_resources(chan->ccp, &chan->active); | ||
614 | ccp_free_desc_resources(chan->ccp, &chan->pending); | ||
615 | |||
616 | spin_unlock_irqrestore(&chan->lock, flags); | ||
617 | |||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | int ccp_dmaengine_register(struct ccp_device *ccp) | ||
622 | { | ||
623 | struct ccp_dma_chan *chan; | ||
624 | struct dma_device *dma_dev = &ccp->dma_dev; | ||
625 | struct dma_chan *dma_chan; | ||
626 | char *dma_cmd_cache_name; | ||
627 | char *dma_desc_cache_name; | ||
628 | unsigned int i; | ||
629 | int ret; | ||
630 | |||
631 | ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, | ||
632 | sizeof(*(ccp->ccp_dma_chan)), | ||
633 | GFP_KERNEL); | ||
634 | if (!ccp->ccp_dma_chan) | ||
635 | return -ENOMEM; | ||
636 | |||
637 | dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, | ||
638 | "%s-dmaengine-cmd-cache", | ||
639 | ccp->name); | ||
640 | if (!dma_cmd_cache_name) | ||
641 | return -ENOMEM; | ||
642 | |||
643 | ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name, | ||
644 | sizeof(struct ccp_dma_cmd), | ||
645 | sizeof(void *), | ||
646 | SLAB_HWCACHE_ALIGN, NULL); | ||
647 | if (!ccp->dma_cmd_cache) | ||
648 | return -ENOMEM; | ||
649 | |||
650 | dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, | ||
651 | "%s-dmaengine-desc-cache", | ||
652 | ccp->name); | ||
653 | if (!dma_desc_cache_name) | ||
654 | return -ENOMEM; | ||
655 | ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, | ||
656 | sizeof(struct ccp_dma_desc), | ||
657 | sizeof(void *), | ||
658 | SLAB_HWCACHE_ALIGN, NULL); | ||
659 | if (!ccp->dma_desc_cache) { | ||
660 | ret = -ENOMEM; | ||
661 | goto err_cache; | ||
662 | } | ||
663 | |||
664 | dma_dev->dev = ccp->dev; | ||
665 | dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev)); | ||
666 | dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev)); | ||
667 | dma_dev->directions = DMA_MEM_TO_MEM; | ||
668 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
669 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
670 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
671 | dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); | ||
672 | |||
673 | INIT_LIST_HEAD(&dma_dev->channels); | ||
674 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
675 | chan = ccp->ccp_dma_chan + i; | ||
676 | dma_chan = &chan->dma_chan; | ||
677 | |||
678 | chan->ccp = ccp; | ||
679 | |||
680 | spin_lock_init(&chan->lock); | ||
681 | INIT_LIST_HEAD(&chan->pending); | ||
682 | INIT_LIST_HEAD(&chan->active); | ||
683 | INIT_LIST_HEAD(&chan->complete); | ||
684 | |||
685 | tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup, | ||
686 | (unsigned long)chan); | ||
687 | |||
688 | dma_chan->device = dma_dev; | ||
689 | dma_cookie_init(dma_chan); | ||
690 | |||
691 | list_add_tail(&dma_chan->device_node, &dma_dev->channels); | ||
692 | } | ||
693 | |||
694 | dma_dev->device_free_chan_resources = ccp_free_chan_resources; | ||
695 | dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy; | ||
696 | dma_dev->device_prep_dma_sg = ccp_prep_dma_sg; | ||
697 | dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt; | ||
698 | dma_dev->device_issue_pending = ccp_issue_pending; | ||
699 | dma_dev->device_tx_status = ccp_tx_status; | ||
700 | dma_dev->device_pause = ccp_pause; | ||
701 | dma_dev->device_resume = ccp_resume; | ||
702 | dma_dev->device_terminate_all = ccp_terminate_all; | ||
703 | |||
704 | ret = dma_async_device_register(dma_dev); | ||
705 | if (ret) | ||
706 | goto err_reg; | ||
707 | |||
708 | return 0; | ||
709 | |||
710 | err_reg: | ||
711 | kmem_cache_destroy(ccp->dma_desc_cache); | ||
712 | |||
713 | err_cache: | ||
714 | kmem_cache_destroy(ccp->dma_cmd_cache); | ||
715 | |||
716 | return ret; | ||
717 | } | ||
718 | |||
719 | void ccp_dmaengine_unregister(struct ccp_device *ccp) | ||
720 | { | ||
721 | struct dma_device *dma_dev = &ccp->dma_dev; | ||
722 | |||
723 | dma_async_device_unregister(dma_dev); | ||
724 | |||
725 | kmem_cache_destroy(ccp->dma_desc_cache); | ||
726 | kmem_cache_destroy(ccp->dma_cmd_cache); | ||
727 | } | ||
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index eefdf595f758..ffa2891035ac 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1427,6 +1427,70 @@ e_mask:
1427 | return ret; | 1427 | return ret; |
1428 | } | 1428 | } |
1429 | 1429 | ||
1430 | static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | ||
1431 | struct ccp_cmd *cmd) | ||
1432 | { | ||
1433 | struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; | ||
1434 | struct ccp_dm_workarea mask; | ||
1435 | struct ccp_op op; | ||
1436 | int ret; | ||
1437 | |||
1438 | if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) | ||
1439 | return -EINVAL; | ||
1440 | |||
1441 | if (!pt->src_dma || !pt->dst_dma) | ||
1442 | return -EINVAL; | ||
1443 | |||
1444 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { | ||
1445 | if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) | ||
1446 | return -EINVAL; | ||
1447 | if (!pt->mask) | ||
1448 | return -EINVAL; | ||
1449 | } | ||
1450 | |||
1451 | BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); | ||
1452 | |||
1453 | memset(&op, 0, sizeof(op)); | ||
1454 | op.cmd_q = cmd_q; | ||
1455 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | ||
1456 | |||
1457 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { | ||
1458 | /* Load the mask */ | ||
1459 | op.ksb_key = cmd_q->ksb_key; | ||
1460 | |||
1461 | mask.length = pt->mask_len; | ||
1462 | mask.dma.address = pt->mask; | ||
1463 | mask.dma.length = pt->mask_len; | ||
1464 | |||
1465 | ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, | ||
1466 | CCP_PASSTHRU_BYTESWAP_NOOP); | ||
1467 | if (ret) { | ||
1468 | cmd->engine_error = cmd_q->cmd_error; | ||
1469 | return ret; | ||
1470 | } | ||
1471 | } | ||
1472 | |||
1473 | /* Send data to the CCP Passthru engine */ | ||
1474 | op.eom = 1; | ||
1475 | op.soc = 1; | ||
1476 | |||
1477 | op.src.type = CCP_MEMTYPE_SYSTEM; | ||
1478 | op.src.u.dma.address = pt->src_dma; | ||
1479 | op.src.u.dma.offset = 0; | ||
1480 | op.src.u.dma.length = pt->src_len; | ||
1481 | |||
1482 | op.dst.type = CCP_MEMTYPE_SYSTEM; | ||
1483 | op.dst.u.dma.address = pt->dst_dma; | ||
1484 | op.dst.u.dma.offset = 0; | ||
1485 | op.dst.u.dma.length = pt->src_len; | ||
1486 | |||
1487 | ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); | ||
1488 | if (ret) | ||
1489 | cmd->engine_error = cmd_q->cmd_error; | ||
1490 | |||
1491 | return ret; | ||
1492 | } | ||
1493 | |||
1430 | static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | 1494 | static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) |
1431 | { | 1495 | { |
1432 | struct ccp_ecc_engine *ecc = &cmd->u.ecc; | 1496 | struct ccp_ecc_engine *ecc = &cmd->u.ecc; |
@@ -1762,7 +1826,10 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1762 | ret = ccp_run_rsa_cmd(cmd_q, cmd); | 1826 | ret = ccp_run_rsa_cmd(cmd_q, cmd); |
1763 | break; | 1827 | break; |
1764 | case CCP_ENGINE_PASSTHRU: | 1828 | case CCP_ENGINE_PASSTHRU: |
1765 | ret = ccp_run_passthru_cmd(cmd_q, cmd); | 1829 | if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) |
1830 | ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd); | ||
1831 | else | ||
1832 | ret = ccp_run_passthru_cmd(cmd_q, cmd); | ||
1766 | break; | 1833 | break; |
1767 | case CCP_ENGINE_ECC: | 1834 | case CCP_ENGINE_ECC: |
1768 | ret = ccp_run_ecc_cmd(cmd_q, cmd); | 1835 | ret = ccp_run_ecc_cmd(cmd_q, cmd); |
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 915af3095b39..7c2bb27c067c 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,9 +1,10 @@
1 | /* | 1 | /* |
2 | * AMD Cryptographic Coprocessor (CCP) driver | 2 | * AMD Cryptographic Coprocessor (CCP) driver |
3 | * | 3 | * |
4 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
5 | * | 5 | * |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -381,6 +382,35 @@ struct ccp_passthru_engine {
381 | u32 final; | 382 | u32 final; |
382 | }; | 383 | }; |
383 | 384 | ||
385 | /** | ||
386 | * struct ccp_passthru_nomap_engine - CCP pass-through operation | ||
387 | * without performing DMA mapping | ||
388 | * @bit_mod: bitwise operation to perform | ||
389 | * @byte_swap: byteswap operation to perform | ||
390 | * @mask: mask to be applied to data | ||
391 | * @mask_len: length in bytes of mask | ||
392 | * @src: data to be used for this operation | ||
393 | * @dst: data produced by this operation | ||
394 | * @src_len: length in bytes of data used for this operation | ||
395 | * @final: indicate final pass-through operation | ||
396 | * | ||
397 | * Variables required to be set when calling ccp_enqueue_cmd(): | ||
398 | * - bit_mod, byte_swap, src, dst, src_len | ||
399 | * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP | ||
400 | */ | ||
401 | struct ccp_passthru_nomap_engine { | ||
402 | enum ccp_passthru_bitwise bit_mod; | ||
403 | enum ccp_passthru_byteswap byte_swap; | ||
404 | |||
405 | dma_addr_t mask; | ||
406 | u32 mask_len; /* In bytes */ | ||
407 | |||
408 | dma_addr_t src_dma, dst_dma; | ||
409 | u64 src_len; /* In bytes */ | ||
410 | |||
411 | u32 final; | ||
412 | }; | ||
413 | |||
384 | /***** ECC engine *****/ | 414 | /***** ECC engine *****/ |
385 | #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ | 415 | #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ |
386 | #define CCP_ECC_MAX_OPERANDS 6 | 416 | #define CCP_ECC_MAX_OPERANDS 6 |
@@ -522,7 +552,8 @@ enum ccp_engine {
522 | }; | 552 | }; |
523 | 553 | ||
524 | /* Flag values for flags member of ccp_cmd */ | 554 | /* Flag values for flags member of ccp_cmd */ |
525 | #define CCP_CMD_MAY_BACKLOG 0x00000001 | 555 | #define CCP_CMD_MAY_BACKLOG 0x00000001 |
556 | #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 | ||
526 | 557 | ||
527 | /** | 558 | /** |
528 | * struct ccp_cmd - CPP operation request | 559 | * struct ccp_cmd - CPP operation request |
@@ -562,6 +593,7 @@ struct ccp_cmd {
562 | struct ccp_sha_engine sha; | 593 | struct ccp_sha_engine sha; |
563 | struct ccp_rsa_engine rsa; | 594 | struct ccp_rsa_engine rsa; |
564 | struct ccp_passthru_engine passthru; | 595 | struct ccp_passthru_engine passthru; |
596 | struct ccp_passthru_nomap_engine passthru_nomap; | ||
565 | struct ccp_ecc_engine ecc; | 597 | struct ccp_ecc_engine ecc; |
566 | } u; | 598 | } u; |
567 | 599 | ||