author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 18:56:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-25 18:56:15 -0500
commit		32dc43e40a2707d0cb1ab8768d080c3e9bcfed52 (patch)
tree		415f3a1935fba0db2f0410360983587bf65ee712 /drivers
parent		d414c104e26fd3b597f855cc29473a8b1527fb4c (diff)
parent		8fd61d34226014fe7886babfca6f45a7eff89d25 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.9:
- Added accelerated implementation of crc32 using pclmulqdq.
- Added test vector for fcrypt.
- Added support for OMAP4/AM33XX cipher and hash.
- Fixed loose crypto_user input checks.
- Misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (43 commits)
crypto: user - ensure user supplied strings are nul-terminated
crypto: user - fix empty string test in report API
crypto: user - fix info leaks in report API
crypto: caam - Added property fsl,sec-era in SEC4.0 device tree binding.
crypto: use ERR_CAST
crypto: atmel-aes - adjust duplicate test
crypto: crc32-pclmul - Kill warning on x86-32
crypto: x86/twofish - assembler clean-ups: use ENTRY/ENDPROC, localize jump labels
crypto: x86/sha1 - assembler clean-ups: use ENTRY/ENDPROC
crypto: x86/serpent - use ENTRY/ENDPROC for assember functions and localize jump targets
crypto: x86/salsa20 - assembler cleanup, use ENTRY/ENDPROC for assember functions and rename ECRYPT_* to salsa20_*
crypto: x86/ghash - assembler clean-up: use ENDPROC at end of assember functions
crypto: x86/crc32c - assembler clean-up: use ENTRY/ENDPROC
crypto: cast6-avx: use ENTRY()/ENDPROC() for assembler functions
crypto: cast5-avx: use ENTRY()/ENDPROC() for assembler functions and localize jump targets
crypto: camellia-x86_64/aes-ni: use ENTRY()/ENDPROC() for assembler functions and localize jump targets
crypto: blowfish-x86_64: use ENTRY()/ENDPROC() for assembler functions and localize jump targets
crypto: aesni-intel - add ENDPROC statements for assembler functions
crypto: x86/aes - assembler clean-ups: use ENTRY/ENDPROC, localize jump targets
crypto: testmgr - add test vector for fcrypt
...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/atmel-aes.c	|   2
-rw-r--r--	drivers/crypto/bfin_crc.c	|   6
-rw-r--r--	drivers/crypto/omap-aes.c	| 658
-rw-r--r--	drivers/crypto/omap-sham.c	| 918
-rw-r--r--	drivers/crypto/s5p-sss.c	|   4
5 files changed, 1210 insertions(+), 378 deletions(-)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index c9d9d5c16f94..6f22ba51f969 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -332,7 +332,7 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) | |||
332 | return -EINVAL; | 332 | return -EINVAL; |
333 | 333 | ||
334 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | 334 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); |
335 | if (!dd->nb_in_sg) | 335 | if (!dd->nb_out_sg) |
336 | return -EINVAL; | 336 | return -EINVAL; |
337 | 337 | ||
338 | dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, | 338 | dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, |
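The atmel-aes hunk above fixes a copy-and-paste slip: the driver computes the number of output scatterlist entries into nb_out_sg but then re-tests nb_in_sg, so an empty output list slipped past the check. A minimal sketch of the intended pattern follows; count_sg() is only an illustrative stand-in for the driver's own atmel_aes_sg_length() helper.

```c
/* Sketch only: count_sg() stands in for the driver's own helper.
 * Each scatterlist is validated against the count just computed for it. */
dd->nb_in_sg = count_sg(dd->req, dd->in_sg);
if (!dd->nb_in_sg)
	return -EINVAL;

dd->nb_out_sg = count_sg(dd->req, dd->out_sg);
if (!dd->nb_out_sg)	/* previously tested nb_in_sg by mistake */
	return -EINVAL;
```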
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a22f1a9f895f..827913d7d33a 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -694,7 +694,7 @@ out_error_dma: | |||
694 | dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); | 694 | dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); |
695 | free_dma(crc->dma_ch); | 695 | free_dma(crc->dma_ch); |
696 | out_error_irq: | 696 | out_error_irq: |
697 | free_irq(crc->irq, crc->dev); | 697 | free_irq(crc->irq, crc); |
698 | out_error_unmap: | 698 | out_error_unmap: |
699 | iounmap((void *)crc->regs); | 699 | iounmap((void *)crc->regs); |
700 | out_error_free_mem: | 700 | out_error_free_mem: |
@@ -720,10 +720,10 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev) | |||
720 | 720 | ||
721 | crypto_unregister_ahash(&algs); | 721 | crypto_unregister_ahash(&algs); |
722 | tasklet_kill(&crc->done_task); | 722 | tasklet_kill(&crc->done_task); |
723 | iounmap((void *)crc->regs); | ||
724 | free_dma(crc->dma_ch); | 723 | free_dma(crc->dma_ch); |
725 | if (crc->irq > 0) | 724 | if (crc->irq > 0) |
726 | free_irq(crc->irq, crc->dev); | 725 | free_irq(crc->irq, crc); |
726 | iounmap((void *)crc->regs); | ||
727 | kfree(crc); | 727 | kfree(crc); |
728 | 728 | ||
729 | return 0; | 729 | return 0; |
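The bfin_crc changes correct the cookie passed to free_irq() (it must be the same pointer that was handed to request_irq(), here crc rather than crc->dev) and move iounmap() after the IRQ and DMA teardown so a late interrupt can no longer touch unmapped registers. Below is a hedged sketch of the request/free pairing; the handler name and flags are illustrative rather than taken from the driver, and the struct name is assumed from the driver's function names.

```c
#include <linux/interrupt.h>

static irqreturn_t crc_irq_handler(int irq, void *dev_id)
{
	struct bfin_crypto_crc *crc = dev_id;	/* same cookie as below */

	/* ... acknowledge the hardware, schedule crc->done_task ... */
	return IRQ_HANDLED;
}

/* probe(): register the handler with 'crc' as the dev_id cookie */
err = request_irq(crc->irq, crc_irq_handler, IRQF_SHARED, "bfin-crc", crc);

/* remove()/error path: release it with the *same* cookie */
free_irq(crc->irq, crc);
```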
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e66e8ee5a9af..6aa425fe0ed5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -5,6 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (c) 2010 Nokia Corporation | 6 | * Copyright (c) 2010 Nokia Corporation |
7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | 7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> |
8 | * Copyright (c) 2011 Texas Instruments Incorporated | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as published | 11 | * it under the terms of the GNU General Public License version 2 as published |
@@ -19,28 +20,39 @@ | |||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
22 | #include <linux/clk.h> | ||
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/omap-dma.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_device.h> | ||
31 | #include <linux/of_address.h> | ||
26 | #include <linux/io.h> | 32 | #include <linux/io.h> |
27 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
28 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
29 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
30 | #include <crypto/aes.h> | 36 | #include <crypto/aes.h> |
31 | 37 | ||
32 | #include <linux/omap-dma.h> | 38 | #define DST_MAXBURST 4 |
39 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | ||
33 | 40 | ||
34 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit | 41 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit |
35 | number. For example 7:0 */ | 42 | number. For example 7:0 */ |
36 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | 43 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) |
37 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | 44 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) |
38 | 45 | ||
39 | #define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04)) | 46 | #define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ |
40 | #define AES_REG_IV(x) (0x20 + ((x) * 0x04)) | 47 | ((x ^ 0x01) * 0x04)) |
48 | #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) | ||
41 | 49 | ||
42 | #define AES_REG_CTRL 0x30 | 50 | #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) |
43 | #define AES_REG_CTRL_CTR_WIDTH (1 << 7) | 51 | #define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7) |
52 | #define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7) | ||
53 | #define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7) | ||
54 | #define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7) | ||
55 | #define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7) | ||
44 | #define AES_REG_CTRL_CTR (1 << 6) | 56 | #define AES_REG_CTRL_CTR (1 << 6) |
45 | #define AES_REG_CTRL_CBC (1 << 5) | 57 | #define AES_REG_CTRL_CBC (1 << 5) |
46 | #define AES_REG_CTRL_KEY_SIZE (3 << 3) | 58 | #define AES_REG_CTRL_KEY_SIZE (3 << 3) |
@@ -48,14 +60,11 @@ | |||
48 | #define AES_REG_CTRL_INPUT_READY (1 << 1) | 60 | #define AES_REG_CTRL_INPUT_READY (1 << 1) |
49 | #define AES_REG_CTRL_OUTPUT_READY (1 << 0) | 61 | #define AES_REG_CTRL_OUTPUT_READY (1 << 0) |
50 | 62 | ||
51 | #define AES_REG_DATA 0x34 | 63 | #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) |
52 | #define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04)) | ||
53 | 64 | ||
54 | #define AES_REG_REV 0x44 | 65 | #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) |
55 | #define AES_REG_REV_MAJOR 0xF0 | ||
56 | #define AES_REG_REV_MINOR 0x0F | ||
57 | 66 | ||
58 | #define AES_REG_MASK 0x48 | 67 | #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) |
59 | #define AES_REG_MASK_SIDLE (1 << 6) | 68 | #define AES_REG_MASK_SIDLE (1 << 6) |
60 | #define AES_REG_MASK_START (1 << 5) | 69 | #define AES_REG_MASK_START (1 << 5) |
61 | #define AES_REG_MASK_DMA_OUT_EN (1 << 3) | 70 | #define AES_REG_MASK_DMA_OUT_EN (1 << 3) |
@@ -63,8 +72,7 @@ | |||
63 | #define AES_REG_MASK_SOFTRESET (1 << 1) | 72 | #define AES_REG_MASK_SOFTRESET (1 << 1) |
64 | #define AES_REG_AUTOIDLE (1 << 0) | 73 | #define AES_REG_AUTOIDLE (1 << 0) |
65 | 74 | ||
66 | #define AES_REG_SYSSTATUS 0x4C | 75 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) |
67 | #define AES_REG_SYSSTATUS_RESETDONE (1 << 0) | ||
68 | 76 | ||
69 | #define DEFAULT_TIMEOUT (5*HZ) | 77 | #define DEFAULT_TIMEOUT (5*HZ) |
70 | 78 | ||
@@ -72,6 +80,7 @@ | |||
72 | #define FLAGS_ENCRYPT BIT(0) | 80 | #define FLAGS_ENCRYPT BIT(0) |
73 | #define FLAGS_CBC BIT(1) | 81 | #define FLAGS_CBC BIT(1) |
74 | #define FLAGS_GIV BIT(2) | 82 | #define FLAGS_GIV BIT(2) |
83 | #define FLAGS_CTR BIT(3) | ||
75 | 84 | ||
76 | #define FLAGS_INIT BIT(4) | 85 | #define FLAGS_INIT BIT(4) |
77 | #define FLAGS_FAST BIT(5) | 86 | #define FLAGS_FAST BIT(5) |
@@ -92,11 +101,39 @@ struct omap_aes_reqctx { | |||
92 | #define OMAP_AES_QUEUE_LENGTH 1 | 101 | #define OMAP_AES_QUEUE_LENGTH 1 |
93 | #define OMAP_AES_CACHE_SIZE 0 | 102 | #define OMAP_AES_CACHE_SIZE 0 |
94 | 103 | ||
104 | struct omap_aes_algs_info { | ||
105 | struct crypto_alg *algs_list; | ||
106 | unsigned int size; | ||
107 | unsigned int registered; | ||
108 | }; | ||
109 | |||
110 | struct omap_aes_pdata { | ||
111 | struct omap_aes_algs_info *algs_info; | ||
112 | unsigned int algs_info_size; | ||
113 | |||
114 | void (*trigger)(struct omap_aes_dev *dd, int length); | ||
115 | |||
116 | u32 key_ofs; | ||
117 | u32 iv_ofs; | ||
118 | u32 ctrl_ofs; | ||
119 | u32 data_ofs; | ||
120 | u32 rev_ofs; | ||
121 | u32 mask_ofs; | ||
122 | |||
123 | u32 dma_enable_in; | ||
124 | u32 dma_enable_out; | ||
125 | u32 dma_start; | ||
126 | |||
127 | u32 major_mask; | ||
128 | u32 major_shift; | ||
129 | u32 minor_mask; | ||
130 | u32 minor_shift; | ||
131 | }; | ||
132 | |||
95 | struct omap_aes_dev { | 133 | struct omap_aes_dev { |
96 | struct list_head list; | 134 | struct list_head list; |
97 | unsigned long phys_base; | 135 | unsigned long phys_base; |
98 | void __iomem *io_base; | 136 | void __iomem *io_base; |
99 | struct clk *iclk; | ||
100 | struct omap_aes_ctx *ctx; | 137 | struct omap_aes_ctx *ctx; |
101 | struct device *dev; | 138 | struct device *dev; |
102 | unsigned long flags; | 139 | unsigned long flags; |
@@ -111,20 +148,24 @@ struct omap_aes_dev { | |||
111 | struct ablkcipher_request *req; | 148 | struct ablkcipher_request *req; |
112 | size_t total; | 149 | size_t total; |
113 | struct scatterlist *in_sg; | 150 | struct scatterlist *in_sg; |
151 | struct scatterlist in_sgl; | ||
114 | size_t in_offset; | 152 | size_t in_offset; |
115 | struct scatterlist *out_sg; | 153 | struct scatterlist *out_sg; |
154 | struct scatterlist out_sgl; | ||
116 | size_t out_offset; | 155 | size_t out_offset; |
117 | 156 | ||
118 | size_t buflen; | 157 | size_t buflen; |
119 | void *buf_in; | 158 | void *buf_in; |
120 | size_t dma_size; | 159 | size_t dma_size; |
121 | int dma_in; | 160 | int dma_in; |
122 | int dma_lch_in; | 161 | struct dma_chan *dma_lch_in; |
123 | dma_addr_t dma_addr_in; | 162 | dma_addr_t dma_addr_in; |
124 | void *buf_out; | 163 | void *buf_out; |
125 | int dma_out; | 164 | int dma_out; |
126 | int dma_lch_out; | 165 | struct dma_chan *dma_lch_out; |
127 | dma_addr_t dma_addr_out; | 166 | dma_addr_t dma_addr_out; |
167 | |||
168 | const struct omap_aes_pdata *pdata; | ||
128 | }; | 169 | }; |
129 | 170 | ||
130 | /* keep registered devices data here */ | 171 | /* keep registered devices data here */ |
@@ -160,19 +201,6 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, | |||
160 | omap_aes_write(dd, offset, *value); | 201 | omap_aes_write(dd, offset, *value); |
161 | } | 202 | } |
162 | 203 | ||
163 | static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) | ||
164 | { | ||
165 | unsigned long timeout = jiffies + DEFAULT_TIMEOUT; | ||
166 | |||
167 | while (!(omap_aes_read(dd, offset) & bit)) { | ||
168 | if (time_is_before_jiffies(timeout)) { | ||
169 | dev_err(dd->dev, "omap-aes timeout\n"); | ||
170 | return -ETIMEDOUT; | ||
171 | } | ||
172 | } | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int omap_aes_hw_init(struct omap_aes_dev *dd) | 204 | static int omap_aes_hw_init(struct omap_aes_dev *dd) |
177 | { | 205 | { |
178 | /* | 206 | /* |
@@ -180,23 +208,9 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) | |||
180 | * It may be long delays between requests. | 208 | * It may be long delays between requests. |
181 | * Device might go to off mode to save power. | 209 | * Device might go to off mode to save power. |
182 | */ | 210 | */ |
183 | clk_enable(dd->iclk); | 211 | pm_runtime_get_sync(dd->dev); |
184 | 212 | ||
185 | if (!(dd->flags & FLAGS_INIT)) { | 213 | if (!(dd->flags & FLAGS_INIT)) { |
186 | /* is it necessary to reset before every operation? */ | ||
187 | omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, | ||
188 | AES_REG_MASK_SOFTRESET); | ||
189 | /* | ||
190 | * prevent OCP bus error (SRESP) in case an access to the module | ||
191 | * is performed while the module is coming out of soft reset | ||
192 | */ | ||
193 | __asm__ __volatile__("nop"); | ||
194 | __asm__ __volatile__("nop"); | ||
195 | |||
196 | if (omap_aes_wait(dd, AES_REG_SYSSTATUS, | ||
197 | AES_REG_SYSSTATUS_RESETDONE)) | ||
198 | return -ETIMEDOUT; | ||
199 | |||
200 | dd->flags |= FLAGS_INIT; | 214 | dd->flags |= FLAGS_INIT; |
201 | dd->err = 0; | 215 | dd->err = 0; |
202 | } | 216 | } |
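In the hunk above, omap_aes_hw_init() stops managing an interface clock by hand (and drops the soft-reset dance) and instead takes a runtime-PM reference; the matching pm_runtime_put_sync() lands in omap_aes_finish_req() further down. A minimal sketch of that usage pattern, assuming the platform's runtime-PM callbacks take care of the module's clocks:

```c
#include <linux/pm_runtime.h>

/* probe(): hand the device over to runtime PM */
pm_runtime_enable(dev);

/* before touching registers for a request */
pm_runtime_get_sync(dev);	/* powers up the module and its clocks */
/* ... program key/IV/control, run the DMA transfer ... */
pm_runtime_put_sync(dev);	/* let the module idle between requests */

/* remove(): undo probe */
pm_runtime_disable(dev);
```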
@@ -208,59 +222,75 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
208 | { | 222 | { |
209 | unsigned int key32; | 223 | unsigned int key32; |
210 | int i, err; | 224 | int i, err; |
211 | u32 val, mask; | 225 | u32 val, mask = 0; |
212 | 226 | ||
213 | err = omap_aes_hw_init(dd); | 227 | err = omap_aes_hw_init(dd); |
214 | if (err) | 228 | if (err) |
215 | return err; | 229 | return err; |
216 | 230 | ||
217 | val = 0; | ||
218 | if (dd->dma_lch_out >= 0) | ||
219 | val |= AES_REG_MASK_DMA_OUT_EN; | ||
220 | if (dd->dma_lch_in >= 0) | ||
221 | val |= AES_REG_MASK_DMA_IN_EN; | ||
222 | |||
223 | mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN; | ||
224 | |||
225 | omap_aes_write_mask(dd, AES_REG_MASK, val, mask); | ||
226 | |||
227 | key32 = dd->ctx->keylen / sizeof(u32); | 231 | key32 = dd->ctx->keylen / sizeof(u32); |
228 | 232 | ||
229 | /* it seems a key should always be set even if it has not changed */ | 233 | /* it seems a key should always be set even if it has not changed */ |
230 | for (i = 0; i < key32; i++) { | 234 | for (i = 0; i < key32; i++) { |
231 | omap_aes_write(dd, AES_REG_KEY(i), | 235 | omap_aes_write(dd, AES_REG_KEY(dd, i), |
232 | __le32_to_cpu(dd->ctx->key[i])); | 236 | __le32_to_cpu(dd->ctx->key[i])); |
233 | } | 237 | } |
234 | 238 | ||
235 | if ((dd->flags & FLAGS_CBC) && dd->req->info) | 239 | if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info) |
236 | omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4); | 240 | omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4); |
237 | 241 | ||
238 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); | 242 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); |
239 | if (dd->flags & FLAGS_CBC) | 243 | if (dd->flags & FLAGS_CBC) |
240 | val |= AES_REG_CTRL_CBC; | 244 | val |= AES_REG_CTRL_CBC; |
245 | if (dd->flags & FLAGS_CTR) { | ||
246 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; | ||
247 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; | ||
248 | } | ||
241 | if (dd->flags & FLAGS_ENCRYPT) | 249 | if (dd->flags & FLAGS_ENCRYPT) |
242 | val |= AES_REG_CTRL_DIRECTION; | 250 | val |= AES_REG_CTRL_DIRECTION; |
243 | 251 | ||
244 | mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | | 252 | mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | |
245 | AES_REG_CTRL_KEY_SIZE; | 253 | AES_REG_CTRL_KEY_SIZE; |
246 | 254 | ||
247 | omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); | 255 | omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask); |
248 | 256 | ||
249 | /* IN */ | 257 | return 0; |
250 | omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, | 258 | } |
251 | dd->phys_base + AES_REG_DATA, 0, 4); | ||
252 | 259 | ||
253 | omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); | 260 | static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length) |
254 | omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); | 261 | { |
262 | u32 mask, val; | ||
255 | 263 | ||
256 | /* OUT */ | 264 | val = dd->pdata->dma_start; |
257 | omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, | ||
258 | dd->phys_base + AES_REG_DATA, 0, 4); | ||
259 | 265 | ||
260 | omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); | 266 | if (dd->dma_lch_out != NULL) |
261 | omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); | 267 | val |= dd->pdata->dma_enable_out; |
268 | if (dd->dma_lch_in != NULL) | ||
269 | val |= dd->pdata->dma_enable_in; | ||
270 | |||
271 | mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | | ||
272 | dd->pdata->dma_start; | ||
273 | |||
274 | omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask); | ||
262 | 275 | ||
263 | return 0; | 276 | } |
277 | |||
278 | static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length) | ||
279 | { | ||
280 | omap_aes_write(dd, AES_REG_LENGTH_N(0), length); | ||
281 | omap_aes_write(dd, AES_REG_LENGTH_N(1), 0); | ||
282 | |||
283 | omap_aes_dma_trigger_omap2(dd, length); | ||
284 | } | ||
285 | |||
286 | static void omap_aes_dma_stop(struct omap_aes_dev *dd) | ||
287 | { | ||
288 | u32 mask; | ||
289 | |||
290 | mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | | ||
291 | dd->pdata->dma_start; | ||
292 | |||
293 | omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask); | ||
264 | } | 294 | } |
265 | 295 | ||
266 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) | 296 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) |
@@ -284,18 +314,10 @@ static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) | |||
284 | return dd; | 314 | return dd; |
285 | } | 315 | } |
286 | 316 | ||
287 | static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) | 317 | static void omap_aes_dma_out_callback(void *data) |
288 | { | 318 | { |
289 | struct omap_aes_dev *dd = data; | 319 | struct omap_aes_dev *dd = data; |
290 | 320 | ||
291 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | ||
292 | pr_err("omap-aes DMA error status: 0x%hx\n", ch_status); | ||
293 | dd->err = -EIO; | ||
294 | dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ | ||
295 | } else if (lch == dd->dma_lch_in) { | ||
296 | return; | ||
297 | } | ||
298 | |||
299 | /* dma_lch_out - completed */ | 321 | /* dma_lch_out - completed */ |
300 | tasklet_schedule(&dd->done_task); | 322 | tasklet_schedule(&dd->done_task); |
301 | } | 323 | } |
@@ -303,9 +325,10 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) | |||
303 | static int omap_aes_dma_init(struct omap_aes_dev *dd) | 325 | static int omap_aes_dma_init(struct omap_aes_dev *dd) |
304 | { | 326 | { |
305 | int err = -ENOMEM; | 327 | int err = -ENOMEM; |
328 | dma_cap_mask_t mask; | ||
306 | 329 | ||
307 | dd->dma_lch_out = -1; | 330 | dd->dma_lch_out = NULL; |
308 | dd->dma_lch_in = -1; | 331 | dd->dma_lch_in = NULL; |
309 | 332 | ||
310 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | 333 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); |
311 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | 334 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); |
@@ -334,23 +357,31 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) | |||
334 | goto err_map_out; | 357 | goto err_map_out; |
335 | } | 358 | } |
336 | 359 | ||
337 | err = omap_request_dma(dd->dma_in, "omap-aes-rx", | 360 | dma_cap_zero(mask); |
338 | omap_aes_dma_callback, dd, &dd->dma_lch_in); | 361 | dma_cap_set(DMA_SLAVE, mask); |
339 | if (err) { | 362 | |
340 | dev_err(dd->dev, "Unable to request DMA channel\n"); | 363 | dd->dma_lch_in = dma_request_slave_channel_compat(mask, |
364 | omap_dma_filter_fn, | ||
365 | &dd->dma_in, | ||
366 | dd->dev, "rx"); | ||
367 | if (!dd->dma_lch_in) { | ||
368 | dev_err(dd->dev, "Unable to request in DMA channel\n"); | ||
341 | goto err_dma_in; | 369 | goto err_dma_in; |
342 | } | 370 | } |
343 | err = omap_request_dma(dd->dma_out, "omap-aes-tx", | 371 | |
344 | omap_aes_dma_callback, dd, &dd->dma_lch_out); | 372 | dd->dma_lch_out = dma_request_slave_channel_compat(mask, |
345 | if (err) { | 373 | omap_dma_filter_fn, |
346 | dev_err(dd->dev, "Unable to request DMA channel\n"); | 374 | &dd->dma_out, |
375 | dd->dev, "tx"); | ||
376 | if (!dd->dma_lch_out) { | ||
377 | dev_err(dd->dev, "Unable to request out DMA channel\n"); | ||
347 | goto err_dma_out; | 378 | goto err_dma_out; |
348 | } | 379 | } |
349 | 380 | ||
350 | return 0; | 381 | return 0; |
351 | 382 | ||
352 | err_dma_out: | 383 | err_dma_out: |
353 | omap_free_dma(dd->dma_lch_in); | 384 | dma_release_channel(dd->dma_lch_in); |
354 | err_dma_in: | 385 | err_dma_in: |
355 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | 386 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, |
356 | DMA_FROM_DEVICE); | 387 | DMA_FROM_DEVICE); |
@@ -367,8 +398,8 @@ err_alloc: | |||
367 | 398 | ||
368 | static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) | 399 | static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) |
369 | { | 400 | { |
370 | omap_free_dma(dd->dma_lch_out); | 401 | dma_release_channel(dd->dma_lch_out); |
371 | omap_free_dma(dd->dma_lch_in); | 402 | dma_release_channel(dd->dma_lch_in); |
372 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | 403 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, |
373 | DMA_FROM_DEVICE); | 404 | DMA_FROM_DEVICE); |
374 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); | 405 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); |
@@ -426,12 +457,15 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, | |||
426 | return off; | 457 | return off; |
427 | } | 458 | } |
428 | 459 | ||
429 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | 460 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, |
430 | dma_addr_t dma_addr_out, int length) | 461 | struct scatterlist *in_sg, struct scatterlist *out_sg) |
431 | { | 462 | { |
432 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 463 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
433 | struct omap_aes_dev *dd = ctx->dd; | 464 | struct omap_aes_dev *dd = ctx->dd; |
434 | int len32; | 465 | struct dma_async_tx_descriptor *tx_in, *tx_out; |
466 | struct dma_slave_config cfg; | ||
467 | dma_addr_t dma_addr_in = sg_dma_address(in_sg); | ||
468 | int ret, length = sg_dma_len(in_sg); | ||
435 | 469 | ||
436 | pr_debug("len: %d\n", length); | 470 | pr_debug("len: %d\n", length); |
437 | 471 | ||
@@ -441,30 +475,61 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | |||
441 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | 475 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, |
442 | DMA_TO_DEVICE); | 476 | DMA_TO_DEVICE); |
443 | 477 | ||
444 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 478 | memset(&cfg, 0, sizeof(cfg)); |
479 | |||
480 | cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0); | ||
481 | cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0); | ||
482 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
483 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
484 | cfg.src_maxburst = DST_MAXBURST; | ||
485 | cfg.dst_maxburst = DST_MAXBURST; | ||
445 | 486 | ||
446 | /* IN */ | 487 | /* IN */ |
447 | omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, | 488 | ret = dmaengine_slave_config(dd->dma_lch_in, &cfg); |
448 | len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in, | 489 | if (ret) { |
449 | OMAP_DMA_DST_SYNC); | 490 | dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", |
491 | ret); | ||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1, | ||
496 | DMA_MEM_TO_DEV, | ||
497 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
498 | if (!tx_in) { | ||
499 | dev_err(dd->dev, "IN prep_slave_sg() failed\n"); | ||
500 | return -EINVAL; | ||
501 | } | ||
450 | 502 | ||
451 | omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC, | 503 | /* No callback necessary */ |
452 | dma_addr_in, 0, 0); | 504 | tx_in->callback_param = dd; |
453 | 505 | ||
454 | /* OUT */ | 506 | /* OUT */ |
455 | omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, | 507 | ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); |
456 | len32, 1, OMAP_DMA_SYNC_PACKET, | 508 | if (ret) { |
457 | dd->dma_out, OMAP_DMA_SRC_SYNC); | 509 | dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", |
510 | ret); | ||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1, | ||
515 | DMA_DEV_TO_MEM, | ||
516 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
517 | if (!tx_out) { | ||
518 | dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); | ||
519 | return -EINVAL; | ||
520 | } | ||
458 | 521 | ||
459 | omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, | 522 | tx_out->callback = omap_aes_dma_out_callback; |
460 | dma_addr_out, 0, 0); | 523 | tx_out->callback_param = dd; |
461 | 524 | ||
462 | omap_start_dma(dd->dma_lch_in); | 525 | dmaengine_submit(tx_in); |
463 | omap_start_dma(dd->dma_lch_out); | 526 | dmaengine_submit(tx_out); |
464 | 527 | ||
465 | /* start DMA or disable idle mode */ | 528 | dma_async_issue_pending(dd->dma_lch_in); |
466 | omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, | 529 | dma_async_issue_pending(dd->dma_lch_out); |
467 | AES_REG_MASK_START); | 530 | |
531 | /* start DMA */ | ||
532 | dd->pdata->trigger(dd, length); | ||
468 | 533 | ||
469 | return 0; | 534 | return 0; |
470 | } | 535 | } |
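The rewritten omap_aes_crypt_dma() follows the standard dmaengine slave sequence: configure the channel for the peripheral's data port, prepare a slave-sg descriptor per direction, attach a completion callback only where one is wanted, then submit and kick the engine. A condensed single-direction sketch of that sequence is below; chan, sg, fifo_phys and aes_dma_done are illustrative names, and error unwinding is trimmed.

```c
#include <linux/dmaengine.h>

struct dma_slave_config cfg = {
	.src_addr	= fifo_phys,			/* AES data port */
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst	= DST_MAXBURST,
};
struct dma_async_tx_descriptor *tx;

if (dmaengine_slave_config(chan, &cfg))
	return -EINVAL;

tx = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
			     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
	return -EINVAL;

tx->callback = aes_dma_done;		/* completion schedules the tasklet */
tx->callback_param = dd;

dmaengine_submit(tx);			/* queue the descriptor */
dma_async_issue_pending(chan);		/* start (or continue) the engine */
```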
@@ -476,6 +541,8 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
476 | int err, fast = 0, in, out; | 541 | int err, fast = 0, in, out; |
477 | size_t count; | 542 | size_t count; |
478 | dma_addr_t addr_in, addr_out; | 543 | dma_addr_t addr_in, addr_out; |
544 | struct scatterlist *in_sg, *out_sg; | ||
545 | int len32; | ||
479 | 546 | ||
480 | pr_debug("total: %d\n", dd->total); | 547 | pr_debug("total: %d\n", dd->total); |
481 | 548 | ||
@@ -514,6 +581,9 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
514 | addr_in = sg_dma_address(dd->in_sg); | 581 | addr_in = sg_dma_address(dd->in_sg); |
515 | addr_out = sg_dma_address(dd->out_sg); | 582 | addr_out = sg_dma_address(dd->out_sg); |
516 | 583 | ||
584 | in_sg = dd->in_sg; | ||
585 | out_sg = dd->out_sg; | ||
586 | |||
517 | dd->flags |= FLAGS_FAST; | 587 | dd->flags |= FLAGS_FAST; |
518 | 588 | ||
519 | } else { | 589 | } else { |
@@ -521,6 +591,27 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
521 | count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, | 591 | count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, |
522 | dd->buflen, dd->total, 0); | 592 | dd->buflen, dd->total, 0); |
523 | 593 | ||
594 | len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN; | ||
595 | |||
596 | /* | ||
597 | * The data going into the AES module has been copied | ||
598 | * to a local buffer and the data coming out will go | ||
599 | * into a local buffer so set up local SG entries for | ||
600 | * both. | ||
601 | */ | ||
602 | sg_init_table(&dd->in_sgl, 1); | ||
603 | dd->in_sgl.offset = dd->in_offset; | ||
604 | sg_dma_len(&dd->in_sgl) = len32; | ||
605 | sg_dma_address(&dd->in_sgl) = dd->dma_addr_in; | ||
606 | |||
607 | sg_init_table(&dd->out_sgl, 1); | ||
608 | dd->out_sgl.offset = dd->out_offset; | ||
609 | sg_dma_len(&dd->out_sgl) = len32; | ||
610 | sg_dma_address(&dd->out_sgl) = dd->dma_addr_out; | ||
611 | |||
612 | in_sg = &dd->in_sgl; | ||
613 | out_sg = &dd->out_sgl; | ||
614 | |||
524 | addr_in = dd->dma_addr_in; | 615 | addr_in = dd->dma_addr_in; |
525 | addr_out = dd->dma_addr_out; | 616 | addr_out = dd->dma_addr_out; |
526 | 617 | ||
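In the slow (copy) path above, the transfer length is rounded up to DMA_MIN, i.e. to a whole number of maximum-burst transactions, so the DMA never ends mid-burst. With DST_MAXBURST = 4 and 32-bit words that is 16 bytes; a worked example of the rounding:

```c
#define DST_MAXBURST	4
#define DMA_MIN		(DST_MAXBURST * sizeof(u32))	/* 16 bytes */

len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
/* count = 20  ->  DIV_ROUND_UP(20, 16) = 2  ->  len32 = 32 */
/* count = 16  ->  DIV_ROUND_UP(16, 16) = 1  ->  len32 = 16 */
```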
@@ -530,7 +621,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
530 | 621 | ||
531 | dd->total -= count; | 622 | dd->total -= count; |
532 | 623 | ||
533 | err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); | 624 | err = omap_aes_crypt_dma(tfm, in_sg, out_sg); |
534 | if (err) { | 625 | if (err) { |
535 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 626 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
536 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); | 627 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); |
@@ -545,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
545 | 636 | ||
546 | pr_debug("err: %d\n", err); | 637 | pr_debug("err: %d\n", err); |
547 | 638 | ||
548 | clk_disable(dd->iclk); | 639 | pm_runtime_put_sync(dd->dev); |
549 | dd->flags &= ~FLAGS_BUSY; | 640 | dd->flags &= ~FLAGS_BUSY; |
550 | 641 | ||
551 | req->base.complete(&req->base, err); | 642 | req->base.complete(&req->base, err); |
@@ -558,10 +649,10 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
558 | 649 | ||
559 | pr_debug("total: %d\n", dd->total); | 650 | pr_debug("total: %d\n", dd->total); |
560 | 651 | ||
561 | omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); | 652 | omap_aes_dma_stop(dd); |
562 | 653 | ||
563 | omap_stop_dma(dd->dma_lch_in); | 654 | dmaengine_terminate_all(dd->dma_lch_in); |
564 | omap_stop_dma(dd->dma_lch_out); | 655 | dmaengine_terminate_all(dd->dma_lch_out); |
565 | 656 | ||
566 | if (dd->flags & FLAGS_FAST) { | 657 | if (dd->flags & FLAGS_FAST) { |
567 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | 658 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); |
@@ -734,6 +825,16 @@ static int omap_aes_cbc_decrypt(struct ablkcipher_request *req) | |||
734 | return omap_aes_crypt(req, FLAGS_CBC); | 825 | return omap_aes_crypt(req, FLAGS_CBC); |
735 | } | 826 | } |
736 | 827 | ||
828 | static int omap_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
829 | { | ||
830 | return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR); | ||
831 | } | ||
832 | |||
833 | static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
834 | { | ||
835 | return omap_aes_crypt(req, FLAGS_CTR); | ||
836 | } | ||
837 | |||
737 | static int omap_aes_cra_init(struct crypto_tfm *tfm) | 838 | static int omap_aes_cra_init(struct crypto_tfm *tfm) |
738 | { | 839 | { |
739 | pr_debug("enter\n"); | 840 | pr_debug("enter\n"); |
@@ -750,7 +851,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm) | |||
750 | 851 | ||
751 | /* ********************** ALGS ************************************ */ | 852 | /* ********************** ALGS ************************************ */ |
752 | 853 | ||
753 | static struct crypto_alg algs[] = { | 854 | static struct crypto_alg algs_ecb_cbc[] = { |
754 | { | 855 | { |
755 | .cra_name = "ecb(aes)", | 856 | .cra_name = "ecb(aes)", |
756 | .cra_driver_name = "ecb-aes-omap", | 857 | .cra_driver_name = "ecb-aes-omap", |
@@ -798,11 +899,213 @@ static struct crypto_alg algs[] = { | |||
798 | } | 899 | } |
799 | }; | 900 | }; |
800 | 901 | ||
902 | static struct crypto_alg algs_ctr[] = { | ||
903 | { | ||
904 | .cra_name = "ctr(aes)", | ||
905 | .cra_driver_name = "ctr-aes-omap", | ||
906 | .cra_priority = 100, | ||
907 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
908 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
909 | CRYPTO_ALG_ASYNC, | ||
910 | .cra_blocksize = AES_BLOCK_SIZE, | ||
911 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | ||
912 | .cra_alignmask = 0, | ||
913 | .cra_type = &crypto_ablkcipher_type, | ||
914 | .cra_module = THIS_MODULE, | ||
915 | .cra_init = omap_aes_cra_init, | ||
916 | .cra_exit = omap_aes_cra_exit, | ||
917 | .cra_u.ablkcipher = { | ||
918 | .min_keysize = AES_MIN_KEY_SIZE, | ||
919 | .max_keysize = AES_MAX_KEY_SIZE, | ||
920 | .geniv = "eseqiv", | ||
921 | .ivsize = AES_BLOCK_SIZE, | ||
922 | .setkey = omap_aes_setkey, | ||
923 | .encrypt = omap_aes_ctr_encrypt, | ||
924 | .decrypt = omap_aes_ctr_decrypt, | ||
925 | } | ||
926 | } , | ||
927 | }; | ||
928 | |||
929 | static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = { | ||
930 | { | ||
931 | .algs_list = algs_ecb_cbc, | ||
932 | .size = ARRAY_SIZE(algs_ecb_cbc), | ||
933 | }, | ||
934 | }; | ||
935 | |||
936 | static const struct omap_aes_pdata omap_aes_pdata_omap2 = { | ||
937 | .algs_info = omap_aes_algs_info_ecb_cbc, | ||
938 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc), | ||
939 | .trigger = omap_aes_dma_trigger_omap2, | ||
940 | .key_ofs = 0x1c, | ||
941 | .iv_ofs = 0x20, | ||
942 | .ctrl_ofs = 0x30, | ||
943 | .data_ofs = 0x34, | ||
944 | .rev_ofs = 0x44, | ||
945 | .mask_ofs = 0x48, | ||
946 | .dma_enable_in = BIT(2), | ||
947 | .dma_enable_out = BIT(3), | ||
948 | .dma_start = BIT(5), | ||
949 | .major_mask = 0xf0, | ||
950 | .major_shift = 4, | ||
951 | .minor_mask = 0x0f, | ||
952 | .minor_shift = 0, | ||
953 | }; | ||
954 | |||
955 | #ifdef CONFIG_OF | ||
956 | static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = { | ||
957 | { | ||
958 | .algs_list = algs_ecb_cbc, | ||
959 | .size = ARRAY_SIZE(algs_ecb_cbc), | ||
960 | }, | ||
961 | { | ||
962 | .algs_list = algs_ctr, | ||
963 | .size = ARRAY_SIZE(algs_ctr), | ||
964 | }, | ||
965 | }; | ||
966 | |||
967 | static const struct omap_aes_pdata omap_aes_pdata_omap3 = { | ||
968 | .algs_info = omap_aes_algs_info_ecb_cbc_ctr, | ||
969 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), | ||
970 | .trigger = omap_aes_dma_trigger_omap2, | ||
971 | .key_ofs = 0x1c, | ||
972 | .iv_ofs = 0x20, | ||
973 | .ctrl_ofs = 0x30, | ||
974 | .data_ofs = 0x34, | ||
975 | .rev_ofs = 0x44, | ||
976 | .mask_ofs = 0x48, | ||
977 | .dma_enable_in = BIT(2), | ||
978 | .dma_enable_out = BIT(3), | ||
979 | .dma_start = BIT(5), | ||
980 | .major_mask = 0xf0, | ||
981 | .major_shift = 4, | ||
982 | .minor_mask = 0x0f, | ||
983 | .minor_shift = 0, | ||
984 | }; | ||
985 | |||
986 | static const struct omap_aes_pdata omap_aes_pdata_omap4 = { | ||
987 | .algs_info = omap_aes_algs_info_ecb_cbc_ctr, | ||
988 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), | ||
989 | .trigger = omap_aes_dma_trigger_omap4, | ||
990 | .key_ofs = 0x3c, | ||
991 | .iv_ofs = 0x40, | ||
992 | .ctrl_ofs = 0x50, | ||
993 | .data_ofs = 0x60, | ||
994 | .rev_ofs = 0x80, | ||
995 | .mask_ofs = 0x84, | ||
996 | .dma_enable_in = BIT(5), | ||
997 | .dma_enable_out = BIT(6), | ||
998 | .major_mask = 0x0700, | ||
999 | .major_shift = 8, | ||
1000 | .minor_mask = 0x003f, | ||
1001 | .minor_shift = 0, | ||
1002 | }; | ||
1003 | |||
1004 | static const struct of_device_id omap_aes_of_match[] = { | ||
1005 | { | ||
1006 | .compatible = "ti,omap2-aes", | ||
1007 | .data = &omap_aes_pdata_omap2, | ||
1008 | }, | ||
1009 | { | ||
1010 | .compatible = "ti,omap3-aes", | ||
1011 | .data = &omap_aes_pdata_omap3, | ||
1012 | }, | ||
1013 | { | ||
1014 | .compatible = "ti,omap4-aes", | ||
1015 | .data = &omap_aes_pdata_omap4, | ||
1016 | }, | ||
1017 | {}, | ||
1018 | }; | ||
1019 | MODULE_DEVICE_TABLE(of, omap_aes_of_match); | ||
1020 | |||
1021 | static int omap_aes_get_res_of(struct omap_aes_dev *dd, | ||
1022 | struct device *dev, struct resource *res) | ||
1023 | { | ||
1024 | struct device_node *node = dev->of_node; | ||
1025 | const struct of_device_id *match; | ||
1026 | int err = 0; | ||
1027 | |||
1028 | match = of_match_device(of_match_ptr(omap_aes_of_match), dev); | ||
1029 | if (!match) { | ||
1030 | dev_err(dev, "no compatible OF match\n"); | ||
1031 | err = -EINVAL; | ||
1032 | goto err; | ||
1033 | } | ||
1034 | |||
1035 | err = of_address_to_resource(node, 0, res); | ||
1036 | if (err < 0) { | ||
1037 | dev_err(dev, "can't translate OF node address\n"); | ||
1038 | err = -EINVAL; | ||
1039 | goto err; | ||
1040 | } | ||
1041 | |||
1042 | dd->dma_out = -1; /* Dummy value that's unused */ | ||
1043 | dd->dma_in = -1; /* Dummy value that's unused */ | ||
1044 | |||
1045 | dd->pdata = match->data; | ||
1046 | |||
1047 | err: | ||
1048 | return err; | ||
1049 | } | ||
1050 | #else | ||
1051 | static const struct of_device_id omap_aes_of_match[] = { | ||
1052 | {}, | ||
1053 | }; | ||
1054 | |||
1055 | static int omap_aes_get_res_of(struct omap_aes_dev *dd, | ||
1056 | struct device *dev, struct resource *res) | ||
1057 | { | ||
1058 | return -EINVAL; | ||
1059 | } | ||
1060 | #endif | ||
1061 | |||
1062 | static int omap_aes_get_res_pdev(struct omap_aes_dev *dd, | ||
1063 | struct platform_device *pdev, struct resource *res) | ||
1064 | { | ||
1065 | struct device *dev = &pdev->dev; | ||
1066 | struct resource *r; | ||
1067 | int err = 0; | ||
1068 | |||
1069 | /* Get the base address */ | ||
1070 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1071 | if (!r) { | ||
1072 | dev_err(dev, "no MEM resource info\n"); | ||
1073 | err = -ENODEV; | ||
1074 | goto err; | ||
1075 | } | ||
1076 | memcpy(res, r, sizeof(*res)); | ||
1077 | |||
1078 | /* Get the DMA out channel */ | ||
1079 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1080 | if (!r) { | ||
1081 | dev_err(dev, "no DMA out resource info\n"); | ||
1082 | err = -ENODEV; | ||
1083 | goto err; | ||
1084 | } | ||
1085 | dd->dma_out = r->start; | ||
1086 | |||
1087 | /* Get the DMA in channel */ | ||
1088 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1089 | if (!r) { | ||
1090 | dev_err(dev, "no DMA in resource info\n"); | ||
1091 | err = -ENODEV; | ||
1092 | goto err; | ||
1093 | } | ||
1094 | dd->dma_in = r->start; | ||
1095 | |||
1096 | /* Only OMAP2/3 can be non-DT */ | ||
1097 | dd->pdata = &omap_aes_pdata_omap2; | ||
1098 | |||
1099 | err: | ||
1100 | return err; | ||
1101 | } | ||
1102 | |||
801 | static int omap_aes_probe(struct platform_device *pdev) | 1103 | static int omap_aes_probe(struct platform_device *pdev) |
802 | { | 1104 | { |
803 | struct device *dev = &pdev->dev; | 1105 | struct device *dev = &pdev->dev; |
804 | struct omap_aes_dev *dd; | 1106 | struct omap_aes_dev *dd; |
805 | struct resource *res; | 1107 | struct crypto_alg *algp; |
1108 | struct resource res; | ||
806 | int err = -ENOMEM, i, j; | 1109 | int err = -ENOMEM, i, j; |
807 | u32 reg; | 1110 | u32 reg; |
808 | 1111 | ||
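The block above is where the per-SoC differences get consolidated: one const omap_aes_pdata per generation (register offsets, DMA enable bits, revision masks, supported algorithm lists) plus a device-tree match table whose .data pointer selects the right one at probe time. A condensed sketch of that lookup path, abbreviated from the patch:

```c
static const struct of_device_id omap_aes_of_match[] = {
	{ .compatible = "ti,omap2-aes", .data = &omap_aes_pdata_omap2 },
	{ .compatible = "ti,omap4-aes", .data = &omap_aes_pdata_omap4 },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

/* probe path, DT case */
const struct of_device_id *match = of_match_device(omap_aes_of_match, dev);

if (!match)
	return -EINVAL;

dd->pdata = match->data;	/* per-SoC register layout and algs */
```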
@@ -817,49 +1120,31 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
817 | spin_lock_init(&dd->lock); | 1120 | spin_lock_init(&dd->lock); |
818 | crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); | 1121 | crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); |
819 | 1122 | ||
820 | /* Get the base address */ | 1123 | err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) : |
821 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1124 | omap_aes_get_res_pdev(dd, pdev, &res); |
822 | if (!res) { | 1125 | if (err) |
823 | dev_err(dev, "invalid resource type\n"); | ||
824 | err = -ENODEV; | ||
825 | goto err_res; | ||
826 | } | ||
827 | dd->phys_base = res->start; | ||
828 | |||
829 | /* Get the DMA */ | ||
830 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
831 | if (!res) | ||
832 | dev_info(dev, "no DMA info\n"); | ||
833 | else | ||
834 | dd->dma_out = res->start; | ||
835 | |||
836 | /* Get the DMA */ | ||
837 | res = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
838 | if (!res) | ||
839 | dev_info(dev, "no DMA info\n"); | ||
840 | else | ||
841 | dd->dma_in = res->start; | ||
842 | |||
843 | /* Initializing the clock */ | ||
844 | dd->iclk = clk_get(dev, "ick"); | ||
845 | if (IS_ERR(dd->iclk)) { | ||
846 | dev_err(dev, "clock intialization failed.\n"); | ||
847 | err = PTR_ERR(dd->iclk); | ||
848 | goto err_res; | 1126 | goto err_res; |
849 | } | ||
850 | 1127 | ||
851 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | 1128 | dd->io_base = devm_request_and_ioremap(dev, &res); |
852 | if (!dd->io_base) { | 1129 | if (!dd->io_base) { |
853 | dev_err(dev, "can't ioremap\n"); | 1130 | dev_err(dev, "can't ioremap\n"); |
854 | err = -ENOMEM; | 1131 | err = -ENOMEM; |
855 | goto err_io; | 1132 | goto err_res; |
856 | } | 1133 | } |
1134 | dd->phys_base = res.start; | ||
1135 | |||
1136 | pm_runtime_enable(dev); | ||
1137 | pm_runtime_get_sync(dev); | ||
1138 | |||
1139 | omap_aes_dma_stop(dd); | ||
1140 | |||
1141 | reg = omap_aes_read(dd, AES_REG_REV(dd)); | ||
1142 | |||
1143 | pm_runtime_put_sync(dev); | ||
857 | 1144 | ||
858 | clk_enable(dd->iclk); | ||
859 | reg = omap_aes_read(dd, AES_REG_REV); | ||
860 | dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", | 1145 | dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", |
861 | (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); | 1146 | (reg & dd->pdata->major_mask) >> dd->pdata->major_shift, |
862 | clk_disable(dd->iclk); | 1147 | (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); |
863 | 1148 | ||
864 | tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); | 1149 | tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); |
865 | tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); | 1150 | tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); |
@@ -873,26 +1158,32 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
873 | list_add_tail(&dd->list, &dev_list); | 1158 | list_add_tail(&dd->list, &dev_list); |
874 | spin_unlock(&list_lock); | 1159 | spin_unlock(&list_lock); |
875 | 1160 | ||
876 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | 1161 | for (i = 0; i < dd->pdata->algs_info_size; i++) { |
877 | pr_debug("i: %d\n", i); | 1162 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { |
878 | err = crypto_register_alg(&algs[i]); | 1163 | algp = &dd->pdata->algs_info[i].algs_list[j]; |
879 | if (err) | 1164 | |
880 | goto err_algs; | 1165 | pr_debug("reg alg: %s\n", algp->cra_name); |
881 | } | 1166 | INIT_LIST_HEAD(&algp->cra_list); |
1167 | |||
1168 | err = crypto_register_alg(algp); | ||
1169 | if (err) | ||
1170 | goto err_algs; | ||
882 | 1171 | ||
883 | pr_info("probe() done\n"); | 1172 | dd->pdata->algs_info[i].registered++; |
1173 | } | ||
1174 | } | ||
884 | 1175 | ||
885 | return 0; | 1176 | return 0; |
886 | err_algs: | 1177 | err_algs: |
887 | for (j = 0; j < i; j++) | 1178 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
888 | crypto_unregister_alg(&algs[j]); | 1179 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1180 | crypto_unregister_alg( | ||
1181 | &dd->pdata->algs_info[i].algs_list[j]); | ||
889 | omap_aes_dma_cleanup(dd); | 1182 | omap_aes_dma_cleanup(dd); |
890 | err_dma: | 1183 | err_dma: |
891 | tasklet_kill(&dd->done_task); | 1184 | tasklet_kill(&dd->done_task); |
892 | tasklet_kill(&dd->queue_task); | 1185 | tasklet_kill(&dd->queue_task); |
893 | iounmap(dd->io_base); | 1186 | pm_runtime_disable(dev); |
894 | err_io: | ||
895 | clk_put(dd->iclk); | ||
896 | err_res: | 1187 | err_res: |
897 | kfree(dd); | 1188 | kfree(dd); |
898 | dd = NULL; | 1189 | dd = NULL; |
@@ -904,7 +1195,7 @@ err_data: | |||
904 | static int omap_aes_remove(struct platform_device *pdev) | 1195 | static int omap_aes_remove(struct platform_device *pdev) |
905 | { | 1196 | { |
906 | struct omap_aes_dev *dd = platform_get_drvdata(pdev); | 1197 | struct omap_aes_dev *dd = platform_get_drvdata(pdev); |
907 | int i; | 1198 | int i, j; |
908 | 1199 | ||
909 | if (!dd) | 1200 | if (!dd) |
910 | return -ENODEV; | 1201 | return -ENODEV; |
@@ -913,33 +1204,52 @@ static int omap_aes_remove(struct platform_device *pdev) | |||
913 | list_del(&dd->list); | 1204 | list_del(&dd->list); |
914 | spin_unlock(&list_lock); | 1205 | spin_unlock(&list_lock); |
915 | 1206 | ||
916 | for (i = 0; i < ARRAY_SIZE(algs); i++) | 1207 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
917 | crypto_unregister_alg(&algs[i]); | 1208 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1209 | crypto_unregister_alg( | ||
1210 | &dd->pdata->algs_info[i].algs_list[j]); | ||
918 | 1211 | ||
919 | tasklet_kill(&dd->done_task); | 1212 | tasklet_kill(&dd->done_task); |
920 | tasklet_kill(&dd->queue_task); | 1213 | tasklet_kill(&dd->queue_task); |
921 | omap_aes_dma_cleanup(dd); | 1214 | omap_aes_dma_cleanup(dd); |
922 | iounmap(dd->io_base); | 1215 | pm_runtime_disable(dd->dev); |
923 | clk_put(dd->iclk); | ||
924 | kfree(dd); | 1216 | kfree(dd); |
925 | dd = NULL; | 1217 | dd = NULL; |
926 | 1218 | ||
927 | return 0; | 1219 | return 0; |
928 | } | 1220 | } |
929 | 1221 | ||
1222 | #ifdef CONFIG_PM_SLEEP | ||
1223 | static int omap_aes_suspend(struct device *dev) | ||
1224 | { | ||
1225 | pm_runtime_put_sync(dev); | ||
1226 | return 0; | ||
1227 | } | ||
1228 | |||
1229 | static int omap_aes_resume(struct device *dev) | ||
1230 | { | ||
1231 | pm_runtime_get_sync(dev); | ||
1232 | return 0; | ||
1233 | } | ||
1234 | #endif | ||
1235 | |||
1236 | static const struct dev_pm_ops omap_aes_pm_ops = { | ||
1237 | SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume) | ||
1238 | }; | ||
1239 | |||
930 | static struct platform_driver omap_aes_driver = { | 1240 | static struct platform_driver omap_aes_driver = { |
931 | .probe = omap_aes_probe, | 1241 | .probe = omap_aes_probe, |
932 | .remove = omap_aes_remove, | 1242 | .remove = omap_aes_remove, |
933 | .driver = { | 1243 | .driver = { |
934 | .name = "omap-aes", | 1244 | .name = "omap-aes", |
935 | .owner = THIS_MODULE, | 1245 | .owner = THIS_MODULE, |
1246 | .pm = &omap_aes_pm_ops, | ||
1247 | .of_match_table = omap_aes_of_match, | ||
936 | }, | 1248 | }, |
937 | }; | 1249 | }; |
938 | 1250 | ||
939 | static int __init omap_aes_mod_init(void) | 1251 | static int __init omap_aes_mod_init(void) |
940 | { | 1252 | { |
941 | pr_info("loading %s driver\n", "omap-aes"); | ||
942 | |||
943 | return platform_driver_register(&omap_aes_driver); | 1253 | return platform_driver_register(&omap_aes_driver); |
944 | } | 1254 | } |
945 | 1255 | ||
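The tail of the omap-aes diff adds system-sleep hooks that simply drop and re-take the driver's runtime-PM reference, and wires them, together with the OF match table, into the platform driver. The patch spells out the dev_pm_ops structure with SET_SYSTEM_SLEEP_PM_OPS; the sketch below uses the equivalent SIMPLE_DEV_PM_OPS shorthand, which expands to the same thing.

```c
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);	/* let the module power down */
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* power it back up before use */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
```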
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9e6947bc296f..3d1611f5aecf 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -5,6 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (c) 2010 Nokia Corporation | 6 | * Copyright (c) 2010 Nokia Corporation |
7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | 7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> |
8 | * Copyright (c) 2011 Texas Instruments Incorporated | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as published | 11 | * it under the terms of the GNU General Public License version 2 as published |
@@ -22,12 +23,18 @@ | |||
22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
23 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
25 | #include <linux/clk.h> | ||
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/scatterlist.h> | 29 | #include <linux/scatterlist.h> |
30 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/omap-dma.h> | ||
33 | #include <linux/pm_runtime.h> | ||
34 | #include <linux/of.h> | ||
35 | #include <linux/of_device.h> | ||
36 | #include <linux/of_address.h> | ||
37 | #include <linux/of_irq.h> | ||
31 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
32 | #include <linux/crypto.h> | 39 | #include <linux/crypto.h> |
33 | #include <linux/cryptohash.h> | 40 | #include <linux/cryptohash.h> |
@@ -37,19 +44,17 @@ | |||
37 | #include <crypto/hash.h> | 44 | #include <crypto/hash.h> |
38 | #include <crypto/internal/hash.h> | 45 | #include <crypto/internal/hash.h> |
39 | 46 | ||
40 | #include <linux/omap-dma.h> | ||
41 | |||
42 | #ifdef CONFIG_ARCH_OMAP1 | ||
43 | #include <mach/irqs.h> | ||
44 | #endif | ||
45 | |||
46 | #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04)) | ||
47 | #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04)) | ||
48 | |||
49 | #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE | 47 | #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE |
50 | #define MD5_DIGEST_SIZE 16 | 48 | #define MD5_DIGEST_SIZE 16 |
51 | 49 | ||
52 | #define SHA_REG_DIGCNT 0x14 | 50 | #define DST_MAXBURST 16 |
51 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | ||
52 | |||
53 | #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) | ||
54 | #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) | ||
55 | #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) | ||
56 | |||
57 | #define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04)) | ||
53 | 58 | ||
54 | #define SHA_REG_CTRL 0x18 | 59 | #define SHA_REG_CTRL 0x18 |
55 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) | 60 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) |
@@ -59,19 +64,42 @@ | |||
59 | #define SHA_REG_CTRL_INPUT_READY (1 << 1) | 64 | #define SHA_REG_CTRL_INPUT_READY (1 << 1) |
60 | #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) | 65 | #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) |
61 | 66 | ||
62 | #define SHA_REG_REV 0x5C | 67 | #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs) |
63 | #define SHA_REG_REV_MAJOR 0xF0 | ||
64 | #define SHA_REG_REV_MINOR 0x0F | ||
65 | 68 | ||
66 | #define SHA_REG_MASK 0x60 | 69 | #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs) |
67 | #define SHA_REG_MASK_DMA_EN (1 << 3) | 70 | #define SHA_REG_MASK_DMA_EN (1 << 3) |
68 | #define SHA_REG_MASK_IT_EN (1 << 2) | 71 | #define SHA_REG_MASK_IT_EN (1 << 2) |
69 | #define SHA_REG_MASK_SOFTRESET (1 << 1) | 72 | #define SHA_REG_MASK_SOFTRESET (1 << 1) |
70 | #define SHA_REG_AUTOIDLE (1 << 0) | 73 | #define SHA_REG_AUTOIDLE (1 << 0) |
71 | 74 | ||
72 | #define SHA_REG_SYSSTATUS 0x64 | 75 | #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) |
73 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) | 76 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) |
74 | 77 | ||
78 | #define SHA_REG_MODE 0x44 | ||
79 | #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) | ||
80 | #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) | ||
81 | #define SHA_REG_MODE_CLOSE_HASH (1 << 4) | ||
82 | #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) | ||
83 | #define SHA_REG_MODE_ALGO_MASK (3 << 1) | ||
84 | #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) | ||
85 | #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) | ||
86 | #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) | ||
87 | #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) | ||
88 | |||
89 | #define SHA_REG_LENGTH 0x48 | ||
90 | |||
91 | #define SHA_REG_IRQSTATUS 0x118 | ||
92 | #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) | ||
93 | #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2) | ||
94 | #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1) | ||
95 | #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0) | ||
96 | |||
97 | #define SHA_REG_IRQENA 0x11C | ||
98 | #define SHA_REG_IRQENA_CTX_RDY (1 << 3) | ||
99 | #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2) | ||
100 | #define SHA_REG_IRQENA_INPUT_RDY (1 << 1) | ||
101 | #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0) | ||
102 | |||
75 | #define DEFAULT_TIMEOUT_INTERVAL HZ | 103 | #define DEFAULT_TIMEOUT_INTERVAL HZ |
76 | 104 | ||
77 | /* mostly device flags */ | 105 | /* mostly device flags */ |
@@ -82,20 +110,33 @@ | |||
82 | #define FLAGS_INIT 4 | 110 | #define FLAGS_INIT 4 |
83 | #define FLAGS_CPU 5 | 111 | #define FLAGS_CPU 5 |
84 | #define FLAGS_DMA_READY 6 | 112 | #define FLAGS_DMA_READY 6 |
113 | #define FLAGS_AUTO_XOR 7 | ||
114 | #define FLAGS_BE32_SHA1 8 | ||
85 | /* context flags */ | 115 | /* context flags */ |
86 | #define FLAGS_FINUP 16 | 116 | #define FLAGS_FINUP 16 |
87 | #define FLAGS_SG 17 | 117 | #define FLAGS_SG 17 |
88 | #define FLAGS_SHA1 18 | ||
89 | #define FLAGS_HMAC 19 | ||
90 | #define FLAGS_ERROR 20 | ||
91 | 118 | ||
92 | #define OP_UPDATE 1 | 119 | #define FLAGS_MODE_SHIFT 18 |
93 | #define OP_FINAL 2 | 120 | #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \ |
121 | << (FLAGS_MODE_SHIFT - 1)) | ||
122 | #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \ | ||
123 | << (FLAGS_MODE_SHIFT - 1)) | ||
124 | #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \ | ||
125 | << (FLAGS_MODE_SHIFT - 1)) | ||
126 | #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \ | ||
127 | << (FLAGS_MODE_SHIFT - 1)) | ||
128 | #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \ | ||
129 | << (FLAGS_MODE_SHIFT - 1)) | ||
130 | #define FLAGS_HMAC 20 | ||
131 | #define FLAGS_ERROR 21 | ||
132 | |||
133 | #define OP_UPDATE 1 | ||
134 | #define OP_FINAL 2 | ||
94 | 135 | ||
95 | #define OMAP_ALIGN_MASK (sizeof(u32)-1) | 136 | #define OMAP_ALIGN_MASK (sizeof(u32)-1) |
96 | #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) | 137 | #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) |
97 | 138 | ||
98 | #define BUFLEN PAGE_SIZE | 139 | #define BUFLEN PAGE_SIZE |
99 | 140 | ||
100 | struct omap_sham_dev; | 141 | struct omap_sham_dev; |
101 | 142 | ||
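The reworked flag layout above lets the driver keep the hardware's two-bit algorithm selector directly inside ctx->flags: the ALGO field sits at bits 2:1 of SHA_REG_MODE, and shifting it left by FLAGS_MODE_SHIFT - 1 = 17 parks it at bits 19:18 of the flags word, from where it can be shifted back down by the same amount when programming the MODE register. A worked expansion of the constants, derived from the definitions in the hunk:

```c
/* SHA_REG_MODE_ALGO_MASK = 3 << 1                       = 0x00000006
 * FLAGS_MODE_MASK   = (3 << 1) << (18 - 1) = 3 << 18    = 0x000c0000
 * FLAGS_MODE_MD5    = (0 << 1) << 17                    = 0x00000000
 * FLAGS_MODE_SHA1   = (1 << 1) << 17       = 1 << 18    = 0x00040000
 * FLAGS_MODE_SHA224 = (2 << 1) << 17       = 2 << 18    = 0x00080000
 * FLAGS_MODE_SHA256 = (3 << 1) << 17       = 3 << 18    = 0x000c0000
 *
 * (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1) therefore
 * yields a value that fits the SHA_REG_MODE ALGO field unchanged.
 */
```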
@@ -104,7 +145,7 @@ struct omap_sham_reqctx { | |||
104 | unsigned long flags; | 145 | unsigned long flags; |
105 | unsigned long op; | 146 | unsigned long op; |
106 | 147 | ||
107 | u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; | 148 | u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED; |
108 | size_t digcnt; | 149 | size_t digcnt; |
109 | size_t bufcnt; | 150 | size_t bufcnt; |
110 | size_t buflen; | 151 | size_t buflen; |
@@ -112,6 +153,7 @@ struct omap_sham_reqctx { | |||
112 | 153 | ||
113 | /* walk state */ | 154 | /* walk state */ |
114 | struct scatterlist *sg; | 155 | struct scatterlist *sg; |
156 | struct scatterlist sgl; | ||
115 | unsigned int offset; /* offset in current sg */ | 157 | unsigned int offset; /* offset in current sg */ |
116 | unsigned int total; /* total request */ | 158 | unsigned int total; /* total request */ |
117 | 159 | ||
@@ -120,8 +162,8 @@ struct omap_sham_reqctx { | |||
120 | 162 | ||
121 | struct omap_sham_hmac_ctx { | 163 | struct omap_sham_hmac_ctx { |
122 | struct crypto_shash *shash; | 164 | struct crypto_shash *shash; |
123 | u8 ipad[SHA1_MD5_BLOCK_SIZE]; | 165 | u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; |
124 | u8 opad[SHA1_MD5_BLOCK_SIZE]; | 166 | u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; |
125 | }; | 167 | }; |
126 | 168 | ||
127 | struct omap_sham_ctx { | 169 | struct omap_sham_ctx { |
@@ -137,22 +179,56 @@ struct omap_sham_ctx { | |||
137 | 179 | ||
138 | #define OMAP_SHAM_QUEUE_LENGTH 1 | 180 | #define OMAP_SHAM_QUEUE_LENGTH 1 |
139 | 181 | ||
182 | struct omap_sham_algs_info { | ||
183 | struct ahash_alg *algs_list; | ||
184 | unsigned int size; | ||
185 | unsigned int registered; | ||
186 | }; | ||
187 | |||
188 | struct omap_sham_pdata { | ||
189 | struct omap_sham_algs_info *algs_info; | ||
190 | unsigned int algs_info_size; | ||
191 | unsigned long flags; | ||
192 | int digest_size; | ||
193 | |||
194 | void (*copy_hash)(struct ahash_request *req, int out); | ||
195 | void (*write_ctrl)(struct omap_sham_dev *dd, size_t length, | ||
196 | int final, int dma); | ||
197 | void (*trigger)(struct omap_sham_dev *dd, size_t length); | ||
198 | int (*poll_irq)(struct omap_sham_dev *dd); | ||
199 | irqreturn_t (*intr_hdlr)(int irq, void *dev_id); | ||
200 | |||
201 | u32 odigest_ofs; | ||
202 | u32 idigest_ofs; | ||
203 | u32 din_ofs; | ||
204 | u32 digcnt_ofs; | ||
205 | u32 rev_ofs; | ||
206 | u32 mask_ofs; | ||
207 | u32 sysstatus_ofs; | ||
208 | |||
209 | u32 major_mask; | ||
210 | u32 major_shift; | ||
211 | u32 minor_mask; | ||
212 | u32 minor_shift; | ||
213 | }; | ||
214 | |||
140 | struct omap_sham_dev { | 215 | struct omap_sham_dev { |
141 | struct list_head list; | 216 | struct list_head list; |
142 | unsigned long phys_base; | 217 | unsigned long phys_base; |
143 | struct device *dev; | 218 | struct device *dev; |
144 | void __iomem *io_base; | 219 | void __iomem *io_base; |
145 | int irq; | 220 | int irq; |
146 | struct clk *iclk; | ||
147 | spinlock_t lock; | 221 | spinlock_t lock; |
148 | int err; | 222 | int err; |
149 | int dma; | 223 | unsigned int dma; |
150 | int dma_lch; | 224 | struct dma_chan *dma_lch; |
151 | struct tasklet_struct done_task; | 225 | struct tasklet_struct done_task; |
152 | 226 | ||
153 | unsigned long flags; | 227 | unsigned long flags; |
154 | struct crypto_queue queue; | 228 | struct crypto_queue queue; |
155 | struct ahash_request *req; | 229 | struct ahash_request *req; |
230 | |||
231 | const struct omap_sham_pdata *pdata; | ||
156 | }; | 232 | }; |
157 | 233 | ||
158 | struct omap_sham_drv { | 234 | struct omap_sham_drv { |
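The omap_sham_pdata block above is what lets one driver serve both IP revisions: the function pointers (copy_hash, write_ctrl, trigger, poll_irq, intr_hdlr) abstract the behavioural differences, while the *_ofs fields parameterize the register map. The SHA_REG_*(dd, ...) macros used later in the diff presumably resolve through these offsets; a sketch of the likely shape (the real definitions live elsewhere in omap-sham.c and are not part of this hunk, so treat these as assumptions):

	#define SHA_REG_IDIGEST(dd, x)	((dd)->pdata->idigest_ofs + ((x) * 0x04))
	#define SHA_REG_DIN(dd, x)	((dd)->pdata->din_ofs + ((x) * 0x04))
	#define SHA_REG_DIGCNT(dd)	((dd)->pdata->digcnt_ofs)
	#define SHA_REG_MASK(dd)	((dd)->pdata->mask_ofs)
	#define SHA_REG_REV(dd)		((dd)->pdata->rev_ofs)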
@@ -200,21 +276,44 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) | |||
200 | return 0; | 276 | return 0; |
201 | } | 277 | } |
202 | 278 | ||
203 | static void omap_sham_copy_hash(struct ahash_request *req, int out) | 279 | static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out) |
204 | { | 280 | { |
205 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 281 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
282 | struct omap_sham_dev *dd = ctx->dd; | ||
206 | u32 *hash = (u32 *)ctx->digest; | 283 | u32 *hash = (u32 *)ctx->digest; |
207 | int i; | 284 | int i; |
208 | 285 | ||
209 | /* MD5 is almost unused. So copy sha1 size to reduce code */ | 286 | for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { |
210 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { | ||
211 | if (out) | 287 | if (out) |
212 | hash[i] = omap_sham_read(ctx->dd, | 288 | hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i)); |
213 | SHA_REG_DIGEST(i)); | ||
214 | else | 289 | else |
215 | omap_sham_write(ctx->dd, | 290 | omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]); |
216 | SHA_REG_DIGEST(i), hash[i]); | 291 | } |
292 | } | ||
293 | |||
294 | static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) | ||
295 | { | ||
296 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
297 | struct omap_sham_dev *dd = ctx->dd; | ||
298 | int i; | ||
299 | |||
300 | if (ctx->flags & BIT(FLAGS_HMAC)) { | ||
301 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); | ||
302 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
303 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
304 | u32 *opad = (u32 *)bctx->opad; | ||
305 | |||
306 | for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { | ||
307 | if (out) | ||
308 | opad[i] = omap_sham_read(dd, | ||
309 | SHA_REG_ODIGEST(i)); | ||
310 | else | ||
311 | omap_sham_write(dd, SHA_REG_ODIGEST(i), | ||
312 | opad[i]); | ||
313 | } | ||
217 | } | 314 | } |
315 | |||
316 | omap_sham_copy_hash_omap2(req, out); | ||
218 | } | 317 | } |
219 | 318 | ||
220 | static void omap_sham_copy_ready_hash(struct ahash_request *req) | 319 | static void omap_sham_copy_ready_hash(struct ahash_request *req) |
@@ -222,34 +321,44 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) | |||
222 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 321 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
223 | u32 *in = (u32 *)ctx->digest; | 322 | u32 *in = (u32 *)ctx->digest; |
224 | u32 *hash = (u32 *)req->result; | 323 | u32 *hash = (u32 *)req->result; |
225 | int i; | 324 | int i, d, big_endian = 0; |
226 | 325 | ||
227 | if (!hash) | 326 | if (!hash) |
228 | return; | 327 | return; |
229 | 328 | ||
230 | if (likely(ctx->flags & BIT(FLAGS_SHA1))) { | 329 | switch (ctx->flags & FLAGS_MODE_MASK) { |
231 | /* SHA1 results are in big endian */ | 330 | case FLAGS_MODE_MD5: |
232 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | 331 | d = MD5_DIGEST_SIZE / sizeof(u32); |
332 | break; | ||
333 | case FLAGS_MODE_SHA1: | ||
334 | /* OMAP2 SHA1 is big endian */ | ||
335 | if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) | ||
336 | big_endian = 1; | ||
337 | d = SHA1_DIGEST_SIZE / sizeof(u32); | ||
338 | break; | ||
339 | case FLAGS_MODE_SHA224: | ||
340 | d = SHA224_DIGEST_SIZE / sizeof(u32); | ||
341 | break; | ||
342 | case FLAGS_MODE_SHA256: | ||
343 | d = SHA256_DIGEST_SIZE / sizeof(u32); | ||
344 | break; | ||
345 | default: | ||
346 | d = 0; | ||
347 | } | ||
348 | |||
349 | if (big_endian) | ||
350 | for (i = 0; i < d; i++) | ||
233 | hash[i] = be32_to_cpu(in[i]); | 351 | hash[i] = be32_to_cpu(in[i]); |
234 | } else { | 352 | else |
235 | /* MD5 results are in little endian */ | 353 | for (i = 0; i < d; i++) |
236 | for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) | ||
237 | hash[i] = le32_to_cpu(in[i]); | 354 | hash[i] = le32_to_cpu(in[i]); |
238 | } | ||
239 | } | 355 | } |
240 | 356 | ||
241 | static int omap_sham_hw_init(struct omap_sham_dev *dd) | 357 | static int omap_sham_hw_init(struct omap_sham_dev *dd) |
242 | { | 358 | { |
243 | clk_enable(dd->iclk); | 359 | pm_runtime_get_sync(dd->dev); |
244 | 360 | ||
245 | if (!test_bit(FLAGS_INIT, &dd->flags)) { | 361 | if (!test_bit(FLAGS_INIT, &dd->flags)) { |
246 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
247 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | ||
248 | |||
249 | if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, | ||
250 | SHA_REG_SYSSTATUS_RESETDONE)) | ||
251 | return -ETIMEDOUT; | ||
252 | |||
253 | set_bit(FLAGS_INIT, &dd->flags); | 362 | set_bit(FLAGS_INIT, &dd->flags); |
254 | dd->err = 0; | 363 | dd->err = 0; |
255 | } | 364 | } |
@@ -257,23 +366,23 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) | |||
257 | return 0; | 366 | return 0; |
258 | } | 367 | } |
259 | 368 | ||
260 | static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | 369 | static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length, |
261 | int final, int dma) | 370 | int final, int dma) |
262 | { | 371 | { |
263 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 372 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
264 | u32 val = length << 5, mask; | 373 | u32 val = length << 5, mask; |
265 | 374 | ||
266 | if (likely(ctx->digcnt)) | 375 | if (likely(ctx->digcnt)) |
267 | omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); | 376 | omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); |
268 | 377 | ||
269 | omap_sham_write_mask(dd, SHA_REG_MASK, | 378 | omap_sham_write_mask(dd, SHA_REG_MASK(dd), |
270 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), | 379 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), |
271 | SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); | 380 | SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); |
272 | /* | 381 | /* |
273 | * Setting ALGO_CONST only for the first iteration | 382 | * Setting ALGO_CONST only for the first iteration |
274 | * and CLOSE_HASH only for the last one. | 383 | * and CLOSE_HASH only for the last one. |
275 | */ | 384 | */ |
276 | if (ctx->flags & BIT(FLAGS_SHA1)) | 385 | if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) |
277 | val |= SHA_REG_CTRL_ALGO; | 386 | val |= SHA_REG_CTRL_ALGO; |
278 | if (!ctx->digcnt) | 387 | if (!ctx->digcnt) |
279 | val |= SHA_REG_CTRL_ALGO_CONST; | 388 | val |= SHA_REG_CTRL_ALGO_CONST; |
@@ -286,6 +395,81 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | |||
286 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); | 395 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); |
287 | } | 396 | } |
288 | 397 | ||
398 | static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length) | ||
399 | { | ||
400 | } | ||
401 | |||
402 | static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) | ||
403 | { | ||
404 | return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); | ||
405 | } | ||
406 | |||
407 | static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, | ||
408 | u32 *value, int count) | ||
409 | { | ||
410 | for (; count--; value++, offset += 4) | ||
411 | omap_sham_write(dd, offset, *value); | ||
412 | } | ||
413 | |||
414 | static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, | ||
415 | int final, int dma) | ||
416 | { | ||
417 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
418 | u32 val, mask; | ||
419 | |||
420 | /* | ||
421 | * Setting ALGO_CONST only for the first iteration and | ||
422 | * CLOSE_HASH only for the last one. Note that flags mode bits | ||
423 | * correspond to algorithm encoding in mode register. | ||
424 | */ | ||
425 | val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1); | ||
426 | if (!ctx->digcnt) { | ||
427 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); | ||
428 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
429 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
430 | |||
431 | val |= SHA_REG_MODE_ALGO_CONSTANT; | ||
432 | |||
433 | if (ctx->flags & BIT(FLAGS_HMAC)) { | ||
434 | val |= SHA_REG_MODE_HMAC_KEY_PROC; | ||
435 | omap_sham_write_n(dd, SHA_REG_ODIGEST(0), | ||
436 | (u32 *)bctx->ipad, | ||
437 | SHA1_BLOCK_SIZE / sizeof(u32)); | ||
438 | ctx->digcnt += SHA1_BLOCK_SIZE; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | if (final) { | ||
443 | val |= SHA_REG_MODE_CLOSE_HASH; | ||
444 | |||
445 | if (ctx->flags & BIT(FLAGS_HMAC)) | ||
446 | val |= SHA_REG_MODE_HMAC_OUTER_HASH; | ||
447 | } | ||
448 | |||
449 | mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH | | ||
450 | SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH | | ||
451 | SHA_REG_MODE_HMAC_KEY_PROC; | ||
452 | |||
453 | dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); | ||
454 | omap_sham_write_mask(dd, SHA_REG_MODE, val, mask); | ||
455 | omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); | ||
456 | omap_sham_write_mask(dd, SHA_REG_MASK(dd), | ||
457 | SHA_REG_MASK_IT_EN | | ||
458 | (dma ? SHA_REG_MASK_DMA_EN : 0), | ||
459 | SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); | ||
460 | } | ||
461 | |||
462 | static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) | ||
463 | { | ||
464 | omap_sham_write(dd, SHA_REG_LENGTH, length); | ||
465 | } | ||
466 | |||
467 | static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) | ||
468 | { | ||
469 | return omap_sham_wait(dd, SHA_REG_IRQSTATUS, | ||
470 | SHA_REG_IRQSTATUS_INPUT_RDY); | ||
471 | } | ||
472 | |||
289 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | 473 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, |
290 | size_t length, int final) | 474 | size_t length, int final) |
291 | { | 475 | { |
@@ -296,12 +480,13 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
296 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | 480 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", |
297 | ctx->digcnt, length, final); | 481 | ctx->digcnt, length, final); |
298 | 482 | ||
299 | omap_sham_write_ctrl(dd, length, final, 0); | 483 | dd->pdata->write_ctrl(dd, length, final, 0); |
484 | dd->pdata->trigger(dd, length); | ||
300 | 485 | ||
301 | /* should be non-zero before next lines to disable clocks later */ | 486 | /* should be non-zero before next lines to disable clocks later */ |
302 | ctx->digcnt += length; | 487 | ctx->digcnt += length; |
303 | 488 | ||
304 | if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) | 489 | if (dd->pdata->poll_irq(dd)) |
305 | return -ETIMEDOUT; | 490 | return -ETIMEDOUT; |
306 | 491 | ||
307 | if (final) | 492 | if (final) |
@@ -312,30 +497,73 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
312 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 497 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
313 | 498 | ||
314 | for (count = 0; count < len32; count++) | 499 | for (count = 0; count < len32; count++) |
315 | omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]); | 500 | omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); |
316 | 501 | ||
317 | return -EINPROGRESS; | 502 | return -EINPROGRESS; |
318 | } | 503 | } |
319 | 504 | ||
505 | static void omap_sham_dma_callback(void *param) | ||
506 | { | ||
507 | struct omap_sham_dev *dd = param; | ||
508 | |||
509 | set_bit(FLAGS_DMA_READY, &dd->flags); | ||
510 | tasklet_schedule(&dd->done_task); | ||
511 | } | ||
512 | |||
320 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | 513 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, |
321 | size_t length, int final) | 514 | size_t length, int final, int is_sg) |
322 | { | 515 | { |
323 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 516 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
324 | int len32; | 517 | struct dma_async_tx_descriptor *tx; |
518 | struct dma_slave_config cfg; | ||
519 | int len32, ret; | ||
325 | 520 | ||
326 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | 521 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", |
327 | ctx->digcnt, length, final); | 522 | ctx->digcnt, length, final); |
328 | 523 | ||
329 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 524 | memset(&cfg, 0, sizeof(cfg)); |
525 | |||
526 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); | ||
527 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
528 | cfg.dst_maxburst = DST_MAXBURST; | ||
529 | |||
530 | ret = dmaengine_slave_config(dd->dma_lch, &cfg); | ||
531 | if (ret) { | ||
532 | pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret); | ||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN; | ||
537 | |||
538 | if (is_sg) { | ||
539 | /* | ||
540 | * The SG entry passed in may not have the 'length' member | ||
541 | * set correctly so use a local SG entry (sgl) with the | ||
542 | * proper value for 'length' instead. If this is not done, | ||
543 | * the dmaengine may try to DMA the incorrect amount of data. | ||
544 | */ | ||
545 | sg_init_table(&ctx->sgl, 1); | ||
546 | ctx->sgl.page_link = ctx->sg->page_link; | ||
547 | ctx->sgl.offset = ctx->sg->offset; | ||
548 | sg_dma_len(&ctx->sgl) = len32; | ||
549 | sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); | ||
550 | |||
551 | tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, | ||
552 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
553 | } else { | ||
554 | tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, | ||
555 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
556 | } | ||
330 | 557 | ||
331 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, | 558 | if (!tx) { |
332 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, | 559 | dev_err(dd->dev, "prep_slave_sg/single() failed\n"); |
333 | OMAP_DMA_DST_SYNC_PREFETCH); | 560 | return -EINVAL; |
561 | } | ||
334 | 562 | ||
335 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, | 563 | tx->callback = omap_sham_dma_callback; |
336 | dma_addr, 0, 0); | 564 | tx->callback_param = dd; |
337 | 565 | ||
338 | omap_sham_write_ctrl(dd, length, final, 1); | 566 | dd->pdata->write_ctrl(dd, length, final, 1); |
339 | 567 | ||
340 | ctx->digcnt += length; | 568 | ctx->digcnt += length; |
341 | 569 | ||
@@ -344,7 +572,10 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
344 | 572 | ||
345 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); | 573 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); |
346 | 574 | ||
347 | omap_start_dma(dd->dma_lch); | 575 | dmaengine_submit(tx); |
576 | dma_async_issue_pending(dd->dma_lch); | ||
577 | |||
578 | dd->pdata->trigger(dd, length); | ||
348 | 579 | ||
349 | return -EINPROGRESS; | 580 | return -EINPROGRESS; |
350 | } | 581 | } |
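The two hunks above replace the OMAP-private DMA calls with the generic dmaengine slave API; the local sgl copy works around scatterlist entries whose length member may not match the amount actually being transferred. Condensed into one self-contained helper, the new sequence is: configure the slave, prepare a descriptor, attach the callback, submit, then issue pending. sham_dma_push is a hypothetical name, not a driver function, and the maxburst value is assumed to be the driver's DST_MAXBURST of 16:

	/* needs <linux/dmaengine.h> */
	static int sham_dma_push(struct dma_chan *ch, dma_addr_t src, size_t len,
				 dma_addr_t din_fifo, dma_async_tx_callback done,
				 void *arg)
	{
		struct dma_slave_config cfg = {
			.dst_addr	= din_fifo,		/* device DIN register */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 16,			/* assumed DST_MAXBURST */
		};
		struct dma_async_tx_descriptor *tx;

		if (dmaengine_slave_config(ch, &cfg))
			return -EINVAL;

		tx = dmaengine_prep_slave_single(ch, src, len, DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -EINVAL;

		tx->callback = done;		/* e.g. omap_sham_dma_callback */
		tx->callback_param = arg;
		dmaengine_submit(tx);		/* queue the descriptor */
		dma_async_issue_pending(ch);	/* start the channel */

		return 0;
	}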
@@ -391,6 +622,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | |||
391 | struct omap_sham_reqctx *ctx, | 622 | struct omap_sham_reqctx *ctx, |
392 | size_t length, int final) | 623 | size_t length, int final) |
393 | { | 624 | { |
625 | int ret; | ||
626 | |||
394 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | 627 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, |
395 | DMA_TO_DEVICE); | 628 | DMA_TO_DEVICE); |
396 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | 629 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { |
@@ -400,8 +633,12 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | |||
400 | 633 | ||
401 | ctx->flags &= ~BIT(FLAGS_SG); | 634 | ctx->flags &= ~BIT(FLAGS_SG); |
402 | 635 | ||
403 | /* next call does not fail... so no unmap in the case of error */ | 636 | ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); |
404 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); | 637 | if (ret != -EINPROGRESS) |
638 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | ||
639 | DMA_TO_DEVICE); | ||
640 | |||
641 | return ret; | ||
405 | } | 642 | } |
406 | 643 | ||
407 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | 644 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) |
@@ -436,6 +673,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
436 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 673 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
437 | unsigned int length, final, tail; | 674 | unsigned int length, final, tail; |
438 | struct scatterlist *sg; | 675 | struct scatterlist *sg; |
676 | int ret; | ||
439 | 677 | ||
440 | if (!ctx->total) | 678 | if (!ctx->total) |
441 | return 0; | 679 | return 0; |
@@ -443,6 +681,15 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
443 | if (ctx->bufcnt || ctx->offset) | 681 | if (ctx->bufcnt || ctx->offset) |
444 | return omap_sham_update_dma_slow(dd); | 682 | return omap_sham_update_dma_slow(dd); |
445 | 683 | ||
684 | /* | ||
685 | * Don't use the sg interface when the transfer size is less | ||
686 | * than the number of elements in a DMA frame. Otherwise, | ||
687 | * the dmaengine infrastructure will calculate that it needs | ||
688 | * to transfer 0 frames which ultimately fails. | ||
689 | */ | ||
690 | if (ctx->total < (DST_MAXBURST * sizeof(u32))) | ||
691 | return omap_sham_update_dma_slow(dd); | ||
692 | |||
446 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | 693 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", |
447 | ctx->digcnt, ctx->bufcnt, ctx->total); | 694 | ctx->digcnt, ctx->bufcnt, ctx->total); |
448 | 695 | ||
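The short-transfer check added above exists because the dmaengine slices a slave transfer into frames of dst_maxburst 32-bit words; a request smaller than one frame rounds down to zero frames and the prepare step fails. Assuming DST_MAXBURST is 16 (its value is defined elsewhere in the driver), the cut-off works out to 64 bytes, which appears to be the same quantity as the DMA_MIN used below. A hypothetical one-line helper expressing the same test:

	/* illustrative only: true when a transfer is too small for even one DMA frame */
	static inline bool omap_sham_below_one_frame(size_t total)
	{
		return total < 16 * sizeof(u32);	/* 16 == assumed DST_MAXBURST */
	}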
@@ -480,8 +727,11 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
480 | 727 | ||
481 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; | 728 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; |
482 | 729 | ||
483 | /* next call does not fail... so no unmap in the case of error */ | 730 | ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); |
484 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); | 731 | if (ret != -EINPROGRESS) |
732 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
733 | |||
734 | return ret; | ||
485 | } | 735 | } |
486 | 736 | ||
487 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | 737 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) |
@@ -500,7 +750,8 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
500 | { | 750 | { |
501 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 751 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
502 | 752 | ||
503 | omap_stop_dma(dd->dma_lch); | 753 | dmaengine_terminate_all(dd->dma_lch); |
754 | |||
504 | if (ctx->flags & BIT(FLAGS_SG)) { | 755 | if (ctx->flags & BIT(FLAGS_SG)) { |
505 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 756 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); |
506 | if (ctx->sg->length == ctx->offset) { | 757 | if (ctx->sg->length == ctx->offset) { |
@@ -542,18 +793,33 @@ static int omap_sham_init(struct ahash_request *req) | |||
542 | dev_dbg(dd->dev, "init: digest size: %d\n", | 793 | dev_dbg(dd->dev, "init: digest size: %d\n", |
543 | crypto_ahash_digestsize(tfm)); | 794 | crypto_ahash_digestsize(tfm)); |
544 | 795 | ||
545 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | 796 | switch (crypto_ahash_digestsize(tfm)) { |
546 | ctx->flags |= BIT(FLAGS_SHA1); | 797 | case MD5_DIGEST_SIZE: |
798 | ctx->flags |= FLAGS_MODE_MD5; | ||
799 | break; | ||
800 | case SHA1_DIGEST_SIZE: | ||
801 | ctx->flags |= FLAGS_MODE_SHA1; | ||
802 | break; | ||
803 | case SHA224_DIGEST_SIZE: | ||
804 | ctx->flags |= FLAGS_MODE_SHA224; | ||
805 | break; | ||
806 | case SHA256_DIGEST_SIZE: | ||
807 | ctx->flags |= FLAGS_MODE_SHA256; | ||
808 | break; | ||
809 | } | ||
547 | 810 | ||
548 | ctx->bufcnt = 0; | 811 | ctx->bufcnt = 0; |
549 | ctx->digcnt = 0; | 812 | ctx->digcnt = 0; |
550 | ctx->buflen = BUFLEN; | 813 | ctx->buflen = BUFLEN; |
551 | 814 | ||
552 | if (tctx->flags & BIT(FLAGS_HMAC)) { | 815 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
553 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 816 | if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { |
817 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
818 | |||
819 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | ||
820 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | ||
821 | } | ||
554 | 822 | ||
555 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | ||
556 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | ||
557 | ctx->flags |= BIT(FLAGS_HMAC); | 823 | ctx->flags |= BIT(FLAGS_HMAC); |
558 | } | 824 | } |
559 | 825 | ||
@@ -587,7 +853,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) | |||
587 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 853 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
588 | int err = 0, use_dma = 1; | 854 | int err = 0, use_dma = 1; |
589 | 855 | ||
590 | if (ctx->bufcnt <= 64) | 856 | if (ctx->bufcnt <= DMA_MIN) |
591 | /* faster to handle last block with cpu */ | 857 | /* faster to handle last block with cpu */ |
592 | use_dma = 0; | 858 | use_dma = 0; |
593 | 859 | ||
@@ -630,7 +896,8 @@ static int omap_sham_finish(struct ahash_request *req) | |||
630 | 896 | ||
631 | if (ctx->digcnt) { | 897 | if (ctx->digcnt) { |
632 | omap_sham_copy_ready_hash(req); | 898 | omap_sham_copy_ready_hash(req); |
633 | if (ctx->flags & BIT(FLAGS_HMAC)) | 899 | if ((ctx->flags & BIT(FLAGS_HMAC)) && |
900 | !test_bit(FLAGS_AUTO_XOR, &dd->flags)) | ||
634 | err = omap_sham_finish_hmac(req); | 901 | err = omap_sham_finish_hmac(req); |
635 | } | 902 | } |
636 | 903 | ||
@@ -645,7 +912,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
645 | struct omap_sham_dev *dd = ctx->dd; | 912 | struct omap_sham_dev *dd = ctx->dd; |
646 | 913 | ||
647 | if (!err) { | 914 | if (!err) { |
648 | omap_sham_copy_hash(req, 1); | 915 | dd->pdata->copy_hash(req, 1); |
649 | if (test_bit(FLAGS_FINAL, &dd->flags)) | 916 | if (test_bit(FLAGS_FINAL, &dd->flags)) |
650 | err = omap_sham_finish(req); | 917 | err = omap_sham_finish(req); |
651 | } else { | 918 | } else { |
@@ -655,7 +922,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
655 | /* atomic operation is not needed here */ | 922 | /* atomic operation is not needed here */ |
656 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | | 923 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | |
657 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); | 924 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); |
658 | clk_disable(dd->iclk); | 925 | |
926 | pm_runtime_put_sync(dd->dev); | ||
659 | 927 | ||
660 | if (req->base.complete) | 928 | if (req->base.complete) |
661 | req->base.complete(&req->base, err); | 929 | req->base.complete(&req->base, err); |
@@ -702,19 +970,9 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
702 | if (err) | 970 | if (err) |
703 | goto err1; | 971 | goto err1; |
704 | 972 | ||
705 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
706 | OMAP_DMA_AMODE_CONSTANT, | ||
707 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
708 | |||
709 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
710 | OMAP_DMA_DATA_BURST_16); | ||
711 | |||
712 | omap_set_dma_src_burst_mode(dd->dma_lch, | ||
713 | OMAP_DMA_DATA_BURST_4); | ||
714 | |||
715 | if (ctx->digcnt) | 973 | if (ctx->digcnt) |
716 | /* request has changed - restore hash */ | 974 | /* request has changed - restore hash */ |
717 | omap_sham_copy_hash(req, 0); | 975 | dd->pdata->copy_hash(req, 0); |
718 | 976 | ||
719 | if (ctx->op == OP_UPDATE) { | 977 | if (ctx->op == OP_UPDATE) { |
720 | err = omap_sham_update_req(dd); | 978 | err = omap_sham_update_req(dd); |
@@ -853,7 +1111,21 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
853 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 1111 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
854 | int bs = crypto_shash_blocksize(bctx->shash); | 1112 | int bs = crypto_shash_blocksize(bctx->shash); |
855 | int ds = crypto_shash_digestsize(bctx->shash); | 1113 | int ds = crypto_shash_digestsize(bctx->shash); |
1114 | struct omap_sham_dev *dd = NULL, *tmp; | ||
856 | int err, i; | 1115 | int err, i; |
1116 | |||
1117 | spin_lock_bh(&sham.lock); | ||
1118 | if (!tctx->dd) { | ||
1119 | list_for_each_entry(tmp, &sham.dev_list, list) { | ||
1120 | dd = tmp; | ||
1121 | break; | ||
1122 | } | ||
1123 | tctx->dd = dd; | ||
1124 | } else { | ||
1125 | dd = tctx->dd; | ||
1126 | } | ||
1127 | spin_unlock_bh(&sham.lock); | ||
1128 | |||
857 | err = crypto_shash_setkey(tctx->fallback, key, keylen); | 1129 | err = crypto_shash_setkey(tctx->fallback, key, keylen); |
858 | if (err) | 1130 | if (err) |
859 | return err; | 1131 | return err; |
@@ -870,11 +1142,14 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
870 | } | 1142 | } |
871 | 1143 | ||
872 | memset(bctx->ipad + keylen, 0, bs - keylen); | 1144 | memset(bctx->ipad + keylen, 0, bs - keylen); |
873 | memcpy(bctx->opad, bctx->ipad, bs); | ||
874 | 1145 | ||
875 | for (i = 0; i < bs; i++) { | 1146 | if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { |
876 | bctx->ipad[i] ^= 0x36; | 1147 | memcpy(bctx->opad, bctx->ipad, bs); |
877 | bctx->opad[i] ^= 0x5c; | 1148 | |
1149 | for (i = 0; i < bs; i++) { | ||
1150 | bctx->ipad[i] ^= 0x36; | ||
1151 | bctx->opad[i] ^= 0x5c; | ||
1152 | } | ||
878 | } | 1153 | } |
879 | 1154 | ||
880 | return err; | 1155 | return err; |
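The setkey change above makes software ipad/opad preparation conditional: on OMAP2/3 the driver XORs the padded key with the standard HMAC constants itself, while OMAP4-class hardware with FLAGS_AUTO_XOR performs the key processing internally (which is also why omap_sham_init() above only prepends the ipad block on non-AUTO_XOR devices). A standalone sketch of the software path, i.e. ordinary RFC 2104 key preparation — hmac_prepare_pads is a hypothetical name; bs is the block size and ipad already holds the zero-padded key:

	static void hmac_prepare_pads(u8 *ipad, u8 *opad, unsigned int bs)
	{
		unsigned int i;

		memcpy(opad, ipad, bs);		/* both pads start from the padded key */
		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;	/* inner pad constant */
			opad[i] ^= 0x5c;	/* outer pad constant */
		}
	}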
@@ -924,6 +1199,16 @@ static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) | |||
924 | return omap_sham_cra_init_alg(tfm, "sha1"); | 1199 | return omap_sham_cra_init_alg(tfm, "sha1"); |
925 | } | 1200 | } |
926 | 1201 | ||
1202 | static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm) | ||
1203 | { | ||
1204 | return omap_sham_cra_init_alg(tfm, "sha224"); | ||
1205 | } | ||
1206 | |||
1207 | static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm) | ||
1208 | { | ||
1209 | return omap_sham_cra_init_alg(tfm, "sha256"); | ||
1210 | } | ||
1211 | |||
927 | static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) | 1212 | static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) |
928 | { | 1213 | { |
929 | return omap_sham_cra_init_alg(tfm, "md5"); | 1214 | return omap_sham_cra_init_alg(tfm, "md5"); |
@@ -942,7 +1227,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) | |||
942 | } | 1227 | } |
943 | } | 1228 | } |
944 | 1229 | ||
945 | static struct ahash_alg algs[] = { | 1230 | static struct ahash_alg algs_sha1_md5[] = { |
946 | { | 1231 | { |
947 | .init = omap_sham_init, | 1232 | .init = omap_sham_init, |
948 | .update = omap_sham_update, | 1233 | .update = omap_sham_update, |
@@ -1041,6 +1326,102 @@ static struct ahash_alg algs[] = { | |||
1041 | } | 1326 | } |
1042 | }; | 1327 | }; |
1043 | 1328 | ||
1329 | /* OMAP4 has some algs in addition to what OMAP2 has */ | ||
1330 | static struct ahash_alg algs_sha224_sha256[] = { | ||
1331 | { | ||
1332 | .init = omap_sham_init, | ||
1333 | .update = omap_sham_update, | ||
1334 | .final = omap_sham_final, | ||
1335 | .finup = omap_sham_finup, | ||
1336 | .digest = omap_sham_digest, | ||
1337 | .halg.digestsize = SHA224_DIGEST_SIZE, | ||
1338 | .halg.base = { | ||
1339 | .cra_name = "sha224", | ||
1340 | .cra_driver_name = "omap-sha224", | ||
1341 | .cra_priority = 100, | ||
1342 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1343 | CRYPTO_ALG_ASYNC | | ||
1344 | CRYPTO_ALG_NEED_FALLBACK, | ||
1345 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
1346 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
1347 | .cra_alignmask = 0, | ||
1348 | .cra_module = THIS_MODULE, | ||
1349 | .cra_init = omap_sham_cra_init, | ||
1350 | .cra_exit = omap_sham_cra_exit, | ||
1351 | } | ||
1352 | }, | ||
1353 | { | ||
1354 | .init = omap_sham_init, | ||
1355 | .update = omap_sham_update, | ||
1356 | .final = omap_sham_final, | ||
1357 | .finup = omap_sham_finup, | ||
1358 | .digest = omap_sham_digest, | ||
1359 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1360 | .halg.base = { | ||
1361 | .cra_name = "sha256", | ||
1362 | .cra_driver_name = "omap-sha256", | ||
1363 | .cra_priority = 100, | ||
1364 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1365 | CRYPTO_ALG_ASYNC | | ||
1366 | CRYPTO_ALG_NEED_FALLBACK, | ||
1367 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1368 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
1369 | .cra_alignmask = 0, | ||
1370 | .cra_module = THIS_MODULE, | ||
1371 | .cra_init = omap_sham_cra_init, | ||
1372 | .cra_exit = omap_sham_cra_exit, | ||
1373 | } | ||
1374 | }, | ||
1375 | { | ||
1376 | .init = omap_sham_init, | ||
1377 | .update = omap_sham_update, | ||
1378 | .final = omap_sham_final, | ||
1379 | .finup = omap_sham_finup, | ||
1380 | .digest = omap_sham_digest, | ||
1381 | .setkey = omap_sham_setkey, | ||
1382 | .halg.digestsize = SHA224_DIGEST_SIZE, | ||
1383 | .halg.base = { | ||
1384 | .cra_name = "hmac(sha224)", | ||
1385 | .cra_driver_name = "omap-hmac-sha224", | ||
1386 | .cra_priority = 100, | ||
1387 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1388 | CRYPTO_ALG_ASYNC | | ||
1389 | CRYPTO_ALG_NEED_FALLBACK, | ||
1390 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
1391 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
1392 | sizeof(struct omap_sham_hmac_ctx), | ||
1393 | .cra_alignmask = OMAP_ALIGN_MASK, | ||
1394 | .cra_module = THIS_MODULE, | ||
1395 | .cra_init = omap_sham_cra_sha224_init, | ||
1396 | .cra_exit = omap_sham_cra_exit, | ||
1397 | } | ||
1398 | }, | ||
1399 | { | ||
1400 | .init = omap_sham_init, | ||
1401 | .update = omap_sham_update, | ||
1402 | .final = omap_sham_final, | ||
1403 | .finup = omap_sham_finup, | ||
1404 | .digest = omap_sham_digest, | ||
1405 | .setkey = omap_sham_setkey, | ||
1406 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1407 | .halg.base = { | ||
1408 | .cra_name = "hmac(sha256)", | ||
1409 | .cra_driver_name = "omap-hmac-sha256", | ||
1410 | .cra_priority = 100, | ||
1411 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1412 | CRYPTO_ALG_ASYNC | | ||
1413 | CRYPTO_ALG_NEED_FALLBACK, | ||
1414 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1415 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
1416 | sizeof(struct omap_sham_hmac_ctx), | ||
1417 | .cra_alignmask = OMAP_ALIGN_MASK, | ||
1418 | .cra_module = THIS_MODULE, | ||
1419 | .cra_init = omap_sham_cra_sha256_init, | ||
1420 | .cra_exit = omap_sham_cra_exit, | ||
1421 | } | ||
1422 | }, | ||
1423 | }; | ||
1424 | |||
1044 | static void omap_sham_done_task(unsigned long data) | 1425 | static void omap_sham_done_task(unsigned long data) |
1045 | { | 1426 | { |
1046 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1427 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
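The algs_sha224_sha256 table above is only registered on hardware whose pdata lists it (see omap_sham_algs_info_omap4 further down), so callers keep using the normal ahash API and simply get the accelerated transform when it is present. A hypothetical caller-side sketch, trimmed of asynchronous completion handling — a real user must wait for -EINPROGRESS/-EBUSY via the request callback before freeing anything:

	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/hash.h>

	static int sha256_digest_once(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_ahash("sha256", 0, 0);	/* may resolve to "omap-sha256" */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		ret = crypto_ahash_digest(req);
		/* async completion handling omitted in this sketch */

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return ret;
	}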
@@ -1079,7 +1460,19 @@ finish: | |||
1079 | omap_sham_finish_req(dd->req, err); | 1460 | omap_sham_finish_req(dd->req, err); |
1080 | } | 1461 | } |
1081 | 1462 | ||
1082 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | 1463 | static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) |
1464 | { | ||
1465 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { | ||
1466 | dev_warn(dd->dev, "Interrupt when no active requests.\n"); | ||
1467 | } else { | ||
1468 | set_bit(FLAGS_OUTPUT_READY, &dd->flags); | ||
1469 | tasklet_schedule(&dd->done_task); | ||
1470 | } | ||
1471 | |||
1472 | return IRQ_HANDLED; | ||
1473 | } | ||
1474 | |||
1475 | static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id) | ||
1083 | { | 1476 | { |
1084 | struct omap_sham_dev *dd = dev_id; | 1477 | struct omap_sham_dev *dd = dev_id; |
1085 | 1478 | ||
@@ -1091,61 +1484,188 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) | |||
1091 | SHA_REG_CTRL_OUTPUT_READY); | 1484 | SHA_REG_CTRL_OUTPUT_READY); |
1092 | omap_sham_read(dd, SHA_REG_CTRL); | 1485 | omap_sham_read(dd, SHA_REG_CTRL); |
1093 | 1486 | ||
1094 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { | 1487 | return omap_sham_irq_common(dd); |
1095 | dev_warn(dd->dev, "Interrupt when no active requests.\n"); | 1488 | } |
1096 | return IRQ_HANDLED; | ||
1097 | } | ||
1098 | 1489 | ||
1099 | set_bit(FLAGS_OUTPUT_READY, &dd->flags); | 1490 | static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id) |
1100 | tasklet_schedule(&dd->done_task); | 1491 | { |
1492 | struct omap_sham_dev *dd = dev_id; | ||
1101 | 1493 | ||
1102 | return IRQ_HANDLED; | 1494 | omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN); |
1495 | |||
1496 | return omap_sham_irq_common(dd); | ||
1103 | } | 1497 | } |
1104 | 1498 | ||
1105 | static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | 1499 | static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = { |
1500 | { | ||
1501 | .algs_list = algs_sha1_md5, | ||
1502 | .size = ARRAY_SIZE(algs_sha1_md5), | ||
1503 | }, | ||
1504 | }; | ||
1505 | |||
1506 | static const struct omap_sham_pdata omap_sham_pdata_omap2 = { | ||
1507 | .algs_info = omap_sham_algs_info_omap2, | ||
1508 | .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2), | ||
1509 | .flags = BIT(FLAGS_BE32_SHA1), | ||
1510 | .digest_size = SHA1_DIGEST_SIZE, | ||
1511 | .copy_hash = omap_sham_copy_hash_omap2, | ||
1512 | .write_ctrl = omap_sham_write_ctrl_omap2, | ||
1513 | .trigger = omap_sham_trigger_omap2, | ||
1514 | .poll_irq = omap_sham_poll_irq_omap2, | ||
1515 | .intr_hdlr = omap_sham_irq_omap2, | ||
1516 | .idigest_ofs = 0x00, | ||
1517 | .din_ofs = 0x1c, | ||
1518 | .digcnt_ofs = 0x14, | ||
1519 | .rev_ofs = 0x5c, | ||
1520 | .mask_ofs = 0x60, | ||
1521 | .sysstatus_ofs = 0x64, | ||
1522 | .major_mask = 0xf0, | ||
1523 | .major_shift = 4, | ||
1524 | .minor_mask = 0x0f, | ||
1525 | .minor_shift = 0, | ||
1526 | }; | ||
1527 | |||
1528 | #ifdef CONFIG_OF | ||
1529 | static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = { | ||
1530 | { | ||
1531 | .algs_list = algs_sha1_md5, | ||
1532 | .size = ARRAY_SIZE(algs_sha1_md5), | ||
1533 | }, | ||
1534 | { | ||
1535 | .algs_list = algs_sha224_sha256, | ||
1536 | .size = ARRAY_SIZE(algs_sha224_sha256), | ||
1537 | }, | ||
1538 | }; | ||
1539 | |||
1540 | static const struct omap_sham_pdata omap_sham_pdata_omap4 = { | ||
1541 | .algs_info = omap_sham_algs_info_omap4, | ||
1542 | .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4), | ||
1543 | .flags = BIT(FLAGS_AUTO_XOR), | ||
1544 | .digest_size = SHA256_DIGEST_SIZE, | ||
1545 | .copy_hash = omap_sham_copy_hash_omap4, | ||
1546 | .write_ctrl = omap_sham_write_ctrl_omap4, | ||
1547 | .trigger = omap_sham_trigger_omap4, | ||
1548 | .poll_irq = omap_sham_poll_irq_omap4, | ||
1549 | .intr_hdlr = omap_sham_irq_omap4, | ||
1550 | .idigest_ofs = 0x020, | ||
1551 | .din_ofs = 0x080, | ||
1552 | .digcnt_ofs = 0x040, | ||
1553 | .rev_ofs = 0x100, | ||
1554 | .mask_ofs = 0x110, | ||
1555 | .sysstatus_ofs = 0x114, | ||
1556 | .major_mask = 0x0700, | ||
1557 | .major_shift = 8, | ||
1558 | .minor_mask = 0x003f, | ||
1559 | .minor_shift = 0, | ||
1560 | }; | ||
1561 | |||
1562 | static const struct of_device_id omap_sham_of_match[] = { | ||
1563 | { | ||
1564 | .compatible = "ti,omap2-sham", | ||
1565 | .data = &omap_sham_pdata_omap2, | ||
1566 | }, | ||
1567 | { | ||
1568 | .compatible = "ti,omap4-sham", | ||
1569 | .data = &omap_sham_pdata_omap4, | ||
1570 | }, | ||
1571 | {}, | ||
1572 | }; | ||
1573 | MODULE_DEVICE_TABLE(of, omap_sham_of_match); | ||
1574 | |||
1575 | static int omap_sham_get_res_of(struct omap_sham_dev *dd, | ||
1576 | struct device *dev, struct resource *res) | ||
1106 | { | 1577 | { |
1107 | struct omap_sham_dev *dd = data; | 1578 | struct device_node *node = dev->of_node; |
1579 | const struct of_device_id *match; | ||
1580 | int err = 0; | ||
1108 | 1581 | ||
1109 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | 1582 | match = of_match_device(of_match_ptr(omap_sham_of_match), dev); |
1110 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); | 1583 | if (!match) { |
1111 | dd->err = -EIO; | 1584 | dev_err(dev, "no compatible OF match\n"); |
1112 | clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ | 1585 | err = -EINVAL; |
1586 | goto err; | ||
1113 | } | 1587 | } |
1114 | 1588 | ||
1115 | set_bit(FLAGS_DMA_READY, &dd->flags); | 1589 | err = of_address_to_resource(node, 0, res); |
1116 | tasklet_schedule(&dd->done_task); | 1590 | if (err < 0) { |
1591 | dev_err(dev, "can't translate OF node address\n"); | ||
1592 | err = -EINVAL; | ||
1593 | goto err; | ||
1594 | } | ||
1595 | |||
1596 | dd->irq = of_irq_to_resource(node, 0, NULL); | ||
1597 | if (!dd->irq) { | ||
1598 | dev_err(dev, "can't translate OF irq value\n"); | ||
1599 | err = -EINVAL; | ||
1600 | goto err; | ||
1601 | } | ||
1602 | |||
1603 | dd->dma = -1; /* Dummy value that's unused */ | ||
1604 | dd->pdata = match->data; | ||
1605 | |||
1606 | err: | ||
1607 | return err; | ||
1117 | } | 1608 | } |
1609 | #else | ||
1610 | static const struct of_device_id omap_sham_of_match[] = { | ||
1611 | {}, | ||
1612 | }; | ||
1118 | 1613 | ||
1119 | static int omap_sham_dma_init(struct omap_sham_dev *dd) | 1614 | static int omap_sham_get_res_of(struct omap_sham_dev *dd, |
1615 | struct device *dev, struct resource *res) | ||
1120 | { | 1616 | { |
1121 | int err; | 1617 | return -EINVAL; |
1618 | } | ||
1619 | #endif | ||
1122 | 1620 | ||
1123 | dd->dma_lch = -1; | 1621 | static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, |
1622 | struct platform_device *pdev, struct resource *res) | ||
1623 | { | ||
1624 | struct device *dev = &pdev->dev; | ||
1625 | struct resource *r; | ||
1626 | int err = 0; | ||
1124 | 1627 | ||
1125 | err = omap_request_dma(dd->dma, dev_name(dd->dev), | 1628 | /* Get the base address */ |
1126 | omap_sham_dma_callback, dd, &dd->dma_lch); | 1629 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1127 | if (err) { | 1630 | if (!r) { |
1128 | dev_err(dd->dev, "Unable to request DMA channel\n"); | 1631 | dev_err(dev, "no MEM resource info\n"); |
1129 | return err; | 1632 | err = -ENODEV; |
1633 | goto err; | ||
1130 | } | 1634 | } |
1635 | memcpy(res, r, sizeof(*res)); | ||
1131 | 1636 | ||
1132 | return 0; | 1637 | /* Get the IRQ */ |
1133 | } | 1638 | dd->irq = platform_get_irq(pdev, 0); |
1639 | if (dd->irq < 0) { | ||
1640 | dev_err(dev, "no IRQ resource info\n"); | ||
1641 | err = dd->irq; | ||
1642 | goto err; | ||
1643 | } | ||
1134 | 1644 | ||
1135 | static void omap_sham_dma_cleanup(struct omap_sham_dev *dd) | 1645 | /* Get the DMA */ |
1136 | { | 1646 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1137 | if (dd->dma_lch >= 0) { | 1647 | if (!r) { |
1138 | omap_free_dma(dd->dma_lch); | 1648 | dev_err(dev, "no DMA resource info\n"); |
1139 | dd->dma_lch = -1; | 1649 | err = -ENODEV; |
1650 | goto err; | ||
1140 | } | 1651 | } |
1652 | dd->dma = r->start; | ||
1653 | |||
1654 | /* Only OMAP2/3 can be non-DT */ | ||
1655 | dd->pdata = &omap_sham_pdata_omap2; | ||
1656 | |||
1657 | err: | ||
1658 | return err; | ||
1141 | } | 1659 | } |
1142 | 1660 | ||
1143 | static int omap_sham_probe(struct platform_device *pdev) | 1661 | static int omap_sham_probe(struct platform_device *pdev) |
1144 | { | 1662 | { |
1145 | struct omap_sham_dev *dd; | 1663 | struct omap_sham_dev *dd; |
1146 | struct device *dev = &pdev->dev; | 1664 | struct device *dev = &pdev->dev; |
1147 | struct resource *res; | 1665 | struct resource res; |
1666 | dma_cap_mask_t mask; | ||
1148 | int err, i, j; | 1667 | int err, i, j; |
1668 | u32 rev; | ||
1149 | 1669 | ||
1150 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); | 1670 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); |
1151 | if (dd == NULL) { | 1671 | if (dd == NULL) { |
@@ -1161,89 +1681,75 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1161 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); | 1681 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); |
1162 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); | 1682 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); |
1163 | 1683 | ||
1164 | dd->irq = -1; | 1684 | err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : |
1165 | 1685 | omap_sham_get_res_pdev(dd, pdev, &res); | |
1166 | /* Get the base address */ | 1686 | if (err) |
1167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1168 | if (!res) { | ||
1169 | dev_err(dev, "no MEM resource info\n"); | ||
1170 | err = -ENODEV; | ||
1171 | goto res_err; | ||
1172 | } | ||
1173 | dd->phys_base = res->start; | ||
1174 | |||
1175 | /* Get the DMA */ | ||
1176 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1177 | if (!res) { | ||
1178 | dev_err(dev, "no DMA resource info\n"); | ||
1179 | err = -ENODEV; | ||
1180 | goto res_err; | 1687 | goto res_err; |
1181 | } | ||
1182 | dd->dma = res->start; | ||
1183 | 1688 | ||
1184 | /* Get the IRQ */ | 1689 | dd->io_base = devm_request_and_ioremap(dev, &res); |
1185 | dd->irq = platform_get_irq(pdev, 0); | 1690 | if (!dd->io_base) { |
1186 | if (dd->irq < 0) { | 1691 | dev_err(dev, "can't ioremap\n"); |
1187 | dev_err(dev, "no IRQ resource info\n"); | 1692 | err = -ENOMEM; |
1188 | err = dd->irq; | ||
1189 | goto res_err; | 1693 | goto res_err; |
1190 | } | 1694 | } |
1695 | dd->phys_base = res.start; | ||
1191 | 1696 | ||
1192 | err = request_irq(dd->irq, omap_sham_irq, | 1697 | err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW, |
1193 | IRQF_TRIGGER_LOW, dev_name(dev), dd); | 1698 | dev_name(dev), dd); |
1194 | if (err) { | 1699 | if (err) { |
1195 | dev_err(dev, "unable to request irq.\n"); | 1700 | dev_err(dev, "unable to request irq.\n"); |
1196 | goto res_err; | 1701 | goto res_err; |
1197 | } | 1702 | } |
1198 | 1703 | ||
1199 | err = omap_sham_dma_init(dd); | 1704 | dma_cap_zero(mask); |
1200 | if (err) | 1705 | dma_cap_set(DMA_SLAVE, mask); |
1201 | goto dma_err; | ||
1202 | 1706 | ||
1203 | /* Initializing the clock */ | 1707 | dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, |
1204 | dd->iclk = clk_get(dev, "ick"); | 1708 | &dd->dma, dev, "rx"); |
1205 | if (IS_ERR(dd->iclk)) { | 1709 | if (!dd->dma_lch) { |
1206 | dev_err(dev, "clock intialization failed.\n"); | 1710 | dev_err(dev, "unable to obtain RX DMA engine channel %u\n", |
1207 | err = PTR_ERR(dd->iclk); | 1711 | dd->dma); |
1208 | goto clk_err; | 1712 | err = -ENXIO; |
1713 | goto dma_err; | ||
1209 | } | 1714 | } |
1210 | 1715 | ||
1211 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | 1716 | dd->flags |= dd->pdata->flags; |
1212 | if (!dd->io_base) { | 1717 | |
1213 | dev_err(dev, "can't ioremap\n"); | 1718 | pm_runtime_enable(dev); |
1214 | err = -ENOMEM; | 1719 | pm_runtime_get_sync(dev); |
1215 | goto io_err; | 1720 | rev = omap_sham_read(dd, SHA_REG_REV(dd)); |
1216 | } | 1721 | pm_runtime_put_sync(&pdev->dev); |
1217 | 1722 | ||
1218 | clk_enable(dd->iclk); | ||
1219 | dev_info(dev, "hw accel on OMAP rev %u.%u\n", | 1723 | dev_info(dev, "hw accel on OMAP rev %u.%u\n", |
1220 | (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4, | 1724 | (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, |
1221 | omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR); | 1725 | (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); |
1222 | clk_disable(dd->iclk); | ||
1223 | 1726 | ||
1224 | spin_lock(&sham.lock); | 1727 | spin_lock(&sham.lock); |
1225 | list_add_tail(&dd->list, &sham.dev_list); | 1728 | list_add_tail(&dd->list, &sham.dev_list); |
1226 | spin_unlock(&sham.lock); | 1729 | spin_unlock(&sham.lock); |
1227 | 1730 | ||
1228 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | 1731 | for (i = 0; i < dd->pdata->algs_info_size; i++) { |
1229 | err = crypto_register_ahash(&algs[i]); | 1732 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { |
1230 | if (err) | 1733 | err = crypto_register_ahash( |
1231 | goto err_algs; | 1734 | &dd->pdata->algs_info[i].algs_list[j]); |
1735 | if (err) | ||
1736 | goto err_algs; | ||
1737 | |||
1738 | dd->pdata->algs_info[i].registered++; | ||
1739 | } | ||
1232 | } | 1740 | } |
1233 | 1741 | ||
1234 | return 0; | 1742 | return 0; |
1235 | 1743 | ||
1236 | err_algs: | 1744 | err_algs: |
1237 | for (j = 0; j < i; j++) | 1745 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
1238 | crypto_unregister_ahash(&algs[j]); | 1746 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1239 | iounmap(dd->io_base); | 1747 | crypto_unregister_ahash( |
1240 | io_err: | 1748 | &dd->pdata->algs_info[i].algs_list[j]); |
1241 | clk_put(dd->iclk); | 1749 | pm_runtime_disable(dev); |
1242 | clk_err: | 1750 | dma_release_channel(dd->dma_lch); |
1243 | omap_sham_dma_cleanup(dd); | ||
1244 | dma_err: | 1751 | dma_err: |
1245 | if (dd->irq >= 0) | 1752 | free_irq(dd->irq, dd); |
1246 | free_irq(dd->irq, dd); | ||
1247 | res_err: | 1753 | res_err: |
1248 | kfree(dd); | 1754 | kfree(dd); |
1249 | dd = NULL; | 1755 | dd = NULL; |
@@ -1256,7 +1762,7 @@ data_err: | |||
1256 | static int omap_sham_remove(struct platform_device *pdev) | 1762 | static int omap_sham_remove(struct platform_device *pdev) |
1257 | { | 1763 | { |
1258 | static struct omap_sham_dev *dd; | 1764 | static struct omap_sham_dev *dd; |
1259 | int i; | 1765 | int i, j; |
1260 | 1766 | ||
1261 | dd = platform_get_drvdata(pdev); | 1767 | dd = platform_get_drvdata(pdev); |
1262 | if (!dd) | 1768 | if (!dd) |
@@ -1264,33 +1770,51 @@ static int omap_sham_remove(struct platform_device *pdev) | |||
1264 | spin_lock(&sham.lock); | 1770 | spin_lock(&sham.lock); |
1265 | list_del(&dd->list); | 1771 | list_del(&dd->list); |
1266 | spin_unlock(&sham.lock); | 1772 | spin_unlock(&sham.lock); |
1267 | for (i = 0; i < ARRAY_SIZE(algs); i++) | 1773 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
1268 | crypto_unregister_ahash(&algs[i]); | 1774 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1775 | crypto_unregister_ahash( | ||
1776 | &dd->pdata->algs_info[i].algs_list[j]); | ||
1269 | tasklet_kill(&dd->done_task); | 1777 | tasklet_kill(&dd->done_task); |
1270 | iounmap(dd->io_base); | 1778 | pm_runtime_disable(&pdev->dev); |
1271 | clk_put(dd->iclk); | 1779 | dma_release_channel(dd->dma_lch); |
1272 | omap_sham_dma_cleanup(dd); | 1780 | free_irq(dd->irq, dd); |
1273 | if (dd->irq >= 0) | ||
1274 | free_irq(dd->irq, dd); | ||
1275 | kfree(dd); | 1781 | kfree(dd); |
1276 | dd = NULL; | 1782 | dd = NULL; |
1277 | 1783 | ||
1278 | return 0; | 1784 | return 0; |
1279 | } | 1785 | } |
1280 | 1786 | ||
1787 | #ifdef CONFIG_PM_SLEEP | ||
1788 | static int omap_sham_suspend(struct device *dev) | ||
1789 | { | ||
1790 | pm_runtime_put_sync(dev); | ||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | static int omap_sham_resume(struct device *dev) | ||
1795 | { | ||
1796 | pm_runtime_get_sync(dev); | ||
1797 | return 0; | ||
1798 | } | ||
1799 | #endif | ||
1800 | |||
1801 | static const struct dev_pm_ops omap_sham_pm_ops = { | ||
1802 | SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume) | ||
1803 | }; | ||
1804 | |||
1281 | static struct platform_driver omap_sham_driver = { | 1805 | static struct platform_driver omap_sham_driver = { |
1282 | .probe = omap_sham_probe, | 1806 | .probe = omap_sham_probe, |
1283 | .remove = omap_sham_remove, | 1807 | .remove = omap_sham_remove, |
1284 | .driver = { | 1808 | .driver = { |
1285 | .name = "omap-sham", | 1809 | .name = "omap-sham", |
1286 | .owner = THIS_MODULE, | 1810 | .owner = THIS_MODULE, |
1811 | .pm = &omap_sham_pm_ops, | ||
1812 | .of_match_table = omap_sham_of_match, | ||
1287 | }, | 1813 | }, |
1288 | }; | 1814 | }; |
1289 | 1815 | ||
1290 | static int __init omap_sham_mod_init(void) | 1816 | static int __init omap_sham_mod_init(void) |
1291 | { | 1817 | { |
1292 | pr_info("loading %s driver\n", "omap-sham"); | ||
1293 | |||
1294 | return platform_driver_register(&omap_sham_driver); | 1818 | return platform_driver_register(&omap_sham_driver); |
1295 | } | 1819 | } |
1296 | 1820 | ||
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 49ad8cbade69..4b314326f48a 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -580,7 +580,7 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
580 | resource_size(res), pdev->name)) | 580 | resource_size(res), pdev->name)) |
581 | return -EBUSY; | 581 | return -EBUSY; |
582 | 582 | ||
583 | pdata->clk = clk_get(dev, "secss"); | 583 | pdata->clk = devm_clk_get(dev, "secss"); |
584 | if (IS_ERR(pdata->clk)) { | 584 | if (IS_ERR(pdata->clk)) { |
585 | dev_err(dev, "failed to find secss clock source\n"); | 585 | dev_err(dev, "failed to find secss clock source\n"); |
586 | return -ENOENT; | 586 | return -ENOENT; |
@@ -645,7 +645,6 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
645 | 645 | ||
646 | err_irq: | 646 | err_irq: |
647 | clk_disable(pdata->clk); | 647 | clk_disable(pdata->clk); |
648 | clk_put(pdata->clk); | ||
649 | 648 | ||
650 | s5p_dev = NULL; | 649 | s5p_dev = NULL; |
651 | platform_set_drvdata(pdev, NULL); | 650 | platform_set_drvdata(pdev, NULL); |
@@ -667,7 +666,6 @@ static int s5p_aes_remove(struct platform_device *pdev) | |||
667 | tasklet_kill(&pdata->tasklet); | 666 | tasklet_kill(&pdata->tasklet); |
668 | 667 | ||
669 | clk_disable(pdata->clk); | 668 | clk_disable(pdata->clk); |
670 | clk_put(pdata->clk); | ||
671 | 669 | ||
672 | s5p_dev = NULL; | 670 | s5p_dev = NULL; |
673 | platform_set_drvdata(pdev, NULL); | 671 | platform_set_drvdata(pdev, NULL); |