author    | Andreas Westin <andreas.westin@stericsson.com> | 2012-04-30 04:11:18 -0400
committer | Herbert Xu <herbert@gondor.apana.org.au>       | 2012-05-04 05:04:51 -0400
commit    | 8a63b1994c500d4825ee73dc71502deffe5b135b (patch)
tree      | 8495959cd99fba4c8a4a173b016a1cabdd1fe865 /drivers/crypto/ux500
parent    | 2789c08fffeae270820dda5d096634aecc810af5 (diff)
crypto: ux500 - Add driver for HASH hardware
This adds a driver for the ST-Ericsson ux500 hash hardware
module. The driver implements support for SHA-1 and SHA-256.
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Andreas Westin <andreas.westin@stericsson.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ux500')
-rw-r--r-- | drivers/crypto/ux500/Kconfig          |    9
-rw-r--r-- | drivers/crypto/ux500/Makefile         |    1
-rw-r--r-- | drivers/crypto/ux500/hash/Makefile    |   11
-rw-r--r-- | drivers/crypto/ux500/hash/hash_alg.h  |  395
-rw-r--r-- | drivers/crypto/ux500/hash/hash_core.c | 2019
5 files changed, 2435 insertions, 0 deletions
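The driver below registers with the kernel's asynchronous hash (ahash) framework, so in-kernel users reach it through the generic crypto API rather than by calling the driver directly. As an illustration only (not part of this commit), a minimal caller computing a one-shot SHA-256 digest could look roughly like the sketch below; the function and struct names are made up for the example, and the asynchronous completion handling is open-coded in the style common at the time of this patch.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Illustrative helper, not part of the patch. */
struct example_wait {
	struct completion completion;
	int err;
};

/* Called by the crypto layer when an asynchronous request finishes. */
static void example_done(struct crypto_async_request *areq, int err)
{
	struct example_wait *wait = areq->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->completion);
}

/* One-shot SHA-256 digest of a linear buffer through the ahash API. */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_wait wait;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.completion);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.completion);
		ret = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

Whether such a request lands on this hardware or on a software implementation depends on the registered algorithm priorities; the driver's choice between DMA and CPU data transfer is additionally controlled by its hash_mode module parameter, introduced in hash_core.c below.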
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b893fa061da4..b35e5c4b025a 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -12,6 +12,15 @@ config CRYPTO_DEV_UX500_CRYP
 	  This selects the crypto driver for the UX500_CRYP hardware. It supports
 	  AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
 
+config CRYPTO_DEV_UX500_HASH
+	tristate "UX500 crypto driver for HASH block"
+	depends on CRYPTO_DEV_UX500
+	select CRYPTO_HASH
+	select CRYPTO_HMAC
+	help
+	  This selects the hash driver for the UX500_HASH hardware.
+	  Depends on UX500/STM DMA if running in DMA mode.
+
 config CRYPTO_DEV_UX500_DEBUG
 	bool "Activate ux500 platform debug-mode for crypto and hash block"
 	depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
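For completeness (not part of the patch), a hypothetical .config fragment that builds the new option as a module might look as follows, assuming the platform already enables the parent CRYPTO_DEV_UX500 option; CRYPTO_HASH and CRYPTO_HMAC are then pulled in by the select statements above.

CONFIG_CRYPTO_DEV_UX500=y
CONFIG_CRYPTO_DEV_UX500_HASH=m
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HMAC=y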
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
index beb4d37db7b4..b9a365bade86 100644
--- a/drivers/crypto/ux500/Makefile
+++ b/drivers/crypto/ux500/Makefile
@@ -4,4 +4,5 @@
 # License terms: GNU General Public License (GPL) version 2
 #
 
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
 obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 000000000000..b2f90d9bac72
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
1 | # | ||
2 | # Copyright (C) ST-Ericsson SA 2010 | ||
3 | # Author: Shujuan Chen (shujuan.chen@stericsson.com) | ||
4 | # License terms: GNU General Public License (GPL) version 2 | ||
5 | # | ||
6 | ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG | ||
7 | CFLAGS_hash_core.o := -DDEBUG -O0 | ||
8 | endif | ||
9 | |||
10 | obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o | ||
11 | ux500_hash-objs := hash_core.o | ||
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 000000000000..cd9351cb24df
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,395 @@
1 | /* | ||
2 | * Copyright (C) ST-Ericsson SA 2010 | ||
3 | * Author: Shujuan Chen (shujuan.chen@stericsson.com) | ||
4 | * Author: Joakim Bech (joakim.xx.bech@stericsson.com) | ||
5 | * Author: Berne Hebark (berne.hebark@stericsson.com) | ||
6 | * License terms: GNU General Public License (GPL) version 2 | ||
7 | */ | ||
8 | #ifndef _HASH_ALG_H | ||
9 | #define _HASH_ALG_H | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | |||
13 | #define HASH_BLOCK_SIZE 64 | ||
14 | #define HASH_DMA_ALIGN_SIZE 4 | ||
15 | #define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 | ||
16 | #define HASH_BYTES_PER_WORD 4 | ||
17 | |||
18 | /* Maximum value of the length's high word */ | ||
19 | #define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL | ||
20 | |||
21 | /* Power on Reset values HASH registers */ | ||
22 | #define HASH_RESET_CR_VALUE 0x0 | ||
23 | #define HASH_RESET_STR_VALUE 0x0 | ||
24 | |||
25 | /* Number of context swap registers */ | ||
26 | #define HASH_CSR_COUNT 52 | ||
27 | |||
28 | #define HASH_RESET_CSRX_REG_VALUE 0x0 | ||
29 | #define HASH_RESET_CSFULL_REG_VALUE 0x0 | ||
30 | #define HASH_RESET_CSDATAIN_REG_VALUE 0x0 | ||
31 | |||
32 | #define HASH_RESET_INDEX_VAL 0x0 | ||
33 | #define HASH_RESET_BIT_INDEX_VAL 0x0 | ||
34 | #define HASH_RESET_BUFFER_VAL 0x0 | ||
35 | #define HASH_RESET_LEN_HIGH_VAL 0x0 | ||
36 | #define HASH_RESET_LEN_LOW_VAL 0x0 | ||
37 | |||
38 | /* Control register bitfields */ | ||
39 | #define HASH_CR_RESUME_MASK 0x11FCF | ||
40 | |||
41 | #define HASH_CR_SWITCHON_POS 31 | ||
42 | #define HASH_CR_SWITCHON_MASK BIT(31) | ||
43 | |||
44 | #define HASH_CR_EMPTYMSG_POS 20 | ||
45 | #define HASH_CR_EMPTYMSG_MASK BIT(20) | ||
46 | |||
47 | #define HASH_CR_DINF_POS 12 | ||
48 | #define HASH_CR_DINF_MASK BIT(12) | ||
49 | |||
50 | #define HASH_CR_NBW_POS 8 | ||
51 | #define HASH_CR_NBW_MASK 0x00000F00UL | ||
52 | |||
53 | #define HASH_CR_LKEY_POS 16 | ||
54 | #define HASH_CR_LKEY_MASK BIT(16) | ||
55 | |||
56 | #define HASH_CR_ALGO_POS 7 | ||
57 | #define HASH_CR_ALGO_MASK BIT(7) | ||
58 | |||
59 | #define HASH_CR_MODE_POS 6 | ||
60 | #define HASH_CR_MODE_MASK BIT(6) | ||
61 | |||
62 | #define HASH_CR_DATAFORM_POS 4 | ||
63 | #define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5)) | ||
64 | |||
65 | #define HASH_CR_DMAE_POS 3 | ||
66 | #define HASH_CR_DMAE_MASK BIT(3) | ||
67 | |||
68 | #define HASH_CR_INIT_POS 2 | ||
69 | #define HASH_CR_INIT_MASK BIT(2) | ||
70 | |||
71 | #define HASH_CR_PRIVN_POS 1 | ||
72 | #define HASH_CR_PRIVN_MASK BIT(1) | ||
73 | |||
74 | #define HASH_CR_SECN_POS 0 | ||
75 | #define HASH_CR_SECN_MASK BIT(0) | ||
76 | |||
77 | /* Start register bitfields */ | ||
78 | #define HASH_STR_DCAL_POS 8 | ||
79 | #define HASH_STR_DCAL_MASK BIT(8) | ||
80 | #define HASH_STR_DEFAULT 0x0 | ||
81 | |||
82 | #define HASH_STR_NBLW_POS 0 | ||
83 | #define HASH_STR_NBLW_MASK 0x0000001FUL | ||
84 | |||
85 | #define HASH_NBLW_MAX_VAL 0x1F | ||
86 | |||
87 | /* PrimeCell IDs */ | ||
88 | #define HASH_P_ID0 0xE0 | ||
89 | #define HASH_P_ID1 0x05 | ||
90 | #define HASH_P_ID2 0x38 | ||
91 | #define HASH_P_ID3 0x00 | ||
92 | #define HASH_CELL_ID0 0x0D | ||
93 | #define HASH_CELL_ID1 0xF0 | ||
94 | #define HASH_CELL_ID2 0x05 | ||
95 | #define HASH_CELL_ID3 0xB1 | ||
96 | |||
97 | #define HASH_SET_BITS(reg_name, mask) \ | ||
98 | writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) | ||
99 | |||
100 | #define HASH_CLEAR_BITS(reg_name, mask) \ | ||
101 | writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name) | ||
102 | |||
103 | #define HASH_PUT_BITS(reg, val, shift, mask) \ | ||
104 | writel_relaxed(((readl(reg) & ~(mask)) | \ | ||
105 | (((u32)val << shift) & (mask))), reg) | ||
106 | |||
107 | #define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len)) | ||
108 | |||
109 | #define HASH_INITIALIZE \ | ||
110 | HASH_PUT_BITS( \ | ||
111 | &device_data->base->cr, \ | ||
112 | 0x01, HASH_CR_INIT_POS, \ | ||
113 | HASH_CR_INIT_MASK) | ||
114 | |||
115 | #define HASH_SET_DATA_FORMAT(data_format) \ | ||
116 | HASH_PUT_BITS( \ | ||
117 | &device_data->base->cr, \ | ||
118 | (u32) (data_format), HASH_CR_DATAFORM_POS, \ | ||
119 | HASH_CR_DATAFORM_MASK) | ||
120 | #define HASH_SET_NBLW(val) \ | ||
121 | HASH_PUT_BITS( \ | ||
122 | &device_data->base->str, \ | ||
123 | (u32) (val), HASH_STR_NBLW_POS, \ | ||
124 | HASH_STR_NBLW_MASK) | ||
125 | #define HASH_SET_DCAL \ | ||
126 | HASH_PUT_BITS( \ | ||
127 | &device_data->base->str, \ | ||
128 | 0x01, HASH_STR_DCAL_POS, \ | ||
129 | HASH_STR_DCAL_MASK) | ||
130 | |||
131 | /* Hardware access method */ | ||
132 | enum hash_mode { | ||
133 | HASH_MODE_CPU, | ||
134 | HASH_MODE_DMA | ||
135 | }; | ||
136 | |||
137 | /** | ||
138 | * struct uint64 - Structure to handle 64-bit integers. | ||
139 | * @high_word: Most significant bits. | ||
140 | * @low_word: Least significant bits. | ||
141 | * | ||
142 | * Used to handle 64-bit integers. | ||
143 | */ | ||
144 | struct uint64 { | ||
145 | u32 high_word; | ||
146 | u32 low_word; | ||
147 | }; | ||
148 | |||
149 | /** | ||
150 | * struct hash_register - Contains all registers in ux500 hash hardware. | ||
151 | * @cr: HASH control register (0x000). | ||
152 | * @din: HASH data input register (0x004). | ||
153 | * @str: HASH start register (0x008). | ||
154 | * @hx: HASH digest register 0..7 (0x00c-0x01C). | ||
155 | * @padding0: Reserved (0x02C). | ||
156 | * @itcr: Integration test control register (0x080). | ||
157 | * @itip: Integration test input register (0x084). | ||
158 | * @itop: Integration test output register (0x088). | ||
159 | * @padding1: Reserved (0x08C). | ||
160 | * @csfull: HASH context full register (0x0F8). | ||
161 | * @csdatain: HASH context swap data input register (0x0FC). | ||
162 | * @csrx: HASH context swap register 0..51 (0x100-0x1CC). | ||
163 | * @padding2: Reserved (0x1D0). | ||
164 | * @periphid0: HASH peripheral identification register 0 (0xFE0). | ||
165 | * @periphid1: HASH peripheral identification register 1 (0xFE4). | ||
166 | * @periphid2: HASH peripheral identification register 2 (0xFE8). | ||
167 | * @periphid3: HASH peripheral identification register 3 (0xFEC). | ||
168 | * @cellid0: HASH PCell identification register 0 (0xFF0). | ||
169 | * @cellid1: HASH PCell identification register 1 (0xFF4). | ||
170 | * @cellid2: HASH PCell identification register 2 (0xFF8). | ||
171 | * @cellid3: HASH PCell identification register 3 (0xFFC). | ||
172 | * | ||
173 | * The device communicates with the HASH via 32-bit-wide control registers | ||
174 | * accessible via the 32-bit-wide AMBA rev. 2.0 AHB bus. Below is a structure | ||
175 | * with the registers used. | ||
176 | */ | ||
177 | struct hash_register { | ||
178 | u32 cr; | ||
179 | u32 din; | ||
180 | u32 str; | ||
181 | u32 hx[8]; | ||
182 | |||
183 | u32 padding0[(0x080 - 0x02C) / sizeof(u32)]; | ||
184 | |||
185 | u32 itcr; | ||
186 | u32 itip; | ||
187 | u32 itop; | ||
188 | |||
189 | u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)]; | ||
190 | |||
191 | u32 csfull; | ||
192 | u32 csdatain; | ||
193 | u32 csrx[HASH_CSR_COUNT]; | ||
194 | |||
195 | u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)]; | ||
196 | |||
197 | u32 periphid0; | ||
198 | u32 periphid1; | ||
199 | u32 periphid2; | ||
200 | u32 periphid3; | ||
201 | |||
202 | u32 cellid0; | ||
203 | u32 cellid1; | ||
204 | u32 cellid2; | ||
205 | u32 cellid3; | ||
206 | }; | ||
207 | |||
208 | /** | ||
209 | * struct hash_state - Hash context state. | ||
210 | * @temp_cr: Temporary HASH Control Register. | ||
211 | * @str_reg: HASH Start Register. | ||
212 | * @din_reg: HASH Data Input Register. | ||
213 | * @csr[52]: HASH Context Swap Registers 0-39. | ||
214 | * @csfull: HASH Context Swap Register 40, i.e. status flags. | ||
215 | * @csdatain: HASH Context Swap Register 41, i.e. input data. | ||
216 | * @buffer: Working buffer for messages going to the hardware. | ||
217 | * @length: Length of the part of message hashed so far (floor(N/64) * 64). | ||
218 | * @index: Valid number of bytes in buffer (N % 64). | ||
219 | * @bit_index: Valid number of bits in buffer (N % 8). | ||
220 | * | ||
221 | * This structure is used between context switches, i.e. when ongoing jobs are | ||
222 | * interrupted with new jobs. When this happens we need to store intermediate | ||
223 | * results in software. | ||
224 | * | ||
225 | * WARNING: "index" is a member of the structure placed to make sure that | ||
226 | * "buffer" is aligned on a 4-byte boundary. This is highly implementation | ||
227 | * dependent and MUST be checked whenever this code is ported to new platforms. | ||
228 | */ | ||
229 | struct hash_state { | ||
230 | u32 temp_cr; | ||
231 | u32 str_reg; | ||
232 | u32 din_reg; | ||
233 | u32 csr[52]; | ||
234 | u32 csfull; | ||
235 | u32 csdatain; | ||
236 | u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; | ||
237 | struct uint64 length; | ||
238 | u8 index; | ||
239 | u8 bit_index; | ||
240 | }; | ||
241 | |||
242 | /** | ||
243 | * enum hash_device_id - HASH device ID. | ||
244 | * @HASH_DEVICE_ID_0: Hash hardware with ID 0 | ||
245 | * @HASH_DEVICE_ID_1: Hash hardware with ID 1 | ||
246 | */ | ||
247 | enum hash_device_id { | ||
248 | HASH_DEVICE_ID_0 = 0, | ||
249 | HASH_DEVICE_ID_1 = 1 | ||
250 | }; | ||
251 | |||
252 | /** | ||
253 | * enum hash_data_format - HASH data format. | ||
254 | * @HASH_DATA_32_BITS: 32-bit data format. | ||
255 | * @HASH_DATA_16_BITS: 16-bit data format. | ||
256 | * @HASH_DATA_8_BITS: 8-bit data format. | ||
257 | * @HASH_DATA_1_BIT: 1-bit data format. | ||
258 | */ | ||
259 | enum hash_data_format { | ||
260 | HASH_DATA_32_BITS = 0x0, | ||
261 | HASH_DATA_16_BITS = 0x1, | ||
262 | HASH_DATA_8_BITS = 0x2, | ||
263 | HASH_DATA_1_BIT = 0x3 | ||
264 | }; | ||
265 | |||
266 | /** | ||
267 | * enum hash_algo - Enumeration for selecting between the SHA1 and SHA256 algorithms. | ||
268 | * @HASH_ALGO_SHA1: Indicates that SHA1 is used. | ||
269 | * @HASH_ALGO_SHA256: Indicates that SHA256 is used. | ||
270 | */ | ||
271 | enum hash_algo { | ||
272 | HASH_ALGO_SHA1 = 0x0, | ||
273 | HASH_ALGO_SHA256 = 0x1 | ||
274 | }; | ||
275 | |||
276 | /** | ||
277 | * enum hash_op - Enumeration for selecting between HASH or HMAC mode. | ||
278 | * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode. | ||
279 | * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC. | ||
280 | */ | ||
281 | enum hash_op { | ||
282 | HASH_OPER_MODE_HASH = 0x0, | ||
283 | HASH_OPER_MODE_HMAC = 0x1 | ||
284 | }; | ||
285 | |||
286 | /** | ||
287 | * struct hash_config - Configuration data for the hardware. | ||
288 | * @data_format: Format of data entered into the hash data in register. | ||
289 | * @algorithm: Algorithm selection bit. | ||
290 | * @oper_mode: Operating mode selection bit. | ||
291 | */ | ||
292 | struct hash_config { | ||
293 | int data_format; | ||
294 | int algorithm; | ||
295 | int oper_mode; | ||
296 | }; | ||
297 | |||
298 | /** | ||
299 | * struct hash_dma - Structure used for dma. | ||
300 | * @mask: DMA capabilities bitmap mask. | ||
301 | * @complete: Used to maintain state for a "completion". | ||
302 | * @chan_mem2hash: DMA channel. | ||
303 | * @cfg_mem2hash: DMA channel configuration. | ||
304 | * @sg_len: Scatterlist length. | ||
305 | * @sg: Scatterlist. | ||
306 | * @nents: Number of sg entries. | ||
307 | */ | ||
308 | struct hash_dma { | ||
309 | dma_cap_mask_t mask; | ||
310 | struct completion complete; | ||
311 | struct dma_chan *chan_mem2hash; | ||
312 | void *cfg_mem2hash; | ||
313 | int sg_len; | ||
314 | struct scatterlist *sg; | ||
315 | int nents; | ||
316 | }; | ||
317 | |||
318 | /** | ||
319 | * struct hash_ctx - The context used for hash calculations. | ||
320 | * @key: The key used in the operation. | ||
321 | * @keylen: The length of the key. | ||
322 | * @state: The state of the current calculations. | ||
323 | * @config: The current configuration. | ||
324 | * @digestsize: The size of current digest. | ||
325 | * @device: Pointer to the device structure. | ||
326 | */ | ||
327 | struct hash_ctx { | ||
328 | u8 *key; | ||
329 | u32 keylen; | ||
330 | struct hash_config config; | ||
331 | int digestsize; | ||
332 | struct hash_device_data *device; | ||
333 | }; | ||
334 | |||
335 | /** | ||
336 | * struct hash_req_ctx - The request context used for hash calculations. | ||
337 | * @state: The state of the current calculations. | ||
338 | * @dma_mode: Used in special cases (workaround), e.g. need to change to | ||
339 | * cpu mode, if not supported/working in dma mode. | ||
340 | * @updated: Indicates if hardware is initialized for new operations. | ||
341 | */ | ||
342 | struct hash_req_ctx { | ||
343 | struct hash_state state; | ||
344 | bool dma_mode; | ||
345 | u8 updated; | ||
346 | }; | ||
347 | |||
348 | /** | ||
349 | * struct hash_device_data - structure for a hash device. | ||
350 | * @base: Pointer to the hardware base address. | ||
351 | * @list_node: For inclusion in klist. | ||
352 | * @dev: Pointer to the device dev structure. | ||
353 | * @ctx_lock: Spinlock for current_ctx. | ||
354 | * @current_ctx: Pointer to the currently allocated context. | ||
355 | * @power_state: TRUE = power state on, FALSE = power state off. | ||
356 | * @power_state_lock: Spinlock for power_state. | ||
357 | * @regulator: Pointer to the device's power control. | ||
358 | * @clk: Pointer to the device's clock control. | ||
359 | * @restore_dev_state: TRUE = saved state, FALSE = no saved state. | ||
360 | * @dma: Structure used for dma. | ||
361 | */ | ||
362 | struct hash_device_data { | ||
363 | struct hash_register __iomem *base; | ||
364 | struct klist_node list_node; | ||
365 | struct device *dev; | ||
366 | struct spinlock ctx_lock; | ||
367 | struct hash_ctx *current_ctx; | ||
368 | bool power_state; | ||
369 | struct spinlock power_state_lock; | ||
370 | struct regulator *regulator; | ||
371 | struct clk *clk; | ||
372 | bool restore_dev_state; | ||
373 | struct hash_state state; /* Used for saving and resuming state */ | ||
374 | struct hash_dma dma; | ||
375 | }; | ||
376 | |||
377 | int hash_check_hw(struct hash_device_data *device_data); | ||
378 | |||
379 | int hash_setconfiguration(struct hash_device_data *device_data, | ||
380 | struct hash_config *config); | ||
381 | |||
382 | void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx); | ||
383 | |||
384 | void hash_get_digest(struct hash_device_data *device_data, | ||
385 | u8 *digest, int algorithm); | ||
386 | |||
387 | int hash_hw_update(struct ahash_request *req); | ||
388 | |||
389 | int hash_save_state(struct hash_device_data *device_data, | ||
390 | struct hash_state *state); | ||
391 | |||
392 | int hash_resume_state(struct hash_device_data *device_data, | ||
393 | const struct hash_state *state); | ||
394 | |||
395 | #endif | ||
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 000000000000..cc6a371a2708
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,2019 @@
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * Support for Nomadik hardware crypto engine. | ||
4 | |||
5 | * Copyright (C) ST-Ericsson SA 2010 | ||
6 | * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson | ||
7 | * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson | ||
8 | * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. | ||
9 | * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. | ||
10 | * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson. | ||
11 | * License terms: GNU General Public License (GPL) version 2 | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/klist.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/crypto.h> | ||
24 | |||
25 | #include <linux/regulator/consumer.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/bitops.h> | ||
28 | |||
29 | #include <crypto/internal/hash.h> | ||
30 | #include <crypto/sha.h> | ||
31 | #include <crypto/scatterwalk.h> | ||
32 | #include <crypto/algapi.h> | ||
33 | |||
34 | #include <mach/crypto-ux500.h> | ||
35 | #include <mach/hardware.h> | ||
36 | |||
37 | #include "hash_alg.h" | ||
38 | |||
39 | #define DEV_DBG_NAME "hashX hashX:" | ||
40 | |||
41 | static int hash_mode; | ||
42 | module_param(hash_mode, int, 0); | ||
43 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | ||
44 | |||
45 | /** | ||
46 | * Pre-calculated empty message digests. | ||
47 | */ | ||
48 | static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { | ||
49 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, | ||
50 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, | ||
51 | 0xaf, 0xd8, 0x07, 0x09 | ||
52 | }; | ||
53 | |||
54 | static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | ||
55 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, | ||
56 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, | ||
57 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, | ||
58 | 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
59 | }; | ||
60 | |||
61 | /* HMAC-SHA1, no key */ | ||
62 | static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { | ||
63 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, | ||
64 | 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, | ||
65 | 0x70, 0x69, 0x0e, 0x1d | ||
66 | }; | ||
67 | |||
68 | /* HMAC-SHA256, no key */ | ||
69 | static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { | ||
70 | 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, | ||
71 | 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, | ||
72 | 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, | ||
73 | 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad | ||
74 | }; | ||
75 | |||
76 | /** | ||
77 | * struct hash_driver_data - data specific to the driver. | ||
78 | * | ||
79 | * @device_list: A list of registered devices to choose from. | ||
80 | * @device_allocation: A semaphore initialized with number of devices. | ||
81 | */ | ||
82 | struct hash_driver_data { | ||
83 | struct klist device_list; | ||
84 | struct semaphore device_allocation; | ||
85 | }; | ||
86 | |||
87 | static struct hash_driver_data driver_data; | ||
88 | |||
89 | /* Declaration of functions */ | ||
90 | /** | ||
91 | * hash_messagepad - Pads a message and writes the nblw bits. | ||
92 | * @device_data: Structure for the hash device. | ||
93 | * @message: Last word of a message | ||
94 | * @index_bytes: The number of bytes in the last message | ||
95 | * | ||
96 | * This function manages the final part of the digest calculation, when less | ||
97 | * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. | ||
98 | * | ||
99 | */ | ||
100 | static void hash_messagepad(struct hash_device_data *device_data, | ||
101 | const u32 *message, u8 index_bytes); | ||
102 | |||
103 | /** | ||
104 | * release_hash_device - Releases a previously allocated hash device. | ||
105 | * @device_data: Structure for the hash device. | ||
106 | * | ||
107 | */ | ||
108 | static void release_hash_device(struct hash_device_data *device_data) | ||
109 | { | ||
110 | spin_lock(&device_data->ctx_lock); | ||
111 | device_data->current_ctx->device = NULL; | ||
112 | device_data->current_ctx = NULL; | ||
113 | spin_unlock(&device_data->ctx_lock); | ||
114 | |||
115 | /* | ||
116 | * The down_interruptible part for this semaphore is called in | ||
117 | * cryp_get_device_data. | ||
118 | */ | ||
119 | up(&driver_data.device_allocation); | ||
120 | } | ||
121 | |||
122 | static void hash_dma_setup_channel(struct hash_device_data *device_data, | ||
123 | struct device *dev) | ||
124 | { | ||
125 | struct hash_platform_data *platform_data = dev->platform_data; | ||
126 | dma_cap_zero(device_data->dma.mask); | ||
127 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); | ||
128 | |||
129 | device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; | ||
130 | device_data->dma.chan_mem2hash = | ||
131 | dma_request_channel(device_data->dma.mask, | ||
132 | platform_data->dma_filter, | ||
133 | device_data->dma.cfg_mem2hash); | ||
134 | |||
135 | init_completion(&device_data->dma.complete); | ||
136 | } | ||
137 | |||
138 | static void hash_dma_callback(void *data) | ||
139 | { | ||
140 | struct hash_ctx *ctx = (struct hash_ctx *) data; | ||
141 | |||
142 | complete(&ctx->device->dma.complete); | ||
143 | } | ||
144 | |||
145 | static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, | ||
146 | int len, enum dma_data_direction direction) | ||
147 | { | ||
148 | struct dma_async_tx_descriptor *desc = NULL; | ||
149 | struct dma_chan *channel = NULL; | ||
150 | dma_cookie_t cookie; | ||
151 | |||
152 | if (direction != DMA_TO_DEVICE) { | ||
153 | dev_err(ctx->device->dev, "[%s] Invalid DMA direction", | ||
154 | __func__); | ||
155 | return -EFAULT; | ||
156 | } | ||
157 | |||
158 | sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); | ||
159 | |||
160 | channel = ctx->device->dma.chan_mem2hash; | ||
161 | ctx->device->dma.sg = sg; | ||
162 | ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, | ||
163 | ctx->device->dma.sg, ctx->device->dma.nents, | ||
164 | direction); | ||
165 | |||
166 | if (!ctx->device->dma.sg_len) { | ||
167 | dev_err(ctx->device->dev, | ||
168 | "[%s]: Could not map the sg list (TO_DEVICE)", | ||
169 | __func__); | ||
170 | return -EFAULT; | ||
171 | } | ||
172 | |||
173 | dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " | ||
174 | "(TO_DEVICE)", __func__); | ||
175 | desc = channel->device->device_prep_slave_sg(channel, | ||
176 | ctx->device->dma.sg, ctx->device->dma.sg_len, | ||
177 | direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); | ||
178 | if (!desc) { | ||
179 | dev_err(ctx->device->dev, | ||
180 | "[%s]: device_prep_slave_sg() failed!", __func__); | ||
181 | return -EFAULT; | ||
182 | } | ||
183 | |||
184 | desc->callback = hash_dma_callback; | ||
185 | desc->callback_param = ctx; | ||
186 | |||
187 | cookie = desc->tx_submit(desc); | ||
188 | dma_async_issue_pending(channel); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static void hash_dma_done(struct hash_ctx *ctx) | ||
194 | { | ||
195 | struct dma_chan *chan; | ||
196 | |||
197 | chan = ctx->device->dma.chan_mem2hash; | ||
198 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | ||
199 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, | ||
200 | ctx->device->dma.sg_len, DMA_TO_DEVICE); | ||
201 | |||
202 | } | ||
203 | |||
204 | static int hash_dma_write(struct hash_ctx *ctx, | ||
205 | struct scatterlist *sg, int len) | ||
206 | { | ||
207 | int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); | ||
208 | if (error) { | ||
209 | dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " | ||
210 | "failed", __func__); | ||
211 | return error; | ||
212 | } | ||
213 | |||
214 | return len; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * get_empty_message_digest - Returns a pre-calculated digest for | ||
219 | * the empty message. | ||
220 | * @device_data: Structure for the hash device. | ||
221 | * @zero_hash: Buffer to return the empty message digest. | ||
222 | * @zero_hash_size: Hash size of the empty message digest. | ||
223 | * @zero_digest: True if zero_digest returned. | ||
224 | */ | ||
225 | static int get_empty_message_digest( | ||
226 | struct hash_device_data *device_data, | ||
227 | u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest) | ||
228 | { | ||
229 | int ret = 0; | ||
230 | struct hash_ctx *ctx = device_data->current_ctx; | ||
231 | *zero_digest = false; | ||
232 | |||
233 | /** | ||
234 | * Caller responsible for ctx != NULL. | ||
235 | */ | ||
236 | |||
237 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { | ||
238 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | ||
239 | memcpy(zero_hash, &zero_message_hash_sha1[0], | ||
240 | SHA1_DIGEST_SIZE); | ||
241 | *zero_hash_size = SHA1_DIGEST_SIZE; | ||
242 | *zero_digest = true; | ||
243 | } else if (HASH_ALGO_SHA256 == | ||
244 | ctx->config.algorithm) { | ||
245 | memcpy(zero_hash, &zero_message_hash_sha256[0], | ||
246 | SHA256_DIGEST_SIZE); | ||
247 | *zero_hash_size = SHA256_DIGEST_SIZE; | ||
248 | *zero_digest = true; | ||
249 | } else { | ||
250 | dev_err(device_data->dev, "[%s] " | ||
251 | "Incorrect algorithm!" | ||
252 | , __func__); | ||
253 | ret = -EINVAL; | ||
254 | goto out; | ||
255 | } | ||
256 | } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) { | ||
257 | if (!ctx->keylen) { | ||
258 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | ||
259 | memcpy(zero_hash, &zero_message_hmac_sha1[0], | ||
260 | SHA1_DIGEST_SIZE); | ||
261 | *zero_hash_size = SHA1_DIGEST_SIZE; | ||
262 | *zero_digest = true; | ||
263 | } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { | ||
264 | memcpy(zero_hash, &zero_message_hmac_sha256[0], | ||
265 | SHA256_DIGEST_SIZE); | ||
266 | *zero_hash_size = SHA256_DIGEST_SIZE; | ||
267 | *zero_digest = true; | ||
268 | } else { | ||
269 | dev_err(device_data->dev, "[%s] " | ||
270 | "Incorrect algorithm!" | ||
271 | , __func__); | ||
272 | ret = -EINVAL; | ||
273 | goto out; | ||
274 | } | ||
275 | } else { | ||
276 | dev_dbg(device_data->dev, "[%s] Continue hash " | ||
277 | "calculation, since hmac key avalable", | ||
278 | __func__); | ||
279 | } | ||
280 | } | ||
281 | out: | ||
282 | |||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * hash_disable_power - Request to disable power and clock. | ||
288 | * @device_data: Structure for the hash device. | ||
289 | * @save_device_state: If true, saves the current hw state. | ||
290 | * | ||
291 | * This function requests disabling of power (regulator) and clock, | ||
292 | * and can also save the current hw state. | ||
293 | */ | ||
294 | static int hash_disable_power( | ||
295 | struct hash_device_data *device_data, | ||
296 | bool save_device_state) | ||
297 | { | ||
298 | int ret = 0; | ||
299 | struct device *dev = device_data->dev; | ||
300 | |||
301 | spin_lock(&device_data->power_state_lock); | ||
302 | if (!device_data->power_state) | ||
303 | goto out; | ||
304 | |||
305 | if (save_device_state) { | ||
306 | hash_save_state(device_data, | ||
307 | &device_data->state); | ||
308 | device_data->restore_dev_state = true; | ||
309 | } | ||
310 | |||
311 | clk_disable(device_data->clk); | ||
312 | ret = regulator_disable(device_data->regulator); | ||
313 | if (ret) | ||
314 | dev_err(dev, "[%s] regulator_disable() failed!", __func__); | ||
315 | |||
316 | device_data->power_state = false; | ||
317 | |||
318 | out: | ||
319 | spin_unlock(&device_data->power_state_lock); | ||
320 | |||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * hash_enable_power - Request to enable power and clock. | ||
326 | * @device_data: Structure for the hash device. | ||
327 | * @restore_device_state: If true, restores a previous saved hw state. | ||
328 | * | ||
329 | * This function requests enabling of power (regulator) and clock, | ||
330 | * and can also restore a previously saved hw state. | ||
331 | */ | ||
332 | static int hash_enable_power( | ||
333 | struct hash_device_data *device_data, | ||
334 | bool restore_device_state) | ||
335 | { | ||
336 | int ret = 0; | ||
337 | struct device *dev = device_data->dev; | ||
338 | |||
339 | spin_lock(&device_data->power_state_lock); | ||
340 | if (!device_data->power_state) { | ||
341 | ret = regulator_enable(device_data->regulator); | ||
342 | if (ret) { | ||
343 | dev_err(dev, "[%s]: regulator_enable() failed!", | ||
344 | __func__); | ||
345 | goto out; | ||
346 | } | ||
347 | ret = clk_enable(device_data->clk); | ||
348 | if (ret) { | ||
349 | dev_err(dev, "[%s]: clk_enable() failed!", | ||
350 | __func__); | ||
351 | ret = regulator_disable( | ||
352 | device_data->regulator); | ||
353 | goto out; | ||
354 | } | ||
355 | device_data->power_state = true; | ||
356 | } | ||
357 | |||
358 | if (device_data->restore_dev_state) { | ||
359 | if (restore_device_state) { | ||
360 | device_data->restore_dev_state = false; | ||
361 | hash_resume_state(device_data, | ||
362 | &device_data->state); | ||
363 | } | ||
364 | } | ||
365 | out: | ||
366 | spin_unlock(&device_data->power_state_lock); | ||
367 | |||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * hash_get_device_data - Checks for an available hash device and returns it. | ||
373 | * @ctx: Structure for the hash context. | ||
374 | * @device_data: Structure for the hash device. | ||
375 | * | ||
376 | * This function checks for an available hash device and returns it to | ||
377 | * the caller. | ||
378 | * Note! The caller needs to release the device by calling up(). | ||
379 | */ | ||
380 | static int hash_get_device_data(struct hash_ctx *ctx, | ||
381 | struct hash_device_data **device_data) | ||
382 | { | ||
383 | int ret; | ||
384 | struct klist_iter device_iterator; | ||
385 | struct klist_node *device_node; | ||
386 | struct hash_device_data *local_device_data = NULL; | ||
387 | |||
388 | /* Wait until a device is available */ | ||
389 | ret = down_interruptible(&driver_data.device_allocation); | ||
390 | if (ret) | ||
391 | return ret; /* Interrupted */ | ||
392 | |||
393 | /* Select a device */ | ||
394 | klist_iter_init(&driver_data.device_list, &device_iterator); | ||
395 | device_node = klist_next(&device_iterator); | ||
396 | while (device_node) { | ||
397 | local_device_data = container_of(device_node, | ||
398 | struct hash_device_data, list_node); | ||
399 | spin_lock(&local_device_data->ctx_lock); | ||
400 | /* current_ctx allocates a device, NULL = unallocated */ | ||
401 | if (local_device_data->current_ctx) { | ||
402 | device_node = klist_next(&device_iterator); | ||
403 | } else { | ||
404 | local_device_data->current_ctx = ctx; | ||
405 | ctx->device = local_device_data; | ||
406 | spin_unlock(&local_device_data->ctx_lock); | ||
407 | break; | ||
408 | } | ||
409 | spin_unlock(&local_device_data->ctx_lock); | ||
410 | } | ||
411 | klist_iter_exit(&device_iterator); | ||
412 | |||
413 | if (!device_node) { | ||
414 | /** | ||
415 | * No free device found. | ||
416 | * Since we allocated a device with down_interruptible, this | ||
417 | * should not be able to happen. | ||
418 | * Number of available devices, which are contained in | ||
419 | * device_allocation, is therefore decremented by not doing | ||
420 | * an up(device_allocation). | ||
421 | */ | ||
422 | return -EBUSY; | ||
423 | } | ||
424 | |||
425 | *device_data = local_device_data; | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | /** | ||
431 | * hash_hw_write_key - Writes the key to the hardware registers. | ||
432 | * | ||
433 | * @device_data: Structure for the hash device. | ||
434 | * @key: Key to be written. | ||
435 | * @keylen: The length of the key. | ||
436 | * | ||
437 | * Note! This function DOES NOT write to the NBLW register, even though | ||
438 | * specified in the hw design spec. This is either due to incorrect info in | ||
439 | * the spec or due to a bug in the hw. | ||
440 | */ | ||
441 | static void hash_hw_write_key(struct hash_device_data *device_data, | ||
442 | const u8 *key, unsigned int keylen) | ||
443 | { | ||
444 | u32 word = 0; | ||
445 | int nwords = 1; | ||
446 | |||
447 | HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); | ||
448 | |||
449 | while (keylen >= 4) { | ||
450 | u32 *key_word = (u32 *)key; | ||
451 | |||
452 | HASH_SET_DIN(key_word, nwords); | ||
453 | keylen -= 4; | ||
454 | key += 4; | ||
455 | } | ||
456 | |||
457 | /* Take care of the remaining bytes in the last word */ | ||
458 | if (keylen) { | ||
459 | word = 0; | ||
460 | while (keylen) { | ||
461 | word |= (key[keylen - 1] << (8 * (keylen - 1))); | ||
462 | keylen--; | ||
463 | } | ||
464 | |||
465 | HASH_SET_DIN(&word, nwords); | ||
466 | } | ||
467 | |||
468 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
469 | cpu_relax(); | ||
470 | |||
471 | HASH_SET_DCAL; | ||
472 | |||
473 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
474 | cpu_relax(); | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * init_hash_hw - Initialise the hash hardware for a new calculation. | ||
479 | * @device_data: Structure for the hash device. | ||
480 | * @ctx: The hash context. | ||
481 | * | ||
482 | * This function will enable the bits needed to clear and start a new | ||
483 | * calculation. | ||
484 | */ | ||
485 | static int init_hash_hw(struct hash_device_data *device_data, | ||
486 | struct hash_ctx *ctx) | ||
487 | { | ||
488 | int ret = 0; | ||
489 | |||
490 | ret = hash_setconfiguration(device_data, &ctx->config); | ||
491 | if (ret) { | ||
492 | dev_err(device_data->dev, "[%s] hash_setconfiguration() " | ||
493 | "failed!", __func__); | ||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | hash_begin(device_data, ctx); | ||
498 | |||
499 | if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) | ||
500 | hash_hw_write_key(device_data, ctx->key, ctx->keylen); | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | /** | ||
506 | * hash_get_nents - Return number of entries (nents) in scatterlist (sg). | ||
507 | * | ||
508 | * @sg: Scatterlist. | ||
509 | * @size: Size in bytes. | ||
510 | * @aligned: True if sg data aligned to work in DMA mode. | ||
511 | * | ||
512 | */ | ||
513 | static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) | ||
514 | { | ||
515 | int nents = 0; | ||
516 | bool aligned_data = true; | ||
517 | |||
518 | while (size > 0 && sg) { | ||
519 | nents++; | ||
520 | size -= sg->length; | ||
521 | |||
522 | /* hash_set_dma_transfer will align last nent */ | ||
523 | if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) | ||
524 | || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && | ||
525 | size > 0)) | ||
526 | aligned_data = false; | ||
527 | |||
528 | sg = sg_next(sg); | ||
529 | } | ||
530 | |||
531 | if (aligned) | ||
532 | *aligned = aligned_data; | ||
533 | |||
534 | if (size != 0) | ||
535 | return -EFAULT; | ||
536 | |||
537 | return nents; | ||
538 | } | ||
539 | |||
540 | /** | ||
541 | * hash_dma_valid_data - checks for dma valid sg data. | ||
542 | * @sg: Scatterlist. | ||
543 | * @datasize: Datasize in bytes. | ||
544 | * | ||
545 | * NOTE! This function checks whether the sg data is valid for dma, since | ||
546 | * dma only accepts data sizes that are a multiple of the word size. | ||
547 | */ | ||
548 | static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) | ||
549 | { | ||
550 | bool aligned; | ||
551 | |||
552 | /* Need to include at least one nent, else error */ | ||
553 | if (hash_get_nents(sg, datasize, &aligned) < 1) | ||
554 | return false; | ||
555 | |||
556 | return aligned; | ||
557 | } | ||
558 | |||
559 | /** | ||
560 | * hash_init - Common hash init function for SHA1/SHA2 (SHA256). | ||
561 | * @req: The hash request for the job. | ||
562 | * | ||
563 | * Initialize structures. | ||
564 | */ | ||
565 | static int hash_init(struct ahash_request *req) | ||
566 | { | ||
567 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
568 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
569 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
570 | |||
571 | if (!ctx->key) | ||
572 | ctx->keylen = 0; | ||
573 | |||
574 | memset(&req_ctx->state, 0, sizeof(struct hash_state)); | ||
575 | req_ctx->updated = 0; | ||
576 | if (hash_mode == HASH_MODE_DMA) { | ||
577 | if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) && | ||
578 | cpu_is_u5500()) { | ||
579 | pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working " | ||
580 | "on u5500, directing to CPU mode.", | ||
581 | __func__); | ||
582 | req_ctx->dma_mode = false; /* Don't use DMA */ | ||
583 | goto out; | ||
584 | } | ||
585 | |||
586 | if (req->nbytes < HASH_DMA_ALIGN_SIZE) { | ||
587 | req_ctx->dma_mode = false; /* Don't use DMA */ | ||
588 | |||
589 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " | ||
590 | "to CPU mode for data size < %d", | ||
591 | __func__, HASH_DMA_ALIGN_SIZE); | ||
592 | } else { | ||
593 | if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && | ||
594 | hash_dma_valid_data(req->src, | ||
595 | req->nbytes)) { | ||
596 | req_ctx->dma_mode = true; | ||
597 | } else { | ||
598 | req_ctx->dma_mode = false; | ||
599 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" | ||
600 | " CPU mode for datalength < %d" | ||
601 | " or non-aligned data, except " | ||
602 | "in last nent", __func__, | ||
603 | HASH_DMA_PERFORMANCE_MIN_SIZE); | ||
604 | } | ||
605 | } | ||
606 | } | ||
607 | out: | ||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | /** | ||
612 | * hash_processblock - This function processes a single block of 512 bits (64 | ||
613 | * bytes), word aligned, starting at message. | ||
614 | * @device_data: Structure for the hash device. | ||
615 | * @message: Block (512 bits) of message to be written to | ||
616 | * the HASH hardware. | ||
617 | * | ||
618 | */ | ||
619 | static void hash_processblock( | ||
620 | struct hash_device_data *device_data, | ||
621 | const u32 *message, int length) | ||
622 | { | ||
623 | int len = length / HASH_BYTES_PER_WORD; | ||
624 | /* | ||
625 | * NBLW bits. Reset the number of bits in last word (NBLW). | ||
626 | */ | ||
627 | HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); | ||
628 | |||
629 | /* | ||
630 | * Write message data to the HASH_DIN register. | ||
631 | */ | ||
632 | HASH_SET_DIN(message, len); | ||
633 | } | ||
634 | |||
635 | /** | ||
636 | * hash_messagepad - Pads a message and writes the nblw bits. | ||
637 | * @device_data: Structure for the hash device. | ||
638 | * @message: Last word of a message. | ||
639 | * @index_bytes: The number of bytes in the last message. | ||
640 | * | ||
641 | * This function manages the final part of the digest calculation, when less | ||
642 | * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. | ||
643 | * | ||
644 | */ | ||
645 | static void hash_messagepad(struct hash_device_data *device_data, | ||
646 | const u32 *message, u8 index_bytes) | ||
647 | { | ||
648 | int nwords = 1; | ||
649 | |||
650 | /* | ||
651 | * Clear hash str register, only clear NBLW | ||
652 | * since DCAL will be reset by hardware. | ||
653 | */ | ||
654 | HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); | ||
655 | |||
656 | /* Main loop */ | ||
657 | while (index_bytes >= 4) { | ||
658 | HASH_SET_DIN(message, nwords); | ||
659 | index_bytes -= 4; | ||
660 | message++; | ||
661 | } | ||
662 | |||
663 | if (index_bytes) | ||
664 | HASH_SET_DIN(message, nwords); | ||
665 | |||
666 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
667 | cpu_relax(); | ||
668 | |||
669 | /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ | ||
670 | HASH_SET_NBLW(index_bytes * 8); | ||
671 | dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, | ||
672 | readl_relaxed(&device_data->base->din), | ||
673 | (int)(readl_relaxed(&device_data->base->str) & | ||
674 | HASH_STR_NBLW_MASK)); | ||
675 | HASH_SET_DCAL; | ||
676 | dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", | ||
677 | __func__, readl_relaxed(&device_data->base->din), | ||
678 | (int)(readl_relaxed(&device_data->base->str) & | ||
679 | HASH_STR_NBLW_MASK)); | ||
680 | |||
681 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
682 | cpu_relax(); | ||
683 | } | ||
684 | |||
685 | /** | ||
686 | * hash_incrementlength - Increments the length of the current message. | ||
687 | * @ctx: Hash context | ||
688 | * @incr: Length of message processed already | ||
689 | * | ||
690 | * Overflow cannot occur, because conditions for overflow are checked in | ||
691 | * hash_hw_update. | ||
692 | */ | ||
693 | static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) | ||
694 | { | ||
695 | ctx->state.length.low_word += incr; | ||
696 | |||
697 | /* Check for wrap-around */ | ||
698 | if (ctx->state.length.low_word < incr) | ||
699 | ctx->state.length.high_word++; | ||
700 | } | ||
701 | |||
702 | /** | ||
703 | * hash_setconfiguration - Sets the required configuration for the hash | ||
704 | * hardware. | ||
705 | * @device_data: Structure for the hash device. | ||
706 | * @config: Pointer to a configuration structure. | ||
707 | */ | ||
708 | int hash_setconfiguration(struct hash_device_data *device_data, | ||
709 | struct hash_config *config) | ||
710 | { | ||
711 | int ret = 0; | ||
712 | |||
713 | if (config->algorithm != HASH_ALGO_SHA1 && | ||
714 | config->algorithm != HASH_ALGO_SHA256) | ||
715 | return -EPERM; | ||
716 | |||
717 | /* | ||
718 | * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data | ||
719 | * to be written to HASH_DIN is considered as 32 bits. | ||
720 | */ | ||
721 | HASH_SET_DATA_FORMAT(config->data_format); | ||
722 | |||
723 | /* | ||
724 | * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 | ||
725 | */ | ||
726 | switch (config->algorithm) { | ||
727 | case HASH_ALGO_SHA1: | ||
728 | HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); | ||
729 | break; | ||
730 | |||
731 | case HASH_ALGO_SHA256: | ||
732 | HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); | ||
733 | break; | ||
734 | |||
735 | default: | ||
736 | dev_err(device_data->dev, "[%s] Incorrect algorithm.", | ||
737 | __func__); | ||
738 | return -EPERM; | ||
739 | } | ||
740 | |||
741 | /* | ||
742 | * MODE bit. This bit selects between HASH or HMAC mode for the | ||
743 | * selected algorithm. 0b0 = HASH and 0b1 = HMAC. | ||
744 | */ | ||
745 | if (HASH_OPER_MODE_HASH == config->oper_mode) | ||
746 | HASH_CLEAR_BITS(&device_data->base->cr, | ||
747 | HASH_CR_MODE_MASK); | ||
748 | else if (HASH_OPER_MODE_HMAC == config->oper_mode) { | ||
749 | HASH_SET_BITS(&device_data->base->cr, | ||
750 | HASH_CR_MODE_MASK); | ||
751 | if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { | ||
752 | /* Truncate key to blocksize */ | ||
753 | dev_dbg(device_data->dev, "[%s] LKEY set", __func__); | ||
754 | HASH_SET_BITS(&device_data->base->cr, | ||
755 | HASH_CR_LKEY_MASK); | ||
756 | } else { | ||
757 | dev_dbg(device_data->dev, "[%s] LKEY cleared", | ||
758 | __func__); | ||
759 | HASH_CLEAR_BITS(&device_data->base->cr, | ||
760 | HASH_CR_LKEY_MASK); | ||
761 | } | ||
762 | } else { /* Wrong hash mode */ | ||
763 | ret = -EPERM; | ||
764 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | ||
765 | __func__); | ||
766 | } | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * hash_begin - This routine resets some globals and initializes the hash | ||
772 | * hardware. | ||
773 | * @device_data: Structure for the hash device. | ||
774 | * @ctx: Hash context. | ||
775 | */ | ||
776 | void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) | ||
777 | { | ||
778 | /* HW and SW initializations */ | ||
779 | /* Note: there is no need to initialize buffer and digest members */ | ||
780 | |||
781 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
782 | cpu_relax(); | ||
783 | |||
784 | /* | ||
785 | * INIT bit. Set this bit to 0b1 to reset the HASH processor core and | ||
786 | * prepare the HASH accelerator to compute the message | ||
787 | * digest of a new message. | ||
788 | */ | ||
789 | HASH_INITIALIZE; | ||
790 | |||
791 | /* | ||
792 | * NBLW bits. Reset the number of bits in last word (NBLW). | ||
793 | */ | ||
794 | HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); | ||
795 | } | ||
796 | |||
797 | int hash_process_data( | ||
798 | struct hash_device_data *device_data, | ||
799 | struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, | ||
800 | int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) | ||
801 | { | ||
802 | int ret = 0; | ||
803 | u32 count; | ||
804 | |||
805 | do { | ||
806 | if ((*index + msg_length) < HASH_BLOCK_SIZE) { | ||
807 | for (count = 0; count < msg_length; count++) { | ||
808 | buffer[*index + count] = | ||
809 | *(data_buffer + count); | ||
810 | } | ||
811 | *index += msg_length; | ||
812 | msg_length = 0; | ||
813 | } else { | ||
814 | if (req_ctx->updated) { | ||
815 | |||
816 | ret = hash_resume_state(device_data, | ||
817 | &device_data->state); | ||
818 | memmove(req_ctx->state.buffer, | ||
819 | device_data->state.buffer, | ||
820 | HASH_BLOCK_SIZE / sizeof(u32)); | ||
821 | if (ret) { | ||
822 | dev_err(device_data->dev, "[%s] " | ||
823 | "hash_resume_state()" | ||
824 | " failed!", __func__); | ||
825 | goto out; | ||
826 | } | ||
827 | } else { | ||
828 | ret = init_hash_hw(device_data, ctx); | ||
829 | if (ret) { | ||
830 | dev_err(device_data->dev, "[%s] " | ||
831 | "init_hash_hw()" | ||
832 | " failed!", __func__); | ||
833 | goto out; | ||
834 | } | ||
835 | req_ctx->updated = 1; | ||
836 | } | ||
837 | /* | ||
838 | * If 'data_buffer' is four byte aligned and | ||
839 | * local buffer does not have any data, we can | ||
840 | * write data directly from 'data_buffer' to | ||
841 | * HW peripheral, otherwise we first copy data | ||
842 | * to a local buffer | ||
843 | */ | ||
844 | if ((0 == (((u32)data_buffer) % 4)) | ||
845 | && (0 == *index)) | ||
846 | hash_processblock(device_data, | ||
847 | (const u32 *) | ||
848 | data_buffer, HASH_BLOCK_SIZE); | ||
849 | else { | ||
850 | for (count = 0; count < | ||
851 | (u32)(HASH_BLOCK_SIZE - | ||
852 | *index); | ||
853 | count++) { | ||
854 | buffer[*index + count] = | ||
855 | *(data_buffer + count); | ||
856 | } | ||
857 | hash_processblock(device_data, | ||
858 | (const u32 *)buffer, | ||
859 | HASH_BLOCK_SIZE); | ||
860 | } | ||
861 | hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); | ||
862 | data_buffer += (HASH_BLOCK_SIZE - *index); | ||
863 | |||
864 | msg_length -= (HASH_BLOCK_SIZE - *index); | ||
865 | *index = 0; | ||
866 | |||
867 | ret = hash_save_state(device_data, | ||
868 | &device_data->state); | ||
869 | |||
870 | memmove(device_data->state.buffer, | ||
871 | req_ctx->state.buffer, | ||
872 | HASH_BLOCK_SIZE / sizeof(u32)); | ||
873 | if (ret) { | ||
874 | dev_err(device_data->dev, "[%s] " | ||
875 | "hash_save_state()" | ||
876 | " failed!", __func__); | ||
877 | goto out; | ||
878 | } | ||
879 | } | ||
880 | } while (msg_length != 0); | ||
881 | out: | ||
882 | |||
883 | return ret; | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * hash_dma_final - The hash dma final function for SHA1/SHA256. | ||
888 | * @req: The hash request for the job. | ||
889 | */ | ||
890 | static int hash_dma_final(struct ahash_request *req) | ||
891 | { | ||
892 | int ret = 0; | ||
893 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
894 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
895 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
896 | struct hash_device_data *device_data; | ||
897 | u8 digest[SHA256_DIGEST_SIZE]; | ||
898 | int bytes_written = 0; | ||
899 | |||
900 | ret = hash_get_device_data(ctx, &device_data); | ||
901 | if (ret) | ||
902 | return ret; | ||
903 | |||
904 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | ||
905 | |||
906 | if (req_ctx->updated) { | ||
907 | ret = hash_resume_state(device_data, &device_data->state); | ||
908 | |||
909 | if (ret) { | ||
910 | dev_err(device_data->dev, "[%s] hash_resume_state() " | ||
911 | "failed!", __func__); | ||
912 | goto out; | ||
913 | } | ||
914 | |||
915 | } | ||
916 | |||
917 | if (!req_ctx->updated) { | ||
918 | ret = hash_setconfiguration(device_data, &ctx->config); | ||
919 | if (ret) { | ||
920 | dev_err(device_data->dev, "[%s] " | ||
921 | "hash_setconfiguration() failed!", | ||
922 | __func__); | ||
923 | goto out; | ||
924 | } | ||
925 | |||
926 | /* Enable DMA input */ | ||
927 | if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) { | ||
928 | HASH_CLEAR_BITS(&device_data->base->cr, | ||
929 | HASH_CR_DMAE_MASK); | ||
930 | } else { | ||
931 | HASH_SET_BITS(&device_data->base->cr, | ||
932 | HASH_CR_DMAE_MASK); | ||
933 | HASH_SET_BITS(&device_data->base->cr, | ||
934 | HASH_CR_PRIVN_MASK); | ||
935 | } | ||
936 | |||
937 | HASH_INITIALIZE; | ||
938 | |||
939 | if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) | ||
940 | hash_hw_write_key(device_data, ctx->key, ctx->keylen); | ||
941 | |||
942 | /* Number of bits in last word = (nbytes * 8) % 32 */ | ||
943 | HASH_SET_NBLW((req->nbytes * 8) % 32); | ||
944 | req_ctx->updated = 1; | ||
945 | } | ||
946 | |||
947 | /* Store the nents in the dma struct. */ | ||
948 | ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); | ||
949 | if (!ctx->device->dma.nents) { | ||
950 | dev_err(device_data->dev, "[%s] " | ||
951 | "ctx->device->dma.nents = 0", __func__); | ||
952 | goto out; | ||
953 | } | ||
954 | |||
955 | bytes_written = hash_dma_write(ctx, req->src, req->nbytes); | ||
956 | if (bytes_written != req->nbytes) { | ||
957 | dev_err(device_data->dev, "[%s] " | ||
958 | "hash_dma_write() failed!", __func__); | ||
959 | goto out; | ||
960 | } | ||
961 | |||
962 | wait_for_completion(&ctx->device->dma.complete); | ||
963 | hash_dma_done(ctx); | ||
964 | |||
965 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
966 | cpu_relax(); | ||
967 | |||
968 | if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { | ||
969 | unsigned int keylen = ctx->keylen; | ||
970 | u8 *key = ctx->key; | ||
971 | |||
972 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | ||
973 | ctx->keylen); | ||
974 | hash_hw_write_key(device_data, key, keylen); | ||
975 | } | ||
976 | |||
977 | hash_get_digest(device_data, digest, ctx->config.algorithm); | ||
978 | memcpy(req->result, digest, ctx->digestsize); | ||
979 | |||
980 | out: | ||
981 | release_hash_device(device_data); | ||
982 | |||
983 | /** | ||
984 | * Allocated in setkey, and only used in HMAC. | ||
985 | */ | ||
986 | kfree(ctx->key); | ||
987 | |||
988 | return ret; | ||
989 | } | ||
990 | |||
991 | /** | ||
992 | * hash_hw_final - The final hash calculation function | ||
993 | * @req: The hash request for the job. | ||
994 | */ | ||
995 | int hash_hw_final(struct ahash_request *req) | ||
996 | { | ||
997 | int ret = 0; | ||
998 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
999 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1000 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
1001 | struct hash_device_data *device_data; | ||
1002 | u8 digest[SHA256_DIGEST_SIZE]; | ||
1003 | |||
1004 | ret = hash_get_device_data(ctx, &device_data); | ||
1005 | if (ret) | ||
1006 | return ret; | ||
1007 | |||
1008 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | ||
1009 | |||
1010 | if (req_ctx->updated) { | ||
1011 | ret = hash_resume_state(device_data, &device_data->state); | ||
1012 | |||
1013 | if (ret) { | ||
1014 | dev_err(device_data->dev, "[%s] hash_resume_state() " | ||
1015 | "failed!", __func__); | ||
1016 | goto out; | ||
1017 | } | ||
1018 | } else if (req->nbytes == 0 && ctx->keylen == 0) { | ||
1019 | u8 zero_hash[SHA256_DIGEST_SIZE]; | ||
1020 | u32 zero_hash_size = 0; | ||
1021 | bool zero_digest = false; | ||
1022 | /** | ||
1023 | * Use a pre-calculated empty message digest | ||
1024 | * (workaround since the hw returns zeroes, hw bug!?) | ||
1025 | */ | ||
1026 | ret = get_empty_message_digest(device_data, &zero_hash[0], | ||
1027 | &zero_hash_size, &zero_digest); | ||
1028 | if (!ret && likely(zero_hash_size == ctx->digestsize) && | ||
1029 | zero_digest) { | ||
1030 | memcpy(req->result, &zero_hash[0], ctx->digestsize); | ||
1031 | goto out; | ||
1032 | } else if (!ret && !zero_digest) { | ||
1033 | dev_dbg(device_data->dev, "[%s] HMAC zero msg with " | ||
1034 | "key, continue...", __func__); | ||
1035 | } else { | ||
1036 | dev_err(device_data->dev, "[%s] ret=%d, or wrong " | ||
1037 | "digest size? %s", __func__, ret, | ||
1038 | (zero_hash_size == ctx->digestsize) ? | ||
1039 | "true" : "false"); | ||
1040 | /* Return error */ | ||
1041 | goto out; | ||
1042 | } | ||
1043 | } else if (req->nbytes == 0 && ctx->keylen > 0) { | ||
1044 | dev_err(device_data->dev, "[%s] Empty message with " | ||
1045 | "keylength > 0, NOT supported.", __func__); | ||
1046 | goto out; | ||
1047 | } | ||
1048 | |||
1049 | if (!req_ctx->updated) { | ||
1050 | ret = init_hash_hw(device_data, ctx); | ||
1051 | if (ret) { | ||
1052 | dev_err(device_data->dev, "[%s] init_hash_hw() " | ||
1053 | "failed!", __func__); | ||
1054 | goto out; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | if (req_ctx->state.index) { | ||
1059 | hash_messagepad(device_data, req_ctx->state.buffer, | ||
1060 | req_ctx->state.index); | ||
1061 | } else { | ||
1062 | HASH_SET_DCAL; | ||
1063 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
1064 | cpu_relax(); | ||
1065 | } | ||
1066 | |||
1067 | if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { | ||
1068 | unsigned int keylen = ctx->keylen; | ||
1069 | u8 *key = ctx->key; | ||
1070 | |||
1071 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | ||
1072 | ctx->keylen); | ||
1073 | hash_hw_write_key(device_data, key, keylen); | ||
1074 | } | ||
1075 | |||
1076 | hash_get_digest(device_data, digest, ctx->config.algorithm); | ||
1077 | memcpy(req->result, digest, ctx->digestsize); | ||
1078 | |||
1079 | out: | ||
1080 | release_hash_device(device_data); | ||
1081 | |||
1082 | /* | ||
1083 | * Allocated in setkey, and only used in HMAC. | ||
1084 | */ | ||
1085 | kfree(ctx->key); | ||
| ctx->key = NULL; | ||
1086 | |||
1087 | return ret; | ||
1088 | } | ||
1089 | |||
1090 | /** | ||
1091 | * hash_hw_update - Updates current HASH computation hashing another part of | ||
1092 | * the message. | ||
1093 | * @req: Byte array containing the message to be hashed (caller | ||
1094 | * allocated). | ||
1095 | */ | ||
1096 | int hash_hw_update(struct ahash_request *req) | ||
1097 | { | ||
1098 | int ret = 0; | ||
1099 | u8 index = 0; | ||
1100 | u8 *buffer; | ||
1101 | struct hash_device_data *device_data; | ||
1102 | u8 *data_buffer; | ||
1103 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1104 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1105 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
1106 | struct crypto_hash_walk walk; | ||
1107 | int msg_length = crypto_hash_walk_first(req, &walk); | ||
1108 | |||
1109 | /* An empty message ("") is valid input data */ | ||
1110 | if (msg_length == 0) | ||
1111 | return ret; | ||
1112 | |||
1113 | index = req_ctx->state.index; | ||
1114 | buffer = (u8 *)req_ctx->state.buffer; | ||
1115 | |||
1116 | /* Check if req_ctx->state.length + msg_length | ||
1117 | * would overflow the 64-bit message length counter */ | ||
1118 | if (msg_length > (req_ctx->state.length.low_word + msg_length) && | ||
1119 | HASH_HIGH_WORD_MAX_VAL == | ||
1120 | req_ctx->state.length.high_word) { | ||
1121 | pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", | ||
1122 | __func__); | ||
1123 | return -EPERM; | ||
1124 | } | ||
1125 | |||
1126 | ret = hash_get_device_data(ctx, &device_data); | ||
1127 | if (ret) | ||
1128 | return ret; | ||
1129 | |||
1130 | /* Main loop */ | ||
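| /* | ||
| * Walk the scatterlist of the request chunk by chunk and hand each | ||
| * mapped buffer to hash_process_data(), which buffers the data and | ||
| * writes it to the hardware block by block. | ||
| */ | ||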
1131 | while (0 != msg_length) { | ||
1132 | data_buffer = walk.data; | ||
1133 | ret = hash_process_data(device_data, ctx, req_ctx, msg_length, | ||
1134 | data_buffer, buffer, &index); | ||
1135 | |||
1136 | if (ret) { | ||
1137 | dev_err(device_data->dev, "[%s] hash_process_data() " | ||
1138 | "failed!", __func__); | ||
1139 | goto out; | ||
1140 | } | ||
1141 | |||
1142 | msg_length = crypto_hash_walk_done(&walk, 0); | ||
1143 | } | ||
1144 | |||
1145 | req_ctx->state.index = index; | ||
1146 | dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d", | ||
1147 | __func__, req_ctx->state.index, | ||
1148 | req_ctx->state.bit_index); | ||
1149 | |||
1150 | out: | ||
1151 | release_hash_device(device_data); | ||
1152 | |||
1153 | return ret; | ||
1154 | } | ||
1155 | |||
1156 | /** | ||
1157 | * hash_resume_state - Function that resumes the state of a calculation. | ||
1158 | * @device_data: Pointer to the device structure. | ||
1159 | * @device_state: The state to be restored in the hash hardware | ||
1160 | */ | ||
1161 | int hash_resume_state(struct hash_device_data *device_data, | ||
1162 | const struct hash_state *device_state) | ||
1163 | { | ||
1164 | u32 temp_cr; | ||
1165 | s32 count; | ||
1166 | int hash_mode = HASH_OPER_MODE_HASH; | ||
1167 | |||
1168 | if (NULL == device_state) { | ||
1169 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | ||
1170 | __func__); | ||
1171 | return -EPERM; | ||
1172 | } | ||
1173 | |||
1174 | /* Check correctness of index and length members */ | ||
1175 | if (device_state->index > HASH_BLOCK_SIZE | ||
1176 | || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { | ||
1177 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | ||
1178 | __func__); | ||
1179 | return -EPERM; | ||
1180 | } | ||
1181 | |||
1182 | /* | ||
1183 | * INIT bit. Set this bit to 0b1 to reset the HASH processor core and | ||
1184 | * prepare and initialize the HASH accelerator to compute the message | ||
1185 | * digest of a new message. | ||
1186 | */ | ||
1187 | HASH_INITIALIZE; | ||
1188 | |||
1189 | temp_cr = device_state->temp_cr; | ||
1190 | writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); | ||
1191 | |||
1192 | if (device_data->base->cr & HASH_CR_MODE_MASK) | ||
1193 | hash_mode = HASH_OPER_MODE_HMAC; | ||
1194 | else | ||
1195 | hash_mode = HASH_OPER_MODE_HASH; | ||
1196 | |||
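| /* | ||
| * Restore the context-swap registers. Plain hash mode only uses the | ||
| * first 36 of them; the remaining registers are needed for HMAC only. | ||
| */ | ||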
1197 | for (count = 0; count < HASH_CSR_COUNT; count++) { | ||
1198 | if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) | ||
1199 | break; | ||
1200 | |||
1201 | writel_relaxed(device_state->csr[count], | ||
1202 | &device_data->base->csrx[count]); | ||
1203 | } | ||
1204 | |||
1205 | writel_relaxed(device_state->csfull, &device_data->base->csfull); | ||
1206 | writel_relaxed(device_state->csdatain, &device_data->base->csdatain); | ||
1207 | |||
1208 | writel_relaxed(device_state->str_reg, &device_data->base->str); | ||
1209 | writel_relaxed(temp_cr, &device_data->base->cr); | ||
1210 | |||
1211 | return 0; | ||
1212 | } | ||
1213 | |||
1214 | /** | ||
1215 | * hash_save_state - Function that saves the state of hardware. | ||
1216 | * @device_data: Pointer to the device structure. | ||
1217 | * @device_state: The structure where the hardware state should be saved. | ||
1218 | */ | ||
1219 | int hash_save_state(struct hash_device_data *device_data, | ||
1220 | struct hash_state *device_state) | ||
1221 | { | ||
1222 | u32 temp_cr; | ||
1223 | u32 count; | ||
1224 | int hash_mode = HASH_OPER_MODE_HASH; | ||
1225 | |||
1226 | if (NULL == device_state) { | ||
1227 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | ||
1228 | __func__); | ||
1229 | return -ENOTSUPP; | ||
1230 | } | ||
1231 | |||
1232 | /* | ||
1233 | * Wait for any ongoing digest calculation to finish, so that the | ||
1234 | * hardware state registers are stable when they are read back. | ||
1235 | */ | ||
1236 | while (device_data->base->str & HASH_STR_DCAL_MASK) | ||
1237 | cpu_relax(); | ||
1238 | |||
1239 | temp_cr = readl_relaxed(&device_data->base->cr); | ||
1240 | |||
1241 | device_state->str_reg = readl_relaxed(&device_data->base->str); | ||
1242 | |||
1243 | device_state->din_reg = readl_relaxed(&device_data->base->din); | ||
1244 | |||
1245 | if (device_data->base->cr & HASH_CR_MODE_MASK) | ||
1246 | hash_mode = HASH_OPER_MODE_HMAC; | ||
1247 | else | ||
1248 | hash_mode = HASH_OPER_MODE_HASH; | ||
1249 | |||
1250 | for (count = 0; count < HASH_CSR_COUNT; count++) { | ||
1251 | if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) | ||
1252 | break; | ||
1253 | |||
1254 | device_state->csr[count] = | ||
1255 | readl_relaxed(&device_data->base->csrx[count]); | ||
1256 | } | ||
1257 | |||
1258 | device_state->csfull = readl_relaxed(&device_data->base->csfull); | ||
1259 | device_state->csdatain = readl_relaxed(&device_data->base->csdatain); | ||
1260 | |||
1261 | device_state->temp_cr = temp_cr; | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | /** | ||
1267 | * hash_check_hw - This routine checks the peripheral and PrimeCell IDs. | ||
1268 | * @device_data: Pointer to the device structure. | ||
1269 | */ | ||
1271 | int hash_check_hw(struct hash_device_data *device_data) | ||
1272 | { | ||
1273 | /* Checking Peripheral Ids */ | ||
1274 | if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) | ||
1275 | && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) | ||
1276 | && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) | ||
1277 | && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) | ||
1278 | && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) | ||
1279 | && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) | ||
1280 | && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) | ||
1281 | && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) | ||
1282 | ) { | ||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
1286 | dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", | ||
1287 | __func__); | ||
1288 | return -ENOTSUPP; | ||
1289 | } | ||
1290 | |||
1291 | /** | ||
1292 | * hash_get_digest - Gets the digest. | ||
1293 | * @device_data: Pointer to the device structure. | ||
1294 | * @digest: User allocated byte array for the calculated digest. | ||
1295 | * @algorithm: The algorithm in use. | ||
1296 | */ | ||
1297 | void hash_get_digest(struct hash_device_data *device_data, | ||
1298 | u8 *digest, int algorithm) | ||
1299 | { | ||
1300 | u32 temp_hx_val, count; | ||
1301 | int loop_ctr; | ||
1302 | |||
1303 | if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { | ||
1304 | dev_err(device_data->dev, "[%s] Incorrect algorithm %d", | ||
1305 | __func__, algorithm); | ||
1306 | return; | ||
1307 | } | ||
1308 | |||
1309 | if (algorithm == HASH_ALGO_SHA1) | ||
1310 | loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32); | ||
1311 | else | ||
1312 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); | ||
1313 | |||
1314 | dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", | ||
1315 | __func__, (u32) digest); | ||
1316 | |||
1317 | /* Copy the result into the digest array, most significant byte first */ | ||
1318 | for (count = 0; count < loop_ctr; count++) { | ||
1319 | temp_hx_val = readl_relaxed(&device_data->base->hx[count]); | ||
1320 | digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); | ||
1321 | digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); | ||
1322 | digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); | ||
1323 | digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); | ||
1324 | } | ||
1325 | } | ||
1326 | |||
1327 | /** | ||
1328 | * ahash_update - The hash update function for SHA-1/SHA-2 (SHA-256). | ||
1329 | * @req: The hash request for the job. | ||
1330 | */ | ||
1331 | static int ahash_update(struct ahash_request *req) | ||
1332 | { | ||
1333 | int ret = 0; | ||
1334 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
1335 | |||
1336 | if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) | ||
1337 | ret = hash_hw_update(req); | ||
1338 | /* Skip update for DMA, all data will be passed to DMA in final */ | ||
1339 | |||
1340 | if (ret) { | ||
1341 | pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", | ||
1342 | __func__); | ||
1343 | } | ||
1344 | |||
1345 | return ret; | ||
1346 | } | ||
1347 | |||
1348 | /** | ||
1349 | * ahash_final - The hash final function for SHA-1/SHA-2 (SHA-256). | ||
1350 | * @req: The hash request for the job. | ||
1351 | */ | ||
1352 | static int ahash_final(struct ahash_request *req) | ||
1353 | { | ||
1354 | int ret = 0; | ||
1355 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | ||
1356 | |||
1357 | pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); | ||
1358 | |||
1359 | if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) | ||
1360 | ret = hash_dma_final(req); | ||
1361 | else | ||
1362 | ret = hash_hw_final(req); | ||
1363 | |||
1364 | if (ret) { | ||
1365 | pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", | ||
1366 | __func__); | ||
1367 | } | ||
1368 | |||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | static int hash_setkey(struct crypto_ahash *tfm, | ||
1373 | const u8 *key, unsigned int keylen, int alg) | ||
1374 | { | ||
1375 | int ret = 0; | ||
1376 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1377 | |||
1378 | /* | ||
1379 | * The key is copied here and freed in the final function (HMAC only). | ||
1380 | */ | ||
1381 | ctx->key = kmalloc(keylen, GFP_KERNEL); | ||
1382 | if (!ctx->key) { | ||
1383 | pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " | ||
1384 | "for %d\n", __func__, alg); | ||
1385 | return -ENOMEM; | ||
1386 | } | ||
1387 | |||
1388 | memcpy(ctx->key, key, keylen); | ||
1389 | ctx->keylen = keylen; | ||
1390 | |||
1391 | return ret; | ||
1392 | } | ||
1393 | |||
1394 | static int ahash_sha1_init(struct ahash_request *req) | ||
1395 | { | ||
1396 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1397 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1398 | |||
1399 | ctx->config.data_format = HASH_DATA_8_BITS; | ||
1400 | ctx->config.algorithm = HASH_ALGO_SHA1; | ||
1401 | ctx->config.oper_mode = HASH_OPER_MODE_HASH; | ||
1402 | ctx->digestsize = SHA1_DIGEST_SIZE; | ||
1403 | |||
1404 | return hash_init(req); | ||
1405 | } | ||
1406 | |||
1407 | static int ahash_sha256_init(struct ahash_request *req) | ||
1408 | { | ||
1409 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1410 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1411 | |||
1412 | ctx->config.data_format = HASH_DATA_8_BITS; | ||
1413 | ctx->config.algorithm = HASH_ALGO_SHA256; | ||
1414 | ctx->config.oper_mode = HASH_OPER_MODE_HASH; | ||
1415 | ctx->digestsize = SHA256_DIGEST_SIZE; | ||
1416 | |||
1417 | return hash_init(req); | ||
1418 | } | ||
1419 | |||
1420 | static int ahash_sha1_digest(struct ahash_request *req) | ||
1421 | { | ||
1422 | int ret2, ret1; | ||
1423 | |||
1424 | ret1 = ahash_sha1_init(req); | ||
1425 | if (ret1) | ||
1426 | goto out; | ||
1427 | |||
1428 | ret1 = ahash_update(req); | ||
1429 | ret2 = ahash_final(req); | ||
1430 | |||
1431 | out: | ||
1432 | return ret1 ? ret1 : ret2; | ||
1433 | } | ||
1434 | |||
1435 | static int ahash_sha256_digest(struct ahash_request *req) | ||
1436 | { | ||
1437 | int ret2, ret1; | ||
1438 | |||
1439 | ret1 = ahash_sha256_init(req); | ||
1440 | if (ret1) | ||
1441 | goto out; | ||
1442 | |||
1443 | ret1 = ahash_update(req); | ||
1444 | ret2 = ahash_final(req); | ||
1445 | |||
1446 | out: | ||
1447 | return ret1 ? ret1 : ret2; | ||
1448 | } | ||
1449 | |||
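| /* | ||
| * The HMAC variants below use the hardware's native HMAC mode: the key | ||
| * saved by hash_setkey() is written to the HASH block in hash_hw_final(), | ||
| * so no software ipad/opad processing is done by the driver. | ||
| */ | ||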
1450 | static int hmac_sha1_init(struct ahash_request *req) | ||
1451 | { | ||
1452 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1453 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1454 | |||
1455 | ctx->config.data_format = HASH_DATA_8_BITS; | ||
1456 | ctx->config.algorithm = HASH_ALGO_SHA1; | ||
1457 | ctx->config.oper_mode = HASH_OPER_MODE_HMAC; | ||
1458 | ctx->digestsize = SHA1_DIGEST_SIZE; | ||
1459 | |||
1460 | return hash_init(req); | ||
1461 | } | ||
1462 | |||
1463 | static int hmac_sha256_init(struct ahash_request *req) | ||
1464 | { | ||
1465 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1466 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1467 | |||
1468 | ctx->config.data_format = HASH_DATA_8_BITS; | ||
1469 | ctx->config.algorithm = HASH_ALGO_SHA256; | ||
1470 | ctx->config.oper_mode = HASH_OPER_MODE_HMAC; | ||
1471 | ctx->digestsize = SHA256_DIGEST_SIZE; | ||
1472 | |||
1473 | return hash_init(req); | ||
1474 | } | ||
1475 | |||
1476 | static int hmac_sha1_digest(struct ahash_request *req) | ||
1477 | { | ||
1478 | int ret2, ret1; | ||
1479 | |||
1480 | ret1 = hmac_sha1_init(req); | ||
1481 | if (ret1) | ||
1482 | goto out; | ||
1483 | |||
1484 | ret1 = ahash_update(req); | ||
1485 | ret2 = ahash_final(req); | ||
1486 | |||
1487 | out: | ||
1488 | return ret1 ? ret1 : ret2; | ||
1489 | } | ||
1490 | |||
1491 | static int hmac_sha256_digest(struct ahash_request *req) | ||
1492 | { | ||
1493 | int ret2, ret1; | ||
1494 | |||
1495 | ret1 = hmac_sha256_init(req); | ||
1496 | if (ret1) | ||
1497 | goto out; | ||
1498 | |||
1499 | ret1 = ahash_update(req); | ||
1500 | ret2 = ahash_final(req); | ||
1501 | |||
1502 | out: | ||
1503 | return ret1 ? ret1 : ret2; | ||
1504 | } | ||
1505 | |||
1506 | static int hmac_sha1_setkey(struct crypto_ahash *tfm, | ||
1507 | const u8 *key, unsigned int keylen) | ||
1508 | { | ||
1509 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); | ||
1510 | } | ||
1511 | |||
1512 | static int hmac_sha256_setkey(struct crypto_ahash *tfm, | ||
1513 | const u8 *key, unsigned int keylen) | ||
1514 | { | ||
1515 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); | ||
1516 | } | ||
1517 | |||
1518 | struct hash_algo_template { | ||
1519 | struct hash_config conf; | ||
1520 | struct ahash_alg hash; | ||
1521 | }; | ||
1522 | |||
1523 | static int hash_cra_init(struct crypto_tfm *tfm) | ||
1524 | { | ||
1525 | struct hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1526 | struct crypto_alg *alg = tfm->__crt_alg; | ||
1527 | struct hash_algo_template *hash_alg; | ||
1528 | |||
1529 | hash_alg = container_of(__crypto_ahash_alg(alg), | ||
1530 | struct hash_algo_template, | ||
1531 | hash); | ||
1532 | |||
1533 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1534 | sizeof(struct hash_req_ctx)); | ||
1535 | |||
1536 | ctx->config.data_format = HASH_DATA_8_BITS; | ||
1537 | ctx->config.algorithm = hash_alg->conf.algorithm; | ||
1538 | ctx->config.oper_mode = hash_alg->conf.oper_mode; | ||
1539 | |||
1540 | ctx->digestsize = hash_alg->hash.halg.digestsize; | ||
1541 | |||
1542 | return 0; | ||
1543 | } | ||
1544 | |||
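| /* | ||
| * Template table: one entry per algorithm/operating-mode combination | ||
| * exposed to the crypto API. hash_cra_init() copies the per-algorithm | ||
| * configuration from here into each newly allocated transform context. | ||
| */ | ||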
1545 | static struct hash_algo_template hash_algs[] = { | ||
1546 | { | ||
1547 | .conf.algorithm = HASH_ALGO_SHA1, | ||
1548 | .conf.oper_mode = HASH_OPER_MODE_HASH, | ||
1549 | .hash = { | ||
1550 | .init = hash_init, | ||
1551 | .update = ahash_update, | ||
1552 | .final = ahash_final, | ||
1553 | .digest = ahash_sha1_digest, | ||
1554 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
1555 | .halg.statesize = sizeof(struct hash_ctx), | ||
1556 | .halg.base = { | ||
1557 | .cra_name = "sha1", | ||
1558 | .cra_driver_name = "sha1-ux500", | ||
1559 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1560 | CRYPTO_ALG_ASYNC, | ||
1561 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1562 | .cra_ctxsize = sizeof(struct hash_ctx), | ||
1563 | .cra_init = hash_cra_init, | ||
1564 | .cra_module = THIS_MODULE, | ||
1565 | } | ||
1566 | } | ||
1567 | }, | ||
1568 | { | ||
1569 | .conf.algorithm = HASH_ALGO_SHA256, | ||
1570 | .conf.oper_mode = HASH_OPER_MODE_HASH, | ||
1571 | .hash = { | ||
1572 | .init = hash_init, | ||
1573 | .update = ahash_update, | ||
1574 | .final = ahash_final, | ||
1575 | .digest = ahash_sha256_digest, | ||
1576 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1577 | .halg.statesize = sizeof(struct hash_ctx), | ||
1578 | .halg.base = { | ||
1579 | .cra_name = "sha256", | ||
1580 | .cra_driver_name = "sha256-ux500", | ||
1581 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1582 | CRYPTO_ALG_ASYNC, | ||
1583 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1584 | .cra_ctxsize = sizeof(struct hash_ctx), | ||
1585 | .cra_type = &crypto_ahash_type, | ||
1586 | .cra_init = hash_cra_init, | ||
1587 | .cra_module = THIS_MODULE, | ||
1588 | } | ||
1589 | } | ||
1590 | |||
1591 | }, | ||
1592 | { | ||
1593 | .conf.algorithm = HASH_ALGO_SHA1, | ||
1594 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | ||
1595 | .hash = { | ||
1596 | .init = hash_init, | ||
1597 | .update = ahash_update, | ||
1598 | .final = ahash_final, | ||
1599 | .digest = hmac_sha1_digest, | ||
1600 | .setkey = hmac_sha1_setkey, | ||
1601 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
1602 | .halg.statesize = sizeof(struct hash_ctx), | ||
1603 | .halg.base = { | ||
1604 | .cra_name = "hmac(sha1)", | ||
1605 | .cra_driver_name = "hmac-sha1-ux500", | ||
1606 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1607 | CRYPTO_ALG_ASYNC, | ||
1608 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1609 | .cra_ctxsize = sizeof(struct hash_ctx), | ||
1610 | .cra_type = &crypto_ahash_type, | ||
1611 | .cra_init = hash_cra_init, | ||
1612 | .cra_module = THIS_MODULE, | ||
1613 | } | ||
1614 | } | ||
1615 | }, | ||
1616 | { | ||
1617 | .conf.algorithm = HASH_ALGO_SHA256, | ||
1618 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | ||
1619 | .hash = { | ||
1620 | .init = hash_init, | ||
1621 | .update = ahash_update, | ||
1622 | .final = ahash_final, | ||
1623 | .digest = hmac_sha256_digest, | ||
1624 | .setkey = hmac_sha256_setkey, | ||
1625 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1626 | .halg.statesize = sizeof(struct hash_ctx), | ||
1627 | .halg.base = { | ||
1628 | .cra_name = "hmac(sha256)", | ||
1629 | .cra_driver_name = "hmac-sha256-ux500", | ||
1630 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1631 | CRYPTO_ALG_ASYNC, | ||
1632 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1633 | .cra_ctxsize = sizeof(struct hash_ctx), | ||
1634 | .cra_type = &crypto_ahash_type, | ||
1635 | .cra_init = hash_cra_init, | ||
1636 | .cra_module = THIS_MODULE, | ||
1637 | } | ||
1638 | } | ||
1639 | } | ||
1640 | }; | ||
1641 | |||
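| /* | ||
| * Illustrative usage sketch -- NOT part of this patch. It shows how | ||
| * another kernel module might exercise the "hmac(sha256)" algorithm | ||
| * registered by this driver through the generic ahash API. The function | ||
| * names (example_done, example_hmac_sha256) are made up for illustration, | ||
| * and <crypto/hash.h>, <linux/scatterlist.h> and <linux/completion.h> are | ||
| * assumed to be available. Fenced off with "#if 0" so it is never built. | ||
| */ | ||
| #if 0 | ||
| static void example_done(struct crypto_async_request *req, int err) | ||
| { | ||
| /* Called again with the real status after -EINPROGRESS */ | ||
| if (err != -EINPROGRESS) | ||
| complete(req->data); | ||
| } | ||
|  | ||
| /* msg must not live on the stack, since it is mapped via a scatterlist */ | ||
| static int example_hmac_sha256(const u8 *key, unsigned int keylen, | ||
| const u8 *msg, unsigned int len, u8 *out) | ||
| { | ||
| struct crypto_ahash *tfm; | ||
| struct ahash_request *req; | ||
| struct scatterlist sg; | ||
| DECLARE_COMPLETION_ONSTACK(done); | ||
| int ret; | ||
|  | ||
| tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0); | ||
| if (IS_ERR(tfm)) | ||
| return PTR_ERR(tfm); | ||
|  | ||
| ret = crypto_ahash_setkey(tfm, key, keylen); | ||
| if (ret) | ||
| goto out_free_tfm; | ||
|  | ||
| req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
| if (!req) { | ||
| ret = -ENOMEM; | ||
| goto out_free_tfm; | ||
| } | ||
|  | ||
| sg_init_one(&sg, msg, len); | ||
| ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
| example_done, &done); | ||
| ahash_request_set_crypt(req, &sg, out, len); | ||
|  | ||
| /* The 32-byte digest is written to out when the request completes */ | ||
| ret = crypto_ahash_digest(req); | ||
| if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
| wait_for_completion(&done); | ||
| ret = 0; | ||
| } | ||
|  | ||
| ahash_request_free(req); | ||
| out_free_tfm: | ||
| crypto_free_ahash(tfm); | ||
| return ret; | ||
| } | ||
| #endif | ||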
1642 | /** | ||
1643 | * ahash_algs_register_all - Registers all supported hash algorithms. | ||
1644 | */ | ||
1645 | static int ahash_algs_register_all(struct hash_device_data *device_data) | ||
1646 | { | ||
1647 | int ret; | ||
1648 | int i; | ||
1649 | int count; | ||
1650 | |||
1651 | for (i = 0; i < ARRAY_SIZE(hash_algs); i++) { | ||
1652 | ret = crypto_register_ahash(&hash_algs[i].hash); | ||
1653 | if (ret) { | ||
1654 | count = i; | ||
1655 | dev_err(device_data->dev, "[%s] alg registration failed", | ||
1656 | hash_algs[i].hash.halg.base.cra_driver_name); | ||
1657 | goto unreg; | ||
1658 | } | ||
1659 | } | ||
1660 | return 0; | ||
1661 | unreg: | ||
1662 | for (i = 0; i < count; i++) | ||
1663 | crypto_unregister_ahash(&hash_algs[i].hash); | ||
1664 | return ret; | ||
1665 | } | ||
1666 | |||
1667 | /** | ||
1668 | * ahash_algs_unregister_all - Unregisters all supported hash algorithms. | ||
1669 | */ | ||
1670 | static void ahash_algs_unregister_all(struct hash_device_data *device_data) | ||
1671 | { | ||
1672 | int i; | ||
1673 | |||
1674 | for (i = 0; i < ARRAY_SIZE(hash_algs); i++) | ||
1675 | crypto_unregister_ahash(&hash_algs[i].hash); | ||
1676 | } | ||
1677 | |||
1678 | /** | ||
1679 | * ux500_hash_probe - Function that probes the hash hardware. | ||
1680 | * @pdev: The platform device. | ||
1681 | */ | ||
1682 | static int ux500_hash_probe(struct platform_device *pdev) | ||
1683 | { | ||
1684 | int ret = 0; | ||
1685 | struct resource *res = NULL; | ||
1686 | struct hash_device_data *device_data; | ||
1687 | struct device *dev = &pdev->dev; | ||
1688 | |||
1689 | device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); | ||
1690 | if (!device_data) { | ||
1691 | dev_dbg(dev, "[%s] kzalloc() failed!", __func__); | ||
1692 | ret = -ENOMEM; | ||
1693 | goto out; | ||
1694 | } | ||
1695 | |||
1696 | device_data->dev = dev; | ||
1697 | device_data->current_ctx = NULL; | ||
1698 | |||
1699 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1700 | if (!res) { | ||
1701 | dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); | ||
1702 | ret = -ENODEV; | ||
1703 | goto out_kfree; | ||
1704 | } | ||
1705 | |||
1706 | res = request_mem_region(res->start, resource_size(res), pdev->name); | ||
1707 | if (res == NULL) { | ||
1708 | dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); | ||
1709 | ret = -EBUSY; | ||
1710 | goto out_kfree; | ||
1711 | } | ||
1712 | |||
1713 | device_data->base = ioremap(res->start, resource_size(res)); | ||
1714 | if (!device_data->base) { | ||
1715 | dev_err(dev, "[%s] ioremap() failed!", | ||
1716 | __func__); | ||
1717 | ret = -ENOMEM; | ||
1718 | goto out_free_mem; | ||
1719 | } | ||
1720 | spin_lock_init(&device_data->ctx_lock); | ||
1721 | spin_lock_init(&device_data->power_state_lock); | ||
1722 | |||
1723 | /* Enable power for HASH1 hardware block */ | ||
1724 | device_data->regulator = regulator_get(dev, "v-ape"); | ||
1725 | if (IS_ERR(device_data->regulator)) { | ||
1726 | dev_err(dev, "[%s] regulator_get() failed!", __func__); | ||
1727 | ret = PTR_ERR(device_data->regulator); | ||
1728 | device_data->regulator = NULL; | ||
1729 | goto out_unmap; | ||
1730 | } | ||
1731 | |||
1732 | /* Enable the clock for HASH1 hardware block */ | ||
1733 | device_data->clk = clk_get(dev, NULL); | ||
1734 | if (IS_ERR(device_data->clk)) { | ||
1735 | dev_err(dev, "[%s] clk_get() failed!", __func__); | ||
1736 | ret = PTR_ERR(device_data->clk); | ||
1737 | goto out_regulator; | ||
1738 | } | ||
1739 | |||
1740 | /* Enable device power (and clock) */ | ||
1741 | ret = hash_enable_power(device_data, false); | ||
1742 | if (ret) { | ||
1743 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); | ||
1744 | goto out_clk; | ||
1745 | } | ||
1746 | |||
1747 | ret = hash_check_hw(device_data); | ||
1748 | if (ret) { | ||
1749 | dev_err(dev, "[%s] hash_check_hw() failed!", __func__); | ||
1750 | goto out_power; | ||
1751 | } | ||
1752 | |||
1753 | if (hash_mode == HASH_MODE_DMA) | ||
1754 | hash_dma_setup_channel(device_data, dev); | ||
1755 | |||
1756 | platform_set_drvdata(pdev, device_data); | ||
1757 | |||
1758 | /* Put the new device into the device list... */ | ||
1759 | klist_add_tail(&device_data->list_node, &driver_data.device_list); | ||
1760 | /* ... and signal that a new device is available. */ | ||
1761 | up(&driver_data.device_allocation); | ||
1762 | |||
1763 | ret = ahash_algs_register_all(device_data); | ||
1764 | if (ret) { | ||
1765 | dev_err(dev, "[%s] ahash_algs_register_all() " | ||
1766 | "failed!", __func__); | ||
1767 | goto out_power; | ||
1768 | } | ||
1769 | |||
1770 | dev_info(dev, "[%s] successfully probed\n", __func__); | ||
1771 | return 0; | ||
1772 | |||
1773 | out_power: | ||
1774 | hash_disable_power(device_data, false); | ||
1775 | |||
1776 | out_clk: | ||
1777 | clk_put(device_data->clk); | ||
1778 | |||
1779 | out_regulator: | ||
1780 | regulator_put(device_data->regulator); | ||
1781 | |||
1782 | out_unmap: | ||
1783 | iounmap(device_data->base); | ||
1784 | |||
1785 | out_free_mem: | ||
1786 | release_mem_region(res->start, resource_size(res)); | ||
1787 | |||
1788 | out_kfree: | ||
1789 | kfree(device_data); | ||
1790 | out: | ||
1791 | return ret; | ||
1792 | } | ||
1793 | |||
1794 | /** | ||
1795 | * ux500_hash_remove - Function that removes the hash device from the platform. | ||
1796 | * @pdev: The platform device. | ||
1797 | */ | ||
1798 | static int ux500_hash_remove(struct platform_device *pdev) | ||
1799 | { | ||
1800 | struct resource *res; | ||
1801 | struct hash_device_data *device_data; | ||
1802 | struct device *dev = &pdev->dev; | ||
1803 | |||
1804 | device_data = platform_get_drvdata(pdev); | ||
1805 | if (!device_data) { | ||
1806 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", | ||
1807 | __func__); | ||
1808 | return -ENOMEM; | ||
1809 | } | ||
1810 | |||
1811 | /* Try to decrease the number of available devices. */ | ||
1812 | if (down_trylock(&driver_data.device_allocation)) | ||
1813 | return -EBUSY; | ||
1814 | |||
1815 | /* Check that the device is free */ | ||
1816 | spin_lock(&device_data->ctx_lock); | ||
1817 | /* current_ctx allocates a device, NULL = unallocated */ | ||
1818 | if (device_data->current_ctx) { | ||
1819 | /* The device is busy */ | ||
1820 | spin_unlock(&device_data->ctx_lock); | ||
1821 | /* Return the device to the pool. */ | ||
1822 | up(&driver_data.device_allocation); | ||
1823 | return -EBUSY; | ||
1824 | } | ||
1825 | |||
1826 | spin_unlock(&device_data->ctx_lock); | ||
1827 | |||
1828 | /* Remove the device from the list */ | ||
1829 | if (klist_node_attached(&device_data->list_node)) | ||
1830 | klist_remove(&device_data->list_node); | ||
1831 | |||
1832 | /* If this was the last device, remove the services */ | ||
1833 | if (list_empty(&driver_data.device_list.k_list)) | ||
1834 | ahash_algs_unregister_all(device_data); | ||
1835 | |||
1836 | if (hash_disable_power(device_data, false)) | ||
1837 | dev_err(dev, "[%s]: hash_disable_power() failed", | ||
1838 | __func__); | ||
1839 | |||
1840 | clk_put(device_data->clk); | ||
1841 | regulator_put(device_data->regulator); | ||
1842 | |||
1843 | iounmap(device_data->base); | ||
1844 | |||
1845 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1846 | if (res) | ||
1847 | release_mem_region(res->start, resource_size(res)); | ||
1848 | |||
1849 | kfree(device_data); | ||
1850 | |||
1851 | return 0; | ||
1852 | } | ||
1853 | |||
1854 | /** | ||
1855 | * ux500_hash_shutdown - Function that shuts down the hash device. | ||
1856 | * @pdev: The platform device | ||
1857 | */ | ||
1858 | static void ux500_hash_shutdown(struct platform_device *pdev) | ||
1859 | { | ||
1860 | struct resource *res = NULL; | ||
1861 | struct hash_device_data *device_data; | ||
1862 | |||
1863 | device_data = platform_get_drvdata(pdev); | ||
1864 | if (!device_data) { | ||
1865 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | ||
1866 | __func__); | ||
1867 | return; | ||
1868 | } | ||
1869 | |||
1870 | /* Check that the device is free */ | ||
1871 | spin_lock(&device_data->ctx_lock); | ||
1872 | /* current_ctx allocates a device, NULL = unallocated */ | ||
1873 | if (!device_data->current_ctx) { | ||
1874 | if (down_trylock(&driver_data.device_allocation)) | ||
1875 | dev_dbg(&pdev->dev, "[%s]: Hash device still in use! " | ||
1876 | "Shutting down anyway...", __func__); | ||
1877 | /* | ||
1878 | * (Allocate the device.) | ||
1879 | * Set current_ctx to a non-NULL (dummy) value so the device | ||
1880 | * cannot be claimed by another context while shutting down. | ||
1881 | */ | ||
1882 | device_data->current_ctx++; | ||
1883 | } | ||
1884 | spin_unlock(&device_data->ctx_lock); | ||
1885 | |||
1886 | /* Remove the device from the list */ | ||
1887 | if (klist_node_attached(&device_data->list_node)) | ||
1888 | klist_remove(&device_data->list_node); | ||
1889 | |||
1890 | /* If this was the last device, remove the services */ | ||
1891 | if (list_empty(&driver_data.device_list.k_list)) | ||
1892 | ahash_algs_unregister_all(device_data); | ||
1893 | |||
1894 | iounmap(device_data->base); | ||
1895 | |||
1896 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1897 | if (res) | ||
1898 | release_mem_region(res->start, resource_size(res)); | ||
1899 | |||
1900 | if (hash_disable_power(device_data, false)) | ||
1901 | dev_err(&pdev->dev, "[%s] hash_disable_power() failed", | ||
1902 | __func__); | ||
1903 | } | ||
1904 | |||
1905 | /** | ||
1906 | * ux500_hash_suspend - Function that suspends the hash device. | ||
1907 | * @pdev: The platform device. | ||
1908 | * @state: The PM transition state (not used by this driver). | ||
1909 | */ | ||
1910 | static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) | ||
1911 | { | ||
1912 | int ret; | ||
1913 | struct hash_device_data *device_data; | ||
1914 | struct hash_ctx *temp_ctx = NULL; | ||
1915 | |||
1916 | device_data = platform_get_drvdata(pdev); | ||
1917 | if (!device_data) { | ||
1918 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | ||
1919 | __func__); | ||
1920 | return -ENOMEM; | ||
1921 | } | ||
1922 | |||
1923 | spin_lock(&device_data->ctx_lock); | ||
1924 | if (!device_data->current_ctx) | ||
1925 | device_data->current_ctx++; | ||
1926 | spin_unlock(&device_data->ctx_lock); | ||
1927 | |||
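| /* | ||
| * current_ctx was bumped from NULL to a dummy non-NULL value above if | ||
| * the device was idle; ++temp_ctx (NULL + 1) yields that same dummy | ||
| * value, so this test checks whether we just claimed an idle device. | ||
| */ | ||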
1928 | if (device_data->current_ctx == ++temp_ctx) { | ||
1929 | if (down_interruptible(&driver_data.device_allocation)) | ||
1930 | dev_dbg(&pdev->dev, "[%s]: down_interruptible() " | ||
1931 | "failed", __func__); | ||
1932 | ret = hash_disable_power(device_data, false); | ||
1933 | |||
1934 | } else | ||
1935 | ret = hash_disable_power(device_data, true); | ||
1936 | |||
1937 | if (ret) | ||
1938 | dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__); | ||
1939 | |||
1940 | return ret; | ||
1941 | } | ||
1942 | |||
1943 | /** | ||
1944 | * ux500_hash_resume - Function that resumes the hash device. | ||
1945 | * @pdev: The platform device. | ||
1946 | */ | ||
1947 | static int ux500_hash_resume(struct platform_device *pdev) | ||
1948 | { | ||
1949 | int ret = 0; | ||
1950 | struct hash_device_data *device_data; | ||
1951 | struct hash_ctx *temp_ctx = NULL; | ||
1952 | |||
1953 | device_data = platform_get_drvdata(pdev); | ||
1954 | if (!device_data) { | ||
1955 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | ||
1956 | __func__); | ||
1957 | return -ENOMEM; | ||
1958 | } | ||
1959 | |||
1960 | spin_lock(&device_data->ctx_lock); | ||
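| /* If current_ctx still holds the dummy value set in suspend, drop it */ | ||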
1961 | if (device_data->current_ctx == ++temp_ctx) | ||
1962 | device_data->current_ctx = NULL; | ||
1963 | spin_unlock(&device_data->ctx_lock); | ||
1964 | |||
1965 | if (!device_data->current_ctx) | ||
1966 | up(&driver_data.device_allocation); | ||
1967 | else | ||
1968 | ret = hash_enable_power(device_data, true); | ||
1969 | |||
1970 | if (ret) | ||
1971 | dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", | ||
1972 | __func__); | ||
1973 | |||
1974 | return ret; | ||
1975 | } | ||
1976 | |||
1977 | static struct platform_driver hash_driver = { | ||
1978 | .probe = ux500_hash_probe, | ||
1979 | .remove = ux500_hash_remove, | ||
1980 | .shutdown = ux500_hash_shutdown, | ||
1981 | .suspend = ux500_hash_suspend, | ||
1982 | .resume = ux500_hash_resume, | ||
1983 | .driver = { | ||
1984 | .owner = THIS_MODULE, | ||
1985 | .name = "hash1", | ||
1986 | } | ||
1987 | }; | ||
1988 | |||
1989 | /** | ||
1990 | * ux500_hash_mod_init - The kernel module init function. | ||
1991 | */ | ||
1992 | static int __init ux500_hash_mod_init(void) | ||
1993 | { | ||
1994 | klist_init(&driver_data.device_list, NULL, NULL); | ||
1995 | /* Initialize the semaphore to 0 devices (locked state) */ | ||
1996 | sema_init(&driver_data.device_allocation, 0); | ||
1997 | |||
1998 | return platform_driver_register(&hash_driver); | ||
1999 | } | ||
2000 | |||
2001 | /** | ||
2002 | * ux500_hash_mod_fini - The kernel module exit function. | ||
2003 | */ | ||
2004 | static void __exit ux500_hash_mod_fini(void) | ||
2005 | { | ||
2006 | platform_driver_unregister(&hash_driver); | ||
2008 | } | ||
2009 | |||
2010 | module_init(ux500_hash_mod_init); | ||
2011 | module_exit(ux500_hash_mod_fini); | ||
2012 | |||
2013 | MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); | ||
2014 | MODULE_LICENSE("GPL"); | ||
2015 | |||
2016 | MODULE_ALIAS("sha1-all"); | ||
2017 | MODULE_ALIAS("sha256-all"); | ||
2018 | MODULE_ALIAS("hmac-sha1-all"); | ||
2019 | MODULE_ALIAS("hmac-sha256-all"); | ||