diff options
author | Joe Perches <joe@perches.com> | 2013-07-16 17:06:48 -0400 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2013-07-31 20:53:59 -0400 |
commit | 69d2884debaa029ddcf9de4631c4c83249bc8c4d (patch) | |
tree | f22ef3e74171cdaddf4b8c2fb38e9b26160650a6 /drivers/crypto/ux500 | |
parent | 997ad2900ac13b8afcfc45ce79bf662551a501eb (diff) |
crypto: ux500 - Fix logging, make arrays const, neatening
Logging messages without newlines are possibly interleaved
with other messages. Add terminating newlines to avoid
this.
Other miscellaneous changes:
Make arrays const to reduce data size
Add pr_fmt to prefix pr_<level>, remove now unused DEV_DBG_NAME
Coalesce formats, align arguments
Remove unnecessary OOM messages as dump_stack is already done
Remove unnecessary cast of void *
Change kzalloc(sizeof(struct)...) to kzalloc(sizeof(*var), ...)
Reduce indents in struct definitions
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ux500')
-rw-r--r-- | drivers/crypto/ux500/hash/hash_core.c | 586 |
1 file changed, 279 insertions, 307 deletions
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 496ae6aae316..1c73f4fbc252 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * License terms: GNU General Public License (GPL) version 2 | 11 | * License terms: GNU General Public License (GPL) version 2 |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) "hashX hashX: " fmt | ||
15 | |||
14 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
15 | #include <linux/device.h> | 17 | #include <linux/device.h> |
16 | #include <linux/err.h> | 18 | #include <linux/err.h> |
@@ -35,8 +37,6 @@ | |||
35 | 37 | ||
36 | #include "hash_alg.h" | 38 | #include "hash_alg.h" |
37 | 39 | ||
38 | #define DEV_DBG_NAME "hashX hashX:" | ||
39 | |||
40 | static int hash_mode; | 40 | static int hash_mode; |
41 | module_param(hash_mode, int, 0); | 41 | module_param(hash_mode, int, 0); |
42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | 42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); |
@@ -44,13 +44,13 @@ MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | |||
44 | /** | 44 | /** |
45 | * Pre-calculated empty message digests. | 45 | * Pre-calculated empty message digests. |
46 | */ | 46 | */ |
47 | static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { | 47 | static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { |
48 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, | 48 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, |
49 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, | 49 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, |
50 | 0xaf, 0xd8, 0x07, 0x09 | 50 | 0xaf, 0xd8, 0x07, 0x09 |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | 53 | static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { |
54 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, | 54 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, |
55 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, | 55 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, |
56 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, | 56 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, |
@@ -58,14 +58,14 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* HMAC-SHA1, no key */ | 60 | /* HMAC-SHA1, no key */ |
61 | static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { | 61 | static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { |
62 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, | 62 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, |
63 | 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, | 63 | 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, |
64 | 0x70, 0x69, 0x0e, 0x1d | 64 | 0x70, 0x69, 0x0e, 0x1d |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* HMAC-SHA256, no key */ | 67 | /* HMAC-SHA256, no key */ |
68 | static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { | 68 | static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { |
69 | 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, | 69 | 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, |
70 | 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, | 70 | 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, |
71 | 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, | 71 | 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, |
@@ -97,7 +97,7 @@ static struct hash_driver_data driver_data; | |||
97 | * | 97 | * |
98 | */ | 98 | */ |
99 | static void hash_messagepad(struct hash_device_data *device_data, | 99 | static void hash_messagepad(struct hash_device_data *device_data, |
100 | const u32 *message, u8 index_bytes); | 100 | const u32 *message, u8 index_bytes); |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * release_hash_device - Releases a previously allocated hash device. | 103 | * release_hash_device - Releases a previously allocated hash device. |
@@ -119,7 +119,7 @@ static void release_hash_device(struct hash_device_data *device_data) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static void hash_dma_setup_channel(struct hash_device_data *device_data, | 121 | static void hash_dma_setup_channel(struct hash_device_data *device_data, |
122 | struct device *dev) | 122 | struct device *dev) |
123 | { | 123 | { |
124 | struct hash_platform_data *platform_data = dev->platform_data; | 124 | struct hash_platform_data *platform_data = dev->platform_data; |
125 | struct dma_slave_config conf = { | 125 | struct dma_slave_config conf = { |
@@ -127,7 +127,7 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
127 | .dst_addr = device_data->phybase + HASH_DMA_FIFO, | 127 | .dst_addr = device_data->phybase + HASH_DMA_FIFO, |
128 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, | 128 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
129 | .dst_maxburst = 16, | 129 | .dst_maxburst = 16, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | dma_cap_zero(device_data->dma.mask); | 132 | dma_cap_zero(device_data->dma.mask); |
133 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); | 133 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); |
@@ -135,8 +135,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
135 | device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; | 135 | device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; |
136 | device_data->dma.chan_mem2hash = | 136 | device_data->dma.chan_mem2hash = |
137 | dma_request_channel(device_data->dma.mask, | 137 | dma_request_channel(device_data->dma.mask, |
138 | platform_data->dma_filter, | 138 | platform_data->dma_filter, |
139 | device_data->dma.cfg_mem2hash); | 139 | device_data->dma.cfg_mem2hash); |
140 | 140 | ||
141 | dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); | 141 | dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); |
142 | 142 | ||
@@ -145,21 +145,21 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
145 | 145 | ||
146 | static void hash_dma_callback(void *data) | 146 | static void hash_dma_callback(void *data) |
147 | { | 147 | { |
148 | struct hash_ctx *ctx = (struct hash_ctx *) data; | 148 | struct hash_ctx *ctx = data; |
149 | 149 | ||
150 | complete(&ctx->device->dma.complete); | 150 | complete(&ctx->device->dma.complete); |
151 | } | 151 | } |
152 | 152 | ||
153 | static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, | 153 | static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, |
154 | int len, enum dma_data_direction direction) | 154 | int len, enum dma_data_direction direction) |
155 | { | 155 | { |
156 | struct dma_async_tx_descriptor *desc = NULL; | 156 | struct dma_async_tx_descriptor *desc = NULL; |
157 | struct dma_chan *channel = NULL; | 157 | struct dma_chan *channel = NULL; |
158 | dma_cookie_t cookie; | 158 | dma_cookie_t cookie; |
159 | 159 | ||
160 | if (direction != DMA_TO_DEVICE) { | 160 | if (direction != DMA_TO_DEVICE) { |
161 | dev_err(ctx->device->dev, "[%s] Invalid DMA direction", | 161 | dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", |
162 | __func__); | 162 | __func__); |
163 | return -EFAULT; | 163 | return -EFAULT; |
164 | } | 164 | } |
165 | 165 | ||
@@ -172,20 +172,19 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, | |||
172 | direction); | 172 | direction); |
173 | 173 | ||
174 | if (!ctx->device->dma.sg_len) { | 174 | if (!ctx->device->dma.sg_len) { |
175 | dev_err(ctx->device->dev, | 175 | dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", |
176 | "[%s]: Could not map the sg list (TO_DEVICE)", | 176 | __func__); |
177 | __func__); | ||
178 | return -EFAULT; | 177 | return -EFAULT; |
179 | } | 178 | } |
180 | 179 | ||
181 | dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " | 180 | dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", |
182 | "(TO_DEVICE)", __func__); | 181 | __func__); |
183 | desc = dmaengine_prep_slave_sg(channel, | 182 | desc = dmaengine_prep_slave_sg(channel, |
184 | ctx->device->dma.sg, ctx->device->dma.sg_len, | 183 | ctx->device->dma.sg, ctx->device->dma.sg_len, |
185 | direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); | 184 | direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); |
186 | if (!desc) { | 185 | if (!desc) { |
187 | dev_err(ctx->device->dev, | 186 | dev_err(ctx->device->dev, |
188 | "[%s]: device_prep_slave_sg() failed!", __func__); | 187 | "%s: device_prep_slave_sg() failed!\n", __func__); |
189 | return -EFAULT; | 188 | return -EFAULT; |
190 | } | 189 | } |
191 | 190 | ||
@@ -205,17 +204,16 @@ static void hash_dma_done(struct hash_ctx *ctx) | |||
205 | chan = ctx->device->dma.chan_mem2hash; | 204 | chan = ctx->device->dma.chan_mem2hash; |
206 | dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 205 | dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); |
207 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, | 206 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, |
208 | ctx->device->dma.sg_len, DMA_TO_DEVICE); | 207 | ctx->device->dma.sg_len, DMA_TO_DEVICE); |
209 | |||
210 | } | 208 | } |
211 | 209 | ||
212 | static int hash_dma_write(struct hash_ctx *ctx, | 210 | static int hash_dma_write(struct hash_ctx *ctx, |
213 | struct scatterlist *sg, int len) | 211 | struct scatterlist *sg, int len) |
214 | { | 212 | { |
215 | int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); | 213 | int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); |
216 | if (error) { | 214 | if (error) { |
217 | dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " | 215 | dev_dbg(ctx->device->dev, |
218 | "failed", __func__); | 216 | "%s: hash_set_dma_transfer() failed\n", __func__); |
219 | return error; | 217 | return error; |
220 | } | 218 | } |
221 | 219 | ||
@@ -245,19 +243,18 @@ static int get_empty_message_digest( | |||
245 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { | 243 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { |
246 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | 244 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { |
247 | memcpy(zero_hash, &zero_message_hash_sha1[0], | 245 | memcpy(zero_hash, &zero_message_hash_sha1[0], |
248 | SHA1_DIGEST_SIZE); | 246 | SHA1_DIGEST_SIZE); |
249 | *zero_hash_size = SHA1_DIGEST_SIZE; | 247 | *zero_hash_size = SHA1_DIGEST_SIZE; |
250 | *zero_digest = true; | 248 | *zero_digest = true; |
251 | } else if (HASH_ALGO_SHA256 == | 249 | } else if (HASH_ALGO_SHA256 == |
252 | ctx->config.algorithm) { | 250 | ctx->config.algorithm) { |
253 | memcpy(zero_hash, &zero_message_hash_sha256[0], | 251 | memcpy(zero_hash, &zero_message_hash_sha256[0], |
254 | SHA256_DIGEST_SIZE); | 252 | SHA256_DIGEST_SIZE); |
255 | *zero_hash_size = SHA256_DIGEST_SIZE; | 253 | *zero_hash_size = SHA256_DIGEST_SIZE; |
256 | *zero_digest = true; | 254 | *zero_digest = true; |
257 | } else { | 255 | } else { |
258 | dev_err(device_data->dev, "[%s] " | 256 | dev_err(device_data->dev, "%s: Incorrect algorithm!\n", |
259 | "Incorrect algorithm!" | 257 | __func__); |
260 | , __func__); | ||
261 | ret = -EINVAL; | 258 | ret = -EINVAL; |
262 | goto out; | 259 | goto out; |
263 | } | 260 | } |
@@ -265,25 +262,24 @@ static int get_empty_message_digest( | |||
265 | if (!ctx->keylen) { | 262 | if (!ctx->keylen) { |
266 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | 263 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { |
267 | memcpy(zero_hash, &zero_message_hmac_sha1[0], | 264 | memcpy(zero_hash, &zero_message_hmac_sha1[0], |
268 | SHA1_DIGEST_SIZE); | 265 | SHA1_DIGEST_SIZE); |
269 | *zero_hash_size = SHA1_DIGEST_SIZE; | 266 | *zero_hash_size = SHA1_DIGEST_SIZE; |
270 | *zero_digest = true; | 267 | *zero_digest = true; |
271 | } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { | 268 | } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { |
272 | memcpy(zero_hash, &zero_message_hmac_sha256[0], | 269 | memcpy(zero_hash, &zero_message_hmac_sha256[0], |
273 | SHA256_DIGEST_SIZE); | 270 | SHA256_DIGEST_SIZE); |
274 | *zero_hash_size = SHA256_DIGEST_SIZE; | 271 | *zero_hash_size = SHA256_DIGEST_SIZE; |
275 | *zero_digest = true; | 272 | *zero_digest = true; |
276 | } else { | 273 | } else { |
277 | dev_err(device_data->dev, "[%s] " | 274 | dev_err(device_data->dev, "%s: Incorrect algorithm!\n", |
278 | "Incorrect algorithm!" | 275 | __func__); |
279 | , __func__); | ||
280 | ret = -EINVAL; | 276 | ret = -EINVAL; |
281 | goto out; | 277 | goto out; |
282 | } | 278 | } |
283 | } else { | 279 | } else { |
284 | dev_dbg(device_data->dev, "[%s] Continue hash " | 280 | dev_dbg(device_data->dev, |
285 | "calculation, since hmac key avalable", | 281 | "%s: Continue hash calculation, since hmac key available\n", |
286 | __func__); | 282 | __func__); |
287 | } | 283 | } |
288 | } | 284 | } |
289 | out: | 285 | out: |
@@ -299,9 +295,8 @@ out: | |||
299 | * This function request for disabling power (regulator) and clock, | 295 | * This function request for disabling power (regulator) and clock, |
300 | * and could also save current hw state. | 296 | * and could also save current hw state. |
301 | */ | 297 | */ |
302 | static int hash_disable_power( | 298 | static int hash_disable_power(struct hash_device_data *device_data, |
303 | struct hash_device_data *device_data, | 299 | bool save_device_state) |
304 | bool save_device_state) | ||
305 | { | 300 | { |
306 | int ret = 0; | 301 | int ret = 0; |
307 | struct device *dev = device_data->dev; | 302 | struct device *dev = device_data->dev; |
@@ -319,7 +314,7 @@ static int hash_disable_power( | |||
319 | clk_disable(device_data->clk); | 314 | clk_disable(device_data->clk); |
320 | ret = regulator_disable(device_data->regulator); | 315 | ret = regulator_disable(device_data->regulator); |
321 | if (ret) | 316 | if (ret) |
322 | dev_err(dev, "[%s] regulator_disable() failed!", __func__); | 317 | dev_err(dev, "%s: regulator_disable() failed!\n", __func__); |
323 | 318 | ||
324 | device_data->power_state = false; | 319 | device_data->power_state = false; |
325 | 320 | ||
@@ -337,9 +332,8 @@ out: | |||
337 | * This function request for enabling power (regulator) and clock, | 332 | * This function request for enabling power (regulator) and clock, |
338 | * and could also restore a previously saved hw state. | 333 | * and could also restore a previously saved hw state. |
339 | */ | 334 | */ |
340 | static int hash_enable_power( | 335 | static int hash_enable_power(struct hash_device_data *device_data, |
341 | struct hash_device_data *device_data, | 336 | bool restore_device_state) |
342 | bool restore_device_state) | ||
343 | { | 337 | { |
344 | int ret = 0; | 338 | int ret = 0; |
345 | struct device *dev = device_data->dev; | 339 | struct device *dev = device_data->dev; |
@@ -348,14 +342,13 @@ static int hash_enable_power( | |||
348 | if (!device_data->power_state) { | 342 | if (!device_data->power_state) { |
349 | ret = regulator_enable(device_data->regulator); | 343 | ret = regulator_enable(device_data->regulator); |
350 | if (ret) { | 344 | if (ret) { |
351 | dev_err(dev, "[%s]: regulator_enable() failed!", | 345 | dev_err(dev, "%s: regulator_enable() failed!\n", |
352 | __func__); | 346 | __func__); |
353 | goto out; | 347 | goto out; |
354 | } | 348 | } |
355 | ret = clk_enable(device_data->clk); | 349 | ret = clk_enable(device_data->clk); |
356 | if (ret) { | 350 | if (ret) { |
357 | dev_err(dev, "[%s]: clk_enable() failed!", | 351 | dev_err(dev, "%s: clk_enable() failed!\n", __func__); |
358 | __func__); | ||
359 | ret = regulator_disable( | 352 | ret = regulator_disable( |
360 | device_data->regulator); | 353 | device_data->regulator); |
361 | goto out; | 354 | goto out; |
@@ -366,8 +359,7 @@ static int hash_enable_power( | |||
366 | if (device_data->restore_dev_state) { | 359 | if (device_data->restore_dev_state) { |
367 | if (restore_device_state) { | 360 | if (restore_device_state) { |
368 | device_data->restore_dev_state = false; | 361 | device_data->restore_dev_state = false; |
369 | hash_resume_state(device_data, | 362 | hash_resume_state(device_data, &device_data->state); |
370 | &device_data->state); | ||
371 | } | 363 | } |
372 | } | 364 | } |
373 | out: | 365 | out: |
@@ -447,7 +439,7 @@ static int hash_get_device_data(struct hash_ctx *ctx, | |||
447 | * spec or due to a bug in the hw. | 439 | * spec or due to a bug in the hw. |
448 | */ | 440 | */ |
449 | static void hash_hw_write_key(struct hash_device_data *device_data, | 441 | static void hash_hw_write_key(struct hash_device_data *device_data, |
450 | const u8 *key, unsigned int keylen) | 442 | const u8 *key, unsigned int keylen) |
451 | { | 443 | { |
452 | u32 word = 0; | 444 | u32 word = 0; |
453 | int nwords = 1; | 445 | int nwords = 1; |
@@ -491,14 +483,14 @@ static void hash_hw_write_key(struct hash_device_data *device_data, | |||
491 | * calculation. | 483 | * calculation. |
492 | */ | 484 | */ |
493 | static int init_hash_hw(struct hash_device_data *device_data, | 485 | static int init_hash_hw(struct hash_device_data *device_data, |
494 | struct hash_ctx *ctx) | 486 | struct hash_ctx *ctx) |
495 | { | 487 | { |
496 | int ret = 0; | 488 | int ret = 0; |
497 | 489 | ||
498 | ret = hash_setconfiguration(device_data, &ctx->config); | 490 | ret = hash_setconfiguration(device_data, &ctx->config); |
499 | if (ret) { | 491 | if (ret) { |
500 | dev_err(device_data->dev, "[%s] hash_setconfiguration() " | 492 | dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", |
501 | "failed!", __func__); | 493 | __func__); |
502 | return ret; | 494 | return ret; |
503 | } | 495 | } |
504 | 496 | ||
@@ -528,9 +520,8 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) | |||
528 | size -= sg->length; | 520 | size -= sg->length; |
529 | 521 | ||
530 | /* hash_set_dma_transfer will align last nent */ | 522 | /* hash_set_dma_transfer will align last nent */ |
531 | if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) | 523 | if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || |
532 | || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && | 524 | (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) |
533 | size > 0)) | ||
534 | aligned_data = false; | 525 | aligned_data = false; |
535 | 526 | ||
536 | sg = sg_next(sg); | 527 | sg = sg_next(sg); |
@@ -585,21 +576,17 @@ static int hash_init(struct ahash_request *req) | |||
585 | if (req->nbytes < HASH_DMA_ALIGN_SIZE) { | 576 | if (req->nbytes < HASH_DMA_ALIGN_SIZE) { |
586 | req_ctx->dma_mode = false; /* Don't use DMA */ | 577 | req_ctx->dma_mode = false; /* Don't use DMA */ |
587 | 578 | ||
588 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " | 579 | pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", |
589 | "to CPU mode for data size < %d", | 580 | __func__, HASH_DMA_ALIGN_SIZE); |
590 | __func__, HASH_DMA_ALIGN_SIZE); | ||
591 | } else { | 581 | } else { |
592 | if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && | 582 | if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && |
593 | hash_dma_valid_data(req->src, | 583 | hash_dma_valid_data(req->src, req->nbytes)) { |
594 | req->nbytes)) { | ||
595 | req_ctx->dma_mode = true; | 584 | req_ctx->dma_mode = true; |
596 | } else { | 585 | } else { |
597 | req_ctx->dma_mode = false; | 586 | req_ctx->dma_mode = false; |
598 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" | 587 | pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", |
599 | " CPU mode for datalength < %d" | 588 | __func__, |
600 | " or non-aligned data, except " | 589 | HASH_DMA_PERFORMANCE_MIN_SIZE); |
601 | "in last nent", __func__, | ||
602 | HASH_DMA_PERFORMANCE_MIN_SIZE); | ||
603 | } | 590 | } |
604 | } | 591 | } |
605 | } | 592 | } |
@@ -614,9 +601,8 @@ static int hash_init(struct ahash_request *req) | |||
614 | * the HASH hardware. | 601 | * the HASH hardware. |
615 | * | 602 | * |
616 | */ | 603 | */ |
617 | static void hash_processblock( | 604 | static void hash_processblock(struct hash_device_data *device_data, |
618 | struct hash_device_data *device_data, | 605 | const u32 *message, int length) |
619 | const u32 *message, int length) | ||
620 | { | 606 | { |
621 | int len = length / HASH_BYTES_PER_WORD; | 607 | int len = length / HASH_BYTES_PER_WORD; |
622 | /* | 608 | /* |
@@ -641,7 +627,7 @@ static void hash_processblock( | |||
641 | * | 627 | * |
642 | */ | 628 | */ |
643 | static void hash_messagepad(struct hash_device_data *device_data, | 629 | static void hash_messagepad(struct hash_device_data *device_data, |
644 | const u32 *message, u8 index_bytes) | 630 | const u32 *message, u8 index_bytes) |
645 | { | 631 | { |
646 | int nwords = 1; | 632 | int nwords = 1; |
647 | 633 | ||
@@ -666,15 +652,13 @@ static void hash_messagepad(struct hash_device_data *device_data, | |||
666 | 652 | ||
667 | /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ | 653 | /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ |
668 | HASH_SET_NBLW(index_bytes * 8); | 654 | HASH_SET_NBLW(index_bytes * 8); |
669 | dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, | 655 | dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", |
670 | readl_relaxed(&device_data->base->din), | 656 | __func__, readl_relaxed(&device_data->base->din), |
671 | (int)(readl_relaxed(&device_data->base->str) & | 657 | readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); |
672 | HASH_STR_NBLW_MASK)); | ||
673 | HASH_SET_DCAL; | 658 | HASH_SET_DCAL; |
674 | dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", | 659 | dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", |
675 | __func__, readl_relaxed(&device_data->base->din), | 660 | __func__, readl_relaxed(&device_data->base->din), |
676 | (int)(readl_relaxed(&device_data->base->str) & | 661 | readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); |
677 | HASH_STR_NBLW_MASK)); | ||
678 | 662 | ||
679 | while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) | 663 | while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) |
680 | cpu_relax(); | 664 | cpu_relax(); |
@@ -704,7 +688,7 @@ static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) | |||
704 | * @config: Pointer to a configuration structure. | 688 | * @config: Pointer to a configuration structure. |
705 | */ | 689 | */ |
706 | int hash_setconfiguration(struct hash_device_data *device_data, | 690 | int hash_setconfiguration(struct hash_device_data *device_data, |
707 | struct hash_config *config) | 691 | struct hash_config *config) |
708 | { | 692 | { |
709 | int ret = 0; | 693 | int ret = 0; |
710 | 694 | ||
@@ -731,8 +715,8 @@ int hash_setconfiguration(struct hash_device_data *device_data, | |||
731 | break; | 715 | break; |
732 | 716 | ||
733 | default: | 717 | default: |
734 | dev_err(device_data->dev, "[%s] Incorrect algorithm.", | 718 | dev_err(device_data->dev, "%s: Incorrect algorithm\n", |
735 | __func__); | 719 | __func__); |
736 | return -EPERM; | 720 | return -EPERM; |
737 | } | 721 | } |
738 | 722 | ||
@@ -744,23 +728,22 @@ int hash_setconfiguration(struct hash_device_data *device_data, | |||
744 | HASH_CLEAR_BITS(&device_data->base->cr, | 728 | HASH_CLEAR_BITS(&device_data->base->cr, |
745 | HASH_CR_MODE_MASK); | 729 | HASH_CR_MODE_MASK); |
746 | else if (HASH_OPER_MODE_HMAC == config->oper_mode) { | 730 | else if (HASH_OPER_MODE_HMAC == config->oper_mode) { |
747 | HASH_SET_BITS(&device_data->base->cr, | 731 | HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); |
748 | HASH_CR_MODE_MASK); | ||
749 | if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { | 732 | if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { |
750 | /* Truncate key to blocksize */ | 733 | /* Truncate key to blocksize */ |
751 | dev_dbg(device_data->dev, "[%s] LKEY set", __func__); | 734 | dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); |
752 | HASH_SET_BITS(&device_data->base->cr, | 735 | HASH_SET_BITS(&device_data->base->cr, |
753 | HASH_CR_LKEY_MASK); | 736 | HASH_CR_LKEY_MASK); |
754 | } else { | 737 | } else { |
755 | dev_dbg(device_data->dev, "[%s] LKEY cleared", | 738 | dev_dbg(device_data->dev, "%s: LKEY cleared\n", |
756 | __func__); | 739 | __func__); |
757 | HASH_CLEAR_BITS(&device_data->base->cr, | 740 | HASH_CLEAR_BITS(&device_data->base->cr, |
758 | HASH_CR_LKEY_MASK); | 741 | HASH_CR_LKEY_MASK); |
759 | } | 742 | } |
760 | } else { /* Wrong hash mode */ | 743 | } else { /* Wrong hash mode */ |
761 | ret = -EPERM; | 744 | ret = -EPERM; |
762 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 745 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
763 | __func__); | 746 | __func__); |
764 | } | 747 | } |
765 | return ret; | 748 | return ret; |
766 | } | 749 | } |
@@ -793,8 +776,9 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) | |||
793 | } | 776 | } |
794 | 777 | ||
795 | static int hash_process_data(struct hash_device_data *device_data, | 778 | static int hash_process_data(struct hash_device_data *device_data, |
796 | struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, | 779 | struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, |
797 | int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) | 780 | int msg_length, u8 *data_buffer, u8 *buffer, |
781 | u8 *index) | ||
798 | { | 782 | { |
799 | int ret = 0; | 783 | int ret = 0; |
800 | u32 count; | 784 | u32 count; |
@@ -809,24 +793,23 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
809 | msg_length = 0; | 793 | msg_length = 0; |
810 | } else { | 794 | } else { |
811 | if (req_ctx->updated) { | 795 | if (req_ctx->updated) { |
812 | |||
813 | ret = hash_resume_state(device_data, | 796 | ret = hash_resume_state(device_data, |
814 | &device_data->state); | 797 | &device_data->state); |
815 | memmove(req_ctx->state.buffer, | 798 | memmove(req_ctx->state.buffer, |
816 | device_data->state.buffer, | 799 | device_data->state.buffer, |
817 | HASH_BLOCK_SIZE / sizeof(u32)); | 800 | HASH_BLOCK_SIZE / sizeof(u32)); |
818 | if (ret) { | 801 | if (ret) { |
819 | dev_err(device_data->dev, "[%s] " | 802 | dev_err(device_data->dev, |
820 | "hash_resume_state()" | 803 | "%s: hash_resume_state() failed!\n", |
821 | " failed!", __func__); | 804 | __func__); |
822 | goto out; | 805 | goto out; |
823 | } | 806 | } |
824 | } else { | 807 | } else { |
825 | ret = init_hash_hw(device_data, ctx); | 808 | ret = init_hash_hw(device_data, ctx); |
826 | if (ret) { | 809 | if (ret) { |
827 | dev_err(device_data->dev, "[%s] " | 810 | dev_err(device_data->dev, |
828 | "init_hash_hw()" | 811 | "%s: init_hash_hw() failed!\n", |
829 | " failed!", __func__); | 812 | __func__); |
830 | goto out; | 813 | goto out; |
831 | } | 814 | } |
832 | req_ctx->updated = 1; | 815 | req_ctx->updated = 1; |
@@ -838,22 +821,21 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
838 | * HW peripheral, otherwise we first copy data | 821 | * HW peripheral, otherwise we first copy data |
839 | * to a local buffer | 822 | * to a local buffer |
840 | */ | 823 | */ |
841 | if ((0 == (((u32)data_buffer) % 4)) | 824 | if ((0 == (((u32)data_buffer) % 4)) && |
842 | && (0 == *index)) | 825 | (0 == *index)) |
843 | hash_processblock(device_data, | 826 | hash_processblock(device_data, |
844 | (const u32 *) | 827 | (const u32 *)data_buffer, |
845 | data_buffer, HASH_BLOCK_SIZE); | 828 | HASH_BLOCK_SIZE); |
846 | else { | 829 | else { |
847 | for (count = 0; count < | 830 | for (count = 0; |
848 | (u32)(HASH_BLOCK_SIZE - | 831 | count < (u32)(HASH_BLOCK_SIZE - *index); |
849 | *index); | 832 | count++) { |
850 | count++) { | ||
851 | buffer[*index + count] = | 833 | buffer[*index + count] = |
852 | *(data_buffer + count); | 834 | *(data_buffer + count); |
853 | } | 835 | } |
854 | hash_processblock(device_data, | 836 | hash_processblock(device_data, |
855 | (const u32 *)buffer, | 837 | (const u32 *)buffer, |
856 | HASH_BLOCK_SIZE); | 838 | HASH_BLOCK_SIZE); |
857 | } | 839 | } |
858 | hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); | 840 | hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); |
859 | data_buffer += (HASH_BLOCK_SIZE - *index); | 841 | data_buffer += (HASH_BLOCK_SIZE - *index); |
@@ -865,12 +847,11 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
865 | &device_data->state); | 847 | &device_data->state); |
866 | 848 | ||
867 | memmove(device_data->state.buffer, | 849 | memmove(device_data->state.buffer, |
868 | req_ctx->state.buffer, | 850 | req_ctx->state.buffer, |
869 | HASH_BLOCK_SIZE / sizeof(u32)); | 851 | HASH_BLOCK_SIZE / sizeof(u32)); |
870 | if (ret) { | 852 | if (ret) { |
871 | dev_err(device_data->dev, "[%s] " | 853 | dev_err(device_data->dev, "%s: hash_save_state() failed!\n", |
872 | "hash_save_state()" | 854 | __func__); |
873 | " failed!", __func__); | ||
874 | goto out; | 855 | goto out; |
875 | } | 856 | } |
876 | } | 857 | } |
@@ -898,25 +879,24 @@ static int hash_dma_final(struct ahash_request *req) | |||
898 | if (ret) | 879 | if (ret) |
899 | return ret; | 880 | return ret; |
900 | 881 | ||
901 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | 882 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); |
902 | 883 | ||
903 | if (req_ctx->updated) { | 884 | if (req_ctx->updated) { |
904 | ret = hash_resume_state(device_data, &device_data->state); | 885 | ret = hash_resume_state(device_data, &device_data->state); |
905 | 886 | ||
906 | if (ret) { | 887 | if (ret) { |
907 | dev_err(device_data->dev, "[%s] hash_resume_state() " | 888 | dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", |
908 | "failed!", __func__); | 889 | __func__); |
909 | goto out; | 890 | goto out; |
910 | } | 891 | } |
911 | |||
912 | } | 892 | } |
913 | 893 | ||
914 | if (!req_ctx->updated) { | 894 | if (!req_ctx->updated) { |
915 | ret = hash_setconfiguration(device_data, &ctx->config); | 895 | ret = hash_setconfiguration(device_data, &ctx->config); |
916 | if (ret) { | 896 | if (ret) { |
917 | dev_err(device_data->dev, "[%s] " | 897 | dev_err(device_data->dev, |
918 | "hash_setconfiguration() failed!", | 898 | "%s: hash_setconfiguration() failed!\n", |
919 | __func__); | 899 | __func__); |
920 | goto out; | 900 | goto out; |
921 | } | 901 | } |
922 | 902 | ||
@@ -926,9 +906,9 @@ static int hash_dma_final(struct ahash_request *req) | |||
926 | HASH_CR_DMAE_MASK); | 906 | HASH_CR_DMAE_MASK); |
927 | } else { | 907 | } else { |
928 | HASH_SET_BITS(&device_data->base->cr, | 908 | HASH_SET_BITS(&device_data->base->cr, |
929 | HASH_CR_DMAE_MASK); | 909 | HASH_CR_DMAE_MASK); |
930 | HASH_SET_BITS(&device_data->base->cr, | 910 | HASH_SET_BITS(&device_data->base->cr, |
931 | HASH_CR_PRIVN_MASK); | 911 | HASH_CR_PRIVN_MASK); |
932 | } | 912 | } |
933 | 913 | ||
934 | HASH_INITIALIZE; | 914 | HASH_INITIALIZE; |
@@ -944,16 +924,16 @@ static int hash_dma_final(struct ahash_request *req) | |||
944 | /* Store the nents in the dma struct. */ | 924 | /* Store the nents in the dma struct. */ |
945 | ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); | 925 | ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); |
946 | if (!ctx->device->dma.nents) { | 926 | if (!ctx->device->dma.nents) { |
947 | dev_err(device_data->dev, "[%s] " | 927 | dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", |
948 | "ctx->device->dma.nents = 0", __func__); | 928 | __func__); |
949 | ret = ctx->device->dma.nents; | 929 | ret = ctx->device->dma.nents; |
950 | goto out; | 930 | goto out; |
951 | } | 931 | } |
952 | 932 | ||
953 | bytes_written = hash_dma_write(ctx, req->src, req->nbytes); | 933 | bytes_written = hash_dma_write(ctx, req->src, req->nbytes); |
954 | if (bytes_written != req->nbytes) { | 934 | if (bytes_written != req->nbytes) { |
955 | dev_err(device_data->dev, "[%s] " | 935 | dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", |
956 | "hash_dma_write() failed!", __func__); | 936 | __func__); |
957 | ret = bytes_written; | 937 | ret = bytes_written; |
958 | goto out; | 938 | goto out; |
959 | } | 939 | } |
@@ -968,8 +948,8 @@ static int hash_dma_final(struct ahash_request *req) | |||
968 | unsigned int keylen = ctx->keylen; | 948 | unsigned int keylen = ctx->keylen; |
969 | u8 *key = ctx->key; | 949 | u8 *key = ctx->key; |
970 | 950 | ||
971 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | 951 | dev_dbg(device_data->dev, "%s: keylen: %d\n", |
972 | ctx->keylen); | 952 | __func__, ctx->keylen); |
973 | hash_hw_write_key(device_data, key, keylen); | 953 | hash_hw_write_key(device_data, key, keylen); |
974 | } | 954 | } |
975 | 955 | ||
@@ -1004,14 +984,14 @@ static int hash_hw_final(struct ahash_request *req) | |||
1004 | if (ret) | 984 | if (ret) |
1005 | return ret; | 985 | return ret; |
1006 | 986 | ||
1007 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | 987 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); |
1008 | 988 | ||
1009 | if (req_ctx->updated) { | 989 | if (req_ctx->updated) { |
1010 | ret = hash_resume_state(device_data, &device_data->state); | 990 | ret = hash_resume_state(device_data, &device_data->state); |
1011 | 991 | ||
1012 | if (ret) { | 992 | if (ret) { |
1013 | dev_err(device_data->dev, "[%s] hash_resume_state() " | 993 | dev_err(device_data->dev, |
1014 | "failed!", __func__); | 994 | "%s: hash_resume_state() failed!\n", __func__); |
1015 | goto out; | 995 | goto out; |
1016 | } | 996 | } |
1017 | } else if (req->nbytes == 0 && ctx->keylen == 0) { | 997 | } else if (req->nbytes == 0 && ctx->keylen == 0) { |
@@ -1025,31 +1005,33 @@ static int hash_hw_final(struct ahash_request *req) | |||
1025 | ret = get_empty_message_digest(device_data, &zero_hash[0], | 1005 | ret = get_empty_message_digest(device_data, &zero_hash[0], |
1026 | &zero_hash_size, &zero_digest); | 1006 | &zero_hash_size, &zero_digest); |
1027 | if (!ret && likely(zero_hash_size == ctx->digestsize) && | 1007 | if (!ret && likely(zero_hash_size == ctx->digestsize) && |
1028 | zero_digest) { | 1008 | zero_digest) { |
1029 | memcpy(req->result, &zero_hash[0], ctx->digestsize); | 1009 | memcpy(req->result, &zero_hash[0], ctx->digestsize); |
1030 | goto out; | 1010 | goto out; |
1031 | } else if (!ret && !zero_digest) { | 1011 | } else if (!ret && !zero_digest) { |
1032 | dev_dbg(device_data->dev, "[%s] HMAC zero msg with " | 1012 | dev_dbg(device_data->dev, |
1033 | "key, continue...", __func__); | 1013 | "%s: HMAC zero msg with key, continue...\n", |
1014 | __func__); | ||
1034 | } else { | 1015 | } else { |
1035 | dev_err(device_data->dev, "[%s] ret=%d, or wrong " | 1016 | dev_err(device_data->dev, |
1036 | "digest size? %s", __func__, ret, | 1017 | "%s: ret=%d, or wrong digest size? %s\n", |
1037 | (zero_hash_size == ctx->digestsize) ? | 1018 | __func__, ret, |
1038 | "true" : "false"); | 1019 | zero_hash_size == ctx->digestsize ? |
1020 | "true" : "false"); | ||
1039 | /* Return error */ | 1021 | /* Return error */ |
1040 | goto out; | 1022 | goto out; |
1041 | } | 1023 | } |
1042 | } else if (req->nbytes == 0 && ctx->keylen > 0) { | 1024 | } else if (req->nbytes == 0 && ctx->keylen > 0) { |
1043 | dev_err(device_data->dev, "[%s] Empty message with " | 1025 | dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", |
1044 | "keylength > 0, NOT supported.", __func__); | 1026 | __func__); |
1045 | goto out; | 1027 | goto out; |
1046 | } | 1028 | } |
1047 | 1029 | ||
1048 | if (!req_ctx->updated) { | 1030 | if (!req_ctx->updated) { |
1049 | ret = init_hash_hw(device_data, ctx); | 1031 | ret = init_hash_hw(device_data, ctx); |
1050 | if (ret) { | 1032 | if (ret) { |
1051 | dev_err(device_data->dev, "[%s] init_hash_hw() " | 1033 | dev_err(device_data->dev, |
1052 | "failed!", __func__); | 1034 | "%s: init_hash_hw() failed!\n", __func__); |
1053 | goto out; | 1035 | goto out; |
1054 | } | 1036 | } |
1055 | } | 1037 | } |
@@ -1067,8 +1049,8 @@ static int hash_hw_final(struct ahash_request *req) | |||
1067 | unsigned int keylen = ctx->keylen; | 1049 | unsigned int keylen = ctx->keylen; |
1068 | u8 *key = ctx->key; | 1050 | u8 *key = ctx->key; |
1069 | 1051 | ||
1070 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | 1052 | dev_dbg(device_data->dev, "%s: keylen: %d\n", |
1071 | ctx->keylen); | 1053 | __func__, ctx->keylen); |
1072 | hash_hw_write_key(device_data, key, keylen); | 1054 | hash_hw_write_key(device_data, key, keylen); |
1073 | } | 1055 | } |
1074 | 1056 | ||
@@ -1115,10 +1097,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1115 | /* Check if ctx->state.length + msg_length | 1097 | /* Check if ctx->state.length + msg_length |
1116 | overflows */ | 1098 | overflows */ |
1117 | if (msg_length > (req_ctx->state.length.low_word + msg_length) && | 1099 | if (msg_length > (req_ctx->state.length.low_word + msg_length) && |
1118 | HASH_HIGH_WORD_MAX_VAL == | 1100 | HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { |
1119 | req_ctx->state.length.high_word) { | 1101 | pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__); |
1120 | pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", | ||
1121 | __func__); | ||
1122 | return -EPERM; | 1102 | return -EPERM; |
1123 | } | 1103 | } |
1124 | 1104 | ||
@@ -1133,8 +1113,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1133 | data_buffer, buffer, &index); | 1113 | data_buffer, buffer, &index); |
1134 | 1114 | ||
1135 | if (ret) { | 1115 | if (ret) { |
1136 | dev_err(device_data->dev, "[%s] hash_internal_hw_" | 1116 | dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", |
1137 | "update() failed!", __func__); | 1117 | __func__); |
1138 | goto out; | 1118 | goto out; |
1139 | } | 1119 | } |
1140 | 1120 | ||
@@ -1142,9 +1122,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1142 | } | 1122 | } |
1143 | 1123 | ||
1144 | req_ctx->state.index = index; | 1124 | req_ctx->state.index = index; |
1145 | dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))", | 1125 | dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", |
1146 | __func__, req_ctx->state.index, | 1126 | __func__, req_ctx->state.index, req_ctx->state.bit_index); |
1147 | req_ctx->state.bit_index); | ||
1148 | 1127 | ||
1149 | out: | 1128 | out: |
1150 | release_hash_device(device_data); | 1129 | release_hash_device(device_data); |
@@ -1158,23 +1137,23 @@ out: | |||
1158 | * @device_state: The state to be restored in the hash hardware | 1137 | * @device_state: The state to be restored in the hash hardware |
1159 | */ | 1138 | */ |
1160 | int hash_resume_state(struct hash_device_data *device_data, | 1139 | int hash_resume_state(struct hash_device_data *device_data, |
1161 | const struct hash_state *device_state) | 1140 | const struct hash_state *device_state) |
1162 | { | 1141 | { |
1163 | u32 temp_cr; | 1142 | u32 temp_cr; |
1164 | s32 count; | 1143 | s32 count; |
1165 | int hash_mode = HASH_OPER_MODE_HASH; | 1144 | int hash_mode = HASH_OPER_MODE_HASH; |
1166 | 1145 | ||
1167 | if (NULL == device_state) { | 1146 | if (NULL == device_state) { |
1168 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1147 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1169 | __func__); | 1148 | __func__); |
1170 | return -EPERM; | 1149 | return -EPERM; |
1171 | } | 1150 | } |
1172 | 1151 | ||
1173 | /* Check correctness of index and length members */ | 1152 | /* Check correctness of index and length members */ |
1174 | if (device_state->index > HASH_BLOCK_SIZE | 1153 | if (device_state->index > HASH_BLOCK_SIZE || |
1175 | || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { | 1154 | (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { |
1176 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1155 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1177 | __func__); | 1156 | __func__); |
1178 | return -EPERM; | 1157 | return -EPERM; |
1179 | } | 1158 | } |
1180 | 1159 | ||
@@ -1198,7 +1177,7 @@ int hash_resume_state(struct hash_device_data *device_data, | |||
1198 | break; | 1177 | break; |
1199 | 1178 | ||
1200 | writel_relaxed(device_state->csr[count], | 1179 | writel_relaxed(device_state->csr[count], |
1201 | &device_data->base->csrx[count]); | 1180 | &device_data->base->csrx[count]); |
1202 | } | 1181 | } |
1203 | 1182 | ||
1204 | writel_relaxed(device_state->csfull, &device_data->base->csfull); | 1183 | writel_relaxed(device_state->csfull, &device_data->base->csfull); |
@@ -1216,15 +1195,15 @@ int hash_resume_state(struct hash_device_data *device_data, | |||
1216 | * @device_state: The strucure where the hardware state should be saved. | 1195 | * @device_state: The strucure where the hardware state should be saved. |
1217 | */ | 1196 | */ |
1218 | int hash_save_state(struct hash_device_data *device_data, | 1197 | int hash_save_state(struct hash_device_data *device_data, |
1219 | struct hash_state *device_state) | 1198 | struct hash_state *device_state) |
1220 | { | 1199 | { |
1221 | u32 temp_cr; | 1200 | u32 temp_cr; |
1222 | u32 count; | 1201 | u32 count; |
1223 | int hash_mode = HASH_OPER_MODE_HASH; | 1202 | int hash_mode = HASH_OPER_MODE_HASH; |
1224 | 1203 | ||
1225 | if (NULL == device_state) { | 1204 | if (NULL == device_state) { |
1226 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1205 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1227 | __func__); | 1206 | __func__); |
1228 | return -ENOTSUPP; | 1207 | return -ENOTSUPP; |
1229 | } | 1208 | } |
1230 | 1209 | ||
@@ -1270,20 +1249,18 @@ int hash_save_state(struct hash_device_data *device_data, | |||
1270 | int hash_check_hw(struct hash_device_data *device_data) | 1249 | int hash_check_hw(struct hash_device_data *device_data) |
1271 | { | 1250 | { |
1272 | /* Checking Peripheral Ids */ | 1251 | /* Checking Peripheral Ids */ |
1273 | if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) | 1252 | if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && |
1274 | && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) | 1253 | HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && |
1275 | && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) | 1254 | HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && |
1276 | && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) | 1255 | HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && |
1277 | && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) | 1256 | HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && |
1278 | && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) | 1257 | HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && |
1279 | && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) | 1258 | HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) && |
1280 | && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) | 1259 | HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { |
1281 | ) { | ||
1282 | return 0; | 1260 | return 0; |
1283 | } | 1261 | } |
1284 | 1262 | ||
1285 | dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", | 1263 | dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); |
1286 | __func__); | ||
1287 | return -ENOTSUPP; | 1264 | return -ENOTSUPP; |
1288 | } | 1265 | } |
1289 | 1266 | ||
@@ -1294,14 +1271,14 @@ int hash_check_hw(struct hash_device_data *device_data) | |||
1294 | * @algorithm: The algorithm in use. | 1271 | * @algorithm: The algorithm in use. |
1295 | */ | 1272 | */ |
1296 | void hash_get_digest(struct hash_device_data *device_data, | 1273 | void hash_get_digest(struct hash_device_data *device_data, |
1297 | u8 *digest, int algorithm) | 1274 | u8 *digest, int algorithm) |
1298 | { | 1275 | { |
1299 | u32 temp_hx_val, count; | 1276 | u32 temp_hx_val, count; |
1300 | int loop_ctr; | 1277 | int loop_ctr; |
1301 | 1278 | ||
1302 | if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { | 1279 | if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { |
1303 | dev_err(device_data->dev, "[%s] Incorrect algorithm %d", | 1280 | dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", |
1304 | __func__, algorithm); | 1281 | __func__, algorithm); |
1305 | return; | 1282 | return; |
1306 | } | 1283 | } |
1307 | 1284 | ||
@@ -1310,8 +1287,8 @@ void hash_get_digest(struct hash_device_data *device_data, | |||
1310 | else | 1287 | else |
1311 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); | 1288 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); |
1312 | 1289 | ||
1313 | dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", | 1290 | dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", |
1314 | __func__, (u32) digest); | 1291 | __func__, (u32) digest); |
1315 | 1292 | ||
1316 | /* Copy result into digest array */ | 1293 | /* Copy result into digest array */ |
1317 | for (count = 0; count < loop_ctr; count++) { | 1294 | for (count = 0; count < loop_ctr; count++) { |
@@ -1337,8 +1314,7 @@ static int ahash_update(struct ahash_request *req) | |||
1337 | /* Skip update for DMA, all data will be passed to DMA in final */ | 1314 | /* Skip update for DMA, all data will be passed to DMA in final */ |
1338 | 1315 | ||
1339 | if (ret) { | 1316 | if (ret) { |
1340 | pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", | 1317 | pr_err("%s: hash_hw_update() failed!\n", __func__); |
1341 | __func__); | ||
1342 | } | 1318 | } |
1343 | 1319 | ||
1344 | return ret; | 1320 | return ret; |
@@ -1353,7 +1329,7 @@ static int ahash_final(struct ahash_request *req) | |||
1353 | int ret = 0; | 1329 | int ret = 0; |
1354 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | 1330 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); |
1355 | 1331 | ||
1356 | pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); | 1332 | pr_debug("%s: data size: %d\n", __func__, req->nbytes); |
1357 | 1333 | ||
1358 | if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) | 1334 | if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) |
1359 | ret = hash_dma_final(req); | 1335 | ret = hash_dma_final(req); |
@@ -1361,15 +1337,14 @@ static int ahash_final(struct ahash_request *req) | |||
1361 | ret = hash_hw_final(req); | 1337 | ret = hash_hw_final(req); |
1362 | 1338 | ||
1363 | if (ret) { | 1339 | if (ret) { |
1364 | pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", | 1340 | pr_err("%s: hash_hw/dma_final() failed\n", __func__); |
1365 | __func__); | ||
1366 | } | 1341 | } |
1367 | 1342 | ||
1368 | return ret; | 1343 | return ret; |
1369 | } | 1344 | } |
1370 | 1345 | ||
1371 | static int hash_setkey(struct crypto_ahash *tfm, | 1346 | static int hash_setkey(struct crypto_ahash *tfm, |
1372 | const u8 *key, unsigned int keylen, int alg) | 1347 | const u8 *key, unsigned int keylen, int alg) |
1373 | { | 1348 | { |
1374 | int ret = 0; | 1349 | int ret = 0; |
1375 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | 1350 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); |
@@ -1379,8 +1354,8 @@ static int hash_setkey(struct crypto_ahash *tfm, | |||
1379 | */ | 1354 | */ |
1380 | ctx->key = kmemdup(key, keylen, GFP_KERNEL); | 1355 | ctx->key = kmemdup(key, keylen, GFP_KERNEL); |
1381 | if (!ctx->key) { | 1356 | if (!ctx->key) { |
1382 | pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " | 1357 | pr_err("%s: Failed to allocate ctx->key for %d\n", |
1383 | "for %d\n", __func__, alg); | 1358 | __func__, alg); |
1384 | return -ENOMEM; | 1359 | return -ENOMEM; |
1385 | } | 1360 | } |
1386 | ctx->keylen = keylen; | 1361 | ctx->keylen = keylen; |
@@ -1501,13 +1476,13 @@ out: | |||
1501 | } | 1476 | } |
1502 | 1477 | ||
1503 | static int hmac_sha1_setkey(struct crypto_ahash *tfm, | 1478 | static int hmac_sha1_setkey(struct crypto_ahash *tfm, |
1504 | const u8 *key, unsigned int keylen) | 1479 | const u8 *key, unsigned int keylen) |
1505 | { | 1480 | { |
1506 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); | 1481 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); |
1507 | } | 1482 | } |
1508 | 1483 | ||
1509 | static int hmac_sha256_setkey(struct crypto_ahash *tfm, | 1484 | static int hmac_sha256_setkey(struct crypto_ahash *tfm, |
1510 | const u8 *key, unsigned int keylen) | 1485 | const u8 *key, unsigned int keylen) |
1511 | { | 1486 | { |
1512 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); | 1487 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); |
1513 | } | 1488 | } |
@@ -1528,7 +1503,7 @@ static int hash_cra_init(struct crypto_tfm *tfm) | |||
1528 | hash); | 1503 | hash); |
1529 | 1504 | ||
1530 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 1505 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
1531 | sizeof(struct hash_req_ctx)); | 1506 | sizeof(struct hash_req_ctx)); |
1532 | 1507 | ||
1533 | ctx->config.data_format = HASH_DATA_8_BITS; | 1508 | ctx->config.data_format = HASH_DATA_8_BITS; |
1534 | ctx->config.algorithm = hash_alg->conf.algorithm; | 1509 | ctx->config.algorithm = hash_alg->conf.algorithm; |
@@ -1541,98 +1516,97 @@ static int hash_cra_init(struct crypto_tfm *tfm) | |||
1541 | 1516 | ||
1542 | static struct hash_algo_template hash_algs[] = { | 1517 | static struct hash_algo_template hash_algs[] = { |
1543 | { | 1518 | { |
1544 | .conf.algorithm = HASH_ALGO_SHA1, | 1519 | .conf.algorithm = HASH_ALGO_SHA1, |
1545 | .conf.oper_mode = HASH_OPER_MODE_HASH, | 1520 | .conf.oper_mode = HASH_OPER_MODE_HASH, |
1546 | .hash = { | 1521 | .hash = { |
1547 | .init = hash_init, | 1522 | .init = hash_init, |
1548 | .update = ahash_update, | 1523 | .update = ahash_update, |
1549 | .final = ahash_final, | 1524 | .final = ahash_final, |
1550 | .digest = ahash_sha1_digest, | 1525 | .digest = ahash_sha1_digest, |
1551 | .halg.digestsize = SHA1_DIGEST_SIZE, | 1526 | .halg.digestsize = SHA1_DIGEST_SIZE, |
1552 | .halg.statesize = sizeof(struct hash_ctx), | 1527 | .halg.statesize = sizeof(struct hash_ctx), |
1553 | .halg.base = { | 1528 | .halg.base = { |
1554 | .cra_name = "sha1", | 1529 | .cra_name = "sha1", |
1555 | .cra_driver_name = "sha1-ux500", | 1530 | .cra_driver_name = "sha1-ux500", |
1556 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1531 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1557 | CRYPTO_ALG_ASYNC, | 1532 | CRYPTO_ALG_ASYNC), |
1558 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1533 | .cra_blocksize = SHA1_BLOCK_SIZE, |
1559 | .cra_ctxsize = sizeof(struct hash_ctx), | 1534 | .cra_ctxsize = sizeof(struct hash_ctx), |
1560 | .cra_init = hash_cra_init, | 1535 | .cra_init = hash_cra_init, |
1561 | .cra_module = THIS_MODULE, | 1536 | .cra_module = THIS_MODULE, |
1562 | } | 1537 | } |
1563 | } | 1538 | } |
1564 | }, | 1539 | }, |
1565 | { | 1540 | { |
1566 | .conf.algorithm = HASH_ALGO_SHA256, | 1541 | .conf.algorithm = HASH_ALGO_SHA256, |
1567 | .conf.oper_mode = HASH_OPER_MODE_HASH, | 1542 | .conf.oper_mode = HASH_OPER_MODE_HASH, |
1568 | .hash = { | 1543 | .hash = { |
1569 | .init = hash_init, | 1544 | .init = hash_init, |
1570 | .update = ahash_update, | 1545 | .update = ahash_update, |
1571 | .final = ahash_final, | 1546 | .final = ahash_final, |
1572 | .digest = ahash_sha256_digest, | 1547 | .digest = ahash_sha256_digest, |
1573 | .halg.digestsize = SHA256_DIGEST_SIZE, | 1548 | .halg.digestsize = SHA256_DIGEST_SIZE, |
1574 | .halg.statesize = sizeof(struct hash_ctx), | 1549 | .halg.statesize = sizeof(struct hash_ctx), |
1575 | .halg.base = { | 1550 | .halg.base = { |
1576 | .cra_name = "sha256", | 1551 | .cra_name = "sha256", |
1577 | .cra_driver_name = "sha256-ux500", | 1552 | .cra_driver_name = "sha256-ux500", |
1578 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1553 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1579 | CRYPTO_ALG_ASYNC, | 1554 | CRYPTO_ALG_ASYNC), |
1580 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1555 | .cra_blocksize = SHA256_BLOCK_SIZE, |
1581 | .cra_ctxsize = sizeof(struct hash_ctx), | 1556 | .cra_ctxsize = sizeof(struct hash_ctx), |
1582 | .cra_type = &crypto_ahash_type, | 1557 | .cra_type = &crypto_ahash_type, |
1583 | .cra_init = hash_cra_init, | 1558 | .cra_init = hash_cra_init, |
1584 | .cra_module = THIS_MODULE, | 1559 | .cra_module = THIS_MODULE, |
1585 | } | ||
1586 | } | 1560 | } |
1587 | 1561 | } | |
1588 | }, | 1562 | }, |
1589 | { | 1563 | { |
1590 | .conf.algorithm = HASH_ALGO_SHA1, | 1564 | .conf.algorithm = HASH_ALGO_SHA1, |
1591 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | 1565 | .conf.oper_mode = HASH_OPER_MODE_HMAC, |
1592 | .hash = { | 1566 | .hash = { |
1593 | .init = hash_init, | 1567 | .init = hash_init, |
1594 | .update = ahash_update, | 1568 | .update = ahash_update, |
1595 | .final = ahash_final, | 1569 | .final = ahash_final, |
1596 | .digest = hmac_sha1_digest, | 1570 | .digest = hmac_sha1_digest, |
1597 | .setkey = hmac_sha1_setkey, | 1571 | .setkey = hmac_sha1_setkey, |
1598 | .halg.digestsize = SHA1_DIGEST_SIZE, | 1572 | .halg.digestsize = SHA1_DIGEST_SIZE, |
1599 | .halg.statesize = sizeof(struct hash_ctx), | 1573 | .halg.statesize = sizeof(struct hash_ctx), |
1600 | .halg.base = { | 1574 | .halg.base = { |
1601 | .cra_name = "hmac(sha1)", | 1575 | .cra_name = "hmac(sha1)", |
1602 | .cra_driver_name = "hmac-sha1-ux500", | 1576 | .cra_driver_name = "hmac-sha1-ux500", |
1603 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1577 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1604 | CRYPTO_ALG_ASYNC, | 1578 | CRYPTO_ALG_ASYNC), |
1605 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1579 | .cra_blocksize = SHA1_BLOCK_SIZE, |
1606 | .cra_ctxsize = sizeof(struct hash_ctx), | 1580 | .cra_ctxsize = sizeof(struct hash_ctx), |
1607 | .cra_type = &crypto_ahash_type, | 1581 | .cra_type = &crypto_ahash_type, |
1608 | .cra_init = hash_cra_init, | 1582 | .cra_init = hash_cra_init, |
1609 | .cra_module = THIS_MODULE, | 1583 | .cra_module = THIS_MODULE, |
1610 | } | ||
1611 | } | 1584 | } |
1585 | } | ||
1612 | }, | 1586 | }, |
1613 | { | 1587 | { |
1614 | .conf.algorithm = HASH_ALGO_SHA256, | 1588 | .conf.algorithm = HASH_ALGO_SHA256, |
1615 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | 1589 | .conf.oper_mode = HASH_OPER_MODE_HMAC, |
1616 | .hash = { | 1590 | .hash = { |
1617 | .init = hash_init, | 1591 | .init = hash_init, |
1618 | .update = ahash_update, | 1592 | .update = ahash_update, |
1619 | .final = ahash_final, | 1593 | .final = ahash_final, |
1620 | .digest = hmac_sha256_digest, | 1594 | .digest = hmac_sha256_digest, |
1621 | .setkey = hmac_sha256_setkey, | 1595 | .setkey = hmac_sha256_setkey, |
1622 | .halg.digestsize = SHA256_DIGEST_SIZE, | 1596 | .halg.digestsize = SHA256_DIGEST_SIZE, |
1623 | .halg.statesize = sizeof(struct hash_ctx), | 1597 | .halg.statesize = sizeof(struct hash_ctx), |
1624 | .halg.base = { | 1598 | .halg.base = { |
1625 | .cra_name = "hmac(sha256)", | 1599 | .cra_name = "hmac(sha256)", |
1626 | .cra_driver_name = "hmac-sha256-ux500", | 1600 | .cra_driver_name = "hmac-sha256-ux500", |
1627 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1601 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1628 | CRYPTO_ALG_ASYNC, | 1602 | CRYPTO_ALG_ASYNC), |
1629 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1603 | .cra_blocksize = SHA256_BLOCK_SIZE, |
1630 | .cra_ctxsize = sizeof(struct hash_ctx), | 1604 | .cra_ctxsize = sizeof(struct hash_ctx), |
1631 | .cra_type = &crypto_ahash_type, | 1605 | .cra_type = &crypto_ahash_type, |
1632 | .cra_init = hash_cra_init, | 1606 | .cra_init = hash_cra_init, |
1633 | .cra_module = THIS_MODULE, | 1607 | .cra_module = THIS_MODULE, |
1634 | } | ||
1635 | } | 1608 | } |
1609 | } | ||
1636 | } | 1610 | } |
1637 | }; | 1611 | }; |
1638 | 1612 | ||
@@ -1649,7 +1623,7 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) | |||
1649 | ret = crypto_register_ahash(&hash_algs[i].hash); | 1623 | ret = crypto_register_ahash(&hash_algs[i].hash); |
1650 | if (ret) { | 1624 | if (ret) { |
1651 | count = i; | 1625 | count = i; |
1652 | dev_err(device_data->dev, "[%s] alg registration failed", | 1626 | dev_err(device_data->dev, "%s: alg registration failed\n", |
1653 | hash_algs[i].hash.halg.base.cra_driver_name); | 1627 | hash_algs[i].hash.halg.base.cra_driver_name); |
1654 | goto unreg; | 1628 | goto unreg; |
1655 | } | 1629 | } |
@@ -1683,9 +1657,8 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1683 | struct hash_device_data *device_data; | 1657 | struct hash_device_data *device_data; |
1684 | struct device *dev = &pdev->dev; | 1658 | struct device *dev = &pdev->dev; |
1685 | 1659 | ||
1686 | device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); | 1660 | device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); |
1687 | if (!device_data) { | 1661 | if (!device_data) { |
1688 | dev_dbg(dev, "[%s] kzalloc() failed!", __func__); | ||
1689 | ret = -ENOMEM; | 1662 | ret = -ENOMEM; |
1690 | goto out; | 1663 | goto out; |
1691 | } | 1664 | } |
@@ -1695,14 +1668,14 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1695 | 1668 | ||
1696 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1669 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1697 | if (!res) { | 1670 | if (!res) { |
1698 | dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); | 1671 | dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); |
1699 | ret = -ENODEV; | 1672 | ret = -ENODEV; |
1700 | goto out_kfree; | 1673 | goto out_kfree; |
1701 | } | 1674 | } |
1702 | 1675 | ||
1703 | res = request_mem_region(res->start, resource_size(res), pdev->name); | 1676 | res = request_mem_region(res->start, resource_size(res), pdev->name); |
1704 | if (res == NULL) { | 1677 | if (res == NULL) { |
1705 | dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); | 1678 | dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__); |
1706 | ret = -EBUSY; | 1679 | ret = -EBUSY; |
1707 | goto out_kfree; | 1680 | goto out_kfree; |
1708 | } | 1681 | } |
@@ -1710,8 +1683,7 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1710 | device_data->phybase = res->start; | 1683 | device_data->phybase = res->start; |
1711 | device_data->base = ioremap(res->start, resource_size(res)); | 1684 | device_data->base = ioremap(res->start, resource_size(res)); |
1712 | if (!device_data->base) { | 1685 | if (!device_data->base) { |
1713 | dev_err(dev, "[%s] ioremap() failed!", | 1686 | dev_err(dev, "%s: ioremap() failed!\n", __func__); |
1714 | __func__); | ||
1715 | ret = -ENOMEM; | 1687 | ret = -ENOMEM; |
1716 | goto out_free_mem; | 1688 | goto out_free_mem; |
1717 | } | 1689 | } |
@@ -1721,7 +1693,7 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1721 | /* Enable power for HASH1 hardware block */ | 1693 | /* Enable power for HASH1 hardware block */ |
1722 | device_data->regulator = regulator_get(dev, "v-ape"); | 1694 | device_data->regulator = regulator_get(dev, "v-ape"); |
1723 | if (IS_ERR(device_data->regulator)) { | 1695 | if (IS_ERR(device_data->regulator)) { |
1724 | dev_err(dev, "[%s] regulator_get() failed!", __func__); | 1696 | dev_err(dev, "%s: regulator_get() failed!\n", __func__); |
1725 | ret = PTR_ERR(device_data->regulator); | 1697 | ret = PTR_ERR(device_data->regulator); |
1726 | device_data->regulator = NULL; | 1698 | device_data->regulator = NULL; |
1727 | goto out_unmap; | 1699 | goto out_unmap; |
@@ -1730,27 +1702,27 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1730 | /* Enable the clock for HASH1 hardware block */ | 1702 | /* Enable the clock for HASH1 hardware block */ |
1731 | device_data->clk = clk_get(dev, NULL); | 1703 | device_data->clk = clk_get(dev, NULL); |
1732 | if (IS_ERR(device_data->clk)) { | 1704 | if (IS_ERR(device_data->clk)) { |
1733 | dev_err(dev, "[%s] clk_get() failed!", __func__); | 1705 | dev_err(dev, "%s: clk_get() failed!\n", __func__); |
1734 | ret = PTR_ERR(device_data->clk); | 1706 | ret = PTR_ERR(device_data->clk); |
1735 | goto out_regulator; | 1707 | goto out_regulator; |
1736 | } | 1708 | } |
1737 | 1709 | ||
1738 | ret = clk_prepare(device_data->clk); | 1710 | ret = clk_prepare(device_data->clk); |
1739 | if (ret) { | 1711 | if (ret) { |
1740 | dev_err(dev, "[%s] clk_prepare() failed!", __func__); | 1712 | dev_err(dev, "%s: clk_prepare() failed!\n", __func__); |
1741 | goto out_clk; | 1713 | goto out_clk; |
1742 | } | 1714 | } |
1743 | 1715 | ||
1744 | /* Enable device power (and clock) */ | 1716 | /* Enable device power (and clock) */ |
1745 | ret = hash_enable_power(device_data, false); | 1717 | ret = hash_enable_power(device_data, false); |
1746 | if (ret) { | 1718 | if (ret) { |
1747 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); | 1719 | dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); |
1748 | goto out_clk_unprepare; | 1720 | goto out_clk_unprepare; |
1749 | } | 1721 | } |
1750 | 1722 | ||
1751 | ret = hash_check_hw(device_data); | 1723 | ret = hash_check_hw(device_data); |
1752 | if (ret) { | 1724 | if (ret) { |
1753 | dev_err(dev, "[%s] hash_check_hw() failed!", __func__); | 1725 | dev_err(dev, "%s: hash_check_hw() failed!\n", __func__); |
1754 | goto out_power; | 1726 | goto out_power; |
1755 | } | 1727 | } |
1756 | 1728 | ||
@@ -1766,8 +1738,8 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1766 | 1738 | ||
1767 | ret = ahash_algs_register_all(device_data); | 1739 | ret = ahash_algs_register_all(device_data); |
1768 | if (ret) { | 1740 | if (ret) { |
1769 | dev_err(dev, "[%s] ahash_algs_register_all() " | 1741 | dev_err(dev, "%s: ahash_algs_register_all() failed!\n", |
1770 | "failed!", __func__); | 1742 | __func__); |
1771 | goto out_power; | 1743 | goto out_power; |
1772 | } | 1744 | } |
1773 | 1745 | ||
@@ -1810,8 +1782,7 @@ static int ux500_hash_remove(struct platform_device *pdev) | |||
1810 | 1782 | ||
1811 | device_data = platform_get_drvdata(pdev); | 1783 | device_data = platform_get_drvdata(pdev); |
1812 | if (!device_data) { | 1784 | if (!device_data) { |
1813 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", | 1785 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1814 | __func__); | ||
1815 | return -ENOMEM; | 1786 | return -ENOMEM; |
1816 | } | 1787 | } |
1817 | 1788 | ||
@@ -1841,7 +1812,7 @@ static int ux500_hash_remove(struct platform_device *pdev) | |||
1841 | ahash_algs_unregister_all(device_data); | 1812 | ahash_algs_unregister_all(device_data); |
1842 | 1813 | ||
1843 | if (hash_disable_power(device_data, false)) | 1814 | if (hash_disable_power(device_data, false)) |
1844 | dev_err(dev, "[%s]: hash_disable_power() failed", | 1815 | dev_err(dev, "%s: hash_disable_power() failed\n", |
1845 | __func__); | 1816 | __func__); |
1846 | 1817 | ||
1847 | clk_unprepare(device_data->clk); | 1818 | clk_unprepare(device_data->clk); |
@@ -1870,8 +1841,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1870 | 1841 | ||
1871 | device_data = platform_get_drvdata(pdev); | 1842 | device_data = platform_get_drvdata(pdev); |
1872 | if (!device_data) { | 1843 | if (!device_data) { |
1873 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | 1844 | dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n", |
1874 | __func__); | 1845 | __func__); |
1875 | return; | 1846 | return; |
1876 | } | 1847 | } |
1877 | 1848 | ||
@@ -1880,8 +1851,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1880 | /* current_ctx allocates a device, NULL = unallocated */ | 1851 | /* current_ctx allocates a device, NULL = unallocated */ |
1881 | if (!device_data->current_ctx) { | 1852 | if (!device_data->current_ctx) { |
1882 | if (down_trylock(&driver_data.device_allocation)) | 1853 | if (down_trylock(&driver_data.device_allocation)) |
1883 | dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" | 1854 | dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n", |
1884 | "Shutting down anyway...", __func__); | 1855 | __func__); |
1885 | /** | 1856 | /** |
1886 | * (Allocate the device) | 1857 | * (Allocate the device) |
1887 | * Need to set this to non-null (dummy) value, | 1858 | * Need to set this to non-null (dummy) value, |
@@ -1906,8 +1877,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1906 | release_mem_region(res->start, resource_size(res)); | 1877 | release_mem_region(res->start, resource_size(res)); |
1907 | 1878 | ||
1908 | if (hash_disable_power(device_data, false)) | 1879 | if (hash_disable_power(device_data, false)) |
1909 | dev_err(&pdev->dev, "[%s] hash_disable_power() failed", | 1880 | dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", |
1910 | __func__); | 1881 | __func__); |
1911 | } | 1882 | } |
1912 | 1883 | ||
1913 | /** | 1884 | /** |
@@ -1922,7 +1893,7 @@ static int ux500_hash_suspend(struct device *dev) | |||
1922 | 1893 | ||
1923 | device_data = dev_get_drvdata(dev); | 1894 | device_data = dev_get_drvdata(dev); |
1924 | if (!device_data) { | 1895 | if (!device_data) { |
1925 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); | 1896 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1926 | return -ENOMEM; | 1897 | return -ENOMEM; |
1927 | } | 1898 | } |
1928 | 1899 | ||
@@ -1933,15 +1904,16 @@ static int ux500_hash_suspend(struct device *dev) | |||
1933 | 1904 | ||
1934 | if (device_data->current_ctx == ++temp_ctx) { | 1905 | if (device_data->current_ctx == ++temp_ctx) { |
1935 | if (down_interruptible(&driver_data.device_allocation)) | 1906 | if (down_interruptible(&driver_data.device_allocation)) |
1936 | dev_dbg(dev, "[%s]: down_interruptible() failed", | 1907 | dev_dbg(dev, "%s: down_interruptible() failed\n", |
1937 | __func__); | 1908 | __func__); |
1938 | ret = hash_disable_power(device_data, false); | 1909 | ret = hash_disable_power(device_data, false); |
1939 | 1910 | ||
1940 | } else | 1911 | } else { |
1941 | ret = hash_disable_power(device_data, true); | 1912 | ret = hash_disable_power(device_data, true); |
1913 | } | ||
1942 | 1914 | ||
1943 | if (ret) | 1915 | if (ret) |
1944 | dev_err(dev, "[%s]: hash_disable_power()", __func__); | 1916 | dev_err(dev, "%s: hash_disable_power()\n", __func__); |
1945 | 1917 | ||
1946 | return ret; | 1918 | return ret; |
1947 | } | 1919 | } |
@@ -1958,7 +1930,7 @@ static int ux500_hash_resume(struct device *dev) | |||
1958 | 1930 | ||
1959 | device_data = dev_get_drvdata(dev); | 1931 | device_data = dev_get_drvdata(dev); |
1960 | if (!device_data) { | 1932 | if (!device_data) { |
1961 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); | 1933 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1962 | return -ENOMEM; | 1934 | return -ENOMEM; |
1963 | } | 1935 | } |
1964 | 1936 | ||
@@ -1973,7 +1945,7 @@ static int ux500_hash_resume(struct device *dev) | |||
1973 | ret = hash_enable_power(device_data, true); | 1945 | ret = hash_enable_power(device_data, true); |
1974 | 1946 | ||
1975 | if (ret) | 1947 | if (ret) |
1976 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); | 1948 | dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); |
1977 | 1949 | ||
1978 | return ret; | 1950 | return ret; |
1979 | } | 1951 | } |
@@ -1981,8 +1953,8 @@ static int ux500_hash_resume(struct device *dev) | |||
1981 | static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); | 1953 | static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); |
1982 | 1954 | ||
1983 | static const struct of_device_id ux500_hash_match[] = { | 1955 | static const struct of_device_id ux500_hash_match[] = { |
1984 | { .compatible = "stericsson,ux500-hash" }, | 1956 | { .compatible = "stericsson,ux500-hash" }, |
1985 | { }, | 1957 | { }, |
1986 | }; | 1958 | }; |
1987 | 1959 | ||
1988 | static struct platform_driver hash_driver = { | 1960 | static struct platform_driver hash_driver = { |