Diffstat (limited to 'drivers/crypto/n2_core.c')
-rw-r--r--	drivers/crypto/n2_core.c	123
1 file changed, 65 insertions, 58 deletions
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 8566be832f51..23163fda5035 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx)
 struct n2_hash_ctx {
 	struct n2_base_ctx	base;
 
-	struct crypto_ahash	*fallback;
+	struct crypto_ahash	*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request	fallback_req;
-	struct shash_desc	fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
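The first two hunks split the old transform-wide n2_hash_ctx in two: the fallback tfm pointer stays per-transform, while the request-scoped pieces (the hash state union and the embedded fallback request) move into a new n2_hash_req_ctx. A transform context is shared by every in-flight request on the same tfm, so keeping a single fallback_req there was racy; the per-request context also drops the fragile layout coupling with crypto_init_shash_ops_async. A minimal sketch of the two lookup paths, assuming the standard ahash API:

	/* per-request state: lives in the context area behind the request */
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);

	/* per-transform state: shared by all requests on this tfm */
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));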
@@ -269,56 +263,62 @@ struct n2_hash_ctx {
 
 	unsigned char			hash_key[64];
 	unsigned char			keyed_zero_hash[32];
+
+	struct ahash_request		fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
@@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+				 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
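crypto_ahash_set_reqsize() tells the ahash core how much private memory to reserve behind every struct ahash_request for this algorithm. The size must cover n2_hash_req_ctx plus the fallback's own reqsize, because fallback_req is the final member of n2_hash_req_ctx and the fallback algorithm keeps its request context directly after the request it is handed. A sketch of the resulting layout (illustration only):

	/*
	 * ahash_request_alloc(ahash, gfp) returns:
	 *
	 *   [ struct ahash_request ]
	 *   [ struct n2_hash_req_ctx              ]  <- ahash_request_ctx(req)
	 *       ...
	 *       [ struct ahash_request fallback_req ]
	 *   [ crypto_ahash_reqsize(fallback_tfm) bytes ]  <- fallback's own ctx
	 */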
@@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
@@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
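For requests past the hardware's 2^16-byte limit, digest is delegated wholesale to the software fallback. The open-coded field assignments mirror what the generic request helpers do; a sketch of the equivalent plumbing (the driver assigns the fields directly and propagates only the CRYPTO_TFM_REQ_MAY_SLEEP flag from the original request):

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	ahash_request_set_crypt(&rctx->fallback_req, req->src,
				req->result, req->nbytes);
	return crypto_ahash_digest(&rctx->fallback_req);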
@@ -472,9 +477,8 @@ out:
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req)
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req)
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req)
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
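The four digest entry points (md5, sha1, sha256, sha224) all change the same way: the hash state is now built in the per-request context rather than in the shared transform context, and the tfm lookup becomes unnecessary. The common shape, sketched (sha224 reuses the sha256 state, as the hunk above shows):

	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *s = &rctx->u.sha256;	/* per-request state */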
@@ -1398,7 +1399,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 
 	intr = ip->ino_table[i].intr;
 
-	dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
 	if (!dev_intrs)
 		return -ENODEV;
 
@@ -1449,7 +1450,7 @@ static int queue_cache_init(void)
 {
 	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
 		queue_cache[HV_NCS_QTYPE_MAU - 1] =
-			kmem_cache_create("cwq_queue",
+			kmem_cache_create("mau_queue",
 					  (MAU_NUM_ENTRIES *
 					   MAU_ENTRY_SIZE),
 					  MAU_ENTRY_SIZE, 0, NULL);
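Before this hunk, both queue caches were created under the name "cwq_queue", so the MAU cache duplicated the CWQ cache's name. With the rename, the pair looks like this (sketch; the CWQ half lies outside the hunk, and the CWQ_* constants are defined elsewhere in the driver):

	queue_cache[HV_NCS_QTYPE_MAU - 1] =
		kmem_cache_create("mau_queue",
				  MAU_NUM_ENTRIES * MAU_ENTRY_SIZE,
				  MAU_ENTRY_SIZE, 0, NULL);
	queue_cache[HV_NCS_QTYPE_CWQ - 1] =
		kmem_cache_create("cwq_queue",
				  CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE,
				  CWQ_ENTRY_SIZE, 0, NULL);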
@@ -1574,7 +1575,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 		id = mdesc_get_property(mdesc, tgt, "id", NULL);
 		if (table[*id] != NULL) {
 			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
-				dev->node->full_name);
+				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
 		cpu_set(*id, p->sharing);
@@ -1595,7 +1596,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
 	if (!p) {
 		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
-			dev->node->full_name);
+			dev->dev.of_node->full_name);
 		return -ENOMEM;
 	}
 
@@ -1684,7 +1685,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
 	const unsigned int *reg;
 	u64 node;
 
-	reg = of_get_property(dev->node, "reg", NULL);
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
 	if (!reg)
 		return -ENODEV;
 
@@ -1836,7 +1837,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found N2CP at %s\n", full_name);
 
 	np = alloc_n2cp();
@@ -1948,7 +1949,7 @@ static int __devinit n2_mau_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found NCP at %s\n", full_name);
 
 	mp = alloc_ncp();
@@ -2034,8 +2035,11 @@ static struct of_device_id n2_crypto_match[] = {
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
 static struct of_platform_driver n2_crypto_driver = {
-	.name		=	"n2cp",
-	.match_table	=	n2_crypto_match,
+	.driver = {
+		.name		=	"n2cp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_crypto_match,
+	},
 	.probe		=	n2_crypto_probe,
 	.remove		=	__devexit_p(n2_crypto_remove),
 };
@@ -2055,8 +2059,11 @@ static struct of_device_id n2_mau_match[] = {
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
 static struct of_platform_driver n2_mau_driver = {
-	.name		=	"ncp",
-	.match_table	=	n2_mau_match,
+	.driver = {
+		.name		=	"ncp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_mau_match,
+	},
 	.probe		=	n2_mau_probe,
 	.remove		=	__devexit_p(n2_mau_remove),
 };
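Both driver declarations get the matching 2.6.35 conversion: .name and .match_table move off of_platform_driver itself into the embedded struct device_driver as .name and .of_match_table, with .owner set alongside. A condensed before/after sketch of the pattern:

	/* before: match data on the of_platform_driver */
	static struct of_platform_driver drv = {
		.name		= "ncp",
		.match_table	= n2_mau_match,
		/* ... */
	};

	/* after: match data on the embedded device_driver */
	static struct of_platform_driver drv = {
		.driver = {
			.name		= "ncp",
			.owner		= THIS_MODULE,
			.of_match_table	= n2_mau_match,
		},
		/* ... */
	};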