author	Herbert Xu <herbert@gondor.apana.org.au>	2010-05-25 21:36:58 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2010-05-25 21:36:58 -0400
commit	50d1e9302bab7d35dae7146f8c468e0943015616 (patch)
tree	fa05320f4a297bd582686574cf94ba444e264b3f /drivers/crypto
parent	7cc2835083aedfde42de02301005a5555e00c4b1 (diff)
parent	dc4ccfd15d4fc7a91ddf222bc5eed5cc4bcf10e6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/crypto-2.6
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/amcc/crypto4xx_core.c	7
-rw-r--r--	drivers/crypto/n2_core.c	520
-rw-r--r--	drivers/crypto/talitos.c	9
3 files changed, 363 insertions(+), 173 deletions(-)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..9d65b371de64 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
 };
 
 static struct of_platform_driver crypto4xx_driver = {
-	.name		= "crypto4xx",
-	.match_table	= crypto4xx_match,
+	.driver = {
+		.name = "crypto4xx",
+		.owner = THIS_MODULE,
+		.of_match_table = crypto4xx_match,
+	},
 	.probe		= crypto4xx_probe,
 	.remove		= crypto4xx_remove,
 };
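This is the tree-wide 2.6.35 of_platform_driver conversion: the driver's name, module owner, and OF match table move out of the deprecated bus-specific fields into the embedded struct device_driver, with match_table renamed to of_match_table. The same pattern repeats for the n2 and talitos drivers below. A minimal sketch of the converted registration for a hypothetical "foo" driver of the same era (names and callbacks are illustrative, not from this patch):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

/* Hypothetical OF match table for a driver "foo". */
static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo" },
	{},
};
MODULE_DEVICE_TABLE(of, foo_match);

static int __devinit foo_probe(struct of_device *op,
			       const struct of_device_id *match)
{
	dev_info(&op->dev, "probed via %s\n", match->compatible);
	return 0;
}

static int __devexit foo_remove(struct of_device *op)
{
	return 0;
}

static struct of_platform_driver foo_driver = {
	/* Driver identity now lives in the generic struct device_driver... */
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
	/* ...while probe/remove stay in the OF-specific wrapper. */
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),
};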
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 8566be832f51..b99c38f23d61 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -239,86 +239,119 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 }
 #endif
 
-struct n2_base_ctx {
-	struct list_head		list;
+struct n2_ahash_alg {
+	struct list_head	entry;
+	const char		*hash_zero;
+	const u32		*hash_init;
+	u8			hw_op_hashsz;
+	u8			digest_size;
+	u8			auth_type;
+	u8			hmac_type;
+	struct ahash_alg	alg;
+};
+
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_ahash_alg, alg);
+}
+
+struct n2_hmac_alg {
+	const char		*child_alg;
+	struct n2_ahash_alg	derived;
 };
 
-static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
 {
-	INIT_LIST_HEAD(&ctx->list);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
 }
 
 struct n2_hash_ctx {
-	struct n2_base_ctx		base;
+	struct crypto_ahash		*fallback_tfm;
+};
 
-	struct crypto_ahash		*fallback;
+#define	N2_HASH_KEY_MAX	32 /* HW limit for all HMAC requests */
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request		fallback_req;
-	struct shash_desc		fallback_desc;
+struct n2_hmac_ctx {
+	struct n2_hash_ctx		base;
+
+	struct crypto_shash		*child_shash;
+
+	int				hash_key_len;
+	unsigned char			hash_key[N2_HASH_KEY_MAX];
+};
+
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
 		struct sha256_state	sha256;
 	} u;
 
-	unsigned char			hash_key[64];
-	unsigned char			keyed_zero_hash[32];
+	struct ahash_request		fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
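The new n2_ahash_alg()/n2_hmac_alg() helpers recover the driver's private algorithm descriptor from a generic transform handle by pure pointer arithmetic: tfm->__crt_alg points at the crypto_alg embedded inside ahash_alg.halg.base, and that ahash_alg is itself embedded inside n2_ahash_alg, so two container_of() steps walk back out to the outer structure. A self-contained userspace sketch of the same trick (the types here are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's container_of(), to show
 * how a pointer to an embedded member yields the containing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	const char *name;
	struct inner member;	/* embedded, like ahash_alg inside n2_ahash_alg */
};

int main(void)
{
	struct outer o = { .name = "md5-n2", .member = { 42 } };
	struct inner *p = &o.member;	/* all the crypto core hands back */

	/* Walk back from the embedded member to its container. */
	struct outer *recovered = container_of(p, struct outer, member);
	printf("%s %d\n", recovered->name, recovered->member.x);
	return 0;
}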
@@ -338,7 +371,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
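crypto_ahash_set_reqsize() is what lets the fallback state move out of the per-tfm context and into the per-request context: the crypto core now allocates each ahash_request with room for n2_hash_req_ctx plus whatever request context the fallback transform itself declared, so nested fallback requests no longer share one buffer across concurrent users of the same tfm. A userspace sketch of the size calculation, with all types as stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel types; only the sizes matter here. */
struct md5_sha_union { unsigned char state[128]; };
struct ahash_request_stub { unsigned long flags; };

struct n2_hash_req_ctx_stub {
	struct md5_sha_union u;			/* hardware hash state */
	struct ahash_request_stub fallback_req;	/* nested request header */
	/* crypto_ahash_reqsize(fallback_tfm) bytes follow in memory */
};

int main(void)
{
	size_t fallback_reqsize = 96;	/* what the fallback driver declared */
	size_t total = sizeof(struct n2_hash_req_ctx_stub) + fallback_reqsize;
	unsigned char *req_ctx = calloc(1, total);

	/* Each in-flight request gets its own copy of this whole block,
	 * which is why the old one-fallback_req-per-tfm layout could go. */
	printf("per-request context: %zu bytes\n", total);
	free(req_ctx);
	return 0;
}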
@@ -350,7 +386,95 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
+}
+
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+	struct crypto_ahash *fallback_tfm;
+	struct crypto_shash *child_shash;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+	if (IS_ERR(child_shash)) {
+		pr_warning("Child shash '%s' could not be loaded!\n",
+			   n2alg->child_alg);
+		err = PTR_ERR(child_shash);
+		goto out_free_fallback;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->child_shash = child_shash;
+	ctx->base.fallback_tfm = fallback_tfm;
+	return 0;
+
+out_free_fallback:
+	crypto_free_ahash(fallback_tfm);
+
+out:
+	return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->base.fallback_tfm);
+	crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_shash *child_shash = ctx->child_shash;
+	struct crypto_ahash *fallback_tfm;
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(child_shash)];
+	} desc;
+	int err, bs, ds;
+
+	fallback_tfm = ctx->base.fallback_tfm;
+	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+	if (err)
+		return err;
+
+	desc.shash.tfm = child_shash;
+	desc.shash.flags = crypto_ahash_get_flags(tfm) &
+		CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	bs = crypto_shash_blocksize(child_shash);
+	ds = crypto_shash_digestsize(child_shash);
+	BUG_ON(ds > N2_HASH_KEY_MAX);
+	if (keylen > bs) {
+		err = crypto_shash_digest(&desc.shash, key, keylen,
+					  ctx->hash_key);
+		if (err)
+			return err;
+		keylen = ds;
+	} else if (keylen <= N2_HASH_KEY_MAX)
+		memcpy(ctx->hash_key, key, keylen);
+
+	ctx->hash_key_len = keylen;
+
+	return err;
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
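n2_hmac_async_setkey() applies the standard RFC 2104 preprocessing: a key longer than the child hash's block size is first digested, shrinking it to the digest size (which the BUG_ON asserts fits the 32-byte hardware limit). Keys up to N2_HASH_KEY_MAX are copied verbatim. The middle case, longer than 32 bytes but no longer than the block size, only records the length, and n2_hmac_async_digest() later routes such requests to the software fallback. A userspace sketch of that decision, with digest() standing in for crypto_shash_digest() on the child hash:

#include <string.h>

#define N2_HASH_KEY_MAX 32	/* hardware key limit from the patch */

/* Sketch only: digest() stands in for crypto_shash_digest(),
 * writing digest_size bytes to out. */
static int prepare_hmac_key(const unsigned char *key, unsigned int keylen,
			    unsigned int block_size, unsigned int digest_size,
			    unsigned char out[N2_HASH_KEY_MAX],
			    unsigned int *out_len,
			    int (*digest)(const unsigned char *, unsigned int,
					  unsigned char *))
{
	if (keylen > block_size) {
		/* RFC 2104: overlong keys are replaced by their digest. */
		int err = digest(key, keylen, out);
		if (err)
			return err;
		*out_len = digest_size;
	} else if (keylen <= N2_HASH_KEY_MAX) {
		memcpy(out, key, keylen);
		*out_len = keylen;
	} else {
		/* 33..block_size bytes: too long for the hardware, short
		 * enough that hashing it would change the MAC; remember
		 * the length so digest time can punt to the fallback. */
		*out_len = keylen;
	}
	return 0;
}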
@@ -382,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 	return hv_ret;
 }
 
-static int n2_hash_async_digest(struct ahash_request *req,
-				unsigned int auth_type, unsigned int digest_size,
-				unsigned int result_size, void *hash_loc)
+static int n2_do_async_digest(struct ahash_request *req,
+			      unsigned int auth_type, unsigned int digest_size,
+			      unsigned int result_size, void *hash_loc,
+			      unsigned long auth_key, unsigned int auth_key_len)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cwq_initial_entry *ent;
 	struct crypto_hash_walk walk;
 	struct spu_queue *qp;
@@ -399,18 +523,19 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
-	n2_base_ctx_init(&ctx->base);
-
 	nbytes = crypto_hash_walk_first(req, &walk);
 
 	cpu = get_cpu();
@@ -425,13 +550,13 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 */
 	ent = qp->q + qp->tail;
 
-	ent->control = control_word_base(nbytes, 0, 0,
+	ent->control = control_word_base(nbytes, auth_key_len, 0,
 					 auth_type, digest_size,
 					 false, true, false, false,
 					 OPCODE_INPLACE_BIT |
 					 OPCODE_AUTH_MAC);
 	ent->src_addr = __pa(walk.data);
-	ent->auth_key_addr = 0UL;
+	ent->auth_key_addr = auth_key;
 	ent->auth_iv_addr = __pa(hash_loc);
 	ent->final_auth_state_addr = 0UL;
 	ent->enc_key_addr = 0UL;
@@ -470,118 +595,55 @@ out:
 	return err;
 }
 
-static int n2_md5_async_digest(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
-
-	if (unlikely(req->nbytes == 0)) {
-		static const char md5_zero[MD5_DIGEST_SIZE] = {
-			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
-			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
-		};
-
-		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
-		return 0;
-	}
-	m->hash[0] = cpu_to_le32(0x67452301);
-	m->hash[1] = cpu_to_le32(0xefcdab89);
-	m->hash[2] = cpu_to_le32(0x98badcfe);
-	m->hash[3] = cpu_to_le32(0x10325476);
-
-	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
-				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
-				    m->hash);
-}
-
-static int n2_sha1_async_digest(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
-
-	if (unlikely(req->nbytes == 0)) {
-		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
-			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
-			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
-			0x07, 0x09
-		};
-
-		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA1_H0;
-	s->state[1] = SHA1_H1;
-	s->state[2] = SHA1_H2;
-	s->state[3] = SHA1_H3;
-	s->state[4] = SHA1_H4;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
-				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
-				    s->state);
-}
+static int n2_hash_async_digest(struct ahash_request *req)
+{
+	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	int ds;
+
+	ds = n2alg->digest_size;
+	if (unlikely(req->nbytes == 0)) {
+		memcpy(req->result, n2alg->hash_zero, ds);
+		return 0;
+	}
+	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
+
+	return n2_do_async_digest(req, n2alg->auth_type,
+				  n2alg->hw_op_hashsz, ds,
+				  &rctx->u, 0UL, 0);
+}
 
-static int n2_sha256_async_digest(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
-			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
-			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
-			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
-			0x1b, 0x78, 0x52, 0xb8, 0x55
-		};
-
-		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA256_H0;
-	s->state[1] = SHA256_H1;
-	s->state[2] = SHA256_H2;
-	s->state[3] = SHA256_H3;
-	s->state[4] = SHA256_H4;
-	s->state[5] = SHA256_H5;
-	s->state[6] = SHA256_H6;
-	s->state[7] = SHA256_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
-				    s->state);
-}
-
-static int n2_sha224_async_digest(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
-			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
-			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
-			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
-			0x2f
-		};
-
-		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA224_H0;
-	s->state[1] = SHA224_H1;
-	s->state[2] = SHA224_H2;
-	s->state[3] = SHA224_H3;
-	s->state[4] = SHA224_H4;
-	s->state[5] = SHA224_H5;
-	s->state[6] = SHA224_H6;
-	s->state[7] = SHA224_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
-				    s->state);
+static int n2_hmac_async_digest(struct ahash_request *req)
+{
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ds;
+
+	ds = n2alg->derived.digest_size;
+	if (unlikely(req->nbytes == 0) ||
+	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
+	}
+	memcpy(&rctx->u, n2alg->derived.hash_init,
+	       n2alg->derived.hw_op_hashsz);
+
+	return n2_do_async_digest(req, n2alg->derived.hmac_type,
+				  n2alg->derived.hw_op_hashsz, ds,
+				  &rctx->u,
+				  __pa(&ctx->hash_key),
+				  ctx->hash_key_len);
 }
 
 struct n2_cipher_context {
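Here the four per-algorithm digest entry points collapse into a single table-driven one: everything algorithm-specific (initial state, hardware state size, opcode, digest length, digest of the empty message) now rides in n2_ahash_alg, and n2_hash_async_digest() just forwards those fields to n2_do_async_digest(). The HMAC variant does the same through the derived descriptor, additionally passing the physical address and length of the prepared key. A stripped-down userspace sketch of this descriptor-driven dispatch (all types and names are stand-ins):

#include <stdio.h>
#include <string.h>

/* Stand-in for n2_ahash_alg: per-algorithm facts live in data... */
struct alg_desc {
	const char *name;
	const unsigned char *hash_zero;	/* digest of the empty message */
	unsigned int digest_size;
};

/* ...so one entry point serves every algorithm. */
static int do_digest(const struct alg_desc *d,
		     const unsigned char *data, unsigned int len,
		     unsigned char *result)
{
	if (len == 0) {			/* same early-out as the patch */
		memcpy(result, d->hash_zero, d->digest_size);
		return 0;
	}
	printf("submit %u bytes of %p as %s\n", len, (const void *)data,
	       d->name);
	return 0;	/* hardware queue submission elided */
}

int main(void)
{
	static const unsigned char md5_zero[16] = {
		0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
		0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
	};
	struct alg_desc md5 = { "md5", md5_zero, 16 };
	unsigned char out[16];

	do_digest(&md5, (const unsigned char *)"", 0, out);
	return 0;
}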
@@ -1208,35 +1270,92 @@ static LIST_HEAD(cipher_algs);
 
 struct n2_hash_tmpl {
 	const char	*name;
-	int		(*digest)(struct ahash_request *req);
+	const char	*hash_zero;
+	const u32	*hash_init;
+	u8		hw_op_hashsz;
 	u8		digest_size;
 	u8		block_size;
+	u8		auth_type;
+	u8		hmac_type;
+};
+
+static const char md5_zero[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+static const u32 md5_init[MD5_HASH_WORDS] = {
+	cpu_to_le32(0x67452301),
+	cpu_to_le32(0xefcdab89),
+	cpu_to_le32(0x98badcfe),
+	cpu_to_le32(0x10325476),
+};
+static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+	0x07, 0x09
+};
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+	0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
 };
+static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
 static const struct n2_hash_tmpl hash_tmpls[] = {
 	{ .name		= "md5",
-	  .digest	= n2_md5_async_digest,
+	  .hash_zero	= md5_zero,
+	  .hash_init	= md5_init,
+	  .auth_type	= AUTH_TYPE_MD5,
+	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
+	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
 	  .digest_size	= MD5_DIGEST_SIZE,
 	  .block_size	= MD5_HMAC_BLOCK_SIZE },
 	{ .name		= "sha1",
-	  .digest	= n2_sha1_async_digest,
+	  .hash_zero	= sha1_zero,
+	  .hash_init	= sha1_init,
+	  .auth_type	= AUTH_TYPE_SHA1,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
+	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
 	  .digest_size	= SHA1_DIGEST_SIZE,
 	  .block_size	= SHA1_BLOCK_SIZE },
 	{ .name		= "sha256",
-	  .digest	= n2_sha256_async_digest,
+	  .hash_zero	= sha256_zero,
+	  .hash_init	= sha256_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA256_DIGEST_SIZE,
 	  .block_size	= SHA256_BLOCK_SIZE },
 	{ .name		= "sha224",
-	  .digest	= n2_sha224_async_digest,
+	  .hash_zero	= sha224_zero,
+	  .hash_init	= sha224_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_RESERVED,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
 	  .digest_size	= SHA224_DIGEST_SIZE,
 	  .block_size	= SHA224_BLOCK_SIZE },
 };
 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
 
-struct n2_ahash_alg {
-	struct list_head	entry;
-	struct ahash_alg	alg;
-};
 static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
 
 static int algs_registered;
 
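The hash_zero tables are the precomputed digests of the empty message; the driver returns them directly for zero-length requests rather than submitting an empty walk to the hardware. These constants are easy to cross-check outside the kernel; a quick userspace harness against OpenSSL (build with -lcrypto; this check is not part of the patch):

#include <openssl/md5.h>
#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const unsigned char md5_zero[MD5_DIGEST_LENGTH] = {
		0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
		0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
	};
	static const unsigned char sha1_zero[SHA_DIGEST_LENGTH] = {
		0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
		0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
		0x07, 0x09
	};
	unsigned char md[SHA_DIGEST_LENGTH];	/* big enough for both */

	MD5((const unsigned char *)"", 0, md);	/* MD5("") */
	printf("md5_zero:  %s\n",
	       memcmp(md, md5_zero, MD5_DIGEST_LENGTH) ? "MISMATCH" : "ok");

	SHA1((const unsigned char *)"", 0, md);	/* SHA-1("") */
	printf("sha1_zero: %s\n",
	       memcmp(md, sha1_zero, SHA_DIGEST_LENGTH) ? "MISMATCH" : "ok");
	return 0;
}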
@@ -1244,12 +1363,18 @@ static void __n2_unregister_algs(void)
 {
 	struct n2_cipher_alg *cipher, *cipher_tmp;
 	struct n2_ahash_alg *alg, *alg_tmp;
+	struct n2_hmac_alg *hmac, *hmac_tmp;
 
 	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
 		crypto_unregister_alg(&cipher->alg);
 		list_del(&cipher->entry);
 		kfree(cipher);
 	}
+	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+		crypto_unregister_ahash(&hmac->derived.alg);
+		list_del(&hmac->derived.entry);
+		kfree(hmac);
+	}
 	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
 		crypto_unregister_ahash(&alg->alg);
 		list_del(&alg->entry);
@@ -1289,8 +1414,49 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
 	list_add(&p->entry, &cipher_algs);
 	err = crypto_register_alg(alg);
 	if (err) {
+		pr_err("%s alg registration failed\n", alg->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", alg->cra_name);
+	}
+	return err;
+}
+
+static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+{
+	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct ahash_alg *ahash;
+	struct crypto_alg *base;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	p->child_alg = n2ahash->alg.halg.base.cra_name;
+	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
+	INIT_LIST_HEAD(&p->derived.entry);
+
+	ahash = &p->derived.alg;
+	ahash->digest = n2_hmac_async_digest;
+	ahash->setkey = n2_hmac_async_setkey;
+
+	base = &ahash->halg.base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+
+	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
+	base->cra_init = n2_hmac_cra_init;
+	base->cra_exit = n2_hmac_cra_exit;
+
+	list_add(&p->derived.entry, &hmac_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
+		list_del(&p->derived.entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
 	}
 	return err;
 }
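__n2_register_one_hmac() clones the just-registered base ahash descriptor, swaps in the HMAC digest and setkey entry points, and derives the algorithm names with snprintf, so each hardware hash automatically gains a matching "hmac(<alg>)" instance visible in /proc/crypto. The naming step in isolation, as a runnable sketch (CRYPTO_MAX_ALG_NAME is 64 in this kernel):

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME 64

int main(void)
{
	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];
	const char *child_alg = "sha1";	/* cra_name of the base ahash */

	/* Same derivation as __n2_register_one_hmac(). */
	snprintf(cra_name, sizeof(cra_name), "hmac(%s)", child_alg);
	snprintf(cra_driver_name, sizeof(cra_driver_name),
		 "hmac-%s-n2", child_alg);
	printf("%s / %s\n", cra_name, cra_driver_name);
	return 0;
}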
@@ -1306,12 +1472,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	if (!p)
 		return -ENOMEM;
 
+	p->hash_zero = tmpl->hash_zero;
+	p->hash_init = tmpl->hash_init;
+	p->auth_type = tmpl->auth_type;
+	p->hmac_type = tmpl->hmac_type;
+	p->hw_op_hashsz = tmpl->hw_op_hashsz;
+	p->digest_size = tmpl->digest_size;
+
 	ahash = &p->alg;
 	ahash->init = n2_hash_async_init;
 	ahash->update = n2_hash_async_update;
 	ahash->final = n2_hash_async_final;
 	ahash->finup = n2_hash_async_finup;
-	ahash->digest = tmpl->digest;
+	ahash->digest = n2_hash_async_digest;
 
 	halg = &ahash->halg;
 	halg->digestsize = tmpl->digest_size;
@@ -1330,9 +1503,14 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	list_add(&p->entry, &ahash_algs);
 	err = crypto_register_ahash(ahash);
 	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
 	}
+	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
+		err = __n2_register_one_hmac(p);
 	return err;
 }
 
@@ -1398,7 +1576,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 
 	intr = ip->ino_table[i].intr;
 
-	dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
 	if (!dev_intrs)
 		return -ENODEV;
 
@@ -1449,7 +1627,7 @@ static int queue_cache_init(void)
 {
 	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
 		queue_cache[HV_NCS_QTYPE_MAU - 1] =
-			kmem_cache_create("cwq_queue",
+			kmem_cache_create("mau_queue",
 					  (MAU_NUM_ENTRIES *
 					   MAU_ENTRY_SIZE),
 					  MAU_ENTRY_SIZE, 0, NULL);
@@ -1574,7 +1752,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 		id = mdesc_get_property(mdesc, tgt, "id", NULL);
 		if (table[*id] != NULL) {
 			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
-				dev->node->full_name);
+				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
 		cpu_set(*id, p->sharing);
@@ -1595,7 +1773,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
 	if (!p) {
 		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
-			dev->node->full_name);
+			dev->dev.of_node->full_name);
 		return -ENOMEM;
 	}
 
@@ -1684,7 +1862,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
 	const unsigned int *reg;
 	u64 node;
 
-	reg = of_get_property(dev->node, "reg", NULL);
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
 	if (!reg)
 		return -ENODEV;
 
@@ -1836,7 +2014,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found N2CP at %s\n", full_name);
 
 	np = alloc_n2cp();
@@ -1948,7 +2126,7 @@ static int __devinit n2_mau_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found NCP at %s\n", full_name);
 
 	mp = alloc_ncp();
@@ -2034,8 +2212,11 @@ static struct of_device_id n2_crypto_match[] = {
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
 static struct of_platform_driver n2_crypto_driver = {
-	.name		=	"n2cp",
-	.match_table	=	n2_crypto_match,
+	.driver = {
+		.name = "n2cp",
+		.owner = THIS_MODULE,
+		.of_match_table = n2_crypto_match,
+	},
 	.probe		=	n2_crypto_probe,
 	.remove		=	__devexit_p(n2_crypto_remove),
 };
@@ -2055,8 +2236,11 @@ static struct of_device_id n2_mau_match[] = {
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
 static struct of_platform_driver n2_mau_driver = {
-	.name		=	"ncp",
-	.match_table	=	n2_mau_match,
+	.driver = {
+		.name = "ncp",
+		.owner = THIS_MODULE,
+		.of_match_table = n2_mau_match,
+	},
 	.probe		=	n2_mau_probe,
 	.remove		=	__devexit_p(n2_mau_remove),
 };
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6a0f59d1fc5c..637c105f53d2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2398,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev,
 			 const struct of_device_id *match)
 {
 	struct device *dev = &ofdev->dev;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct talitos_private *priv;
 	const unsigned int *prop;
 	int i, err;
@@ -2573,8 +2573,11 @@ static const struct of_device_id talitos_match[] = {
 MODULE_DEVICE_TABLE(of, talitos_match);
 
 static struct of_platform_driver talitos_driver = {
-	.name = "talitos",
-	.match_table = talitos_match,
+	.driver = {
+		.name = "talitos",
+		.owner = THIS_MODULE,
+		.of_match_table = talitos_match,
+	},
 	.probe = talitos_probe,
 	.remove = talitos_remove,
 };