author | Jens Axboe <axboe@kernel.dk> | 2013-12-31 11:51:02 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2013-12-31 11:51:02 -0500 |
commit | b28bc9b38c52f63f43e3fd875af982f2240a2859 (patch) | |
tree | 76cdb7b52b58f5685993cc15ed81d1c903023358 /drivers/crypto | |
parent | 8d30726912cb39c3a3ebde06214d54861f8fdde2 (diff) | |
parent | 802eee95bde72fd0cd0f3a5b2098375a487d1eda (diff) |
Merge tag 'v3.13-rc6' into for-3.14/core
Needed to bring blk-mq up to date, since changes have been going in
since for-3.14/core was established.
Fix up merge issues related to the immutable biovec changes.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
block/blk-flush.c
fs/btrfs/check-integrity.c
fs/btrfs/extent_io.c
fs/btrfs/scrub.c
fs/logfs/dev_bdev.c
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/caam/Kconfig | 25
-rw-r--r-- | drivers/crypto/caam/Makefile | 4
-rw-r--r-- | drivers/crypto/caam/caamalg.c | 134
-rw-r--r-- | drivers/crypto/caam/caamhash.c | 88
-rw-r--r-- | drivers/crypto/caam/caamrng.c | 29
-rw-r--r-- | drivers/crypto/caam/ctrl.c | 418
-rw-r--r-- | drivers/crypto/caam/desc.h | 17
-rw-r--r-- | drivers/crypto/caam/intern.h | 20
-rw-r--r-- | drivers/crypto/caam/jr.c | 340
-rw-r--r-- | drivers/crypto/caam/jr.h | 5
-rw-r--r-- | drivers/crypto/caam/regs.h | 14
-rw-r--r-- | drivers/crypto/caam/sg_sw_sec4.h | 34
-rw-r--r-- | drivers/crypto/dcp.c | 49
-rw-r--r-- | drivers/crypto/ixp4xx_crypto.c | 26
-rw-r--r-- | drivers/crypto/mv_cesa.c | 14
-rw-r--r-- | drivers/crypto/omap-aes.c | 6
-rw-r--r-- | drivers/crypto/omap-sham.c | 1
-rw-r--r-- | drivers/crypto/picoxcell_crypto.c | 32
-rw-r--r-- | drivers/crypto/sahara.c | 2
-rw-r--r-- | drivers/crypto/talitos.c | 103
-rw-r--r-- | drivers/crypto/tegra-aes.c | 26
21 files changed, 831 insertions, 556 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ca89f6b84b06..e7555ff4cafd 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM | |||
4 | help | 4 | help |
5 | Enables the driver module for Freescale's Cryptographic Accelerator | 5 | Enables the driver module for Freescale's Cryptographic Accelerator |
6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). | 6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). |
7 | This module adds a job ring operation interface, and configures h/w | 7 | This module creates job ring devices, and configures h/w |
8 | to operate as a DPAA component automatically, depending | 8 | to operate as a DPAA component automatically, depending |
9 | on h/w feature availability. | 9 | on h/w feature availability. |
10 | 10 | ||
11 | To compile this driver as a module, choose M here: the module | 11 | To compile this driver as a module, choose M here: the module |
12 | will be called caam. | 12 | will be called caam. |
13 | 13 | ||
14 | config CRYPTO_DEV_FSL_CAAM_JR | ||
15 | tristate "Freescale CAAM Job Ring driver backend" | ||
16 | depends on CRYPTO_DEV_FSL_CAAM | ||
17 | default y | ||
18 | help | ||
19 | Enables the driver module for Job Rings which are part of | ||
20 | Freescale's Cryptographic Accelerator | ||
21 | and Assurance Module (CAAM). This module adds a job ring operation | ||
22 | interface. | ||
23 | |||
24 | To compile this driver as a module, choose M here: the module | ||
25 | will be called caam_jr. | ||
26 | |||
14 | config CRYPTO_DEV_FSL_CAAM_RINGSIZE | 27 | config CRYPTO_DEV_FSL_CAAM_RINGSIZE |
15 | int "Job Ring size" | 28 | int "Job Ring size" |
16 | depends on CRYPTO_DEV_FSL_CAAM | 29 | depends on CRYPTO_DEV_FSL_CAAM_JR |
17 | range 2 9 | 30 | range 2 9 |
18 | default "9" | 31 | default "9" |
19 | help | 32 | help |
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE | |||
31 | 44 | ||
32 | config CRYPTO_DEV_FSL_CAAM_INTC | 45 | config CRYPTO_DEV_FSL_CAAM_INTC |
33 | bool "Job Ring interrupt coalescing" | 46 | bool "Job Ring interrupt coalescing" |
34 | depends on CRYPTO_DEV_FSL_CAAM | 47 | depends on CRYPTO_DEV_FSL_CAAM_JR |
35 | default n | 48 | default n |
36 | help | 49 | help |
37 | Enable the Job Ring's interrupt coalescing feature. | 50 | Enable the Job Ring's interrupt coalescing feature. |
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | |||
62 | 75 | ||
63 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | 76 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
64 | tristate "Register algorithm implementations with the Crypto API" | 77 | tristate "Register algorithm implementations with the Crypto API" |
65 | depends on CRYPTO_DEV_FSL_CAAM | 78 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
66 | default y | 79 | default y |
67 | select CRYPTO_ALGAPI | 80 | select CRYPTO_ALGAPI |
68 | select CRYPTO_AUTHENC | 81 | select CRYPTO_AUTHENC |
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
76 | 89 | ||
77 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | 90 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
78 | tristate "Register hash algorithm implementations with Crypto API" | 91 | tristate "Register hash algorithm implementations with Crypto API" |
79 | depends on CRYPTO_DEV_FSL_CAAM | 92 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
80 | default y | 93 | default y |
81 | select CRYPTO_HASH | 94 | select CRYPTO_HASH |
82 | help | 95 | help |
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API | |||
88 | 101 | ||
89 | config CRYPTO_DEV_FSL_CAAM_RNG_API | 102 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
90 | tristate "Register caam device for hwrng API" | 103 | tristate "Register caam device for hwrng API" |
91 | depends on CRYPTO_DEV_FSL_CAAM | 104 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
92 | default y | 105 | default y |
93 | select CRYPTO_RNG | 106 | select CRYPTO_RNG |
94 | select HW_RANDOM | 107 | select HW_RANDOM |
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index d56bd0ec65d8..550758a333e7 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) | |||
6 | endif | 6 | endif |
7 | 7 | ||
8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o | ||
9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o |
11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o |
12 | 13 | ||
13 | caam-objs := ctrl.o jr.o error.o key_gen.o | 14 | caam-objs := ctrl.o |
15 | caam_jr-objs := jr.o key_gen.o error.o | ||
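Taken together, the Kconfig and Makefile hunks split the old monolithic caam module in two: caam keeps only the controller (ctrl.o), while the new caam_jr module owns the job rings plus the key-generation and error-reporting helpers (jr.o, key_gen.o, error.o). Consumers now reach a ring through a small allocation API instead of the controller's private data. A minimal sketch of that interface, with signatures reconstructed from the call sites in the hunks below (jr.h in this series carries the authoritative declarations):

```c
/* Sketch reconstructed from call sites below -- not copied from jr.h. */
struct device *caam_jr_alloc(void);	/* reserve a job ring device, balanced
					 * across rings via the per-ring
					 * tfm_count (see intern.h below);
					 * returns ERR_PTR() on failure */
void caam_jr_free(struct device *rdev);	/* release the ring taken above */
```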
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@ | |||
86 | #else | 86 | #else |
87 | #define debug(format, arg...) | 87 | #define debug(format, arg...) |
88 | #endif | 88 | #endif |
89 | static struct list_head alg_list; | ||
89 | 90 | ||
90 | /* Set DK bit in class 1 operation if shared */ | 91 | /* Set DK bit in class 1 operation if shared */ |
91 | static inline void append_dec_op1(u32 *desc, u32 type) | 92 | static inline void append_dec_op1(u32 *desc, u32 type) |
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
817 | ivsize, 1); | 818 | ivsize, 1); |
818 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", | 819 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
819 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), | 820 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), |
820 | req->cryptlen, 1); | 821 | req->cryptlen - ctx->authsize, 1); |
821 | #endif | 822 | #endif |
822 | 823 | ||
823 | if (err) { | 824 | if (err) { |
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
971 | (edesc->src_nents ? : 1); | 972 | (edesc->src_nents ? : 1); |
972 | in_options = LDST_SGF; | 973 | in_options = LDST_SGF; |
973 | } | 974 | } |
974 | if (encrypt) | 975 | |
975 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | 976 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, |
976 | req->cryptlen - authsize, in_options); | 977 | in_options); |
977 | else | ||
978 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | ||
979 | req->cryptlen, in_options); | ||
980 | 978 | ||
981 | if (likely(req->src == req->dst)) { | 979 | if (likely(req->src == req->dst)) { |
982 | if (all_contig) { | 980 | if (all_contig) { |
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
997 | } | 995 | } |
998 | } | 996 | } |
999 | if (encrypt) | 997 | if (encrypt) |
1000 | append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); | 998 | append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, |
999 | out_options); | ||
1001 | else | 1000 | else |
1002 | append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, | 1001 | append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, |
1003 | out_options); | 1002 | out_options); |
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1047 | sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; | 1046 | sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; |
1048 | in_options = LDST_SGF; | 1047 | in_options = LDST_SGF; |
1049 | } | 1048 | } |
1050 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | 1049 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, |
1051 | req->cryptlen - authsize, in_options); | 1050 | in_options); |
1052 | 1051 | ||
1053 | if (contig & GIV_DST_CONTIG) { | 1052 | if (contig & GIV_DST_CONTIG) { |
1054 | dst_dma = edesc->iv_dma; | 1053 | dst_dma = edesc->iv_dma; |
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1065 | } | 1064 | } |
1066 | } | 1065 | } |
1067 | 1066 | ||
1068 | append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); | 1067 | append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, |
1068 | out_options); | ||
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | /* | 1071 | /* |
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1129 | * allocate and map the aead extended descriptor | 1129 | * allocate and map the aead extended descriptor |
1130 | */ | 1130 | */ |
1131 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | 1131 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
1132 | int desc_bytes, bool *all_contig_ptr) | 1132 | int desc_bytes, bool *all_contig_ptr, |
1133 | bool encrypt) | ||
1133 | { | 1134 | { |
1134 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1135 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
1135 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1136 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1144 | bool assoc_chained = false, src_chained = false, dst_chained = false; | 1145 | bool assoc_chained = false, src_chained = false, dst_chained = false; |
1145 | int ivsize = crypto_aead_ivsize(aead); | 1146 | int ivsize = crypto_aead_ivsize(aead); |
1146 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | 1147 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
1148 | unsigned int authsize = ctx->authsize; | ||
1147 | 1149 | ||
1148 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); | 1150 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); |
1149 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); | ||
1150 | 1151 | ||
1151 | if (unlikely(req->dst != req->src)) | 1152 | if (unlikely(req->dst != req->src)) { |
1152 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); | 1153 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
1154 | dst_nents = sg_count(req->dst, | ||
1155 | req->cryptlen + | ||
1156 | (encrypt ? authsize : (-authsize)), | ||
1157 | &dst_chained); | ||
1158 | } else { | ||
1159 | src_nents = sg_count(req->src, | ||
1160 | req->cryptlen + | ||
1161 | (encrypt ? authsize : 0), | ||
1162 | &src_chained); | ||
1163 | } | ||
1153 | 1164 | ||
1154 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, | 1165 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1155 | DMA_TO_DEVICE, assoc_chained); | 1166 | DMA_TO_DEVICE, assoc_chained); |
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req) | |||
1233 | u32 *desc; | 1244 | u32 *desc; |
1234 | int ret = 0; | 1245 | int ret = 0; |
1235 | 1246 | ||
1236 | req->cryptlen += ctx->authsize; | ||
1237 | |||
1238 | /* allocate extended descriptor */ | 1247 | /* allocate extended descriptor */ |
1239 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 1248 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
1240 | CAAM_CMD_SZ, &all_contig); | 1249 | CAAM_CMD_SZ, &all_contig, true); |
1241 | if (IS_ERR(edesc)) | 1250 | if (IS_ERR(edesc)) |
1242 | return PTR_ERR(edesc); | 1251 | return PTR_ERR(edesc); |
1243 | 1252 | ||
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1274 | 1283 | ||
1275 | /* allocate extended descriptor */ | 1284 | /* allocate extended descriptor */ |
1276 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 1285 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
1277 | CAAM_CMD_SZ, &all_contig); | 1286 | CAAM_CMD_SZ, &all_contig, false); |
1278 | if (IS_ERR(edesc)) | 1287 | if (IS_ERR(edesc)) |
1279 | return PTR_ERR(edesc); | 1288 | return PTR_ERR(edesc); |
1280 | 1289 | ||
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
1331 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); | 1340 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
1332 | 1341 | ||
1333 | if (unlikely(req->dst != req->src)) | 1342 | if (unlikely(req->dst != req->src)) |
1334 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); | 1343 | dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, |
1344 | &dst_chained); | ||
1335 | 1345 | ||
1336 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, | 1346 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1337 | DMA_TO_DEVICE, assoc_chained); | 1347 | DMA_TO_DEVICE, assoc_chained); |
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
1425 | u32 *desc; | 1435 | u32 *desc; |
1426 | int ret = 0; | 1436 | int ret = 0; |
1427 | 1437 | ||
1428 | req->cryptlen += ctx->authsize; | ||
1429 | |||
1430 | /* allocate extended descriptor */ | 1438 | /* allocate extended descriptor */ |
1431 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * | 1439 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * |
1432 | CAAM_CMD_SZ, &contig); | 1440 | CAAM_CMD_SZ, &contig); |
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = { | |||
2057 | 2065 | ||
2058 | struct caam_crypto_alg { | 2066 | struct caam_crypto_alg { |
2059 | struct list_head entry; | 2067 | struct list_head entry; |
2060 | struct device *ctrldev; | ||
2061 | int class1_alg_type; | 2068 | int class1_alg_type; |
2062 | int class2_alg_type; | 2069 | int class2_alg_type; |
2063 | int alg_op; | 2070 | int alg_op; |
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
2070 | struct caam_crypto_alg *caam_alg = | 2077 | struct caam_crypto_alg *caam_alg = |
2071 | container_of(alg, struct caam_crypto_alg, crypto_alg); | 2078 | container_of(alg, struct caam_crypto_alg, crypto_alg); |
2072 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 2079 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
2073 | struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); | ||
2074 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
2075 | 2080 | ||
2076 | /* | 2081 | ctx->jrdev = caam_jr_alloc(); |
2077 | * distribute tfms across job rings to ensure in-order | 2082 | if (IS_ERR(ctx->jrdev)) { |
2078 | * crypto request processing per tfm | 2083 | pr_err("Job Ring Device allocation for transform failed\n"); |
2079 | */ | 2084 | return PTR_ERR(ctx->jrdev); |
2080 | ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; | 2085 | } |
2081 | 2086 | ||
2082 | /* copy descriptor header template value */ | 2087 | /* copy descriptor header template value */ |
2083 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | 2088 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; |
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
2104 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, | 2109 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, |
2105 | desc_bytes(ctx->sh_desc_givenc), | 2110 | desc_bytes(ctx->sh_desc_givenc), |
2106 | DMA_TO_DEVICE); | 2111 | DMA_TO_DEVICE); |
2112 | |||
2113 | caam_jr_free(ctx->jrdev); | ||
2107 | } | 2114 | } |
2108 | 2115 | ||
2109 | static void __exit caam_algapi_exit(void) | 2116 | static void __exit caam_algapi_exit(void) |
2110 | { | 2117 | { |
2111 | 2118 | ||
2112 | struct device_node *dev_node; | ||
2113 | struct platform_device *pdev; | ||
2114 | struct device *ctrldev; | ||
2115 | struct caam_drv_private *priv; | ||
2116 | struct caam_crypto_alg *t_alg, *n; | 2119 | struct caam_crypto_alg *t_alg, *n; |
2117 | 2120 | ||
2118 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2121 | if (!alg_list.next) |
2119 | if (!dev_node) { | ||
2120 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
2121 | if (!dev_node) | ||
2122 | return; | ||
2123 | } | ||
2124 | |||
2125 | pdev = of_find_device_by_node(dev_node); | ||
2126 | if (!pdev) | ||
2127 | return; | ||
2128 | |||
2129 | ctrldev = &pdev->dev; | ||
2130 | of_node_put(dev_node); | ||
2131 | priv = dev_get_drvdata(ctrldev); | ||
2132 | |||
2133 | if (!priv->alg_list.next) | ||
2134 | return; | 2122 | return; |
2135 | 2123 | ||
2136 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | 2124 | list_for_each_entry_safe(t_alg, n, &alg_list, entry) { |
2137 | crypto_unregister_alg(&t_alg->crypto_alg); | 2125 | crypto_unregister_alg(&t_alg->crypto_alg); |
2138 | list_del(&t_alg->entry); | 2126 | list_del(&t_alg->entry); |
2139 | kfree(t_alg); | 2127 | kfree(t_alg); |
2140 | } | 2128 | } |
2141 | } | 2129 | } |
2142 | 2130 | ||
2143 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | 2131 | static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template |
2144 | struct caam_alg_template | ||
2145 | *template) | 2132 | *template) |
2146 | { | 2133 | { |
2147 | struct caam_crypto_alg *t_alg; | 2134 | struct caam_crypto_alg *t_alg; |
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
2149 | 2136 | ||
2150 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | 2137 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); |
2151 | if (!t_alg) { | 2138 | if (!t_alg) { |
2152 | dev_err(ctrldev, "failed to allocate t_alg\n"); | 2139 | pr_err("failed to allocate t_alg\n"); |
2153 | return ERR_PTR(-ENOMEM); | 2140 | return ERR_PTR(-ENOMEM); |
2154 | } | 2141 | } |
2155 | 2142 | ||
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
2181 | t_alg->class1_alg_type = template->class1_alg_type; | 2168 | t_alg->class1_alg_type = template->class1_alg_type; |
2182 | t_alg->class2_alg_type = template->class2_alg_type; | 2169 | t_alg->class2_alg_type = template->class2_alg_type; |
2183 | t_alg->alg_op = template->alg_op; | 2170 | t_alg->alg_op = template->alg_op; |
2184 | t_alg->ctrldev = ctrldev; | ||
2185 | 2171 | ||
2186 | return t_alg; | 2172 | return t_alg; |
2187 | } | 2173 | } |
2188 | 2174 | ||
2189 | static int __init caam_algapi_init(void) | 2175 | static int __init caam_algapi_init(void) |
2190 | { | 2176 | { |
2191 | struct device_node *dev_node; | ||
2192 | struct platform_device *pdev; | ||
2193 | struct device *ctrldev; | ||
2194 | struct caam_drv_private *priv; | ||
2195 | int i = 0, err = 0; | 2177 | int i = 0, err = 0; |
2196 | 2178 | ||
2197 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2179 | INIT_LIST_HEAD(&alg_list); |
2198 | if (!dev_node) { | ||
2199 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
2200 | if (!dev_node) | ||
2201 | return -ENODEV; | ||
2202 | } | ||
2203 | |||
2204 | pdev = of_find_device_by_node(dev_node); | ||
2205 | if (!pdev) | ||
2206 | return -ENODEV; | ||
2207 | |||
2208 | ctrldev = &pdev->dev; | ||
2209 | priv = dev_get_drvdata(ctrldev); | ||
2210 | of_node_put(dev_node); | ||
2211 | |||
2212 | INIT_LIST_HEAD(&priv->alg_list); | ||
2213 | |||
2214 | atomic_set(&priv->tfm_count, -1); | ||
2215 | 2180 | ||
2216 | /* register crypto algorithms the device supports */ | 2181 | /* register crypto algorithms the device supports */ |
2217 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2182 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
2218 | /* TODO: check if h/w supports alg */ | 2183 | /* TODO: check if h/w supports alg */ |
2219 | struct caam_crypto_alg *t_alg; | 2184 | struct caam_crypto_alg *t_alg; |
2220 | 2185 | ||
2221 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | 2186 | t_alg = caam_alg_alloc(&driver_algs[i]); |
2222 | if (IS_ERR(t_alg)) { | 2187 | if (IS_ERR(t_alg)) { |
2223 | err = PTR_ERR(t_alg); | 2188 | err = PTR_ERR(t_alg); |
2224 | dev_warn(ctrldev, "%s alg allocation failed\n", | 2189 | pr_warn("%s alg allocation failed\n", |
2225 | driver_algs[i].driver_name); | 2190 | driver_algs[i].driver_name); |
2226 | continue; | 2191 | continue; |
2227 | } | 2192 | } |
2228 | 2193 | ||
2229 | err = crypto_register_alg(&t_alg->crypto_alg); | 2194 | err = crypto_register_alg(&t_alg->crypto_alg); |
2230 | if (err) { | 2195 | if (err) { |
2231 | dev_warn(ctrldev, "%s alg registration failed\n", | 2196 | pr_warn("%s alg registration failed\n", |
2232 | t_alg->crypto_alg.cra_driver_name); | 2197 | t_alg->crypto_alg.cra_driver_name); |
2233 | kfree(t_alg); | 2198 | kfree(t_alg); |
2234 | } else | 2199 | } else |
2235 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2200 | list_add_tail(&t_alg->entry, &alg_list); |
2236 | } | 2201 | } |
2237 | if (!list_empty(&priv->alg_list)) | 2202 | if (!list_empty(&alg_list)) |
2238 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", | 2203 | pr_info("caam algorithms registered in /proc/crypto\n"); |
2239 | (char *)of_get_property(dev_node, "compatible", NULL)); | ||
2240 | 2204 | ||
2241 | return err; | 2205 | return err; |
2242 | } | 2206 | } |
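Two changes run through caamalg.c. First, each transform now takes its own job ring with caam_jr_alloc() in caam_cra_init() and returns it in caam_cra_exit(), which lets the file-scope alg_list replace the controller-private list and removes the of_find_compatible_node() boilerplate from module init/exit. Second, aead_encrypt()/aead_givencrypt() no longer inflate req->cryptlen by authsize up front; the ICV is accounted for exactly where the sequence pointers and scatterlist counts are built. The resulting length arithmetic, condensed into a hypothetical helper for illustration (in this AEAD API, req->cryptlen already includes the ICV on the decrypt path):

```c
#include <linux/types.h>

/* Hypothetical helper, not part of caamalg.c -- mirrors init_aead_job(). */
static inline void aead_seq_lens(unsigned int assoclen, unsigned int ivsize,
				 unsigned int cryptlen, unsigned int authsize,
				 bool encrypt,
				 unsigned int *in_len, unsigned int *out_len)
{
	/* the input sequence is now the same in both directions */
	*in_len = assoclen + ivsize + cryptlen;
	/* encrypt appends the ICV; decrypt consumes and verifies it */
	*out_len = encrypt ? cryptlen + authsize : cryptlen - authsize;
}
```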
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e732bd962e98..0378328f47a7 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -94,6 +94,9 @@ | |||
94 | #define debug(format, arg...) | 94 | #define debug(format, arg...) |
95 | #endif | 95 | #endif |
96 | 96 | ||
97 | |||
98 | static struct list_head hash_list; | ||
99 | |||
97 | /* ahash per-session context */ | 100 | /* ahash per-session context */ |
98 | struct caam_hash_ctx { | 101 | struct caam_hash_ctx { |
99 | struct device *jrdev; | 102 | struct device *jrdev; |
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = { | |||
1653 | 1656 | ||
1654 | struct caam_hash_alg { | 1657 | struct caam_hash_alg { |
1655 | struct list_head entry; | 1658 | struct list_head entry; |
1656 | struct device *ctrldev; | ||
1657 | int alg_type; | 1659 | int alg_type; |
1658 | int alg_op; | 1660 | int alg_op; |
1659 | struct ahash_alg ahash_alg; | 1661 | struct ahash_alg ahash_alg; |
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
1670 | struct caam_hash_alg *caam_hash = | 1672 | struct caam_hash_alg *caam_hash = |
1671 | container_of(alg, struct caam_hash_alg, ahash_alg); | 1673 | container_of(alg, struct caam_hash_alg, ahash_alg); |
1672 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 1674 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
1673 | struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); | ||
1674 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ | 1675 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ |
1675 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | 1676 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, |
1676 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | 1677 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, |
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
1678 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | 1679 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, |
1679 | HASH_MSG_LEN + 64, | 1680 | HASH_MSG_LEN + 64, |
1680 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | 1681 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; |
1681 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
1682 | int ret = 0; | 1682 | int ret = 0; |
1683 | 1683 | ||
1684 | /* | 1684 | /* |
1685 | * distribute tfms across job rings to ensure in-order | 1685 | * Get a Job Ring from the Job Ring driver to ensure in-order |
1686 | * crypto request processing per tfm | 1686 | * crypto request processing per tfm |
1687 | */ | 1687 | */ |
1688 | ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; | 1688 | ctx->jrdev = caam_jr_alloc(); |
1689 | 1689 | if (IS_ERR(ctx->jrdev)) { | |
1690 | pr_err("Job Ring Device allocation for transform failed\n"); | ||
1691 | return PTR_ERR(ctx->jrdev); | ||
1692 | } | ||
1690 | /* copy descriptor header template value */ | 1693 | /* copy descriptor header template value */ |
1691 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | 1694 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; |
1692 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | 1695 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; |
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
1729 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | 1732 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) |
1730 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | 1733 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, |
1731 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | 1734 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); |
1735 | |||
1736 | caam_jr_free(ctx->jrdev); | ||
1732 | } | 1737 | } |
1733 | 1738 | ||
1734 | static void __exit caam_algapi_hash_exit(void) | 1739 | static void __exit caam_algapi_hash_exit(void) |
1735 | { | 1740 | { |
1736 | struct device_node *dev_node; | ||
1737 | struct platform_device *pdev; | ||
1738 | struct device *ctrldev; | ||
1739 | struct caam_drv_private *priv; | ||
1740 | struct caam_hash_alg *t_alg, *n; | 1741 | struct caam_hash_alg *t_alg, *n; |
1741 | 1742 | ||
1742 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1743 | if (!hash_list.next) |
1743 | if (!dev_node) { | ||
1744 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
1745 | if (!dev_node) | ||
1746 | return; | ||
1747 | } | ||
1748 | |||
1749 | pdev = of_find_device_by_node(dev_node); | ||
1750 | if (!pdev) | ||
1751 | return; | 1744 | return; |
1752 | 1745 | ||
1753 | ctrldev = &pdev->dev; | 1746 | list_for_each_entry_safe(t_alg, n, &hash_list, entry) { |
1754 | of_node_put(dev_node); | ||
1755 | priv = dev_get_drvdata(ctrldev); | ||
1756 | |||
1757 | if (!priv->hash_list.next) | ||
1758 | return; | ||
1759 | |||
1760 | list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { | ||
1761 | crypto_unregister_ahash(&t_alg->ahash_alg); | 1747 | crypto_unregister_ahash(&t_alg->ahash_alg); |
1762 | list_del(&t_alg->entry); | 1748 | list_del(&t_alg->entry); |
1763 | kfree(t_alg); | 1749 | kfree(t_alg); |
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void) | |||
1765 | } | 1751 | } |
1766 | 1752 | ||
1767 | static struct caam_hash_alg * | 1753 | static struct caam_hash_alg * |
1768 | caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | 1754 | caam_hash_alloc(struct caam_hash_template *template, |
1769 | bool keyed) | 1755 | bool keyed) |
1770 | { | 1756 | { |
1771 | struct caam_hash_alg *t_alg; | 1757 | struct caam_hash_alg *t_alg; |
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | |||
1774 | 1760 | ||
1775 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | 1761 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); |
1776 | if (!t_alg) { | 1762 | if (!t_alg) { |
1777 | dev_err(ctrldev, "failed to allocate t_alg\n"); | 1763 | pr_err("failed to allocate t_alg\n"); |
1778 | return ERR_PTR(-ENOMEM); | 1764 | return ERR_PTR(-ENOMEM); |
1779 | } | 1765 | } |
1780 | 1766 | ||
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | |||
1805 | 1791 | ||
1806 | t_alg->alg_type = template->alg_type; | 1792 | t_alg->alg_type = template->alg_type; |
1807 | t_alg->alg_op = template->alg_op; | 1793 | t_alg->alg_op = template->alg_op; |
1808 | t_alg->ctrldev = ctrldev; | ||
1809 | 1794 | ||
1810 | return t_alg; | 1795 | return t_alg; |
1811 | } | 1796 | } |
1812 | 1797 | ||
1813 | static int __init caam_algapi_hash_init(void) | 1798 | static int __init caam_algapi_hash_init(void) |
1814 | { | 1799 | { |
1815 | struct device_node *dev_node; | ||
1816 | struct platform_device *pdev; | ||
1817 | struct device *ctrldev; | ||
1818 | struct caam_drv_private *priv; | ||
1819 | int i = 0, err = 0; | 1800 | int i = 0, err = 0; |
1820 | 1801 | ||
1821 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1802 | INIT_LIST_HEAD(&hash_list); |
1822 | if (!dev_node) { | ||
1823 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
1824 | if (!dev_node) | ||
1825 | return -ENODEV; | ||
1826 | } | ||
1827 | |||
1828 | pdev = of_find_device_by_node(dev_node); | ||
1829 | if (!pdev) | ||
1830 | return -ENODEV; | ||
1831 | |||
1832 | ctrldev = &pdev->dev; | ||
1833 | priv = dev_get_drvdata(ctrldev); | ||
1834 | of_node_put(dev_node); | ||
1835 | |||
1836 | INIT_LIST_HEAD(&priv->hash_list); | ||
1837 | |||
1838 | atomic_set(&priv->tfm_count, -1); | ||
1839 | 1803 | ||
1840 | /* register crypto algorithms the device supports */ | 1804 | /* register crypto algorithms the device supports */ |
1841 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | 1805 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { |
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void) | |||
1843 | struct caam_hash_alg *t_alg; | 1807 | struct caam_hash_alg *t_alg; |
1844 | 1808 | ||
1845 | /* register hmac version */ | 1809 | /* register hmac version */ |
1846 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); | 1810 | t_alg = caam_hash_alloc(&driver_hash[i], true); |
1847 | if (IS_ERR(t_alg)) { | 1811 | if (IS_ERR(t_alg)) { |
1848 | err = PTR_ERR(t_alg); | 1812 | err = PTR_ERR(t_alg); |
1849 | dev_warn(ctrldev, "%s alg allocation failed\n", | 1813 | pr_warn("%s alg allocation failed\n", |
1850 | driver_hash[i].driver_name); | 1814 | driver_hash[i].driver_name); |
1851 | continue; | 1815 | continue; |
1852 | } | 1816 | } |
1853 | 1817 | ||
1854 | err = crypto_register_ahash(&t_alg->ahash_alg); | 1818 | err = crypto_register_ahash(&t_alg->ahash_alg); |
1855 | if (err) { | 1819 | if (err) { |
1856 | dev_warn(ctrldev, "%s alg registration failed\n", | 1820 | pr_warn("%s alg registration failed\n", |
1857 | t_alg->ahash_alg.halg.base.cra_driver_name); | 1821 | t_alg->ahash_alg.halg.base.cra_driver_name); |
1858 | kfree(t_alg); | 1822 | kfree(t_alg); |
1859 | } else | 1823 | } else |
1860 | list_add_tail(&t_alg->entry, &priv->hash_list); | 1824 | list_add_tail(&t_alg->entry, &hash_list); |
1861 | 1825 | ||
1862 | /* register unkeyed version */ | 1826 | /* register unkeyed version */ |
1863 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); | 1827 | t_alg = caam_hash_alloc(&driver_hash[i], false); |
1864 | if (IS_ERR(t_alg)) { | 1828 | if (IS_ERR(t_alg)) { |
1865 | err = PTR_ERR(t_alg); | 1829 | err = PTR_ERR(t_alg); |
1866 | dev_warn(ctrldev, "%s alg allocation failed\n", | 1830 | pr_warn("%s alg allocation failed\n", |
1867 | driver_hash[i].driver_name); | 1831 | driver_hash[i].driver_name); |
1868 | continue; | 1832 | continue; |
1869 | } | 1833 | } |
1870 | 1834 | ||
1871 | err = crypto_register_ahash(&t_alg->ahash_alg); | 1835 | err = crypto_register_ahash(&t_alg->ahash_alg); |
1872 | if (err) { | 1836 | if (err) { |
1873 | dev_warn(ctrldev, "%s alg registration failed\n", | 1837 | pr_warn("%s alg registration failed\n", |
1874 | t_alg->ahash_alg.halg.base.cra_driver_name); | 1838 | t_alg->ahash_alg.halg.base.cra_driver_name); |
1875 | kfree(t_alg); | 1839 | kfree(t_alg); |
1876 | } else | 1840 | } else |
1877 | list_add_tail(&t_alg->entry, &priv->hash_list); | 1841 | list_add_tail(&t_alg->entry, &hash_list); |
1878 | } | 1842 | } |
1879 | 1843 | ||
1880 | return err; | 1844 | return err; |
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..28486b19fc36 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = { | |||
273 | 273 | ||
274 | static void __exit caam_rng_exit(void) | 274 | static void __exit caam_rng_exit(void) |
275 | { | 275 | { |
276 | caam_jr_free(rng_ctx.jrdev); | ||
276 | hwrng_unregister(&caam_rng); | 277 | hwrng_unregister(&caam_rng); |
277 | } | 278 | } |
278 | 279 | ||
279 | static int __init caam_rng_init(void) | 280 | static int __init caam_rng_init(void) |
280 | { | 281 | { |
281 | struct device_node *dev_node; | 282 | struct device *dev; |
282 | struct platform_device *pdev; | ||
283 | struct device *ctrldev; | ||
284 | struct caam_drv_private *priv; | ||
285 | |||
286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
287 | if (!dev_node) { | ||
288 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
289 | if (!dev_node) | ||
290 | return -ENODEV; | ||
291 | } | ||
292 | |||
293 | pdev = of_find_device_by_node(dev_node); | ||
294 | if (!pdev) | ||
295 | return -ENODEV; | ||
296 | 283 | ||
297 | ctrldev = &pdev->dev; | 284 | dev = caam_jr_alloc(); |
298 | priv = dev_get_drvdata(ctrldev); | 285 | if (IS_ERR(dev)) { |
299 | of_node_put(dev_node); | 286 | pr_err("Job Ring Device allocation for transform failed\n"); |
287 | return PTR_ERR(dev); | ||
288 | } | ||
300 | 289 | ||
301 | caam_init_rng(&rng_ctx, priv->jrdev[0]); | 290 | caam_init_rng(&rng_ctx, dev); |
302 | 291 | ||
303 | dev_info(priv->jrdev[0], "registering rng-caam\n"); | 292 | dev_info(dev, "registering rng-caam\n"); |
304 | return hwrng_register(&caam_rng); | 293 | return hwrng_register(&caam_rng); |
305 | } | 294 | } |
306 | 295 | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bc6d820812b6..63fb1af2c431 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,82 +16,75 @@ | |||
16 | #include "error.h" | 16 | #include "error.h" |
17 | #include "ctrl.h" | 17 | #include "ctrl.h" |
18 | 18 | ||
19 | static int caam_remove(struct platform_device *pdev) | ||
20 | { | ||
21 | struct device *ctrldev; | ||
22 | struct caam_drv_private *ctrlpriv; | ||
23 | struct caam_drv_private_jr *jrpriv; | ||
24 | struct caam_full __iomem *topregs; | ||
25 | int ring, ret = 0; | ||
26 | |||
27 | ctrldev = &pdev->dev; | ||
28 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
29 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
30 | |||
31 | /* shut down JobRs */ | ||
32 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
33 | ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]); | ||
34 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | ||
35 | irq_dispose_mapping(jrpriv->irq); | ||
36 | } | ||
37 | |||
38 | /* Shut down debug views */ | ||
39 | #ifdef CONFIG_DEBUG_FS | ||
40 | debugfs_remove_recursive(ctrlpriv->dfs_root); | ||
41 | #endif | ||
42 | |||
43 | /* Unmap controller region */ | ||
44 | iounmap(&topregs->ctrl); | ||
45 | |||
46 | kfree(ctrlpriv->jrdev); | ||
47 | kfree(ctrlpriv); | ||
48 | |||
49 | return ret; | ||
50 | } | ||
51 | |||
52 | /* | 19 | /* |
53 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | 20 | * Descriptor to instantiate RNG State Handle 0 in normal mode and |
54 | * load the JDKEK, TDKEK and TDSK registers | 21 | * load the JDKEK, TDKEK and TDSK registers |
55 | */ | 22 | */ |
56 | static void build_instantiation_desc(u32 *desc) | 23 | static void build_instantiation_desc(u32 *desc, int handle, int do_sk) |
57 | { | 24 | { |
58 | u32 *jump_cmd; | 25 | u32 *jump_cmd, op_flags; |
59 | 26 | ||
60 | init_job_desc(desc, 0); | 27 | init_job_desc(desc, 0); |
61 | 28 | ||
29 | op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
30 | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT; | ||
31 | |||
62 | /* INIT RNG in non-test mode */ | 32 | /* INIT RNG in non-test mode */ |
63 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | 33 | append_operation(desc, op_flags); |
64 | OP_ALG_AS_INIT); | 34 | |
35 | if (!handle && do_sk) { | ||
36 | /* | ||
37 | * For SH0, Secure Keys must be generated as well | ||
38 | */ | ||
39 | |||
40 | /* wait for done */ | ||
41 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | ||
42 | set_jump_tgt_here(desc, jump_cmd); | ||
43 | |||
44 | /* | ||
45 | * load 1 to clear written reg: | ||
46 | * resets the done interrupt and returns the RNG to idle. | ||
47 | */ | ||
48 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
49 | |||
50 | /* Initialize State Handle */ | ||
51 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
52 | OP_ALG_AAI_RNG4_SK); | ||
53 | } | ||
65 | 54 | ||
66 | /* wait for done */ | 55 | append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); |
67 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | 56 | } |
68 | set_jump_tgt_here(desc, jump_cmd); | ||
69 | 57 | ||
70 | /* | 58 | /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ |
71 | * load 1 to clear written reg: | 59 | static void build_deinstantiation_desc(u32 *desc, int handle) |
72 | * resets the done interrupt and returns the RNG to idle. | 60 | { |
73 | */ | 61 | init_job_desc(desc, 0); |
74 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
75 | 62 | ||
76 | /* generate secure keys (non-test) */ | 63 | /* Uninstantiate State Handle 0 */ |
77 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | 64 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | |
78 | OP_ALG_RNG4_SK); | 65 | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); |
66 | |||
67 | append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); | ||
79 | } | 68 | } |
80 | 69 | ||
81 | static int instantiate_rng(struct device *ctrldev) | 70 | /* |
71 | * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of | ||
72 | * the software (no JR/QI used). | ||
73 | * @ctrldev - pointer to device | ||
74 | * @status - descriptor status, after being run | ||
75 | * | ||
76 | * Return: - 0 if no error occurred | ||
77 | * - -ENODEV if the DECO couldn't be acquired | ||
78 | * - -EAGAIN if an error occurred while executing the descriptor | ||
79 | */ | ||
80 | static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | ||
81 | u32 *status) | ||
82 | { | 82 | { |
83 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 83 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
84 | struct caam_full __iomem *topregs; | 84 | struct caam_full __iomem *topregs; |
85 | unsigned int timeout = 100000; | 85 | unsigned int timeout = 100000; |
86 | u32 *desc; | 86 | u32 deco_dbg_reg, flags; |
87 | int i, ret = 0; | 87 | int i; |
88 | |||
89 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); | ||
90 | if (!desc) { | ||
91 | dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); | ||
92 | return -ENOMEM; | ||
93 | } | ||
94 | build_instantiation_desc(desc); | ||
95 | 88 | ||
96 | /* Set the bit to request direct access to DECO0 */ | 89 | /* Set the bit to request direct access to DECO0 */ |
97 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | 90 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; |
@@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev) | |||
103 | 96 | ||
104 | if (!timeout) { | 97 | if (!timeout) { |
105 | dev_err(ctrldev, "failed to acquire DECO 0\n"); | 98 | dev_err(ctrldev, "failed to acquire DECO 0\n"); |
106 | ret = -EIO; | 99 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
107 | goto out; | 100 | return -ENODEV; |
108 | } | 101 | } |
109 | 102 | ||
110 | for (i = 0; i < desc_len(desc); i++) | 103 | for (i = 0; i < desc_len(desc); i++) |
111 | topregs->deco.descbuf[i] = *(desc + i); | 104 | wr_reg32(&topregs->deco.descbuf[i], *(desc + i)); |
105 | |||
106 | flags = DECO_JQCR_WHL; | ||
107 | /* | ||
108 | * If the descriptor length is 4 words or longer, then the | ||
109 | * FOUR bit in the JRCTRL register must be set. | ||
110 | */ | ||
111 | if (desc_len(desc) >= 4) | ||
112 | flags |= DECO_JQCR_FOUR; | ||
112 | 113 | ||
113 | wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); | 114 | /* Instruct the DECO to execute it */ |
115 | wr_reg32(&topregs->deco.jr_ctl_hi, flags); | ||
114 | 116 | ||
115 | timeout = 10000000; | 117 | timeout = 10000000; |
116 | while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && | 118 | do { |
117 | --timeout) | 119 | deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg); |
120 | /* | ||
121 | * If an error occurred in the descriptor, then | ||
122 | * the DECO status field will be set to 0x0D | ||
123 | */ | ||
124 | if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == | ||
125 | DESC_DBG_DECO_STAT_HOST_ERR) | ||
126 | break; | ||
118 | cpu_relax(); | 127 | cpu_relax(); |
128 | } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); | ||
119 | 129 | ||
120 | if (!timeout) { | 130 | *status = rd_reg32(&topregs->deco.op_status_hi) & |
121 | dev_err(ctrldev, "failed to instantiate RNG\n"); | 131 | DECO_OP_STATUS_HI_ERR_MASK; |
122 | ret = -EIO; | ||
123 | } | ||
124 | 132 | ||
133 | /* Mark the DECO as free */ | ||
125 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); | 134 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
126 | out: | 135 | |
136 | if (!timeout) | ||
137 | return -EAGAIN; | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * instantiate_rng - builds and executes a descriptor on DECO0, | ||
144 | * which initializes the RNG block. | ||
145 | * @ctrldev - pointer to device | ||
146 | * @state_handle_mask - bitmask containing the instantiation status | ||
147 | * for the RNG4 state handles which exist in | ||
148 | * the RNG4 block: 1 if it's been instantiated | ||
149 | * by an external entry, 0 otherwise. | ||
150 | * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; | ||
151 | * Caution: this can be done only once; if the keys need to be | ||
152 | * regenerated, a POR is required | ||
153 | * | ||
154 | * Return: - 0 if no error occurred | ||
155 | * - -ENOMEM if there isn't enough memory to allocate the descriptor | ||
156 | * - -ENODEV if DECO0 couldn't be acquired | ||
157 | * - -EAGAIN if an error occurred when executing the descriptor | ||
158 | * e.g. there was an RNG hardware error due to not "good enough" | ||
159 | * entropy being acquired. | ||
160 | */ | ||
161 | static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | ||
162 | int gen_sk) | ||
163 | { | ||
164 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
165 | struct caam_full __iomem *topregs; | ||
166 | struct rng4tst __iomem *r4tst; | ||
167 | u32 *desc, status, rdsta_val; | ||
168 | int ret = 0, sh_idx; | ||
169 | |||
170 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
171 | r4tst = &topregs->ctrl.r4tst[0]; | ||
172 | |||
173 | desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); | ||
174 | if (!desc) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { | ||
178 | /* | ||
179 | * If the corresponding bit is set, this state handle | ||
180 | * was initialized by somebody else, so it's left alone. | ||
181 | */ | ||
182 | if ((1 << sh_idx) & state_handle_mask) | ||
183 | continue; | ||
184 | |||
185 | /* Create the descriptor for instantiating RNG State Handle */ | ||
186 | build_instantiation_desc(desc, sh_idx, gen_sk); | ||
187 | |||
188 | /* Try to run it through DECO0 */ | ||
189 | ret = run_descriptor_deco0(ctrldev, desc, &status); | ||
190 | |||
191 | /* | ||
192 | * If ret is not 0, or descriptor status is not 0, then | ||
193 | * something went wrong. No need to try the next state | ||
194 | * handle (if available), bail out here. | ||
195 | * Also, if for some reason, the State Handle didn't get | ||
196 | * instantiated although the descriptor has finished | ||
197 | * without any error (HW optimizations for later | ||
198 | * CAAM eras), then try again. | ||
199 | */ | ||
200 | rdsta_val = | ||
201 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK; | ||
202 | if (status || !(rdsta_val & (1 << sh_idx))) | ||
203 | ret = -EAGAIN; | ||
204 | if (ret) | ||
205 | break; | ||
206 | |||
207 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); | ||
208 | /* Clear the contents before recreating the descriptor */ | ||
209 | memset(desc, 0x00, CAAM_CMD_SZ * 7); | ||
210 | } | ||
211 | |||
127 | kfree(desc); | 212 | kfree(desc); |
213 | |||
128 | return ret; | 214 | return ret; |
129 | } | 215 | } |
130 | 216 | ||
131 | /* | 217 | /* |
132 | * By default, the TRNG runs for 200 clocks per sample; | 218 | * deinstantiate_rng - builds and executes a descriptor on DECO0, |
133 | * 1600 clocks per sample generates better entropy. | 219 | * which deinitializes the RNG block. |
220 | * @ctrldev - pointer to device | ||
221 | * @state_handle_mask - bitmask containing the instantiation status | ||
222 | * for the RNG4 state handles which exist in | ||
223 | * the RNG4 block: 1 if it's been instantiated | ||
224 | * | ||
225 | * Return: - 0 if no error occurred | ||
226 | * - -ENOMEM if there isn't enough memory to allocate the descriptor | ||
227 | * - -ENODEV if DECO0 couldn't be acquired | ||
228 | * - -EAGAIN if an error occurred when executing the descriptor | ||
134 | */ | 229 | */ |
135 | static void kick_trng(struct platform_device *pdev) | 230 | static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) |
231 | { | ||
232 | u32 *desc, status; | ||
233 | int sh_idx, ret = 0; | ||
234 | |||
235 | desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); | ||
236 | if (!desc) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { | ||
240 | /* | ||
241 | * If the corresponding bit is set, then it means the state | ||
242 | * handle was initialized by us, and thus it needs to be | ||
243 | * deinitialized as well | ||
244 | */ | ||
245 | if ((1 << sh_idx) & state_handle_mask) { | ||
246 | /* | ||
247 | * Create the descriptor for deinstantiating this state | ||
248 | * handle | ||
249 | */ | ||
250 | build_deinstantiation_desc(desc, sh_idx); | ||
251 | |||
252 | /* Try to run it through DECO0 */ | ||
253 | ret = run_descriptor_deco0(ctrldev, desc, &status); | ||
254 | |||
255 | if (ret || status) { | ||
256 | dev_err(ctrldev, | ||
257 | "Failed to deinstantiate RNG4 SH%d\n", | ||
258 | sh_idx); | ||
259 | break; | ||
260 | } | ||
261 | dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | kfree(desc); | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static int caam_remove(struct platform_device *pdev) | ||
271 | { | ||
272 | struct device *ctrldev; | ||
273 | struct caam_drv_private *ctrlpriv; | ||
274 | struct caam_full __iomem *topregs; | ||
275 | int ring, ret = 0; | ||
276 | |||
277 | ctrldev = &pdev->dev; | ||
278 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
279 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
280 | |||
281 | /* Remove platform devices for JobRs */ | ||
282 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
283 | if (ctrlpriv->jrpdev[ring]) | ||
284 | of_device_unregister(ctrlpriv->jrpdev[ring]); | ||
285 | } | ||
286 | |||
287 | /* De-initialize RNG state handles initialized by this driver. */ | ||
288 | if (ctrlpriv->rng4_sh_init) | ||
289 | deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); | ||
290 | |||
291 | /* Shut down debug views */ | ||
292 | #ifdef CONFIG_DEBUG_FS | ||
293 | debugfs_remove_recursive(ctrlpriv->dfs_root); | ||
294 | #endif | ||
295 | |||
296 | /* Unmap controller region */ | ||
297 | iounmap(&topregs->ctrl); | ||
298 | |||
299 | kfree(ctrlpriv->jrpdev); | ||
300 | kfree(ctrlpriv); | ||
301 | |||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * kick_trng - sets the various parameters for enabling the initialization | ||
307 | * of the RNG4 block in CAAM | ||
308 | * @pdev - pointer to the platform device | ||
309 | * @ent_delay - Defines the length (in system clocks) of each entropy sample. | ||
310 | */ | ||
311 | static void kick_trng(struct platform_device *pdev, int ent_delay) | ||
136 | { | 312 | { |
137 | struct device *ctrldev = &pdev->dev; | 313 | struct device *ctrldev = &pdev->dev; |
138 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 314 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
@@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev) | |||
145 | 321 | ||
146 | /* put RNG4 into program mode */ | 322 | /* put RNG4 into program mode */ |
147 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); | 323 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); |
148 | /* 1600 clocks per sample */ | 324 | |
325 | /* | ||
326 | * Performance-wise, it does not make sense to | ||
327 | * set the delay to a value that is lower | ||
328 | * than the last one that worked (i.e. the state handles | ||
329 | * were instantiated properly). Thus, instead of wasting | ||
330 | * time trying to set the values controlling the sample | ||
331 | * frequency, the function simply returns. | ||
332 | */ | ||
333 | val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) | ||
334 | >> RTSDCTL_ENT_DLY_SHIFT; | ||
335 | if (ent_delay <= val) { | ||
336 | /* put RNG4 into run mode */ | ||
337 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
338 | return; | ||
339 | } | ||
340 | |||
149 | val = rd_reg32(&r4tst->rtsdctl); | 341 | val = rd_reg32(&r4tst->rtsdctl); |
150 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); | 342 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | |
343 | (ent_delay << RTSDCTL_ENT_DLY_SHIFT); | ||
151 | wr_reg32(&r4tst->rtsdctl, val); | 344 | wr_reg32(&r4tst->rtsdctl, val); |
152 | /* min. freq. count */ | 345 | /* min. freq. count, equal to 1/4 of the entropy sample length */ |
153 | wr_reg32(&r4tst->rtfrqmin, 400); | 346 | wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); |
154 | /* max. freq. count */ | 347 | /* max. freq. count, equal to 8 times the entropy sample length */ |
155 | wr_reg32(&r4tst->rtfrqmax, 6400); | 348 | wr_reg32(&r4tst->rtfrqmax, ent_delay << 3); |
156 | /* put RNG4 into run mode */ | 349 | /* put RNG4 into run mode */ |
157 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | 350 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); |
158 | } | 351 | } |
@@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era); | |||
193 | /* Probe routine for CAAM top (controller) level */ | 386 | /* Probe routine for CAAM top (controller) level */ |
194 | static int caam_probe(struct platform_device *pdev) | 387 | static int caam_probe(struct platform_device *pdev) |
195 | { | 388 | { |
196 | int ret, ring, rspec; | 389 | int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; |
197 | u64 caam_id; | 390 | u64 caam_id; |
198 | struct device *dev; | 391 | struct device *dev; |
199 | struct device_node *nprop, *np; | 392 | struct device_node *nprop, *np; |
@@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev) | |||
258 | rspec++; | 451 | rspec++; |
259 | } | 452 | } |
260 | 453 | ||
261 | ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); | 454 | ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, |
262 | if (ctrlpriv->jrdev == NULL) { | 455 | GFP_KERNEL); |
456 | if (ctrlpriv->jrpdev == NULL) { | ||
263 | iounmap(&topregs->ctrl); | 457 | iounmap(&topregs->ctrl); |
264 | return -ENOMEM; | 458 | return -ENOMEM; |
265 | } | 459 | } |
@@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev) | |||
267 | ring = 0; | 461 | ring = 0; |
268 | ctrlpriv->total_jobrs = 0; | 462 | ctrlpriv->total_jobrs = 0; |
269 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { | 463 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { |
270 | caam_jr_probe(pdev, np, ring); | 464 | ctrlpriv->jrpdev[ring] = |
465 | of_platform_device_create(np, NULL, dev); | ||
466 | if (!ctrlpriv->jrpdev[ring]) { | ||
467 | pr_warn("JR%d Platform device creation error\n", ring); | ||
468 | continue; | ||
469 | } | ||
271 | ctrlpriv->total_jobrs++; | 470 | ctrlpriv->total_jobrs++; |
272 | ring++; | 471 | ring++; |
273 | } | 472 | } |
274 | if (!ring) { | 473 | if (!ring) { |
275 | for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { | 474 | for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { |
276 | caam_jr_probe(pdev, np, ring); | 475 | ctrlpriv->jrpdev[ring] = |
476 | of_platform_device_create(np, NULL, dev); | ||
477 | if (!ctrlpriv->jrpdev[ring]) { | ||
478 | pr_warn("JR%d Platform device creation error\n", | ||
479 | ring); | ||
480 | continue; | ||
481 | } | ||
277 | ctrlpriv->total_jobrs++; | 482 | ctrlpriv->total_jobrs++; |
278 | ring++; | 483 | ring++; |
279 | } | 484 | } |
@@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev) | |||
299 | 504 | ||
300 | /* | 505 | /* |
301 | * If SEC has RNG version >= 4 and RNG state handle has not been | 506 | * If SEC has RNG version >= 4 and RNG state handle has not been |
302 | * already instantiated ,do RNG instantiation | 507 | * already instantiated, do RNG instantiation |
303 | */ | 508 | */ |
304 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && | 509 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { |
305 | !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { | 510 | ctrlpriv->rng4_sh_init = |
306 | kick_trng(pdev); | 511 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta); |
307 | ret = instantiate_rng(dev); | 512 | /* |
513 | * If the secure keys (TDKEK, JDKEK, TDSK), were already | ||
514 | * generated, signal this to the function that is instantiating | ||
515 | * the state handles. An error would occur if RNG4 attempts | ||
516 | * to regenerate these keys before the next POR. | ||
517 | */ | ||
518 | gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; | ||
519 | ctrlpriv->rng4_sh_init &= RDSTA_IFMASK; | ||
520 | do { | ||
521 | int inst_handles = | ||
522 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & | ||
523 | RDSTA_IFMASK; | ||
524 | /* | ||
525 | * If either SH were instantiated by somebody else | ||
526 | * (e.g. u-boot) then it is assumed that the entropy | ||
527 | * parameters are properly set and thus the function | ||
528 | * setting these (kick_trng(...)) is skipped. | ||
529 | * Also, if a handle was instantiated, do not change | ||
530 | * the TRNG parameters. | ||
531 | */ | ||
532 | if (!(ctrlpriv->rng4_sh_init || inst_handles)) { | ||
533 | kick_trng(pdev, ent_delay); | ||
534 | ent_delay += 400; | ||
535 | } | ||
536 | /* | ||
537 | * if instantiate_rng(...) fails, the loop will rerun | ||
538 | * and the kick_trng(...) function will modify the | ||
539 | * upper and lower limits of the entropy sampling | ||
540 | * interval, leading to a successful initialization of | ||
541 | * the RNG. | ||
542 | */ | ||
543 | ret = instantiate_rng(dev, inst_handles, | ||
544 | gen_sk); | ||
545 | } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); | ||
308 | if (ret) { | 546 | if (ret) { |
547 | dev_err(dev, "failed to instantiate RNG"); | ||
309 | caam_remove(pdev); | 548 | caam_remove(pdev); |
310 | return ret; | 549 | return ret; |
311 | } | 550 | } |
551 | /* | ||
552 | * Set handles init'ed by this module as the complement of the | ||
553 | * already initialized ones | ||
554 | */ | ||
555 | ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; | ||
312 | 556 | ||
313 | /* Enable RDB bit so that RNG works faster */ | 557 | /* Enable RDB bit so that RNG works faster */ |
314 | setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); | 558 | setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); |
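The loop above is a bounded retry: every time instantiate_rng() comes back with -EAGAIN, kick_trng() widens the TRNG entropy-sampling interval by 400 ticks before the next attempt, so the sequence terminates either on success or once the delay reaches the hardware ceiling; the closing complement of rng4_sh_init then records which state handles this driver (as opposed to, say, u-boot) brought up itself. A minimal standalone sketch of the retry idiom, reusing the RTSDCTL_ENT_DLY_MIN/MAX constants this patch adds to regs.h (try_instantiate() is a hypothetical stand-in for instantiate_rng()):

	int ent_delay = RTSDCTL_ENT_DLY_MIN;	/* 1200 */
	int ret;

	do {
		ret = try_instantiate(ent_delay);	/* hypothetical stand-in */
		ent_delay += 400;	/* widen the sampling interval for the retry */
	} while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);	/* cap: 12800 */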
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 53b296f78b0d..7e4500f18df6 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry { | |||
1155 | 1155 | ||
1156 | /* randomizer AAI set */ | 1156 | /* randomizer AAI set */ |
1157 | #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) | 1157 | #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) |
1158 | #define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) | 1158 | #define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) |
1159 | #define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) | 1159 | #define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) |
1160 | |||
1161 | /* RNG4 AAI set */ | ||
1162 | #define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) | ||
1163 | #define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) | ||
1164 | #define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) | ||
1165 | #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) | ||
1166 | #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) | ||
1160 | 1167 | ||
1161 | /* hmac/smac AAI set */ | 1168 | /* hmac/smac AAI set */ |
1162 | #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) | 1169 | #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) |
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry { | |||
1178 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) | 1185 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) |
1179 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) | 1186 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) |
1180 | 1187 | ||
1181 | /* RNG4 set */ | ||
1182 | #define OP_ALG_RNG4_SHIFT 4 | ||
1183 | #define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) | ||
1184 | |||
1185 | #define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) | ||
1186 | |||
1187 | #define OP_ALG_AS_SHIFT 2 | 1188 | #define OP_ALG_AS_SHIFT 2 |
1188 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) | 1189 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) |
1189 | #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) | 1190 | #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) |
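The new defines fold what used to be a separate RNG4 field into the ordinary AAI encoding: the state-handle number sits in the low AAI bits (SH_0/SH_1) and PS, AI and SK act as option flags, which is why the old OP_ALG_RNG4_* block above is removed. A hedged sketch of how an instantiation descriptor composes these bits, modeled on this patch's ctrl.c and assuming the init_job_desc()/append_operation() helpers from desc_constr.h:

	u32 op_flags;
	int handle = 0;		/* state handle 0 or 1 */

	init_job_desc(desc, 0);

	/* CLASS1 RNG OPERATION, AS=INIT, state handle selected via AAI */
	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
		   (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
	if (gen_sk)
		op_flags |= OP_ALG_AAI_RNG4_SK;	/* also derive TDKEK/JDKEK/TDSK */

	append_operation(desc, op_flags);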
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 34c4b9f7fbfa..6d85fcc5bd0a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -37,13 +37,16 @@ struct caam_jrentry_info { | |||
37 | 37 | ||
38 | /* Private sub-storage for a single JobR */ | 38 | /* Private sub-storage for a single JobR */ |
39 | struct caam_drv_private_jr { | 39 | struct caam_drv_private_jr { |
40 | struct device *parentdev; /* points back to controller dev */ | 40 | struct list_head list_node; /* Job Ring device list */ |
41 | struct platform_device *jr_pdev;/* points to platform device for JR */ | 41 | struct device *dev; |
42 | int ridx; | 42 | int ridx; |
43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
44 | struct tasklet_struct irqtask; | 44 | struct tasklet_struct irqtask; |
45 | int irq; /* One per queue */ | 45 | int irq; /* One per queue */ |
46 | 46 | ||
47 | /* Number of scatterlist crypt transforms active on the JobR */ | ||
48 | atomic_t tfm_count ____cacheline_aligned; | ||
49 | |||
47 | /* Job ring info */ | 50 | /* Job ring info */ |
48 | int ringsize; /* Size of rings (assume input = output) */ | 51 | int ringsize; /* Size of rings (assume input = output) */ |
49 | struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ | 52 | struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ |
@@ -63,7 +66,7 @@ struct caam_drv_private_jr { | |||
63 | struct caam_drv_private { | 66 | struct caam_drv_private { |
64 | 67 | ||
65 | struct device *dev; | 68 | struct device *dev; |
66 | struct device **jrdev; /* Alloc'ed array per sub-device */ | 69 | struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ |
67 | struct platform_device *pdev; | 70 | struct platform_device *pdev; |
68 | 71 | ||
69 | /* Physical-presence section */ | 72 | /* Physical-presence section */ |
@@ -80,12 +83,11 @@ struct caam_drv_private { | |||
80 | u8 qi_present; /* Nonzero if QI present in device */ | 83 | u8 qi_present; /* Nonzero if QI present in device */ |
81 | int secvio_irq; /* Security violation interrupt number */ | 84 | int secvio_irq; /* Security violation interrupt number */ |
82 | 85 | ||
83 | /* which jr allocated to scatterlist crypto */ | 86 | #define RNG4_MAX_HANDLES 2 |
84 | atomic_t tfm_count ____cacheline_aligned; | 87 | /* RNG4 block */ |
85 | /* list of registered crypto algorithms (mk generic context handle?) */ | 88 | u32 rng4_sh_init; /* This bitmap shows which of the State |
86 | struct list_head alg_list; | 89 | Handles of the RNG4 block are initialized |
87 | /* list of registered hash algorithms (mk generic context handle?) */ | 90 | by this driver */ |
88 | struct list_head hash_list; | ||
89 | 91 | ||
90 | /* | 92 | /* |
91 | * debugfs entries for developer view into driver/device | 93 | * debugfs entries for developer view into driver/device |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index bdb786d5a5e5..1d80bd3636c5 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/of_irq.h> | 8 | #include <linux/of_irq.h> |
9 | #include <linux/of_address.h> | ||
9 | 10 | ||
10 | #include "compat.h" | 11 | #include "compat.h" |
11 | #include "regs.h" | 12 | #include "regs.h" |
@@ -13,6 +14,113 @@ | |||
13 | #include "desc.h" | 14 | #include "desc.h" |
14 | #include "intern.h" | 15 | #include "intern.h" |
15 | 16 | ||
17 | struct jr_driver_data { | ||
18 | /* List of Physical JobR's with the Driver */ | ||
19 | struct list_head jr_list; | ||
20 | spinlock_t jr_alloc_lock; /* jr_list lock */ | ||
21 | } ____cacheline_aligned; | ||
22 | |||
23 | static struct jr_driver_data driver_data; | ||
24 | |||
25 | static int caam_reset_hw_jr(struct device *dev) | ||
26 | { | ||
27 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
28 | unsigned int timeout = 100000; | ||
29 | |||
30 | /* | ||
31 | * mask interrupts since we are going to poll | ||
32 | * for reset completion status | ||
33 | */ | ||
34 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
35 | |||
36 | /* initiate flush (required prior to reset) */ | ||
37 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
38 | while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == | ||
39 | JRINT_ERR_HALT_INPROGRESS) && --timeout) | ||
40 | cpu_relax(); | ||
41 | |||
42 | if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != | ||
43 | JRINT_ERR_HALT_COMPLETE || timeout == 0) { | ||
44 | dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); | ||
45 | return -EIO; | ||
46 | } | ||
47 | |||
48 | /* initiate reset */ | ||
49 | timeout = 100000; | ||
50 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
51 | while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) | ||
52 | cpu_relax(); | ||
53 | |||
54 | if (timeout == 0) { | ||
55 | dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); | ||
56 | return -EIO; | ||
57 | } | ||
58 | |||
59 | /* unmask interrupts */ | ||
60 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Shutdown JobR independent of platform property code | ||
67 | */ | ||
68 | int caam_jr_shutdown(struct device *dev) | ||
69 | { | ||
70 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
71 | dma_addr_t inpbusaddr, outbusaddr; | ||
72 | int ret; | ||
73 | |||
74 | ret = caam_reset_hw_jr(dev); | ||
75 | |||
76 | tasklet_kill(&jrp->irqtask); | ||
77 | |||
78 | /* Release interrupt */ | ||
79 | free_irq(jrp->irq, dev); | ||
80 | |||
81 | /* Free rings */ | ||
82 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
83 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
84 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
85 | jrp->inpring, inpbusaddr); | ||
86 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
87 | jrp->outring, outbusaddr); | ||
88 | kfree(jrp->entinfo); | ||
89 | |||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | static int caam_jr_remove(struct platform_device *pdev) | ||
94 | { | ||
95 | int ret; | ||
96 | struct device *jrdev; | ||
97 | struct caam_drv_private_jr *jrpriv; | ||
98 | |||
99 | jrdev = &pdev->dev; | ||
100 | jrpriv = dev_get_drvdata(jrdev); | ||
101 | |||
102 | /* | ||
103 | * Return -EBUSY if the job ring is still allocated to a tfm. | ||
104 | */ | ||
105 | if (atomic_read(&jrpriv->tfm_count)) { | ||
106 | dev_err(jrdev, "Device is busy\n"); | ||
107 | return -EBUSY; | ||
108 | } | ||
109 | |||
110 | /* Remove the node from Physical JobR list maintained by driver */ | ||
111 | spin_lock(&driver_data.jr_alloc_lock); | ||
112 | list_del(&jrpriv->list_node); | ||
113 | spin_unlock(&driver_data.jr_alloc_lock); | ||
114 | |||
115 | /* Release ring */ | ||
116 | ret = caam_jr_shutdown(jrdev); | ||
117 | if (ret) | ||
118 | dev_err(jrdev, "Failed to shut down job ring\n"); | ||
119 | irq_dispose_mapping(jrpriv->irq); | ||
120 | |||
121 | return ret; | ||
122 | } | ||
123 | |||
16 | /* Main per-ring interrupt handler */ | 124 | /* Main per-ring interrupt handler */ |
17 | static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | 125 | static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) |
18 | { | 126 | { |
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
128 | } | 236 | } |
129 | 237 | ||
130 | /** | 238 | /** |
239 | * caam_jr_alloc() - Allocate the least-loaded job ring for a new user. | ||
240 | * | ||
241 | * returns : pointer to the newly allocated physical | ||
242 | * JobR dev on success, or ERR_PTR(-ENODEV) if none is available. | ||
243 | **/ | ||
244 | struct device *caam_jr_alloc(void) | ||
245 | { | ||
246 | struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; | ||
247 | struct device *dev = NULL; | ||
248 | int min_tfm_cnt = INT_MAX; | ||
249 | int tfm_cnt; | ||
250 | |||
251 | spin_lock(&driver_data.jr_alloc_lock); | ||
252 | |||
253 | if (list_empty(&driver_data.jr_list)) { | ||
254 | spin_unlock(&driver_data.jr_alloc_lock); | ||
255 | return ERR_PTR(-ENODEV); | ||
256 | } | ||
257 | |||
258 | list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { | ||
259 | tfm_cnt = atomic_read(&jrpriv->tfm_count); | ||
260 | if (tfm_cnt < min_tfm_cnt) { | ||
261 | min_tfm_cnt = tfm_cnt; | ||
262 | min_jrpriv = jrpriv; | ||
263 | } | ||
264 | if (!min_tfm_cnt) | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | if (min_jrpriv) { | ||
269 | atomic_inc(&min_jrpriv->tfm_count); | ||
270 | dev = min_jrpriv->dev; | ||
271 | } | ||
272 | spin_unlock(&driver_data.jr_alloc_lock); | ||
273 | |||
274 | return dev; | ||
275 | } | ||
276 | EXPORT_SYMBOL(caam_jr_alloc); | ||
277 | |||
278 | /** | ||
279 | * caam_jr_free() - Free the Job Ring | ||
280 | * @rdev: points to the dev that identifies the Job ring to | ||
281 | * be released. | ||
282 | **/ | ||
283 | void caam_jr_free(struct device *rdev) | ||
284 | { | ||
285 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | ||
286 | |||
287 | atomic_dec(&jrpriv->tfm_count); | ||
288 | } | ||
289 | EXPORT_SYMBOL(caam_jr_free); | ||
290 | |||
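With these two exports a crypto transform no longer has a ring assigned to it by the controller: it asks the JR backend for the least-loaded ring when the tfm is created and hands it back when the tfm is destroyed. A minimal consumer sketch (error handling trimmed; ctx is a hypothetical per-tfm context):

	ctx->jrdev = caam_jr_alloc();		/* least-loaded ring */
	if (IS_ERR(ctx->jrdev))
		return PTR_ERR(ctx->jrdev);	/* -ENODEV: no rings registered */

	/* ... caam_jr_enqueue(ctx->jrdev, desc, callback, req) as before ... */

	caam_jr_free(ctx->jrdev);		/* drop this tfm's reference */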
291 | /** | ||
131 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, | 292 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, |
132 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's | 293 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's |
133 | * descriptor. | 294 | * descriptor. |
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
207 | } | 368 | } |
208 | EXPORT_SYMBOL(caam_jr_enqueue); | 369 | EXPORT_SYMBOL(caam_jr_enqueue); |
209 | 370 | ||
210 | static int caam_reset_hw_jr(struct device *dev) | ||
211 | { | ||
212 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
213 | unsigned int timeout = 100000; | ||
214 | |||
215 | /* | ||
216 | * mask interrupts since we are going to poll | ||
217 | * for reset completion status | ||
218 | */ | ||
219 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
220 | |||
221 | /* initiate flush (required prior to reset) */ | ||
222 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
223 | while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == | ||
224 | JRINT_ERR_HALT_INPROGRESS) && --timeout) | ||
225 | cpu_relax(); | ||
226 | |||
227 | if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != | ||
228 | JRINT_ERR_HALT_COMPLETE || timeout == 0) { | ||
229 | dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); | ||
230 | return -EIO; | ||
231 | } | ||
232 | |||
233 | /* initiate reset */ | ||
234 | timeout = 100000; | ||
235 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
236 | while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) | ||
237 | cpu_relax(); | ||
238 | |||
239 | if (timeout == 0) { | ||
240 | dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); | ||
241 | return -EIO; | ||
242 | } | ||
243 | |||
244 | /* unmask interrupts */ | ||
245 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | /* | 371 | /* |
251 | * Init JobR independent of platform property detection | 372 | * Init JobR independent of platform property detection |
252 | */ | 373 | */ |
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev) | |||
262 | 383 | ||
263 | /* Connect job ring interrupt handler. */ | 384 | /* Connect job ring interrupt handler. */ |
264 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | 385 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
265 | "caam-jobr", dev); | 386 | dev_name(dev), dev); |
266 | if (error) { | 387 | if (error) { |
267 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | 388 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
268 | jrp->ridx, jrp->irq); | 389 | jrp->ridx, jrp->irq); |
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev) | |||
318 | return 0; | 439 | return 0; |
319 | } | 440 | } |
320 | 441 | ||
321 | /* | ||
322 | * Shutdown JobR independent of platform property code | ||
323 | */ | ||
324 | int caam_jr_shutdown(struct device *dev) | ||
325 | { | ||
326 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
327 | dma_addr_t inpbusaddr, outbusaddr; | ||
328 | int ret; | ||
329 | |||
330 | ret = caam_reset_hw_jr(dev); | ||
331 | |||
332 | tasklet_kill(&jrp->irqtask); | ||
333 | |||
334 | /* Release interrupt */ | ||
335 | free_irq(jrp->irq, dev); | ||
336 | |||
337 | /* Free rings */ | ||
338 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
339 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
340 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
341 | jrp->inpring, inpbusaddr); | ||
342 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
343 | jrp->outring, outbusaddr); | ||
344 | kfree(jrp->entinfo); | ||
345 | of_device_unregister(jrp->jr_pdev); | ||
346 | |||
347 | return ret; | ||
348 | } | ||
349 | 442 | ||
350 | /* | 443 | /* |
351 | * Probe routine for each detected JobR subsystem. It assumes that | 444 | * Probe routine for each detected JobR subsystem. |
352 | * property detection was picked up externally. | ||
353 | */ | 445 | */ |
354 | int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | 446 | static int caam_jr_probe(struct platform_device *pdev) |
355 | int ring) | ||
356 | { | 447 | { |
357 | struct device *ctrldev, *jrdev; | 448 | struct device *jrdev; |
358 | struct platform_device *jr_pdev; | 449 | struct device_node *nprop; |
359 | struct caam_drv_private *ctrlpriv; | 450 | struct caam_job_ring __iomem *ctrl; |
360 | struct caam_drv_private_jr *jrpriv; | 451 | struct caam_drv_private_jr *jrpriv; |
361 | u32 *jroffset; | 452 | static int total_jobrs; |
362 | int error; | 453 | int error; |
363 | 454 | ||
364 | ctrldev = &pdev->dev; | 455 | jrdev = &pdev->dev; |
365 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
366 | |||
367 | jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), | 456 | jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), |
368 | GFP_KERNEL); | 457 | GFP_KERNEL); |
369 | if (jrpriv == NULL) { | 458 | if (!jrpriv) |
370 | dev_err(ctrldev, "can't alloc private mem for job ring %d\n", | ||
371 | ring); | ||
372 | return -ENOMEM; | 459 | return -ENOMEM; |
373 | } | ||
374 | jrpriv->parentdev = ctrldev; /* point back to parent */ | ||
375 | jrpriv->ridx = ring; /* save ring identity relative to detection */ | ||
376 | 460 | ||
377 | /* | 461 | dev_set_drvdata(jrdev, jrpriv); |
378 | * Derive a pointer to the detected JobRs regs | ||
379 | * Driver has already iomapped the entire space, we just | ||
380 | * need to add in the offset to this JobR. Don't know if I | ||
381 | * like this long-term, but it'll run | ||
382 | */ | ||
383 | jroffset = (u32 *)of_get_property(np, "reg", NULL); | ||
384 | jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl | ||
385 | + *jroffset); | ||
386 | 462 | ||
387 | /* Build a local dev for each detected queue */ | 463 | /* save ring identity relative to detection */ |
388 | jr_pdev = of_platform_device_create(np, NULL, ctrldev); | 464 | jrpriv->ridx = total_jobrs++; |
389 | if (jr_pdev == NULL) { | 465 | |
390 | kfree(jrpriv); | 466 | nprop = pdev->dev.of_node; |
391 | return -EINVAL; | 467 | /* Get configuration properties from device tree */ |
468 | /* First, get register page */ | ||
469 | ctrl = of_iomap(nprop, 0); | ||
470 | if (!ctrl) { | ||
471 | dev_err(jrdev, "of_iomap() failed\n"); | ||
472 | return -ENOMEM; | ||
392 | } | 473 | } |
393 | 474 | ||
394 | jrpriv->jr_pdev = jr_pdev; | 475 | jrpriv->rregs = (struct caam_job_ring __force *)ctrl; |
395 | jrdev = &jr_pdev->dev; | ||
396 | dev_set_drvdata(jrdev, jrpriv); | ||
397 | ctrlpriv->jrdev[ring] = jrdev; | ||
398 | 476 | ||
399 | if (sizeof(dma_addr_t) == sizeof(u64)) | 477 | if (sizeof(dma_addr_t) == sizeof(u64)) |
400 | if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) | 478 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) |
401 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | 479 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); |
402 | else | 480 | else |
403 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | 481 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); |
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
405 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | 483 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); |
406 | 484 | ||
407 | /* Identify the interrupt */ | 485 | /* Identify the interrupt */ |
408 | jrpriv->irq = irq_of_parse_and_map(np, 0); | 486 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); |
409 | 487 | ||
410 | /* Now do the platform independent part */ | 488 | /* Now do the platform independent part */ |
411 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 489 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
412 | if (error) { | 490 | if (error) { |
413 | of_device_unregister(jr_pdev); | ||
414 | kfree(jrpriv); | 491 | kfree(jrpriv); |
415 | return error; | 492 | return error; |
416 | } | 493 | } |
417 | 494 | ||
418 | return error; | 495 | jrpriv->dev = jrdev; |
496 | spin_lock(&driver_data.jr_alloc_lock); | ||
497 | list_add_tail(&jrpriv->list_node, &driver_data.jr_list); | ||
498 | spin_unlock(&driver_data.jr_alloc_lock); | ||
499 | |||
500 | atomic_set(&jrpriv->tfm_count, 0); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static struct of_device_id caam_jr_match[] = { | ||
506 | { | ||
507 | .compatible = "fsl,sec-v4.0-job-ring", | ||
508 | }, | ||
509 | { | ||
510 | .compatible = "fsl,sec4.0-job-ring", | ||
511 | }, | ||
512 | {}, | ||
513 | }; | ||
514 | MODULE_DEVICE_TABLE(of, caam_jr_match); | ||
515 | |||
516 | static struct platform_driver caam_jr_driver = { | ||
517 | .driver = { | ||
518 | .name = "caam_jr", | ||
519 | .owner = THIS_MODULE, | ||
520 | .of_match_table = caam_jr_match, | ||
521 | }, | ||
522 | .probe = caam_jr_probe, | ||
523 | .remove = caam_jr_remove, | ||
524 | }; | ||
525 | |||
526 | static int __init jr_driver_init(void) | ||
527 | { | ||
528 | spin_lock_init(&driver_data.jr_alloc_lock); | ||
529 | INIT_LIST_HEAD(&driver_data.jr_list); | ||
530 | return platform_driver_register(&caam_jr_driver); | ||
531 | } | ||
532 | |||
533 | static void __exit jr_driver_exit(void) | ||
534 | { | ||
535 | platform_driver_unregister(&caam_jr_driver); | ||
419 | } | 536 | } |
537 | |||
538 | module_init(jr_driver_init); | ||
539 | module_exit(jr_driver_exit); | ||
540 | |||
541 | MODULE_LICENSE("GPL"); | ||
542 | MODULE_DESCRIPTION("FSL CAAM JR request backend"); | ||
543 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h index 9d8741a59037..97113a6d6c58 100644 --- a/drivers/crypto/caam/jr.h +++ b/drivers/crypto/caam/jr.h | |||
@@ -8,12 +8,11 @@ | |||
8 | #define JR_H | 8 | #define JR_H |
9 | 9 | ||
10 | /* Prototypes for backend-level services exposed to APIs */ | 10 | /* Prototypes for backend-level services exposed to APIs */ |
11 | struct device *caam_jr_alloc(void); | ||
12 | void caam_jr_free(struct device *rdev); | ||
11 | int caam_jr_enqueue(struct device *dev, u32 *desc, | 13 | int caam_jr_enqueue(struct device *dev, u32 *desc, |
12 | void (*cbk)(struct device *dev, u32 *desc, u32 status, | 14 | void (*cbk)(struct device *dev, u32 *desc, u32 status, |
13 | void *areq), | 15 | void *areq), |
14 | void *areq); | 16 | void *areq); |
15 | 17 | ||
16 | extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | ||
17 | int ring); | ||
18 | extern int caam_jr_shutdown(struct device *dev); | ||
19 | #endif /* JR_H */ | 18 | #endif /* JR_H */ |
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4455396918de..d50174f45b21 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -245,7 +245,7 @@ struct rngtst { | |||
245 | 245 | ||
246 | /* RNG4 TRNG test registers */ | 246 | /* RNG4 TRNG test registers */ |
247 | struct rng4tst { | 247 | struct rng4tst { |
248 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ | 248 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ |
249 | u32 rtmctl; /* misc. control register */ | 249 | u32 rtmctl; /* misc. control register */ |
250 | u32 rtscmisc; /* statistical check misc. register */ | 250 | u32 rtscmisc; /* statistical check misc. register */ |
251 | u32 rtpkrrng; /* poker range register */ | 251 | u32 rtpkrrng; /* poker range register */ |
@@ -255,6 +255,8 @@ struct rng4tst { | |||
255 | }; | 255 | }; |
256 | #define RTSDCTL_ENT_DLY_SHIFT 16 | 256 | #define RTSDCTL_ENT_DLY_SHIFT 16 |
257 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) | 257 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) |
258 | #define RTSDCTL_ENT_DLY_MIN 1200 | ||
259 | #define RTSDCTL_ENT_DLY_MAX 12800 | ||
258 | u32 rtsdctl; /* seed control register */ | 260 | u32 rtsdctl; /* seed control register */ |
259 | union { | 261 | union { |
260 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ | 262 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ |
@@ -266,7 +268,11 @@ struct rng4tst { | |||
266 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ | 268 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ |
267 | }; | 269 | }; |
268 | u32 rsvd1[40]; | 270 | u32 rsvd1[40]; |
271 | #define RDSTA_SKVT 0x80000000 | ||
272 | #define RDSTA_SKVN 0x40000000 | ||
269 | #define RDSTA_IF0 0x00000001 | 273 | #define RDSTA_IF0 0x00000001 |
274 | #define RDSTA_IF1 0x00000002 | ||
275 | #define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0) | ||
270 | u32 rdsta; | 276 | u32 rdsta; |
271 | u32 rsvd2[15]; | 277 | u32 rsvd2[15]; |
272 | }; | 278 | }; |
@@ -692,6 +698,7 @@ struct caam_deco { | |||
692 | u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ | 698 | u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ |
693 | u32 jr_ctl_lo; | 699 | u32 jr_ctl_lo; |
694 | u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ | 700 | u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ |
701 | #define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF | ||
695 | u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ | 702 | u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ |
696 | u32 op_status_lo; | 703 | u32 op_status_lo; |
697 | u32 rsvd24[2]; | 704 | u32 rsvd24[2]; |
@@ -706,12 +713,13 @@ struct caam_deco { | |||
706 | u32 rsvd29[48]; | 713 | u32 rsvd29[48]; |
707 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ | 714 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ |
708 | u32 rscvd30[193]; | 715 | u32 rscvd30[193]; |
716 | #define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 | ||
717 | #define DESC_DBG_DECO_STAT_VALID 0x80000000 | ||
718 | #define DESC_DBG_DECO_STAT_MASK 0x00F00000 | ||
709 | u32 desc_dbg; /* DxDDR - DECO Debug Register */ | 719 | u32 desc_dbg; /* DxDDR - DECO Debug Register */ |
710 | u32 rsvd31[126]; | 720 | u32 rsvd31[126]; |
711 | }; | 721 | }; |
712 | 722 | ||
713 | /* DECO DBG Register Valid Bit*/ | ||
714 | #define DECO_DBG_VALID 0x80000000 | ||
715 | #define DECO_JQCR_WHL 0x20000000 | 723 | #define DECO_JQCR_WHL 0x20000000 |
716 | #define DECO_JQCR_FOUR 0x10000000 | 724 | #define DECO_JQCR_FOUR 0x10000000 |
717 | 725 | ||
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index e0037c8ee243..b12ff85f4241 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | |||
117 | return nents; | 117 | return nents; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* Map SG page in kernel virtual address space and copy */ | ||
121 | static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, | ||
122 | int len, int offset) | ||
123 | { | ||
124 | u8 *mapped_addr; | ||
125 | |||
126 | /* | ||
127 | * The page here may be a user-space page pinned via get_user_pages(); | ||
128 | * it must be kmapped before use and kunmapped afterwards. | ||
129 | */ | ||
130 | mapped_addr = kmap_atomic(sg_page(sg)); | ||
131 | memcpy(dest, mapped_addr + offset, len); | ||
132 | kunmap_atomic(mapped_addr); | ||
133 | } | ||
134 | |||
120 | /* Copy from len bytes of sg to dest, starting from beginning */ | 135 | /* Copy from len bytes of sg to dest, starting from beginning */ |
121 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | 136 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) |
122 | { | 137 | { |
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | |||
124 | int cpy_index = 0, next_cpy_index = current_sg->length; | 139 | int cpy_index = 0, next_cpy_index = current_sg->length; |
125 | 140 | ||
126 | while (next_cpy_index < len) { | 141 | while (next_cpy_index < len) { |
127 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | 142 | sg_map_copy(dest + cpy_index, current_sg, current_sg->length, |
128 | current_sg->length); | 143 | current_sg->offset); |
129 | current_sg = scatterwalk_sg_next(current_sg); | 144 | current_sg = scatterwalk_sg_next(current_sg); |
130 | cpy_index = next_cpy_index; | 145 | cpy_index = next_cpy_index; |
131 | next_cpy_index += current_sg->length; | 146 | next_cpy_index += current_sg->length; |
132 | } | 147 | } |
133 | if (cpy_index < len) | 148 | if (cpy_index < len) |
134 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | 149 | sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, |
135 | len - cpy_index); | 150 | current_sg->offset); |
136 | } | 151 | } |
137 | 152 | ||
138 | /* Copy sg data, from to_skip to end, to dest */ | 153 | /* Copy sg data, from to_skip to end, to dest */ |
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | |||
140 | int to_skip, unsigned int end) | 155 | int to_skip, unsigned int end) |
141 | { | 156 | { |
142 | struct scatterlist *current_sg = sg; | 157 | struct scatterlist *current_sg = sg; |
143 | int sg_index, cpy_index; | 158 | int sg_index, cpy_index, offset; |
144 | 159 | ||
145 | sg_index = current_sg->length; | 160 | sg_index = current_sg->length; |
146 | while (sg_index <= to_skip) { | 161 | while (sg_index <= to_skip) { |
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | |||
148 | sg_index += current_sg->length; | 163 | sg_index += current_sg->length; |
149 | } | 164 | } |
150 | cpy_index = sg_index - to_skip; | 165 | cpy_index = sg_index - to_skip; |
151 | memcpy(dest, (u8 *) sg_virt(current_sg) + | 166 | offset = current_sg->offset + current_sg->length - cpy_index; |
152 | current_sg->length - cpy_index, cpy_index); | 167 | sg_map_copy(dest, current_sg, cpy_index, offset); |
153 | current_sg = scatterwalk_sg_next(current_sg); | 168 | if (end - sg_index) { |
154 | if (end - sg_index) | 169 | current_sg = scatterwalk_sg_next(current_sg); |
155 | sg_copy(dest + cpy_index, current_sg, end - sg_index); | 170 | sg_copy(dest + cpy_index, current_sg, end - sg_index); |
171 | } | ||
156 | } | 172 | } |
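The common thread in these hunks is that sg_virt() is only valid for pages with a permanent kernel mapping; scatterlist entries backed by highmem, or by user pages pinned with get_user_pages(), must be mapped temporarily first, which sg_map_copy() does with the non-sleeping kmap_atomic()/kunmap_atomic() pair. Callers keep the same interface, e.g. (req is a hypothetical request):

	sg_copy(dest, req->src, nbytes);		/* from the start of the list */
	sg_copy_part(dest, req->src, to_skip, nbytes);	/* skip to_skip bytes first */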
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c index a8a7dd4b0d25..247ab8048f5b 100644 --- a/drivers/crypto/dcp.c +++ b/drivers/crypto/dcp.c | |||
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev) | |||
733 | platform_set_drvdata(pdev, dev); | 733 | platform_set_drvdata(pdev, dev); |
734 | 734 | ||
735 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 735 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
736 | if (!r) { | 736 | dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r); |
737 | dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); | 737 | if (IS_ERR(dev->dcp_regs_base)) |
738 | return -ENXIO; | 738 | return PTR_ERR(dev->dcp_regs_base); |
739 | } | ||
740 | dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start, | ||
741 | resource_size(r)); | ||
742 | 739 | ||
743 | dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); | 740 | dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); |
744 | udelay(10); | 741 | udelay(10); |
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev) | |||
762 | return -EIO; | 759 | return -EIO; |
763 | } | 760 | } |
764 | dev->dcp_vmi_irq = r->start; | 761 | dev->dcp_vmi_irq = r->start; |
765 | ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); | 762 | ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0, |
763 | "dcp", dev); | ||
766 | if (ret != 0) { | 764 | if (ret != 0) { |
767 | dev_err(&pdev->dev, "can't request_irq (0)\n"); | 765 | dev_err(&pdev->dev, "can't request_irq (0)\n"); |
768 | return -EIO; | 766 | return -EIO; |
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev) | |||
771 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 769 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
772 | if (!r) { | 770 | if (!r) { |
773 | dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); | 771 | dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); |
774 | ret = -EIO; | 772 | return -EIO; |
775 | goto err_free_irq0; | ||
776 | } | 773 | } |
777 | dev->dcp_irq = r->start; | 774 | dev->dcp_irq = r->start; |
778 | ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); | 775 | ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp", |
776 | dev); | ||
779 | if (ret != 0) { | 777 | if (ret != 0) { |
780 | dev_err(&pdev->dev, "can't request_irq (1)\n"); | 778 | dev_err(&pdev->dev, "can't request_irq (1)\n"); |
781 | ret = -EIO; | 779 | return -EIO; |
782 | goto err_free_irq0; | ||
783 | } | 780 | } |
784 | 781 | ||
785 | dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, | 782 | dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, |
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev) | |||
788 | GFP_KERNEL); | 785 | GFP_KERNEL); |
789 | if (!dev->hw_pkg[0]) { | 786 | if (!dev->hw_pkg[0]) { |
790 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); | 787 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); |
791 | ret = -ENOMEM; | 788 | return -ENOMEM; |
792 | goto err_free_irq1; | ||
793 | } | 789 | } |
794 | 790 | ||
795 | for (i = 1; i < DCP_MAX_PKG; i++) { | 791 | for (i = 1; i < DCP_MAX_PKG; i++) { |
@@ -848,16 +844,14 @@ err_unregister: | |||
848 | for (j = 0; j < i; j++) | 844 | for (j = 0; j < i; j++) |
849 | crypto_unregister_alg(&algs[j]); | 845 | crypto_unregister_alg(&algs[j]); |
850 | err_free_key_iv: | 846 | err_free_key_iv: |
847 | tasklet_kill(&dev->done_task); | ||
848 | tasklet_kill(&dev->queue_task); | ||
851 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | 849 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, |
852 | dev->payload_base_dma); | 850 | dev->payload_base_dma); |
853 | err_free_hw_packet: | 851 | err_free_hw_packet: |
854 | dma_free_coherent(&pdev->dev, DCP_MAX_PKG * | 852 | dma_free_coherent(&pdev->dev, DCP_MAX_PKG * |
855 | sizeof(struct dcp_hw_packet), dev->hw_pkg[0], | 853 | sizeof(struct dcp_hw_packet), dev->hw_pkg[0], |
856 | dev->hw_phys_pkg); | 854 | dev->hw_phys_pkg); |
857 | err_free_irq1: | ||
858 | free_irq(dev->dcp_irq, dev); | ||
859 | err_free_irq0: | ||
860 | free_irq(dev->dcp_vmi_irq, dev); | ||
861 | 855 | ||
862 | return ret; | 856 | return ret; |
863 | } | 857 | } |
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev) | |||
868 | int j; | 862 | int j; |
869 | dev = platform_get_drvdata(pdev); | 863 | dev = platform_get_drvdata(pdev); |
870 | 864 | ||
871 | dma_free_coherent(&pdev->dev, | 865 | misc_deregister(&dev->dcp_bootstream_misc); |
872 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
873 | dev->hw_pkg[0], dev->hw_phys_pkg); | ||
874 | |||
875 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | ||
876 | dev->payload_base_dma); | ||
877 | 866 | ||
878 | free_irq(dev->dcp_irq, dev); | 867 | for (j = 0; j < ARRAY_SIZE(algs); j++) |
879 | free_irq(dev->dcp_vmi_irq, dev); | 868 | crypto_unregister_alg(&algs[j]); |
880 | 869 | ||
881 | tasklet_kill(&dev->done_task); | 870 | tasklet_kill(&dev->done_task); |
882 | tasklet_kill(&dev->queue_task); | 871 | tasklet_kill(&dev->queue_task); |
883 | 872 | ||
884 | for (j = 0; j < ARRAY_SIZE(algs); j++) | 873 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, |
885 | crypto_unregister_alg(&algs[j]); | 874 | dev->payload_base_dma); |
886 | 875 | ||
887 | misc_deregister(&dev->dcp_bootstream_misc); | 876 | dma_free_coherent(&pdev->dev, |
877 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
878 | dev->hw_pkg[0], dev->hw_phys_pkg); | ||
888 | 879 | ||
889 | return 0; | 880 | return 0; |
890 | } | 881 | } |
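The dcp changes are a standard managed-resource conversion: devm_ioremap_resource() both validates the resource and unmaps it on driver detach, and devm_request_irq() releases the IRQ automatically, which is what lets the err_free_irq* unwind labels disappear. The canonical probe shape looks roughly like this (a sketch, not dcp's exact code; foo_irq is a hypothetical handler):

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;
		int irq, ret;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, r);	/* also checks r */
		if (IS_ERR(base))
			return PTR_ERR(base);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				       dev_name(&pdev->dev), pdev);
		if (ret)
			return ret;	/* everything above is auto-released */

		return 0;
	}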
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 214357e12dc0..9dd6e01eac33 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
1149 | unsigned int keylen) | 1149 | unsigned int keylen) |
1150 | { | 1150 | { |
1151 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | 1151 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); |
1152 | struct rtattr *rta = (struct rtattr *)key; | 1152 | struct crypto_authenc_keys keys; |
1153 | struct crypto_authenc_key_param *param; | ||
1154 | 1153 | ||
1155 | if (!RTA_OK(rta, keylen)) | 1154 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
1156 | goto badkey; | ||
1157 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
1158 | goto badkey; | ||
1159 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
1160 | goto badkey; | 1155 | goto badkey; |
1161 | 1156 | ||
1162 | param = RTA_DATA(rta); | 1157 | if (keys.authkeylen > sizeof(ctx->authkey)) |
1163 | ctx->enckey_len = be32_to_cpu(param->enckeylen); | 1158 | goto badkey; |
1164 | |||
1165 | key += RTA_ALIGN(rta->rta_len); | ||
1166 | keylen -= RTA_ALIGN(rta->rta_len); | ||
1167 | 1159 | ||
1168 | if (keylen < ctx->enckey_len) | 1160 | if (keys.enckeylen > sizeof(ctx->enckey)) |
1169 | goto badkey; | 1161 | goto badkey; |
1170 | 1162 | ||
1171 | ctx->authkey_len = keylen - ctx->enckey_len; | 1163 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
1172 | memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); | 1164 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
1173 | memcpy(ctx->authkey, key, ctx->authkey_len); | 1165 | ctx->authkey_len = keys.authkeylen; |
1166 | ctx->enckey_len = keys.enckeylen; | ||
1174 | 1167 | ||
1175 | return aead_setup(tfm, crypto_aead_authsize(tfm)); | 1168 | return aead_setup(tfm, crypto_aead_authsize(tfm)); |
1176 | badkey: | 1169 | badkey: |
1177 | ctx->enckey_len = 0; | ||
1178 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1170 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
1179 | return -EINVAL; | 1171 | return -EINVAL; |
1180 | } | 1172 | } |
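This is the first of three drivers in this merge (picoxcell and talitos follow) to drop hand-rolled rtattr parsing in favor of crypto_authenc_extractkeys(), which splits the authenc() key blob into its authentication and encryption halves and centralizes the RTA sanity checks. The resulting setkey shape is the same everywhere (a sketch; the bounds and ctx fields are driver-specific):

	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto badkey;
	if (keys.authkeylen > sizeof(ctx->authkey) ||
	    keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);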
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 3374a3ebe4c7..8d1e6f8e9e9c 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) | |||
907 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); | 907 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); |
908 | } | 908 | } |
909 | 909 | ||
910 | irqreturn_t crypto_int(int irq, void *priv) | 910 | static irqreturn_t crypto_int(int irq, void *priv) |
911 | { | 911 | { |
912 | u32 val; | 912 | u32 val; |
913 | 913 | ||
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv) | |||
928 | return IRQ_HANDLED; | 928 | return IRQ_HANDLED; |
929 | } | 929 | } |
930 | 930 | ||
931 | struct crypto_alg mv_aes_alg_ecb = { | 931 | static struct crypto_alg mv_aes_alg_ecb = { |
932 | .cra_name = "ecb(aes)", | 932 | .cra_name = "ecb(aes)", |
933 | .cra_driver_name = "mv-ecb-aes", | 933 | .cra_driver_name = "mv-ecb-aes", |
934 | .cra_priority = 300, | 934 | .cra_priority = 300, |
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = { | |||
951 | }, | 951 | }, |
952 | }; | 952 | }; |
953 | 953 | ||
954 | struct crypto_alg mv_aes_alg_cbc = { | 954 | static struct crypto_alg mv_aes_alg_cbc = { |
955 | .cra_name = "cbc(aes)", | 955 | .cra_name = "cbc(aes)", |
956 | .cra_driver_name = "mv-cbc-aes", | 956 | .cra_driver_name = "mv-cbc-aes", |
957 | .cra_priority = 300, | 957 | .cra_priority = 300, |
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = { | |||
975 | }, | 975 | }, |
976 | }; | 976 | }; |
977 | 977 | ||
978 | struct ahash_alg mv_sha1_alg = { | 978 | static struct ahash_alg mv_sha1_alg = { |
979 | .init = mv_hash_init, | 979 | .init = mv_hash_init, |
980 | .update = mv_hash_update, | 980 | .update = mv_hash_update, |
981 | .final = mv_hash_final, | 981 | .final = mv_hash_final, |
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = { | |||
999 | } | 999 | } |
1000 | }; | 1000 | }; |
1001 | 1001 | ||
1002 | struct ahash_alg mv_hmac_sha1_alg = { | 1002 | static struct ahash_alg mv_hmac_sha1_alg = { |
1003 | .init = mv_hash_init, | 1003 | .init = mv_hash_init, |
1004 | .update = mv_hash_update, | 1004 | .update = mv_hash_update, |
1005 | .final = mv_hash_final, | 1005 | .final = mv_hash_final, |
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev) | |||
1084 | goto err_unmap_sram; | 1084 | goto err_unmap_sram; |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | 1087 | ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), |
1088 | cp); | 1088 | cp); |
1089 | if (ret) | 1089 | if (ret) |
1090 | goto err_thread; | 1090 | goto err_thread; |
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = { | |||
1187 | .driver = { | 1187 | .driver = { |
1188 | .owner = THIS_MODULE, | 1188 | .owner = THIS_MODULE, |
1189 | .name = "mv_crypto", | 1189 | .name = "mv_crypto", |
1190 | .of_match_table = of_match_ptr(mv_cesa_of_match_table), | 1190 | .of_match_table = mv_cesa_of_match_table, |
1191 | }, | 1191 | }, |
1192 | }; | 1192 | }; |
1193 | MODULE_ALIAS("platform:mv_crypto"); | 1193 | MODULE_ALIAS("platform:mv_crypto"); |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index ce791c2f81f7..a9ccbf14096e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
275 | if (dd->flags & FLAGS_CBC) | 275 | if (dd->flags & FLAGS_CBC) |
276 | val |= AES_REG_CTRL_CBC; | 276 | val |= AES_REG_CTRL_CBC; |
277 | if (dd->flags & FLAGS_CTR) { | 277 | if (dd->flags & FLAGS_CTR) { |
278 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; | 278 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; |
279 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; | 279 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; |
280 | } | 280 | } |
281 | if (dd->flags & FLAGS_ENCRYPT) | 281 | if (dd->flags & FLAGS_ENCRYPT) |
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
554 | return err; | 554 | return err; |
555 | } | 555 | } |
556 | 556 | ||
557 | int omap_aes_check_aligned(struct scatterlist *sg) | 557 | static int omap_aes_check_aligned(struct scatterlist *sg) |
558 | { | 558 | { |
559 | while (sg) { | 559 | while (sg) { |
560 | if (!IS_ALIGNED(sg->offset, 4)) | 560 | if (!IS_ALIGNED(sg->offset, 4)) |
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg) | |||
566 | return 0; | 566 | return 0; |
567 | } | 567 | } |
568 | 568 | ||
569 | int omap_aes_copy_sgs(struct omap_aes_dev *dd) | 569 | static int omap_aes_copy_sgs(struct omap_aes_dev *dd) |
570 | { | 570 | { |
571 | void *buf_in, *buf_out; | 571 | void *buf_in, *buf_out; |
572 | int pages; | 572 | int pages; |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e28104b4aab0..e45aaaf0db30 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver); | |||
2033 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); | 2033 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); |
2034 | MODULE_LICENSE("GPL v2"); | 2034 | MODULE_LICENSE("GPL v2"); |
2035 | MODULE_AUTHOR("Dmitry Kasatkin"); | 2035 | MODULE_AUTHOR("Dmitry Kasatkin"); |
2036 | MODULE_ALIAS("platform:omap-sham"); | ||
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 888f7f4a6d3f..a6175ba6d238 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
495 | { | 495 | { |
496 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 496 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
497 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | 497 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); |
498 | struct rtattr *rta = (void *)key; | 498 | struct crypto_authenc_keys keys; |
499 | struct crypto_authenc_key_param *param; | ||
500 | unsigned int authkeylen, enckeylen; | ||
501 | int err = -EINVAL; | 499 | int err = -EINVAL; |
502 | 500 | ||
503 | if (!RTA_OK(rta, keylen)) | 501 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
504 | goto badkey; | 502 | goto badkey; |
505 | 503 | ||
506 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 504 | if (keys.enckeylen > AES_MAX_KEY_SIZE) |
507 | goto badkey; | 505 | goto badkey; |
508 | 506 | ||
509 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 507 | if (keys.authkeylen > sizeof(ctx->hash_ctx)) |
510 | goto badkey; | ||
511 | |||
512 | param = RTA_DATA(rta); | ||
513 | enckeylen = be32_to_cpu(param->enckeylen); | ||
514 | |||
515 | key += RTA_ALIGN(rta->rta_len); | ||
516 | keylen -= RTA_ALIGN(rta->rta_len); | ||
517 | |||
518 | if (keylen < enckeylen) | ||
519 | goto badkey; | ||
520 | |||
521 | authkeylen = keylen - enckeylen; | ||
522 | |||
523 | if (enckeylen > AES_MAX_KEY_SIZE) | ||
524 | goto badkey; | 508 | goto badkey; |
525 | 509 | ||
526 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | 510 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == |
527 | SPA_CTRL_CIPH_ALG_AES) | 511 | SPA_CTRL_CIPH_ALG_AES) |
528 | err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); | 512 | err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); |
529 | else | 513 | else |
530 | err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); | 514 | err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); |
531 | 515 | ||
532 | if (err) | 516 | if (err) |
533 | goto badkey; | 517 | goto badkey; |
534 | 518 | ||
535 | memcpy(ctx->hash_ctx, key, authkeylen); | 519 | memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); |
536 | ctx->hash_key_len = authkeylen; | 520 | ctx->hash_key_len = keys.authkeylen; |
537 | 521 | ||
538 | return 0; | 522 | return 0; |
539 | 523 | ||
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d7bb8bac36e9..785a9ded7bdf 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = { | |||
1058 | .driver = { | 1058 | .driver = { |
1059 | .name = SAHARA_NAME, | 1059 | .name = SAHARA_NAME, |
1060 | .owner = THIS_MODULE, | 1060 | .owner = THIS_MODULE, |
1061 | .of_match_table = of_match_ptr(sahara_dt_ids), | 1061 | .of_match_table = sahara_dt_ids, |
1062 | }, | 1062 | }, |
1063 | .id_table = sahara_platform_ids, | 1063 | .id_table = sahara_platform_ids, |
1064 | }; | 1064 | }; |
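Dropping of_match_ptr() in mv_cesa and sahara is more than cosmetic: the macro evaluates to NULL when CONFIG_OF is unset, which would leave the unconditionally-built match table unreferenced and provoke a defined-but-unused warning. Because these tables are always compiled in, referencing them directly is the simpler form; for reference, the macro is just:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif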
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6cd0e6038583..b44f4ddc565c 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc, | |||
673 | const u8 *key, unsigned int keylen) | 673 | const u8 *key, unsigned int keylen) |
674 | { | 674 | { |
675 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 675 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
676 | struct rtattr *rta = (void *)key; | 676 | struct crypto_authenc_keys keys; |
677 | struct crypto_authenc_key_param *param; | ||
678 | unsigned int authkeylen; | ||
679 | unsigned int enckeylen; | ||
680 | |||
681 | if (!RTA_OK(rta, keylen)) | ||
682 | goto badkey; | ||
683 | 677 | ||
684 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 678 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
685 | goto badkey; | 679 | goto badkey; |
686 | 680 | ||
687 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 681 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) |
688 | goto badkey; | 682 | goto badkey; |
689 | 683 | ||
690 | param = RTA_DATA(rta); | 684 | memcpy(ctx->key, keys.authkey, keys.authkeylen); |
691 | enckeylen = be32_to_cpu(param->enckeylen); | 685 | memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); |
692 | |||
693 | key += RTA_ALIGN(rta->rta_len); | ||
694 | keylen -= RTA_ALIGN(rta->rta_len); | ||
695 | 686 | ||
696 | if (keylen < enckeylen) | 687 | ctx->keylen = keys.authkeylen + keys.enckeylen; |
697 | goto badkey; | 688 | ctx->enckeylen = keys.enckeylen; |
698 | 689 | ctx->authkeylen = keys.authkeylen; | |
699 | authkeylen = keylen - enckeylen; | ||
700 | |||
701 | if (keylen > TALITOS_MAX_KEY_SIZE) | ||
702 | goto badkey; | ||
703 | |||
704 | memcpy(&ctx->key, key, keylen); | ||
705 | |||
706 | ctx->keylen = keylen; | ||
707 | ctx->enckeylen = enckeylen; | ||
708 | ctx->authkeylen = authkeylen; | ||
709 | 690 | ||
710 | return 0; | 691 | return 0; |
711 | 692 | ||
@@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev, | |||
809 | 790 | ||
810 | if (edesc->assoc_chained) | 791 | if (edesc->assoc_chained) |
811 | talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); | 792 | talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); |
812 | else | 793 | else if (areq->assoclen) |
813 | /* assoc_nents counts also for IV in non-contiguous cases */ | 794 | /* assoc_nents counts also for IV in non-contiguous cases */ |
814 | dma_unmap_sg(dev, areq->assoc, | 795 | dma_unmap_sg(dev, areq->assoc, |
815 | edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, | 796 | edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, |
@@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
992 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 973 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
993 | edesc->dma_len, DMA_BIDIRECTIONAL); | 974 | edesc->dma_len, DMA_BIDIRECTIONAL); |
994 | } else { | 975 | } else { |
995 | to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc)); | 976 | if (areq->assoclen) |
977 | to_talitos_ptr(&desc->ptr[1], | ||
978 | sg_dma_address(areq->assoc)); | ||
979 | else | ||
980 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); | ||
996 | desc->ptr[1].j_extent = 0; | 981 | desc->ptr[1].j_extent = 0; |
997 | } | 982 | } |
998 | 983 | ||
@@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1127 | unsigned int authsize, | 1112 | unsigned int authsize, |
1128 | unsigned int ivsize, | 1113 | unsigned int ivsize, |
1129 | int icv_stashing, | 1114 | int icv_stashing, |
1130 | u32 cryptoflags) | 1115 | u32 cryptoflags, |
1116 | bool encrypt) | ||
1131 | { | 1117 | { |
1132 | struct talitos_edesc *edesc; | 1118 | struct talitos_edesc *edesc; |
1133 | int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; | 1119 | int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; |
@@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1141 | return ERR_PTR(-EINVAL); | 1127 | return ERR_PTR(-EINVAL); |
1142 | } | 1128 | } |
1143 | 1129 | ||
1144 | if (iv) | 1130 | if (ivsize) |
1145 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | 1131 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
1146 | 1132 | ||
1147 | if (assoc) { | 1133 | if (assoclen) { |
1148 | /* | 1134 | /* |
1149 | * Currently it is assumed that iv is provided whenever assoc | 1135 | * Currently it is assumed that iv is provided whenever assoc |
1150 | * is. | 1136 | * is. |
@@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1160 | assoc_nents = assoc_nents ? assoc_nents + 1 : 2; | 1146 | assoc_nents = assoc_nents ? assoc_nents + 1 : 2; |
1161 | } | 1147 | } |
1162 | 1148 | ||
1163 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); | 1149 | if (!dst || dst == src) { |
1164 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1150 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1165 | 1151 | src_nents = (src_nents == 1) ? 0 : src_nents; | |
1166 | if (!dst) { | 1152 | dst_nents = dst ? src_nents : 0; |
1167 | dst_nents = 0; | 1153 | } else { /* dst && dst != src*/ |
1168 | } else { | 1154 | } else { /* dst && dst != src */
1169 | if (dst == src) { | 1155 | &src_chained); |
1170 | dst_nents = src_nents; | 1156 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1171 | } else { | 1157 | dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0), |
1172 | dst_nents = sg_count(dst, cryptlen + authsize, | 1158 | &dst_chained); |
1173 | &dst_chained); | 1159 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1174 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | ||
1175 | } | ||
1176 | } | 1160 | } |
1177 | 1161 | ||
1178 | /* | 1162 | /* |
@@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1192 | 1176 | ||
1193 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1177 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1194 | if (!edesc) { | 1178 | if (!edesc) { |
1195 | talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); | 1179 | if (assoc_chained) |
1180 | talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); | ||
1181 | else if (assoclen) | ||
1182 | dma_unmap_sg(dev, assoc, | ||
1183 | assoc_nents ? assoc_nents - 1 : 1, | ||
1184 | DMA_TO_DEVICE); | ||
1185 | |||
1196 | if (iv_dma) | 1186 | if (iv_dma) |
1197 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 1187 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
1188 | |||
1198 | dev_err(dev, "could not allocate edescriptor\n"); | 1189 | dev_err(dev, "could not allocate edescriptor\n"); |
1199 | return ERR_PTR(-ENOMEM); | 1190 | return ERR_PTR(-ENOMEM); |
1200 | } | 1191 | } |
@@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1216 | } | 1207 | } |
1217 | 1208 | ||
1218 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | 1209 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
1219 | int icv_stashing) | 1210 | int icv_stashing, bool encrypt) |
1220 | { | 1211 | { |
1221 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1212 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1222 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1213 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
@@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | |||
1225 | return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, | 1216 | return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, |
1226 | iv, areq->assoclen, areq->cryptlen, | 1217 | iv, areq->assoclen, areq->cryptlen, |
1227 | ctx->authsize, ivsize, icv_stashing, | 1218 | ctx->authsize, ivsize, icv_stashing, |
1228 | areq->base.flags); | 1219 | areq->base.flags, encrypt); |
1229 | } | 1220 | } |
1230 | 1221 | ||
1231 | static int aead_encrypt(struct aead_request *req) | 1222 | static int aead_encrypt(struct aead_request *req) |
@@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req) | |||
1235 | struct talitos_edesc *edesc; | 1226 | struct talitos_edesc *edesc; |
1236 | 1227 | ||
1237 | /* allocate extended descriptor */ | 1228 | /* allocate extended descriptor */ |
1238 | edesc = aead_edesc_alloc(req, req->iv, 0); | 1229 | edesc = aead_edesc_alloc(req, req->iv, 0, true); |
1239 | if (IS_ERR(edesc)) | 1230 | if (IS_ERR(edesc)) |
1240 | return PTR_ERR(edesc); | 1231 | return PTR_ERR(edesc); |
1241 | 1232 | ||
@@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1258 | req->cryptlen -= authsize; | 1249 | req->cryptlen -= authsize; |
1259 | 1250 | ||
1260 | /* allocate extended descriptor */ | 1251 | /* allocate extended descriptor */ |
1261 | edesc = aead_edesc_alloc(req, req->iv, 1); | 1252 | edesc = aead_edesc_alloc(req, req->iv, 1, false); |
1262 | if (IS_ERR(edesc)) | 1253 | if (IS_ERR(edesc)) |
1263 | return PTR_ERR(edesc); | 1254 | return PTR_ERR(edesc); |
1264 | 1255 | ||
@@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) | |||
1304 | struct talitos_edesc *edesc; | 1295 | struct talitos_edesc *edesc; |
1305 | 1296 | ||
1306 | /* allocate extended descriptor */ | 1297 | /* allocate extended descriptor */ |
1307 | edesc = aead_edesc_alloc(areq, req->giv, 0); | 1298 | edesc = aead_edesc_alloc(areq, req->giv, 0, true); |
1308 | if (IS_ERR(edesc)) | 1299 | if (IS_ERR(edesc)) |
1309 | return PTR_ERR(edesc); | 1300 | return PTR_ERR(edesc); |
1310 | 1301 | ||
@@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1460 | } | 1451 | } |
1461 | 1452 | ||
1462 | static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | 1453 | static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * |
1463 | areq) | 1454 | areq, bool encrypt) |
1464 | { | 1455 | { |
1465 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1456 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1466 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1457 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
@@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | |||
1468 | 1459 | ||
1469 | return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, | 1460 | return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, |
1470 | areq->info, 0, areq->nbytes, 0, ivsize, 0, | 1461 | areq->info, 0, areq->nbytes, 0, ivsize, 0, |
1471 | areq->base.flags); | 1462 | areq->base.flags, encrypt); |
1472 | } | 1463 | } |
1473 | 1464 | ||
1474 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | 1465 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) |
@@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) | |||
1478 | struct talitos_edesc *edesc; | 1469 | struct talitos_edesc *edesc; |
1479 | 1470 | ||
1480 | /* allocate extended descriptor */ | 1471 | /* allocate extended descriptor */ |
1481 | edesc = ablkcipher_edesc_alloc(areq); | 1472 | edesc = ablkcipher_edesc_alloc(areq, true); |
1482 | if (IS_ERR(edesc)) | 1473 | if (IS_ERR(edesc)) |
1483 | return PTR_ERR(edesc); | 1474 | return PTR_ERR(edesc); |
1484 | 1475 | ||
@@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1495 | struct talitos_edesc *edesc; | 1486 | struct talitos_edesc *edesc; |
1496 | 1487 | ||
1497 | /* allocate extended descriptor */ | 1488 | /* allocate extended descriptor */ |
1498 | edesc = ablkcipher_edesc_alloc(areq); | 1489 | edesc = ablkcipher_edesc_alloc(areq, false); |
1499 | if (IS_ERR(edesc)) | 1490 | if (IS_ERR(edesc)) |
1500 | return PTR_ERR(edesc); | 1491 | return PTR_ERR(edesc); |
1501 | 1492 | ||
@@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | |||
1647 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1638 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1648 | 1639 | ||
1649 | return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, | 1640 | return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, |
1650 | nbytes, 0, 0, 0, areq->base.flags); | 1641 | nbytes, 0, 0, 0, areq->base.flags, false); |
1651 | } | 1642 | } |
1652 | 1643 | ||
1653 | static int ahash_init(struct ahash_request *areq) | 1644 | static int ahash_init(struct ahash_request *areq) |
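[Editor's note] The same flag is threaded through ablkcipher_edesc_alloc(), while the hash path hard-codes false since a digest request carries no ICV. The payoff is inside talitos_edesc_alloc(), whose changed body appears earlier in this patch: on encryption the ICV is appended to the output, on decryption it arrives with the input, so the two scatterlists must be measured differently. A minimal sketch of that idea, assuming the driver's sg_count() helper; this is the shape of the fix, not the verbatim driver code:

    /* Inside talitos_edesc_alloc() (sketch): size each side according
     * to where the authentication tag (ICV) actually lives. */
    unsigned int src_len = cryptlen + (encrypt ? 0 : authsize);
    unsigned int dst_len = cryptlen + (encrypt ? authsize : 0);

    src_nents = sg_count(src, src_len, &src_chained);
    if (dst && dst != src)
            dst_nents = sg_count(dst, dst_len, &dst_chained);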
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index fa05e3c329bd..060eecc5dbc3 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c | |||
@@ -27,6 +27,8 @@ | |||
27 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 27 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
30 | #include <linux/module.h> | 32 | #include <linux/module.h> |
31 | #include <linux/init.h> | 33 | #include <linux/init.h> |
32 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
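[Editor's note] Defining pr_fmt before the first include makes every pr_*() call in the file prefix its message with the module name, which matters once the driver starts using pr_err() below. Roughly (the exact prefix comes from KBUILD_MODNAME, so the log line shown is illustrative):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    /* pr_err("ctx=0x%x, dd=0x%x\n", ...) now lands in the log as e.g.
     *   tegra_aes: ctx=0x0, dd=0x0
     * instead of an unattributed message. */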
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work); | |||
199 | static DECLARE_WORK(aes_work, aes_workqueue_handler); | 201 | static DECLARE_WORK(aes_work, aes_workqueue_handler); |
200 | static struct workqueue_struct *aes_wq; | 202 | static struct workqueue_struct *aes_wq; |
201 | 203 | ||
202 | extern unsigned long long tegra_chip_uid(void); | ||
203 | |||
204 | static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) | 204 | static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) |
205 | { | 205 | { |
206 | return readl(dd->io_base + offset); | 206 | return readl(dd->io_base + offset); |
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
713 | struct tegra_aes_dev *dd = aes_dev; | 713 | struct tegra_aes_dev *dd = aes_dev; |
714 | struct tegra_aes_ctx *ctx = &rng_ctx; | 714 | struct tegra_aes_ctx *ctx = &rng_ctx; |
715 | struct tegra_aes_slot *key_slot; | 715 | struct tegra_aes_slot *key_slot; |
716 | struct timespec ts; | ||
717 | int ret = 0; | 716 | int ret = 0; |
718 | u64 nsec, tmp[2]; | 717 | u8 tmp[16]; /* 16 bytes = 128 bits of entropy */ |
719 | u8 *dt; | 718 | u8 *dt; |
720 | 719 | ||
721 | if (!ctx || !dd) { | 720 | if (!ctx || !dd) { |
722 | dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", | 721 | pr_err("ctx=0x%x, dd=0x%x\n", |
723 | (unsigned int)ctx, (unsigned int)dd); | 722 | (unsigned int)ctx, (unsigned int)dd); |
724 | return -EINVAL; | 723 | return -EINVAL; |
725 | } | 724 | } |
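[Editor's note] The switch from dev_err() to pr_err() here is a bug fix, not cosmetics: the branch runs precisely when dd may be NULL, and dev_err(dd->dev, ...) would dereference it while trying to report that fact. The fixed path, with the reasoning as comments:

    if (!ctx || !dd) {
            /* Must not touch dd->dev here: dd can be NULL in this
             * branch. pr_err() needs no device and still gets the
             * pr_fmt() prefix added above. */
            pr_err("ctx=0x%x, dd=0x%x\n",
                   (unsigned int)ctx, (unsigned int)dd);
            return -EINVAL;
    }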
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
778 | if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | 777 | if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { |
779 | dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; | 778 | dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; |
780 | } else { | 779 | } else { |
781 | getnstimeofday(&ts); | 780 | get_random_bytes(tmp, sizeof(tmp)); |
782 | nsec = timespec_to_ns(&ts); | 781 | dt = tmp; |
783 | do_div(nsec, 1000); | ||
784 | nsec ^= dd->ctr << 56; | ||
785 | dd->ctr++; | ||
786 | tmp[0] = nsec; | ||
787 | tmp[1] = tegra_chip_uid(); | ||
788 | dt = (u8 *)tmp; | ||
789 | } | 782 | } |
790 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); | 783 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); |
791 | 784 | ||
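[Editor's note] When no DT (date/time) vector can be recovered from the supplied seed, the old fallback derived one from the wall clock in microseconds, a counter XORed into the top byte, and tegra_chip_uid() — all estimable or readable by an attacker. That is also why the file-local extern for tegra_chip_uid() disappears in the hunk further up: its only user is gone. The replacement simply draws a block from the kernel entropy pool. A condensed sketch of the new fallback (get_random_bytes() is the stock kernel API):

    #include <linux/random.h>

    u8 tmp[16];     /* one 128-bit block for the X9.31-style DT vector */

    /* Seed too short to carry a DT vector: pull one from the entropy
     * pool rather than from guessable time/UID material. */
    get_random_bytes(tmp, sizeof(tmp));
    dt = tmp;
    memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);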
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm) | |||
804 | return 0; | 797 | return 0; |
805 | } | 798 | } |
806 | 799 | ||
807 | void tegra_aes_cra_exit(struct crypto_tfm *tfm) | 800 | static void tegra_aes_cra_exit(struct crypto_tfm *tfm) |
808 | { | 801 | { |
809 | struct tegra_aes_ctx *ctx = | 802 | struct tegra_aes_ctx *ctx = |
810 | crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); | 803 | crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); |
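[Editor's note] tegra_aes_cra_exit() is referenced only within this file (via the algorithm template), so it gets internal linkage; an unqualified definition like the old one is the sort of thing sparse flags with "should it be static?". The distinction in one line:

    /* Internal linkage: resolved within tegra-aes.o only, no prototype
     * needed in a header, no clash with other global symbols. */
    static void tegra_aes_cra_exit(struct crypto_tfm *tfm);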
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev) | |||
924 | } | 917 | } |
925 | 918 | ||
926 | /* Initialize the vde clock */ | 919 | /* Initialize the vde clock */ |
927 | dd->aes_clk = clk_get(dev, "vde"); | 920 | dd->aes_clk = devm_clk_get(dev, "vde"); |
928 | if (IS_ERR(dd->aes_clk)) { | 921 | if (IS_ERR(dd->aes_clk)) { |
929 | dev_err(dev, "iclock intialization failed.\n"); | 922 | dev_err(dev, "iclock intialization failed.\n"); |
930 | err = -ENODEV; | 923 | err = -ENODEV; |
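[Editor's note] devm_clk_get() ties the clock reference to the device's lifetime: the driver core releases it automatically when probe fails or the device is unbound, which is why the explicit clk_put() calls vanish from the error path and from tegra_aes_remove() below. The managed pattern in isolation (a sketch; the driver itself keeps its err/goto convention):

    #include <linux/clk.h>
    #include <linux/device.h>

    dd->aes_clk = devm_clk_get(dev, "vde");
    if (IS_ERR(dd->aes_clk)) {
            dev_err(dev, "could not get the vde clock\n");
            return PTR_ERR(dd->aes_clk);  /* no clk_put() needed anywhere */
    }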
@@ -1033,8 +1026,6 @@ out: | |||
1033 | if (dd->buf_out) | 1026 | if (dd->buf_out) |
1034 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | 1027 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, |
1035 | dd->buf_out, dd->dma_buf_out); | 1028 | dd->buf_out, dd->dma_buf_out); |
1036 | if (!IS_ERR(dd->aes_clk)) | ||
1037 | clk_put(dd->aes_clk); | ||
1038 | if (aes_wq) | 1029 | if (aes_wq) |
1039 | destroy_workqueue(aes_wq); | 1030 | destroy_workqueue(aes_wq); |
1040 | spin_lock(&list_lock); | 1031 | spin_lock(&list_lock); |
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev) | |||
1068 | dd->buf_in, dd->dma_buf_in); | 1059 | dd->buf_in, dd->dma_buf_in); |
1069 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | 1060 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, |
1070 | dd->buf_out, dd->dma_buf_out); | 1061 | dd->buf_out, dd->dma_buf_out); |
1071 | clk_put(dd->aes_clk); | ||
1072 | aes_dev = NULL; | 1062 | aes_dev = NULL; |
1073 | 1063 | ||
1074 | return 0; | 1064 | return 0; |
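[Editor's note] With the clock managed, the unwind paths only free what probe still allocates by hand, and nothing in ->remove() touches the clock any more. The net effect, abbreviated and illustrative rather than the full function:

    static int tegra_aes_remove(struct platform_device *pdev)
    {
            /* ... flush the work, destroy the workqueue,
             * dma_free_coherent() the in/out buffers ... */
            aes_dev = NULL;
            return 0;       /* devm releases the "vde" clock for us */
    }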