Diffstat (limited to 'drivers/crypto/nx/nx-sha512.c')
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 222 ++++++++++++++++++----------------------
 1 file changed, 100 insertions(+), 122 deletions(-)
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5..b3adf1022673 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_sg *out_sg;
+	int len;
+	int rc;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  (u8 *)sctx->state,
+				  NX_DS_SHA512);
+
+	if (rc || len != SHA512_DIGEST_SIZE)
+		goto out;
+
+	sctx->state[0] = __cpu_to_be64(SHA512_H0);
+	sctx->state[1] = __cpu_to_be64(SHA512_H1);
+	sctx->state[2] = __cpu_to_be64(SHA512_H2);
+	sctx->state[3] = __cpu_to_be64(SHA512_H3);
+	sctx->state[4] = __cpu_to_be64(SHA512_H4);
+	sctx->state[5] = __cpu_to_be64(SHA512_H5);
+	sctx->state[6] = __cpu_to_be64(SHA512_H6);
+	sctx->state[7] = __cpu_to_be64(SHA512_H7);
+	sctx->count[0] = 0;
+
+out:
 	return 0;
 }
 
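Note: the rewritten init no longer leaves the initial digest to the coprocessor; it seeds sctx->state with the standard SHA-512 initial hash words itself and zeroes the byte count. A minimal sketch of that seeding as a standalone helper follows (the helper name is hypothetical; SHA512_H0..SHA512_H7 and struct sha512_state come from <crypto/sha.h>). The NX unit consumes and produces the running digest big-endian, hence the __cpu_to_be64(); on big-endian powerpc, where this driver runs, the swap is a no-op, which keeps the exported layout identical to the generic driver's.

    #include <crypto/sha.h>			/* SHA512_H0..H7, struct sha512_state */
    #include <linux/byteorder/generic.h>	/* __cpu_to_be64 */

    /* Hypothetical helper: the same seeding the patched nx_sha512_init()
     * performs inline, written as a loop over the eight initial words. */
    static void nx_sha512_seed_state(struct sha512_state *sctx)
    {
    	static const u64 iv[8] = {
    		SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
    		SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
    	};
    	int i;

    	for (i = 0; i < 8; i++)
    		sctx->state[i] = __cpu_to_be64(iv[i]);
    	sctx->count[0] = 0;
    }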
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
-	u64 to_process, leftover, total, spbc_bits;
-	u32 max_sg_len;
+	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
+	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
 	 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	total = sctx->count[0] + len;
+	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
 	if (total < SHA512_BLOCK_SIZE) {
-		memcpy(sctx->buf + sctx->count[0], data, len);
+		memcpy(sctx->buf + buf_len, data, len);
 		sctx->count[0] += len;
 		goto out;
 	}
 
-	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
+	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 	do {
 		/*
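Note: the `% SHA512_BLOCK_SIZE` is the heart of this hunk: sctx->count[0] now accumulates the total bytes hashed (see the end of this function), so the carried-over buffer length must be derived from it rather than read directly. A standalone model of the resulting split between buffered bytes and coprocessor work, using made-up input sizes:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHA512_BLOCK_SIZE 128

    /* Standalone model (no kernel dependencies) of the update-path split:
     * how many bytes go to the coprocessor and how many stay buffered. */
    int main(void)
    {
    	uint64_t count = 200;	/* sctx->count[0]: bytes hashed so far */
    	uint64_t len = 300;	/* bytes handed to this ->update() call */

    	uint64_t buf_len = count % SHA512_BLOCK_SIZE;	/* 72 carried over */
    	uint64_t total = buf_len + len;			/* 372 */

    	/* the chip only takes whole 128-byte blocks */
    	uint64_t to_process = total & ~(uint64_t)(SHA512_BLOCK_SIZE - 1);
    	uint64_t leftover = total - to_process;

    	/* prints: to_process=256 leftover=116 */
    	printf("to_process=%" PRIu64 " leftover=%" PRIu64 "\n",
    	       to_process, leftover);
    	return 0;
    }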
80 | do { | 99 | do { |
81 | /* | 100 | /* |
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, | |||
83 | * this update. This value is also restricted by the sg list | 102 | * this update. This value is also restricted by the sg list |
84 | * limits. | 103 | * limits. |
85 | */ | 104 | */ |
86 | to_process = min_t(u64, total, nx_ctx->ap->databytelen); | 105 | to_process = total - leftover; |
87 | to_process = min_t(u64, to_process, | ||
88 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
89 | to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); | 106 | to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); |
90 | leftover = total - to_process; | 107 | leftover = total - to_process; |
91 | 108 | ||
92 | if (sctx->count[0]) { | 109 | if (buf_len) { |
93 | in_sg = nx_build_sg_list(nx_ctx->in_sg, | 110 | data_len = buf_len; |
94 | (u8 *) sctx->buf, | 111 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, |
95 | sctx->count[0], max_sg_len); | 112 | &nx_ctx->op.inlen, |
113 | &data_len, | ||
114 | (u8 *) sctx->buf, | ||
115 | NX_DS_SHA512); | ||
116 | |||
117 | if (rc || data_len != buf_len) | ||
118 | goto out; | ||
96 | } | 119 | } |
97 | in_sg = nx_build_sg_list(in_sg, (u8 *) data, | 120 | |
98 | to_process - sctx->count[0], | 121 | data_len = to_process - buf_len; |
99 | max_sg_len); | 122 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, |
100 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | 123 | &nx_ctx->op.inlen, |
101 | sizeof(struct nx_sg); | 124 | &data_len, |
102 | 125 | (u8 *) data, | |
103 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 126 | NX_DS_SHA512); |
104 | /* | 127 | |
105 | * we've hit the nx chip previously and we're updating | 128 | if (rc || data_len != (to_process - buf_len)) |
106 | * again, so copy over the partial digest. | 129 | goto out; |
107 | */ | 130 | |
108 | memcpy(csbcpb->cpb.sha512.input_partial_digest, | 131 | to_process = (data_len + buf_len); |
132 | leftover = total - to_process; | ||
133 | |||
134 | /* | ||
135 | * we've hit the nx chip previously and we're updating | ||
136 | * again, so copy over the partial digest. | ||
137 | */ | ||
138 | memcpy(csbcpb->cpb.sha512.input_partial_digest, | ||
109 | csbcpb->cpb.sha512.message_digest, | 139 | csbcpb->cpb.sha512.message_digest, |
110 | SHA512_DIGEST_SIZE); | 140 | SHA512_DIGEST_SIZE); |
111 | } | ||
112 | 141 | ||
113 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
114 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 142 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { |
115 | rc = -EINVAL; | 143 | rc = -EINVAL; |
116 | goto out; | 144 | goto out; |
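Note: all three nx_sha_build_sg_list() call sites in this patch follow the same contract, which the checks above imply: the helper (added to nx.c in this series) builds the scatter/gather list, stores the resulting sg byte length through the op.inlen/op.outlen pointer, and writes back through the length pointer how many bytes it could actually map, so a shortened length must abort the operation. A standalone stub that models just that length behaviour:

    #include <stdio.h>

    /* Stand-in for nx_sha_build_sg_list(): real sg building is elided,
     * only the "may map fewer bytes than asked" behaviour is modelled. */
    static int build_sg_list_stub(int *len)
    {
    	const int sg_capacity = 4096;	/* pretend hardware sg limit */

    	if (*len > sg_capacity)
    		*len = sg_capacity;	/* clamp, then still return success */
    	return 0;
    }

    int main(void)
    {
    	int want = 8192;
    	int len = want;
    	int rc = build_sg_list_stub(&len);

    	/* the pattern used at every call site in this patch */
    	if (rc || len != want)
    		printf("short mapping (%d of %d) -> goto out\n", len, want);
    	return 0;
    }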
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha512_ops));
-		spbc_bits = csbcpb->cpb.sha512.spbc * 8;
-		csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
-		if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
-			csbcpb->cpb.sha512.message_bit_length_hi++;
-
-		/* everything after the first update is continuation */
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process - sctx->count[0];
-		sctx->count[0] = 0;
-		in_sg = nx_ctx->in_sg;
+		data += to_process - buf_len;
+		buf_len = 0;
+
 	} while (leftover >= SHA512_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
 		memcpy(sctx->buf, data, leftover);
-	sctx->count[0] = leftover;
+	sctx->count[0] += len;
+	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
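Note: two bookkeeping changes land here. First, the running digest now round-trips through sctx->state (loaded into the CPB before the loop, saved back after it) rather than living in the CPB between calls, which is what lets export/import below collapse to plain copies. Second, `sctx->count[0] += len` makes the count a running total of all bytes hashed, where the old code kept only the buffered remainder; every user of the buffer length now derives it as count % SHA512_BLOCK_SIZE. A standalone model of that invariant:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHA512_BLOCK_SIZE 128

    /* Model of the new byte accounting: count accumulates every byte fed
     * to ->update(), and the buffered tail is always count % block size. */
    int main(void)
    {
    	uint64_t count = 0;
    	uint64_t updates[] = { 20, 200, 300, 7 };
    	int i;

    	for (i = 0; i < 4; i++) {
    		count += updates[i];		/* sctx->count[0] += len */
    		printf("total=%" PRIu64 " buffered=%" PRIu64 "\n",
    		       count, count % SHA512_BLOCK_SIZE);
    	}
    	return 0;
    }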
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg, *out_sg;
-	u32 max_sg_len;
 	u64 count0;
 	unsigned long irq_flags;
 	int rc;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha512.input_partial_digest,
-		       csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+		       SHA512_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 	}
 
-	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
 	count0 = sctx->count[0] * 8;
 
-	csbcpb->cpb.sha512.message_bit_length_lo += count0;
-	if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
-		csbcpb->cpb.sha512.message_bit_length_hi++;
+	csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
-				 max_sg_len);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
-				  max_sg_len);
-	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+				  &nx_ctx->op.inlen,
+				  &len,
+				  (u8 *)sctx->buf,
+				  NX_DS_SHA512);
+
+	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+		goto out;
+
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  out,
+				  NX_DS_SHA512);
+
+	if (rc)
+		goto out;
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
-		     &(nx_ctx->stats->sha512_bytes));
+	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
 
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
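Note: with sctx->count[0] holding the total byte count, final() fills in the CPB message bit length with a single assignment (count * 8) instead of the old running add-with-carry, the tail to feed the chip is `count & (SHA512_BLOCK_SIZE - 1)`, and whether this finalization continues earlier hardware work falls out of `count >= SHA512_BLOCK_SIZE`. A standalone model of those three derivations (messages long enough to need count[1] and message_bit_length_hi are ignored here):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHA512_BLOCK_SIZE 128

    int main(void)
    {
    	uint64_t count = 527;	/* total bytes hashed, from sctx->count[0] */

    	uint64_t bit_length_lo = count * 8;		  /* 4216 */
    	uint64_t tail = count & (SHA512_BLOCK_SIZE - 1);  /* 15 */
    	int continuation = count >= SHA512_BLOCK_SIZE;	  /* 1: chip was used */

    	printf("bits=%" PRIu64 " tail=%" PRIu64 " continuation=%d\n",
    	       bit_length_lo, tail, continuation);
    	return 0;
    }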
@@ -207,74 +241,18 @@ out:
 static int nx_sha512_export(struct shash_desc *desc, void *out)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct sha512_state *octx = out;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+	memcpy(out, sctx, sizeof(*sctx));
 
-	/* move message_bit_length (128 bits) into count and convert its value
-	 * to bytes */
-	octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
-			 ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
-	octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
-
-	octx->count[0] += sctx->count[0];
-	if (octx->count[0] < sctx->count[0])
-		octx->count[1]++;
-
-	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
-	/* if no data has been processed yet, we need to export SHA512's
-	 * initial data, in case this context gets imported into a software
-	 * context */
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo)
-		memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
-		       SHA512_DIGEST_SIZE);
-	else {
-		octx->state[0] = SHA512_H0;
-		octx->state[1] = SHA512_H1;
-		octx->state[2] = SHA512_H2;
-		octx->state[3] = SHA512_H3;
-		octx->state[4] = SHA512_H4;
-		octx->state[5] = SHA512_H5;
-		octx->state[6] = SHA512_H6;
-		octx->state[7] = SHA512_H7;
-	}
-
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
 static int nx_sha512_import(struct shash_desc *desc, const void *in)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	const struct sha512_state *ictx = in;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-
-	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
-	sctx->count[0] = ictx->count[0] & 0x3f;
-	csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
-							<< 3;
-	csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
-						   ictx->count[0] >> 61;
-
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo) {
-		memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
-		       SHA512_DIGEST_SIZE);
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-	}
+	memcpy(sctx, in, sizeof(*sctx));
 
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
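Note: export and import collapse to plain copies because struct sha512_state now carries everything: the big-endian running digest, the total byte count, and the buffered tail. Nothing has to be reconstructed from, or pushed back into, the CPB. A hedged sketch (hypothetical function name, descriptor setup assumed done elsewhere) of the handoff this enables, moving a partially hashed stream from one sha512 shash descriptor to another via the standard export/import calls:

    #include <crypto/hash.h>
    #include <crypto/sha.h>
    #include <linux/string.h>

    /* Hypothetical illustration: hash part of a stream on the NX
     * descriptor, export the plain sha512_state blob, and resume on a
     * software descriptor.  Error handling trimmed to the essentials. */
    static int sha512_handoff(struct shash_desc *nx_desc,
    			  struct shash_desc *sw_desc,
    			  const u8 *a, unsigned int alen,
    			  const u8 *b, unsigned int blen,
    			  u8 *digest)
    {
    	struct sha512_state state;
    	int rc;

    	rc = crypto_shash_update(nx_desc, a, alen);	/* hash on NX */
    	if (!rc)
    		rc = crypto_shash_export(nx_desc, &state);
    	if (!rc)
    		rc = crypto_shash_import(sw_desc, &state); /* resume in sw */
    	if (!rc)
    		rc = crypto_shash_update(sw_desc, b, blen);
    	if (!rc)
    		rc = crypto_shash_final(sw_desc, digest);

    	memset(&state, 0, sizeof(state));	/* don't leak hash state */
    	return rc;
    }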