author     Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>  2014-10-28 13:49:46 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2014-11-06 10:15:02 -0500
commit     000851119e80edd46443250a1c89d3c45cd6eeca (patch)
tree       10324f2a435198b3486fd74db86acbf4b057c8d8 /drivers/crypto
parent     5313231ac9a4334ded1dc205aac60dd63c62ff1d (diff)
crypto: nx - Fix SHA concurrence issue and sg limit bounds
The NX SHA algorithms store the message digest in the tfm context, which causes a concurrency issue where one hash may be overwritten by another. This patch cleans up the places where shared variables in the nx context were used unnecessarily and copies the current message digest into sctx->state, so that each request's hash state is handled safely. It also fixes and cleans up the sg max limit and the bounds applied to the sg list, avoiding a memory crash.

Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
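To make the race concrete, below is a minimal, hypothetical C sketch of the pattern the patch moves to: the running digest lives in the per-request descriptor state (sctx->state) and is only staged into the shared hardware control block for the duration of a single operation, instead of being kept permanently in state reachable through the tfm. The structure and function names here are illustrative only, not the driver's.

/* Hypothetical sketch of the per-request digest pattern; not driver code. */
#include <stdint.h>
#include <string.h>

#define DIGEST_WORDS 8

struct shared_hw_ctx {            /* shared by every request on the tfm */
	uint32_t message_digest[DIGEST_WORDS];
};

struct request_state {            /* private to one hash request */
	uint32_t state[DIGEST_WORDS];
	uint64_t count;
};

static void update_one_block(struct shared_hw_ctx *hw,
			     struct request_state *sctx)
{
	/* stage this request's own digest for the hardware call ... */
	memcpy(hw->message_digest, sctx->state, sizeof(sctx->state));

	/* ... the hardware operation runs here, updating hw->message_digest ... */

	/* ... then copy the result straight back into the request's own state,
	 * so a concurrent request on the same tfm cannot clobber it. */
	memcpy(sctx->state, hw->message_digest, sizeof(sctx->state));
}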
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/nx/nx-sha256.c  208
-rw-r--r--  drivers/crypto/nx/nx-sha512.c  222
2 files changed, 200 insertions(+), 230 deletions(-)
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index da0b24a7633f..23621da624c3 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -23,6 +23,7 @@
 #include <crypto/sha.h>
 #include <linux/module.h>
 #include <asm/vio.h>
+#include <asm/byteorder.h>
 
 #include "nx_csbcpb.h"
 #include "nx.h"
@@ -32,7 +33,8 @@ static int nx_sha256_init(struct shash_desc *desc)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_sg *out_sg;
+	int len;
+	int rc;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -41,10 +43,28 @@ static int nx_sha256_init(struct shash_desc *desc)
 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	len = SHA256_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  (u8 *) sctx->state,
+				  NX_DS_SHA256);
+
+	if (rc)
+		goto out;
+
+	sctx->state[0] = __cpu_to_be32(SHA256_H0);
+	sctx->state[1] = __cpu_to_be32(SHA256_H1);
+	sctx->state[2] = __cpu_to_be32(SHA256_H2);
+	sctx->state[3] = __cpu_to_be32(SHA256_H3);
+	sctx->state[4] = __cpu_to_be32(SHA256_H4);
+	sctx->state[5] = __cpu_to_be32(SHA256_H5);
+	sctx->state[6] = __cpu_to_be32(SHA256_H6);
+	sctx->state[7] = __cpu_to_be32(SHA256_H7);
+	sctx->count = 0;
+
+out:
 	return 0;
 }
 
@@ -54,11 +74,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
-	u64 to_process, leftover, total;
-	u32 max_sg_len;
+	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
+	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -66,16 +86,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
 	 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	total = sctx->count + len;
+	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
 	if (total < SHA256_BLOCK_SIZE) {
-		memcpy(sctx->buf + sctx->count, data, len);
+		memcpy(sctx->buf + buf_len, data, len);
 		sctx->count += len;
 		goto out;
 	}
 
-	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
+	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 	do {
 		/*
@@ -83,34 +103,42 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 		 * this update. This value is also restricted by the sg list
 		 * limits.
 		 */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - to_process;
 		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-		leftover = total - to_process;
 
-		if (sctx->count) {
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-						 (u8 *) sctx->buf,
-						 sctx->count, max_sg_len);
+		if (buf_len) {
+			data_len = buf_len;
+			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+						  &nx_ctx->op.inlen,
+						  &data_len,
+						  (u8 *) sctx->buf,
+						  NX_DS_SHA256);
+
+			if (rc || data_len != buf_len)
+				goto out;
 		}
-		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-					 to_process - sctx->count,
-					 max_sg_len);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-
-		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-			/*
-			 * we've hit the nx chip previously and we're updating
-			 * again, so copy over the partial digest.
-			 */
-			memcpy(csbcpb->cpb.sha256.input_partial_digest,
+
+		data_len = to_process - buf_len;
+		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+					  &nx_ctx->op.inlen,
+					  &data_len,
+					  (u8 *) data,
+					  NX_DS_SHA256);
+
+		if (rc)
+			goto out;
+
+		to_process = (data_len + buf_len);
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha256.input_partial_digest,
 				csbcpb->cpb.sha256.message_digest,
 				SHA256_DIGEST_SIZE);
-		}
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 			rc = -EINVAL;
 			goto out;
@@ -122,22 +150,19 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha256_ops));
-		csbcpb->cpb.sha256.message_bit_length += (u64)
-			(csbcpb->cpb.sha256.spbc * 8);
-
-		/* everything after the first update is continuation */
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process - sctx->count;
-		sctx->count = 0;
-		in_sg = nx_ctx->in_sg;
+		data += to_process - buf_len;
+		buf_len = 0;
+
 	} while (leftover >= SHA256_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
 		memcpy(sctx->buf, data, leftover);
-	sctx->count = leftover;
+
+	sctx->count += len;
+	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
@@ -148,34 +173,46 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg, *out_sg;
-	u32 max_sg_len;
 	unsigned long irq_flags;
 	int rc;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count >= SHA256_BLOCK_SIZE) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha256.input_partial_digest,
-		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 	}
 
-	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
-	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
-	csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
+	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+				  &nx_ctx->op.inlen,
+				  &len,
+				  (u8 *) sctx->buf,
+				  NX_DS_SHA256);
 
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-				 sctx->count, max_sg_len);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
-				  max_sg_len);
-	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+		goto out;
+
+	len = SHA256_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  out,
+				  NX_DS_SHA256);
+
+	if (rc || len != SHA256_DIGEST_SIZE)
+		goto out;
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
@@ -189,8 +226,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
-		     &(nx_ctx->stats->sha256_bytes));
+	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
 	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
@@ -200,62 +236,18 @@ out:
 static int nx_sha256_export(struct shash_desc *desc, void *out)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct sha256_state *octx = out;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	octx->count = sctx->count +
-		      (csbcpb->cpb.sha256.message_bit_length / 8);
-	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
-	/* if no data has been processed yet, we need to export SHA256's
-	 * initial data, in case this context gets imported into a software
-	 * context */
-	if (csbcpb->cpb.sha256.message_bit_length)
-		memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
-		       SHA256_DIGEST_SIZE);
-	else {
-		octx->state[0] = SHA256_H0;
-		octx->state[1] = SHA256_H1;
-		octx->state[2] = SHA256_H2;
-		octx->state[3] = SHA256_H3;
-		octx->state[4] = SHA256_H4;
-		octx->state[5] = SHA256_H5;
-		octx->state[6] = SHA256_H6;
-		octx->state[7] = SHA256_H7;
-	}
+	memcpy(out, sctx, sizeof(*sctx));
 
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
 static int nx_sha256_import(struct shash_desc *desc, const void *in)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	const struct sha256_state *ictx = in;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	memcpy(sctx, in, sizeof(*sctx));
 
-	sctx->count = ictx->count & 0x3f;
-	csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
-
-	if (csbcpb->cpb.sha256.message_bit_length) {
-		memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
-		       SHA256_DIGEST_SIZE);
-
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-	}
-
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5..b3adf1022673 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_sg *out_sg;
+	int len;
+	int rc;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  (u8 *)sctx->state,
+				  NX_DS_SHA512);
+
+	if (rc || len != SHA512_DIGEST_SIZE)
+		goto out;
+
+	sctx->state[0] = __cpu_to_be64(SHA512_H0);
+	sctx->state[1] = __cpu_to_be64(SHA512_H1);
+	sctx->state[2] = __cpu_to_be64(SHA512_H2);
+	sctx->state[3] = __cpu_to_be64(SHA512_H3);
+	sctx->state[4] = __cpu_to_be64(SHA512_H4);
+	sctx->state[5] = __cpu_to_be64(SHA512_H5);
+	sctx->state[6] = __cpu_to_be64(SHA512_H6);
+	sctx->state[7] = __cpu_to_be64(SHA512_H7);
+	sctx->count[0] = 0;
+
+out:
 	return 0;
 }
 
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
-	u64 to_process, leftover, total, spbc_bits;
-	u32 max_sg_len;
+	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
+	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
 	 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	total = sctx->count[0] + len;
+	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
 	if (total < SHA512_BLOCK_SIZE) {
-		memcpy(sctx->buf + sctx->count[0], data, len);
+		memcpy(sctx->buf + buf_len, data, len);
 		sctx->count[0] += len;
 		goto out;
 	}
 
-	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
+	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 	do {
 		/*
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 		 * this update. This value is also restricted by the sg list
 		 * limits.
 		 */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - leftover;
 		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
 		leftover = total - to_process;
 
-		if (sctx->count[0]) {
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-						 (u8 *) sctx->buf,
-						 sctx->count[0], max_sg_len);
+		if (buf_len) {
+			data_len = buf_len;
+			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+						  &nx_ctx->op.inlen,
+						  &data_len,
+						  (u8 *) sctx->buf,
+						  NX_DS_SHA512);
+
+			if (rc || data_len != buf_len)
+				goto out;
 		}
-		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-					 to_process - sctx->count[0],
-					 max_sg_len);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-
-		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-			/*
-			 * we've hit the nx chip previously and we're updating
-			 * again, so copy over the partial digest.
-			 */
-			memcpy(csbcpb->cpb.sha512.input_partial_digest,
+
+		data_len = to_process - buf_len;
+		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+					  &nx_ctx->op.inlen,
+					  &data_len,
+					  (u8 *) data,
+					  NX_DS_SHA512);
+
+		if (rc || data_len != (to_process - buf_len))
+			goto out;
+
+		to_process = (data_len + buf_len);
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha512.input_partial_digest,
 				csbcpb->cpb.sha512.message_digest,
 				SHA512_DIGEST_SIZE);
-		}
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 			rc = -EINVAL;
 			goto out;
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha512_ops));
-		spbc_bits = csbcpb->cpb.sha512.spbc * 8;
-		csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
-		if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
-			csbcpb->cpb.sha512.message_bit_length_hi++;
-
-		/* everything after the first update is continuation */
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process - sctx->count[0];
-		sctx->count[0] = 0;
-		in_sg = nx_ctx->in_sg;
+		data += to_process - buf_len;
+		buf_len = 0;
+
 	} while (leftover >= SHA512_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
 		memcpy(sctx->buf, data, leftover);
-	sctx->count[0] = leftover;
+	sctx->count[0] += len;
+	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg, *out_sg;
-	u32 max_sg_len;
 	u64 count0;
 	unsigned long irq_flags;
 	int rc;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha512.input_partial_digest,
-		       csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+		       SHA512_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 	}
 
-	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
 	count0 = sctx->count[0] * 8;
 
-	csbcpb->cpb.sha512.message_bit_length_lo += count0;
-	if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
-		csbcpb->cpb.sha512.message_bit_length_hi++;
+	csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
-				 max_sg_len);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
-				  max_sg_len);
-	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+				  &nx_ctx->op.inlen,
+				  &len,
+				  (u8 *)sctx->buf,
+				  NX_DS_SHA512);
+
+	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+		goto out;
+
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  out,
+				  NX_DS_SHA512);
+
+	if (rc)
+		goto out;
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
-		     &(nx_ctx->stats->sha512_bytes));
+	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
 
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
@@ -207,74 +241,18 @@ out:
 static int nx_sha512_export(struct shash_desc *desc, void *out)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct sha512_state *octx = out;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+	memcpy(out, sctx, sizeof(*sctx));
 
-	/* move message_bit_length (128 bits) into count and convert its value
-	 * to bytes */
-	octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
-			 ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
-	octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
-
-	octx->count[0] += sctx->count[0];
-	if (octx->count[0] < sctx->count[0])
-		octx->count[1]++;
-
-	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
-	/* if no data has been processed yet, we need to export SHA512's
-	 * initial data, in case this context gets imported into a software
-	 * context */
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo)
-		memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
-		       SHA512_DIGEST_SIZE);
-	else {
-		octx->state[0] = SHA512_H0;
-		octx->state[1] = SHA512_H1;
-		octx->state[2] = SHA512_H2;
-		octx->state[3] = SHA512_H3;
-		octx->state[4] = SHA512_H4;
-		octx->state[5] = SHA512_H5;
-		octx->state[6] = SHA512_H6;
-		octx->state[7] = SHA512_H7;
-	}
-
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
 static int nx_sha512_import(struct shash_desc *desc, const void *in)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	const struct sha512_state *ictx = in;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-
-	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
-	sctx->count[0] = ictx->count[0] & 0x3f;
-	csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
-							<< 3;
-	csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
-						   ictx->count[0] >> 61;
-
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo) {
-		memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
-		       SHA512_DIGEST_SIZE);
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-	}
+	memcpy(sctx, in, sizeof(*sctx));
 
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 