author    Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>    2014-10-28 13:48:47 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>             2014-11-06 10:15:02 -0500
commit    5313231ac9a4334ded1dc205aac60dd63c62ff1d (patch)
tree      3e28bbb807ff614e6833cee1363c3026b817cf54 /drivers/crypto
parent    e13a79acf9a41bdf30f96558b8cc0734cb63dc35 (diff)
crypto: nx - Moving NX-AES-XCBC to be processed logic
The previous limits were estimated locally, in a single step, based on bound
values. This was not correct: for certain scatterlists, nx_build_sg_lists
consumed more sg entries than had been allocated, causing memory corruption
and crashes. This patch removes the old logic and moves it into
nx_build_sg_lists, so that a correct nx_sg list is built using the proper
sg_max limit and bounds.

Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
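[Editor's note] The heart of the fix is a changed calling convention:
nx_build_sg_list() now takes the byte count by pointer and writes back how
much it could actually map within the sg entry bound, and every caller checks
that value and fails with -EINVAL on a short mapping instead of silently
overrunning the sg array. Below is a minimal user-space sketch of that
pattern; struct nx_sg_demo, build_sg_list() and SG_ENTRY_BYTES are
illustrative stand-ins, not the driver's real types or hardware limits.

#include <stdint.h>
#include <stdio.h>

#define SG_ENTRY_BYTES 4096	/* assumed per-entry capacity, demo only */

struct nx_sg_demo {
	uint64_t addr;
	uint32_t len;
};

/* Map buf into at most sgmax entries; report mapped bytes back via *len. */
static struct nx_sg_demo *build_sg_list(struct nx_sg_demo *sg, uint8_t *buf,
					unsigned int *len, uint32_t sgmax)
{
	unsigned int remaining = *len;
	uint32_t used = 0;

	while (remaining && used < sgmax) {
		uint32_t chunk = remaining < SG_ENTRY_BYTES
				 ? remaining : SG_ENTRY_BYTES;

		sg[used].addr = (uintptr_t)buf;
		sg[used].len = chunk;
		buf += chunk;
		remaining -= chunk;
		used++;
	}
	*len -= remaining;	/* bytes that actually fit */
	return sg + used;	/* one past the last entry used */
}

int main(void)
{
	static uint8_t data[3 * SG_ENTRY_BYTES];
	struct nx_sg_demo sg[2];	/* deliberately one entry short */
	unsigned int len = sizeof(data);

	build_sg_list(sg, data, &len, 2);

	/* Callers now verify the mapped length, as the patch does. */
	if (len != sizeof(data)) {
		fprintf(stderr, "short sg list: mapped %u of %zu bytes\n",
			len, sizeof(data));
		return 1;	/* the driver returns -EINVAL here */
	}
	return 0;
}

With a by-value length the loop would keep consuming entries past sg[1];
with the by-reference length the shortfall is visible to the caller, which
is exactly the check this patch adds after every nx_build_sg_list() call.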
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/nx/nx-aes-xcbc.c	81
1 file changed, 63 insertions(+), 18 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 03c4bf57d066..8c2faffab4a3 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -75,6 +75,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	u8 keys[2][AES_BLOCK_SIZE];
 	u8 key[32];
 	int rc = 0;
+	int len;
 
 	/* Change to ECB mode */
 	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
@@ -86,11 +87,20 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	memset(keys[0], 0x01, sizeof(keys[0]));
 	memset(keys[1], 0x03, sizeof(keys[1]));
 
+	len = sizeof(keys);
 	/* Generate K1 and K3 encrypting the patterns */
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
 				 nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
 				 nx_ctx->ap->sglen);
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -103,12 +113,23 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	/* XOr K3 with the padding for a 0 length message */
 	keys[1][0] ^= 0x80;
 
+	len = sizeof(keys[1]);
+
 	/* Encrypt the final result */
 	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
 				 nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+
+	if (len != sizeof(keys[1]))
+		return -EINVAL;
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
 				 nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -133,6 +154,7 @@ static int nx_xcbc_init(struct shash_desc *desc)
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *out_sg;
+	int len;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_AES);
 
@@ -144,8 +166,13 @@ static int nx_xcbc_init(struct shash_desc *desc)
 	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
 	memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
 
+	len = AES_BLOCK_SIZE;
 	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+				  &len, nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
 	return 0;
@@ -159,10 +186,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *in_sg;
-	u32 to_process, leftover, total;
-	u32 max_sg_len;
+	u32 to_process = 0, leftover, total;
+	unsigned int max_sg_len;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -180,17 +208,15 @@ static int nx_xcbc_update(struct shash_desc *desc,
 	}
 
 	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	do {
-
-		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
-		 * update */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-					NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - to_process;
 		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
 		leftover = total - to_process;
 
 		/* the hardware will not accept a 0 byte operation for this
@@ -204,15 +230,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
 		}
 
 		if (sctx->count) {
+			data_len = sctx->count;
 			in_sg = nx_build_sg_list(nx_ctx->in_sg,
 						(u8 *) sctx->buffer,
-						sctx->count,
+						&data_len,
 						max_sg_len);
+			if (data_len != sctx->count)
+				return -EINVAL;
 		}
+
+		data_len = to_process - sctx->count;
 		in_sg = nx_build_sg_list(in_sg,
 					(u8 *) data,
-					to_process - sctx->count,
+					&data_len,
 					max_sg_len);
+
+		if (data_len != to_process - sctx->count)
+			return -EINVAL;
+
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
 					sizeof(struct nx_sg);
 
@@ -263,6 +298,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 	struct nx_sg *in_sg, *out_sg;
 	unsigned long irq_flags;
 	int rc = 0;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -285,11 +321,20 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 	 * this is not an intermediate operation */
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
+	len = sctx->count;
 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
-				 sctx->count, nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != sctx->count)
+		return -EINVAL;
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
 				 nx_ctx->ap->sglen);
 
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 