author     Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>   2014-10-28 13:50:45 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>            2014-11-06 10:15:03 -0500
commit     f129430dd87dfe868845292e661b186fbfa89ce3
tree       450b9efa606062ddf30b501405b0b18cefeaff24
parent     000851119e80edd46443250a1c89d3c45cd6eeca
crypto: nx - Fixing the limit number of bytes to be processed
The previous limits were estimated locally, in a single step, based on
bound values. This was not correct: for certain scatterlists,
nx_build_sg_lists consumed more sg entries than had been allocated,
causing memory corruption and crashes.
E.g.: in the worst case, a single byte can require its own sg entry.
This patch fixes that by moving the bound logic into nx_build_sg_lists,
setting a correct sg_max limit, and adding a trim function to enforce
the bound on the sg list. It also fixes nx_build_sg_list's NULL return
on overflow, which callers left untreated.
Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
 drivers/crypto/nx/nx.c | 127
 drivers/crypto/nx/nx.h |   8
 2 files changed, 113 insertions(+), 22 deletions(-)
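To see what the new trim step guarantees, here is a minimal userspace sketch of the patch's trim_sg_list() logic. The struct nx_sg below is a mock (the real one lives in drivers/crypto/nx/nx.h and has more fields); the trimming loop itself mirrors the function added by this patch, with an explicit cast so the negative result is well-defined in standard C:

```c
#include <stdio.h>

/* Userspace mock of the driver's scatter/gather element. */
struct nx_sg {
	unsigned long addr;
	unsigned int  len;
};

/* Mirrors the patch's trim_sg_list(): crop @delta bytes off the tail of
 * the list so it describes no more data than the hardware bound allows,
 * then return the list's byte length as a negative number. */
static long trim_sg_list(struct nx_sg *sg, struct nx_sg *end,
			 unsigned int delta)
{
	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}
	return (sg - end) * (long)sizeof(struct nx_sg);
}

int main(void)
{
	/* Three entries describing 10 + 6 + 4 = 20 bytes. */
	struct nx_sg list[3] = {
		{ 0x1000, 10 }, { 0x2000, 6 }, { 0x3000, 4 }
	};

	/* Crop 8 bytes off the tail. */
	long oplen = trim_sg_list(list, list + 3, 8);

	printf("oplen = %ld (negative => scatterlist)\n", oplen);
	printf("entry0.len = %u, entry1.len = %u\n",
	       list[0].len, list[1].len);
	return 0;
}
```

Cropping 8 bytes drops the 4-byte tail entry and shortens the 6-byte entry to 2, so the remaining two entries describe exactly 12 bytes. The negative return value is the convention nx.c already uses to tell phyp that the operand is a scatterlist rather than a linear buffer.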
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90d..a392465d3e3f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
  */
 struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8           *start_addr,
-			       unsigned int  len,
+			       unsigned int *len,
			       u32           sgmax)
 {
	unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
	else
		sg_addr = __pa(sg_addr);

-	end_addr = sg_addr + len;
+	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
	 * Also when using vmalloc'ed data, every time that a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
-	for (sg = sg_head; sg_len < len; sg++) {
+	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
-			end_addr = sg_addr + len - sg_len;
+			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
-			return NULL;
+			sg++;
+			break;
		}
	}
+	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
				unsigned int        sglen,
				struct scatterlist *sg_src,
				unsigned int        start,
-				unsigned int        src_len)
+				unsigned int       *src_len)
 {
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
-	unsigned int n, offset = 0, len = src_len;
+	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
@@ -182,27 +184,101 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

-	while (len && nx_sg) {
+	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
-			scatterwalk_start(&walk, sg_next(walk.sg));
+			/* In cases where we have a scatterlist chain,
+			 * scatterwalk_sg_next handles it properly */
+			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

-		nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
+	/* update to_process */
+	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
 }

 /**
+ * trim_sg_list - ensure the bound in an sg list.
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: the amount we need to crop in order to bound the list.
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+{
+	while (delta && end > sg) {
+		struct nx_sg *last = end - 1;
+
+		if (last->len > delta) {
+			last->len -= delta;
+			delta = 0;
+		} else {
+			end--;
+			delta -= last->len;
+		}
+	}
+	return (sg - end) * sizeof(struct nx_sg);
+}
+
+/**
+ * nx_sha_build_sg_list - walk and build an sg list for SHA modes
+ *			  using the right bounds and limits.
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @nx_in_outsg: current sg list, in or out list
+ * @op_len: current op_len to be used in order to build a sg list
+ * @nbytes: number of bytes to be processed
+ * @offset: buf offset
+ * @mode: SHA256 or SHA512
+ */
+int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+			 struct nx_sg         *nx_in_outsg,
+			 s64                  *op_len,
+			 unsigned int         *nbytes,
+			 u8                   *offset,
+			 u32                   mode)
+{
+	unsigned int delta = 0;
+	unsigned int total = *nbytes;
+	struct nx_sg *nx_insg = nx_in_outsg;
+	unsigned int max_sg_len;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+
+	switch (mode) {
+	case NX_DS_SHA256:
+		if (*nbytes < total)
+			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+		break;
+	case NX_DS_SHA512:
+		if (*nbytes < total)
+			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+		break;
+	default:
+		return -EINVAL;
+	}
+	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+
+	return 0;
+}
+
+/**
  * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
  *                     scatterlists based on them.
  *
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist    *dst,
		      struct scatterlist    *src,
-		      unsigned int           nbytes,
+		      unsigned int          *nbytes,
		      unsigned int           offset,
		      u8                    *iv)
 {
+	unsigned int delta = 0;
+	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
+	unsigned int max_sg_len;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

-	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
-				    offset, nbytes);
-	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
-				    offset, nbytes);
+	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+				     offset, nbytes);
+	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+				    offset, nbytes);
+
+	if (*nbytes < total)
+		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
-	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);

	return 0;
 }
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
-		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
-		nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
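The delta handed to trim_sg_list() above is just the sub-block remainder: when the walk maps fewer bytes than the caller asked for, nx_build_sg_lists() crops the mapped length down to a whole number of cipher blocks. A standalone sketch of that arithmetic (the byte counts are made up for the demo):

```c
#include <stdio.h>

#define AES_BLOCK_SIZE 16	/* as in the kernel's <crypto/aes.h> */

int main(void)
{
	unsigned int total  = 4096;	/* bytes the caller requested      */
	unsigned int nbytes = 1000;	/* bytes that actually fit the sgs */
	unsigned int delta  = 0;

	/* Same expression as in nx_build_sg_lists(): when the walk came
	 * up short, crop down to a whole number of cipher blocks. */
	if (nbytes < total)
		delta = nbytes - (nbytes & ~(AES_BLOCK_SIZE - 1));

	printf("delta = %u\n", delta);		    /* 1000 - 992 = 8   */
	printf("processed = %u\n", nbytes - delta); /* 992 = 62 * 16    */
	return 0;
}
```

The SHA path in nx_sha_build_sg_list() computes the same remainder, only against SHA256_BLOCK_SIZE or SHA512_BLOCK_SIZE instead of the AES block size.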
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index befda07ca1da..6c9ecaaead52 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,13 +153,15 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
		  u32 may_sleep);
-struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
+int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+			 s64 *, unsigned int *, u8 *, u32);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
-		      struct scatterlist *, struct scatterlist *, unsigned int,
+		      struct scatterlist *, struct scatterlist *, unsigned int *,
		      unsigned int, u8 *);
 struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
				struct scatterlist *, unsigned int,
-				unsigned int);
+				unsigned int *);

 #ifdef CONFIG_DEBUG_FS
 #define NX_DEBUGFS_INIT(drv)	nx_debugfs_init(drv)
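As the prototype changes above show, nbytes and the builders' length arguments are now passed by pointer: the builder writes back how many bytes actually fit, and callers are expected to loop until everything is processed. The sketch below is a hypothetical userspace mock of that contract — build_sg_lists_mock and DATABYTELEN stand in for nx_build_sg_lists() and nx_ctx->ap->databytelen; it is not code from this patch:

```c
#include <stdio.h>

/* Stand-in for the hardware's per-operation byte limit
 * (nx_ctx->ap->databytelen in the driver); value chosen for the demo. */
#define DATABYTELEN 4096u

/* Mock of the new in/out contract: cap *nbytes at what the bounded sg
 * lists can describe and write back the result. */
static int build_sg_lists_mock(unsigned int *nbytes)
{
	if (*nbytes > DATABYTELEN)
		*nbytes = DATABYTELEN;
	return 0;
}

int main(void)
{
	unsigned int total = 10000, processed = 0;

	/* Caller pattern the API change enables: request everything that
	 * is left, learn how much fit, fire one op for that much, repeat. */
	do {
		unsigned int to_process = total - processed;

		if (build_sg_lists_mock(&to_process))
			break;
		/* ...nx_hcall_sync(nx_ctx, &nx_ctx->op, ...) would run
		 * here in the driver, processing to_process bytes... */
		processed += to_process;
		printf("op handled %u bytes, %u/%u done\n",
		       to_process, processed, total);
	} while (processed < total);

	return 0;
}
```

Each pass corresponds to one firmware operation sized to what the bounded scatter/gather lists could actually describe, which is exactly the invariant the trim step enforces.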