author | Fionnuala Gunter <fin@linux.vnet.ibm.com> | 2013-08-29 10:36:36 -0400
---|---|---
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2013-09-02 06:32:54 -0400
commit | 9d6f1a82d3a81d603526980ef705b9ab39f997f3 (patch) |
tree | 97bcfcabaf349fca5e0ae1478eb06eb855adcc10 /drivers/crypto/nx |
parent | 799804348d11763b84213156318bb92cb955bfb5 (diff) |
crypto: nx - fix limits to sg lists for AES-XCBC
This patch updates the NX driver to perform several hyper calls when necessary
so that the length limits of scatter/gather lists are respected.
Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Reviewed-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r-- | drivers/crypto/nx/nx-aes-xcbc.c | 113
1 file changed, 66 insertions, 47 deletions
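
The patch splits each update into block-aligned chunks, each small enough for one NX operation (bounded by the per-operation byte limit and by what one scatter/gather list can describe), and issues one hcall per chunk. Before the full diff, here is a minimal standalone sketch of that splitting logic. The names and limit values (`BLOCK_SIZE`, `PAGE_BYTES`, `MAX_SG_ELEMS`, `MAX_OP_BYTES`) are hypothetical stand-ins, not the driver's; only the arithmetic mirrors the loop in the diff.

```c
/*
 * Illustrative sketch only -- not the NX driver code.  It shows how an
 * update of `total` bytes can be split into block-aligned chunks that
 * respect both a per-operation byte limit and a scatter/gather element
 * limit, with one simulated "hcall" per chunk.  All limit values here
 * are hypothetical.
 */
#include <stdio.h>

#define BLOCK_SIZE   16u     /* AES block size */
#define PAGE_BYTES   4096u   /* assumed bytes addressable per sg element */
#define MAX_SG_ELEMS 32u     /* assumed sg-list element limit */
#define MAX_OP_BYTES 65536u  /* assumed per-operation byte limit */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int total = 200000;  /* pretend this many bytes were queued */
        unsigned int ops = 0;

        while (total > BLOCK_SIZE) {
                /* clamp the chunk to both limits, then round it down to a
                 * whole number of blocks */
                unsigned int to_process = min_u(total, MAX_OP_BYTES);

                to_process = min_u(to_process, PAGE_BYTES * (MAX_SG_ELEMS - 1));
                to_process &= ~(BLOCK_SIZE - 1);

                /* if the split lands exactly on a block boundary, hold one
                 * block back so a non-empty final operation remains */
                if (to_process == total)
                        to_process -= BLOCK_SIZE;

                printf("op %u: process %u bytes, %u left\n",
                       ++ops, to_process, total - to_process);
                total -= to_process;
        }
        printf("%u bytes buffered for finalization\n", total);
        return 0;
}
```

With these made-up limits the 200000-byte example splits into four operations (65536, 65536, 65536, 3376 bytes) and 16 bytes are carried over, mirroring the driver comment that a block-aligned update must hold back one block because the hardware rejects zero-byte operations and the data must still be finalized.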
```diff
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 658da0fd3e1f..1a5d9e372b4e 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -88,78 +88,97 @@ static int nx_xcbc_update(struct shash_desc *desc,
         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         struct nx_sg *in_sg;
-        u32 to_process, leftover;
+        u32 to_process, leftover, total;
+        u32 max_sg_len;
         unsigned long irq_flags;
         int rc = 0;
 
         spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-                /* we've hit the nx chip previously and we're updating again,
-                 * so copy over the partial digest */
-                memcpy(csbcpb->cpb.aes_xcbc.cv,
-                       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
-        }
+
+        total = sctx->count + len;
 
         /* 2 cases for total data len:
          * 1: <= AES_BLOCK_SIZE: copy into state, return 0
          * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
          */
-        if (len + sctx->count <= AES_BLOCK_SIZE) {
+        if (total <= AES_BLOCK_SIZE) {
                 memcpy(sctx->buffer + sctx->count, data, len);
                 sctx->count += len;
                 goto out;
         }
 
-        /* to_process: the AES_BLOCK_SIZE data chunk to process in this
-         * update */
-        to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
-        leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
-
-        /* the hardware will not accept a 0 byte operation for this algorithm
-         * and the operation MUST be finalized to be correct. So if we happen
-         * to get an update that falls on a block sized boundary, we must
-         * save off the last block to finalize with later. */
-        if (!leftover) {
-                to_process -= AES_BLOCK_SIZE;
-                leftover = AES_BLOCK_SIZE;
-        }
+        in_sg = nx_ctx->in_sg;
+        max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                                nx_ctx->ap->sglen);
+
+        do {
+
+                /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+                 * update */
+                to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+                to_process = min_t(u64, to_process,
+                                        NX_PAGE_SIZE * (max_sg_len - 1));
+                to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+                leftover = total - to_process;
 
-        if (sctx->count) {
-                in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
-                                         sctx->count, nx_ctx->ap->sglen);
-                in_sg = nx_build_sg_list(in_sg, (u8 *)data,
-                                         to_process - sctx->count,
-                                         nx_ctx->ap->sglen);
+                /* the hardware will not accept a 0 byte operation for this
+                 * algorithm and the operation MUST be finalized to be correct.
+                 * So if we happen to get an update that falls on a block sized
+                 * boundary, we must save off the last block to finalize with
+                 * later. */
+                if (!leftover) {
+                        to_process -= AES_BLOCK_SIZE;
+                        leftover = AES_BLOCK_SIZE;
+                }
+
+                if (sctx->count) {
+                        in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                                (u8 *) sctx->buffer,
+                                                sctx->count,
+                                                max_sg_len);
+                }
+                in_sg = nx_build_sg_list(in_sg,
+                                        (u8 *) data,
+                                        to_process - sctx->count,
+                                        max_sg_len);
                 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                         sizeof(struct nx_sg);
-        } else {
-                in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
-                                         nx_ctx->ap->sglen);
-                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-                                        sizeof(struct nx_sg);
-        }
 
-        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+                /* we've hit the nx chip previously and we're updating again,
+                 * so copy over the partial digest */
+                if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+                        memcpy(csbcpb->cpb.aes_xcbc.cv,
+                               csbcpb->cpb.aes_xcbc.out_cv_mac,
+                               AES_BLOCK_SIZE);
+                }
+
+                NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+                        rc = -EINVAL;
+                        goto out;
+                }
+
+                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                        desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+                if (rc)
+                        goto out;
 
-        if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
-                rc = -EINVAL;
-                goto out;
-        }
+                atomic_inc(&(nx_ctx->stats->aes_ops));
 
-        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-        if (rc)
-                goto out;
+                /* everything after the first update is continuation */
+                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-        atomic_inc(&(nx_ctx->stats->aes_ops));
+                total -= to_process;
+                data += to_process - sctx->count;
+                sctx->count = 0;
+                in_sg = nx_ctx->in_sg;
+        } while (leftover > AES_BLOCK_SIZE);
 
         /* copy the leftover back into the state struct */
-        memcpy(sctx->buffer, data + len - leftover, leftover);
+        memcpy(sctx->buffer, data, leftover);
         sctx->count = leftover;
 
-        /* everything after the first update is continuation */
-        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
         spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
         return rc;
```
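
One subtle piece of the new loop is the bookkeeping after each hcall: `data += to_process - sctx->count; sctx->count = 0;`. The first chunk is built from the bytes buffered by the previous update plus the front of the new data; every later chunk comes from the new data alone, and whatever is left at the end is copied back into the buffer from the current data position. The sketch below walks through that with made-up numbers; `buffered`, the 48-byte chunk size, and the literal 16 (standing in for AES_BLOCK_SIZE) are all hypothetical.

```c
/*
 * Hypothetical sketch (not driver code) of the bookkeeping at the end of
 * the new do/while loop: the first chunk consumes the bytes buffered by a
 * previous update plus the front of the new data, later chunks read from
 * the new data alone, and the final leftover starts at the current data
 * position.  16 stands in for AES_BLOCK_SIZE; the 48-byte chunk is made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned int buffered = 10;   /* bytes held over from the last update */
        unsigned int len = 100;       /* bytes handed to this update */
        unsigned int total = buffered + len;
        unsigned int data_off = 0;    /* offset reached in the new data */

        while (total > 16) {
                /* pretend each operation can take at most 48 bytes */
                unsigned int to_process = total < 48 ? total & ~15u : 48;

                if (to_process == total)
                        to_process -= 16;  /* keep one block for final() */

                /* this chunk = `buffered` old bytes + fresh bytes from data */
                printf("hcall: %u buffered + %u new bytes\n",
                       buffered, to_process - buffered);

                data_off += to_process - buffered;
                buffered = 0;          /* only the first chunk uses the buffer */
                total -= to_process;
        }
        printf("leftover: %u bytes, starting at data[%u]\n", total, data_off);
        return 0;
}
```

With these numbers two operations are issued (10 buffered + 38 new bytes, then 48 new bytes) and 14 bytes remain, which is exactly what the final `memcpy(sctx->buffer, data, leftover)` in the patch would capture.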