about summary refs log tree commit diff stats
path: root/drivers/crypto/nx
diff options
context:
space:
mode:
authorMarcelo Cerri <mhcerri@linux.vnet.ibm.com>2013-08-12 17:49:37 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2013-08-14 06:42:04 -0400
commitc849163b80c05f4567b1adef5db7f377460f88cd (patch)
treeb7e3e40e463ed921040c1049212e40132103c536 /drivers/crypto/nx
parentf22d08111a1d23f7432ee8d9c2dd637deb6963bd (diff)
crypto: nx - fix concurrency issue
The NX driver uses the transformation context to store several fields containing data related to the state of the operations in progress. Since a single tfm can be used by different kernel threads at the same time, we need to protect the data stored into the context.

This patch makes use of spin locks to protect the data where a race condition can happen.

Reviewed-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c10
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c20
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c10
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c10
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c4
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c8
-rw-r--r--drivers/crypto/nx/nx-sha256.c16
-rw-r--r--drivers/crypto/nx/nx-sha512.c16
-rw-r--r--drivers/crypto/nx/nx.c4
-rw-r--r--drivers/crypto/nx/nx.h1
10 files changed, 87 insertions, 12 deletions
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index a2f99a910e4a..7c0237dae02d 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -70,10 +70,15 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
70{ 70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags;
73 int rc; 74 int rc;
74 75
75 if (nbytes > nx_ctx->ap->databytelen) 76 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
76 return -EINVAL; 77
78 if (nbytes > nx_ctx->ap->databytelen) {
79 rc = -EINVAL;
80 goto out;
81 }
77 82
78 if (enc) 83 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -100,6 +105,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
100 atomic64_add(csbcpb->csb.processed_byte_count, 105 atomic64_add(csbcpb->csb.processed_byte_count,
101 &(nx_ctx->stats->aes_bytes)); 106 &(nx_ctx->stats->aes_bytes));
102out: 107out:
108 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
103 return rc; 109 return rc;
104} 110}
105 111
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index ef5eae6d1400..39d42245bc79 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -271,10 +271,15 @@ static int ccm_nx_decrypt(struct aead_request *req,
271 unsigned int nbytes = req->cryptlen; 271 unsigned int nbytes = req->cryptlen;
272 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 272 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
273 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; 273 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
274 unsigned long irq_flags;
274 int rc = -1; 275 int rc = -1;
275 276
276 if (nbytes > nx_ctx->ap->databytelen) 277 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
277 return -EINVAL; 278
279 if (nbytes > nx_ctx->ap->databytelen) {
280 rc = -EINVAL;
281 goto out;
282 }
278 283
279 nbytes -= authsize; 284 nbytes -= authsize;
280 285
@@ -308,6 +313,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
308 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, 313 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
309 authsize) ? -EBADMSG : 0; 314 authsize) ? -EBADMSG : 0;
310out: 315out:
316 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
311 return rc; 317 return rc;
312} 318}
313 319
@@ -318,10 +324,15 @@ static int ccm_nx_encrypt(struct aead_request *req,
318 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 324 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
319 unsigned int nbytes = req->cryptlen; 325 unsigned int nbytes = req->cryptlen;
320 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 326 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
327 unsigned long irq_flags;
321 int rc = -1; 328 int rc = -1;
322 329
323 if (nbytes > nx_ctx->ap->databytelen) 330 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
324 return -EINVAL; 331
332 if (nbytes > nx_ctx->ap->databytelen) {
333 rc = -EINVAL;
334 goto out;
335 }
325 336
326 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, 337 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
327 csbcpb->cpb.aes_ccm.in_pat_or_b0); 338 csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -350,6 +361,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
350 req->dst, nbytes, authsize, 361 req->dst, nbytes, authsize,
351 SCATTERWALK_TO_SG); 362 SCATTERWALK_TO_SG);
352out: 363out:
364 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
353 return rc; 365 return rc;
354} 366}
355 367
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index b6286f14680b..762611b883cb 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -88,10 +88,15 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
88{ 88{
89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
91 unsigned long irq_flags;
91 int rc; 92 int rc;
92 93
93 if (nbytes > nx_ctx->ap->databytelen) 94 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
94 return -EINVAL; 95
96 if (nbytes > nx_ctx->ap->databytelen) {
97 rc = -EINVAL;
98 goto out;
99 }
95 100
96 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 101 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
97 csbcpb->cpb.aes_ctr.iv); 102 csbcpb->cpb.aes_ctr.iv);
@@ -112,6 +117,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
112 atomic64_add(csbcpb->csb.processed_byte_count, 117 atomic64_add(csbcpb->csb.processed_byte_count,
113 &(nx_ctx->stats->aes_bytes)); 118 &(nx_ctx->stats->aes_bytes));
114out: 119out:
120 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
115 return rc; 121 return rc;
116} 122}
117 123
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7bbc9a81da21..77dbe084ba41 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -70,10 +70,15 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
70{ 70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags;
73 int rc; 74 int rc;
74 75
75 if (nbytes > nx_ctx->ap->databytelen) 76 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
76 return -EINVAL; 77
78 if (nbytes > nx_ctx->ap->databytelen) {
79 rc = -EINVAL;
80 goto out;
81 }
77 82
78 if (enc) 83 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -98,6 +103,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
98 atomic64_add(csbcpb->csb.processed_byte_count, 103 atomic64_add(csbcpb->csb.processed_byte_count,
99 &(nx_ctx->stats->aes_bytes)); 104 &(nx_ctx->stats->aes_bytes));
100out: 105out:
106 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
101 return rc; 107 return rc;
102} 108}
103 109
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 6cca6c392b00..df90d03afc10 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -166,8 +166,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
166 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 166 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
167 struct blkcipher_desc desc; 167 struct blkcipher_desc desc;
168 unsigned int nbytes = req->cryptlen; 168 unsigned int nbytes = req->cryptlen;
169 unsigned long irq_flags;
169 int rc = -EINVAL; 170 int rc = -EINVAL;
170 171
172 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
173
171 if (nbytes > nx_ctx->ap->databytelen) 174 if (nbytes > nx_ctx->ap->databytelen)
172 goto out; 175 goto out;
173 176
@@ -255,6 +258,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
255 -EBADMSG : 0; 258 -EBADMSG : 0;
256 } 259 }
257out: 260out:
261 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
258 return rc; 262 return rc;
259} 263}
260 264
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 93923e4628c0..658da0fd3e1f 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -89,8 +89,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
89 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 89 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
90 struct nx_sg *in_sg; 90 struct nx_sg *in_sg;
91 u32 to_process, leftover; 91 u32 to_process, leftover;
92 unsigned long irq_flags;
92 int rc = 0; 93 int rc = 0;
93 94
95 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
96
94 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 97 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
95 /* we've hit the nx chip previously and we're updating again, 98 /* we've hit the nx chip previously and we're updating again,
96 * so copy over the partial digest */ 99 * so copy over the partial digest */
@@ -158,6 +161,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
158 /* everything after the first update is continuation */ 161 /* everything after the first update is continuation */
159 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 162 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
160out: 163out:
164 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
161 return rc; 165 return rc;
162} 166}
163 167
@@ -167,8 +171,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
167 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 171 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
168 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 172 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
169 struct nx_sg *in_sg, *out_sg; 173 struct nx_sg *in_sg, *out_sg;
174 unsigned long irq_flags;
170 int rc = 0; 175 int rc = 0;
171 176
177 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
178
172 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 179 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
173 /* we've hit the nx chip previously, now we're finalizing, 180 /* we've hit the nx chip previously, now we're finalizing,
174 * so copy over the partial digest */ 181 * so copy over the partial digest */
@@ -211,6 +218,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
211 218
212 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); 219 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
213out: 220out:
221 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
214 return rc; 222 return rc;
215} 223}
216 224
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 254b01abef64..6547a7104bf6 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -57,8 +57,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
57 struct nx_sg *in_sg; 57 struct nx_sg *in_sg;
58 u64 to_process, leftover, total; 58 u64 to_process, leftover, total;
59 u32 max_sg_len; 59 u32 max_sg_len;
60 unsigned long irq_flags;
60 int rc = 0; 61 int rc = 0;
61 62
63 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
64
62 /* 2 cases for total data len: 65 /* 2 cases for total data len:
63 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 66 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
64 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover 67 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
@@ -136,6 +139,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
136 memcpy(sctx->buf, data, leftover); 139 memcpy(sctx->buf, data, leftover);
137 sctx->count = leftover; 140 sctx->count = leftover;
138out: 141out:
142 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
139 return rc; 143 return rc;
140} 144}
141 145
@@ -146,8 +150,11 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
146 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 150 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
147 struct nx_sg *in_sg, *out_sg; 151 struct nx_sg *in_sg, *out_sg;
148 u32 max_sg_len; 152 u32 max_sg_len;
153 unsigned long irq_flags;
149 int rc; 154 int rc;
150 155
156 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
157
151 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 158 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
152 159
153 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 160 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
@@ -186,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
186 &(nx_ctx->stats->sha256_bytes)); 193 &(nx_ctx->stats->sha256_bytes));
187 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); 194 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
188out: 195out:
196 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
189 return rc; 197 return rc;
190} 198}
191 199
@@ -195,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
195 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 203 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
196 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 204 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
197 struct sha256_state *octx = out; 205 struct sha256_state *octx = out;
206 unsigned long irq_flags;
207
208 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
198 209
199 octx->count = sctx->count + 210 octx->count = sctx->count +
200 (csbcpb->cpb.sha256.message_bit_length / 8); 211 (csbcpb->cpb.sha256.message_bit_length / 8);
@@ -217,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
217 octx->state[7] = SHA256_H7; 228 octx->state[7] = SHA256_H7;
218 } 229 }
219 230
231 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
220 return 0; 232 return 0;
221} 233}
222 234
@@ -226,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
226 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 238 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
227 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 239 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
228 const struct sha256_state *ictx = in; 240 const struct sha256_state *ictx = in;
241 unsigned long irq_flags;
242
243 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
229 244
230 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 245 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
231 246
@@ -240,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
240 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 255 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
241 } 256 }
242 257
258 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
243 return 0; 259 return 0;
244} 260}
245 261
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 2d6d91359833..236e6afeab10 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -57,8 +57,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
57 struct nx_sg *in_sg; 57 struct nx_sg *in_sg;
58 u64 to_process, leftover, total, spbc_bits; 58 u64 to_process, leftover, total, spbc_bits;
59 u32 max_sg_len; 59 u32 max_sg_len;
60 unsigned long irq_flags;
60 int rc = 0; 61 int rc = 0;
61 62
63 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
64
62 /* 2 cases for total data len: 65 /* 2 cases for total data len:
63 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 66 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
64 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover 67 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
@@ -138,6 +141,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
138 memcpy(sctx->buf, data, leftover); 141 memcpy(sctx->buf, data, leftover);
139 sctx->count[0] = leftover; 142 sctx->count[0] = leftover;
140out: 143out:
144 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
141 return rc; 145 return rc;
142} 146}
143 147
@@ -149,8 +153,11 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
149 struct nx_sg *in_sg, *out_sg; 153 struct nx_sg *in_sg, *out_sg;
150 u32 max_sg_len; 154 u32 max_sg_len;
151 u64 count0; 155 u64 count0;
156 unsigned long irq_flags;
152 int rc; 157 int rc;
153 158
159 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
160
154 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 161 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
155 162
156 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 163 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
@@ -193,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
193 200
194 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); 201 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
195out: 202out:
203 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
196 return rc; 204 return rc;
197} 205}
198 206
@@ -202,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
202 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 210 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
203 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 211 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
204 struct sha512_state *octx = out; 212 struct sha512_state *octx = out;
213 unsigned long irq_flags;
214
215 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
205 216
206 /* move message_bit_length (128 bits) into count and convert its value 217 /* move message_bit_length (128 bits) into count and convert its value
207 * to bytes */ 218 * to bytes */
@@ -233,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
233 octx->state[7] = SHA512_H7; 244 octx->state[7] = SHA512_H7;
234 } 245 }
235 246
247 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
236 return 0; 248 return 0;
237} 249}
238 250
@@ -242,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
242 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 254 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
243 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 255 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
244 const struct sha512_state *ictx = in; 256 const struct sha512_state *ictx = in;
257 unsigned long irq_flags;
258
259 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
245 260
246 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 261 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
247 sctx->count[0] = ictx->count[0] & 0x3f; 262 sctx->count[0] = ictx->count[0] & 0x3f;
@@ -259,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
259 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 274 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
260 } 275 }
261 276
277 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
262 return 0; 278 return 0;
263} 279}
264 280
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index ad07dc62b95a..bdf4990f9758 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
61 61
62 do { 62 do {
63 rc = vio_h_cop_sync(viodev, op); 63 rc = vio_h_cop_sync(viodev, op);
64 } while ((rc == -EBUSY && !may_sleep && retries--) || 64 } while (rc == -EBUSY && !may_sleep && retries--);
65 (rc == -EBUSY && may_sleep && cond_resched()));
66 65
67 if (rc) { 66 if (rc) {
68 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " 67 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -251,6 +250,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
251 */ 250 */
252void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) 251void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
253{ 252{
253 spin_lock_init(&nx_ctx->lock);
254 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); 254 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
255 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; 255 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
256 256
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 3232b182dd28..14bb97f1c339 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -117,6 +117,7 @@ struct nx_ctr_priv {
117}; 117};
118 118
119struct nx_crypto_ctx { 119struct nx_crypto_ctx {
120 spinlock_t lock; /* synchronize access to the context */
120 void *kmem; /* unaligned, kmalloc'd buffer */ 121 void *kmem; /* unaligned, kmalloc'd buffer */
121 size_t kmem_len; /* length of kmem */ 122 size_t kmem_len; /* length of kmem */
122 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ 123 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */