author    Tom Lendacky <thomas.lendacky@amd.com>    2014-02-24 09:42:14 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>    2014-02-26 16:57:01 -0500
commit    c65a52f8360dc29116395708bde18399e4699f87 (patch)
tree      82e139cf4cde4bcb172aa94243a015590ab645aa /drivers/crypto
parent    950b10bae656d7edf5e2047bf0c9205980f49f2c (diff)
crypto: ccp - Account for CCP backlog processing
When the crypto layer is able to queue up a command for processing by the
CCP on the initial call to ccp_crypto_enqueue_request and the CCP returns
-EBUSY, then if the backlog flag is not set the command needs to be freed
and not added to the active command list.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
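For context on the contract being fixed: ccp_enqueue_cmd() returns -EBUSY
when the device queue is full, and the command has actually been placed on
the backlog only when CCP_CMD_MAY_BACKLOG is set. A minimal caller-side
sketch of that contract follows; example_submit() and its exact handling
are illustrative, not code from this driver.

/*
 * Illustrative sketch of the ccp_enqueue_cmd() return contract assumed
 * by this patch.  example_submit() is hypothetical, not driver code.
 */
static int example_submit(struct ccp_crypto_cmd *crypto_cmd)
{
	int ret = ccp_enqueue_cmd(crypto_cmd->cmd);

	if (ret == -EINPROGRESS)
		return ret;		/* accepted; callback fires later */

	if (ret == -EBUSY) {
		/* Queue full: the command was backlogged only if the
		 * flag was set; otherwise the CCP rejected it outright.
		 */
		if (crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)
			return ret;	/* held on the backlog */
		kfree(crypto_cmd);	/* rejected; caller must free */
		return ret;
	}

	return ret;			/* any other value: hard error */
}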
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c  18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 7d98635c2c5e..20dc848481e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -205,6 +205,7 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
 	struct ccp_crypto_cmd *active = NULL, *tmp;
 	unsigned long flags;
+	bool free_cmd = true;
 	int ret;
 
 	spin_lock_irqsave(&req_queue_lock, flags);
@@ -231,7 +232,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	if (!active) {
 		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
 		if (!ccp_crypto_success(ret))
-			goto e_lock;
+			goto e_lock;	/* Error, don't queue it */
+		if ((ret == -EBUSY) &&
+		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+			goto e_lock;	/* Not backlogging, don't queue it */
 	}
 
 	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -244,9 +248,14 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	req_queue.cmd_count++;
 	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
 
+	free_cmd = false;
+
 e_lock:
 	spin_unlock_irqrestore(&req_queue_lock, flags);
 
+	if (free_cmd)
+		kfree(crypto_cmd);
+
 	return ret;
 }
 
@@ -262,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 {
 	struct ccp_crypto_cmd *crypto_cmd;
 	gfp_t gfp;
-	int ret;
 
 	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 
@@ -287,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 	else
 		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
 
-	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
-	if (!ccp_crypto_success(ret))
-		kfree(crypto_cmd);
-
-	return ret;
+	return ccp_crypto_enqueue_cmd(crypto_cmd);
 }
 
 struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
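
For readability, the success test used in the diff treats -EINPROGRESS and
-EBUSY as non-error returns. Below is a sketch of the helper as it exists
elsewhere in this same file; the exact body is reconstructed from context,
so treat it as an assumption.

/*
 * Sketch of the ccp_crypto_success() helper referenced by the diff.
 * 0, -EINPROGRESS and -EBUSY all count as "successfully submitted".
 */
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

This is why the explicit check is needed: -EBUSY passes ccp_crypto_success(),
so before the patch a non-backlogged command could land on the active command
list even though the CCP had rejected it. The new CCP_CMD_MAY_BACKLOG test
makes such a command take the e_lock path and be freed instead.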