aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto/qat
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 06:44:48 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 06:44:48 -0400
commit87d7bcee4f5973a593b0d50134364cfe5652ff33 (patch)
tree677125896b64de2f5acfa204955442f58e74cfa9 /drivers/crypto/qat
parent0223f9aaef94a09ffc0b6abcba732e62a483b88c (diff)
parentbe34c4ef693ff5c10f55606dbd656ddf0b4a8340 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu: - add multibuffer infrastructure (single_task_running scheduler helper, OKed by Peter on lkml). - add SHA1 multibuffer implementation for AVX2. - reenable "by8" AVX CTR optimisation after fixing counter overflow. - add APM X-Gene SoC RNG support. - SHA256/SHA512 now handles unaligned input correctly. - set lz4 decompressed length correctly. - fix algif socket buffer allocation failure for 64K page machines. - misc fixes * git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (47 commits) crypto: sha - Handle unaligned input data in generic sha256 and sha512. Revert "crypto: aesni - disable "by8" AVX CTR optimization" crypto: aesni - remove unused defines in "by8" variant crypto: aesni - fix counter overflow handling in "by8" variant hwrng: printk replacement crypto: qat - Removed unneeded partial state crypto: qat - Fix typo in name of tasklet_struct crypto: caam - Dynamic allocation of addresses for various memory blocks in CAAM. crypto: mcryptd - Fix typos in CRYPTO_MCRYPTD description crypto: algif - avoid excessive use of socket buffer in skcipher arm64: dts: add random number generator dts node to APM X-Gene platform. Documentation: rng: Add X-Gene SoC RNG driver documentation hwrng: xgene - add support for APM X-Gene SoC RNG support crypto: mv_cesa - Add missing #define crypto: testmgr - add test for lz4 and lz4hc crypto: lz4,lz4hc - fix decompression crypto: qat - Use pci_enable_msix_exact() instead of pci_enable_msix() crypto: drbg - fix maximum value checks on 32 bit systems crypto: drbg - fix sparse warning for cpu_to_be[32|64] crypto: sha-mb - sha1_mb_alg_state can be static ...
Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_internal.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c66
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_isr.c14
4 files changed, 16 insertions, 68 deletions
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6a92284a86b2..244d73378f0e 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -111,7 +111,7 @@ static int adf_chr_drv_create(void)
111 drv_device = device_create(adt_ctl_drv.drv_class, NULL, 111 drv_device = device_create(adt_ctl_drv.drv_class, NULL,
112 MKDEV(adt_ctl_drv.major, 0), 112 MKDEV(adt_ctl_drv.major, 0),
113 NULL, DEVICE_NAME); 113 NULL, DEVICE_NAME);
114 if (!drv_device) { 114 if (IS_ERR(drv_device)) {
115 pr_err("QAT: failed to create device\n"); 115 pr_err("QAT: failed to create device\n");
116 goto err_cdev_del; 116 goto err_cdev_del;
117 } 117 }
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index f854bac276b0..c40546079981 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -75,7 +75,7 @@ struct adf_etr_ring_data {
75 75
76struct adf_etr_bank_data { 76struct adf_etr_bank_data {
77 struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK]; 77 struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
78 struct tasklet_struct resp_hanlder; 78 struct tasklet_struct resp_handler;
79 void __iomem *csr_addr; 79 void __iomem *csr_addr;
80 struct adf_accel_dev *accel_dev; 80 struct adf_accel_dev *accel_dev;
81 uint32_t irq_coalesc_timer; 81 uint32_t irq_coalesc_timer;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59df48872955..3e26fa2b293f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -105,7 +105,7 @@ struct qat_alg_cd {
105#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) 105#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
106 106
107struct qat_auth_state { 107struct qat_auth_state {
108 uint8_t data[MAX_AUTH_STATE_SIZE]; 108 uint8_t data[MAX_AUTH_STATE_SIZE + 64];
109} __aligned(64); 109} __aligned(64);
110 110
111struct qat_alg_session_ctx { 111struct qat_alg_session_ctx {
@@ -113,10 +113,6 @@ struct qat_alg_session_ctx {
113 dma_addr_t enc_cd_paddr; 113 dma_addr_t enc_cd_paddr;
114 struct qat_alg_cd *dec_cd; 114 struct qat_alg_cd *dec_cd;
115 dma_addr_t dec_cd_paddr; 115 dma_addr_t dec_cd_paddr;
116 struct qat_auth_state *auth_hw_state_enc;
117 dma_addr_t auth_state_enc_paddr;
118 struct qat_auth_state *auth_hw_state_dec;
119 dma_addr_t auth_state_dec_paddr;
120 struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; 116 struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
121 struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; 117 struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
122 struct qat_crypto_instance *inst; 118 struct qat_crypto_instance *inst;
@@ -150,8 +146,9 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
150static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, 146static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
151 struct qat_alg_session_ctx *ctx, 147 struct qat_alg_session_ctx *ctx,
152 const uint8_t *auth_key, 148 const uint8_t *auth_key,
153 unsigned int auth_keylen, uint8_t *auth_state) 149 unsigned int auth_keylen)
154{ 150{
151 struct qat_auth_state auth_state;
155 struct { 152 struct {
156 struct shash_desc shash; 153 struct shash_desc shash;
157 char ctx[crypto_shash_descsize(ctx->hash_tfm)]; 154 char ctx[crypto_shash_descsize(ctx->hash_tfm)];
@@ -161,12 +158,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
161 struct sha512_state sha512; 158 struct sha512_state sha512;
162 int block_size = crypto_shash_blocksize(ctx->hash_tfm); 159 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
163 int digest_size = crypto_shash_digestsize(ctx->hash_tfm); 160 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
164 uint8_t *ipad = auth_state; 161 uint8_t *ipad = auth_state.data;
165 uint8_t *opad = ipad + block_size; 162 uint8_t *opad = ipad + block_size;
166 __be32 *hash_state_out; 163 __be32 *hash_state_out;
167 __be64 *hash512_state_out; 164 __be64 *hash512_state_out;
168 int i, offset; 165 int i, offset;
169 166
167 memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
170 desc.shash.tfm = ctx->hash_tfm; 168 desc.shash.tfm = ctx->hash_tfm;
171 desc.shash.flags = 0x0; 169 desc.shash.flags = 0x0;
172 170
@@ -298,10 +296,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
298 void *ptr = &req_tmpl->cd_ctrl; 296 void *ptr = &req_tmpl->cd_ctrl;
299 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; 297 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
300 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; 298 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
301 struct icp_qat_fw_la_auth_req_params *auth_param =
302 (struct icp_qat_fw_la_auth_req_params *)
303 ((char *)&req_tmpl->serv_specif_rqpars +
304 sizeof(struct icp_qat_fw_la_cipher_req_params));
305 299
306 /* CD setup */ 300 /* CD setup */
307 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); 301 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
@@ -312,8 +306,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
312 hash->sha.inner_setup.auth_counter.counter = 306 hash->sha.inner_setup.auth_counter.counter =
313 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); 307 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
314 308
315 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, 309 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
316 (uint8_t *)ctx->auth_hw_state_enc))
317 return -EFAULT; 310 return -EFAULT;
318 311
319 /* Request setup */ 312 /* Request setup */
@@ -359,9 +352,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
359 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + 352 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
360 ((sizeof(struct icp_qat_hw_auth_setup) + 353 ((sizeof(struct icp_qat_hw_auth_setup) +
361 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); 354 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
362 auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
363 sizeof(struct icp_qat_hw_auth_counter) +
364 round_up(hash_cd_ctrl->inner_state1_sz, 8);
365 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); 355 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
366 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); 356 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
367 return 0; 357 return 0;
@@ -399,8 +389,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
399 hash->sha.inner_setup.auth_counter.counter = 389 hash->sha.inner_setup.auth_counter.counter =
400 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); 390 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
401 391
402 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen, 392 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
403 (uint8_t *)ctx->auth_hw_state_dec))
404 return -EFAULT; 393 return -EFAULT;
405 394
406 /* Request setup */ 395 /* Request setup */
@@ -450,9 +439,6 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
450 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + 439 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
451 ((sizeof(struct icp_qat_hw_auth_setup) + 440 ((sizeof(struct icp_qat_hw_auth_setup) +
452 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3); 441 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
453 auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
454 sizeof(struct icp_qat_hw_auth_counter) +
455 round_up(hash_cd_ctrl->inner_state1_sz, 8);
456 auth_param->auth_res_sz = digestsize; 442 auth_param->auth_res_sz = digestsize;
457 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH); 443 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
458 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); 444 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
@@ -512,10 +498,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
512 dev = &GET_DEV(ctx->inst->accel_dev); 498 dev = &GET_DEV(ctx->inst->accel_dev);
513 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); 499 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
514 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); 500 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
515 memset(ctx->auth_hw_state_enc, 0,
516 sizeof(struct qat_auth_state));
517 memset(ctx->auth_hw_state_dec, 0,
518 sizeof(struct qat_auth_state));
519 memset(&ctx->enc_fw_req_tmpl, 0, 501 memset(&ctx->enc_fw_req_tmpl, 0,
520 sizeof(struct icp_qat_fw_la_bulk_req)); 502 sizeof(struct icp_qat_fw_la_bulk_req));
521 memset(&ctx->dec_fw_req_tmpl, 0, 503 memset(&ctx->dec_fw_req_tmpl, 0,
@@ -548,22 +530,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
548 spin_unlock(&ctx->lock); 530 spin_unlock(&ctx->lock);
549 goto out_free_enc; 531 goto out_free_enc;
550 } 532 }
551 ctx->auth_hw_state_enc =
552 dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
553 &ctx->auth_state_enc_paddr,
554 GFP_ATOMIC);
555 if (!ctx->auth_hw_state_enc) {
556 spin_unlock(&ctx->lock);
557 goto out_free_dec;
558 }
559 ctx->auth_hw_state_dec =
560 dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
561 &ctx->auth_state_dec_paddr,
562 GFP_ATOMIC);
563 if (!ctx->auth_hw_state_dec) {
564 spin_unlock(&ctx->lock);
565 goto out_free_auth_enc;
566 }
567 } 533 }
568 spin_unlock(&ctx->lock); 534 spin_unlock(&ctx->lock);
569 if (qat_alg_init_sessions(ctx, key, keylen)) 535 if (qat_alg_init_sessions(ctx, key, keylen))
@@ -572,14 +538,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
572 return 0; 538 return 0;
573 539
574out_free_all: 540out_free_all:
575 dma_free_coherent(dev, sizeof(struct qat_auth_state),
576 ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
577 ctx->auth_hw_state_dec = NULL;
578out_free_auth_enc:
579 dma_free_coherent(dev, sizeof(struct qat_auth_state),
580 ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
581 ctx->auth_hw_state_enc = NULL;
582out_free_dec:
583 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 541 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
584 ctx->dec_cd, ctx->dec_cd_paddr); 542 ctx->dec_cd, ctx->dec_cd_paddr);
585 ctx->dec_cd = NULL; 543 ctx->dec_cd = NULL;
@@ -924,16 +882,6 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
924 if (ctx->dec_cd) 882 if (ctx->dec_cd)
925 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 883 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
926 ctx->dec_cd, ctx->dec_cd_paddr); 884 ctx->dec_cd, ctx->dec_cd_paddr);
927 if (ctx->auth_hw_state_enc)
928 dma_free_coherent(dev, sizeof(struct qat_auth_state),
929 ctx->auth_hw_state_enc,
930 ctx->auth_state_enc_paddr);
931
932 if (ctx->auth_hw_state_dec)
933 dma_free_coherent(dev, sizeof(struct qat_auth_state),
934 ctx->auth_hw_state_dec,
935 ctx->auth_state_dec_paddr);
936
937 qat_crypto_put_instance(inst); 885 qat_crypto_put_instance(inst);
938} 886}
939 887
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d4172dedf775..67ec61e51185 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -70,9 +70,9 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
70 for (i = 0; i < msix_num_entries; i++) 70 for (i = 0; i < msix_num_entries; i++)
71 pci_dev_info->msix_entries.entries[i].entry = i; 71 pci_dev_info->msix_entries.entries[i].entry = i;
72 72
73 if (pci_enable_msix(pci_dev_info->pci_dev, 73 if (pci_enable_msix_exact(pci_dev_info->pci_dev,
74 pci_dev_info->msix_entries.entries, 74 pci_dev_info->msix_entries.entries,
75 msix_num_entries)) { 75 msix_num_entries)) {
76 pr_err("QAT: Failed to enable MSIX IRQ\n"); 76 pr_err("QAT: Failed to enable MSIX IRQ\n");
77 return -EFAULT; 77 return -EFAULT;
78 } 78 }
@@ -89,7 +89,7 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
89 struct adf_etr_bank_data *bank = bank_ptr; 89 struct adf_etr_bank_data *bank = bank_ptr;
90 90
91 WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); 91 WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
92 tasklet_hi_schedule(&bank->resp_hanlder); 92 tasklet_hi_schedule(&bank->resp_handler);
93 return IRQ_HANDLED; 93 return IRQ_HANDLED;
94} 94}
95 95
@@ -217,7 +217,7 @@ static int adf_setup_bh(struct adf_accel_dev *accel_dev)
217 int i; 217 int i;
218 218
219 for (i = 0; i < hw_data->num_banks; i++) 219 for (i = 0; i < hw_data->num_banks; i++)
220 tasklet_init(&priv_data->banks[i].resp_hanlder, 220 tasklet_init(&priv_data->banks[i].resp_handler,
221 adf_response_handler, 221 adf_response_handler,
222 (unsigned long)&priv_data->banks[i]); 222 (unsigned long)&priv_data->banks[i]);
223 return 0; 223 return 0;
@@ -230,8 +230,8 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
230 int i; 230 int i;
231 231
232 for (i = 0; i < hw_data->num_banks; i++) { 232 for (i = 0; i < hw_data->num_banks; i++) {
233 tasklet_disable(&priv_data->banks[i].resp_hanlder); 233 tasklet_disable(&priv_data->banks[i].resp_handler);
234 tasklet_kill(&priv_data->banks[i].resp_hanlder); 234 tasklet_kill(&priv_data->banks[i].resp_handler);
235 } 235 }
236} 236}
237 237