Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c            6
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c          4
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c          7
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_lib.c       6
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c                 6
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c         4
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c         15
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c                  6
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c         16
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c      12
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c       24
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c  68
12 files changed, 86 insertions, 88 deletions
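
Every hunk below is the same mechanical substitution: a dma_zalloc_coherent() call becomes dma_alloc_coherent(), with continuation arguments re-indented to the one-character-shorter name. The rename is behavior-preserving: the retired helper was only a thin inline wrapper that ORed __GFP_ZERO into the gfp flags, and dma_alloc_coherent() itself now returns zeroed memory unconditionally, so the wrapper had become a redundant alias. A minimal sketch of that wrapper, as it was defined in include/linux/dma-mapping.h before its removal (reconstructed from memory; treat the exact layout as approximate):

/*
 * The helper being phased out: identical to dma_alloc_coherent()
 * except that it forces __GFP_ZERO.  Once dma_alloc_coherent()
 * started zeroing its buffers anyway, every caller could be
 * converted 1:1, as in the hunks below.
 */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_coherent(dev, size, dma_handle,
                                  flag | __GFP_ZERO);
}

Because callers received zeroed buffers before and still do after, no call site needs a compensating memset(); only the function name and argument alignment change.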
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..acf79889d903 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
  */
 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 {
-        dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
-                                       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
-                                       &dev->gdr_pa, GFP_ATOMIC);
+        dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+                                      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+                                      &dev->gdr_pa, GFP_ATOMIC);
         if (!dev->gdr)
                 return -ENOMEM;
 
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
         mcode->num_cores = is_ae ? 6 : 10;
 
         /* Allocate DMAable space */
-        mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
-                                          &mcode->phys_base, GFP_KERNEL);
+        mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+                                         &mcode->phys_base, GFP_KERNEL);
         if (!mcode->code) {
                 dev_err(dev, "Unable to allocate space for microcode");
                 ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
 
                 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
                                 rem_q_size;
-                curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
-                                  c_size + CPT_NEXT_CHUNK_PTR_SIZE,
-                                  &curr->dma_addr, GFP_KERNEL);
+                curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+                                                      c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+                                                      &curr->dma_addr,
+                                                      GFP_KERNEL);
                 if (!curr->head) {
                         dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
                                 i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
         struct nitrox_device *ndev = cmdq->ndev;
 
         cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
-        cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
-                                                 &cmdq->unalign_dma,
-                                                 GFP_KERNEL);
+        cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+                                                &cmdq->unalign_dma,
+                                                GFP_KERNEL);
         if (!cmdq->unalign_base)
                 return -ENOMEM;
 
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
                 /* Page alignment satisfies our needs for N <= 128 */
                 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
-                cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
-                                                   &cmd_q->qbase_dma,
-                                                   GFP_KERNEL);
+                cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+                                                  &cmd_q->qbase_dma,
+                                                  GFP_KERNEL);
                 if (!cmd_q->qbase) {
                         dev_err(dev, "unable to allocate command queue\n");
                         ret = -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
         } else {
                 /* new key */
-                ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
-                                               &ctx->pkey, GFP_KERNEL);
+                ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+                                              &ctx->pkey, GFP_KERNEL);
                 if (!ctx->key) {
                         mutex_unlock(&ctx->lock);
                         return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
         struct sec_queue_ring_db *ring_db = &queue->ring_db;
         int ret;
 
-        ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
-                                              &ring_cmd->paddr,
-                                              GFP_KERNEL);
+        ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+                                             &ring_cmd->paddr, GFP_KERNEL);
         if (!ring_cmd->vaddr)
                 return -ENOMEM;
 
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
         mutex_init(&ring_cmd->lock);
         ring_cmd->callback = sec_alg_callback;
 
-        ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
-                                             &ring_cq->paddr,
-                                             GFP_KERNEL);
+        ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+                                            &ring_cq->paddr, GFP_KERNEL);
         if (!ring_cq->vaddr) {
                 ret = -ENOMEM;
                 goto err_free_ring_cmd;
         }
 
-        ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
-                                             &ring_db->paddr,
-                                             GFP_KERNEL);
+        ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+                                            &ring_db->paddr, GFP_KERNEL);
         if (!ring_db->vaddr) {
                 ret = -ENOMEM;
                 goto err_free_ring_cq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..1b0d156bb9be 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
 {
         struct device *dev = &pdev->dev;
         BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-        crypt_virt = dma_zalloc_coherent(dev,
-                                         NPE_QLEN * sizeof(struct crypt_ctl),
-                                         &crypt_phys, GFP_ATOMIC);
+        crypt_virt = dma_alloc_coherent(dev,
+                                        NPE_QLEN * sizeof(struct crypt_ctl),
+                                        &crypt_phys, GFP_ATOMIC);
         if (!crypt_virt)
                 return -ENOMEM;
         return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
                 if (!ring[i])
                         goto err_cleanup;
 
-                ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
-                                                        MTK_DESC_RING_SZ,
-                                                        &ring[i]->cmd_dma,
-                                                        GFP_KERNEL);
+                ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+                                                       MTK_DESC_RING_SZ,
+                                                       &ring[i]->cmd_dma,
+                                                       GFP_KERNEL);
                 if (!ring[i]->cmd_base)
                         goto err_cleanup;
 
-                ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
-                                                        MTK_DESC_RING_SZ,
-                                                        &ring[i]->res_dma,
-                                                        GFP_KERNEL);
+                ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+                                                       MTK_DESC_RING_SZ,
+                                                       &ring[i]->res_dma,
+                                                       GFP_KERNEL);
                 if (!ring[i]->res_base)
                         goto err_cleanup;
 
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
                              dev_to_node(&GET_DEV(accel_dev)));
         if (!admin)
                 return -ENOMEM;
-        admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                               &admin->phy_addr, GFP_KERNEL);
+        admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                              &admin->phy_addr, GFP_KERNEL);
         if (!admin->virt_addr) {
                 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
                 kfree(admin);
                 return -ENOMEM;
         }
 
-        admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
-                                                   PAGE_SIZE,
-                                                   &admin->const_tbl_addr,
-                                                   GFP_KERNEL);
+        admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                                  PAGE_SIZE,
+                                                  &admin->const_tbl_addr,
+                                                  GFP_KERNEL);
         if (!admin->virt_tbl_addr) {
                 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
                 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
 
         dev = &GET_DEV(inst->accel_dev);
         ctx->inst = inst;
-        ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                          &ctx->enc_cd_paddr,
-                                          GFP_ATOMIC);
+        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                         &ctx->enc_cd_paddr,
+                                         GFP_ATOMIC);
         if (!ctx->enc_cd) {
                 return -ENOMEM;
         }
-        ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                          &ctx->dec_cd_paddr,
-                                          GFP_ATOMIC);
+        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                         &ctx->dec_cd_paddr,
+                                         GFP_ATOMIC);
         if (!ctx->dec_cd) {
                 goto out_free_enc;
         }
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 
         dev = &GET_DEV(inst->accel_dev);
         ctx->inst = inst;
-        ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                          &ctx->enc_cd_paddr,
-                                          GFP_ATOMIC);
+        ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                         &ctx->enc_cd_paddr,
+                                         GFP_ATOMIC);
         if (!ctx->enc_cd) {
                 spin_unlock(&ctx->lock);
                 return -ENOMEM;
         }
-        ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                          &ctx->dec_cd_paddr,
-                                          GFP_ATOMIC);
+        ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                         &ctx->dec_cd_paddr,
+                                         GFP_ATOMIC);
         if (!ctx->dec_cd) {
                 spin_unlock(&ctx->lock);
                 goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
         } else {
                 int shift = ctx->p_size - req->src_len;
 
-                qat_req->src_align = dma_zalloc_coherent(dev,
-                                                         ctx->p_size,
-                                                         &qat_req->in.dh.in.b,
-                                                         GFP_KERNEL);
+                qat_req->src_align = dma_alloc_coherent(dev,
+                                                        ctx->p_size,
+                                                        &qat_req->in.dh.in.b,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->src_align))
                         return ret;
 
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
                         goto unmap_src;
 
         } else {
-                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
-                                                         &qat_req->out.dh.r,
-                                                         GFP_KERNEL);
+                qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+                                                        &qat_req->out.dh.r,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->dst_align))
                         goto unmap_src;
         }
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
                 return -EINVAL;
 
         ctx->p_size = params->p_size;
-        ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+        ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
         if (!ctx->p)
                 return -ENOMEM;
         memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
                 return 0;
         }
 
-        ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+        ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
         if (!ctx->g)
                 return -ENOMEM;
         memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
         if (ret < 0)
                 goto err_clear_ctx;
 
-        ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
-                                      GFP_KERNEL);
+        ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+                                     GFP_KERNEL);
         if (!ctx->xa) {
                 ret = -ENOMEM;
                 goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
         } else {
                 int shift = ctx->key_sz - req->src_len;
 
-                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                         &qat_req->in.rsa.enc.m,
-                                                         GFP_KERNEL);
+                qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->in.rsa.enc.m,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->src_align))
                         return ret;
 
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
                         goto unmap_src;
 
         } else {
-                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                         &qat_req->out.rsa.enc.c,
-                                                         GFP_KERNEL);
+                qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->out.rsa.enc.c,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->dst_align))
                         goto unmap_src;
 
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
         } else {
                 int shift = ctx->key_sz - req->src_len;
 
-                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                         &qat_req->in.rsa.dec.c,
-                                                         GFP_KERNEL);
+                qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->in.rsa.dec.c,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->src_align))
                         return ret;
 
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
                         goto unmap_src;
 
         } else {
-                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                         &qat_req->out.rsa.dec.m,
-                                                         GFP_KERNEL);
+                qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                        &qat_req->out.rsa.dec.m,
+                                                        GFP_KERNEL);
                 if (unlikely(!qat_req->dst_align))
                         goto unmap_src;
 
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
                 goto err;
 
         ret = -ENOMEM;
-        ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+        ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
         if (!ctx->n)
                 goto err;
 
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
                 return -EINVAL;
         }
 
-        ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+        ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
         if (!ctx->e)
                 return -ENOMEM;
 
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
                 goto err;
 
         ret = -ENOMEM;
-        ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+        ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
         if (!ctx->d)
                 goto err;
 
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
         qat_rsa_drop_leading_zeros(&ptr, &len);
         if (!len)
                 goto err;
-        ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+        ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
         if (!ctx->p)
                 goto err;
         memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
         qat_rsa_drop_leading_zeros(&ptr, &len);
         if (!len)
                 goto free_p;
-        ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+        ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
         if (!ctx->q)
                 goto free_p;
         memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
         qat_rsa_drop_leading_zeros(&ptr, &len);
         if (!len)
                 goto free_q;
-        ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
-                                      GFP_KERNEL);
+        ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+                                     GFP_KERNEL);
         if (!ctx->dp)
                 goto free_q;
         memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
         qat_rsa_drop_leading_zeros(&ptr, &len);
         if (!len)
                 goto free_dp;
-        ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
-                                      GFP_KERNEL);
+        ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+                                     GFP_KERNEL);
         if (!ctx->dq)
                 goto free_dp;
         memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
         qat_rsa_drop_leading_zeros(&ptr, &len);
         if (!len)
                 goto free_dq;
-        ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
-                                        GFP_KERNEL);
+        ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+                                       GFP_KERNEL);
         if (!ctx->qinv)
                 goto free_dq;
         memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
