diff options
author | Luis Chamberlain <mcgrof@kernel.org> | 2019-01-04 03:23:09 -0500 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2019-01-08 07:58:37 -0500 |
commit | 750afb08ca71310fcf0c4e2cb1565c63b8235b60 (patch) | |
tree | 1dde3877eb4a1a0f0349786b66c3d9276ae94561 | |
parent | 3bd6e94bec122a951d462c239b47954cf5f36e33 (diff) |
cross-tree: phase out dma_zalloc_coherent()
We already need to zero out memory for dma_alloc_coherent(), as such
using dma_zalloc_coherent() is superfluous. Phase it out.
This change was generated with the following Coccinelle SmPL patch:
@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@
-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
173 files changed, 915 insertions, 949 deletions
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 982859f2b2a3..5e6a1a45cbd2 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c | |||
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch) | |||
129 | unsigned long flags; | 129 | unsigned long flags; |
130 | 130 | ||
131 | ch->desc = 0; | 131 | ch->desc = 0; |
132 | ch->desc_base = dma_zalloc_coherent(ch->dev, | 132 | ch->desc_base = dma_alloc_coherent(ch->dev, |
133 | LTQ_DESC_NUM * LTQ_DESC_SIZE, | 133 | LTQ_DESC_NUM * LTQ_DESC_SIZE, |
134 | &ch->phys, GFP_ATOMIC); | 134 | &ch->phys, GFP_ATOMIC); |
135 | 135 | ||
136 | spin_lock_irqsave(&ltq_dma_lock, flags); | 136 | spin_lock_irqsave(&ltq_dma_lock, flags); |
137 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | 137 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); |
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index d18d16489a15..bdf9b716e848 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c | |||
@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size) | |||
255 | 255 | ||
256 | chan->ring_size = ring_size; | 256 | chan->ring_size = ring_size; |
257 | 257 | ||
258 | chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, | 258 | chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev, |
259 | ring_size * sizeof(u64), | 259 | ring_size * sizeof(u64), |
260 | &chan->ring_dma, GFP_KERNEL); | 260 | &chan->ring_dma, GFP_KERNEL); |
261 | 261 | ||
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index 8b0ebf3940d2..ebed46f80254 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c | |||
@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | |||
756 | } | 756 | } |
757 | 757 | ||
758 | /* Initialize outbound message descriptor ring */ | 758 | /* Initialize outbound message descriptor ring */ |
759 | rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, | 759 | rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, |
760 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, | 760 | rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, |
761 | &rmu->msg_tx_ring.phys, GFP_KERNEL); | 761 | &rmu->msg_tx_ring.phys, |
762 | GFP_KERNEL); | ||
762 | if (!rmu->msg_tx_ring.virt) { | 763 | if (!rmu->msg_tx_ring.virt) { |
763 | rc = -ENOMEM; | 764 | rc = -ENOMEM; |
764 | goto out_dma; | 765 | goto out_dma; |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 4dc528bf8e85..9c1247d42897 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap) | |||
729 | if (!pp) | 729 | if (!pp) |
730 | return -ENOMEM; | 730 | return -ENOMEM; |
731 | 731 | ||
732 | mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, | 732 | mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, |
733 | GFP_KERNEL); | 733 | GFP_KERNEL); |
734 | if (!mem) { | 734 | if (!mem) { |
735 | kfree(pp); | 735 | kfree(pp); |
736 | return -ENOMEM; | 736 | return -ENOMEM; |
diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 29f102dcfec4..2e9d1cfe3aeb 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c | |||
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev) | |||
533 | 533 | ||
534 | static int he_init_tpdrq(struct he_dev *he_dev) | 534 | static int he_init_tpdrq(struct he_dev *he_dev) |
535 | { | 535 | { |
536 | he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 536 | he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, |
537 | CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), | 537 | CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), |
538 | &he_dev->tpdrq_phys, GFP_KERNEL); | 538 | &he_dev->tpdrq_phys, |
539 | GFP_KERNEL); | ||
539 | if (he_dev->tpdrq_base == NULL) { | 540 | if (he_dev->tpdrq_base == NULL) { |
540 | hprintk("failed to alloc tpdrq\n"); | 541 | hprintk("failed to alloc tpdrq\n"); |
541 | return -ENOMEM; | 542 | return -ENOMEM; |
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group) | |||
805 | goto out_free_rbpl_virt; | 806 | goto out_free_rbpl_virt; |
806 | } | 807 | } |
807 | 808 | ||
808 | he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 809 | he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev, |
809 | CONFIG_RBPL_SIZE * sizeof(struct he_rbp), | 810 | CONFIG_RBPL_SIZE * sizeof(struct he_rbp), |
810 | &he_dev->rbpl_phys, GFP_KERNEL); | 811 | &he_dev->rbpl_phys, GFP_KERNEL); |
811 | if (he_dev->rbpl_base == NULL) { | 812 | if (he_dev->rbpl_base == NULL) { |
812 | hprintk("failed to alloc rbpl_base\n"); | 813 | hprintk("failed to alloc rbpl_base\n"); |
813 | goto out_destroy_rbpl_pool; | 814 | goto out_destroy_rbpl_pool; |
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group) | |||
844 | 845 | ||
845 | /* rx buffer ready queue */ | 846 | /* rx buffer ready queue */ |
846 | 847 | ||
847 | he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 848 | he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, |
848 | CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), | 849 | CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), |
849 | &he_dev->rbrq_phys, GFP_KERNEL); | 850 | &he_dev->rbrq_phys, GFP_KERNEL); |
850 | if (he_dev->rbrq_base == NULL) { | 851 | if (he_dev->rbrq_base == NULL) { |
851 | hprintk("failed to allocate rbrq\n"); | 852 | hprintk("failed to allocate rbrq\n"); |
852 | goto out_free_rbpl; | 853 | goto out_free_rbpl; |
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group) | |||
868 | 869 | ||
869 | /* tx buffer ready queue */ | 870 | /* tx buffer ready queue */ |
870 | 871 | ||
871 | he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 872 | he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, |
872 | CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), | 873 | CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), |
873 | &he_dev->tbrq_phys, GFP_KERNEL); | 874 | &he_dev->tbrq_phys, GFP_KERNEL); |
874 | if (he_dev->tbrq_base == NULL) { | 875 | if (he_dev->tbrq_base == NULL) { |
875 | hprintk("failed to allocate tbrq\n"); | 876 | hprintk("failed to allocate tbrq\n"); |
876 | goto out_free_rbpq_base; | 877 | goto out_free_rbpq_base; |
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev) | |||
913 | /* 2.9.3.5 tail offset for each interrupt queue is located after the | 914 | /* 2.9.3.5 tail offset for each interrupt queue is located after the |
914 | end of the interrupt queue */ | 915 | end of the interrupt queue */ |
915 | 916 | ||
916 | he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 917 | he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev, |
917 | (CONFIG_IRQ_SIZE + 1) | 918 | (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq), |
918 | * sizeof(struct he_irq), | 919 | &he_dev->irq_phys, GFP_KERNEL); |
919 | &he_dev->irq_phys, | ||
920 | GFP_KERNEL); | ||
921 | if (he_dev->irq_base == NULL) { | 920 | if (he_dev->irq_base == NULL) { |
922 | hprintk("failed to allocate irq\n"); | 921 | hprintk("failed to allocate irq\n"); |
923 | return -ENOMEM; | 922 | return -ENOMEM; |
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev) | |||
1464 | 1463 | ||
1465 | /* host status page */ | 1464 | /* host status page */ |
1466 | 1465 | ||
1467 | he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, | 1466 | he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev, |
1468 | sizeof(struct he_hsp), | 1467 | sizeof(struct he_hsp), |
1469 | &he_dev->hsp_phys, GFP_KERNEL); | 1468 | &he_dev->hsp_phys, GFP_KERNEL); |
1470 | if (he_dev->hsp == NULL) { | 1469 | if (he_dev->hsp == NULL) { |
1471 | hprintk("failed to allocate host status page\n"); | 1470 | hprintk("failed to allocate host status page\n"); |
1472 | return -ENOMEM; | 1471 | return -ENOMEM; |
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 6e737142ceaa..43a14579e80e 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c | |||
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class) | |||
641 | scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); | 641 | scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); |
642 | if (!scq) | 642 | if (!scq) |
643 | return NULL; | 643 | return NULL; |
644 | scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, | 644 | scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE, |
645 | &scq->paddr, GFP_KERNEL); | 645 | &scq->paddr, GFP_KERNEL); |
646 | if (scq->base == NULL) { | 646 | if (scq->base == NULL) { |
647 | kfree(scq); | 647 | kfree(scq); |
648 | return NULL; | 648 | return NULL; |
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card) | |||
971 | { | 971 | { |
972 | struct rsq_entry *rsqe; | 972 | struct rsq_entry *rsqe; |
973 | 973 | ||
974 | card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, | 974 | card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE, |
975 | &card->rsq.paddr, GFP_KERNEL); | 975 | &card->rsq.paddr, GFP_KERNEL); |
976 | if (card->rsq.base == NULL) { | 976 | if (card->rsq.base == NULL) { |
977 | printk("%s: can't allocate RSQ.\n", card->name); | 977 | printk("%s: can't allocate RSQ.\n", card->name); |
978 | return -1; | 978 | return -1; |
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev) | |||
3390 | writel(0, SAR_REG_GP); | 3390 | writel(0, SAR_REG_GP); |
3391 | 3391 | ||
3392 | /* Initialize RAW Cell Handle Register */ | 3392 | /* Initialize RAW Cell Handle Register */ |
3393 | card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, | 3393 | card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev, |
3394 | 2 * sizeof(u32), | 3394 | 2 * sizeof(u32), |
3395 | &card->raw_cell_paddr, | 3395 | &card->raw_cell_paddr, |
3396 | GFP_KERNEL); | 3396 | GFP_KERNEL); |
3397 | if (!card->raw_cell_hnd) { | 3397 | if (!card->raw_cell_hnd) { |
3398 | printk("%s: memory allocation failure.\n", card->name); | 3398 | printk("%s: memory allocation failure.\n", card->name); |
3399 | deinit_card(card); | 3399 | deinit_card(card); |
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a10d5736d8f7..ab893a7571a2 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev) | |||
2641 | "comp pci_alloc, total bytes %zd entries %d\n", | 2641 | "comp pci_alloc, total bytes %zd entries %d\n", |
2642 | SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); | 2642 | SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); |
2643 | 2643 | ||
2644 | skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, | 2644 | skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, |
2645 | &skdev->cq_dma_address, GFP_KERNEL); | 2645 | &skdev->cq_dma_address, GFP_KERNEL); |
2646 | 2646 | ||
2647 | if (skcomp == NULL) { | 2647 | if (skcomp == NULL) { |
2648 | rc = -ENOMEM; | 2648 | rc = -ENOMEM; |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 63cb6956c948..acf79889d903 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) | |||
283 | */ | 283 | */ |
284 | static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) | 284 | static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) |
285 | { | 285 | { |
286 | dev->gdr = dma_zalloc_coherent(dev->core_dev->device, | 286 | dev->gdr = dma_alloc_coherent(dev->core_dev->device, |
287 | sizeof(struct ce_gd) * PPC4XX_NUM_GD, | 287 | sizeof(struct ce_gd) * PPC4XX_NUM_GD, |
288 | &dev->gdr_pa, GFP_ATOMIC); | 288 | &dev->gdr_pa, GFP_ATOMIC); |
289 | if (!dev->gdr) | 289 | if (!dev->gdr) |
290 | return -ENOMEM; | 290 | return -ENOMEM; |
291 | 291 | ||
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 06ad85ab5e86..a876535529d1 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c | |||
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) | |||
278 | mcode->num_cores = is_ae ? 6 : 10; | 278 | mcode->num_cores = is_ae ? 6 : 10; |
279 | 279 | ||
280 | /* Allocate DMAable space */ | 280 | /* Allocate DMAable space */ |
281 | mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, | 281 | mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, |
282 | &mcode->phys_base, GFP_KERNEL); | 282 | &mcode->phys_base, GFP_KERNEL); |
283 | if (!mcode->code) { | 283 | if (!mcode->code) { |
284 | dev_err(dev, "Unable to allocate space for microcode"); | 284 | dev_err(dev, "Unable to allocate space for microcode"); |
285 | ret = -ENOMEM; | 285 | ret = -ENOMEM; |
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index 5c796ed55eba..2ca431ed1db8 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c | |||
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf, | |||
236 | 236 | ||
237 | c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : | 237 | c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : |
238 | rem_q_size; | 238 | rem_q_size; |
239 | curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, | 239 | curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, |
240 | c_size + CPT_NEXT_CHUNK_PTR_SIZE, | 240 | c_size + CPT_NEXT_CHUNK_PTR_SIZE, |
241 | &curr->dma_addr, GFP_KERNEL); | 241 | &curr->dma_addr, |
242 | GFP_KERNEL); | ||
242 | if (!curr->head) { | 243 | if (!curr->head) { |
243 | dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", | 244 | dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", |
244 | i, queue->nchunks); | 245 | i, queue->nchunks); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 9138bae12521..4ace9bcd603a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) | |||
25 | struct nitrox_device *ndev = cmdq->ndev; | 25 | struct nitrox_device *ndev = cmdq->ndev; |
26 | 26 | ||
27 | cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; | 27 | cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; |
28 | cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, | 28 | cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, |
29 | &cmdq->unalign_dma, | 29 | &cmdq->unalign_dma, |
30 | GFP_KERNEL); | 30 | GFP_KERNEL); |
31 | if (!cmdq->unalign_base) | 31 | if (!cmdq->unalign_base) |
32 | return -ENOMEM; | 32 | return -ENOMEM; |
33 | 33 | ||
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 44a4d2779b15..c9bfd4f439ce 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp) | |||
822 | /* Page alignment satisfies our needs for N <= 128 */ | 822 | /* Page alignment satisfies our needs for N <= 128 */ |
823 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); | 823 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); |
824 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); | 824 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); |
825 | cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, | 825 | cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, |
826 | &cmd_q->qbase_dma, | 826 | &cmd_q->qbase_dma, |
827 | GFP_KERNEL); | 827 | GFP_KERNEL); |
828 | if (!cmd_q->qbase) { | 828 | if (!cmd_q->qbase) { |
829 | dev_err(dev, "unable to allocate command queue\n"); | 829 | dev_err(dev, "unable to allocate command queue\n"); |
830 | ret = -ENOMEM; | 830 | ret = -ENOMEM; |
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index cdc4f9a171d9..adc0cd8ae97b 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c | |||
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, | |||
241 | memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); | 241 | memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); |
242 | } else { | 242 | } else { |
243 | /* new key */ | 243 | /* new key */ |
244 | ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, | 244 | ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, |
245 | &ctx->pkey, GFP_KERNEL); | 245 | &ctx->pkey, GFP_KERNEL); |
246 | if (!ctx->key) { | 246 | if (!ctx->key) { |
247 | mutex_unlock(&ctx->lock); | 247 | mutex_unlock(&ctx->lock); |
248 | return -ENOMEM; | 248 | return -ENOMEM; |
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index c1ee4e7bf996..91ee2bb575df 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c | |||
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue) | |||
1082 | struct sec_queue_ring_db *ring_db = &queue->ring_db; | 1082 | struct sec_queue_ring_db *ring_db = &queue->ring_db; |
1083 | int ret; | 1083 | int ret; |
1084 | 1084 | ||
1085 | ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, | 1085 | ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, |
1086 | &ring_cmd->paddr, | 1086 | &ring_cmd->paddr, GFP_KERNEL); |
1087 | GFP_KERNEL); | ||
1088 | if (!ring_cmd->vaddr) | 1087 | if (!ring_cmd->vaddr) |
1089 | return -ENOMEM; | 1088 | return -ENOMEM; |
1090 | 1089 | ||
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue) | |||
1092 | mutex_init(&ring_cmd->lock); | 1091 | mutex_init(&ring_cmd->lock); |
1093 | ring_cmd->callback = sec_alg_callback; | 1092 | ring_cmd->callback = sec_alg_callback; |
1094 | 1093 | ||
1095 | ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, | 1094 | ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, |
1096 | &ring_cq->paddr, | 1095 | &ring_cq->paddr, GFP_KERNEL); |
1097 | GFP_KERNEL); | ||
1098 | if (!ring_cq->vaddr) { | 1096 | if (!ring_cq->vaddr) { |
1099 | ret = -ENOMEM; | 1097 | ret = -ENOMEM; |
1100 | goto err_free_ring_cmd; | 1098 | goto err_free_ring_cmd; |
1101 | } | 1099 | } |
1102 | 1100 | ||
1103 | ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, | 1101 | ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE, |
1104 | &ring_db->paddr, | 1102 | &ring_db->paddr, GFP_KERNEL); |
1105 | GFP_KERNEL); | ||
1106 | if (!ring_db->vaddr) { | 1103 | if (!ring_db->vaddr) { |
1107 | ret = -ENOMEM; | 1104 | ret = -ENOMEM; |
1108 | goto err_free_ring_cq; | 1105 | goto err_free_ring_cq; |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 19fba998b86b..1b0d156bb9be 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void) | |||
260 | { | 260 | { |
261 | struct device *dev = &pdev->dev; | 261 | struct device *dev = &pdev->dev; |
262 | BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); | 262 | BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); |
263 | crypt_virt = dma_zalloc_coherent(dev, | 263 | crypt_virt = dma_alloc_coherent(dev, |
264 | NPE_QLEN * sizeof(struct crypt_ctl), | 264 | NPE_QLEN * sizeof(struct crypt_ctl), |
265 | &crypt_phys, GFP_ATOMIC); | 265 | &crypt_phys, GFP_ATOMIC); |
266 | if (!crypt_virt) | 266 | if (!crypt_virt) |
267 | return -ENOMEM; | 267 | return -ENOMEM; |
268 | return 0; | 268 | return 0; |
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index ee0404e27a0f..5660e5e5e022 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c | |||
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) | |||
453 | if (!ring[i]) | 453 | if (!ring[i]) |
454 | goto err_cleanup; | 454 | goto err_cleanup; |
455 | 455 | ||
456 | ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, | 456 | ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, |
457 | MTK_DESC_RING_SZ, | 457 | MTK_DESC_RING_SZ, |
458 | &ring[i]->cmd_dma, | 458 | &ring[i]->cmd_dma, |
459 | GFP_KERNEL); | 459 | GFP_KERNEL); |
460 | if (!ring[i]->cmd_base) | 460 | if (!ring[i]->cmd_base) |
461 | goto err_cleanup; | 461 | goto err_cleanup; |
462 | 462 | ||
463 | ring[i]->res_base = dma_zalloc_coherent(cryp->dev, | 463 | ring[i]->res_base = dma_alloc_coherent(cryp->dev, |
464 | MTK_DESC_RING_SZ, | 464 | MTK_DESC_RING_SZ, |
465 | &ring[i]->res_dma, | 465 | &ring[i]->res_dma, |
466 | GFP_KERNEL); | 466 | GFP_KERNEL); |
467 | if (!ring[i]->res_base) | 467 | if (!ring[i]->res_base) |
468 | goto err_cleanup; | 468 | goto err_cleanup; |
469 | 469 | ||
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 3744b22f0c46..d28cba34773e 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c | |||
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | |||
244 | dev_to_node(&GET_DEV(accel_dev))); | 244 | dev_to_node(&GET_DEV(accel_dev))); |
245 | if (!admin) | 245 | if (!admin) |
246 | return -ENOMEM; | 246 | return -ENOMEM; |
247 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | 247 | admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, |
248 | &admin->phy_addr, GFP_KERNEL); | 248 | &admin->phy_addr, GFP_KERNEL); |
249 | if (!admin->virt_addr) { | 249 | if (!admin->virt_addr) { |
250 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); | 250 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); |
251 | kfree(admin); | 251 | kfree(admin); |
252 | return -ENOMEM; | 252 | return -ENOMEM; |
253 | } | 253 | } |
254 | 254 | ||
255 | admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), | 255 | admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev), |
256 | PAGE_SIZE, | 256 | PAGE_SIZE, |
257 | &admin->const_tbl_addr, | 257 | &admin->const_tbl_addr, |
258 | GFP_KERNEL); | 258 | GFP_KERNEL); |
259 | if (!admin->virt_tbl_addr) { | 259 | if (!admin->virt_tbl_addr) { |
260 | dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); | 260 | dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); |
261 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | 261 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, |
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index d2698299896f..975c75198f56 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
601 | 601 | ||
602 | dev = &GET_DEV(inst->accel_dev); | 602 | dev = &GET_DEV(inst->accel_dev); |
603 | ctx->inst = inst; | 603 | ctx->inst = inst; |
604 | ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), | 604 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
605 | &ctx->enc_cd_paddr, | 605 | &ctx->enc_cd_paddr, |
606 | GFP_ATOMIC); | 606 | GFP_ATOMIC); |
607 | if (!ctx->enc_cd) { | 607 | if (!ctx->enc_cd) { |
608 | return -ENOMEM; | 608 | return -ENOMEM; |
609 | } | 609 | } |
610 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), | 610 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
611 | &ctx->dec_cd_paddr, | 611 | &ctx->dec_cd_paddr, |
612 | GFP_ATOMIC); | 612 | GFP_ATOMIC); |
613 | if (!ctx->dec_cd) { | 613 | if (!ctx->dec_cd) { |
614 | goto out_free_enc; | 614 | goto out_free_enc; |
615 | } | 615 | } |
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |||
933 | 933 | ||
934 | dev = &GET_DEV(inst->accel_dev); | 934 | dev = &GET_DEV(inst->accel_dev); |
935 | ctx->inst = inst; | 935 | ctx->inst = inst; |
936 | ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), | 936 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
937 | &ctx->enc_cd_paddr, | 937 | &ctx->enc_cd_paddr, |
938 | GFP_ATOMIC); | 938 | GFP_ATOMIC); |
939 | if (!ctx->enc_cd) { | 939 | if (!ctx->enc_cd) { |
940 | spin_unlock(&ctx->lock); | 940 | spin_unlock(&ctx->lock); |
941 | return -ENOMEM; | 941 | return -ENOMEM; |
942 | } | 942 | } |
943 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), | 943 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
944 | &ctx->dec_cd_paddr, | 944 | &ctx->dec_cd_paddr, |
945 | GFP_ATOMIC); | 945 | GFP_ATOMIC); |
946 | if (!ctx->dec_cd) { | 946 | if (!ctx->dec_cd) { |
947 | spin_unlock(&ctx->lock); | 947 | spin_unlock(&ctx->lock); |
948 | goto out_free_enc; | 948 | goto out_free_enc; |
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 320e7854b4ee..c9f324730d71 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c | |||
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req) | |||
332 | } else { | 332 | } else { |
333 | int shift = ctx->p_size - req->src_len; | 333 | int shift = ctx->p_size - req->src_len; |
334 | 334 | ||
335 | qat_req->src_align = dma_zalloc_coherent(dev, | 335 | qat_req->src_align = dma_alloc_coherent(dev, |
336 | ctx->p_size, | 336 | ctx->p_size, |
337 | &qat_req->in.dh.in.b, | 337 | &qat_req->in.dh.in.b, |
338 | GFP_KERNEL); | 338 | GFP_KERNEL); |
339 | if (unlikely(!qat_req->src_align)) | 339 | if (unlikely(!qat_req->src_align)) |
340 | return ret; | 340 | return ret; |
341 | 341 | ||
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req) | |||
360 | goto unmap_src; | 360 | goto unmap_src; |
361 | 361 | ||
362 | } else { | 362 | } else { |
363 | qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, | 363 | qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, |
364 | &qat_req->out.dh.r, | 364 | &qat_req->out.dh.r, |
365 | GFP_KERNEL); | 365 | GFP_KERNEL); |
366 | if (unlikely(!qat_req->dst_align)) | 366 | if (unlikely(!qat_req->dst_align)) |
367 | goto unmap_src; | 367 | goto unmap_src; |
368 | } | 368 | } |
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) | |||
447 | return -EINVAL; | 447 | return -EINVAL; |
448 | 448 | ||
449 | ctx->p_size = params->p_size; | 449 | ctx->p_size = params->p_size; |
450 | ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); | 450 | ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); |
451 | if (!ctx->p) | 451 | if (!ctx->p) |
452 | return -ENOMEM; | 452 | return -ENOMEM; |
453 | memcpy(ctx->p, params->p, ctx->p_size); | 453 | memcpy(ctx->p, params->p, ctx->p_size); |
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) | |||
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); | 461 | ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); |
462 | if (!ctx->g) | 462 | if (!ctx->g) |
463 | return -ENOMEM; | 463 | return -ENOMEM; |
464 | memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, | 464 | memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, |
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
503 | if (ret < 0) | 503 | if (ret < 0) |
504 | goto err_clear_ctx; | 504 | goto err_clear_ctx; |
505 | 505 | ||
506 | ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, | 506 | ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa, |
507 | GFP_KERNEL); | 507 | GFP_KERNEL); |
508 | if (!ctx->xa) { | 508 | if (!ctx->xa) { |
509 | ret = -ENOMEM; | 509 | ret = -ENOMEM; |
510 | goto err_clear_ctx; | 510 | goto err_clear_ctx; |
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req) | |||
737 | } else { | 737 | } else { |
738 | int shift = ctx->key_sz - req->src_len; | 738 | int shift = ctx->key_sz - req->src_len; |
739 | 739 | ||
740 | qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, | 740 | qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, |
741 | &qat_req->in.rsa.enc.m, | 741 | &qat_req->in.rsa.enc.m, |
742 | GFP_KERNEL); | 742 | GFP_KERNEL); |
743 | if (unlikely(!qat_req->src_align)) | 743 | if (unlikely(!qat_req->src_align)) |
744 | return ret; | 744 | return ret; |
745 | 745 | ||
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req) | |||
756 | goto unmap_src; | 756 | goto unmap_src; |
757 | 757 | ||
758 | } else { | 758 | } else { |
759 | qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, | 759 | qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, |
760 | &qat_req->out.rsa.enc.c, | 760 | &qat_req->out.rsa.enc.c, |
761 | GFP_KERNEL); | 761 | GFP_KERNEL); |
762 | if (unlikely(!qat_req->dst_align)) | 762 | if (unlikely(!qat_req->dst_align)) |
763 | goto unmap_src; | 763 | goto unmap_src; |
764 | 764 | ||
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req) | |||
881 | } else { | 881 | } else { |
882 | int shift = ctx->key_sz - req->src_len; | 882 | int shift = ctx->key_sz - req->src_len; |
883 | 883 | ||
884 | qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, | 884 | qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, |
885 | &qat_req->in.rsa.dec.c, | 885 | &qat_req->in.rsa.dec.c, |
886 | GFP_KERNEL); | 886 | GFP_KERNEL); |
887 | if (unlikely(!qat_req->src_align)) | 887 | if (unlikely(!qat_req->src_align)) |
888 | return ret; | 888 | return ret; |
889 | 889 | ||
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req) | |||
900 | goto unmap_src; | 900 | goto unmap_src; |
901 | 901 | ||
902 | } else { | 902 | } else { |
903 | qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, | 903 | qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, |
904 | &qat_req->out.rsa.dec.m, | 904 | &qat_req->out.rsa.dec.m, |
905 | GFP_KERNEL); | 905 | GFP_KERNEL); |
906 | if (unlikely(!qat_req->dst_align)) | 906 | if (unlikely(!qat_req->dst_align)) |
907 | goto unmap_src; | 907 | goto unmap_src; |
908 | 908 | ||
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, | |||
989 | goto err; | 989 | goto err; |
990 | 990 | ||
991 | ret = -ENOMEM; | 991 | ret = -ENOMEM; |
992 | ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); | 992 | ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); |
993 | if (!ctx->n) | 993 | if (!ctx->n) |
994 | goto err; | 994 | goto err; |
995 | 995 | ||
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, | |||
1018 | return -EINVAL; | 1018 | return -EINVAL; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); | 1021 | ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); |
1022 | if (!ctx->e) | 1022 | if (!ctx->e) |
1023 | return -ENOMEM; | 1023 | return -ENOMEM; |
1024 | 1024 | ||
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, | |||
1044 | goto err; | 1044 | goto err; |
1045 | 1045 | ||
1046 | ret = -ENOMEM; | 1046 | ret = -ENOMEM; |
1047 | ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); | 1047 | ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); |
1048 | if (!ctx->d) | 1048 | if (!ctx->d) |
1049 | goto err; | 1049 | goto err; |
1050 | 1050 | ||
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |||
1077 | qat_rsa_drop_leading_zeros(&ptr, &len); | 1077 | qat_rsa_drop_leading_zeros(&ptr, &len); |
1078 | if (!len) | 1078 | if (!len) |
1079 | goto err; | 1079 | goto err; |
1080 | ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); | 1080 | ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); |
1081 | if (!ctx->p) | 1081 | if (!ctx->p) |
1082 | goto err; | 1082 | goto err; |
1083 | memcpy(ctx->p + (half_key_sz - len), ptr, len); | 1083 | memcpy(ctx->p + (half_key_sz - len), ptr, len); |
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |||
1088 | qat_rsa_drop_leading_zeros(&ptr, &len); | 1088 | qat_rsa_drop_leading_zeros(&ptr, &len); |
1089 | if (!len) | 1089 | if (!len) |
1090 | goto free_p; | 1090 | goto free_p; |
1091 | ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); | 1091 | ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); |
1092 | if (!ctx->q) | 1092 | if (!ctx->q) |
1093 | goto free_p; | 1093 | goto free_p; |
1094 | memcpy(ctx->q + (half_key_sz - len), ptr, len); | 1094 | memcpy(ctx->q + (half_key_sz - len), ptr, len); |
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |||
1099 | qat_rsa_drop_leading_zeros(&ptr, &len); | 1099 | qat_rsa_drop_leading_zeros(&ptr, &len); |
1100 | if (!len) | 1100 | if (!len) |
1101 | goto free_q; | 1101 | goto free_q; |
1102 | ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, | 1102 | ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp, |
1103 | GFP_KERNEL); | 1103 | GFP_KERNEL); |
1104 | if (!ctx->dp) | 1104 | if (!ctx->dp) |
1105 | goto free_q; | 1105 | goto free_q; |
1106 | memcpy(ctx->dp + (half_key_sz - len), ptr, len); | 1106 | memcpy(ctx->dp + (half_key_sz - len), ptr, len); |
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |||
1111 | qat_rsa_drop_leading_zeros(&ptr, &len); | 1111 | qat_rsa_drop_leading_zeros(&ptr, &len); |
1112 | if (!len) | 1112 | if (!len) |
1113 | goto free_dp; | 1113 | goto free_dp; |
1114 | ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, | 1114 | ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq, |
1115 | GFP_KERNEL); | 1115 | GFP_KERNEL); |
1116 | if (!ctx->dq) | 1116 | if (!ctx->dq) |
1117 | goto free_dp; | 1117 | goto free_dp; |
1118 | memcpy(ctx->dq + (half_key_sz - len), ptr, len); | 1118 | memcpy(ctx->dq + (half_key_sz - len), ptr, len); |
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |||
1123 | qat_rsa_drop_leading_zeros(&ptr, &len); | 1123 | qat_rsa_drop_leading_zeros(&ptr, &len); |
1124 | if (!len) | 1124 | if (!len) |
1125 | goto free_dq; | 1125 | goto free_dq; |
1126 | ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, | 1126 | ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv, |
1127 | GFP_KERNEL); | 1127 | GFP_KERNEL); |
1128 | if (!ctx->qinv) | 1128 | if (!ctx->qinv) |
1129 | goto free_dq; | 1129 | goto free_dq; |
1130 | memcpy(ctx->qinv + (half_key_sz - len), ptr, len); | 1130 | memcpy(ctx->qinv + (half_key_sz - len), ptr, len); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index a2b0a0e71168..86708fb9bda1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma) | |||
1182 | { | 1182 | { |
1183 | int ret = -EBUSY; | 1183 | int ret = -EBUSY; |
1184 | 1184 | ||
1185 | sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, | 1185 | sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, |
1186 | GFP_NOWAIT); | 1186 | GFP_NOWAIT); |
1187 | if (!sdma->bd0) { | 1187 | if (!sdma->bd0) { |
1188 | ret = -ENOMEM; | 1188 | ret = -ENOMEM; |
1189 | goto out; | 1189 | goto out; |
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc) | |||
1205 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | 1205 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
1206 | int ret = 0; | 1206 | int ret = 0; |
1207 | 1207 | ||
1208 | desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, | 1208 | desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, |
1209 | GFP_NOWAIT); | 1209 | GFP_NOWAIT); |
1210 | if (!desc->bd) { | 1210 | if (!desc->bd) { |
1211 | ret = -ENOMEM; | 1211 | ret = -ENOMEM; |
1212 | goto out; | 1212 | goto out; |
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index b7ec56ae02a6..1a2028e1c29e 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c | |||
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma, | |||
325 | * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. | 325 | * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. |
326 | */ | 326 | */ |
327 | pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); | 327 | pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); |
328 | ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, | 328 | ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring, |
329 | &ring->tphys, GFP_NOWAIT); | 329 | &ring->tphys, GFP_NOWAIT); |
330 | if (!ring->txd) | 330 | if (!ring->txd) |
331 | return -ENOMEM; | 331 | return -ENOMEM; |
332 | 332 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 35193b31a9e0..22cc7f68ef6e 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
416 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 416 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
417 | int ret; | 417 | int ret; |
418 | 418 | ||
419 | mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, | 419 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, |
420 | CCW_BLOCK_SIZE, | 420 | CCW_BLOCK_SIZE, |
421 | &mxs_chan->ccw_phys, GFP_KERNEL); | 421 | &mxs_chan->ccw_phys, GFP_KERNEL); |
422 | if (!mxs_chan->ccw) { | 422 | if (!mxs_chan->ccw) { |
423 | ret = -ENOMEM; | 423 | ret = -ENOMEM; |
424 | goto err_alloc; | 424 | goto err_alloc; |
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 1d5988849aa6..eafd6c4b90fe 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, | |||
1208 | ring->size = ret; | 1208 | ring->size = ret; |
1209 | 1209 | ||
1210 | /* Allocate memory for DMA ring descriptor */ | 1210 | /* Allocate memory for DMA ring descriptor */ |
1211 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, | 1211 | ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size, |
1212 | &ring->desc_paddr, GFP_KERNEL); | 1212 | &ring->desc_paddr, GFP_KERNEL); |
1213 | if (!ring->desc_vaddr) { | 1213 | if (!ring->desc_vaddr) { |
1214 | chan_err(chan, "Failed to allocate ring desc\n"); | 1214 | chan_err(chan, "Failed to allocate ring desc\n"); |
1215 | return -ENOMEM; | 1215 | return -ENOMEM; |
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 02880963092f..cb20b411493e 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
879 | */ | 879 | */ |
880 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | 880 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
881 | /* Allocate the buffer descriptors. */ | 881 | /* Allocate the buffer descriptors. */ |
882 | chan->seg_v = dma_zalloc_coherent(chan->dev, | 882 | chan->seg_v = dma_alloc_coherent(chan->dev, |
883 | sizeof(*chan->seg_v) * | 883 | sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, |
884 | XILINX_DMA_NUM_DESCS, | 884 | &chan->seg_p, GFP_KERNEL); |
885 | &chan->seg_p, GFP_KERNEL); | ||
886 | if (!chan->seg_v) { | 885 | if (!chan->seg_v) { |
887 | dev_err(chan->dev, | 886 | dev_err(chan->dev, |
888 | "unable to allocate channel %d descriptors\n", | 887 | "unable to allocate channel %d descriptors\n", |
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
895 | * so allocating a desc segment during channel allocation for | 894 | * so allocating a desc segment during channel allocation for |
896 | * programming tail descriptor. | 895 | * programming tail descriptor. |
897 | */ | 896 | */ |
898 | chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, | 897 | chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, |
899 | sizeof(*chan->cyclic_seg_v), | 898 | sizeof(*chan->cyclic_seg_v), |
900 | &chan->cyclic_seg_p, GFP_KERNEL); | 899 | &chan->cyclic_seg_p, |
900 | GFP_KERNEL); | ||
901 | if (!chan->cyclic_seg_v) { | 901 | if (!chan->cyclic_seg_v) { |
902 | dev_err(chan->dev, | 902 | dev_err(chan->dev, |
903 | "unable to allocate desc segment for cyclic DMA\n"); | 903 | "unable to allocate desc segment for cyclic DMA\n"); |
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 8db51750ce93..4478787a247f 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c | |||
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
490 | list_add_tail(&desc->node, &chan->free_list); | 490 | list_add_tail(&desc->node, &chan->free_list); |
491 | } | 491 | } |
492 | 492 | ||
493 | chan->desc_pool_v = dma_zalloc_coherent(chan->dev, | 493 | chan->desc_pool_v = dma_alloc_coherent(chan->dev, |
494 | (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), | 494 | (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), |
495 | &chan->desc_pool_p, GFP_KERNEL); | 495 | &chan->desc_pool_p, GFP_KERNEL); |
496 | if (!chan->desc_pool_v) | 496 | if (!chan->desc_pool_v) |
497 | return -ENOMEM; | 497 | return -ENOMEM; |
498 | 498 | ||
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index a9d9df6c85ad..693748ad8b88 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali | |||
61 | return NULL; | 61 | return NULL; |
62 | 62 | ||
63 | dmah->size = size; | 63 | dmah->size = size; |
64 | dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, | 64 | dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, |
65 | GFP_KERNEL | __GFP_COMP); | 65 | &dmah->busaddr, |
66 | GFP_KERNEL | __GFP_COMP); | ||
66 | 67 | ||
67 | if (dmah->vaddr == NULL) { | 68 | if (dmah->vaddr == NULL) { |
68 | kfree(dmah); | 69 | kfree(dmah); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 326805461265..19551aa43850 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( | |||
766 | return NULL; | 766 | return NULL; |
767 | 767 | ||
768 | sbuf->size = size; | 768 | sbuf->size = size; |
769 | sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, | 769 | sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, |
770 | &sbuf->dma_addr, GFP_ATOMIC); | 770 | &sbuf->dma_addr, GFP_ATOMIC); |
771 | if (!sbuf->sb) | 771 | if (!sbuf->sb) |
772 | goto bail; | 772 | goto bail; |
773 | 773 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 59eeac55626f..57d4951679cb 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c | |||
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, | |||
105 | 105 | ||
106 | if (!sghead) { | 106 | if (!sghead) { |
107 | for (i = 0; i < pages; i++) { | 107 | for (i = 0; i < pages; i++) { |
108 | pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, | 108 | pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, |
109 | pbl->pg_size, | 109 | pbl->pg_size, |
110 | &pbl->pg_map_arr[i], | 110 | &pbl->pg_map_arr[i], |
111 | GFP_KERNEL); | 111 | GFP_KERNEL); |
112 | if (!pbl->pg_arr[i]) | 112 | if (!pbl->pg_arr[i]) |
113 | goto fail; | 113 | goto fail; |
114 | pbl->pg_count++; | 114 | pbl->pg_count++; |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index df4f7a3f043d..8ac72ac7cbac 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, | |||
291 | if (!wq->sq) | 291 | if (!wq->sq) |
292 | goto err3; | 292 | goto err3; |
293 | 293 | ||
294 | wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), | 294 | wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), |
295 | depth * sizeof(union t3_wr), | 295 | depth * sizeof(union t3_wr), |
296 | &(wq->dma_addr), GFP_KERNEL); | 296 | &(wq->dma_addr), GFP_KERNEL); |
297 | if (!wq->queue) | 297 | if (!wq->queue) |
298 | goto err4; | 298 | goto err4; |
299 | 299 | ||
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 981ff5cfb5d1..504cf525508f 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, | |||
2564 | wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> | 2564 | wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> |
2565 | T4_RQT_ENTRY_SHIFT; | 2565 | T4_RQT_ENTRY_SHIFT; |
2566 | 2566 | ||
2567 | wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, | 2567 | wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, |
2568 | wq->memsize, &wq->dma_addr, | 2568 | &wq->dma_addr, GFP_KERNEL); |
2569 | GFP_KERNEL); | ||
2570 | if (!wq->queue) | 2569 | if (!wq->queue) |
2571 | goto err_free_rqtpool; | 2570 | goto err_free_rqtpool; |
2572 | 2571 | ||
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 09044905284f..7835eb52e7c5 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) | |||
899 | goto done; | 899 | goto done; |
900 | 900 | ||
901 | /* allocate dummy tail memory for all receive contexts */ | 901 | /* allocate dummy tail memory for all receive contexts */ |
902 | dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( | 902 | dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, |
903 | &dd->pcidev->dev, sizeof(u64), | 903 | sizeof(u64), |
904 | &dd->rcvhdrtail_dummy_dma, | 904 | &dd->rcvhdrtail_dummy_dma, |
905 | GFP_KERNEL); | 905 | GFP_KERNEL); |
906 | 906 | ||
907 | if (!dd->rcvhdrtail_dummy_kvaddr) { | 907 | if (!dd->rcvhdrtail_dummy_kvaddr) { |
908 | dd_dev_err(dd, "cannot allocate dummy tail memory\n"); | 908 | dd_dev_err(dd, "cannot allocate dummy tail memory\n"); |
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) | |||
1863 | gfp_flags = GFP_KERNEL; | 1863 | gfp_flags = GFP_KERNEL; |
1864 | else | 1864 | else |
1865 | gfp_flags = GFP_USER; | 1865 | gfp_flags = GFP_USER; |
1866 | rcd->rcvhdrq = dma_zalloc_coherent( | 1866 | rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, |
1867 | &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, | 1867 | &rcd->rcvhdrq_dma, |
1868 | gfp_flags | __GFP_COMP); | 1868 | gfp_flags | __GFP_COMP); |
1869 | 1869 | ||
1870 | if (!rcd->rcvhdrq) { | 1870 | if (!rcd->rcvhdrq) { |
1871 | dd_dev_err(dd, | 1871 | dd_dev_err(dd, |
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) | |||
1876 | 1876 | ||
1877 | if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || | 1877 | if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || |
1878 | HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { | 1878 | HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { |
1879 | rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( | 1879 | rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, |
1880 | &dd->pcidev->dev, PAGE_SIZE, | 1880 | PAGE_SIZE, |
1881 | &rcd->rcvhdrqtailaddr_dma, gfp_flags); | 1881 | &rcd->rcvhdrqtailaddr_dma, |
1882 | gfp_flags); | ||
1882 | if (!rcd->rcvhdrtail_kvaddr) | 1883 | if (!rcd->rcvhdrtail_kvaddr) |
1883 | goto bail_free; | 1884 | goto bail_free; |
1884 | } | 1885 | } |
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) | |||
1974 | while (alloced_bytes < rcd->egrbufs.size && | 1975 | while (alloced_bytes < rcd->egrbufs.size && |
1975 | rcd->egrbufs.alloced < rcd->egrbufs.count) { | 1976 | rcd->egrbufs.alloced < rcd->egrbufs.count) { |
1976 | rcd->egrbufs.buffers[idx].addr = | 1977 | rcd->egrbufs.buffers[idx].addr = |
1977 | dma_zalloc_coherent(&dd->pcidev->dev, | 1978 | dma_alloc_coherent(&dd->pcidev->dev, |
1978 | rcd->egrbufs.rcvtid_size, | 1979 | rcd->egrbufs.rcvtid_size, |
1979 | &rcd->egrbufs.buffers[idx].dma, | 1980 | &rcd->egrbufs.buffers[idx].dma, |
1980 | gfp_flags); | 1981 | gfp_flags); |
1981 | if (rcd->egrbufs.buffers[idx].addr) { | 1982 | if (rcd->egrbufs.buffers[idx].addr) { |
1982 | rcd->egrbufs.buffers[idx].len = | 1983 | rcd->egrbufs.buffers[idx].len = |
1983 | rcd->egrbufs.rcvtid_size; | 1984 | rcd->egrbufs.rcvtid_size; |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index dd5a5c030066..04126d7e318d 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd) | |||
2098 | int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); | 2098 | int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); |
2099 | 2099 | ||
2100 | set_dev_node(&dd->pcidev->dev, i); | 2100 | set_dev_node(&dd->pcidev->dev, i); |
2101 | dd->cr_base[i].va = dma_zalloc_coherent( | 2101 | dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, |
2102 | &dd->pcidev->dev, | 2102 | bytes, |
2103 | bytes, | 2103 | &dd->cr_base[i].dma, |
2104 | &dd->cr_base[i].dma, | 2104 | GFP_KERNEL); |
2105 | GFP_KERNEL); | ||
2106 | if (!dd->cr_base[i].va) { | 2105 | if (!dd->cr_base[i].va) { |
2107 | set_dev_node(&dd->pcidev->dev, dd->node); | 2106 | set_dev_node(&dd->pcidev->dev, dd->node); |
2108 | dd_dev_err(dd, | 2107 | dd_dev_err(dd, |
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index b84356e1a4c1..96897a91fb0a 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) | |||
1453 | timer_setup(&sde->err_progress_check_timer, | 1453 | timer_setup(&sde->err_progress_check_timer, |
1454 | sdma_err_progress_check, 0); | 1454 | sdma_err_progress_check, 0); |
1455 | 1455 | ||
1456 | sde->descq = dma_zalloc_coherent( | 1456 | sde->descq = dma_alloc_coherent(&dd->pcidev->dev, |
1457 | &dd->pcidev->dev, | 1457 | descq_cnt * sizeof(u64[2]), |
1458 | descq_cnt * sizeof(u64[2]), | 1458 | &sde->descq_phys, GFP_KERNEL); |
1459 | &sde->descq_phys, | ||
1460 | GFP_KERNEL | ||
1461 | ); | ||
1462 | if (!sde->descq) | 1459 | if (!sde->descq) |
1463 | goto bail; | 1460 | goto bail; |
1464 | sde->tx_ring = | 1461 | sde->tx_ring = |
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) | |||
1471 | 1468 | ||
1472 | dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; | 1469 | dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; |
1473 | /* Allocate memory for DMA of head registers to memory */ | 1470 | /* Allocate memory for DMA of head registers to memory */ |
1474 | dd->sdma_heads_dma = dma_zalloc_coherent( | 1471 | dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, |
1475 | &dd->pcidev->dev, | 1472 | dd->sdma_heads_size, |
1476 | dd->sdma_heads_size, | 1473 | &dd->sdma_heads_phys, |
1477 | &dd->sdma_heads_phys, | 1474 | GFP_KERNEL); |
1478 | GFP_KERNEL | ||
1479 | ); | ||
1480 | if (!dd->sdma_heads_dma) { | 1475 | if (!dd->sdma_heads_dma) { |
1481 | dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); | 1476 | dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); |
1482 | goto bail; | 1477 | goto bail; |
1483 | } | 1478 | } |
1484 | 1479 | ||
1485 | /* Allocate memory for pad */ | 1480 | /* Allocate memory for pad */ |
1486 | dd->sdma_pad_dma = dma_zalloc_coherent( | 1481 | dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), |
1487 | &dd->pcidev->dev, | 1482 | &dd->sdma_pad_phys, GFP_KERNEL); |
1488 | sizeof(u32), | ||
1489 | &dd->sdma_pad_phys, | ||
1490 | GFP_KERNEL | ||
1491 | ); | ||
1492 | if (!dd->sdma_pad_dma) { | 1483 | if (!dd->sdma_pad_dma) { |
1493 | dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); | 1484 | dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); |
1494 | goto bail; | 1485 | goto bail; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 6300033a448f..dac058d3df53 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c | |||
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, | |||
197 | buf->npages = 1 << order; | 197 | buf->npages = 1 << order; |
198 | buf->page_shift = page_shift; | 198 | buf->page_shift = page_shift; |
199 | /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ | 199 | /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ |
200 | buf->direct.buf = dma_zalloc_coherent(dev, | 200 | buf->direct.buf = dma_alloc_coherent(dev, size, &t, |
201 | size, &t, GFP_KERNEL); | 201 | GFP_KERNEL); |
202 | if (!buf->direct.buf) | 202 | if (!buf->direct.buf) |
203 | return -ENOMEM; | 203 | return -ENOMEM; |
204 | 204 | ||
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, | |||
219 | return -ENOMEM; | 219 | return -ENOMEM; |
220 | 220 | ||
221 | for (i = 0; i < buf->nbufs; ++i) { | 221 | for (i = 0; i < buf->nbufs; ++i) { |
222 | buf->page_list[i].buf = dma_zalloc_coherent(dev, | 222 | buf->page_list[i].buf = dma_alloc_coherent(dev, |
223 | page_size, &t, | 223 | page_size, |
224 | GFP_KERNEL); | 224 | &t, |
225 | GFP_KERNEL); | ||
225 | 226 | ||
226 | if (!buf->page_list[i].buf) | 227 | if (!buf->page_list[i].buf) |
227 | goto err_free; | 228 | goto err_free; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 3a669451cf86..543fa1504cd3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c | |||
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, | |||
5091 | eqe_alloc = i * (buf_chk_sz / eq->eqe_size); | 5091 | eqe_alloc = i * (buf_chk_sz / eq->eqe_size); |
5092 | size = (eq->entries - eqe_alloc) * eq->eqe_size; | 5092 | size = (eq->entries - eqe_alloc) * eq->eqe_size; |
5093 | } | 5093 | } |
5094 | eq->buf[i] = dma_zalloc_coherent(dev, size, | 5094 | eq->buf[i] = dma_alloc_coherent(dev, size, |
5095 | &(eq->buf_dma[i]), | 5095 | &(eq->buf_dma[i]), |
5096 | GFP_KERNEL); | 5096 | GFP_KERNEL); |
5097 | if (!eq->buf[i]) | 5097 | if (!eq->buf[i]) |
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, | |||
5126 | size = (eq->entries - eqe_alloc) | 5126 | size = (eq->entries - eqe_alloc) |
5127 | * eq->eqe_size; | 5127 | * eq->eqe_size; |
5128 | } | 5128 | } |
5129 | eq->buf[idx] = dma_zalloc_coherent(dev, size, | 5129 | eq->buf[idx] = dma_alloc_coherent(dev, size, |
5130 | &(eq->buf_dma[idx]), | 5130 | &(eq->buf_dma[idx]), |
5131 | GFP_KERNEL); | 5131 | GFP_KERNEL); |
5132 | if (!eq->buf[idx]) | 5132 | if (!eq->buf[idx]) |
5133 | goto err_dma_alloc_buf; | 5133 | goto err_dma_alloc_buf; |
5134 | 5134 | ||
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, | |||
5241 | goto free_cmd_mbox; | 5241 | goto free_cmd_mbox; |
5242 | } | 5242 | } |
5243 | 5243 | ||
5244 | eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, | 5244 | eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz, |
5245 | &(eq->buf_list->map), | 5245 | &(eq->buf_list->map), |
5246 | GFP_KERNEL); | 5246 | GFP_KERNEL); |
5247 | if (!eq->buf_list->buf) { | 5247 | if (!eq->buf_list->buf) { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index a9ea966877f2..59e978141ad4 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, | |||
745 | if (!mem) | 745 | if (!mem) |
746 | return I40IW_ERR_PARAM; | 746 | return I40IW_ERR_PARAM; |
747 | mem->size = ALIGN(size, alignment); | 747 | mem->size = ALIGN(size, alignment); |
748 | mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, | 748 | mem->va = dma_alloc_coherent(&pcidev->dev, mem->size, |
749 | (dma_addr_t *)&mem->pa, GFP_KERNEL); | 749 | (dma_addr_t *)&mem->pa, GFP_KERNEL); |
750 | if (!mem->va) | 750 | if (!mem->va) |
751 | return I40IW_ERR_NO_MEMORY; | 751 | return I40IW_ERR_NO_MEMORY; |
752 | return 0; | 752 | return 0; |
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index cc9c0c8ccba3..112d2f38e0de 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c | |||
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, | |||
623 | page = dev->db_tab->page + end; | 623 | page = dev->db_tab->page + end; |
624 | 624 | ||
625 | alloc: | 625 | alloc: |
626 | page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, | 626 | page->db_rec = dma_alloc_coherent(&dev->pdev->dev, |
627 | &page->mapping, GFP_KERNEL); | 627 | MTHCA_ICM_PAGE_SIZE, &page->mapping, |
628 | GFP_KERNEL); | ||
628 | if (!page->db_rec) { | 629 | if (!page->db_rec) { |
629 | ret = -ENOMEM; | 630 | ret = -ENOMEM; |
630 | goto out; | 631 | goto out; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 241a57a07485..097e5ab2a19f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev, | |||
380 | q->len = len; | 380 | q->len = len; |
381 | q->entry_size = entry_size; | 381 | q->entry_size = entry_size; |
382 | q->size = len * entry_size; | 382 | q->size = len * entry_size; |
383 | q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, | 383 | q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma, |
384 | &q->dma, GFP_KERNEL); | 384 | GFP_KERNEL); |
385 | if (!q->va) | 385 | if (!q->va) |
386 | return -ENOMEM; | 386 | return -ENOMEM; |
387 | return 0; | 387 | return 0; |
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
1819 | return -ENOMEM; | 1819 | return -ENOMEM; |
1820 | ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, | 1820 | ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, |
1821 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | 1821 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); |
1822 | cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); | 1822 | cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); |
1823 | if (!cq->va) { | 1823 | if (!cq->va) { |
1824 | status = -ENOMEM; | 1824 | status = -ENOMEM; |
1825 | goto mem_err; | 1825 | goto mem_err; |
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, | |||
2209 | qp->sq.max_cnt = max_wqe_allocated; | 2209 | qp->sq.max_cnt = max_wqe_allocated; |
2210 | len = (hw_pages * hw_page_size); | 2210 | len = (hw_pages * hw_page_size); |
2211 | 2211 | ||
2212 | qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | 2212 | qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); |
2213 | if (!qp->sq.va) | 2213 | if (!qp->sq.va) |
2214 | return -EINVAL; | 2214 | return -EINVAL; |
2215 | qp->sq.len = len; | 2215 | qp->sq.len = len; |
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, | |||
2259 | qp->rq.max_cnt = max_rqe_allocated; | 2259 | qp->rq.max_cnt = max_rqe_allocated; |
2260 | len = (hw_pages * hw_page_size); | 2260 | len = (hw_pages * hw_page_size); |
2261 | 2261 | ||
2262 | qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | 2262 | qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); |
2263 | if (!qp->rq.va) | 2263 | if (!qp->rq.va) |
2264 | return -ENOMEM; | 2264 | return -ENOMEM; |
2265 | qp->rq.pa = pa; | 2265 | qp->rq.pa = pa; |
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, | |||
2315 | if (dev->attr.ird == 0) | 2315 | if (dev->attr.ird == 0) |
2316 | return 0; | 2316 | return 0; |
2317 | 2317 | ||
2318 | qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, | 2318 | qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa, |
2319 | GFP_KERNEL); | 2319 | GFP_KERNEL); |
2320 | if (!qp->ird_q_va) | 2320 | if (!qp->ird_q_va) |
2321 | return -ENOMEM; | 2321 | return -ENOMEM; |
2322 | ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, | 2322 | ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index dd15474b19b7..6be0ea109138 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c | |||
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev) | |||
73 | mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), | 73 | mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), |
74 | sizeof(struct ocrdma_rdma_stats_resp)); | 74 | sizeof(struct ocrdma_rdma_stats_resp)); |
75 | 75 | ||
76 | mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, | 76 | mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, |
77 | &mem->pa, GFP_KERNEL); | 77 | &mem->pa, GFP_KERNEL); |
78 | if (!mem->va) { | 78 | if (!mem->va) { |
79 | pr_err("%s: stats mbox allocation failed\n", __func__); | 79 | pr_err("%s: stats mbox allocation failed\n", __func__); |
80 | return false; | 80 | return false; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index c46bed0c5513..287c332ff0e6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, | |||
504 | INIT_LIST_HEAD(&ctx->mm_head); | 504 | INIT_LIST_HEAD(&ctx->mm_head); |
505 | mutex_init(&ctx->mm_list_lock); | 505 | mutex_init(&ctx->mm_list_lock); |
506 | 506 | ||
507 | ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, | 507 | ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, |
508 | &ctx->ah_tbl.pa, GFP_KERNEL); | 508 | &ctx->ah_tbl.pa, GFP_KERNEL); |
509 | if (!ctx->ah_tbl.va) { | 509 | if (!ctx->ah_tbl.va) { |
510 | kfree(ctx); | 510 | kfree(ctx); |
511 | return ERR_PTR(-ENOMEM); | 511 | return ERR_PTR(-ENOMEM); |
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) | |||
838 | return -ENOMEM; | 838 | return -ENOMEM; |
839 | 839 | ||
840 | for (i = 0; i < mr->num_pbls; i++) { | 840 | for (i = 0; i < mr->num_pbls; i++) { |
841 | va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); | 841 | va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); |
842 | if (!va) { | 842 | if (!va) { |
843 | ocrdma_free_mr_pbl_tbl(dev, mr); | 843 | ocrdma_free_mr_pbl_tbl(dev, mr); |
844 | status = -ENOMEM; | 844 | status = -ENOMEM; |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b342a70e2814..e1ccf32b1c3d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, | |||
556 | return ERR_PTR(-ENOMEM); | 556 | return ERR_PTR(-ENOMEM); |
557 | 557 | ||
558 | for (i = 0; i < pbl_info->num_pbls; i++) { | 558 | for (i = 0; i < pbl_info->num_pbls; i++) { |
559 | va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, | 559 | va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa, |
560 | &pa, flags); | 560 | flags); |
561 | if (!va) | 561 | if (!va) |
562 | goto err; | 562 | goto err; |
563 | 563 | ||
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index eaa109dbc96a..39c37b6fd715 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | |||
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, | |||
890 | dev_info(&pdev->dev, "device version %d, driver version %d\n", | 890 | dev_info(&pdev->dev, "device version %d, driver version %d\n", |
891 | dev->dsr_version, PVRDMA_VERSION); | 891 | dev->dsr_version, PVRDMA_VERSION); |
892 | 892 | ||
893 | dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), | 893 | dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), |
894 | &dev->dsrbase, GFP_KERNEL); | 894 | &dev->dsrbase, GFP_KERNEL); |
895 | if (!dev->dsr) { | 895 | if (!dev->dsr) { |
896 | dev_err(&pdev->dev, "failed to allocate shared region\n"); | 896 | dev_err(&pdev->dev, "failed to allocate shared region\n"); |
897 | ret = -ENOMEM; | 897 | ret = -ENOMEM; |
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c index f456c1125bd6..69881265d121 100644 --- a/drivers/input/touchscreen/raspberrypi-ts.c +++ b/drivers/input/touchscreen/raspberrypi-ts.c | |||
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev) | |||
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | ts->pdev = pdev; | 148 | ts->pdev = pdev; |
149 | 149 | ||
150 | ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, | 150 | ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, |
151 | GFP_KERNEL); | 151 | GFP_KERNEL); |
152 | if (!ts->fw_regs_va) { | 152 | if (!ts->fw_regs_va) { |
153 | dev_err(dev, "failed to dma_alloc_coherent\n"); | 153 | dev_err(dev, "failed to dma_alloc_coherent\n"); |
154 | return -ENOMEM; | 154 | return -ENOMEM; |
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 6ede4286b835..730f7dabcf37 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c | |||
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) | |||
232 | 232 | ||
233 | spin_lock_init(&dom->pgtlock); | 233 | spin_lock_init(&dom->pgtlock); |
234 | 234 | ||
235 | dom->pgt_va = dma_zalloc_coherent(data->dev, | 235 | dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, |
236 | M2701_IOMMU_PGT_SIZE, | 236 | &dom->pgt_pa, GFP_KERNEL); |
237 | &dom->pgt_pa, GFP_KERNEL); | ||
238 | if (!dom->pgt_va) | 237 | if (!dom->pgt_va) |
239 | return -ENOMEM; | 238 | return -ENOMEM; |
240 | 239 | ||
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 447baaebca44..cdb79ae2d8dc 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c | |||
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q) | |||
218 | { | 218 | { |
219 | struct device *dev = &cio2->pci_dev->dev; | 219 | struct device *dev = &cio2->pci_dev->dev; |
220 | 220 | ||
221 | q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, | 221 | q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, |
222 | GFP_KERNEL); | 222 | GFP_KERNEL); |
223 | if (!q->fbpt) | 223 | if (!q->fbpt) |
224 | return -ENOMEM; | 224 | return -ENOMEM; |
225 | 225 | ||
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c index e80123cba406..060c0ad6243a 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c | |||
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data, | |||
49 | struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; | 49 | struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; |
50 | struct device *dev = &ctx->dev->plat_dev->dev; | 50 | struct device *dev = &ctx->dev->plat_dev->dev; |
51 | 51 | ||
52 | mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); | 52 | mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); |
53 | if (!mem->va) { | 53 | if (!mem->va) { |
54 | mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), | 54 | mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), |
55 | size); | 55 | size); |
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index efe2fb72d54b..25265fd0fd6e 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c | |||
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, | |||
218 | if (get_order(size) >= MAX_ORDER) | 218 | if (get_order(size) >= MAX_ORDER) |
219 | return NULL; | 219 | return NULL; |
220 | 220 | ||
221 | return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, | 221 | return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, |
222 | GFP_KERNEL); | 222 | GFP_KERNEL); |
223 | } | 223 | } |
224 | 224 | ||
225 | void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, | 225 | void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a22e11a65658..eba9bcc92ad3 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host) | |||
3763 | * Use zalloc to zero the reserved high 32-bits of 128-bit | 3763 | * Use zalloc to zero the reserved high 32-bits of 128-bit |
3764 | * descriptors so that they never need to be written. | 3764 | * descriptors so that they never need to be written. |
3765 | */ | 3765 | */ |
3766 | buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + | 3766 | buf = dma_alloc_coherent(mmc_dev(mmc), |
3767 | host->adma_table_sz, &dma, GFP_KERNEL); | 3767 | host->align_buffer_sz + host->adma_table_sz, |
3768 | &dma, GFP_KERNEL); | ||
3768 | if (!buf) { | 3769 | if (!buf) { |
3769 | pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", | 3770 | pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", |
3770 | mmc_hostname(mmc)); | 3771 | mmc_hostname(mmc)); |
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 91fc64c1145e..47e5984f16fb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c | |||
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev) | |||
1433 | } | 1433 | } |
1434 | 1434 | ||
1435 | /* Allocate TX descriptor ring in coherent memory */ | 1435 | /* Allocate TX descriptor ring in coherent memory */ |
1436 | greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, | 1436 | greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, |
1437 | &greth->tx_bd_base_phys, | 1437 | &greth->tx_bd_base_phys, |
1438 | GFP_KERNEL); | 1438 | GFP_KERNEL); |
1439 | if (!greth->tx_bd_base) { | 1439 | if (!greth->tx_bd_base) { |
1440 | err = -ENOMEM; | 1440 | err = -ENOMEM; |
1441 | goto error3; | 1441 | goto error3; |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | /* Allocate RX descriptor ring in coherent memory */ | 1444 | /* Allocate RX descriptor ring in coherent memory */ |
1445 | greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, | 1445 | greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, |
1446 | &greth->rx_bd_base_phys, | 1446 | &greth->rx_bd_base_phys, |
1447 | GFP_KERNEL); | 1447 | GFP_KERNEL); |
1448 | if (!greth->rx_bd_base) { | 1448 | if (!greth->rx_bd_base) { |
1449 | err = -ENOMEM; | 1449 | err = -ENOMEM; |
1450 | goto error4; | 1450 | goto error4; |
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 0b60921c392f..16477aa6d61f 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c | |||
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev) | |||
795 | size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; | 795 | size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; |
796 | 796 | ||
797 | for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { | 797 | for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { |
798 | descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, | 798 | descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr, |
799 | GFP_KERNEL); | 799 | GFP_KERNEL); |
800 | if (!descs) { | 800 | if (!descs) { |
801 | netdev_err(sdev->netdev, | 801 | netdev_err(sdev->netdev, |
802 | "failed to allocate status descriptors\n"); | 802 | "failed to allocate status descriptors\n"); |
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev) | |||
1240 | struct slic_shmem_data *sm_data; | 1240 | struct slic_shmem_data *sm_data; |
1241 | dma_addr_t paddr; | 1241 | dma_addr_t paddr; |
1242 | 1242 | ||
1243 | sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), | 1243 | sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), |
1244 | &paddr, GFP_KERNEL); | 1244 | &paddr, GFP_KERNEL); |
1245 | if (!sm_data) { | 1245 | if (!sm_data) { |
1246 | dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); | 1246 | dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); |
1247 | return -ENOMEM; | 1247 | return -ENOMEM; |
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev) | |||
1621 | int err = 0; | 1621 | int err = 0; |
1622 | u8 *mac[2]; | 1622 | u8 *mac[2]; |
1623 | 1623 | ||
1624 | eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, | 1624 | eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, |
1625 | &paddr, GFP_KERNEL); | 1625 | &paddr, GFP_KERNEL); |
1626 | if (!eeprom) | 1626 | if (!eeprom) |
1627 | return -ENOMEM; | 1627 | return -ENOMEM; |
1628 | 1628 | ||
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 420cede41ca4..b17d435de09f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c | |||
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) | |||
111 | struct ena_com_admin_sq *sq = &queue->sq; | 111 | struct ena_com_admin_sq *sq = &queue->sq; |
112 | u16 size = ADMIN_SQ_SIZE(queue->q_depth); | 112 | u16 size = ADMIN_SQ_SIZE(queue->q_depth); |
113 | 113 | ||
114 | sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, | 114 | sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, |
115 | GFP_KERNEL); | 115 | GFP_KERNEL); |
116 | 116 | ||
117 | if (!sq->entries) { | 117 | if (!sq->entries) { |
118 | pr_err("memory allocation failed"); | 118 | pr_err("memory allocation failed"); |
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) | |||
133 | struct ena_com_admin_cq *cq = &queue->cq; | 133 | struct ena_com_admin_cq *cq = &queue->cq; |
134 | u16 size = ADMIN_CQ_SIZE(queue->q_depth); | 134 | u16 size = ADMIN_CQ_SIZE(queue->q_depth); |
135 | 135 | ||
136 | cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, | 136 | cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, |
137 | GFP_KERNEL); | 137 | GFP_KERNEL); |
138 | 138 | ||
139 | if (!cq->entries) { | 139 | if (!cq->entries) { |
140 | pr_err("memory allocation failed"); | 140 | pr_err("memory allocation failed"); |
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev, | |||
156 | 156 | ||
157 | dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; | 157 | dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; |
158 | size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); | 158 | size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); |
159 | aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, | 159 | aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, |
160 | GFP_KERNEL); | 160 | GFP_KERNEL); |
161 | 161 | ||
162 | if (!aenq->entries) { | 162 | if (!aenq->entries) { |
163 | pr_err("memory allocation failed"); | 163 | pr_err("memory allocation failed"); |
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, | |||
344 | dev_node = dev_to_node(ena_dev->dmadev); | 344 | dev_node = dev_to_node(ena_dev->dmadev); |
345 | set_dev_node(ena_dev->dmadev, ctx->numa_node); | 345 | set_dev_node(ena_dev->dmadev, ctx->numa_node); |
346 | io_sq->desc_addr.virt_addr = | 346 | io_sq->desc_addr.virt_addr = |
347 | dma_zalloc_coherent(ena_dev->dmadev, size, | 347 | dma_alloc_coherent(ena_dev->dmadev, size, |
348 | &io_sq->desc_addr.phys_addr, | 348 | &io_sq->desc_addr.phys_addr, |
349 | GFP_KERNEL); | 349 | GFP_KERNEL); |
350 | set_dev_node(ena_dev->dmadev, dev_node); | 350 | set_dev_node(ena_dev->dmadev, dev_node); |
351 | if (!io_sq->desc_addr.virt_addr) { | 351 | if (!io_sq->desc_addr.virt_addr) { |
352 | io_sq->desc_addr.virt_addr = | 352 | io_sq->desc_addr.virt_addr = |
353 | dma_zalloc_coherent(ena_dev->dmadev, size, | 353 | dma_alloc_coherent(ena_dev->dmadev, size, |
354 | &io_sq->desc_addr.phys_addr, | 354 | &io_sq->desc_addr.phys_addr, |
355 | GFP_KERNEL); | 355 | GFP_KERNEL); |
356 | } | 356 | } |
357 | 357 | ||
358 | if (!io_sq->desc_addr.virt_addr) { | 358 | if (!io_sq->desc_addr.virt_addr) { |
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, | |||
425 | prev_node = dev_to_node(ena_dev->dmadev); | 425 | prev_node = dev_to_node(ena_dev->dmadev); |
426 | set_dev_node(ena_dev->dmadev, ctx->numa_node); | 426 | set_dev_node(ena_dev->dmadev, ctx->numa_node); |
427 | io_cq->cdesc_addr.virt_addr = | 427 | io_cq->cdesc_addr.virt_addr = |
428 | dma_zalloc_coherent(ena_dev->dmadev, size, | 428 | dma_alloc_coherent(ena_dev->dmadev, size, |
429 | &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); | 429 | &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); |
430 | set_dev_node(ena_dev->dmadev, prev_node); | 430 | set_dev_node(ena_dev->dmadev, prev_node); |
431 | if (!io_cq->cdesc_addr.virt_addr) { | 431 | if (!io_cq->cdesc_addr.virt_addr) { |
432 | io_cq->cdesc_addr.virt_addr = | 432 | io_cq->cdesc_addr.virt_addr = |
433 | dma_zalloc_coherent(ena_dev->dmadev, size, | 433 | dma_alloc_coherent(ena_dev->dmadev, size, |
434 | &io_cq->cdesc_addr.phys_addr, | 434 | &io_cq->cdesc_addr.phys_addr, |
435 | GFP_KERNEL); | 435 | GFP_KERNEL); |
436 | } | 436 | } |
437 | 437 | ||
438 | if (!io_cq->cdesc_addr.virt_addr) { | 438 | if (!io_cq->cdesc_addr.virt_addr) { |
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) | |||
1026 | struct ena_rss *rss = &ena_dev->rss; | 1026 | struct ena_rss *rss = &ena_dev->rss; |
1027 | 1027 | ||
1028 | rss->hash_key = | 1028 | rss->hash_key = |
1029 | dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), | 1029 | dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), |
1030 | &rss->hash_key_dma_addr, GFP_KERNEL); | 1030 | &rss->hash_key_dma_addr, GFP_KERNEL); |
1031 | 1031 | ||
1032 | if (unlikely(!rss->hash_key)) | 1032 | if (unlikely(!rss->hash_key)) |
1033 | return -ENOMEM; | 1033 | return -ENOMEM; |
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) | |||
1050 | struct ena_rss *rss = &ena_dev->rss; | 1050 | struct ena_rss *rss = &ena_dev->rss; |
1051 | 1051 | ||
1052 | rss->hash_ctrl = | 1052 | rss->hash_ctrl = |
1053 | dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), | 1053 | dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), |
1054 | &rss->hash_ctrl_dma_addr, GFP_KERNEL); | 1054 | &rss->hash_ctrl_dma_addr, GFP_KERNEL); |
1055 | 1055 | ||
1056 | if (unlikely(!rss->hash_ctrl)) | 1056 | if (unlikely(!rss->hash_ctrl)) |
1057 | return -ENOMEM; | 1057 | return -ENOMEM; |
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, | |||
1094 | sizeof(struct ena_admin_rss_ind_table_entry); | 1094 | sizeof(struct ena_admin_rss_ind_table_entry); |
1095 | 1095 | ||
1096 | rss->rss_ind_tbl = | 1096 | rss->rss_ind_tbl = |
1097 | dma_zalloc_coherent(ena_dev->dmadev, tbl_size, | 1097 | dma_alloc_coherent(ena_dev->dmadev, tbl_size, |
1098 | &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); | 1098 | &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); |
1099 | if (unlikely(!rss->rss_ind_tbl)) | 1099 | if (unlikely(!rss->rss_ind_tbl)) |
1100 | goto mem_err1; | 1100 | goto mem_err1; |
1101 | 1101 | ||
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) | |||
1649 | 1649 | ||
1650 | spin_lock_init(&mmio_read->lock); | 1650 | spin_lock_init(&mmio_read->lock); |
1651 | mmio_read->read_resp = | 1651 | mmio_read->read_resp = |
1652 | dma_zalloc_coherent(ena_dev->dmadev, | 1652 | dma_alloc_coherent(ena_dev->dmadev, |
1653 | sizeof(*mmio_read->read_resp), | 1653 | sizeof(*mmio_read->read_resp), |
1654 | &mmio_read->read_resp_dma_addr, GFP_KERNEL); | 1654 | &mmio_read->read_resp_dma_addr, GFP_KERNEL); |
1655 | if (unlikely(!mmio_read->read_resp)) | 1655 | if (unlikely(!mmio_read->read_resp)) |
1656 | goto err; | 1656 | goto err; |
1657 | 1657 | ||
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) | |||
2623 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; | 2623 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2624 | 2624 | ||
2625 | host_attr->host_info = | 2625 | host_attr->host_info = |
2626 | dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, | 2626 | dma_alloc_coherent(ena_dev->dmadev, SZ_4K, |
2627 | &host_attr->host_info_dma_addr, GFP_KERNEL); | 2627 | &host_attr->host_info_dma_addr, GFP_KERNEL); |
2628 | if (unlikely(!host_attr->host_info)) | 2628 | if (unlikely(!host_attr->host_info)) |
2629 | return -ENOMEM; | 2629 | return -ENOMEM; |
2630 | 2630 | ||
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, | |||
2641 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; | 2641 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
2642 | 2642 | ||
2643 | host_attr->debug_area_virt_addr = | 2643 | host_attr->debug_area_virt_addr = |
2644 | dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, | 2644 | dma_alloc_coherent(ena_dev->dmadev, debug_area_size, |
2645 | &host_attr->debug_area_dma_addr, GFP_KERNEL); | 2645 | &host_attr->debug_area_dma_addr, |
2646 | GFP_KERNEL); | ||
2646 | if (unlikely(!host_attr->debug_area_virt_addr)) { | 2647 | if (unlikely(!host_attr->debug_area_virt_addr)) { |
2647 | host_attr->debug_area_size = 0; | 2648 | host_attr->debug_area_size = 0; |
2648 | return -ENOMEM; | 2649 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 0f2ad50f3bd7..87b142a312e0 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c | |||
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
206 | } | 206 | } |
207 | 207 | ||
208 | /* Packet buffers should be 64B aligned */ | 208 | /* Packet buffers should be 64B aligned */ |
209 | pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, | 209 | pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, |
210 | GFP_ATOMIC); | 210 | GFP_ATOMIC); |
211 | if (unlikely(!pkt_buf)) { | 211 | if (unlikely(!pkt_buf)) { |
212 | dev_kfree_skb_any(skb); | 212 | dev_kfree_skb_any(skb); |
213 | return NETDEV_TX_OK; | 213 | return NETDEV_TX_OK; |
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) | |||
428 | ring->ndev = ndev; | 428 | ring->ndev = ndev; |
429 | 429 | ||
430 | size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; | 430 | size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; |
431 | ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, | 431 | ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, |
432 | GFP_KERNEL); | 432 | GFP_KERNEL); |
433 | if (!ring->desc_addr) | 433 | if (!ring->desc_addr) |
434 | goto err; | 434 | goto err; |
435 | 435 | ||
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c131cfc1b79d..e3538ba7d0e7 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx) | |||
660 | alx->num_txq + | 660 | alx->num_txq + |
661 | sizeof(struct alx_rrd) * alx->rx_ringsz + | 661 | sizeof(struct alx_rrd) * alx->rx_ringsz + |
662 | sizeof(struct alx_rfd) * alx->rx_ringsz; | 662 | sizeof(struct alx_rfd) * alx->rx_ringsz; |
663 | alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, | 663 | alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, |
664 | alx->descmem.size, | 664 | alx->descmem.size, |
665 | &alx->descmem.dma, | 665 | &alx->descmem.dma, GFP_KERNEL); |
666 | GFP_KERNEL); | ||
667 | if (!alx->descmem.virt) | 666 | if (!alx->descmem.virt) |
668 | return -ENOMEM; | 667 | return -ENOMEM; |
669 | 668 | ||
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 7087b88550db..3a3b35b5df67 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) | |||
1019 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + | 1019 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + |
1020 | 8 * 4; | 1020 | 8 * 4; |
1021 | 1021 | ||
1022 | ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, | 1022 | ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, |
1023 | &ring_header->dma, GFP_KERNEL); | 1023 | &ring_header->dma, GFP_KERNEL); |
1024 | if (unlikely(!ring_header->desc)) { | 1024 | if (unlikely(!ring_header->desc)) { |
1025 | dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); | 1025 | dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); |
1026 | goto err_nomem; | 1026 | goto err_nomem; |
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 6bae973d4dce..09cd188826b1 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev) | |||
936 | 936 | ||
937 | /* allocate rx dma ring */ | 937 | /* allocate rx dma ring */ |
938 | size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | 938 | size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); |
939 | p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | 939 | p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); |
940 | if (!p) { | 940 | if (!p) { |
941 | ret = -ENOMEM; | 941 | ret = -ENOMEM; |
942 | goto out_freeirq_tx; | 942 | goto out_freeirq_tx; |
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev) | |||
947 | 947 | ||
948 | /* allocate tx dma ring */ | 948 | /* allocate tx dma ring */ |
949 | size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | 949 | size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); |
950 | p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | 950 | p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); |
951 | if (!p) { | 951 | if (!p) { |
952 | ret = -ENOMEM; | 952 | ret = -ENOMEM; |
953 | goto out_free_rx_ring; | 953 | goto out_free_rx_ring; |
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev) | |||
2120 | 2120 | ||
2121 | /* allocate rx dma ring */ | 2121 | /* allocate rx dma ring */ |
2122 | size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | 2122 | size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); |
2123 | p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | 2123 | p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); |
2124 | if (!p) { | 2124 | if (!p) { |
2125 | dev_err(kdev, "cannot allocate rx ring %u\n", size); | 2125 | dev_err(kdev, "cannot allocate rx ring %u\n", size); |
2126 | ret = -ENOMEM; | 2126 | ret = -ENOMEM; |
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev) | |||
2132 | 2132 | ||
2133 | /* allocate tx dma ring */ | 2133 | /* allocate tx dma ring */ |
2134 | size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | 2134 | size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); |
2135 | p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | 2135 | p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); |
2136 | if (!p) { | 2136 | if (!p) { |
2137 | dev_err(kdev, "cannot allocate tx ring\n"); | 2137 | dev_err(kdev, "cannot allocate tx ring\n"); |
2138 | ret = -ENOMEM; | 2138 | ret = -ENOMEM; |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4574275ef445..f9521d0274b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | |||
1506 | /* We just need one DMA descriptor which is DMA-able, since writing to | 1506 | /* We just need one DMA descriptor which is DMA-able, since writing to |
1507 | * the port will allocate a new descriptor in its internal linked-list | 1507 | * the port will allocate a new descriptor in its internal linked-list |
1508 | */ | 1508 | */ |
1509 | p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, | 1509 | p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, |
1510 | GFP_KERNEL); | 1510 | GFP_KERNEL); |
1511 | if (!p) { | 1511 | if (!p) { |
1512 | netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); | 1512 | netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); |
1513 | return -ENOMEM; | 1513 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index cabc8e49ad24..2d3a44c40221 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
634 | 634 | ||
635 | /* Alloc ring of descriptors */ | 635 | /* Alloc ring of descriptors */ |
636 | size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | 636 | size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); |
637 | ring->cpu_base = dma_zalloc_coherent(dma_dev, size, | 637 | ring->cpu_base = dma_alloc_coherent(dma_dev, size, |
638 | &ring->dma_base, | 638 | &ring->dma_base, |
639 | GFP_KERNEL); | 639 | GFP_KERNEL); |
640 | if (!ring->cpu_base) { | 640 | if (!ring->cpu_base) { |
641 | dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", | 641 | dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", |
642 | ring->mmio_base); | 642 | ring->mmio_base); |
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
659 | 659 | ||
660 | /* Alloc ring of descriptors */ | 660 | /* Alloc ring of descriptors */ |
661 | size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | 661 | size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); |
662 | ring->cpu_base = dma_zalloc_coherent(dma_dev, size, | 662 | ring->cpu_base = dma_alloc_coherent(dma_dev, size, |
663 | &ring->dma_base, | 663 | &ring->dma_base, |
664 | GFP_KERNEL); | 664 | GFP_KERNEL); |
665 | if (!ring->cpu_base) { | 665 | if (!ring->cpu_base) { |
666 | dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", | 666 | dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", |
667 | ring->mmio_base); | 667 | ring->mmio_base); |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index bbb247116045..d63371d70bce 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev) | |||
844 | BNX2_SBLK_MSIX_ALIGN_SIZE); | 844 | BNX2_SBLK_MSIX_ALIGN_SIZE); |
845 | bp->status_stats_size = status_blk_size + | 845 | bp->status_stats_size = status_blk_size + |
846 | sizeof(struct statistics_block); | 846 | sizeof(struct statistics_block); |
847 | status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, | 847 | status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, |
848 | &bp->status_blk_mapping, GFP_KERNEL); | 848 | &bp->status_blk_mapping, GFP_KERNEL); |
849 | if (!status_blk) | 849 | if (!status_blk) |
850 | return -ENOMEM; | 850 | return -ENOMEM; |
851 | 851 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 3aa80da973d7..4ab6eb3baefc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -3449,10 +3449,10 @@ alloc_ext_stats: | |||
3449 | goto alloc_tx_ext_stats; | 3449 | goto alloc_tx_ext_stats; |
3450 | 3450 | ||
3451 | bp->hw_rx_port_stats_ext = | 3451 | bp->hw_rx_port_stats_ext = |
3452 | dma_zalloc_coherent(&pdev->dev, | 3452 | dma_alloc_coherent(&pdev->dev, |
3453 | sizeof(struct rx_port_stats_ext), | 3453 | sizeof(struct rx_port_stats_ext), |
3454 | &bp->hw_rx_port_stats_ext_map, | 3454 | &bp->hw_rx_port_stats_ext_map, |
3455 | GFP_KERNEL); | 3455 | GFP_KERNEL); |
3456 | if (!bp->hw_rx_port_stats_ext) | 3456 | if (!bp->hw_rx_port_stats_ext) |
3457 | return 0; | 3457 | return 0; |
3458 | 3458 | ||
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats: | |||
3462 | 3462 | ||
3463 | if (bp->hwrm_spec_code >= 0x10902) { | 3463 | if (bp->hwrm_spec_code >= 0x10902) { |
3464 | bp->hw_tx_port_stats_ext = | 3464 | bp->hw_tx_port_stats_ext = |
3465 | dma_zalloc_coherent(&pdev->dev, | 3465 | dma_alloc_coherent(&pdev->dev, |
3466 | sizeof(struct tx_port_stats_ext), | 3466 | sizeof(struct tx_port_stats_ext), |
3467 | &bp->hw_tx_port_stats_ext_map, | 3467 | &bp->hw_tx_port_stats_ext_map, |
3468 | GFP_KERNEL); | 3468 | GFP_KERNEL); |
3469 | } | 3469 | } |
3470 | bp->flags |= BNXT_FLAG_PORT_STATS_EXT; | 3470 | bp->flags |= BNXT_FLAG_PORT_STATS_EXT; |
3471 | } | 3471 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 15c7041e937b..70775158c8c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | |||
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, | |||
316 | 316 | ||
317 | n = IEEE_8021QAZ_MAX_TCS; | 317 | n = IEEE_8021QAZ_MAX_TCS; |
318 | data_len = sizeof(*data) + sizeof(*fw_app) * n; | 318 | data_len = sizeof(*data) + sizeof(*fw_app) * n; |
319 | data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, | 319 | data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, |
320 | GFP_KERNEL); | 320 | GFP_KERNEL); |
321 | if (!data) | 321 | if (!data) |
322 | return -ENOMEM; | 322 | return -ENOMEM; |
323 | 323 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 140dbd62106d..7f56032e44ac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | |||
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, | |||
85 | return -EFAULT; | 85 | return -EFAULT; |
86 | } | 86 | } |
87 | 87 | ||
88 | data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, | 88 | data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, |
89 | &data_dma_addr, GFP_KERNEL); | 89 | &data_dma_addr, GFP_KERNEL); |
90 | if (!data_addr) | 90 | if (!data_addr) |
91 | return -ENOMEM; | 91 | return -ENOMEM; |
92 | 92 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3b1397af81f7..b1627dd5f2fd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) | |||
8712 | if (!i && tg3_flag(tp, ENABLE_RSS)) | 8712 | if (!i && tg3_flag(tp, ENABLE_RSS)) |
8713 | continue; | 8713 | continue; |
8714 | 8714 | ||
8715 | tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, | 8715 | tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, |
8716 | TG3_RX_RCB_RING_BYTES(tp), | 8716 | TG3_RX_RCB_RING_BYTES(tp), |
8717 | &tnapi->rx_rcb_mapping, | 8717 | &tnapi->rx_rcb_mapping, |
8718 | GFP_KERNEL); | 8718 | GFP_KERNEL); |
8719 | if (!tnapi->rx_rcb) | 8719 | if (!tnapi->rx_rcb) |
8720 | goto err_out; | 8720 | goto err_out; |
8721 | } | 8721 | } |
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
8768 | { | 8768 | { |
8769 | int i; | 8769 | int i; |
8770 | 8770 | ||
8771 | tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, | 8771 | tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, |
8772 | sizeof(struct tg3_hw_stats), | 8772 | sizeof(struct tg3_hw_stats), |
8773 | &tp->stats_mapping, GFP_KERNEL); | 8773 | &tp->stats_mapping, GFP_KERNEL); |
8774 | if (!tp->hw_stats) | 8774 | if (!tp->hw_stats) |
8775 | goto err_out; | 8775 | goto err_out; |
8776 | 8776 | ||
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
8778 | struct tg3_napi *tnapi = &tp->napi[i]; | 8778 | struct tg3_napi *tnapi = &tp->napi[i]; |
8779 | struct tg3_hw_status *sblk; | 8779 | struct tg3_hw_status *sblk; |
8780 | 8780 | ||
8781 | tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, | 8781 | tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, |
8782 | TG3_HW_STATUS_SIZE, | 8782 | TG3_HW_STATUS_SIZE, |
8783 | &tnapi->status_mapping, | 8783 | &tnapi->status_mapping, |
8784 | GFP_KERNEL); | 8784 | GFP_KERNEL); |
8785 | if (!tnapi->hw_status) | 8785 | if (!tnapi->hw_status) |
8786 | goto err_out; | 8786 | goto err_out; |
8787 | 8787 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index fcaf18fa3904..5b4d3badcb73 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, | |||
59 | dmem->q_len = q_len; | 59 | dmem->q_len = q_len; |
60 | dmem->size = (desc_size * q_len) + align_bytes; | 60 | dmem->size = (desc_size * q_len) + align_bytes; |
61 | /* Save address, need it while freeing */ | 61 | /* Save address, need it while freeing */ |
62 | dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, | 62 | dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size, |
63 | &dmem->dma, GFP_KERNEL); | 63 | &dmem->dma, GFP_KERNEL); |
64 | if (!dmem->unalign_base) | 64 | if (!dmem->unalign_base) |
65 | return -ENOMEM; | 65 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 20b6e1b3f5e3..85f22c286680 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, | |||
620 | { | 620 | { |
621 | size_t len = nelem * elem_size; | 621 | size_t len = nelem * elem_size; |
622 | void *s = NULL; | 622 | void *s = NULL; |
623 | void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); | 623 | void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); |
624 | 624 | ||
625 | if (!p) | 625 | if (!p) |
626 | return NULL; | 626 | return NULL; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b90188401d4a..fc0bc6458e84 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, | |||
694 | { | 694 | { |
695 | size_t len = nelem * elem_size + stat_size; | 695 | size_t len = nelem * elem_size + stat_size; |
696 | void *s = NULL; | 696 | void *s = NULL; |
697 | void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); | 697 | void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); |
698 | 698 | ||
699 | if (!p) | 699 | if (!p) |
700 | return NULL; | 700 | return NULL; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 3007e1ac1e61..1d534f0baa69 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, | |||
756 | * Allocate the hardware ring and PCI DMA bus address space for said. | 756 | * Allocate the hardware ring and PCI DMA bus address space for said. |
757 | */ | 757 | */ |
758 | size_t hwlen = nelem * hwsize + stat_size; | 758 | size_t hwlen = nelem * hwsize + stat_size; |
759 | void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); | 759 | void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); |
760 | 760 | ||
761 | if (!hwring) | 761 | if (!hwring) |
762 | return NULL; | 762 | return NULL; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1e9d882c04ef..59a7f0b99069 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) | |||
1808 | total_size = buf_len; | 1808 | total_size = buf_len; |
1809 | 1809 | ||
1810 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | 1810 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; |
1811 | get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 1811 | get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
1812 | get_fat_cmd.size, | 1812 | get_fat_cmd.size, |
1813 | &get_fat_cmd.dma, GFP_ATOMIC); | 1813 | &get_fat_cmd.dma, GFP_ATOMIC); |
1814 | if (!get_fat_cmd.va) | 1814 | if (!get_fat_cmd.va) |
1815 | return -ENOMEM; | 1815 | return -ENOMEM; |
1816 | 1816 | ||
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | |||
2302 | return -EINVAL; | 2302 | return -EINVAL; |
2303 | 2303 | ||
2304 | cmd.size = sizeof(struct be_cmd_resp_port_type); | 2304 | cmd.size = sizeof(struct be_cmd_resp_port_type); |
2305 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 2305 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
2306 | GFP_ATOMIC); | 2306 | GFP_ATOMIC); |
2307 | if (!cmd.va) { | 2307 | if (!cmd.va) { |
2308 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); | 2308 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); |
2309 | return -ENOMEM; | 2309 | return -ENOMEM; |
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter, | |||
3066 | 3066 | ||
3067 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) | 3067 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) |
3068 | + LANCER_FW_DOWNLOAD_CHUNK; | 3068 | + LANCER_FW_DOWNLOAD_CHUNK; |
3069 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, | 3069 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, |
3070 | &flash_cmd.dma, GFP_KERNEL); | 3070 | GFP_KERNEL); |
3071 | if (!flash_cmd.va) | 3071 | if (!flash_cmd.va) |
3072 | return -ENOMEM; | 3072 | return -ENOMEM; |
3073 | 3073 | ||
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) | |||
3184 | } | 3184 | } |
3185 | 3185 | ||
3186 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); | 3186 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); |
3187 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | 3187 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, |
3188 | GFP_KERNEL); | 3188 | GFP_KERNEL); |
3189 | if (!flash_cmd.va) | 3189 | if (!flash_cmd.va) |
3190 | return -ENOMEM; | 3190 | return -ENOMEM; |
3191 | 3191 | ||
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | |||
3435 | goto err; | 3435 | goto err; |
3436 | } | 3436 | } |
3437 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); | 3437 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); |
3438 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 3438 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
3439 | GFP_ATOMIC); | 3439 | GFP_ATOMIC); |
3440 | if (!cmd.va) { | 3440 | if (!cmd.va) { |
3441 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 3441 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
3442 | status = -ENOMEM; | 3442 | status = -ENOMEM; |
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | |||
3522 | 3522 | ||
3523 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | 3523 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); |
3524 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | 3524 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); |
3525 | attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 3525 | attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
3526 | attribs_cmd.size, | 3526 | attribs_cmd.size, |
3527 | &attribs_cmd.dma, GFP_ATOMIC); | 3527 | &attribs_cmd.dma, GFP_ATOMIC); |
3528 | if (!attribs_cmd.va) { | 3528 | if (!attribs_cmd.va) { |
3529 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 3529 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
3530 | status = -ENOMEM; | 3530 | status = -ENOMEM; |
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | |||
3699 | 3699 | ||
3700 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); | 3700 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); |
3701 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); | 3701 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); |
3702 | get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 3702 | get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
3703 | get_mac_list_cmd.size, | 3703 | get_mac_list_cmd.size, |
3704 | &get_mac_list_cmd.dma, | 3704 | &get_mac_list_cmd.dma, |
3705 | GFP_ATOMIC); | 3705 | GFP_ATOMIC); |
3706 | 3706 | ||
3707 | if (!get_mac_list_cmd.va) { | 3707 | if (!get_mac_list_cmd.va) { |
3708 | dev_err(&adapter->pdev->dev, | 3708 | dev_err(&adapter->pdev->dev, |
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, | |||
3829 | 3829 | ||
3830 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3830 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
3831 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); | 3831 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); |
3832 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 3832 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
3833 | GFP_KERNEL); | 3833 | GFP_KERNEL); |
3834 | if (!cmd.va) | 3834 | if (!cmd.va) |
3835 | return -ENOMEM; | 3835 | return -ENOMEM; |
3836 | 3836 | ||
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | |||
4035 | 4035 | ||
4036 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 4036 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
4037 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); | 4037 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); |
4038 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 4038 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
4039 | GFP_ATOMIC); | 4039 | GFP_ATOMIC); |
4040 | if (!cmd.va) { | 4040 | if (!cmd.va) { |
4041 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 4041 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
4042 | status = -ENOMEM; | 4042 | status = -ENOMEM; |
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | |||
4089 | 4089 | ||
4090 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 4090 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
4091 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 4091 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
4092 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 4092 | extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
4093 | extfat_cmd.size, &extfat_cmd.dma, | 4093 | extfat_cmd.size, &extfat_cmd.dma, |
4094 | GFP_ATOMIC); | 4094 | GFP_ATOMIC); |
4095 | if (!extfat_cmd.va) | 4095 | if (!extfat_cmd.va) |
4096 | return -ENOMEM; | 4096 | return -ENOMEM; |
4097 | 4097 | ||
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | |||
4127 | 4127 | ||
4128 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 4128 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
4129 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 4129 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
4130 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 4130 | extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
4131 | extfat_cmd.size, &extfat_cmd.dma, | 4131 | extfat_cmd.size, &extfat_cmd.dma, |
4132 | GFP_ATOMIC); | 4132 | GFP_ATOMIC); |
4133 | 4133 | ||
4134 | if (!extfat_cmd.va) { | 4134 | if (!extfat_cmd.va) { |
4135 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", | 4135 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", |
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | |||
4354 | 4354 | ||
4355 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 4355 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
4356 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); | 4356 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); |
4357 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 4357 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
4358 | GFP_ATOMIC); | 4358 | GFP_ATOMIC); |
4359 | if (!cmd.va) { | 4359 | if (!cmd.va) { |
4360 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 4360 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
4361 | status = -ENOMEM; | 4361 | status = -ENOMEM; |
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
4452 | 4452 | ||
4453 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 4453 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
4454 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); | 4454 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); |
4455 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 4455 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
4456 | GFP_ATOMIC); | 4456 | GFP_ATOMIC); |
4457 | if (!cmd.va) | 4457 | if (!cmd.va) |
4458 | return -ENOMEM; | 4458 | return -ENOMEM; |
4459 | 4459 | ||
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | |||
4539 | 4539 | ||
4540 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 4540 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
4541 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); | 4541 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); |
4542 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 4542 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
4543 | GFP_ATOMIC); | 4543 | GFP_ATOMIC); |
4544 | if (!cmd.va) | 4544 | if (!cmd.va) |
4545 | return -ENOMEM; | 4545 | return -ENOMEM; |
4546 | 4546 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3f6749fc889f..4c218341c51b 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |||
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | |||
274 | int status = 0; | 274 | int status = 0; |
275 | 275 | ||
276 | read_cmd.size = LANCER_READ_FILE_CHUNK; | 276 | read_cmd.size = LANCER_READ_FILE_CHUNK; |
277 | read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, | 277 | read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size, |
278 | &read_cmd.dma, GFP_ATOMIC); | 278 | &read_cmd.dma, GFP_ATOMIC); |
279 | 279 | ||
280 | if (!read_cmd.va) { | 280 | if (!read_cmd.va) { |
281 | dev_err(&adapter->pdev->dev, | 281 | dev_err(&adapter->pdev->dev, |
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
815 | } | 815 | } |
816 | 816 | ||
817 | cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); | 817 | cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); |
818 | cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); | 818 | cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); |
819 | if (!cmd.va) | 819 | if (!cmd.va) |
820 | return -ENOMEM; | 820 | return -ENOMEM; |
821 | 821 | ||
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) | |||
851 | }; | 851 | }; |
852 | 852 | ||
853 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | 853 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); |
854 | ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 854 | ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
855 | ddrdma_cmd.size, &ddrdma_cmd.dma, | 855 | ddrdma_cmd.size, &ddrdma_cmd.dma, |
856 | GFP_KERNEL); | 856 | GFP_KERNEL); |
857 | if (!ddrdma_cmd.va) | 857 | if (!ddrdma_cmd.va) |
858 | return -ENOMEM; | 858 | return -ENOMEM; |
859 | 859 | ||
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev, | |||
1014 | 1014 | ||
1015 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); | 1015 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); |
1016 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); | 1016 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); |
1017 | eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 1017 | eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, |
1018 | eeprom_cmd.size, &eeprom_cmd.dma, | 1018 | eeprom_cmd.size, &eeprom_cmd.dma, |
1019 | GFP_KERNEL); | 1019 | GFP_KERNEL); |
1020 | 1020 | ||
1021 | if (!eeprom_cmd.va) | 1021 | if (!eeprom_cmd.va) |
1022 | return -ENOMEM; | 1022 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 852f5bfe5f6d..d5026909dec5 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, | |||
167 | q->len = len; | 167 | q->len = len; |
168 | q->entry_size = entry_size; | 168 | q->entry_size = entry_size; |
169 | mem->size = len * entry_size; | 169 | mem->size = len * entry_size; |
170 | mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, | 170 | mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, |
171 | GFP_KERNEL); | 171 | &mem->dma, GFP_KERNEL); |
172 | if (!mem->va) | 172 | if (!mem->va) |
173 | return -ENOMEM; | 173 | return -ENOMEM; |
174 | return 0; | 174 | return 0; |
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter) | |||
5766 | int status = 0; | 5766 | int status = 0; |
5767 | 5767 | ||
5768 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 5768 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
5769 | mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, | 5769 | mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, |
5770 | &mbox_mem_alloc->dma, | 5770 | &mbox_mem_alloc->dma, |
5771 | GFP_KERNEL); | 5771 | GFP_KERNEL); |
5772 | if (!mbox_mem_alloc->va) | 5772 | if (!mbox_mem_alloc->va) |
5773 | return -ENOMEM; | 5773 | return -ENOMEM; |
5774 | 5774 | ||
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter) | |||
5777 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 5777 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
5778 | 5778 | ||
5779 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | 5779 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); |
5780 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, | 5780 | rx_filter->va = dma_alloc_coherent(dev, rx_filter->size, |
5781 | &rx_filter->dma, GFP_KERNEL); | 5781 | &rx_filter->dma, GFP_KERNEL); |
5782 | if (!rx_filter->va) { | 5782 | if (!rx_filter->va) { |
5783 | status = -ENOMEM; | 5783 | status = -ENOMEM; |
5784 | goto free_mbox; | 5784 | goto free_mbox; |
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter) | |||
5792 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); | 5792 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); |
5793 | else | 5793 | else |
5794 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); | 5794 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); |
5795 | stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, | 5795 | stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size, |
5796 | &stats_cmd->dma, GFP_KERNEL); | 5796 | &stats_cmd->dma, GFP_KERNEL); |
5797 | if (!stats_cmd->va) { | 5797 | if (!stats_cmd->va) { |
5798 | status = -ENOMEM; | 5798 | status = -ENOMEM; |
5799 | goto free_rx_filter; | 5799 | goto free_rx_filter; |
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 4d673225ed3e..3e5e97186fc4 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c | |||
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv) | |||
935 | return -ENOMEM; | 935 | return -ENOMEM; |
936 | 936 | ||
937 | /* Allocate descriptors */ | 937 | /* Allocate descriptors */ |
938 | priv->rxdes = dma_zalloc_coherent(priv->dev, | 938 | priv->rxdes = dma_alloc_coherent(priv->dev, |
939 | MAX_RX_QUEUE_ENTRIES * | 939 | MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), |
940 | sizeof(struct ftgmac100_rxdes), | 940 | &priv->rxdes_dma, GFP_KERNEL); |
941 | &priv->rxdes_dma, GFP_KERNEL); | ||
942 | if (!priv->rxdes) | 941 | if (!priv->rxdes) |
943 | return -ENOMEM; | 942 | return -ENOMEM; |
944 | priv->txdes = dma_zalloc_coherent(priv->dev, | 943 | priv->txdes = dma_alloc_coherent(priv->dev, |
945 | MAX_TX_QUEUE_ENTRIES * | 944 | MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), |
946 | sizeof(struct ftgmac100_txdes), | 945 | &priv->txdes_dma, GFP_KERNEL); |
947 | &priv->txdes_dma, GFP_KERNEL); | ||
948 | if (!priv->txdes) | 946 | if (!priv->txdes) |
949 | return -ENOMEM; | 947 | return -ENOMEM; |
950 | 948 | ||
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 084f24daf2b5..2a0e820526dc 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c | |||
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) | |||
734 | { | 734 | { |
735 | int i; | 735 | int i; |
736 | 736 | ||
737 | priv->descs = dma_zalloc_coherent(priv->dev, | 737 | priv->descs = dma_alloc_coherent(priv->dev, |
738 | sizeof(struct ftmac100_descs), | 738 | sizeof(struct ftmac100_descs), |
739 | &priv->descs_dma_addr, | 739 | &priv->descs_dma_addr, GFP_KERNEL); |
740 | GFP_KERNEL); | ||
741 | if (!priv->descs) | 740 | if (!priv->descs) |
742 | return -ENOMEM; | 741 | return -ENOMEM; |
743 | 742 | ||
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 471805ea363b..e5d853b7b454 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | |||
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) | |||
1006 | 1006 | ||
1007 | for (i = 0; i < QUEUE_NUMS; i++) { | 1007 | for (i = 0; i < QUEUE_NUMS; i++) { |
1008 | size = priv->pool[i].count * sizeof(struct hix5hd2_desc); | 1008 | size = priv->pool[i].count * sizeof(struct hix5hd2_desc); |
1009 | virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, | 1009 | virt_addr = dma_alloc_coherent(dev, size, &phys_addr, |
1010 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1011 | if (virt_addr == NULL) | 1011 | if (virt_addr == NULL) |
1012 | goto error_free_pool; | 1012 | goto error_free_pool; |
1013 | 1013 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 07cd58798083..1bf7a5f116a0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |||
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) | |||
2041 | { | 2041 | { |
2042 | int size = ring->desc_num * sizeof(ring->desc[0]); | 2042 | int size = ring->desc_num * sizeof(ring->desc[0]); |
2043 | 2043 | ||
2044 | ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, | 2044 | ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, |
2045 | &ring->desc_dma_addr, | 2045 | &ring->desc_dma_addr, GFP_KERNEL); |
2046 | GFP_KERNEL); | ||
2047 | if (!ring->desc) | 2046 | if (!ring->desc) |
2048 | return -ENOMEM; | 2047 | return -ENOMEM; |
2049 | 2048 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 8af0cef5609b..e483a6e730e6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | |||
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) | |||
39 | { | 39 | { |
40 | int size = ring->desc_num * sizeof(struct hclge_desc); | 40 | int size = ring->desc_num * sizeof(struct hclge_desc); |
41 | 41 | ||
42 | ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), | 42 | ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, |
43 | size, &ring->desc_dma_addr, | 43 | &ring->desc_dma_addr, GFP_KERNEL); |
44 | GFP_KERNEL); | ||
45 | if (!ring->desc) | 44 | if (!ring->desc) |
46 | return -ENOMEM; | 45 | return -ENOMEM; |
47 | 46 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d5765c8cf3a3..4e78e8812a04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | |||
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) | |||
115 | { | 115 | { |
116 | int size = ring->desc_num * sizeof(struct hclgevf_desc); | 116 | int size = ring->desc_num * sizeof(struct hclgevf_desc); |
117 | 117 | ||
118 | ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), | 118 | ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, |
119 | size, &ring->desc_dma_addr, | 119 | &ring->desc_dma_addr, GFP_KERNEL); |
120 | GFP_KERNEL); | ||
121 | if (!ring->desc) | 120 | if (!ring->desc) |
122 | return -ENOMEM; | 121 | return -ENOMEM; |
123 | 122 | ||
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index c40603a183df..b4fefb4c3064 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | |||
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, | |||
613 | u8 *cmd_vaddr; | 613 | u8 *cmd_vaddr; |
614 | int err = 0; | 614 | int err = 0; |
615 | 615 | ||
616 | cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, | 616 | cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, |
617 | &cmd_paddr, GFP_KERNEL); | 617 | &cmd_paddr, GFP_KERNEL); |
618 | if (!cmd_vaddr) { | 618 | if (!cmd_vaddr) { |
619 | dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); | 619 | dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); |
620 | return -ENOMEM; | 620 | return -ENOMEM; |
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, | |||
663 | dma_addr_t node_paddr; | 663 | dma_addr_t node_paddr; |
664 | int err; | 664 | int err; |
665 | 665 | ||
666 | node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, | 666 | node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, |
667 | &node_paddr, GFP_KERNEL); | 667 | GFP_KERNEL); |
668 | if (!node) { | 668 | if (!node) { |
669 | dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); | 669 | dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); |
670 | return -ENOMEM; | 670 | return -ENOMEM; |
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, | |||
821 | if (!chain->cell_ctxt) | 821 | if (!chain->cell_ctxt) |
822 | return -ENOMEM; | 822 | return -ENOMEM; |
823 | 823 | ||
824 | chain->wb_status = dma_zalloc_coherent(&pdev->dev, | 824 | chain->wb_status = dma_alloc_coherent(&pdev->dev, |
825 | sizeof(*chain->wb_status), | 825 | sizeof(*chain->wb_status), |
826 | &chain->wb_status_paddr, | 826 | &chain->wb_status_paddr, |
827 | GFP_KERNEL); | 827 | GFP_KERNEL); |
828 | if (!chain->wb_status) { | 828 | if (!chain->wb_status) { |
829 | dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); | 829 | dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); |
830 | return -ENOMEM; | 830 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 7cb8b9b94726..683e67515016 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | |||
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq) | |||
593 | } | 593 | } |
594 | 594 | ||
595 | for (pg = 0; pg < eq->num_pages; pg++) { | 595 | for (pg = 0; pg < eq->num_pages; pg++) { |
596 | eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, | 596 | eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, |
597 | eq->page_size, | 597 | eq->page_size, |
598 | &eq->dma_addr[pg], | 598 | &eq->dma_addr[pg], |
599 | GFP_KERNEL); | 599 | GFP_KERNEL); |
600 | if (!eq->virt_addr[pg]) { | 600 | if (!eq->virt_addr[pg]) { |
601 | err = -ENOMEM; | 601 | err = -ENOMEM; |
602 | goto err_dma_alloc; | 602 | goto err_dma_alloc; |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index 8e5897669a3a..a322a22d9357 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | |||
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, | |||
355 | goto err_sq_db; | 355 | goto err_sq_db; |
356 | } | 356 | } |
357 | 357 | ||
358 | ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), | 358 | ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), |
359 | &func_to_io->ci_dma_base, | 359 | &func_to_io->ci_dma_base, |
360 | GFP_KERNEL); | 360 | GFP_KERNEL); |
361 | if (!ci_addr_base) { | 361 | if (!ci_addr_base) { |
362 | dev_err(&pdev->dev, "Failed to allocate CI area\n"); | 362 | dev_err(&pdev->dev, "Failed to allocate CI area\n"); |
363 | err = -ENOMEM; | 363 | err = -ENOMEM; |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index bbf9bdd0ee3e..d62cf509646a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | |||
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq) | |||
336 | goto err_cqe_dma_arr_alloc; | 336 | goto err_cqe_dma_arr_alloc; |
337 | 337 | ||
338 | for (i = 0; i < wq->q_depth; i++) { | 338 | for (i = 0; i < wq->q_depth; i++) { |
339 | rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, | 339 | rq->cqe[i] = dma_alloc_coherent(&pdev->dev, |
340 | sizeof(*rq->cqe[i]), | 340 | sizeof(*rq->cqe[i]), |
341 | &rq->cqe_dma[i], GFP_KERNEL); | 341 | &rq->cqe_dma[i], GFP_KERNEL); |
342 | if (!rq->cqe[i]) | 342 | if (!rq->cqe[i]) |
343 | goto err_cqe_alloc; | 343 | goto err_cqe_alloc; |
344 | } | 344 | } |
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, | |||
415 | 415 | ||
416 | /* HW requirements: Must be at least 32 bit */ | 416 | /* HW requirements: Must be at least 32 bit */ |
417 | pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); | 417 | pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); |
418 | rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, | 418 | rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, |
419 | &rq->pi_dma_addr, GFP_KERNEL); | 419 | &rq->pi_dma_addr, GFP_KERNEL); |
420 | if (!rq->pi_virt_addr) { | 420 | if (!rq->pi_virt_addr) { |
421 | dev_err(&pdev->dev, "Failed to allocate PI address\n"); | 421 | dev_err(&pdev->dev, "Failed to allocate PI address\n"); |
422 | err = -ENOMEM; | 422 | err = -ENOMEM; |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 1dfa7eb05c10..cb66e7024659 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | |||
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, | |||
114 | struct pci_dev *pdev = hwif->pdev; | 114 | struct pci_dev *pdev = hwif->pdev; |
115 | dma_addr_t dma_addr; | 115 | dma_addr_t dma_addr; |
116 | 116 | ||
117 | *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, | 117 | *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, |
118 | GFP_KERNEL); | 118 | GFP_KERNEL); |
119 | if (!*vaddr) { | 119 | if (!*vaddr) { |
120 | dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); | 120 | dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); |
121 | return -ENOMEM; | 121 | return -ENOMEM; |
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, | |||
482 | u64 *paddr = &wq->block_vaddr[i]; | 482 | u64 *paddr = &wq->block_vaddr[i]; |
483 | dma_addr_t dma_addr; | 483 | dma_addr_t dma_addr; |
484 | 484 | ||
485 | *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, | 485 | *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, |
486 | &dma_addr, GFP_KERNEL); | 486 | &dma_addr, GFP_KERNEL); |
487 | if (!*vaddr) { | 487 | if (!*vaddr) { |
488 | dev_err(&pdev->dev, "Failed to allocate wq page\n"); | 488 | dev_err(&pdev->dev, "Failed to allocate wq page\n"); |
489 | goto err_alloc_wq_pages; | 489 | goto err_alloc_wq_pages; |
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index fff09dcf9e34..787d5aca5278 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c | |||
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev) | |||
636 | bd_size = sizeof(struct mal_descriptor) * | 636 | bd_size = sizeof(struct mal_descriptor) * |
637 | (NUM_TX_BUFF * mal->num_tx_chans + | 637 | (NUM_TX_BUFF * mal->num_tx_chans + |
638 | NUM_RX_BUFF * mal->num_rx_chans); | 638 | NUM_RX_BUFF * mal->num_rx_chans); |
639 | mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, | 639 | mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, |
640 | GFP_KERNEL); | 640 | GFP_KERNEL); |
641 | if (mal->bd_virt == NULL) { | 641 | if (mal->bd_virt == NULL) { |
642 | err = -ENOMEM; | 642 | err = -ENOMEM; |
643 | goto fail_unmap; | 643 | goto fail_unmap; |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 2569a168334c..a41008523c98 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
993 | 993 | ||
994 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | 994 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); |
995 | txdr->size = ALIGN(txdr->size, 4096); | 995 | txdr->size = ALIGN(txdr->size, 4096); |
996 | txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | 996 | txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, |
997 | GFP_KERNEL); | 997 | GFP_KERNEL); |
998 | if (!txdr->desc) { | 998 | if (!txdr->desc) { |
999 | ret_val = 2; | 999 | ret_val = 2; |
1000 | goto err_nomem; | 1000 | goto err_nomem; |
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); | 1053 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); |
1054 | rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 1054 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
1055 | GFP_KERNEL); | 1055 | GFP_KERNEL); |
1056 | if (!rxdr->desc) { | 1056 | if (!rxdr->desc) { |
1057 | ret_val = 6; | 1057 | ret_val = 6; |
1058 | goto err_nomem; | 1058 | goto err_nomem; |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 308c006cb41d..189f231075c2 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | |||
2305 | { | 2305 | { |
2306 | struct pci_dev *pdev = adapter->pdev; | 2306 | struct pci_dev *pdev = adapter->pdev; |
2307 | 2307 | ||
2308 | ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, | 2308 | ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, |
2309 | GFP_KERNEL); | 2309 | GFP_KERNEL); |
2310 | if (!ring->desc) | 2310 | if (!ring->desc) |
2311 | return -ENOMEM; | 2311 | return -ENOMEM; |
2312 | 2312 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4d40878e395a..f52e2c46e6a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, | |||
109 | struct i40e_pf *pf = (struct i40e_pf *)hw->back; | 109 | struct i40e_pf *pf = (struct i40e_pf *)hw->back; |
110 | 110 | ||
111 | mem->size = ALIGN(size, alignment); | 111 | mem->size = ALIGN(size, alignment); |
112 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, | 112 | mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, |
113 | &mem->pa, GFP_KERNEL); | 113 | GFP_KERNEL); |
114 | if (!mem->va) | 114 | if (!mem->va) |
115 | return -ENOMEM; | 115 | return -ENOMEM; |
116 | 116 | ||
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 1d4d1686909a..e5ac2d3fd816 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | |||
680 | txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); | 680 | txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); |
681 | txdr->size = ALIGN(txdr->size, 4096); | 681 | txdr->size = ALIGN(txdr->size, 4096); |
682 | 682 | ||
683 | txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | 683 | txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, |
684 | GFP_KERNEL); | 684 | GFP_KERNEL); |
685 | if (!txdr->desc) { | 685 | if (!txdr->desc) { |
686 | vfree(txdr->buffer_info); | 686 | vfree(txdr->buffer_info); |
687 | return -ENOMEM; | 687 | return -ENOMEM; |
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
763 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | 763 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); |
764 | rxdr->size = ALIGN(rxdr->size, 4096); | 764 | rxdr->size = ALIGN(rxdr->size, 4096); |
765 | 765 | ||
766 | rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 766 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
767 | GFP_KERNEL); | 767 | GFP_KERNEL); |
768 | 768 | ||
769 | if (!rxdr->desc) { | 769 | if (!rxdr->desc) { |
770 | vfree(rxdr->buffer_info); | 770 | vfree(rxdr->buffer_info); |
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e0875476a780..16066c2d5b3a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
@@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, | |||
2044 | u32 txq_dma; | 2044 | u32 txq_dma; |
2045 | 2045 | ||
2046 | /* Allocate memory for TX descriptors */ | 2046 | /* Allocate memory for TX descriptors */ |
2047 | aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, | 2047 | aggr_txq->descs = dma_alloc_coherent(&pdev->dev, |
2048 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | 2048 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, |
2049 | &aggr_txq->descs_dma, GFP_KERNEL); | 2049 | &aggr_txq->descs_dma, GFP_KERNEL); |
2050 | if (!aggr_txq->descs) | 2050 | if (!aggr_txq->descs) |
2051 | return -ENOMEM; | 2051 | return -ENOMEM; |
2052 | 2052 | ||
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 0bd4351b2a49..f8a6d6e3cb7a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep) | |||
557 | * table is full. | 557 | * table is full. |
558 | */ | 558 | */ |
559 | if (!pep->htpr) { | 559 | if (!pep->htpr) { |
560 | pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, | 560 | pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, |
561 | HASH_ADDR_TABLE_SIZE, | 561 | HASH_ADDR_TABLE_SIZE, |
562 | &pep->htpr_dma, GFP_KERNEL); | 562 | &pep->htpr_dma, GFP_KERNEL); |
563 | if (!pep->htpr) | 563 | if (!pep->htpr) |
564 | return -ENOMEM; | 564 | return -ENOMEM; |
565 | } else { | 565 | } else { |
@@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev) | |||
1044 | pep->rx_desc_count = 0; | 1044 | pep->rx_desc_count = 0; |
1045 | size = pep->rx_ring_size * sizeof(struct rx_desc); | 1045 | size = pep->rx_ring_size * sizeof(struct rx_desc); |
1046 | pep->rx_desc_area_size = size; | 1046 | pep->rx_desc_area_size = size; |
1047 | pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, | 1047 | pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, |
1048 | &pep->rx_desc_dma, | 1048 | &pep->rx_desc_dma, |
1049 | GFP_KERNEL); | 1049 | GFP_KERNEL); |
1050 | if (!pep->p_rx_desc_area) | 1050 | if (!pep->p_rx_desc_area) |
1051 | goto out; | 1051 | goto out; |
1052 | 1052 | ||
@@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev) | |||
1103 | pep->tx_desc_count = 0; | 1103 | pep->tx_desc_count = 0; |
1104 | size = pep->tx_ring_size * sizeof(struct tx_desc); | 1104 | size = pep->tx_ring_size * sizeof(struct tx_desc); |
1105 | pep->tx_desc_area_size = size; | 1105 | pep->tx_desc_area_size = size; |
1106 | pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, | 1106 | pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, |
1107 | &pep->tx_desc_dma, | 1107 | &pep->tx_desc_dma, |
1108 | GFP_KERNEL); | 1108 | GFP_KERNEL); |
1109 | if (!pep->p_tx_desc_area) | 1109 | if (!pep->p_tx_desc_area) |
1110 | goto out; | 1110 | goto out; |
1111 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | 1111 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 399f565dd85a..fe9653fa8aea 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
@@ -598,10 +598,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) | |||
598 | dma_addr_t dma_addr; | 598 | dma_addr_t dma_addr; |
599 | int i; | 599 | int i; |
600 | 600 | ||
601 | eth->scratch_ring = dma_zalloc_coherent(eth->dev, | 601 | eth->scratch_ring = dma_alloc_coherent(eth->dev, |
602 | cnt * sizeof(struct mtk_tx_dma), | 602 | cnt * sizeof(struct mtk_tx_dma), |
603 | ð->phy_scratch_ring, | 603 | ð->phy_scratch_ring, |
604 | GFP_ATOMIC); | 604 | GFP_ATOMIC); |
605 | if (unlikely(!eth->scratch_ring)) | 605 | if (unlikely(!eth->scratch_ring)) |
606 | return -ENOMEM; | 606 | return -ENOMEM; |
607 | 607 | ||
@@ -1213,8 +1213,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) | |||
1213 | if (!ring->buf) | 1213 | if (!ring->buf) |
1214 | goto no_tx_mem; | 1214 | goto no_tx_mem; |
1215 | 1215 | ||
1216 | ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, | 1216 | ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, |
1217 | &ring->phys, GFP_ATOMIC); | 1217 | &ring->phys, GFP_ATOMIC); |
1218 | if (!ring->dma) | 1218 | if (!ring->dma) |
1219 | goto no_tx_mem; | 1219 | goto no_tx_mem; |
1220 | 1220 | ||
@@ -1310,9 +1310,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) | |||
1310 | return -ENOMEM; | 1310 | return -ENOMEM; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | ring->dma = dma_zalloc_coherent(eth->dev, | 1313 | ring->dma = dma_alloc_coherent(eth->dev, |
1314 | rx_dma_size * sizeof(*ring->dma), | 1314 | rx_dma_size * sizeof(*ring->dma), |
1315 | &ring->phys, GFP_ATOMIC); | 1315 | &ring->phys, GFP_ATOMIC); |
1316 | if (!ring->dma) | 1316 | if (!ring->dma) |
1317 | return -ENOMEM; | 1317 | return -ENOMEM; |
1318 | 1318 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 9af34e03892c..dbc483e4a2ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c | |||
@@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, | |||
584 | buf->npages = 1; | 584 | buf->npages = 1; |
585 | buf->page_shift = get_order(size) + PAGE_SHIFT; | 585 | buf->page_shift = get_order(size) + PAGE_SHIFT; |
586 | buf->direct.buf = | 586 | buf->direct.buf = |
587 | dma_zalloc_coherent(&dev->persist->pdev->dev, | 587 | dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, |
588 | size, &t, GFP_KERNEL); | 588 | GFP_KERNEL); |
589 | if (!buf->direct.buf) | 589 | if (!buf->direct.buf) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
@@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
624 | 624 | ||
625 | for (i = 0; i < buf->nbufs; ++i) { | 625 | for (i = 0; i < buf->nbufs; ++i) { |
626 | buf->page_list[i].buf = | 626 | buf->page_list[i].buf = |
627 | dma_zalloc_coherent(&dev->persist->pdev->dev, | 627 | dma_alloc_coherent(&dev->persist->pdev->dev, |
628 | PAGE_SIZE, &t, GFP_KERNEL); | 628 | PAGE_SIZE, &t, GFP_KERNEL); |
629 | if (!buf->page_list[i].buf) | 629 | if (!buf->page_list[i].buf) |
630 | goto err_free; | 630 | goto err_free; |
631 | 631 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 456f30007ad6..421b9c3c8bf7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c | |||
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, | |||
63 | mutex_lock(&priv->alloc_mutex); | 63 | mutex_lock(&priv->alloc_mutex); |
64 | original_node = dev_to_node(&dev->pdev->dev); | 64 | original_node = dev_to_node(&dev->pdev->dev); |
65 | set_dev_node(&dev->pdev->dev, node); | 65 | set_dev_node(&dev->pdev->dev, node); |
66 | cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, | 66 | cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, |
67 | dma_handle, GFP_KERNEL); | 67 | GFP_KERNEL); |
68 | set_dev_node(&dev->pdev->dev, original_node); | 68 | set_dev_node(&dev->pdev->dev, original_node); |
69 | mutex_unlock(&priv->alloc_mutex); | 69 | mutex_unlock(&priv->alloc_mutex); |
70 | return cpu_handle; | 70 | return cpu_handle; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d3125cdf69db..3e0fa8a8077b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) | |||
1789 | { | 1789 | { |
1790 | struct device *ddev = &dev->pdev->dev; | 1790 | struct device *ddev = &dev->pdev->dev; |
1791 | 1791 | ||
1792 | cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, | 1792 | cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, |
1793 | &cmd->alloc_dma, GFP_KERNEL); | 1793 | &cmd->alloc_dma, GFP_KERNEL); |
1794 | if (!cmd->cmd_alloc_buf) | 1794 | if (!cmd->cmd_alloc_buf) |
1795 | return -ENOMEM; | 1795 | return -ENOMEM; |
1796 | 1796 | ||
@@ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) | |||
1804 | 1804 | ||
1805 | dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, | 1805 | dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, |
1806 | cmd->alloc_dma); | 1806 | cmd->alloc_dma); |
1807 | cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, | 1807 | cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, |
1808 | 2 * MLX5_ADAPTER_PAGE_SIZE - 1, | 1808 | 2 * MLX5_ADAPTER_PAGE_SIZE - 1, |
1809 | &cmd->alloc_dma, GFP_KERNEL); | 1809 | &cmd->alloc_dma, GFP_KERNEL); |
1810 | if (!cmd->cmd_alloc_buf) | 1810 | if (!cmd->cmd_alloc_buf) |
1811 | return -ENOMEM; | 1811 | return -ENOMEM; |
1812 | 1812 | ||
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 5f384f73007d..19ce0e605096 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) | |||
3604 | for (i = 0; i < mgp->num_slices; i++) { | 3604 | for (i = 0; i < mgp->num_slices; i++) { |
3605 | ss = &mgp->ss[i]; | 3605 | ss = &mgp->ss[i]; |
3606 | bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); | 3606 | bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); |
3607 | ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, | 3607 | ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, |
3608 | &ss->rx_done.bus, | 3608 | &ss->rx_done.bus, |
3609 | GFP_KERNEL); | 3609 | GFP_KERNEL); |
3610 | if (ss->rx_done.entry == NULL) | 3610 | if (ss->rx_done.entry == NULL) |
3611 | goto abort; | 3611 | goto abort; |
3612 | bytes = sizeof(*ss->fw_stats); | 3612 | bytes = sizeof(*ss->fw_stats); |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e97636d2e6ee..7d2d4241498f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) | |||
2170 | tx_ring->cnt = dp->txd_cnt; | 2170 | tx_ring->cnt = dp->txd_cnt; |
2171 | 2171 | ||
2172 | tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); | 2172 | tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); |
2173 | tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, | 2173 | tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, |
2174 | &tx_ring->dma, | 2174 | &tx_ring->dma, |
2175 | GFP_KERNEL | __GFP_NOWARN); | 2175 | GFP_KERNEL | __GFP_NOWARN); |
2176 | if (!tx_ring->txds) { | 2176 | if (!tx_ring->txds) { |
2177 | netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | 2177 | netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", |
2178 | tx_ring->cnt); | 2178 | tx_ring->cnt); |
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) | |||
2328 | 2328 | ||
2329 | rx_ring->cnt = dp->rxd_cnt; | 2329 | rx_ring->cnt = dp->rxd_cnt; |
2330 | rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); | 2330 | rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); |
2331 | rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, | 2331 | rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, |
2332 | &rx_ring->dma, | 2332 | &rx_ring->dma, |
2333 | GFP_KERNEL | __GFP_NOWARN); | 2333 | GFP_KERNEL | __GFP_NOWARN); |
2334 | if (!rx_ring->rxds) { | 2334 | if (!rx_ring->rxds) { |
2335 | netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | 2335 | netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", |
2336 | rx_ring->cnt); | 2336 | rx_ring->cnt); |
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 0611f2335b4a..1e408d1a9b5f 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c | |||
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) | |||
287 | priv->rx_bd_ci = 0; | 287 | priv->rx_bd_ci = 0; |
288 | 288 | ||
289 | /* Allocate the Tx and Rx buffer descriptors. */ | 289 | /* Allocate the Tx and Rx buffer descriptors. */ |
290 | priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 290 | priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
291 | sizeof(*priv->tx_bd_v) * TX_BD_NUM, | 291 | sizeof(*priv->tx_bd_v) * TX_BD_NUM, |
292 | &priv->tx_bd_p, GFP_KERNEL); | 292 | &priv->tx_bd_p, GFP_KERNEL); |
293 | if (!priv->tx_bd_v) | 293 | if (!priv->tx_bd_v) |
294 | goto out; | 294 | goto out; |
295 | 295 | ||
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) | |||
299 | if (!priv->tx_skb) | 299 | if (!priv->tx_skb) |
300 | goto out; | 300 | goto out; |
301 | 301 | ||
302 | priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 302 | priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
303 | sizeof(*priv->rx_bd_v) * RX_BD_NUM, | 303 | sizeof(*priv->rx_bd_v) * RX_BD_NUM, |
304 | &priv->rx_bd_p, GFP_KERNEL); | 304 | &priv->rx_bd_p, GFP_KERNEL); |
305 | if (!priv->rx_bd_v) | 305 | if (!priv->rx_bd_v) |
306 | goto out; | 306 | goto out; |
307 | 307 | ||
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 43c0c10dfeb7..552d930e3940 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | |||
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, | |||
1440 | 1440 | ||
1441 | size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; | 1441 | size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; |
1442 | rx_ring->rx_buff_pool = | 1442 | rx_ring->rx_buff_pool = |
1443 | dma_zalloc_coherent(&pdev->dev, size, | 1443 | dma_alloc_coherent(&pdev->dev, size, |
1444 | &rx_ring->rx_buff_pool_logic, GFP_KERNEL); | 1444 | &rx_ring->rx_buff_pool_logic, GFP_KERNEL); |
1445 | if (!rx_ring->rx_buff_pool) | 1445 | if (!rx_ring->rx_buff_pool) |
1446 | return -ENOMEM; | 1446 | return -ENOMEM; |
1447 | 1447 | ||
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, | |||
1755 | 1755 | ||
1756 | tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); | 1756 | tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); |
1757 | 1757 | ||
1758 | tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, | 1758 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, |
1759 | &tx_ring->dma, GFP_KERNEL); | 1759 | &tx_ring->dma, GFP_KERNEL); |
1760 | if (!tx_ring->desc) { | 1760 | if (!tx_ring->desc) { |
1761 | vfree(tx_ring->buffer_info); | 1761 | vfree(tx_ring->buffer_info); |
1762 | return -ENOMEM; | 1762 | return -ENOMEM; |
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, | |||
1798 | return -ENOMEM; | 1798 | return -ENOMEM; |
1799 | 1799 | ||
1800 | rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); | 1800 | rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); |
1801 | rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, | 1801 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, |
1802 | &rx_ring->dma, GFP_KERNEL); | 1802 | &rx_ring->dma, GFP_KERNEL); |
1803 | if (!rx_ring->desc) { | 1803 | if (!rx_ring->desc) { |
1804 | vfree(rx_ring->buffer_info); | 1804 | vfree(rx_ring->buffer_info); |
1805 | return -ENOMEM; | 1805 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 8a31a02c9f47..d21041554507 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c | |||
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev) | |||
401 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) | 401 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) |
402 | goto out_ring_desc; | 402 | goto out_ring_desc; |
403 | 403 | ||
404 | ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, | 404 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, |
405 | RX_RING_SIZE * sizeof(u64), | 405 | RX_RING_SIZE * sizeof(u64), |
406 | &ring->buf_dma, GFP_KERNEL); | 406 | &ring->buf_dma, GFP_KERNEL); |
407 | if (!ring->buffers) | 407 | if (!ring->buffers) |
408 | goto out_ring_desc; | 408 | goto out_ring_desc; |
409 | 409 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index dc1c1b616084..c2ad405b2f50 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c | |||
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) | |||
936 | u32 size = min_t(u32, total_size, psz); | 936 | u32 size = min_t(u32, total_size, psz); |
937 | void **p_virt = &p_mngr->t2[i].p_virt; | 937 | void **p_virt = &p_mngr->t2[i].p_virt; |
938 | 938 | ||
939 | *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, | 939 | *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, |
940 | size, &p_mngr->t2[i].p_phys, | 940 | &p_mngr->t2[i].p_phys, |
941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
942 | if (!p_mngr->t2[i].p_virt) { | 942 | if (!p_mngr->t2[i].p_virt) { |
943 | rc = -ENOMEM; | 943 | rc = -ENOMEM; |
944 | goto t2_fail; | 944 | goto t2_fail; |
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, | |||
1054 | u32 size; | 1054 | u32 size; |
1055 | 1055 | ||
1056 | size = min_t(u32, sz_left, p_blk->real_size_in_page); | 1056 | size = min_t(u32, sz_left, p_blk->real_size_in_page); |
1057 | p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, | 1057 | p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, |
1058 | &p_phys, GFP_KERNEL); | 1058 | &p_phys, GFP_KERNEL); |
1059 | if (!p_virt) | 1059 | if (!p_virt) |
1060 | return -ENOMEM; | 1060 | return -ENOMEM; |
1061 | 1061 | ||
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, | |||
2306 | goto out0; | 2306 | goto out0; |
2307 | } | 2307 | } |
2308 | 2308 | ||
2309 | p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, | 2309 | p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, |
2310 | p_blk->real_size_in_page, &p_phys, | 2310 | p_blk->real_size_in_page, &p_phys, |
2311 | GFP_KERNEL); | 2311 | GFP_KERNEL); |
2312 | if (!p_virt) { | 2312 | if (!p_virt) { |
2313 | rc = -ENOMEM; | 2313 | rc = -ENOMEM; |
2314 | goto out1; | 2314 | goto out1; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d344e9d43832..af38d3d73291 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | |||
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, | |||
434 | *(tx_ring->hw_consumer) = 0; | 434 | *(tx_ring->hw_consumer) = 0; |
435 | 435 | ||
436 | rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); | 436 | rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); |
437 | rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, | 437 | rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, |
438 | &rq_phys_addr, GFP_KERNEL); | 438 | &rq_phys_addr, GFP_KERNEL); |
439 | if (!rq_addr) | 439 | if (!rq_addr) |
440 | return -ENOMEM; | 440 | return -ENOMEM; |
441 | 441 | ||
442 | rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); | 442 | rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); |
443 | rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, | 443 | rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, |
444 | &rsp_phys_addr, GFP_KERNEL); | 444 | &rsp_phys_addr, GFP_KERNEL); |
445 | if (!rsp_addr) { | 445 | if (!rsp_addr) { |
446 | err = -ENOMEM; | 446 | err = -ENOMEM; |
447 | goto out_free_rq; | 447 | goto out_free_rq; |
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, | |||
855 | struct qlcnic_cmd_args cmd; | 855 | struct qlcnic_cmd_args cmd; |
856 | size_t nic_size = sizeof(struct qlcnic_info_le); | 856 | size_t nic_size = sizeof(struct qlcnic_info_le); |
857 | 857 | ||
858 | nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, | 858 | nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, |
859 | &nic_dma_t, GFP_KERNEL); | 859 | &nic_dma_t, GFP_KERNEL); |
860 | if (!nic_info_addr) | 860 | if (!nic_info_addr) |
861 | return -ENOMEM; | 861 | return -ENOMEM; |
862 | 862 | ||
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, | |||
909 | if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) | 909 | if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) |
910 | return err; | 910 | return err; |
911 | 911 | ||
912 | nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, | 912 | nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, |
913 | &nic_dma_t, GFP_KERNEL); | 913 | &nic_dma_t, GFP_KERNEL); |
914 | if (!nic_info_addr) | 914 | if (!nic_info_addr) |
915 | return -ENOMEM; | 915 | return -ENOMEM; |
916 | 916 | ||
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, | |||
964 | void *pci_info_addr; | 964 | void *pci_info_addr; |
965 | int err = 0, i; | 965 | int err = 0, i; |
966 | 966 | ||
967 | pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, | 967 | pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, |
968 | &pci_info_dma_t, GFP_KERNEL); | 968 | &pci_info_dma_t, GFP_KERNEL); |
969 | if (!pci_info_addr) | 969 | if (!pci_info_addr) |
970 | return -ENOMEM; | 970 | return -ENOMEM; |
971 | 971 | ||
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, | |||
1078 | return -EIO; | 1078 | return -EIO; |
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, | 1081 | stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, |
1082 | &stats_dma_t, GFP_KERNEL); | 1082 | &stats_dma_t, GFP_KERNEL); |
1083 | if (!stats_addr) | 1083 | if (!stats_addr) |
1084 | return -ENOMEM; | 1084 | return -ENOMEM; |
1085 | 1085 | ||
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, | |||
1134 | if (mac_stats == NULL) | 1134 | if (mac_stats == NULL) |
1135 | return -ENOMEM; | 1135 | return -ENOMEM; |
1136 | 1136 | ||
1137 | stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, | 1137 | stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, |
1138 | &stats_dma_t, GFP_KERNEL); | 1138 | &stats_dma_t, GFP_KERNEL); |
1139 | if (!stats_addr) | 1139 | if (!stats_addr) |
1140 | return -ENOMEM; | 1140 | return -ENOMEM; |
1141 | 1141 | ||
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 031f6e6ee9c1..8d790313ee3d 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) | |||
776 | 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ | 776 | 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ |
777 | 777 | ||
778 | ring_header->used = 0; | 778 | ring_header->used = 0; |
779 | ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, | 779 | ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size, |
780 | &ring_header->dma_addr, | 780 | &ring_header->dma_addr, |
781 | GFP_KERNEL); | 781 | GFP_KERNEL); |
782 | if (!ring_header->v_addr) | 782 | if (!ring_header->v_addr) |
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 690aee88f0eb..6d22dd500790 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | |||
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no, | |||
400 | } | 400 | } |
401 | 401 | ||
402 | /* allocate memory for TX descriptors */ | 402 | /* allocate memory for TX descriptors */ |
403 | tx_ring->dma_tx = dma_zalloc_coherent(dev, | 403 | tx_ring->dma_tx = dma_alloc_coherent(dev, |
404 | tx_rsize * sizeof(struct sxgbe_tx_norm_desc), | 404 | tx_rsize * sizeof(struct sxgbe_tx_norm_desc), |
405 | &tx_ring->dma_tx_phy, GFP_KERNEL); | 405 | &tx_ring->dma_tx_phy, GFP_KERNEL); |
406 | if (!tx_ring->dma_tx) | 406 | if (!tx_ring->dma_tx) |
407 | return -ENOMEM; | 407 | return -ENOMEM; |
408 | 408 | ||
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, | |||
479 | rx_ring->queue_no = queue_no; | 479 | rx_ring->queue_no = queue_no; |
480 | 480 | ||
481 | /* allocate memory for RX descriptors */ | 481 | /* allocate memory for RX descriptors */ |
482 | rx_ring->dma_rx = dma_zalloc_coherent(priv->device, | 482 | rx_ring->dma_rx = dma_alloc_coherent(priv->device, |
483 | rx_rsize * sizeof(struct sxgbe_rx_norm_desc), | 483 | rx_rsize * sizeof(struct sxgbe_rx_norm_desc), |
484 | &rx_ring->dma_rx_phy, GFP_KERNEL); | 484 | &rx_ring->dma_rx_phy, GFP_KERNEL); |
485 | 485 | ||
486 | if (rx_ring->dma_rx == NULL) | 486 | if (rx_ring->dma_rx == NULL) |
487 | return -ENOMEM; | 487 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c index a8ecb33390da..9c07b5175581 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.c +++ b/drivers/net/ethernet/sfc/falcon/nic.c | |||
@@ -33,8 +33,8 @@ | |||
33 | int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, | 33 | int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, |
34 | unsigned int len, gfp_t gfp_flags) | 34 | unsigned int len, gfp_t gfp_flags) |
35 | { | 35 | { |
36 | buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, | 36 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, |
37 | &buffer->dma_addr, gfp_flags); | 37 | &buffer->dma_addr, gfp_flags); |
38 | if (!buffer->addr) | 38 | if (!buffer->addr) |
39 | return -ENOMEM; | 39 | return -ENOMEM; |
40 | buffer->len = len; | 40 | buffer->len = len; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index aa1945a858d5..c2d45a40eb48 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -34,8 +34,8 @@ | |||
34 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 34 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
35 | unsigned int len, gfp_t gfp_flags) | 35 | unsigned int len, gfp_t gfp_flags) |
36 | { | 36 | { |
37 | buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, | 37 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, |
38 | &buffer->dma_addr, gfp_flags); | 38 | &buffer->dma_addr, gfp_flags); |
39 | if (!buffer->addr) | 39 | if (!buffer->addr) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
41 | buffer->len = len; | 41 | buffer->len = len; |
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 703fbbefea44..0e1b7e960b98 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c | |||
@@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev) | |||
211 | static int meth_init_tx_ring(struct meth_private *priv) | 211 | static int meth_init_tx_ring(struct meth_private *priv) |
212 | { | 212 | { |
213 | /* Init TX ring */ | 213 | /* Init TX ring */ |
214 | priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, | 214 | priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, |
215 | &priv->tx_ring_dma, GFP_ATOMIC); | 215 | &priv->tx_ring_dma, GFP_ATOMIC); |
216 | if (!priv->tx_ring) | 216 | if (!priv->tx_ring) |
217 | return -ENOMEM; | 217 | return -ENOMEM; |
218 | 218 | ||
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 05a0948ad929..a18149720aa2 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c | |||
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) | |||
1029 | struct netsec_desc_ring *dring = &priv->desc_ring[id]; | 1029 | struct netsec_desc_ring *dring = &priv->desc_ring[id]; |
1030 | int i; | 1030 | int i; |
1031 | 1031 | ||
1032 | dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, | 1032 | dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, |
1033 | &dring->desc_dma, GFP_KERNEL); | 1033 | &dring->desc_dma, GFP_KERNEL); |
1034 | if (!dring->vaddr) | 1034 | if (!dring->vaddr) |
1035 | goto err; | 1035 | goto err; |
1036 | 1036 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0e0a0789c2ed..0c4ab3444cc3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) | |||
1549 | goto err_dma; | 1549 | goto err_dma; |
1550 | 1550 | ||
1551 | if (priv->extend_desc) { | 1551 | if (priv->extend_desc) { |
1552 | rx_q->dma_erx = dma_zalloc_coherent(priv->device, | 1552 | rx_q->dma_erx = dma_alloc_coherent(priv->device, |
1553 | DMA_RX_SIZE * | 1553 | DMA_RX_SIZE * sizeof(struct dma_extended_desc), |
1554 | sizeof(struct | 1554 | &rx_q->dma_rx_phy, |
1555 | dma_extended_desc), | 1555 | GFP_KERNEL); |
1556 | &rx_q->dma_rx_phy, | ||
1557 | GFP_KERNEL); | ||
1558 | if (!rx_q->dma_erx) | 1556 | if (!rx_q->dma_erx) |
1559 | goto err_dma; | 1557 | goto err_dma; |
1560 | 1558 | ||
1561 | } else { | 1559 | } else { |
1562 | rx_q->dma_rx = dma_zalloc_coherent(priv->device, | 1560 | rx_q->dma_rx = dma_alloc_coherent(priv->device, |
1563 | DMA_RX_SIZE * | 1561 | DMA_RX_SIZE * sizeof(struct dma_desc), |
1564 | sizeof(struct | 1562 | &rx_q->dma_rx_phy, |
1565 | dma_desc), | 1563 | GFP_KERNEL); |
1566 | &rx_q->dma_rx_phy, | ||
1567 | GFP_KERNEL); | ||
1568 | if (!rx_q->dma_rx) | 1564 | if (!rx_q->dma_rx) |
1569 | goto err_dma; | 1565 | goto err_dma; |
1570 | } | 1566 | } |
@@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) | |||
1612 | goto err_dma; | 1608 | goto err_dma; |
1613 | 1609 | ||
1614 | if (priv->extend_desc) { | 1610 | if (priv->extend_desc) { |
1615 | tx_q->dma_etx = dma_zalloc_coherent(priv->device, | 1611 | tx_q->dma_etx = dma_alloc_coherent(priv->device, |
1616 | DMA_TX_SIZE * | 1612 | DMA_TX_SIZE * sizeof(struct dma_extended_desc), |
1617 | sizeof(struct | 1613 | &tx_q->dma_tx_phy, |
1618 | dma_extended_desc), | 1614 | GFP_KERNEL); |
1619 | &tx_q->dma_tx_phy, | ||
1620 | GFP_KERNEL); | ||
1621 | if (!tx_q->dma_etx) | 1615 | if (!tx_q->dma_etx) |
1622 | goto err_dma; | 1616 | goto err_dma; |
1623 | } else { | 1617 | } else { |
1624 | tx_q->dma_tx = dma_zalloc_coherent(priv->device, | 1618 | tx_q->dma_tx = dma_alloc_coherent(priv->device, |
1625 | DMA_TX_SIZE * | 1619 | DMA_TX_SIZE * sizeof(struct dma_desc), |
1626 | sizeof(struct | 1620 | &tx_q->dma_tx_phy, |
1627 | dma_desc), | 1621 | GFP_KERNEL); |
1628 | &tx_q->dma_tx_phy, | ||
1629 | GFP_KERNEL); | ||
1630 | if (!tx_q->dma_tx) | 1622 | if (!tx_q->dma_tx) |
1631 | goto err_dma; | 1623 | goto err_dma; |
1632 | } | 1624 | } |
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index edcd1e60b30d..37925a1d58de 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c | |||
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev) | |||
1311 | data->id, dev->irq, dev->name); | 1311 | data->id, dev->irq, dev->name); |
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, | 1314 | data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, |
1315 | &data->rxdma, GFP_KERNEL); | 1315 | &data->rxdma, GFP_KERNEL); |
1316 | if (!data->rxring) | 1316 | if (!data->rxring) |
1317 | return -ENOMEM; | 1317 | return -ENOMEM; |
1318 | 1318 | ||
1319 | data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, | 1319 | data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, |
1320 | &data->txdma, GFP_KERNEL); | 1320 | &data->txdma, GFP_KERNEL); |
1321 | if (!data->txring) { | 1321 | if (!data->txring) { |
1322 | dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, | 1322 | dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, |
1323 | data->rxdma); | 1323 | data->rxdma); |
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 2241f9897092..15bb058db392 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
243 | 243 | ||
244 | /* allocate the tx and rx ring buffer descriptors. */ | 244 | /* allocate the tx and rx ring buffer descriptors. */ |
245 | /* returns a virtual address and a physical address. */ | 245 | /* returns a virtual address and a physical address. */ |
246 | lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 246 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
247 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, | 247 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, |
248 | &lp->tx_bd_p, GFP_KERNEL); | 248 | &lp->tx_bd_p, GFP_KERNEL); |
249 | if (!lp->tx_bd_v) | 249 | if (!lp->tx_bd_v) |
250 | goto out; | 250 | goto out; |
251 | 251 | ||
252 | lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 252 | lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
253 | sizeof(*lp->rx_bd_v) * RX_BD_NUM, | 253 | sizeof(*lp->rx_bd_v) * RX_BD_NUM, |
254 | &lp->rx_bd_p, GFP_KERNEL); | 254 | &lp->rx_bd_p, GFP_KERNEL); |
255 | if (!lp->rx_bd_v) | 255 | if (!lp->rx_bd_v) |
256 | goto out; | 256 | goto out; |
257 | 257 | ||
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 12a14609ec47..0789d8af7d72 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev) | |||
199 | lp->rx_bd_ci = 0; | 199 | lp->rx_bd_ci = 0; |
200 | 200 | ||
201 | /* Allocate the Tx and Rx buffer descriptors. */ | 201 | /* Allocate the Tx and Rx buffer descriptors. */ |
202 | lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 202 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
203 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, | 203 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, |
204 | &lp->tx_bd_p, GFP_KERNEL); | 204 | &lp->tx_bd_p, GFP_KERNEL); |
205 | if (!lp->tx_bd_v) | 205 | if (!lp->tx_bd_v) |
206 | goto out; | 206 | goto out; |
207 | 207 | ||
208 | lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 208 | lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
209 | sizeof(*lp->rx_bd_v) * RX_BD_NUM, | 209 | sizeof(*lp->rx_bd_v) * RX_BD_NUM, |
210 | &lp->rx_bd_p, GFP_KERNEL); | 210 | &lp->rx_bd_p, GFP_KERNEL); |
211 | if (!lp->rx_bd_v) | 211 | if (!lp->rx_bd_v) |
212 | goto out; | 212 | goto out; |
213 | 213 | ||
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 61fceee73c1b..38ac8ef41f5f 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c | |||
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name, | |||
1139 | #endif | 1139 | #endif |
1140 | sizeof(PI_CONSUMER_BLOCK) + | 1140 | sizeof(PI_CONSUMER_BLOCK) + |
1141 | (PI_ALIGN_K_DESC_BLK - 1); | 1141 | (PI_ALIGN_K_DESC_BLK - 1); |
1142 | bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, | 1142 | bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, |
1143 | &bp->kmalloced_dma, | 1143 | &bp->kmalloced_dma, |
1144 | GFP_ATOMIC); | 1144 | GFP_ATOMIC); |
1145 | if (top_v == NULL) | 1145 | if (top_v == NULL) |
1146 | return DFX_K_FAILURE; | 1146 | return DFX_K_FAILURE; |
1147 | 1147 | ||
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index 72433f3efc74..5d661f60b101 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c | |||
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev) | |||
409 | if (bp->SharedMemSize > 0) { | 409 | if (bp->SharedMemSize > 0) { |
410 | bp->SharedMemSize += 16; // for descriptor alignment | 410 | bp->SharedMemSize += 16; // for descriptor alignment |
411 | 411 | ||
412 | bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, | 412 | bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev, |
413 | bp->SharedMemSize, | 413 | bp->SharedMemSize, |
414 | &bp->SharedMemDMA, | 414 | &bp->SharedMemDMA, |
415 | GFP_ATOMIC); | 415 | GFP_ATOMIC); |
416 | if (!bp->SharedMemAddr) { | 416 | if (!bp->SharedMemAddr) { |
417 | printk("could not allocate mem for "); | 417 | printk("could not allocate mem for "); |
418 | printk("hardware module: %ld byte\n", | 418 | printk("hardware module: %ld byte\n", |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e454dfc9ad8f..89984fcab01e 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, | |||
535 | } | 535 | } |
536 | 536 | ||
537 | sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); | 537 | sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); |
538 | tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, | 538 | tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, |
539 | &tq->buf_info_pa, GFP_KERNEL); | 539 | &tq->buf_info_pa, GFP_KERNEL); |
540 | if (!tq->buf_info) | 540 | if (!tq->buf_info) |
541 | goto err; | 541 | goto err; |
542 | 542 | ||
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) | |||
1815 | 1815 | ||
1816 | sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + | 1816 | sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + |
1817 | rq->rx_ring[1].size); | 1817 | rq->rx_ring[1].size); |
1818 | bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, | 1818 | bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, |
1819 | GFP_KERNEL); | 1819 | GFP_KERNEL); |
1820 | if (!bi) | 1820 | if (!bi) |
1821 | goto err; | 1821 | goto err; |
1822 | 1822 | ||
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 839fa7715709..be6485428198 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) | |||
279 | iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); | 279 | iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); |
280 | 280 | ||
281 | /* Get BD buffer */ | 281 | /* Get BD buffer */ |
282 | bd_buffer = dma_zalloc_coherent(priv->dev, | 282 | bd_buffer = dma_alloc_coherent(priv->dev, |
283 | (RX_BD_RING_LEN + TX_BD_RING_LEN) * | 283 | (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH, |
284 | MAX_RX_BUF_LENGTH, | 284 | &bd_dma_addr, GFP_KERNEL); |
285 | &bd_dma_addr, GFP_KERNEL); | ||
286 | 285 | ||
287 | if (!bd_buffer) { | 286 | if (!bd_buffer) { |
288 | dev_err(priv->dev, "Could not allocate buffer descriptors\n"); | 287 | dev_err(priv->dev, "Could not allocate buffer descriptors\n"); |
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index f6d3ecbdd3a3..2a5668b4f6bc 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c | |||
@@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, | |||
1553 | * coherent DMA are unsupported | 1553 | * coherent DMA are unsupported |
1554 | */ | 1554 | */ |
1555 | dest_ring->base_addr_owner_space_unaligned = | 1555 | dest_ring->base_addr_owner_space_unaligned = |
1556 | dma_zalloc_coherent(ar->dev, | 1556 | dma_alloc_coherent(ar->dev, |
1557 | (nentries * sizeof(struct ce_desc) + | 1557 | (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), |
1558 | CE_DESC_RING_ALIGN), | 1558 | &base_addr, GFP_KERNEL); |
1559 | &base_addr, GFP_KERNEL); | ||
1560 | if (!dest_ring->base_addr_owner_space_unaligned) { | 1559 | if (!dest_ring->base_addr_owner_space_unaligned) { |
1561 | kfree(dest_ring); | 1560 | kfree(dest_ring); |
1562 | return ERR_PTR(-ENOMEM); | 1561 | return ERR_PTR(-ENOMEM); |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e49b36752ba2..49758490eaba 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, | |||
5169 | if (vif->type == NL80211_IFTYPE_ADHOC || | 5169 | if (vif->type == NL80211_IFTYPE_ADHOC || |
5170 | vif->type == NL80211_IFTYPE_MESH_POINT || | 5170 | vif->type == NL80211_IFTYPE_MESH_POINT || |
5171 | vif->type == NL80211_IFTYPE_AP) { | 5171 | vif->type == NL80211_IFTYPE_AP) { |
5172 | arvif->beacon_buf = dma_zalloc_coherent(ar->dev, | 5172 | arvif->beacon_buf = dma_alloc_coherent(ar->dev, |
5173 | IEEE80211_MAX_FRAME_LEN, | 5173 | IEEE80211_MAX_FRAME_LEN, |
5174 | &arvif->beacon_paddr, | 5174 | &arvif->beacon_paddr, |
5175 | GFP_ATOMIC); | 5175 | GFP_ATOMIC); |
5176 | if (!arvif->beacon_buf) { | 5176 | if (!arvif->beacon_buf) { |
5177 | ret = -ENOMEM; | 5177 | ret = -ENOMEM; |
5178 | ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", | 5178 | ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 01b4edb00e9e..39e0b1cc2a12 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, | |||
936 | */ | 936 | */ |
937 | alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); | 937 | alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); |
938 | 938 | ||
939 | data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, | 939 | data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes, |
940 | alloc_nbytes, | ||
941 | &ce_data_base, | 940 | &ce_data_base, |
942 | GFP_ATOMIC); | 941 | GFP_ATOMIC); |
943 | 942 | ||
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ba837403e266..8e236d158ca6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c | |||
@@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, | |||
5193 | void *vaddr; | 5193 | void *vaddr; |
5194 | 5194 | ||
5195 | pool_size = num_units * round_up(unit_len, 4); | 5195 | pool_size = num_units * round_up(unit_len, 4); |
5196 | vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); | 5196 | vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); |
5197 | 5197 | ||
5198 | if (!vaddr) | 5198 | if (!vaddr) |
5199 | return -ENOMEM; | 5199 | return -ENOMEM; |
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 5ab3e31c9ffa..bab30f7a443c 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c | |||
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn | |||
174 | int i; | 174 | int i; |
175 | 175 | ||
176 | size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); | 176 | size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); |
177 | wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, | 177 | wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr, |
178 | &wcn_ch->dma_addr, | 178 | GFP_KERNEL); |
179 | GFP_KERNEL); | ||
180 | if (!wcn_ch->cpu_addr) | 179 | if (!wcn_ch->cpu_addr) |
181 | return -ENOMEM; | 180 | return -ENOMEM; |
182 | 181 | ||
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) | |||
627 | 16 - (WCN36XX_BD_CHUNK_SIZE % 8); | 626 | 16 - (WCN36XX_BD_CHUNK_SIZE % 8); |
628 | 627 | ||
629 | s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; | 628 | s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; |
630 | cpu_addr = dma_zalloc_coherent(wcn->dev, s, | 629 | cpu_addr = dma_alloc_coherent(wcn->dev, s, |
631 | &wcn->mgmt_mem_pool.phy_addr, | 630 | &wcn->mgmt_mem_pool.phy_addr, |
632 | GFP_KERNEL); | 631 | GFP_KERNEL); |
633 | if (!cpu_addr) | 632 | if (!cpu_addr) |
634 | goto out_err; | 633 | goto out_err; |
635 | 634 | ||
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn) | |||
642 | 16 - (WCN36XX_BD_CHUNK_SIZE % 8); | 641 | 16 - (WCN36XX_BD_CHUNK_SIZE % 8); |
643 | 642 | ||
644 | s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; | 643 | s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; |
645 | cpu_addr = dma_zalloc_coherent(wcn->dev, s, | 644 | cpu_addr = dma_alloc_coherent(wcn->dev, s, |
646 | &wcn->data_mem_pool.phy_addr, | 645 | &wcn->data_mem_pool.phy_addr, |
647 | GFP_KERNEL); | 646 | GFP_KERNEL); |
648 | if (!cpu_addr) | 647 | if (!cpu_addr) |
649 | goto out_err; | 648 | goto out_err; |
650 | 649 | ||
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 05a8348bd7b9..3380aaef456c 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c | |||
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil, | |||
99 | /* Status messages are allocated and initialized to 0. This is necessary | 99 | /* Status messages are allocated and initialized to 0. This is necessary |
100 | * since DR bit should be initialized to 0. | 100 | * since DR bit should be initialized to 0. |
101 | */ | 101 | */ |
102 | sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); | 102 | sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); |
103 | if (!sring->va) | 103 | if (!sring->va) |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
105 | 105 | ||
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil, | |||
381 | if (!ring->ctx) | 381 | if (!ring->ctx) |
382 | goto err; | 382 | goto err; |
383 | 383 | ||
384 | ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); | 384 | ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); |
385 | if (!ring->va) | 385 | if (!ring->va) |
386 | goto err_free_ctx; | 386 | goto err_free_ctx; |
387 | 387 | ||
388 | if (ring->is_rx) { | 388 | if (ring->is_rx) { |
389 | sz = sizeof(*ring->edma_rx_swtail.va); | 389 | sz = sizeof(*ring->edma_rx_swtail.va); |
390 | ring->edma_rx_swtail.va = | 390 | ring->edma_rx_swtail.va = |
391 | dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, | 391 | dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, |
392 | GFP_KERNEL); | 392 | GFP_KERNEL); |
393 | if (!ring->edma_rx_swtail.va) | 393 | if (!ring->edma_rx_swtail.va) |
394 | goto err_free_va; | 394 | goto err_free_va; |
395 | } | 395 | } |
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index dfc4c34298d4..b34e51933257 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c | |||
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) | |||
431 | u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? | 431 | u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? |
432 | B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; | 432 | B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; |
433 | 433 | ||
434 | ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, | 434 | ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, |
435 | ring_mem_size, &(ring->dmabase), | 435 | ring_mem_size, &(ring->dmabase), |
436 | GFP_KERNEL); | 436 | GFP_KERNEL); |
437 | if (!ring->descbase) | 437 | if (!ring->descbase) |
438 | return -ENOMEM; | 438 | return -ENOMEM; |
439 | 439 | ||
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index 1b1da7d83652..2ce1537d983c 100644 --- a/drivers/net/wireless/broadcom/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c | |||
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring, | |||
331 | static int alloc_ringmemory(struct b43legacy_dmaring *ring) | 331 | static int alloc_ringmemory(struct b43legacy_dmaring *ring) |
332 | { | 332 | { |
333 | /* GFP flags must match the flags in free_ringmemory()! */ | 333 | /* GFP flags must match the flags in free_ringmemory()! */ |
334 | ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, | 334 | ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, |
335 | B43legacy_DMA_RINGMEMSIZE, | 335 | B43legacy_DMA_RINGMEMSIZE, |
336 | &(ring->dmabase), GFP_KERNEL); | 336 | &(ring->dmabase), GFP_KERNEL); |
337 | if (!ring->descbase) | 337 | if (!ring->descbase) |
338 | return -ENOMEM; | 338 | return -ENOMEM; |
339 | 339 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 16d7dda965d8..0f69b3fa296e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | |||
@@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) | |||
1281 | u32 addr; | 1281 | u32 addr; |
1282 | 1282 | ||
1283 | devinfo->shared.scratch = | 1283 | devinfo->shared.scratch = |
1284 | dma_zalloc_coherent(&devinfo->pdev->dev, | 1284 | dma_alloc_coherent(&devinfo->pdev->dev, |
1285 | BRCMF_DMA_D2H_SCRATCH_BUF_LEN, | 1285 | BRCMF_DMA_D2H_SCRATCH_BUF_LEN, |
1286 | &devinfo->shared.scratch_dmahandle, | 1286 | &devinfo->shared.scratch_dmahandle, |
1287 | GFP_KERNEL); | 1287 | GFP_KERNEL); |
1288 | if (!devinfo->shared.scratch) | 1288 | if (!devinfo->shared.scratch) |
1289 | goto fail; | 1289 | goto fail; |
1290 | 1290 | ||
@@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) | |||
1298 | brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); | 1298 | brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); |
1299 | 1299 | ||
1300 | devinfo->shared.ringupd = | 1300 | devinfo->shared.ringupd = |
1301 | dma_zalloc_coherent(&devinfo->pdev->dev, | 1301 | dma_alloc_coherent(&devinfo->pdev->dev, |
1302 | BRCMF_DMA_D2H_RINGUPD_BUF_LEN, | 1302 | BRCMF_DMA_D2H_RINGUPD_BUF_LEN, |
1303 | &devinfo->shared.ringupd_dmahandle, | 1303 | &devinfo->shared.ringupd_dmahandle, |
1304 | GFP_KERNEL); | 1304 | GFP_KERNEL); |
1305 | if (!devinfo->shared.ringupd) | 1305 | if (!devinfo->shared.ringupd) |
1306 | goto fail; | 1306 | goto fail; |
1307 | 1307 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e965cc588850..9e850c25877b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c | |||
@@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, | |||
711 | * Allocate the circular buffer of Read Buffer Descriptors | 711 | * Allocate the circular buffer of Read Buffer Descriptors |
712 | * (RBDs) | 712 | * (RBDs) |
713 | */ | 713 | */ |
714 | rxq->bd = dma_zalloc_coherent(dev, | 714 | rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, |
715 | free_size * rxq->queue_size, | 715 | &rxq->bd_dma, GFP_KERNEL); |
716 | &rxq->bd_dma, GFP_KERNEL); | ||
717 | if (!rxq->bd) | 716 | if (!rxq->bd) |
718 | goto err; | 717 | goto err; |
719 | 718 | ||
720 | if (trans->cfg->mq_rx_supported) { | 719 | if (trans->cfg->mq_rx_supported) { |
721 | rxq->used_bd = dma_zalloc_coherent(dev, | 720 | rxq->used_bd = dma_alloc_coherent(dev, |
722 | (use_rx_td ? | 721 | (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, |
723 | sizeof(*rxq->cd) : | 722 | &rxq->used_bd_dma, |
724 | sizeof(__le32)) * | 723 | GFP_KERNEL); |
725 | rxq->queue_size, | ||
726 | &rxq->used_bd_dma, | ||
727 | GFP_KERNEL); | ||
728 | if (!rxq->used_bd) | 724 | if (!rxq->used_bd) |
729 | goto err; | 725 | goto err; |
730 | } | 726 | } |
731 | 727 | ||
732 | /* Allocate the driver's pointer to receive buffer status */ | 728 | /* Allocate the driver's pointer to receive buffer status */ |
733 | rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? | 729 | rxq->rb_stts = dma_alloc_coherent(dev, |
734 | sizeof(__le16) : | 730 | use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status), |
735 | sizeof(struct iwl_rb_status), | 731 | &rxq->rb_stts_dma, GFP_KERNEL); |
736 | &rxq->rb_stts_dma, | ||
737 | GFP_KERNEL); | ||
738 | if (!rxq->rb_stts) | 732 | if (!rxq->rb_stts) |
739 | goto err; | 733 | goto err; |
740 | 734 | ||
@@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, | |||
742 | return 0; | 736 | return 0; |
743 | 737 | ||
744 | /* Allocate the driver's pointer to TR tail */ | 738 | /* Allocate the driver's pointer to TR tail */ |
745 | rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), | 739 | rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16), |
746 | &rxq->tr_tail_dma, | 740 | &rxq->tr_tail_dma, GFP_KERNEL); |
747 | GFP_KERNEL); | ||
748 | if (!rxq->tr_tail) | 741 | if (!rxq->tr_tail) |
749 | goto err; | 742 | goto err; |
750 | 743 | ||
751 | /* Allocate the driver's pointer to CR tail */ | 744 | /* Allocate the driver's pointer to CR tail */ |
752 | rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), | 745 | rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16), |
753 | &rxq->cr_tail_dma, | 746 | &rxq->cr_tail_dma, GFP_KERNEL); |
754 | GFP_KERNEL); | ||
755 | if (!rxq->cr_tail) | 747 | if (!rxq->cr_tail) |
756 | goto err; | 748 | goto err; |
757 | /* | 749 | /* |
@@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans) | |||
1947 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1939 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1948 | 1940 | ||
1949 | trans_pcie->ict_tbl = | 1941 | trans_pcie->ict_tbl = |
1950 | dma_zalloc_coherent(trans->dev, ICT_SIZE, | 1942 | dma_alloc_coherent(trans->dev, ICT_SIZE, |
1951 | &trans_pcie->ict_tbl_dma, | 1943 | &trans_pcie->ict_tbl_dma, GFP_KERNEL); |
1952 | GFP_KERNEL); | ||
1953 | if (!trans_pcie->ict_tbl) | 1944 | if (!trans_pcie->ict_tbl) |
1954 | return -ENOMEM; | 1945 | return -ENOMEM; |
1955 | 1946 | ||
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c index 528cb0401df1..4956a54151cb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c | |||
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | |||
119 | /* | 119 | /* |
120 | * Allocate DMA memory for descriptor and buffer. | 120 | * Allocate DMA memory for descriptor and buffer. |
121 | */ | 121 | */ |
122 | addr = dma_zalloc_coherent(rt2x00dev->dev, | 122 | addr = dma_alloc_coherent(rt2x00dev->dev, |
123 | queue->limit * queue->desc_size, &dma, | 123 | queue->limit * queue->desc_size, &dma, |
124 | GFP_KERNEL); | 124 | GFP_KERNEL); |
125 | if (!addr) | 125 | if (!addr) |
126 | return -ENOMEM; | 126 | return -ENOMEM; |
127 | 127 | ||
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 5ee5f40b4dfc..f1eaa3c4d46a 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c | |||
@@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev) | |||
1339 | int rc; | 1339 | int rc; |
1340 | 1340 | ||
1341 | sndev->nr_rsvd_luts++; | 1341 | sndev->nr_rsvd_luts++; |
1342 | sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, | 1342 | sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev, |
1343 | LUT_SIZE, | 1343 | LUT_SIZE, |
1344 | &sndev->self_shared_dma, | 1344 | &sndev->self_shared_dma, |
1345 | GFP_KERNEL); | 1345 | GFP_KERNEL); |
1346 | if (!sndev->self_shared) { | 1346 | if (!sndev->self_shared) { |
1347 | dev_err(&sndev->stdev->dev, | 1347 | dev_err(&sndev->stdev->dev, |
1348 | "unable to allocate memory for shared mw\n"); | 1348 | "unable to allocate memory for shared mw\n"); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5a0bf6a24d50..e8d0942c9c92 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1485,8 +1485,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) | |||
1485 | if (dev->ctrl.queue_count > qid) | 1485 | if (dev->ctrl.queue_count > qid) |
1486 | return 0; | 1486 | return 0; |
1487 | 1487 | ||
1488 | nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), | 1488 | nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth), |
1489 | &nvmeq->cq_dma_addr, GFP_KERNEL); | 1489 | &nvmeq->cq_dma_addr, GFP_KERNEL); |
1490 | if (!nvmeq->cqes) | 1490 | if (!nvmeq->cqes) |
1491 | goto free_nvmeq; | 1491 | goto free_nvmeq; |
1492 | 1492 | ||
@@ -1915,8 +1915,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, | |||
1915 | if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) | 1915 | if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) |
1916 | max_entries = dev->ctrl.hmmaxd; | 1916 | max_entries = dev->ctrl.hmmaxd; |
1917 | 1917 | ||
1918 | descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), | 1918 | descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), |
1919 | &descs_dma, GFP_KERNEL); | 1919 | &descs_dma, GFP_KERNEL); |
1920 | if (!descs) | 1920 | if (!descs) |
1921 | goto out; | 1921 | goto out; |
1922 | 1922 | ||
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 9deb56989d72..cb3401a931f8 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c | |||
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) | |||
602 | } | 602 | } |
603 | 603 | ||
604 | /* Reserve memory for event queue and make sure memories are zeroed */ | 604 | /* Reserve memory for event queue and make sure memories are zeroed */ |
605 | msi->eq_cpu = dma_zalloc_coherent(pcie->dev, | 605 | msi->eq_cpu = dma_alloc_coherent(pcie->dev, |
606 | msi->nr_eq_region * EQ_MEM_REGION_SIZE, | 606 | msi->nr_eq_region * EQ_MEM_REGION_SIZE, |
607 | &msi->eq_dma, GFP_KERNEL); | 607 | &msi->eq_dma, GFP_KERNEL); |
608 | if (!msi->eq_cpu) { | 608 | if (!msi->eq_cpu) { |
609 | ret = -ENOMEM; | 609 | ret = -ENOMEM; |
610 | goto free_irqs; | 610 | goto free_irqs; |
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 6c5536d3d42a..e22766c79fe9 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c | |||
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, | |||
1373 | if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) | 1373 | if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) |
1374 | return 0; | 1374 | return 0; |
1375 | 1375 | ||
1376 | stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev, | 1376 | stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev, |
1377 | sizeof(*stdev->dma_mrpc), | 1377 | sizeof(*stdev->dma_mrpc), |
1378 | &stdev->dma_mrpc_dma_addr, | 1378 | &stdev->dma_mrpc_dma_addr, |
1379 | GFP_KERNEL); | 1379 | GFP_KERNEL); |
1380 | if (stdev->dma_mrpc == NULL) | 1380 | if (stdev->dma_mrpc == NULL) |
1381 | return -ENOMEM; | 1381 | return -ENOMEM; |
1382 | 1382 | ||
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index bb655854713d..b64c56c33c3b 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv) | |||
1382 | INIT_WORK(&priv->idb_work, tsi721_db_dpc); | 1382 | INIT_WORK(&priv->idb_work, tsi721_db_dpc); |
1383 | 1383 | ||
1384 | /* Allocate buffer for inbound doorbells queue */ | 1384 | /* Allocate buffer for inbound doorbells queue */ |
1385 | priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, | 1385 | priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, |
1386 | IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, | 1386 | IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, |
1387 | &priv->idb_dma, GFP_KERNEL); | 1387 | &priv->idb_dma, GFP_KERNEL); |
1388 | if (!priv->idb_base) | 1388 | if (!priv->idb_base) |
1389 | return -ENOMEM; | 1389 | return -ENOMEM; |
1390 | 1390 | ||
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) | |||
1447 | regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); | 1447 | regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); |
1448 | 1448 | ||
1449 | /* Allocate space for DMA descriptors */ | 1449 | /* Allocate space for DMA descriptors */ |
1450 | bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, | 1450 | bd_ptr = dma_alloc_coherent(&priv->pdev->dev, |
1451 | bd_num * sizeof(struct tsi721_dma_desc), | 1451 | bd_num * sizeof(struct tsi721_dma_desc), |
1452 | &bd_phys, GFP_KERNEL); | 1452 | &bd_phys, GFP_KERNEL); |
1453 | if (!bd_ptr) | 1453 | if (!bd_ptr) |
1454 | return -ENOMEM; | 1454 | return -ENOMEM; |
1455 | 1455 | ||
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) | |||
1464 | sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? | 1464 | sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? |
1465 | bd_num : TSI721_DMA_MINSTSSZ; | 1465 | bd_num : TSI721_DMA_MINSTSSZ; |
1466 | sts_size = roundup_pow_of_two(sts_size); | 1466 | sts_size = roundup_pow_of_two(sts_size); |
1467 | sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, | 1467 | sts_ptr = dma_alloc_coherent(&priv->pdev->dev, |
1468 | sts_size * sizeof(struct tsi721_dma_sts), | 1468 | sts_size * sizeof(struct tsi721_dma_sts), |
1469 | &sts_phys, GFP_KERNEL); | 1469 | &sts_phys, GFP_KERNEL); |
1470 | if (!sts_ptr) { | 1470 | if (!sts_ptr) { |
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, | |||
1939 | 1939 | ||
1940 | /* Outbound message descriptor status FIFO allocation */ | 1940 | /* Outbound message descriptor status FIFO allocation */ |
1941 | priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); | 1941 | priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); |
1942 | priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, | 1942 | priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, |
1943 | priv->omsg_ring[mbox].sts_size * | 1943 | priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), |
1944 | sizeof(struct tsi721_dma_sts), | 1944 | &priv->omsg_ring[mbox].sts_phys, |
1945 | &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); | 1945 | GFP_KERNEL); |
1946 | if (priv->omsg_ring[mbox].sts_base == NULL) { | 1946 | if (priv->omsg_ring[mbox].sts_base == NULL) { |
1947 | tsi_debug(OMSG, &priv->pdev->dev, | 1947 | tsi_debug(OMSG, &priv->pdev->dev, |
1948 | "ENOMEM for OB_MSG_%d status FIFO", mbox); | 1948 | "ENOMEM for OB_MSG_%d status FIFO", mbox); |
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 006ea5a45020..7f5d4436f594 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c | |||
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) | |||
90 | * Allocate space for DMA descriptors | 90 | * Allocate space for DMA descriptors |
91 | * (add an extra element for link descriptor) | 91 | * (add an extra element for link descriptor) |
92 | */ | 92 | */ |
93 | bd_ptr = dma_zalloc_coherent(dev, | 93 | bd_ptr = dma_alloc_coherent(dev, |
94 | (bd_num + 1) * sizeof(struct tsi721_dma_desc), | 94 | (bd_num + 1) * sizeof(struct tsi721_dma_desc), |
95 | &bd_phys, GFP_ATOMIC); | 95 | &bd_phys, GFP_ATOMIC); |
96 | if (!bd_ptr) | 96 | if (!bd_ptr) |
97 | return -ENOMEM; | 97 | return -ENOMEM; |
98 | 98 | ||
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) | |||
108 | sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? | 108 | sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? |
109 | (bd_num + 1) : TSI721_DMA_MINSTSSZ; | 109 | (bd_num + 1) : TSI721_DMA_MINSTSSZ; |
110 | sts_size = roundup_pow_of_two(sts_size); | 110 | sts_size = roundup_pow_of_two(sts_size); |
111 | sts_ptr = dma_zalloc_coherent(dev, | 111 | sts_ptr = dma_alloc_coherent(dev, |
112 | sts_size * sizeof(struct tsi721_dma_sts), | 112 | sts_size * sizeof(struct tsi721_dma_sts), |
113 | &sts_phys, GFP_ATOMIC); | 113 | &sts_phys, GFP_ATOMIC); |
114 | if (!sts_ptr) { | 114 | if (!sts_ptr) { |
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index dcbf5c857743..ed8e58f09054 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c | |||
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism) | |||
89 | dma_addr_t dma_handle; | 89 | dma_addr_t dma_handle; |
90 | struct ism_sba *sba; | 90 | struct ism_sba *sba; |
91 | 91 | ||
92 | sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, | 92 | sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, |
93 | &dma_handle, GFP_KERNEL); | 93 | GFP_KERNEL); |
94 | if (!sba) | 94 | if (!sba) |
95 | return -ENOMEM; | 95 | return -ENOMEM; |
96 | 96 | ||
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism) | |||
116 | dma_addr_t dma_handle; | 116 | dma_addr_t dma_handle; |
117 | struct ism_eq *ieq; | 117 | struct ism_eq *ieq; |
118 | 118 | ||
119 | ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, | 119 | ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, |
120 | &dma_handle, GFP_KERNEL); | 120 | GFP_KERNEL); |
121 | if (!ieq) | 121 | if (!ieq) |
122 | return -ENOMEM; | 122 | return -ENOMEM; |
123 | 123 | ||
@@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) | |||
234 | test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) | 234 | test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) |
235 | return -EINVAL; | 235 | return -EINVAL; |
236 | 236 | ||
237 | dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, | 237 | dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, |
238 | &dmb->dma_addr, GFP_KERNEL | | 238 | &dmb->dma_addr, |
239 | __GFP_NOWARN | __GFP_NOMEMALLOC | | 239 | GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY); |
240 | __GFP_COMP | __GFP_NORETRY); | ||
241 | if (!dmb->cpu_addr) | 240 | if (!dmb->cpu_addr) |
242 | clear_bit(dmb->sba_idx, ism->sba_bitmap); | 241 | clear_bit(dmb->sba_idx, ism->sba_bitmap); |
243 | 242 | ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index e8f5f7c63190..cd096104bcec 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) | |||
646 | unsigned long *cpu_addr; | 646 | unsigned long *cpu_addr; |
647 | int retval = 1; | 647 | int retval = 1; |
648 | 648 | ||
649 | cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, | 649 | cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, |
650 | size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); | 650 | size * TW_Q_LENGTH, &dma_handle, |
651 | GFP_KERNEL); | ||
651 | if (!cpu_addr) { | 652 | if (!cpu_addr) { |
652 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); | 653 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); |
653 | goto out; | 654 | goto out; |
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index ff53fd0d12f2..66c514310f3c 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c | |||
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev, | |||
1123 | 1123 | ||
1124 | /* Get total memory needed for SCB */ | 1124 | /* Get total memory needed for SCB */ |
1125 | sz = ORC_MAXQUEUE * sizeof(struct orc_scb); | 1125 | sz = ORC_MAXQUEUE * sizeof(struct orc_scb); |
1126 | host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, | 1126 | host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys, |
1127 | GFP_KERNEL); | 1127 | GFP_KERNEL); |
1128 | if (!host->scb_virt) { | 1128 | if (!host->scb_virt) { |
1129 | printk("inia100: SCB memory allocation error\n"); | 1129 | printk("inia100: SCB memory allocation error\n"); |
1130 | goto out_host_put; | 1130 | goto out_host_put; |
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev, | |||
1132 | 1132 | ||
1133 | /* Get total memory needed for ESCB */ | 1133 | /* Get total memory needed for ESCB */ |
1134 | sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); | 1134 | sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); |
1135 | host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, | 1135 | host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys, |
1136 | GFP_KERNEL); | 1136 | GFP_KERNEL); |
1137 | if (!host->escb_virt) { | 1137 | if (!host->escb_virt) { |
1138 | printk("inia100: ESCB memory allocation error\n"); | 1138 | printk("inia100: ESCB memory allocation error\n"); |
1139 | goto out_free_scb_array; | 1139 | goto out_free_scb_array; |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 0f6751b0a633..57c6fa388bf6 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) | |||
587 | case ACB_ADAPTER_TYPE_B: { | 587 | case ACB_ADAPTER_TYPE_B: { |
588 | struct MessageUnit_B *reg; | 588 | struct MessageUnit_B *reg; |
589 | acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); | 589 | acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); |
590 | dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, | 590 | dma_coherent = dma_alloc_coherent(&pdev->dev, |
591 | &dma_coherent_handle, GFP_KERNEL); | 591 | acb->roundup_ccbsize, |
592 | &dma_coherent_handle, | ||
593 | GFP_KERNEL); | ||
592 | if (!dma_coherent) { | 594 | if (!dma_coherent) { |
593 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); | 595 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); |
594 | return false; | 596 | return false; |
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) | |||
617 | struct MessageUnit_D *reg; | 619 | struct MessageUnit_D *reg; |
618 | 620 | ||
619 | acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); | 621 | acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); |
620 | dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, | 622 | dma_coherent = dma_alloc_coherent(&pdev->dev, |
621 | &dma_coherent_handle, GFP_KERNEL); | 623 | acb->roundup_ccbsize, |
624 | &dma_coherent_handle, | ||
625 | GFP_KERNEL); | ||
622 | if (!dma_coherent) { | 626 | if (!dma_coherent) { |
623 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); | 627 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); |
624 | return false; | 628 | return false; |
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) | |||
659 | uint32_t completeQ_size; | 663 | uint32_t completeQ_size; |
660 | completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; | 664 | completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; |
661 | acb->roundup_ccbsize = roundup(completeQ_size, 32); | 665 | acb->roundup_ccbsize = roundup(completeQ_size, 32); |
662 | dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, | 666 | dma_coherent = dma_alloc_coherent(&pdev->dev, |
663 | &dma_coherent_handle, GFP_KERNEL); | 667 | acb->roundup_ccbsize, |
668 | &dma_coherent_handle, | ||
669 | GFP_KERNEL); | ||
664 | if (!dma_coherent){ | 670 | if (!dma_coherent){ |
665 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); | 671 | pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); |
666 | return false; | 672 | return false; |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 39f3820572b4..74e260027c7d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, | |||
3321 | q->len = len; | 3321 | q->len = len; |
3322 | q->entry_size = entry_size; | 3322 | q->entry_size = entry_size; |
3323 | mem->size = len * entry_size; | 3323 | mem->size = len * entry_size; |
3324 | mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, | 3324 | mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, |
3325 | GFP_KERNEL); | 3325 | GFP_KERNEL); |
3326 | if (!mem->va) | 3326 | if (!mem->va) |
3327 | return -ENOMEM; | 3327 | return -ENOMEM; |
3328 | return 0; | 3328 | return 0; |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index ca7b7bbc8371..d4febaadfaa3 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba, | |||
293 | struct be_dma_mem *cmd, | 293 | struct be_dma_mem *cmd, |
294 | u8 subsystem, u8 opcode, u32 size) | 294 | u8 subsystem, u8 opcode, u32 size) |
295 | { | 295 | { |
296 | cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, | 296 | cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, |
297 | GFP_KERNEL); | 297 | GFP_KERNEL); |
298 | if (!cmd->va) { | 298 | if (!cmd->va) { |
299 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, | 299 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, |
300 | "BG_%d : Failed to allocate memory for if info\n"); | 300 | "BG_%d : Failed to allocate memory for if info\n"); |
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
1510 | return -EINVAL; | 1510 | return -EINVAL; |
1511 | 1511 | ||
1512 | nonemb_cmd.size = sizeof(union be_invldt_cmds_params); | 1512 | nonemb_cmd.size = sizeof(union be_invldt_cmds_params); |
1513 | nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, | 1513 | nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, |
1514 | nonemb_cmd.size, | 1514 | nonemb_cmd.size, &nonemb_cmd.dma, |
1515 | &nonemb_cmd.dma, | 1515 | GFP_KERNEL); |
1516 | GFP_KERNEL); | ||
1517 | if (!nonemb_cmd.va) { | 1516 | if (!nonemb_cmd.va) { |
1518 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, | 1517 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, |
1519 | "BM_%d : invldt_cmds_params alloc failed\n"); | 1518 | "BM_%d : invldt_cmds_params alloc failed\n"); |
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 5d163ca1b366..d8e6d7480f35 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c | |||
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, | |||
3264 | /* Allocate dma coherent memory */ | 3264 | /* Allocate dma coherent memory */ |
3265 | buf_info = buf_base; | 3265 | buf_info = buf_base; |
3266 | buf_info->size = payload_len; | 3266 | buf_info->size = payload_len; |
3267 | buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, | 3267 | buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, |
3268 | buf_info->size, &buf_info->phys, | 3268 | buf_info->size, &buf_info->phys, |
3269 | GFP_KERNEL); | 3269 | GFP_KERNEL); |
3270 | if (!buf_info->virt) | 3270 | if (!buf_info->virt) |
3271 | goto out_free_mem; | 3271 | goto out_free_mem; |
3272 | 3272 | ||
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e8ae4d671d23..039328d9ef13 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) | |||
1857 | * entries. Hence the limit with one page is 8192 task context | 1857 | * entries. Hence the limit with one page is 8192 task context |
1858 | * entries. | 1858 | * entries. |
1859 | */ | 1859 | */ |
1860 | hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, | 1860 | hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, |
1861 | PAGE_SIZE, | 1861 | PAGE_SIZE, |
1862 | &hba->task_ctx_bd_dma, | 1862 | &hba->task_ctx_bd_dma, |
1863 | GFP_KERNEL); | 1863 | GFP_KERNEL); |
1864 | if (!hba->task_ctx_bd_tbl) { | 1864 | if (!hba->task_ctx_bd_tbl) { |
1865 | printk(KERN_ERR PFX "unable to allocate task context BDT\n"); | 1865 | printk(KERN_ERR PFX "unable to allocate task context BDT\n"); |
1866 | rc = -1; | 1866 | rc = -1; |
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) | |||
1894 | task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; | 1894 | task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; |
1895 | for (i = 0; i < task_ctx_arr_sz; i++) { | 1895 | for (i = 0; i < task_ctx_arr_sz; i++) { |
1896 | 1896 | ||
1897 | hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, | 1897 | hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, |
1898 | PAGE_SIZE, | 1898 | PAGE_SIZE, |
1899 | &hba->task_ctx_dma[i], | 1899 | &hba->task_ctx_dma[i], |
1900 | GFP_KERNEL); | 1900 | GFP_KERNEL); |
1901 | if (!hba->task_ctx[i]) { | 1901 | if (!hba->task_ctx[i]) { |
1902 | printk(KERN_ERR PFX "unable to alloc task context\n"); | 1902 | printk(KERN_ERR PFX "unable to alloc task context\n"); |
1903 | rc = -1; | 1903 | rc = -1; |
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |||
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | for (i = 0; i < segment_count; ++i) { | 2033 | for (i = 0; i < segment_count; ++i) { |
2034 | hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, | 2034 | hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, |
2035 | BNX2FC_HASH_TBL_CHUNK_SIZE, | 2035 | BNX2FC_HASH_TBL_CHUNK_SIZE, |
2036 | &dma_segment_array[i], | 2036 | &dma_segment_array[i], |
2037 | GFP_KERNEL); | 2037 | GFP_KERNEL); |
2038 | if (!hba->hash_tbl_segments[i]) { | 2038 | if (!hba->hash_tbl_segments[i]) { |
2039 | printk(KERN_ERR PFX "hash segment alloc failed\n"); | 2039 | printk(KERN_ERR PFX "hash segment alloc failed\n"); |
2040 | goto cleanup_dma; | 2040 | goto cleanup_dma; |
2041 | } | 2041 | } |
2042 | } | 2042 | } |
2043 | 2043 | ||
2044 | hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 2044 | hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, |
2045 | &hba->hash_tbl_pbl_dma, | 2045 | &hba->hash_tbl_pbl_dma, |
2046 | GFP_KERNEL); | 2046 | GFP_KERNEL); |
2047 | if (!hba->hash_tbl_pbl) { | 2047 | if (!hba->hash_tbl_pbl) { |
2048 | printk(KERN_ERR PFX "hash table pbl alloc failed\n"); | 2048 | printk(KERN_ERR PFX "hash table pbl alloc failed\n"); |
2049 | goto cleanup_dma; | 2049 | goto cleanup_dma; |
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) | |||
2104 | return -ENOMEM; | 2104 | return -ENOMEM; |
2105 | 2105 | ||
2106 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); | 2106 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); |
2107 | hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, | 2107 | hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, |
2108 | mem_size, | 2108 | &hba->t2_hash_tbl_ptr_dma, |
2109 | &hba->t2_hash_tbl_ptr_dma, | 2109 | GFP_KERNEL); |
2110 | GFP_KERNEL); | ||
2111 | if (!hba->t2_hash_tbl_ptr) { | 2110 | if (!hba->t2_hash_tbl_ptr) { |
2112 | printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); | 2111 | printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); |
2113 | bnx2fc_free_fw_resc(hba); | 2112 | bnx2fc_free_fw_resc(hba); |
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) | |||
2116 | 2115 | ||
2117 | mem_size = BNX2FC_NUM_MAX_SESS * | 2116 | mem_size = BNX2FC_NUM_MAX_SESS * |
2118 | sizeof(struct fcoe_t2_hash_table_entry); | 2117 | sizeof(struct fcoe_t2_hash_table_entry); |
2119 | hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, | 2118 | hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, |
2120 | &hba->t2_hash_tbl_dma, | 2119 | &hba->t2_hash_tbl_dma, |
2121 | GFP_KERNEL); | 2120 | GFP_KERNEL); |
2122 | if (!hba->t2_hash_tbl) { | 2121 | if (!hba->t2_hash_tbl) { |
2123 | printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); | 2122 | printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); |
2124 | bnx2fc_free_fw_resc(hba); | 2123 | bnx2fc_free_fw_resc(hba); |
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) | |||
2140 | return -ENOMEM; | 2139 | return -ENOMEM; |
2141 | } | 2140 | } |
2142 | 2141 | ||
2143 | hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 2142 | hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, |
2144 | &hba->stats_buf_dma, | 2143 | &hba->stats_buf_dma, |
2145 | GFP_KERNEL); | 2144 | GFP_KERNEL); |
2146 | if (!hba->stats_buffer) { | 2145 | if (!hba->stats_buffer) { |
2147 | printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); | 2146 | printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); |
2148 | bnx2fc_free_fw_resc(hba); | 2147 | bnx2fc_free_fw_resc(hba); |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index e3d1c7c440c8..d735e87e416a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
672 | tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 672 | tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
673 | CNIC_PAGE_MASK; | 673 | CNIC_PAGE_MASK; |
674 | 674 | ||
675 | tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 675 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
676 | &tgt->sq_dma, GFP_KERNEL); | 676 | &tgt->sq_dma, GFP_KERNEL); |
677 | if (!tgt->sq) { | 677 | if (!tgt->sq) { |
678 | printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", | 678 | printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", |
679 | tgt->sq_mem_size); | 679 | tgt->sq_mem_size); |
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
685 | tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 685 | tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
686 | CNIC_PAGE_MASK; | 686 | CNIC_PAGE_MASK; |
687 | 687 | ||
688 | tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 688 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
689 | &tgt->cq_dma, GFP_KERNEL); | 689 | &tgt->cq_dma, GFP_KERNEL); |
690 | if (!tgt->cq) { | 690 | if (!tgt->cq) { |
691 | printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", | 691 | printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", |
692 | tgt->cq_mem_size); | 692 | tgt->cq_mem_size); |
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
698 | tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 698 | tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
699 | CNIC_PAGE_MASK; | 699 | CNIC_PAGE_MASK; |
700 | 700 | ||
701 | tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, | 701 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, |
702 | &tgt->rq_dma, GFP_KERNEL); | 702 | &tgt->rq_dma, GFP_KERNEL); |
703 | if (!tgt->rq) { | 703 | if (!tgt->rq) { |
704 | printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", | 704 | printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", |
705 | tgt->rq_mem_size); | 705 | tgt->rq_mem_size); |
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
710 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & | 710 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & |
711 | CNIC_PAGE_MASK; | 711 | CNIC_PAGE_MASK; |
712 | 712 | ||
713 | tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, | 713 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, |
714 | &tgt->rq_pbl_dma, GFP_KERNEL); | 714 | &tgt->rq_pbl_dma, GFP_KERNEL); |
715 | if (!tgt->rq_pbl) { | 715 | if (!tgt->rq_pbl) { |
716 | printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", | 716 | printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", |
717 | tgt->rq_pbl_size); | 717 | tgt->rq_pbl_size); |
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
735 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 735 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
736 | CNIC_PAGE_MASK; | 736 | CNIC_PAGE_MASK; |
737 | 737 | ||
738 | tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, | 738 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, |
739 | tgt->xferq_mem_size, &tgt->xferq_dma, | 739 | tgt->xferq_mem_size, &tgt->xferq_dma, |
740 | GFP_KERNEL); | 740 | GFP_KERNEL); |
741 | if (!tgt->xferq) { | 741 | if (!tgt->xferq) { |
742 | printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", | 742 | printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", |
743 | tgt->xferq_mem_size); | 743 | tgt->xferq_mem_size); |
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
749 | tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 749 | tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
750 | CNIC_PAGE_MASK; | 750 | CNIC_PAGE_MASK; |
751 | 751 | ||
752 | tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, | 752 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, |
753 | tgt->confq_mem_size, &tgt->confq_dma, | 753 | tgt->confq_mem_size, &tgt->confq_dma, |
754 | GFP_KERNEL); | 754 | GFP_KERNEL); |
755 | if (!tgt->confq) { | 755 | if (!tgt->confq) { |
756 | printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", | 756 | printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", |
757 | tgt->confq_mem_size); | 757 | tgt->confq_mem_size); |
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
763 | tgt->confq_pbl_size = | 763 | tgt->confq_pbl_size = |
764 | (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; | 764 | (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
765 | 765 | ||
766 | tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, | 766 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, |
767 | tgt->confq_pbl_size, | 767 | tgt->confq_pbl_size, |
768 | &tgt->confq_pbl_dma, GFP_KERNEL); | 768 | &tgt->confq_pbl_dma, GFP_KERNEL); |
769 | if (!tgt->confq_pbl) { | 769 | if (!tgt->confq_pbl) { |
770 | printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", | 770 | printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", |
771 | tgt->confq_pbl_size); | 771 | tgt->confq_pbl_size); |
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
787 | /* Allocate and map ConnDB */ | 787 | /* Allocate and map ConnDB */ |
788 | tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); | 788 | tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); |
789 | 789 | ||
790 | tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, | 790 | tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, |
791 | tgt->conn_db_mem_size, | 791 | tgt->conn_db_mem_size, |
792 | &tgt->conn_db_dma, GFP_KERNEL); | 792 | &tgt->conn_db_dma, GFP_KERNEL); |
793 | if (!tgt->conn_db) { | 793 | if (!tgt->conn_db) { |
794 | printk(KERN_ERR PFX "unable to allocate conn_db %d\n", | 794 | printk(KERN_ERR PFX "unable to allocate conn_db %d\n", |
795 | tgt->conn_db_mem_size); | 795 | tgt->conn_db_mem_size); |
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
802 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & | 802 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
803 | CNIC_PAGE_MASK; | 803 | CNIC_PAGE_MASK; |
804 | 804 | ||
805 | tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, | 805 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, |
806 | &tgt->lcq_dma, GFP_KERNEL); | 806 | &tgt->lcq_dma, GFP_KERNEL); |
807 | 807 | ||
808 | if (!tgt->lcq) { | 808 | if (!tgt->lcq) { |
809 | printk(KERN_ERR PFX "unable to allocate lcq %d\n", | 809 | printk(KERN_ERR PFX "unable to allocate lcq %d\n", |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 91f5316aa3ab..fae6f71e677d 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
1070 | 1070 | ||
1071 | /* Allocate memory area for actual SQ element */ | 1071 | /* Allocate memory area for actual SQ element */ |
1072 | ep->qp.sq_virt = | 1072 | ep->qp.sq_virt = |
1073 | dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, | 1073 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, |
1074 | &ep->qp.sq_phys, GFP_KERNEL); | 1074 | &ep->qp.sq_phys, GFP_KERNEL); |
1075 | if (!ep->qp.sq_virt) { | 1075 | if (!ep->qp.sq_virt) { |
1076 | printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", | 1076 | printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", |
1077 | ep->qp.sq_mem_size); | 1077 | ep->qp.sq_mem_size); |
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
1106 | 1106 | ||
1107 | /* Allocate memory area for actual CQ element */ | 1107 | /* Allocate memory area for actual CQ element */ |
1108 | ep->qp.cq_virt = | 1108 | ep->qp.cq_virt = |
1109 | dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, | 1109 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, |
1110 | &ep->qp.cq_phys, GFP_KERNEL); | 1110 | &ep->qp.cq_phys, GFP_KERNEL); |
1111 | if (!ep->qp.cq_virt) { | 1111 | if (!ep->qp.cq_virt) { |
1112 | printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", | 1112 | printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", |
1113 | ep->qp.cq_mem_size); | 1113 | ep->qp.cq_mem_size); |
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index dc12933533d5..66bbd21819ae 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c | |||
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, | |||
233 | 233 | ||
234 | q = wrm->q_arr[free_idx]; | 234 | q = wrm->q_arr[free_idx]; |
235 | 235 | ||
236 | q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, | 236 | q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, |
237 | GFP_KERNEL); | 237 | GFP_KERNEL); |
238 | if (!q->vstart) { | 238 | if (!q->vstart) { |
239 | csio_err(hw, | 239 | csio_err(hw, |
240 | "Failed to allocate DMA memory for " | 240 | "Failed to allocate DMA memory for " |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 8698af86485d..2dc564e59430 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) | |||
2730 | INIT_LIST_HEAD(&dmabuf->list); | 2730 | INIT_LIST_HEAD(&dmabuf->list); |
2731 | 2731 | ||
2732 | /* now, allocate dma buffer */ | 2732 | /* now, allocate dma buffer */ |
2733 | dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, | 2733 | dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, |
2734 | &(dmabuf->phys), GFP_KERNEL); | 2734 | &(dmabuf->phys), GFP_KERNEL); |
2735 | 2735 | ||
2736 | if (!dmabuf->virt) { | 2736 | if (!dmabuf->virt) { |
2737 | kfree(dmabuf); | 2737 | kfree(dmabuf); |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c1c36812c3d2..bede11e16349 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | |||
6973 | if (!dmabuf) | 6973 | if (!dmabuf) |
6974 | return NULL; | 6974 | return NULL; |
6975 | 6975 | ||
6976 | dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, | 6976 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
6977 | LPFC_HDR_TEMPLATE_SIZE, | 6977 | LPFC_HDR_TEMPLATE_SIZE, |
6978 | &dmabuf->phys, GFP_KERNEL); | 6978 | &dmabuf->phys, GFP_KERNEL); |
6979 | if (!dmabuf->virt) { | 6979 | if (!dmabuf->virt) { |
6980 | rpi_hdr = NULL; | 6980 | rpi_hdr = NULL; |
6981 | goto err_free_dmabuf; | 6981 | goto err_free_dmabuf; |
@@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) | |||
7397 | } | 7397 | } |
7398 | 7398 | ||
7399 | /* Allocate memory for SLI-2 structures */ | 7399 | /* Allocate memory for SLI-2 structures */ |
7400 | phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, | 7400 | phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, |
7401 | &phba->slim2p.phys, GFP_KERNEL); | 7401 | &phba->slim2p.phys, GFP_KERNEL); |
7402 | if (!phba->slim2p.virt) | 7402 | if (!phba->slim2p.virt) |
7403 | goto out_iounmap; | 7403 | goto out_iounmap; |
7404 | 7404 | ||
@@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) | |||
7816 | * plus an alignment restriction of 16 bytes. | 7816 | * plus an alignment restriction of 16 bytes. |
7817 | */ | 7817 | */ |
7818 | bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); | 7818 | bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); |
7819 | dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, | 7819 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, |
7820 | &dmabuf->phys, GFP_KERNEL); | 7820 | &dmabuf->phys, GFP_KERNEL); |
7821 | if (!dmabuf->virt) { | 7821 | if (!dmabuf->virt) { |
7822 | kfree(dmabuf); | 7822 | kfree(dmabuf); |
7823 | return -ENOMEM; | 7823 | return -ENOMEM; |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index f6a5083a621e..4d3b94317515 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, | |||
1827 | * page, this is used as a priori size of SLI4_PAGE_SIZE for | 1827 | * page, this is used as a priori size of SLI4_PAGE_SIZE for |
1828 | * the later DMA memory free. | 1828 | * the later DMA memory free. |
1829 | */ | 1829 | */ |
1830 | viraddr = dma_zalloc_coherent(&phba->pcidev->dev, | 1830 | viraddr = dma_alloc_coherent(&phba->pcidev->dev, |
1831 | SLI4_PAGE_SIZE, &phyaddr, | 1831 | SLI4_PAGE_SIZE, &phyaddr, |
1832 | GFP_KERNEL); | 1832 | GFP_KERNEL); |
1833 | /* In case of malloc fails, proceed with whatever we have */ | 1833 | /* In case of malloc fails, proceed with whatever we have */ |
1834 | if (!viraddr) | 1834 | if (!viraddr) |
1835 | break; | 1835 | break; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 30734caf77e1..12fd74761ae0 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
5362 | * mailbox command. | 5362 | * mailbox command. |
5363 | */ | 5363 | */ |
5364 | dma_size = *vpd_size; | 5364 | dma_size = *vpd_size; |
5365 | dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, | 5365 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, |
5366 | &dmabuf->phys, GFP_KERNEL); | 5366 | &dmabuf->phys, GFP_KERNEL); |
5367 | if (!dmabuf->virt) { | 5367 | if (!dmabuf->virt) { |
5368 | kfree(dmabuf); | 5368 | kfree(dmabuf); |
5369 | return -ENOMEM; | 5369 | return -ENOMEM; |
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, | |||
6300 | goto free_mem; | 6300 | goto free_mem; |
6301 | } | 6301 | } |
6302 | 6302 | ||
6303 | dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, | 6303 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
6304 | LPFC_RAS_MAX_ENTRY_SIZE, | 6304 | LPFC_RAS_MAX_ENTRY_SIZE, |
6305 | &dmabuf->phys, | 6305 | &dmabuf->phys, GFP_KERNEL); |
6306 | GFP_KERNEL); | ||
6307 | if (!dmabuf->virt) { | 6306 | if (!dmabuf->virt) { |
6308 | kfree(dmabuf); | 6307 | kfree(dmabuf); |
6309 | rc = -ENOMEM; | 6308 | rc = -ENOMEM; |
@@ -14613,9 +14612,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, | |||
14613 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | 14612 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
14614 | if (!dmabuf) | 14613 | if (!dmabuf) |
14615 | goto out_fail; | 14614 | goto out_fail; |
14616 | dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, | 14615 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
14617 | hw_page_size, &dmabuf->phys, | 14616 | hw_page_size, &dmabuf->phys, |
14618 | GFP_KERNEL); | 14617 | GFP_KERNEL); |
14619 | if (!dmabuf->virt) { | 14618 | if (!dmabuf->virt) { |
14620 | kfree(dmabuf); | 14619 | kfree(dmabuf); |
14621 | goto out_fail; | 14620 | goto out_fail; |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index e836392b75e8..f112458023ff 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
967 | * Allocate the common 16-byte aligned memory for the handshake | 967 | * Allocate the common 16-byte aligned memory for the handshake |
968 | * mailbox. | 968 | * mailbox. |
969 | */ | 969 | */ |
970 | raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, | 970 | raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev, |
971 | sizeof(mbox64_t), &raid_dev->una_mbox64_dma, | 971 | sizeof(mbox64_t), |
972 | GFP_KERNEL); | 972 | &raid_dev->una_mbox64_dma, |
973 | GFP_KERNEL); | ||
973 | 974 | ||
974 | if (!raid_dev->una_mbox64) { | 975 | if (!raid_dev->una_mbox64) { |
975 | con_log(CL_ANN, (KERN_WARNING | 976 | con_log(CL_ANN, (KERN_WARNING |
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
995 | align; | 996 | align; |
996 | 997 | ||
997 | // Allocate memory for commands issued internally | 998 | // Allocate memory for commands issued internally |
998 | adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, | 999 | adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, |
999 | &adapter->ibuf_dma_h, GFP_KERNEL); | 1000 | &adapter->ibuf_dma_h, GFP_KERNEL); |
1000 | if (!adapter->ibuf) { | 1001 | if (!adapter->ibuf) { |
1001 | 1002 | ||
1002 | con_log(CL_ANN, (KERN_WARNING | 1003 | con_log(CL_ANN, (KERN_WARNING |
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter) | |||
2897 | * Issue an ENQUIRY3 command to find out certain adapter parameters, | 2898 | * Issue an ENQUIRY3 command to find out certain adapter parameters, |
2898 | * e.g., max channels, max commands etc. | 2899 | * e.g., max channels, max commands etc. |
2899 | */ | 2900 | */ |
2900 | pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), | 2901 | pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), |
2901 | &pinfo_dma_h, GFP_KERNEL); | 2902 | &pinfo_dma_h, GFP_KERNEL); |
2902 | if (pinfo == NULL) { | 2903 | if (pinfo == NULL) { |
2903 | con_log(CL_ANN, (KERN_WARNING | 2904 | con_log(CL_ANN, (KERN_WARNING |
2904 | "megaraid: out of memory, %s %d\n", __func__, | 2905 | "megaraid: out of memory, %s %d\n", __func__, |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f7bdd783360a..7eaa400f6328 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, | |||
2273 | sizeof(struct MR_LD_VF_AFFILIATION_111)); | 2273 | sizeof(struct MR_LD_VF_AFFILIATION_111)); |
2274 | else { | 2274 | else { |
2275 | new_affiliation_111 = | 2275 | new_affiliation_111 = |
2276 | dma_zalloc_coherent(&instance->pdev->dev, | 2276 | dma_alloc_coherent(&instance->pdev->dev, |
2277 | sizeof(struct MR_LD_VF_AFFILIATION_111), | 2277 | sizeof(struct MR_LD_VF_AFFILIATION_111), |
2278 | &new_affiliation_111_h, GFP_KERNEL); | 2278 | &new_affiliation_111_h, GFP_KERNEL); |
2279 | if (!new_affiliation_111) { | 2279 | if (!new_affiliation_111) { |
2280 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " | 2280 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " |
2281 | "memory for new affiliation for scsi%d\n", | 2281 | "memory for new affiliation for scsi%d\n", |
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, | |||
2380 | sizeof(struct MR_LD_VF_AFFILIATION)); | 2380 | sizeof(struct MR_LD_VF_AFFILIATION)); |
2381 | else { | 2381 | else { |
2382 | new_affiliation = | 2382 | new_affiliation = |
2383 | dma_zalloc_coherent(&instance->pdev->dev, | 2383 | dma_alloc_coherent(&instance->pdev->dev, |
2384 | (MAX_LOGICAL_DRIVES + 1) * | 2384 | (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), |
2385 | sizeof(struct MR_LD_VF_AFFILIATION), | 2385 | &new_affiliation_h, GFP_KERNEL); |
2386 | &new_affiliation_h, GFP_KERNEL); | ||
2387 | if (!new_affiliation) { | 2386 | if (!new_affiliation) { |
2388 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " | 2387 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " |
2389 | "memory for new affiliation for scsi%d\n", | 2388 | "memory for new affiliation for scsi%d\n", |
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, | |||
2546 | 2545 | ||
2547 | if (initial) { | 2546 | if (initial) { |
2548 | instance->hb_host_mem = | 2547 | instance->hb_host_mem = |
2549 | dma_zalloc_coherent(&instance->pdev->dev, | 2548 | dma_alloc_coherent(&instance->pdev->dev, |
2550 | sizeof(struct MR_CTRL_HB_HOST_MEM), | 2549 | sizeof(struct MR_CTRL_HB_HOST_MEM), |
2551 | &instance->hb_host_mem_h, GFP_KERNEL); | 2550 | &instance->hb_host_mem_h, |
2551 | GFP_KERNEL); | ||
2552 | if (!instance->hb_host_mem) { | 2552 | if (!instance->hb_host_mem) { |
2553 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" | 2553 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" |
2554 | " memory for heartbeat host memory for scsi%d\n", | 2554 | " memory for heartbeat host memory for scsi%d\n", |
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance, | |||
5816 | } | 5816 | } |
5817 | 5817 | ||
5818 | dcmd = &cmd->frame->dcmd; | 5818 | dcmd = &cmd->frame->dcmd; |
5819 | el_info = dma_zalloc_coherent(&instance->pdev->dev, | 5819 | el_info = dma_alloc_coherent(&instance->pdev->dev, |
5820 | sizeof(struct megasas_evt_log_info), &el_info_h, | 5820 | sizeof(struct megasas_evt_log_info), |
5821 | GFP_KERNEL); | 5821 | &el_info_h, GFP_KERNEL); |
5822 | if (!el_info) { | 5822 | if (!el_info) { |
5823 | megasas_return_cmd(instance, cmd); | 5823 | megasas_return_cmd(instance, cmd); |
5824 | return -ENOMEM; | 5824 | return -ENOMEM; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 211c17c33aa0..a9a25f0eaf6f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -689,8 +689,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) | |||
689 | array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * | 689 | array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * |
690 | MAX_MSIX_QUEUES_FUSION; | 690 | MAX_MSIX_QUEUES_FUSION; |
691 | 691 | ||
692 | fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, | 692 | fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev, |
693 | array_size, &fusion->rdpq_phys, GFP_KERNEL); | 693 | array_size, &fusion->rdpq_phys, |
694 | GFP_KERNEL); | ||
694 | if (!fusion->rdpq_virt) { | 695 | if (!fusion->rdpq_virt) { |
695 | dev_err(&instance->pdev->dev, | 696 | dev_err(&instance->pdev->dev, |
696 | "Failed from %s %d\n", __func__, __LINE__); | 697 | "Failed from %s %d\n", __func__, __LINE__); |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index f3e182eb0970..c9dc7740e9e7 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) | |||
1915 | /* We use the PCI APIs for now until the generic one gets fixed | 1915 | /* We use the PCI APIs for now until the generic one gets fixed |
1916 | * enough or until we get some macio-specific versions | 1916 | * enough or until we get some macio-specific versions |
1917 | */ | 1917 | */ |
1918 | dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, | 1918 | dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev, |
1919 | ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); | 1919 | ms->dma_cmd_size, &dma_cmd_bus, |
1920 | GFP_KERNEL); | ||
1920 | if (dma_cmd_space == NULL) { | 1921 | if (dma_cmd_space == NULL) { |
1921 | printk(KERN_ERR "mesh: can't allocate DMA table\n"); | 1922 | printk(KERN_ERR "mesh: can't allocate DMA table\n"); |
1922 | goto out_unmap; | 1923 | goto out_unmap; |
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index dbe753fba486..36f64205ecfa 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c | |||
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, | |||
143 | 143 | ||
144 | case RESOURCE_UNCACHED_MEMORY: | 144 | case RESOURCE_UNCACHED_MEMORY: |
145 | size = round_up(size, 8); | 145 | size = round_up(size, 8); |
146 | res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, | 146 | res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, |
147 | &res->bus_addr, GFP_KERNEL); | 147 | &res->bus_addr, |
148 | GFP_KERNEL); | ||
148 | if (!res->virt_addr) { | 149 | if (!res->virt_addr) { |
149 | dev_err(&mhba->pdev->dev, | 150 | dev_err(&mhba->pdev->dev, |
150 | "unable to allocate consistent mem," | 151 | "unable to allocate consistent mem," |
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, | |||
246 | if (size == 0) | 247 | if (size == 0) |
247 | return 0; | 248 | return 0; |
248 | 249 | ||
249 | virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, | 250 | virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, |
250 | GFP_KERNEL); | 251 | GFP_KERNEL); |
251 | if (!virt_addr) | 252 | if (!virt_addr) |
252 | return -1; | 253 | return -1; |
253 | 254 | ||
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b3be49d41375..4c5a3d23e010 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, | |||
116 | u64 align_offset = 0; | 116 | u64 align_offset = 0; |
117 | if (align) | 117 | if (align) |
118 | align_offset = (dma_addr_t)align - 1; | 118 | align_offset = (dma_addr_t)align - 1; |
119 | mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, | 119 | mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, |
120 | &mem_dma_handle, GFP_KERNEL); | 120 | &mem_dma_handle, GFP_KERNEL); |
121 | if (!mem_virt_alloc) { | 121 | if (!mem_virt_alloc) { |
122 | pm8001_printk("memory allocation error\n"); | 122 | pm8001_printk("memory allocation error\n"); |
123 | return -1; | 123 | return -1; |
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index edcaf4b0cb0b..9bbc19fc190b 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c | |||
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) | |||
1050 | sizeof(void *); | 1050 | sizeof(void *); |
1051 | fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; | 1051 | fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; |
1052 | 1052 | ||
1053 | fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, | 1053 | fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, |
1054 | fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); | 1054 | &fcport->sq_dma, GFP_KERNEL); |
1055 | if (!fcport->sq) { | 1055 | if (!fcport->sq) { |
1056 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); | 1056 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); |
1057 | rval = 1; | 1057 | rval = 1; |
1058 | goto out; | 1058 | goto out; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, | 1061 | fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, |
1062 | fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); | 1062 | fcport->sq_pbl_size, |
1063 | &fcport->sq_pbl_dma, GFP_KERNEL); | ||
1063 | if (!fcport->sq_pbl) { | 1064 | if (!fcport->sq_pbl) { |
1064 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); | 1065 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); |
1065 | rval = 1; | 1066 | rval = 1; |
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) | |||
2680 | } | 2681 | } |
2681 | 2682 | ||
2682 | /* Allocate list of PBL pages */ | 2683 | /* Allocate list of PBL pages */ |
2683 | qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, | 2684 | qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, |
2684 | QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); | 2685 | QEDF_PAGE_SIZE, |
2686 | &qedf->bdq_pbl_list_dma, | ||
2687 | GFP_KERNEL); | ||
2685 | if (!qedf->bdq_pbl_list) { | 2688 | if (!qedf->bdq_pbl_list) { |
2686 | QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); | 2689 | QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); |
2687 | return -ENOMEM; | 2690 | return -ENOMEM; |
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) | |||
2770 | ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); | 2773 | ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); |
2771 | 2774 | ||
2772 | qedf->global_queues[i]->cq = | 2775 | qedf->global_queues[i]->cq = |
2773 | dma_zalloc_coherent(&qedf->pdev->dev, | 2776 | dma_alloc_coherent(&qedf->pdev->dev, |
2774 | qedf->global_queues[i]->cq_mem_size, | 2777 | qedf->global_queues[i]->cq_mem_size, |
2775 | &qedf->global_queues[i]->cq_dma, GFP_KERNEL); | 2778 | &qedf->global_queues[i]->cq_dma, |
2779 | GFP_KERNEL); | ||
2776 | 2780 | ||
2777 | if (!qedf->global_queues[i]->cq) { | 2781 | if (!qedf->global_queues[i]->cq) { |
2778 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); | 2782 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); |
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) | |||
2781 | } | 2785 | } |
2782 | 2786 | ||
2783 | qedf->global_queues[i]->cq_pbl = | 2787 | qedf->global_queues[i]->cq_pbl = |
2784 | dma_zalloc_coherent(&qedf->pdev->dev, | 2788 | dma_alloc_coherent(&qedf->pdev->dev, |
2785 | qedf->global_queues[i]->cq_pbl_size, | 2789 | qedf->global_queues[i]->cq_pbl_size, |
2786 | &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); | 2790 | &qedf->global_queues[i]->cq_pbl_dma, |
2791 | GFP_KERNEL); | ||
2787 | 2792 | ||
2788 | if (!qedf->global_queues[i]->cq_pbl) { | 2793 | if (!qedf->global_queues[i]->cq_pbl) { |
2789 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); | 2794 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 5c53409a8cea..e74a62448ba4 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) | |||
1394 | { | 1394 | { |
1395 | struct qedi_nvm_iscsi_image nvm_image; | 1395 | struct qedi_nvm_iscsi_image nvm_image; |
1396 | 1396 | ||
1397 | qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, | 1397 | qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, |
1398 | sizeof(nvm_image), | 1398 | sizeof(nvm_image), |
1399 | &qedi->nvm_buf_dma, | 1399 | &qedi->nvm_buf_dma, GFP_KERNEL); |
1400 | GFP_KERNEL); | ||
1401 | if (!qedi->iscsi_image) { | 1400 | if (!qedi->iscsi_image) { |
1402 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); | 1401 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); |
1403 | return -ENOMEM; | 1402 | return -ENOMEM; |
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi) | |||
1510 | } | 1509 | } |
1511 | 1510 | ||
1512 | /* Allocate list of PBL pages */ | 1511 | /* Allocate list of PBL pages */ |
1513 | qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, | 1512 | qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, |
1514 | QEDI_PAGE_SIZE, | 1513 | QEDI_PAGE_SIZE, |
1515 | &qedi->bdq_pbl_list_dma, | 1514 | &qedi->bdq_pbl_list_dma, |
1516 | GFP_KERNEL); | 1515 | GFP_KERNEL); |
1517 | if (!qedi->bdq_pbl_list) { | 1516 | if (!qedi->bdq_pbl_list) { |
1518 | QEDI_ERR(&qedi->dbg_ctx, | 1517 | QEDI_ERR(&qedi->dbg_ctx, |
1519 | "Could not allocate list of PBL pages.\n"); | 1518 | "Could not allocate list of PBL pages.\n"); |
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) | |||
1609 | (qedi->global_queues[i]->cq_pbl_size + | 1608 | (qedi->global_queues[i]->cq_pbl_size + |
1610 | (QEDI_PAGE_SIZE - 1)); | 1609 | (QEDI_PAGE_SIZE - 1)); |
1611 | 1610 | ||
1612 | qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, | 1611 | qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev, |
1613 | qedi->global_queues[i]->cq_mem_size, | 1612 | qedi->global_queues[i]->cq_mem_size, |
1614 | &qedi->global_queues[i]->cq_dma, | 1613 | &qedi->global_queues[i]->cq_dma, |
1615 | GFP_KERNEL); | 1614 | GFP_KERNEL); |
1616 | 1615 | ||
1617 | if (!qedi->global_queues[i]->cq) { | 1616 | if (!qedi->global_queues[i]->cq) { |
1618 | QEDI_WARN(&qedi->dbg_ctx, | 1617 | QEDI_WARN(&qedi->dbg_ctx, |
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) | |||
1620 | status = -ENOMEM; | 1619 | status = -ENOMEM; |
1621 | goto mem_alloc_failure; | 1620 | goto mem_alloc_failure; |
1622 | } | 1621 | } |
1623 | qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, | 1622 | qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev, |
1624 | qedi->global_queues[i]->cq_pbl_size, | 1623 | qedi->global_queues[i]->cq_pbl_size, |
1625 | &qedi->global_queues[i]->cq_pbl_dma, | 1624 | &qedi->global_queues[i]->cq_pbl_dma, |
1626 | GFP_KERNEL); | 1625 | GFP_KERNEL); |
1627 | 1626 | ||
1628 | if (!qedi->global_queues[i]->cq_pbl) { | 1627 | if (!qedi->global_queues[i]->cq_pbl) { |
1629 | QEDI_WARN(&qedi->dbg_ctx, | 1628 | QEDI_WARN(&qedi->dbg_ctx, |
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) | |||
1691 | ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); | 1690 | ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); |
1692 | ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; | 1691 | ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; |
1693 | 1692 | ||
1694 | ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, | 1693 | ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, |
1695 | &ep->sq_dma, GFP_KERNEL); | 1694 | &ep->sq_dma, GFP_KERNEL); |
1696 | if (!ep->sq) { | 1695 | if (!ep->sq) { |
1697 | QEDI_WARN(&qedi->dbg_ctx, | 1696 | QEDI_WARN(&qedi->dbg_ctx, |
1698 | "Could not allocate send queue.\n"); | 1697 | "Could not allocate send queue.\n"); |
1699 | rval = -ENOMEM; | 1698 | rval = -ENOMEM; |
1700 | goto out; | 1699 | goto out; |
1701 | } | 1700 | } |
1702 | ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, | 1701 | ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, |
1703 | &ep->sq_pbl_dma, GFP_KERNEL); | 1702 | &ep->sq_pbl_dma, GFP_KERNEL); |
1704 | if (!ep->sq_pbl) { | 1703 | if (!ep->sq_pbl) { |
1705 | QEDI_WARN(&qedi->dbg_ctx, | 1704 | QEDI_WARN(&qedi->dbg_ctx, |
1706 | "Could not allocate send queue PBL.\n"); | 1705 | "Could not allocate send queue PBL.\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 00444dc79756..ac504a1ff0ff 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
2415 | if (qla2x00_chip_is_down(vha)) | 2415 | if (qla2x00_chip_is_down(vha)) |
2416 | goto done; | 2416 | goto done; |
2417 | 2417 | ||
2418 | stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), | 2418 | stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, |
2419 | &stats_dma, GFP_KERNEL); | 2419 | GFP_KERNEL); |
2420 | if (!stats) { | 2420 | if (!stats) { |
2421 | ql_log(ql_log_warn, vha, 0x707d, | 2421 | ql_log(ql_log_warn, vha, 0x707d, |
2422 | "Failed to allocate memory for stats.\n"); | 2422 | "Failed to allocate memory for stats.\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 4a9fd8d944d6..17d42658ad9a 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c | |||
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job) | |||
2312 | if (!IS_FWI2_CAPABLE(ha)) | 2312 | if (!IS_FWI2_CAPABLE(ha)) |
2313 | return -EPERM; | 2313 | return -EPERM; |
2314 | 2314 | ||
2315 | stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), | 2315 | stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, |
2316 | &stats_dma, GFP_KERNEL); | 2316 | GFP_KERNEL); |
2317 | if (!stats) { | 2317 | if (!stats) { |
2318 | ql_log(ql_log_warn, vha, 0x70e2, | 2318 | ql_log(ql_log_warn, vha, 0x70e2, |
2319 | "Failed to allocate memory for stats.\n"); | 2319 | "Failed to allocate memory for stats.\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 90cfa394f942..cbc3bc49d4d1 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) | |||
4147 | return rval; | 4147 | return rval; |
4148 | } | 4148 | } |
4149 | 4149 | ||
4150 | sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( | 4150 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
4151 | &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), | 4151 | sizeof(struct ct_sns_pkt), |
4152 | &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); | 4152 | &sp->u.iocb_cmd.u.ctarg.req_dma, |
4153 | GFP_KERNEL); | ||
4153 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | 4154 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); |
4154 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 4155 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
4155 | ql_log(ql_log_warn, vha, 0xffff, | 4156 | ql_log(ql_log_warn, vha, 0xffff, |
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) | |||
4165 | ((vha->hw->max_fibre_devices - 1) * | 4166 | ((vha->hw->max_fibre_devices - 1) * |
4166 | sizeof(struct ct_sns_gpn_ft_data)); | 4167 | sizeof(struct ct_sns_gpn_ft_data)); |
4167 | 4168 | ||
4168 | sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( | 4169 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
4169 | &vha->hw->pdev->dev, rspsz, | 4170 | rspsz, |
4170 | &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); | 4171 | &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
4172 | GFP_KERNEL); | ||
4171 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | 4173 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); |
4172 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 4174 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
4173 | ql_log(ql_log_warn, vha, 0xffff, | 4175 | ql_log(ql_log_warn, vha, 0xffff, |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 364bb52ed2a6..aeeb0144bd55 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) | |||
3099 | FCE_SIZE, ha->fce, ha->fce_dma); | 3099 | FCE_SIZE, ha->fce, ha->fce_dma); |
3100 | 3100 | ||
3101 | /* Allocate memory for Fibre Channel Event Buffer. */ | 3101 | /* Allocate memory for Fibre Channel Event Buffer. */ |
3102 | tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, | 3102 | tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, |
3103 | GFP_KERNEL); | 3103 | GFP_KERNEL); |
3104 | if (!tc) { | 3104 | if (!tc) { |
3105 | ql_log(ql_log_warn, vha, 0x00be, | 3105 | ql_log(ql_log_warn, vha, 0x00be, |
3106 | "Unable to allocate (%d KB) for FCE.\n", | 3106 | "Unable to allocate (%d KB) for FCE.\n", |
@@ -3131,8 +3131,8 @@ try_eft: | |||
3131 | EFT_SIZE, ha->eft, ha->eft_dma); | 3131 | EFT_SIZE, ha->eft, ha->eft_dma); |
3132 | 3132 | ||
3133 | /* Allocate memory for Extended Trace Buffer. */ | 3133 | /* Allocate memory for Extended Trace Buffer. */ |
3134 | tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, | 3134 | tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, |
3135 | GFP_KERNEL); | 3135 | GFP_KERNEL); |
3136 | if (!tc) { | 3136 | if (!tc) { |
3137 | ql_log(ql_log_warn, vha, 0x00c1, | 3137 | ql_log(ql_log_warn, vha, 0x00c1, |
3138 | "Unable to allocate (%d KB) for EFT.\n", | 3138 | "Unable to allocate (%d KB) for EFT.\n", |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1ef74aa2d00a..2bf5e3e639e1 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha) | |||
153 | dma_addr_t sys_info_dma; | 153 | dma_addr_t sys_info_dma; |
154 | int status = QLA_ERROR; | 154 | int status = QLA_ERROR; |
155 | 155 | ||
156 | sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), | 156 | sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), |
157 | &sys_info_dma, GFP_KERNEL); | 157 | &sys_info_dma, GFP_KERNEL); |
158 | if (sys_info == NULL) { | 158 | if (sys_info == NULL) { |
159 | DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", | 159 | DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", |
160 | ha->host_no, __func__)); | 160 | ha->host_no, __func__)); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 5d56904687b9..dac9a7013208 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) | |||
625 | uint32_t mbox_sts[MBOX_REG_COUNT]; | 625 | uint32_t mbox_sts[MBOX_REG_COUNT]; |
626 | int status = QLA_ERROR; | 626 | int status = QLA_ERROR; |
627 | 627 | ||
628 | init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, | 628 | init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, |
629 | sizeof(struct addr_ctrl_blk), | 629 | sizeof(struct addr_ctrl_blk), |
630 | &init_fw_cb_dma, GFP_KERNEL); | 630 | &init_fw_cb_dma, GFP_KERNEL); |
631 | if (init_fw_cb == NULL) { | 631 | if (init_fw_cb == NULL) { |
632 | DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", | 632 | DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", |
633 | ha->host_no, __func__)); | 633 | ha->host_no, __func__)); |
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) | |||
709 | uint32_t mbox_cmd[MBOX_REG_COUNT]; | 709 | uint32_t mbox_cmd[MBOX_REG_COUNT]; |
710 | uint32_t mbox_sts[MBOX_REG_COUNT]; | 710 | uint32_t mbox_sts[MBOX_REG_COUNT]; |
711 | 711 | ||
712 | init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, | 712 | init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, |
713 | sizeof(struct addr_ctrl_blk), | 713 | sizeof(struct addr_ctrl_blk), |
714 | &init_fw_cb_dma, GFP_KERNEL); | 714 | &init_fw_cb_dma, GFP_KERNEL); |
715 | if (init_fw_cb == NULL) { | 715 | if (init_fw_cb == NULL) { |
716 | printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, | 716 | printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, |
717 | __func__); | 717 | __func__); |
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha) | |||
1340 | uint32_t mbox_sts[MBOX_REG_COUNT]; | 1340 | uint32_t mbox_sts[MBOX_REG_COUNT]; |
1341 | int status = QLA_ERROR; | 1341 | int status = QLA_ERROR; |
1342 | 1342 | ||
1343 | about_fw = dma_zalloc_coherent(&ha->pdev->dev, | 1343 | about_fw = dma_alloc_coherent(&ha->pdev->dev, |
1344 | sizeof(struct about_fw_info), | 1344 | sizeof(struct about_fw_info), |
1345 | &about_fw_dma, GFP_KERNEL); | 1345 | &about_fw_dma, GFP_KERNEL); |
1346 | if (!about_fw) { | 1346 | if (!about_fw) { |
1347 | DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " | 1347 | DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " |
1348 | "for about_fw\n", __func__)); | 1348 | "for about_fw\n", __func__)); |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index d2b333d629be..5a31877c9d04 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) | |||
4052 | dma_addr_t sys_info_dma; | 4052 | dma_addr_t sys_info_dma; |
4053 | int status = QLA_ERROR; | 4053 | int status = QLA_ERROR; |
4054 | 4054 | ||
4055 | sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), | 4055 | sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), |
4056 | &sys_info_dma, GFP_KERNEL); | 4056 | &sys_info_dma, GFP_KERNEL); |
4057 | if (sys_info == NULL) { | 4057 | if (sys_info == NULL) { |
4058 | DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", | 4058 | DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", |
4059 | ha->host_no, __func__)); | 4059 | ha->host_no, __func__)); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 949e186cc5d7..cfdfcda28072 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) | |||
2704 | uint32_t rem = len; | 2704 | uint32_t rem = len; |
2705 | struct nlattr *attr; | 2705 | struct nlattr *attr; |
2706 | 2706 | ||
2707 | init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, | 2707 | init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, |
2708 | sizeof(struct addr_ctrl_blk), | 2708 | sizeof(struct addr_ctrl_blk), |
2709 | &init_fw_cb_dma, GFP_KERNEL); | 2709 | &init_fw_cb_dma, GFP_KERNEL); |
2710 | if (!init_fw_cb) { | 2710 | if (!init_fw_cb) { |
2711 | ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", | 2711 | ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", |
2712 | __func__); | 2712 | __func__); |
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) | |||
4206 | sizeof(struct shadow_regs) + | 4206 | sizeof(struct shadow_regs) + |
4207 | MEM_ALIGN_VALUE + | 4207 | MEM_ALIGN_VALUE + |
4208 | (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); | 4208 | (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); |
4209 | ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, | 4209 | ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, |
4210 | &ha->queues_dma, GFP_KERNEL); | 4210 | &ha->queues_dma, GFP_KERNEL); |
4211 | if (ha->queues == NULL) { | 4211 | if (ha->queues == NULL) { |
4212 | ql4_printk(KERN_WARNING, ha, | 4212 | ql4_printk(KERN_WARNING, ha, |
4213 | "Memory Allocation failed - queues.\n"); | 4213 | "Memory Allocation failed - queues.\n"); |
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index e2fa3f476227..7bde6c809442 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c | |||
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) | |||
3576 | alloc_length += PQI_EXTRA_SGL_MEMORY; | 3576 | alloc_length += PQI_EXTRA_SGL_MEMORY; |
3577 | 3577 | ||
3578 | ctrl_info->queue_memory_base = | 3578 | ctrl_info->queue_memory_base = |
3579 | dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | 3579 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
3580 | alloc_length, | 3580 | &ctrl_info->queue_memory_base_dma_handle, |
3581 | &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); | 3581 | GFP_KERNEL); |
3582 | 3582 | ||
3583 | if (!ctrl_info->queue_memory_base) | 3583 | if (!ctrl_info->queue_memory_base) |
3584 | return -ENOMEM; | 3584 | return -ENOMEM; |
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) | |||
3715 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; | 3715 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; |
3716 | 3716 | ||
3717 | ctrl_info->admin_queue_memory_base = | 3717 | ctrl_info->admin_queue_memory_base = |
3718 | dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | 3718 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
3719 | alloc_length, | 3719 | &ctrl_info->admin_queue_memory_base_dma_handle, |
3720 | &ctrl_info->admin_queue_memory_base_dma_handle, | 3720 | GFP_KERNEL); |
3721 | GFP_KERNEL); | ||
3722 | 3721 | ||
3723 | if (!ctrl_info->admin_queue_memory_base) | 3722 | if (!ctrl_info->admin_queue_memory_base) |
3724 | return -ENOMEM; | 3723 | return -ENOMEM; |
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) | |||
4602 | 4601 | ||
4603 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) | 4602 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) |
4604 | { | 4603 | { |
4605 | ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, | 4604 | ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, |
4606 | ctrl_info->error_buffer_length, | 4605 | ctrl_info->error_buffer_length, |
4607 | &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); | 4606 | &ctrl_info->error_buffer_dma_handle, |
4607 | GFP_KERNEL); | ||
4608 | 4608 | ||
4609 | if (!ctrl_info->error_buffer) | 4609 | if (!ctrl_info->error_buffer) |
4610 | return -ENOMEM; | 4610 | return -ENOMEM; |
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, | |||
7487 | dma_addr_t dma_handle; | 7487 | dma_addr_t dma_handle; |
7488 | 7488 | ||
7489 | ctrl_info->pqi_ofa_chunk_virt_addr[i] = | 7489 | ctrl_info->pqi_ofa_chunk_virt_addr[i] = |
7490 | dma_zalloc_coherent(dev, chunk_size, &dma_handle, | 7490 | dma_alloc_coherent(dev, chunk_size, &dma_handle, |
7491 | GFP_KERNEL); | 7491 | GFP_KERNEL); |
7492 | 7492 | ||
7493 | if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) | 7493 | if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) |
7494 | break; | 7494 | break; |
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, | |||
7545 | struct device *dev; | 7545 | struct device *dev; |
7546 | 7546 | ||
7547 | dev = &ctrl_info->pci_dev->dev; | 7547 | dev = &ctrl_info->pci_dev->dev; |
7548 | pqi_ofa_memory = dma_zalloc_coherent(dev, | 7548 | pqi_ofa_memory = dma_alloc_coherent(dev, |
7549 | PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, | 7549 | PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, |
7550 | &ctrl_info->pqi_ofa_mem_dma_handle, | 7550 | &ctrl_info->pqi_ofa_mem_dma_handle, |
7551 | GFP_KERNEL); | 7551 | GFP_KERNEL); |
7552 | 7552 | ||
7553 | if (!pqi_ofa_memory) | 7553 | if (!pqi_ofa_memory) |
7554 | return; | 7554 | return; |
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c index 9436aa83ff1b..e6d48dccb8d5 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.c +++ b/drivers/soc/fsl/qbman/dpaa_sys.c | |||
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, | |||
62 | return -ENODEV; | 62 | return -ENODEV; |
63 | } | 63 | } |
64 | 64 | ||
65 | if (!dma_zalloc_coherent(dev, *size, addr, 0)) { | 65 | if (!dma_alloc_coherent(dev, *size, addr, 0)) { |
66 | dev_err(dev, "DMA Alloc memory failed\n"); | 66 | dev_err(dev, "DMA Alloc memory failed\n"); |
67 | return -ENODEV; | 67 | return -ENODEV; |
68 | } | 68 | } |
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c index d7e4e18ec3df..1ae9af5f17ec 100644 --- a/drivers/spi/spi-pic32-sqi.c +++ b/drivers/spi/spi-pic32-sqi.c | |||
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi) | |||
466 | int i; | 466 | int i; |
467 | 467 | ||
468 | /* allocate coherent DMAable memory for hardware buffer descriptors. */ | 468 | /* allocate coherent DMAable memory for hardware buffer descriptors. */ |
469 | sqi->bd = dma_zalloc_coherent(&sqi->master->dev, | 469 | sqi->bd = dma_alloc_coherent(&sqi->master->dev, |
470 | sizeof(*bd) * PESQI_BD_COUNT, | 470 | sizeof(*bd) * PESQI_BD_COUNT, |
471 | &sqi->bd_dma, GFP_KERNEL); | 471 | &sqi->bd_dma, GFP_KERNEL); |
472 | if (!sqi->bd) { | 472 | if (!sqi->bd) { |
473 | dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); | 473 | dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); |
474 | return -ENOMEM; | 474 | return -ENOMEM; |
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c index 21a76a8ccc26..6027b19f7bc2 100644 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.c +++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c | |||
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth) | |||
1396 | if (!ring->tx_buf) | 1396 | if (!ring->tx_buf) |
1397 | goto no_tx_mem; | 1397 | goto no_tx_mem; |
1398 | 1398 | ||
1399 | ring->tx_dma = dma_zalloc_coherent(eth->dev, | 1399 | ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, |
1400 | ring->tx_ring_size * sz, | ||
1401 | &ring->tx_phys, | 1400 | &ring->tx_phys, |
1402 | GFP_ATOMIC | __GFP_ZERO); | 1401 | GFP_ATOMIC | __GFP_ZERO); |
1403 | if (!ring->tx_dma) | 1402 | if (!ring->tx_dma) |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index 338b6e952515..dd4898861b83 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | |||
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type) | |||
407 | /* Allocate enough storage to hold the page pointers and the page | 407 | /* Allocate enough storage to hold the page pointers and the page |
408 | * list | 408 | * list |
409 | */ | 409 | */ |
410 | pagelist = dma_zalloc_coherent(g_dev, | 410 | pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr, |
411 | pagelist_size, | 411 | GFP_KERNEL); |
412 | &dma_addr, | ||
413 | GFP_KERNEL); | ||
414 | 412 | ||
415 | vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); | 413 | vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); |
416 | 414 | ||
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 1ab0e8562d40..c9097e7367d8 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c | |||
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv) | |||
440 | void *vir_pool; | 440 | void *vir_pool; |
441 | 441 | ||
442 | /*allocate all RD/TD rings a single pool*/ | 442 | /*allocate all RD/TD rings a single pool*/ |
443 | vir_pool = dma_zalloc_coherent(&priv->pcid->dev, | 443 | vir_pool = dma_alloc_coherent(&priv->pcid->dev, |
444 | priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + | 444 | priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), |
445 | priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + | 445 | &priv->pool_dma, GFP_ATOMIC); |
446 | priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + | ||
447 | priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc), | ||
448 | &priv->pool_dma, GFP_ATOMIC); | ||
449 | if (!vir_pool) { | 446 | if (!vir_pool) { |
450 | dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); | 447 | dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); |
451 | return false; | 448 | return false; |
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv) | |||
459 | priv->rd1_pool_dma = priv->rd0_pool_dma + | 456 | priv->rd1_pool_dma = priv->rd0_pool_dma + |
460 | priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); | 457 | priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); |
461 | 458 | ||
462 | priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, | 459 | priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev, |
463 | priv->opts.tx_descs[0] * PKT_BUF_SZ + | 460 | priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE, |
464 | priv->opts.tx_descs[1] * PKT_BUF_SZ + | 461 | &priv->tx_bufs_dma0, GFP_ATOMIC); |
465 | CB_BEACON_BUF_SIZE + | ||
466 | CB_MAX_BUF_SIZE, | ||
467 | &priv->tx_bufs_dma0, | ||
468 | GFP_ATOMIC); | ||
469 | if (!priv->tx0_bufs) { | 462 | if (!priv->tx0_bufs) { |
470 | dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); | 463 | dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); |
471 | 464 | ||
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 01b44e159623..ccbd1d34eb2a 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c | |||
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc) | |||
172 | /* Refer to BDC spec, Table 4 for description of SPB */ | 172 | /* Refer to BDC spec, Table 4 for description of SPB */ |
173 | sp_buff_size = 1 << (sp_buff_size + 5); | 173 | sp_buff_size = 1 << (sp_buff_size + 5); |
174 | dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); | 174 | dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); |
175 | bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, | 175 | bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size, |
176 | &bdc->scratchpad.sp_dma, GFP_KERNEL); | 176 | &bdc->scratchpad.sp_dma, |
177 | GFP_KERNEL); | ||
177 | 178 | ||
178 | if (!bdc->scratchpad.buff) | 179 | if (!bdc->scratchpad.buff) |
179 | goto fail; | 180 | goto fail; |
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter) | |||
202 | bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); | 203 | bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); |
203 | bdc->srr.dqp_index = 0; | 204 | bdc->srr.dqp_index = 0; |
204 | /* allocate the status report descriptors */ | 205 | /* allocate the status report descriptors */ |
205 | bdc->srr.sr_bds = dma_zalloc_coherent( | 206 | bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev, |
206 | bdc->dev, | 207 | NUM_SR_ENTRIES * sizeof(struct bdc_bd), |
207 | NUM_SR_ENTRIES * sizeof(struct bdc_bd), | 208 | &bdc->srr.dma_addr, GFP_KERNEL); |
208 | &bdc->srr.dma_addr, | ||
209 | GFP_KERNEL); | ||
210 | if (!bdc->srr.sr_bds) | 209 | if (!bdc->srr.sr_bds) |
211 | return -ENOMEM; | 210 | return -ENOMEM; |
212 | 211 | ||
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 6218bfe54f52..98deb5f64268 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c | |||
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd) | |||
596 | &uhci_debug_operations); | 596 | &uhci_debug_operations); |
597 | #endif | 597 | #endif |
598 | 598 | ||
599 | uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), | 599 | uhci->frame = dma_alloc_coherent(uhci_dev(uhci), |
600 | UHCI_NUMFRAMES * sizeof(*uhci->frame), | 600 | UHCI_NUMFRAMES * sizeof(*uhci->frame), |
601 | &uhci->frame_dma_handle, GFP_KERNEL); | 601 | &uhci->frame_dma_handle, GFP_KERNEL); |
602 | if (!uhci->frame) { | 602 | if (!uhci->frame) { |
603 | dev_err(uhci_dev(uhci), | 603 | dev_err(uhci_dev(uhci), |
604 | "unable to allocate consistent memory for frame list\n"); | 604 | "unable to allocate consistent memory for frame list\n"); |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 36a3eb8849f1..8067f178fa84 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | |||
1672 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); | 1672 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); |
1673 | for (i = 0; i < num_sp; i++) { | 1673 | for (i = 0; i < num_sp; i++) { |
1674 | dma_addr_t dma; | 1674 | dma_addr_t dma; |
1675 | void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, | 1675 | void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, |
1676 | flags); | 1676 | flags); |
1677 | if (!buf) | 1677 | if (!buf) |
1678 | goto fail_sp4; | 1678 | goto fail_sp4; |
1679 | 1679 | ||
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, | |||
1799 | struct xhci_erst_entry *entry; | 1799 | struct xhci_erst_entry *entry; |
1800 | 1800 | ||
1801 | size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; | 1801 | size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; |
1802 | erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, | 1802 | erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, |
1803 | size, &erst->erst_dma_addr, flags); | 1803 | size, &erst->erst_dma_addr, flags); |
1804 | if (!erst->entries) | 1804 | if (!erst->entries) |
1805 | return -ENOMEM; | 1805 | return -ENOMEM; |
1806 | 1806 | ||
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index a74096c53cb5..43f2a4816860 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c | |||
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device) | |||
1446 | da8xx_fb_fix.line_length - 1; | 1446 | da8xx_fb_fix.line_length - 1; |
1447 | 1447 | ||
1448 | /* allocate palette buffer */ | 1448 | /* allocate palette buffer */ |
1449 | par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, | 1449 | par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE, |
1450 | &par->p_palette_base, | 1450 | &par->p_palette_base, |
1451 | GFP_KERNEL | GFP_DMA); | 1451 | GFP_KERNEL | GFP_DMA); |
1452 | if (!par->v_palette_base) { | 1452 | if (!par->v_palette_base) { |
1453 | dev_err(&device->dev, | 1453 | dev_err(&device->dev, |
1454 | "GLCD: kmalloc for palette buffer failed\n"); | 1454 | "GLCD: kmalloc for palette buffer failed\n"); |
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index cb1adf0b78a9..249d4d7fbf18 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h | |||
@@ -24,7 +24,7 @@ static inline void * | |||
24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, | 24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, |
25 | dma_addr_t *dma_handle) | 25 | dma_addr_t *dma_handle) |
26 | { | 26 | { |
27 | return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); | 27 | return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); |
28 | } | 28 | } |
29 | 29 | ||
30 | static inline void | 30 | static inline void |
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c index c3f57a3fb1a5..40ebde2e1ab1 100644 --- a/sound/aoa/soundbus/i2sbus/core.c +++ b/sound/aoa/soundbus/i2sbus/core.c | |||
@@ -47,8 +47,8 @@ static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev, | |||
47 | /* We use the PCI APIs for now until the generic one gets fixed | 47 | /* We use the PCI APIs for now until the generic one gets fixed |
48 | * enough or until we get some macio-specific versions | 48 | * enough or until we get some macio-specific versions |
49 | */ | 49 | */ |
50 | r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, | 50 | r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, |
51 | r->size, &r->bus_addr, GFP_KERNEL); | 51 | r->size, &r->bus_addr, GFP_KERNEL); |
52 | if (!r->space) | 52 | if (!r->space) |
53 | return -ENOMEM; | 53 | return -ENOMEM; |
54 | 54 | ||
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c index 7609eceba1a2..9e71d7cda999 100644 --- a/sound/sparc/dbri.c +++ b/sound/sparc/dbri.c | |||
@@ -2541,8 +2541,8 @@ static int snd_dbri_create(struct snd_card *card, | |||
2541 | dbri->op = op; | 2541 | dbri->op = op; |
2542 | dbri->irq = irq; | 2542 | dbri->irq = irq; |
2543 | 2543 | ||
2544 | dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), | 2544 | dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma), |
2545 | &dbri->dma_dvma, GFP_KERNEL); | 2545 | &dbri->dma_dvma, GFP_KERNEL); |
2546 | if (!dbri->dma) | 2546 | if (!dbri->dma) |
2547 | return -ENOMEM; | 2547 | return -ENOMEM; |
2548 | 2548 | ||