about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2016-02-10 17:00:21 -0500
committerVinod Koul <vinod.koul@intel.com>2016-02-15 12:36:53 -0500
commit679cfbf79b4eb7d7d81195e6b9ab98106fd78a54 (patch)
tree32955ecf1140219e6739bf9cfb5a046440dd35ac
parent92e963f50fc74041b5e9e744c330dca48e04f08d (diff)
dmaengine: IOATDMA: Convert pci_pool_* to dma_pool_*
Converting old pci_pool_* calls to "new" dma_pool_* to make everything uniform.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/ioat/dma.h4
-rw-r--r--drivers/dma/ioat/init.c20
3 files changed, 15 insertions, 15 deletions
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..7a04c16a0bfa 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -298,14 +298,14 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
298 dma_addr_t phys; 298 dma_addr_t phys;
299 299
300 ioat_dma = to_ioatdma_device(chan->device); 300 ioat_dma = to_ioatdma_device(chan->device);
301 hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys); 301 hw = dma_pool_alloc(ioat_dma->dma_pool, flags, &phys);
302 if (!hw) 302 if (!hw)
303 return NULL; 303 return NULL;
304 memset(hw, 0, sizeof(*hw)); 304 memset(hw, 0, sizeof(*hw));
305 305
306 desc = kmem_cache_zalloc(ioat_cache, flags); 306 desc = kmem_cache_zalloc(ioat_cache, flags);
307 if (!desc) { 307 if (!desc) {
308 pci_pool_free(ioat_dma->dma_pool, hw, phys); 308 dma_pool_free(ioat_dma->dma_pool, hw, phys);
309 return NULL; 309 return NULL;
310 } 310 }
311 311
@@ -321,7 +321,7 @@ void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
321 struct ioatdma_device *ioat_dma; 321 struct ioatdma_device *ioat_dma;
322 322
323 ioat_dma = to_ioatdma_device(chan->device); 323 ioat_dma = to_ioatdma_device(chan->device);
324 pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys); 324 dma_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
325 kmem_cache_free(ioat_cache, desc); 325 kmem_cache_free(ioat_cache, desc);
326} 326}
327 327
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8f48074789f..f471092440d3 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -76,8 +76,8 @@ enum ioat_irq_mode {
76struct ioatdma_device { 76struct ioatdma_device {
77 struct pci_dev *pdev; 77 struct pci_dev *pdev;
78 void __iomem *reg_base; 78 void __iomem *reg_base;
79 struct pci_pool *dma_pool; 79 struct dma_pool *dma_pool;
80 struct pci_pool *completion_pool; 80 struct dma_pool *completion_pool;
81#define MAX_SED_POOLS 5 81#define MAX_SED_POOLS 5
82 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; 82 struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
83 struct dma_device dma_dev; 83 struct dma_device dma_dev;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..b02b63b719db 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -505,7 +505,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
505 struct device *dev = &pdev->dev; 505 struct device *dev = &pdev->dev;
506 506
507 /* DMA coherent memory pool for DMA descriptor allocations */ 507 /* DMA coherent memory pool for DMA descriptor allocations */
508 ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, 508 ioat_dma->dma_pool = dma_pool_create("dma_desc_pool", dev,
509 sizeof(struct ioat_dma_descriptor), 509 sizeof(struct ioat_dma_descriptor),
510 64, 0); 510 64, 0);
511 if (!ioat_dma->dma_pool) { 511 if (!ioat_dma->dma_pool) {
@@ -513,7 +513,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
513 goto err_dma_pool; 513 goto err_dma_pool;
514 } 514 }
515 515
516 ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, 516 ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
517 sizeof(u64), 517 sizeof(u64),
518 SMP_CACHE_BYTES, 518 SMP_CACHE_BYTES,
519 SMP_CACHE_BYTES); 519 SMP_CACHE_BYTES);
@@ -546,9 +546,9 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
546err_self_test: 546err_self_test:
547 ioat_disable_interrupts(ioat_dma); 547 ioat_disable_interrupts(ioat_dma);
548err_setup_interrupts: 548err_setup_interrupts:
549 pci_pool_destroy(ioat_dma->completion_pool); 549 dma_pool_destroy(ioat_dma->completion_pool);
550err_completion_pool: 550err_completion_pool:
551 pci_pool_destroy(ioat_dma->dma_pool); 551 dma_pool_destroy(ioat_dma->dma_pool);
552err_dma_pool: 552err_dma_pool:
553 return err; 553 return err;
554} 554}
@@ -559,8 +559,8 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
559 559
560 if (err) { 560 if (err) {
561 ioat_disable_interrupts(ioat_dma); 561 ioat_disable_interrupts(ioat_dma);
562 pci_pool_destroy(ioat_dma->completion_pool); 562 dma_pool_destroy(ioat_dma->completion_pool);
563 pci_pool_destroy(ioat_dma->dma_pool); 563 dma_pool_destroy(ioat_dma->dma_pool);
564 } 564 }
565 565
566 return err; 566 return err;
@@ -576,8 +576,8 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
576 576
577 dma_async_device_unregister(dma); 577 dma_async_device_unregister(dma);
578 578
579 pci_pool_destroy(ioat_dma->dma_pool); 579 dma_pool_destroy(ioat_dma->dma_pool);
580 pci_pool_destroy(ioat_dma->completion_pool); 580 dma_pool_destroy(ioat_dma->completion_pool);
581 581
582 INIT_LIST_HEAD(&dma->channels); 582 INIT_LIST_HEAD(&dma->channels);
583} 583}
@@ -669,7 +669,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
669 kfree(ioat_chan->ring); 669 kfree(ioat_chan->ring);
670 ioat_chan->ring = NULL; 670 ioat_chan->ring = NULL;
671 ioat_chan->alloc_order = 0; 671 ioat_chan->alloc_order = 0;
672 pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, 672 dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
673 ioat_chan->completion_dma); 673 ioat_chan->completion_dma);
674 spin_unlock_bh(&ioat_chan->prep_lock); 674 spin_unlock_bh(&ioat_chan->prep_lock);
675 spin_unlock_bh(&ioat_chan->cleanup_lock); 675 spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
701 /* allocate a completion writeback area */ 701 /* allocate a completion writeback area */
702 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 702 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
703 ioat_chan->completion = 703 ioat_chan->completion =
704 pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, 704 dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
705 GFP_KERNEL, &ioat_chan->completion_dma); 705 GFP_KERNEL, &ioat_chan->completion_dma);
706 if (!ioat_chan->completion) 706 if (!ioat_chan->completion)
707 return -ENOMEM; 707 return -ENOMEM;