author	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:16 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:16 -0500
commit	74465b4ff9ac1da503025c0a0042e023bfa6505c (patch)
tree	ce63f4a4b055b65cae1edaddd334931bf512c76e /drivers/dma
parent	33df8ca068123457db56c316946a3c0e4ef787d6 (diff)
atmel-mci: convert to dma_request_channel and down-level dma_slave
dma_request_channel provides an exclusive channel, so we no longer need
to pass slave data through dmaengine.

Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
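For context, dma_request_channel() takes a capability mask plus an optional filter callback and hands back a channel the caller owns exclusively, which is why the per-client slave plumbing below can go away. A minimal sketch of how a driver might request a channel this way is shown next; the filter() and grab_channel() helpers are illustrative names, not the actual atmel-mci code:

/*
 * Sketch only: request an exclusive DMA_SLAVE channel through
 * dma_request_channel() instead of passing slave data via a dma_client.
 */
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

/* Accept only channels backed by the controller named in the
 * platform's dw_dma_slave data. */
static bool filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = param;

	return dws->dma_dev == chan->device->dev;
}

static struct dma_chan *grab_channel(struct dw_dma_slave *dws)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Exclusive grant: NULL on failure, otherwise ours until
	 * released. */
	return dma_request_channel(mask, filter, dws);
}

A driver that obtains a channel this way returns it with dma_release_channel(chan) when it is done.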
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dmaengine.c	8
-rw-r--r--	drivers/dma/dw_dmac.c	25
2 files changed, 6 insertions(+), 27 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 7a0594f24a3..90aca505a1d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -234,10 +234,6 @@ static void dma_client_chan_alloc(struct dma_client *client)
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
-		/* Does the client require a specific DMA controller? */
-		if (client->slave && client->slave->dma_dev
-		    && client->slave->dma_dev != device->dev)
-			continue;
 		if (!dma_device_satisfies_mask(device, client->cap_mask))
 			continue;
 
@@ -613,10 +609,6 @@ void dma_async_client_register(struct dma_client *client)
 	struct dma_chan *chan;
 	int err;
 
-	/* validate client data */
-	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
-	       !client->slave);
-
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count++;
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 377dafa37a2..dbd50804e5d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -567,7 +567,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (unlikely(!dws || !sg_len))
 		return NULL;
 
-	reg_width = dws->slave.reg_width;
+	reg_width = dws->reg_width;
 	prev = first = NULL;
 
 	sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
@@ -579,7 +579,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| DWC_CTLL_DST_FIX
 				| DWC_CTLL_SRC_INC
 				| DWC_CTLL_FC_M2P);
-		reg = dws->slave.tx_reg;
+		reg = dws->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
 			u32 len;
@@ -625,7 +625,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| DWC_CTLL_SRC_FIX
 				| DWC_CTLL_FC_P2M);
 
-		reg = dws->slave.rx_reg;
+		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
 			u32 len;
@@ -764,7 +764,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc;
-	struct dma_slave *slave;
 	struct dw_dma_slave *dws;
 	int i;
 	u32 cfghi;
@@ -772,12 +771,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 
 	dev_vdbg(&chan->dev, "alloc_chan_resources\n");
 
-	/* Channels doing slave DMA can only handle one client. */
-	if (dwc->dws || (client && client->slave)) {
-		if (chan->client_count)
-			return -EBUSY;
-	}
-
 	/* ASSERT: channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
 		dev_dbg(&chan->dev, "DMA channel not idle?\n");
@@ -789,23 +782,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 	cfghi = DWC_CFGH_FIFO_MODE;
 	cfglo = 0;
 
-	slave = client->slave;
-	if (slave) {
+	dws = dwc->dws;
+	if (dws) {
 		/*
 		 * We need controller-specific data to set up slave
 		 * transfers.
 		 */
-		BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
-
-		dws = container_of(slave, struct dw_dma_slave, slave);
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
-		dwc->dws = dws;
 		cfghi = dws->cfg_hi;
 		cfglo = dws->cfg_lo;
-	} else {
-		dwc->dws = NULL;
 	}
-
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
 