diff options
author | Daniel Mack <zonque@gmail.com> | 2013-08-10 12:52:22 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2013-08-14 04:25:16 -0400 |
commit | 6fc4573c4e17eb57f6e1a20992dcf0b6a105afdb (patch) | |
tree | 4a3e10a868da8fdff40bd68a42e0aa8346e42905 /drivers/dma | |
parent | 8fd6aac3a8234d4ffd0ea514ba2aba9488888a36 (diff) |
dma: mmp_pdma: add support for byte-aligned transfers
The PXA DMA controller has a DALGN register which allows for
byte-aligned DMA transfers. Use it in case any of the transfer
descriptors is not aligned to a mask of ~0x7.
Signed-off-by: Daniel Mack <zonque@gmail.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/mmp_pdma.c | 17 |
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index b426e55f8316..dba9fe9cbbbe 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -109,6 +109,7 @@ struct mmp_pdma_chan { | |||
109 | struct list_head chain_pending; /* Link descriptors queue for pending */ | 109 | struct list_head chain_pending; /* Link descriptors queue for pending */ |
110 | struct list_head chain_running; /* Link descriptors queue for running */ | 110 | struct list_head chain_running; /* Link descriptors queue for running */ |
111 | bool idle; /* channel statue machine */ | 111 | bool idle; /* channel statue machine */ |
112 | bool byte_align; | ||
112 | 113 | ||
113 | struct dma_pool *desc_pool; /* Descriptors pool */ | 114 | struct dma_pool *desc_pool; /* Descriptors pool */ |
114 | }; | 115 | }; |
@@ -142,7 +143,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | |||
142 | 143 | ||
143 | static void enable_chan(struct mmp_pdma_phy *phy) | 144 | static void enable_chan(struct mmp_pdma_phy *phy) |
144 | { | 145 | { |
145 | u32 reg; | 146 | u32 reg, dalgn; |
146 | 147 | ||
147 | if (!phy->vchan) | 148 | if (!phy->vchan) |
148 | return; | 149 | return; |
@@ -150,6 +151,13 @@ static void enable_chan(struct mmp_pdma_phy *phy) | |||
150 | reg = DRCMR(phy->vchan->drcmr); | 151 | reg = DRCMR(phy->vchan->drcmr); |
151 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | 152 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); |
152 | 153 | ||
154 | dalgn = readl(phy->base + DALGN); | ||
155 | if (phy->vchan->byte_align) | ||
156 | dalgn |= 1 << phy->idx; | ||
157 | else | ||
158 | dalgn &= ~(1 << phy->idx); | ||
159 | writel(dalgn, phy->base + DALGN); | ||
160 | |||
153 | reg = (phy->idx << 2) + DCSR; | 161 | reg = (phy->idx << 2) + DCSR; |
154 | writel(readl(phy->base + reg) | DCSR_RUN, | 162 | writel(readl(phy->base + reg) | DCSR_RUN, |
155 | phy->base + reg); | 163 | phy->base + reg); |
@@ -455,6 +463,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, | |||
455 | return NULL; | 463 | return NULL; |
456 | 464 | ||
457 | chan = to_mmp_pdma_chan(dchan); | 465 | chan = to_mmp_pdma_chan(dchan); |
466 | chan->byte_align = false; | ||
458 | 467 | ||
459 | if (!chan->dir) { | 468 | if (!chan->dir) { |
460 | chan->dir = DMA_MEM_TO_MEM; | 469 | chan->dir = DMA_MEM_TO_MEM; |
@@ -471,6 +480,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, | |||
471 | } | 480 | } |
472 | 481 | ||
473 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | 482 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); |
483 | if (dma_src & 0x7 || dma_dst & 0x7) | ||
484 | chan->byte_align = true; | ||
474 | 485 | ||
475 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | 486 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); |
476 | new->desc.dsadr = dma_src; | 487 | new->desc.dsadr = dma_src; |
@@ -530,12 +541,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
530 | if ((sgl == NULL) || (sg_len == 0)) | 541 | if ((sgl == NULL) || (sg_len == 0)) |
531 | return NULL; | 542 | return NULL; |
532 | 543 | ||
544 | chan->byte_align = false; | ||
545 | |||
533 | for_each_sg(sgl, sg, sg_len, i) { | 546 | for_each_sg(sgl, sg, sg_len, i) { |
534 | addr = sg_dma_address(sg); | 547 | addr = sg_dma_address(sg); |
535 | avail = sg_dma_len(sgl); | 548 | avail = sg_dma_len(sgl); |
536 | 549 | ||
537 | do { | 550 | do { |
538 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | 551 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); |
552 | if (addr & 0x7) | ||
553 | chan->byte_align = true; | ||
539 | 554 | ||
540 | /* allocate and populate the descriptor */ | 555 | /* allocate and populate the descriptor */ |
541 | new = mmp_pdma_alloc_descriptor(chan); | 556 | new = mmp_pdma_alloc_descriptor(chan); |