author    Vinod Koul <vkoul@kernel.org>	2018-06-04 00:59:12 -0400
committer Vinod Koul <vkoul@kernel.org>	2018-06-04 00:59:12 -0400
commit    d97594f16c9f30ba94e6f575b2bb92bb0aa88dd2 (patch)
tree      9385c1fa0de9792aabaabd8bbc3702a19e5e2440
parent    d9939da4141edf56b00bb9c858d58639f0bd7e76 (diff)
parent    d317d32b4f9ad68626fb527bde151f025a7bfbb1 (diff)

Merge branch 'topic/stm' into for-linus

 drivers/dma/stm32-mdma.c | 100 ++++++++++++++++++------------
 1 file changed, 59 insertions(+), 41 deletions(-)
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index daa1602eb9f5..9dc450b7ace6 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -252,13 +252,17 @@ struct stm32_mdma_hwdesc {
 	u32 cmdr;
 } __aligned(64);
 
+struct stm32_mdma_desc_node {
+	struct stm32_mdma_hwdesc *hwdesc;
+	dma_addr_t hwdesc_phys;
+};
+
 struct stm32_mdma_desc {
 	struct virt_dma_desc vdesc;
 	u32 ccr;
-	struct stm32_mdma_hwdesc *hwdesc;
-	dma_addr_t hwdesc_phys;
 	bool cyclic;
 	u32 count;
+	struct stm32_mdma_desc_node node[];
 };
 
 struct stm32_mdma_chan {
@@ -344,30 +348,42 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
 		struct stm32_mdma_chan *chan, u32 count)
 {
 	struct stm32_mdma_desc *desc;
+	int i;
 
-	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
 
-	desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
-				      &desc->hwdesc_phys);
-	if (!desc->hwdesc) {
-		dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
-		kfree(desc);
-		return NULL;
-	}
+	for (i = 0; i < count; i++) {
+		desc->node[i].hwdesc =
+			dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+				       &desc->node[i].hwdesc_phys);
+		if (!desc->node[i].hwdesc)
+			goto err;
+	}
 
 	desc->count = count;
 
 	return desc;
+
+err:
+	dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+	while (--i >= 0)
+		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+			      desc->node[i].hwdesc_phys);
+	kfree(desc);
+	return NULL;
 }
 
 static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
 {
 	struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
 	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+	int i;
 
-	dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+	for (i = 0; i < desc->count; i++)
+		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+			      desc->node[i].hwdesc_phys);
 	kfree(desc);
 }
 
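The hunk above replaces one contiguous hwdesc array with a flexible array of nodes sized in a single allocation. As a rough userspace sketch of that sizing idiom (hypothetical stand-in types, not the driver's), the header plus its count trailing nodes come from one zeroed allocation; the kernel code spells the same size as offsetof(typeof(*desc), node[count]):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's types, illustration only. */
struct node {
	void *hwdesc;
	unsigned long hwdesc_phys;
};

struct desc {
	unsigned int count;
	struct node node[];	/* flexible array member, must be last */
};

static struct desc *alloc_desc(unsigned int count)
{
	/* One zeroed allocation covers the header and 'count' nodes. */
	struct desc *d = calloc(1, sizeof(*d) + count * sizeof(d->node[0]));

	if (d)
		d->count = count;
	return d;
}

int main(void)
{
	struct desc *d = alloc_desc(4);

	if (d)
		printf("4-node desc: %zu bytes\n",
		       sizeof(*d) + 4 * sizeof(d->node[0]));
	free(d);
	return 0;
}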
@@ -410,13 +426,10 @@ static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
 static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
 				     enum dma_slave_buswidth width)
 {
-	u32 best_burst = max_burst;
-	u32 burst_len = best_burst * width;
+	u32 best_burst;
 
-	while ((burst_len > 0) && (tlen % burst_len)) {
-		best_burst = best_burst >> 1;
-		burst_len = best_burst * width;
-	}
+	best_burst = min((u32)1 << __ffs(tlen | buf_len),
+			 max_burst * width) / width;
 
 	return (best_burst > 0) ? best_burst : 1;
 }
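The rewrite above replaces the halving loop with a bit trick: for non-zero lengths, 1 << __ffs(tlen | buf_len) is the largest power of two dividing both tlen and buf_len (the OR keeps the smaller trailing-zero count), capped at max_burst * width bytes and converted back to beats. A small userspace check of the same arithmetic, substituting POSIX ffs() for the kernel's __ffs() (illustrative only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int get_best_burst(unsigned int buf_len, unsigned int tlen,
				   unsigned int max_burst, unsigned int width)
{
	/* ffs() is 1-based, so subtract 1 for the trailing-zero count;
	 * tlen and buf_len must be non-zero here. */
	unsigned int pow2 = 1u << (ffs((int)(tlen | buf_len)) - 1);
	unsigned int cap = max_burst * width;
	unsigned int best = (pow2 < cap ? pow2 : cap) / width;

	return best > 0 ? best : 1;
}

int main(void)
{
	/* tlen = 96 and buf_len = 64 share a factor of 32 bytes; with a
	 * 4-byte bus width and max_burst = 16 that gives 8 beats. */
	printf("best burst: %u beats\n", get_best_burst(64, 96, 16, 4));
	return 0;
}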
@@ -669,18 +682,18 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
 }
 
 static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
-				   struct stm32_mdma_hwdesc *hwdesc)
+				   struct stm32_mdma_desc_node *node)
 {
-	dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc);
-	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr);
-	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr);
-	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar);
-	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar);
-	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur);
-	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar);
-	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr);
-	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar);
-	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr);
+	dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
+	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
+	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
+	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
+	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
+	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
+	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
+	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
+	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
+	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
 }
 
 static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
@@ -694,7 +707,7 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
 	struct stm32_mdma_hwdesc *hwdesc;
 	u32 next = count + 1;
 
-	hwdesc = &desc->hwdesc[count];
+	hwdesc = desc->node[count].hwdesc;
 	hwdesc->ctcr = ctcr;
 	hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
 			    STM32_MDMA_CBNDTR_BRDUM |
@@ -704,19 +717,20 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
 	hwdesc->csar = src_addr;
 	hwdesc->cdar = dst_addr;
 	hwdesc->cbrur = 0;
-	hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc);
 	hwdesc->ctbr = ctbr;
 	hwdesc->cmar = config->mask_addr;
 	hwdesc->cmdr = config->mask_data;
 
 	if (is_last) {
 		if (is_cyclic)
-			hwdesc->clar = desc->hwdesc_phys;
+			hwdesc->clar = desc->node[0].hwdesc_phys;
 		else
 			hwdesc->clar = 0;
+	} else {
+		hwdesc->clar = desc->node[next].hwdesc_phys;
 	}
 
-	stm32_mdma_dump_hwdesc(chan, hwdesc);
+	stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
 }
 
 static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
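With per-node allocations the hardware descriptors are no longer guaranteed contiguous, so the hunk above loads the next-link register image (CLAR) from the stored bus address of the next node instead of computing hwdesc_phys + next * sizeof(*hwdesc). A compact sketch of that chaining logic, with hypothetical stand-in types and fake bus addresses:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's types, illustration only. */
struct hwdesc { unsigned int clar; };	/* link address register image */
struct node { struct hwdesc *hwdesc; unsigned int hwdesc_phys; };

/* Mirrors the patch's is_last/is_cyclic handling for one node. */
static void link_node(struct node *nodes, unsigned int count,
		      unsigned int i, int cyclic)
{
	if (i + 1 < count)
		nodes[i].hwdesc->clar = nodes[i + 1].hwdesc_phys; /* chain */
	else if (cyclic)
		nodes[i].hwdesc->clar = nodes[0].hwdesc_phys;     /* ring  */
	else
		nodes[i].hwdesc->clar = 0;                        /* end   */
}

int main(void)
{
	struct hwdesc hw[3] = { {0}, {0}, {0} };
	/* Deliberately non-contiguous fake bus addresses. */
	struct node n[3] = { { &hw[0], 0x1000 }, { &hw[1], 0x4000 },
			     { &hw[2], 0x2000 } };
	unsigned int i;

	for (i = 0; i < 3; i++)
		link_node(n, 3, i, 1);	/* cyclic ring */
	for (i = 0; i < 3; i++)
		printf("node %u -> CLAR 0x%x\n", i, hw[i].clar);
	return 0;	/* prints 0x4000, 0x2000, 0x1000 */
}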
@@ -780,7 +794,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
 {
 	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
 	struct stm32_mdma_desc *desc;
-	int ret;
+	int i, ret;
 
 	/*
 	 * Once DMA is in setup cyclic mode the channel we cannot assign this
@@ -806,7 +820,9 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 
 xfer_setup_err:
-	dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+	for (i = 0; i < desc->count; i++)
+		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+			      desc->node[i].hwdesc_phys);
 	kfree(desc);
 	return NULL;
 }
@@ -895,7 +911,9 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 
 xfer_setup_err:
-	dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+	for (i = 0; i < desc->count; i++)
+		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+			      desc->node[i].hwdesc_phys);
 	kfree(desc);
 	return NULL;
 }
@@ -1009,7 +1027,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
 		ctcr |= STM32_MDMA_CTCR_PKE;
 
 		/* Prepare hardware descriptor */
-		hwdesc = desc->hwdesc;
+		hwdesc = desc->node[0].hwdesc;
 		hwdesc->ctcr = ctcr;
 		hwdesc->cbndtr = cbndtr;
 		hwdesc->csar = src;
@@ -1020,7 +1038,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
 		hwdesc->cmar = 0;
 		hwdesc->cmdr = 0;
 
-		stm32_mdma_dump_hwdesc(chan, hwdesc);
+		stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
 	} else {
 		/* Setup a LLI transfer */
 		ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
@@ -1120,7 +1138,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
 	}
 
 	chan->desc = to_stm32_mdma_desc(vdesc);
-	hwdesc = chan->desc->hwdesc;
+	hwdesc = chan->desc->node[0].hwdesc;
 	chan->curr_hwdesc = 0;
 
 	stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
@@ -1198,7 +1216,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
 	unsigned long flags;
 	u32 status, reg;
 
-	hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc];
+	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 
@@ -1268,13 +1286,13 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
 		u32 curr_hwdesc)
 {
 	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+	struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
 	u32 cbndtr, residue, modulo, burst_size;
 	int i;
 
 	residue = 0;
 	for (i = curr_hwdesc + 1; i < desc->count; i++) {
-		struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i];
-
+		hwdesc = desc->node[i].hwdesc;
 		residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
 	}
 	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
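The residue path follows the same indirection: the bytes still to transfer are the sum of the programmed byte counts (the BNDT field of each CBNDTR image) of every descriptor after the current one, plus what the hardware reports for the in-flight descriptor. A minimal sketch of that accumulation (hypothetical helper; the in-flight count is passed in here rather than read from the CBNDTR register as the driver does):

#include <stdio.h>

/* bndt[] holds the programmed byte count of each hw descriptor. */
static unsigned int desc_residue(const unsigned int *bndt, unsigned int count,
				 unsigned int curr, unsigned int hw_bndt)
{
	unsigned int residue = hw_bndt;	/* bytes left in current descriptor */
	unsigned int i;

	for (i = curr + 1; i < count; i++)
		residue += bndt[i];	/* descriptors not yet started */
	return residue;
}

int main(void)
{
	unsigned int bndt[3] = { 4096, 4096, 2048 };

	/* Descriptor 0 running with 1000 bytes left: 1000 + 4096 + 2048. */
	printf("residue: %u bytes\n", desc_residue(bndt, 3, 0, 1000));
	return 0;
}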
@@ -1503,7 +1521,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
 
 	c = dma_get_any_slave_channel(&dmadev->ddev);
 	if (!c) {
-		dev_err(mdma2dev(dmadev), "No more channel avalaible\n");
+		dev_err(mdma2dev(dmadev), "No more channels available\n");
 		return NULL;
 	}
 