Diffstat (limited to 'drivers/dma/sun6i-dma.c')
 -rw-r--r--  drivers/dma/sun6i-dma.c | 122
 1 file changed, 87 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 91292f5513ff..159f1736a16f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of_dma.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
@@ -26,24 +27,6 @@
 #include "virt-dma.h"
 
 /*
- * There's 16 physical channels that can work in parallel.
- *
- * However we have 30 different endpoints for our requests.
- *
- * Since the channels are able to handle only an unidirectional
- * transfer, we need to allocate more virtual channels so that
- * everyone can grab one channel.
- *
- * Some devices can't work in both direction (mostly because it
- * wouldn't make sense), so we have a bit fewer virtual channels than
- * 2 channels per endpoints.
- */
-
-#define NR_MAX_CHANNELS		16
-#define NR_MAX_REQUESTS		30
-#define NR_MAX_VCHANS		53
-
-/*
  * Common registers
  */
 #define DMA_IRQ_EN(x)		((x) * 0x04)
@@ -60,6 +43,12 @@
 #define DMA_STAT		0x30
 
 /*
+ * sun8i specific registers
+ */
+#define SUN8I_DMA_GATE		0x20
+#define SUN8I_DMA_GATE_ENABLE	0x4
+
+/*
  * Channels specific registers
  */
 #define DMA_CHAN_ENABLE		0x00
@@ -102,6 +91,19 @@
 #define DRQ_SDRAM	1
 
 /*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun6i_dma_config {
+	u32 nr_max_channels;
+	u32 nr_max_requests;
+	u32 nr_max_vchans;
+};
+
+/*
  * Hardware representation of the LLI
  *
  * The hardware will be fed the physical address of this structure,
@@ -159,6 +161,7 @@ struct sun6i_dma_dev {
 	struct dma_pool *pool;
 	struct sun6i_pchan *pchans;
 	struct sun6i_vchan *vchans;
+	const struct sun6i_dma_config *cfg;
 };
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 static void sun6i_dma_tasklet(unsigned long data)
 {
 	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+	const struct sun6i_dma_config *cfg = sdev->cfg;
 	struct sun6i_vchan *vchan;
 	struct sun6i_pchan *pchan;
 	unsigned int pchan_alloc = 0;
@@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data)
 	}
 
 	spin_lock_irq(&sdev->lock);
-	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
 		pchan = &sdev->pchans[pchan_idx];
 
 		if (pchan->vchan || list_empty(&sdev->pending))
@@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data)
 	}
 	spin_unlock_irq(&sdev->lock);
 
-	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+	for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
 		if (!(pchan_alloc & BIT(pchan_idx)))
 			continue;
 
@@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 	int i, j, ret = IRQ_NONE;
 	u32 status;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
 		status = readl(sdev->base + DMA_IRQ_STAT(i));
 		if (!status)
 			continue;
@@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 
 		writel(status, sdev->base + DMA_IRQ_STAT(i));
 
-		for (j = 0; (j < 8) && status; j++) {
+		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
 			if (status & DMA_IRQ_QUEUE) {
 				pchan = sdev->pchans + j;
 				vchan = pchan->vchan;
@@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
 			}
 		}
 
-		status = status >> 4;
+		status = status >> DMA_IRQ_CHAN_WIDTH;
 	}
 
 	if (!atomic_read(&sdev->tasklet_shutdown))
@@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
 	struct dma_chan *chan;
 	u8 port = dma_spec->args[0];
 
-	if (port > NR_MAX_REQUESTS)
+	if (port > sdev->cfg->nr_max_requests)
 		return NULL;
 
 	chan = dma_get_any_slave_channel(&sdev->slave);
@@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
 {
 	int i;
 
-	for (i = 0; i < NR_MAX_VCHANS; i++) {
+	for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
 		struct sun6i_vchan *vchan = &sdev->vchans[i];
 
 		list_del(&vchan->vc.chan.device_node);
@@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
 	}
 }
 
+/*
+ * For A31:
+ *
+ * There's 16 physical channels that can work in parallel.
+ *
+ * However we have 30 different endpoints for our requests.
+ *
+ * Since the channels are able to handle only an unidirectional
+ * transfer, we need to allocate more virtual channels so that
+ * everyone can grab one channel.
+ *
+ * Some devices can't work in both direction (mostly because it
+ * wouldn't make sense), so we have a bit fewer virtual channels than
+ * 2 channels per endpoints.
+ */
+
+static struct sun6i_dma_config sun6i_a31_dma_cfg = {
+	.nr_max_channels = 16,
+	.nr_max_requests = 30,
+	.nr_max_vchans   = 53,
+};
+
+/*
+ * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
+ * and a total of 37 usable source and destination endpoints.
+ */
+
+static struct sun6i_dma_config sun8i_a23_dma_cfg = {
+	.nr_max_channels = 8,
+	.nr_max_requests = 24,
+	.nr_max_vchans   = 37,
+};
+
+static struct of_device_id sun6i_dma_match[] = {
+	{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
+	{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+	{ /* sentinel */ }
+};
+
 static int sun6i_dma_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *device;
 	struct sun6i_dma_dev *sdc;
 	struct resource *res;
 	int ret, i;
@@ -866,6 +910,11 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	if (!sdc)
 		return -ENOMEM;
 
+	device = of_match_device(sun6i_dma_match, &pdev->dev);
+	if (!device)
+		return -ENODEV;
+	sdc->cfg = device->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sdc->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(sdc->base))
@@ -912,31 +961,30 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
 	sdc->slave.device_control = sun6i_dma_control;
-	sdc->slave.chancnt = NR_MAX_VCHANS;
 	sdc->slave.copy_align = 4;
 
 	sdc->slave.dev = &pdev->dev;
 
-	sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+	sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
 				   sizeof(struct sun6i_pchan), GFP_KERNEL);
 	if (!sdc->pchans)
 		return -ENOMEM;
 
-	sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+	sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
 				   sizeof(struct sun6i_vchan), GFP_KERNEL);
 	if (!sdc->vchans)
 		return -ENOMEM;
 
 	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
 
-	for (i = 0; i < NR_MAX_CHANNELS; i++) {
+	for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
 		struct sun6i_pchan *pchan = &sdc->pchans[i];
 
 		pchan->idx = i;
 		pchan->base = sdc->base + 0x100 + i * 0x40;
 	}
 
-	for (i = 0; i < NR_MAX_VCHANS; i++) {
+	for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
 		struct sun6i_vchan *vchan = &sdc->vchans[i];
 
 		INIT_LIST_HEAD(&vchan->node);
@@ -976,6 +1024,15 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 		goto err_dma_unregister;
 	}
 
+	/*
+	 * sun8i variant requires us to toggle a dma gating register,
+	 * as seen in Allwinner's SDK. This register is not documented
+	 * in the A23 user manual.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "allwinner,sun8i-a23-dma"))
+		writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+
 	return 0;
 
 err_dma_unregister:
981err_dma_unregister: 1038err_dma_unregister:
@@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev)
1008 return 0; 1065 return 0;
1009} 1066}
1010 1067
1011static struct of_device_id sun6i_dma_match[] = {
1012 { .compatible = "allwinner,sun6i-a31-dma" },
1013 { /* sentinel */ }
1014};
1015
1016static struct platform_driver sun6i_dma_driver = { 1068static struct platform_driver sun6i_dma_driver = {
1017 .probe = sun6i_dma_probe, 1069 .probe = sun6i_dma_probe,
1018 .remove = sun6i_dma_remove, 1070 .remove = sun6i_dma_remove,
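
Note on the pattern used above: the patch replaces the NR_MAX_* compile-time constants with a per-SoC parameter block (struct sun6i_dma_config) selected through the of_device_id table, so probe() simply dereferences whatever .data the compatible match returned. The following standalone sketch mirrors that table-driven lookup in plain user-space C, purely as an illustration; the match_compatible() helper, main() and the printed output are hypothetical stand-ins for of_match_device() and the driver's probe path, not part of the kernel code.

#include <stdio.h>
#include <string.h>

/* Per-SoC parameters, mirroring struct sun6i_dma_config in the patch. */
struct dma_config {
	unsigned int nr_max_channels;
	unsigned int nr_max_requests;
	unsigned int nr_max_vchans;
};

/* Simplified stand-in for struct of_device_id: compatible string + opaque data. */
struct match_entry {
	const char *compatible;
	const void *data;
};

static const struct dma_config a31_cfg = { 16, 30, 53 };
static const struct dma_config a23_cfg = {  8, 24, 37 };

static const struct match_entry dma_match[] = {
	{ "allwinner,sun6i-a31-dma", &a31_cfg },
	{ "allwinner,sun8i-a23-dma", &a23_cfg },
	{ NULL, NULL }	/* sentinel, like the kernel table */
};

/* Hypothetical helper playing the role of of_match_device(). */
static const struct match_entry *match_compatible(const struct match_entry *tbl,
						  const char *compatible)
{
	for (; tbl->compatible; tbl++)
		if (!strcmp(tbl->compatible, compatible))
			return tbl;
	return NULL;
}

int main(void)
{
	/* "probe": look up the parameter block for a given compatible string. */
	const struct match_entry *m = match_compatible(dma_match,
						       "allwinner,sun8i-a23-dma");
	const struct dma_config *cfg;

	if (!m)
		return 1;	/* the driver returns -ENODEV here */

	cfg = m->data;
	printf("channels=%u requests=%u vchans=%u\n",
	       cfg->nr_max_channels, cfg->nr_max_requests, cfg->nr_max_vchans);
	return 0;
}

Keeping the limits behind a config pointer means supporting a new SoC only requires a new table entry, which is exactly why the patch drops the hardcoded NR_MAX_CHANNELS/NR_MAX_REQUESTS/NR_MAX_VCHANS defines.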