author		Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2010-02-11 11:50:10 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-03-01 21:09:02 -0500
commit		027811b9b81a6b3ae5aa20c3302897bee9dcf09e (patch)
tree		cde9b764d10d7ba9d0a41d9c780bf9032214dcae /drivers/dma
parent		47a4dc26eeb89a3746f9b1e2092602b40469640a (diff)
dmaengine: shdma: convert to platform device resources
The shdma dmaengine driver currently uses numerous macros, selected by ifdefs, to support
various platforms. Convert it to use platform device resources and lists of channel
descriptors to specify register locations, interrupt numbers, and other system-specific
configuration variants. Unavoidably, we have to simultaneously convert all shdma users to
provide those resources.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
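
For illustration, a board or SoC setup file after this conversion describes the controller entirely through platform data and resources, roughly as in the sketch below. The register addresses, channel offsets, DMARS offsets and IRQ numbers are placeholders, and the "sh-dma-engine" device name and the header declaring struct sh_dmae_pdata are assumptions rather than part of this patch; the field names (offset, dmars, dmars_bit, channel, channel_num) do match the structures the new probe code reads.

#include <linux/platform_device.h>
/* Hypothetical example only. struct sh_dmae_pdata / sh_dmae_channel come
 * from the SH DMA platform header; its location varies by kernel version. */

static struct sh_dmae_channel dmac0_channels[] = {
	/* One entry per channel: byte offset of the channel's register
	 * block within MEM resource 0, byte offset of its DMARS register
	 * within MEM resource 1, and the bit position within that DMARS. */
	{ .offset = 0x00, .dmars = 0x00, .dmars_bit = 0 },
	{ .offset = 0x10, .dmars = 0x00, .dmars_bit = 8 },
};

static struct sh_dmae_pdata dmac0_pdata = {
	.channel	= dmac0_channels,
	.channel_num	= ARRAY_SIZE(dmac0_channels),
};

static struct resource dmac0_resources[] = {
	{
		/* Channel registers and DMAOR */
		.start	= 0xfe008020,
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	}, {
		/* DMARS registers: optional, required only for slave DMA */
		.start	= 0xfe009000,
		.end	= 0xfe009003,
		.flags	= IORESOURCE_MEM,
	}, {
		/* The first IRQ resource is the error IRQ */
		.start	= 34,
		.end	= 34,
		.flags	= IORESOURCE_IRQ,
	}, {
		/* Remaining IRQ resources: channel IRQs, here as a range */
		.start	= 48,
		.end	= 49,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dmac0_device = {
	.name		= "sh-dma-engine",
	.id		= 0,
	.resource	= dmac0_resources,
	.num_resources	= ARRAY_SIZE(dmac0_resources),
	.dev		= {
		.platform_data	= &dmac0_pdata,
	},
};

Registering dmac0_device with platform_device_register() then hands the driver everything it previously pulled in through compile-time macros.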
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/shdma.c	318
-rw-r--r--	drivers/dma/shdma.h	6
2 files changed, 194 insertions, 130 deletions
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 77311698c046..ab12fa5a1296 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -53,15 +53,24 @@ static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
 
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
-#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
+	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
+	return __raw_readl(sh_dc->base + reg / sizeof(u32));
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
 }
 
 /*
@@ -69,23 +78,22 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
  *
  * SH7780 has two DMAOR register
  */
-static void sh_dmae_ctl_stop(int id)
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
 {
-	unsigned short dmaor = dmaor_read_reg(id);
+	unsigned short dmaor = dmaor_read(shdev);
 
-	dmaor &= ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
-	dmaor_write_reg(id, dmaor);
+	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
 }
 
-static int sh_dmae_rst(int id)
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
 {
 	unsigned short dmaor;
 
-	sh_dmae_ctl_stop(id);
-	dmaor = dmaor_read_reg(id) | DMAOR_INIT;
+	sh_dmae_ctl_stop(shdev);
+	dmaor = dmaor_read(shdev) | DMAOR_INIT;
 
-	dmaor_write_reg(id, dmaor);
-	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
+	dmaor_write(shdev, dmaor);
+	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
 		pr_warning("dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
@@ -153,31 +161,20 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 	return 0;
 }
 
-#define DMARS_SHIFT	8
-#define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
-	u32 addr;
-	int shift = 0;
+	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
+						struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = shdev->pdata;
+	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
+	int shift = chan_pdata->dmars_bit;
 
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
-	if (sh_chan->id & DMARS_CHAN_MSK)
-		shift = DMARS_SHIFT;
-
-	if (sh_chan->id < 6)
-		/* DMA0RS0 - DMA0RS2 */
-		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
-#ifdef SH_DMARS_BASE1
-	else if (sh_chan->id < 12)
-		/* DMA1RS0 - DMA1RS2 */
-		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
-#endif
-	else
-		return -EINVAL;
-
-	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
+	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+		     addr);
 
 	return 0;
 }
@@ -251,15 +248,15 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
 	struct dma_device *dma_dev = sh_chan->common.device;
 	struct sh_dmae_device *shdev = container_of(dma_dev,
 					struct sh_dmae_device, common);
-	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	struct sh_dmae_pdata *pdata = shdev->pdata;
 	int i;
 
 	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
 		return NULL;
 
-	for (i = 0; i < pdata->config_num; i++)
-		if (pdata->config[i].slave_id == slave_id)
-			return pdata->config + i;
+	for (i = 0; i < pdata->slave_num; i++)
+		if (pdata->slave[i].slave_id == slave_id)
+			return pdata->slave + i;
 
 	return NULL;
 }
@@ -757,9 +754,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 	int i;
 
 	/* halt the dma controller */
-	sh_dmae_ctl_stop(0);
-	if (shdev->pdata.mode & SHDMA_DMAOR1)
-		sh_dmae_ctl_stop(1);
+	sh_dmae_ctl_stop(shdev);
 
 	/* We cannot detect, which channel caused the error, have to reset all */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -778,9 +773,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
 		}
 	}
-	sh_dmae_rst(0);
-	if (shdev->pdata.mode & SHDMA_DMAOR1)
-		sh_dmae_rst(1);
+	sh_dmae_rst(shdev);
 
 	return IRQ_HANDLED;
 }
@@ -813,19 +806,12 @@ static void dmae_do_tasklet(unsigned long data)
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
-static unsigned int get_dmae_irq(unsigned int id)
-{
-	unsigned int irq = 0;
-	if (id < ARRAY_SIZE(dmte_irq_map))
-		irq = dmte_irq_map[id];
-	return irq;
-}
-
-static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+					int irq, unsigned long flags)
 {
 	int err;
-	unsigned int irq = get_dmae_irq(id);
-	unsigned long irqflags = IRQF_DISABLED;
+	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+	struct platform_device *pdev = to_platform_device(shdev->common.dev);
 	struct sh_dmae_chan *new_sh_chan;
 
 	/* alloc channel */
@@ -838,6 +824,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 
 	new_sh_chan->dev = shdev->common.dev;
 	new_sh_chan->id = id;
+	new_sh_chan->irq = irq;
+	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
 
 	/* Init DMA tasklet */
 	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
@@ -860,21 +848,15 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 		      &shdev->common.channels);
 	shdev->common.chancnt++;
 
-	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		irqflags = IRQF_SHARED;
-#if defined(DMTE6_IRQ)
-		if (irq >= DMTE6_IRQ)
-			irq = DMTE6_IRQ;
-		else
-#endif
-			irq = DMTE0_IRQ;
-	}
-
-	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-		 "sh-dmae%d", new_sh_chan->id);
+	if (pdev->id >= 0)
+		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
+	else
+		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+			 "sh-dma%d", new_sh_chan->id);
 
 	/* set up channel irq */
-	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+	err = request_irq(irq, &sh_dmae_interrupt, flags,
 			  new_sh_chan->dev_id, new_sh_chan);
 	if (err) {
 		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
@@ -898,12 +880,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
 
 	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
 		if (shdev->chan[i]) {
-			struct sh_dmae_chan *shchan = shdev->chan[i];
-			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
-				free_irq(dmte_irq_map[i], shchan);
+			struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+			free_irq(sh_chan->irq, sh_chan);
 
-			list_del(&shchan->common.device_node);
-			kfree(shchan);
+			list_del(&sh_chan->common.device_node);
+			kfree(sh_chan);
 			shdev->chan[i] = NULL;
 		}
 	}
@@ -912,47 +894,81 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
 
 static int __init sh_dmae_probe(struct platform_device *pdev)
 {
-	int err = 0, cnt, ecnt;
-	unsigned long irqflags = IRQF_DISABLED;
-#if defined(CONFIG_CPU_SH4)
-	int eirq[] = { DMAE0_IRQ,
-#if defined(DMAE1_IRQ)
-			DMAE1_IRQ
-#endif
-		};
-#endif
+	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+	unsigned long irqflags = IRQF_DISABLED,
+		chan_flag[MAX_DMA_CHANNELS] = {};
+	int errirq, chan_irq[MAX_DMA_CHANNELS];
+	int err, i, irq_cnt = 0, irqres = 0;
 	struct sh_dmae_device *shdev;
+	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
 
 	/* get platform data */
-	if (!pdev->dev.platform_data)
+	if (!pdata || !pdata->channel_num)
 		return -ENODEV;
 
+	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
+	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	/*
+	 * IRQ resources:
+	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
+	 *    the error IRQ, in which case it is the only IRQ in this resource:
+	 *    start == end. If it is the only IRQ resource, all channels also
+	 *    use the same IRQ.
+	 * 2. DMA channel IRQ resources can be specified one per resource or in
+	 *    ranges (start != end)
+	 * 3. iff all events (channels and, optionally, error) on this
+	 *    controller use the same IRQ, only one IRQ resource can be
+	 *    specified, otherwise there must be one IRQ per channel, even if
+	 *    some of them are equal
+	 * 4. if all IRQs on this controller are equal or if some specific IRQs
+	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+	 *    requested with the IRQF_SHARED flag
+	 */
+	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!chan || !errirq_res)
+		return -ENODEV;
+
+	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
+		dev_err(&pdev->dev, "DMAC register region already claimed\n");
+		return -EBUSY;
+	}
+
+	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
+		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
+		err = -EBUSY;
+		goto ermrdmars;
+	}
+
+	err = -ENOMEM;
 	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
 	if (!shdev) {
-		dev_err(&pdev->dev, "No enough memory\n");
-		return -ENOMEM;
+		dev_err(&pdev->dev, "Not enough memory\n");
+		goto ealloc;
+	}
+
+	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
+	if (!shdev->chan_reg)
+		goto emapchan;
+	if (dmars) {
+		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
+		if (!shdev->dmars)
+			goto emapdmars;
 	}
 
 	/* platform data */
-	memcpy(&shdev->pdata, pdev->dev.platform_data,
-	       sizeof(struct sh_dmae_pdata));
+	shdev->pdata = pdata;
 
 	/* reset dma controller */
-	err = sh_dmae_rst(0);
+	err = sh_dmae_rst(shdev);
 	if (err)
 		goto rst_err;
 
-	/* SH7780/85/23 has DMAOR1 */
-	if (shdev->pdata.mode & SHDMA_DMAOR1) {
-		err = sh_dmae_rst(1);
-		if (err)
-			goto rst_err;
-	}
-
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
-	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+	if (dmars)
+		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
 
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
@@ -970,30 +986,63 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	shdev->common.copy_align = 5;
 
 #if defined(CONFIG_CPU_SH4)
-	/* Non Mix IRQ mode SH7722/SH7730 etc... */
-	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+	if (!chanirq_res)
+		chanirq_res = errirq_res;
+	else
+		irqres++;
+
+	if (chanirq_res == errirq_res ||
+	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
 		irqflags = IRQF_SHARED;
-		eirq[0] = DMTE0_IRQ;
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-		eirq[1] = DMTE6_IRQ;
-#endif
+
+	errirq = errirq_res->start;
+
+	err = request_irq(errirq, sh_dmae_err, irqflags,
+			  "DMAC Address Error", shdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"DMA failed requesting irq #%d, error %d\n",
+			errirq, err);
+		goto eirq_err;
 	}
 
-	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
-		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
-				  "DMAC Address Error", shdev);
-		if (err) {
-			dev_err(&pdev->dev, "DMA device request_irq"
-				"error (irq %d) with return %d\n",
-				eirq[ecnt], err);
-			goto eirq_err;
+#else
+	chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 */
+
+	if (chanirq_res->start == chanirq_res->end &&
+	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+		/* Special case - all multiplexed */
+		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+			chan_irq[irq_cnt] = chanirq_res->start;
+			chan_flag[irq_cnt] = IRQF_SHARED;
 		}
+	} else {
+		do {
+			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+				if ((errirq_res->flags & IORESOURCE_BITS) ==
+				    IORESOURCE_IRQ_SHAREABLE)
+					chan_flag[irq_cnt] = IRQF_SHARED;
+				else
+					chan_flag[irq_cnt] = IRQF_DISABLED;
+				dev_dbg(&pdev->dev,
+					"Found IRQ %d for channel %d\n",
+					i, irq_cnt);
+				chan_irq[irq_cnt++] = i;
+			}
+			chanirq_res = platform_get_resource(pdev,
+						IORESOURCE_IRQ, ++irqres);
+		} while (irq_cnt < pdata->channel_num && chanirq_res);
 	}
-#endif /* CONFIG_CPU_SH4 */
+
+	if (irq_cnt < pdata->channel_num)
+		goto eirqres;
 
 	/* Create DMA Channel */
-	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
-		err = sh_dmae_chan_probe(shdev, cnt);
+	for (i = 0; i < pdata->channel_num; i++) {
+		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
 		if (err)
 			goto chan_probe_err;
 	}
@@ -1005,13 +1054,23 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 chan_probe_err:
 	sh_dmae_chan_remove(shdev);
-
+eirqres:
+#if defined(CONFIG_CPU_SH4)
+	free_irq(errirq, shdev);
 eirq_err:
-	for (ecnt-- ; ecnt >= 0; ecnt--)
-		free_irq(eirq[ecnt], shdev);
-
+#endif
 rst_err:
+	if (dmars)
+		iounmap(shdev->dmars);
+emapdmars:
+	iounmap(shdev->chan_reg);
+emapchan:
 	kfree(shdev);
+ealloc:
+	if (dmars)
+		release_mem_region(dmars->start, resource_size(dmars));
+ermrdmars:
+	release_mem_region(chan->start, resource_size(chan));
 
 	return err;
 }
@@ -1019,36 +1078,37 @@ rst_err:
 static int __exit sh_dmae_remove(struct platform_device *pdev)
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+	struct resource *res;
+	int errirq = platform_get_irq(pdev, 0);
 
 	dma_async_device_unregister(&shdev->common);
 
-	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		free_irq(DMTE0_IRQ, shdev);
-#if defined(DMTE6_IRQ)
-		free_irq(DMTE6_IRQ, shdev);
-#endif
-	}
+	if (errirq > 0)
+		free_irq(errirq, shdev);
 
 	/* channel data remove */
 	sh_dmae_chan_remove(shdev);
 
-	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
-		free_irq(DMAE0_IRQ, shdev);
-#if defined(DMAE1_IRQ)
-		free_irq(DMAE1_IRQ, shdev);
-#endif
-	}
+	if (shdev->dmars)
+		iounmap(shdev->dmars);
+	iounmap(shdev->chan_reg);
+
 	kfree(shdev);
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+
 	return 0;
 }
 
 static void sh_dmae_shutdown(struct platform_device *pdev)
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-	sh_dmae_ctl_stop(0);
-	if (shdev->pdata.mode & SHDMA_DMAOR1)
-		sh_dmae_ctl_stop(1);
+	sh_dmae_ctl_stop(shdev);
 }
 
 static struct platform_driver sh_dmae_driver = {
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 7e227f3c87c4..800fd884be83 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -47,14 +47,18 @@ struct sh_dmae_chan {
 	struct tasklet_struct tasklet;	/* Tasklet */
 	int descs_allocated;		/* desc count */
 	int xmit_shift;			/* log_2(bytes_per_xfer) */
+	int irq;
 	int id;				/* Raw id of this channel */
+	u32 __iomem *base;
 	char dev_id[16];		/* unique name per DMAC of channel */
 };
 
 struct sh_dmae_device {
 	struct dma_device common;
 	struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
-	struct sh_dmae_pdata pdata;
+	struct sh_dmae_pdata *pdata;
+	u32 __iomem *chan_reg;
+	u16 __iomem *dmars;
 };
 
 #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
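
A note on the IRQ resource convention documented in the new probe routine: it admits anything from a single fully multiplexed IRQ to a dedicated IRQ per channel. A minimal sketch of the two extremes, with placeholder IRQ numbers:

/* Illustrative only -- the IRQ numbers below are made up. */

/* Case A: one IRQ shared by the error event and all channels. It is the
 * only IRQ resource and start == end, so the driver requests it once per
 * user with IRQF_SHARED. */
static struct resource dmac_irq_muxed[] = {
	{ .start = 20, .end = 20, .flags = IORESOURCE_IRQ },
};

/* Case B: a dedicated error IRQ first, then per-channel IRQs specified
 * as ranges (start != end), here covering six channels. */
static struct resource dmac_irq_split[] = {
	{ .start = 34, .end = 34, .flags = IORESOURCE_IRQ },	/* error */
	{ .start = 48, .end = 51, .flags = IORESOURCE_IRQ },	/* ch 0..3 */
	{ .start = 76, .end = 77, .flags = IORESOURCE_IRQ },	/* ch 4..5 */
};

Marking an individual IRQ resource IORESOURCE_IRQ_SHAREABLE makes the driver request the corresponding channel IRQs with IRQF_SHARED, matching rule 4 in the probe comment.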