author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 13:05:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 13:05:17 -0500
commit		041c79514af9080c75197078283134f538f46b44 (patch)
tree		d5e465d5967d84adb37d735fddec48ee0509b93c
parent		7d884710bb3635f94dac152ae226ca54a585a223 (diff)
parent		34635b1accb99b3c3ad3b35a210be198701aac7e (diff)
Merge tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This time we have a very typical update which is mostly fixes and
updates to drivers and no new drivers.
- the biggest change is coming from Peter for edma cleanup which even
caused some last-minute regressions; things seem settled now
- idma64 and dw updates
- ioatdma updates
- module autoload fixes for various drivers
- scatter gather support for hdmac"
* tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (77 commits)
dmaengine: edma: Add dummy driver skeleton for edma3-tptc
Revert "ARM: DTS: am33xx: Use the new DT bindings for the eDMA3"
Revert "ARM: DTS: am437x: Use the new DT bindings for the eDMA3"
dmaengine: dw: some Intel devices has no memcpy support
dmaengine: dw: platform: provide platform data for Intel
dmaengine: dw: don't override platform data with autocfg
dmaengine: hdmac: Add scatter-gathered memset support
dmaengine: hdmac: factorise memset descriptor allocation
dmaengine: virt-dma: Fix kernel-doc annotations
ARM: DTS: am437x: Use the new DT bindings for the eDMA3
ARM: DTS: am33xx: Use the new DT bindings for the eDMA3
dmaengine: edma: New device tree binding
dmaengine: Kconfig: edma: Select TI_DMA_CROSSBAR in case of ARCH_OMAP
dmaengine: ti-dma-crossbar: Add support for crossbar on AM33xx/AM43xx
dmaengine: edma: Merge the of parsing functions
dmaengine: edma: Do not allocate memory for edma_rsv_info in case of DT boot
dmaengine: edma: Refactor the dma device and channel struct initialization
dmaengine: edma: Get qDMA channel information from HW also
dmaengine: edma: Merge map_dmach_to_queue into assign_channel_eventq
dmaengine: edma: Correct PaRAM access function names (_parm_ to _param_)
...
46 files changed, 2533 insertions(+), 2684 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
index 63a48928f3a8..b152a75dceae 100644
--- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
@@ -2,9 +2,10 @@ Texas Instruments DMA Crossbar (DMA request router)
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: "ti,dra7-dma-crossbar" for DRA7xx DMA crossbar | 4 | - compatible: "ti,dra7-dma-crossbar" for DRA7xx DMA crossbar |
5 | "ti,am335x-edma-crossbar" for AM335x and AM437x | ||
5 | - reg: Memory map for accessing module | 6 | - reg: Memory map for accessing module |
6 | - #dma-cells: Should be set to <1>. | 7 | - #dma-cells: Should be set to match the DMA controller's dma-cells
7 | Clients should use the crossbar request number (input) | 8 | for ti,dra7-dma-crossbar and <3> for ti,am335x-edma-crossbar. |
8 | - dma-requests: Number of DMA requests the crossbar can receive | 9 | - dma-requests: Number of DMA requests the crossbar can receive |
9 | - dma-masters: phandle pointing to the DMA controller | 10 | - dma-masters: phandle pointing to the DMA controller |
10 | 11 | ||
@@ -14,6 +15,15 @@ The DMA controller node needs to have the following properties:
14 | Optional properties: | 15 | Optional properties: |
15 | - ti,dma-safe-map: Safe routing value for unused request lines | 16 | - ti,dma-safe-map: Safe routing value for unused request lines |
16 | 17 | ||
18 | Notes: | ||
19 | When requesting a channel via ti,dra7-dma-crossbar, the DMA client must request | ||
20 | the DMA event number as crossbar ID (input to the DMA crossbar). | ||
21 | |||
22 | For ti,am335x-edma-crossbar, the meaning of the dmas parameters for clients: | ||
23 | dmas = <&edma_xbar 12 0 1>; where <12> is the DMA request number, <0> is the TC | ||
24 | the event should be assigned to, and <1> is the mux selection in the crossbar. | ||
25 | When mux 0 is used the DMA channel can be requested directly from the edma node. | ||
26 | |||
17 | Example: | 27 | Example: |
18 | 28 | ||
19 | /* DMA controller */ | 29 | /* DMA controller */ |
@@ -47,6 +57,7 @@ uart1: serial@4806a000 {
47 | ti,hwmods = "uart1"; | 57 | ti,hwmods = "uart1"; |
48 | clock-frequency = <48000000>; | 58 | clock-frequency = <48000000>; |
49 | status = "disabled"; | 59 | status = "disabled"; |
60 | /* Requesting crossbar input 49 and 50 */ | ||
50 | dmas = <&sdma_xbar 49>, <&sdma_xbar 50>; | 61 | dmas = <&sdma_xbar 49>, <&sdma_xbar 50>; |
51 | dma-names = "tx", "rx"; | 62 | dma-names = "tx", "rx"; |
52 | }; | 63 | }; |
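For the AM335x variant, a minimal client sketch of the three-cell form described in the notes above (the SPI node, request numbers and mux value are illustrative, not taken from a shipping DTS):

	spi0: spi@48030000 {
		compatible = "ti,omap4-mcspi";
		reg = <0x48030000 0x400>;
		/* crossbar inputs 16/17, queued on TC0, through mux 1 */
		dmas = <&edma_xbar 16 0 1>,
		       <&edma_xbar 17 0 1>;
		dma-names = "tx", "rx";
	};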
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 5ba525a10035..d3d0a4fb1c73 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -1,4 +1,119 @@
1 | TI EDMA | 1 | Texas Instruments eDMA |
2 | |||
3 | The eDMA3 consists of two components: Channel controller (CC) and Transfer | ||
4 | Controller(s) (TC). The CC is the main entry for DMA users since it is | ||
5 | responsible for the DMA channel handling, while the TCs are responsible for | ||
6 | executing the actual DMA transfer. | ||
7 | |||
8 | ------------------------------------------------------------------------------ | ||
9 | eDMA3 Channel Controller | ||
10 | |||
11 | Required properties: | ||
12 | - compatible: "ti,edma3-tpcc" for the channel controller(s) | ||
13 | - #dma-cells: Should be set to <2>. The first number is the DMA request | ||
14 | number and the second is the TC the channel is serviced on. | ||
15 | - reg: Memory map of eDMA CC | ||
16 | - reg-names: "edma3_cc" | ||
17 | - interrupts: Interrupt lines for CCINT, MPERR and CCERRINT. | ||
18 | - interrupt-names: "edma3_ccint", "edma3_mperr" and "edma3_ccerrint" | ||
19 | - ti,tptcs: List of TPTCs associated with the eDMA in the following form: | ||
20 | <&tptc_phandle TC_priority_number>. The highest priority is 0. | ||
21 | |||
22 | Optional properties: | ||
23 | - ti,hwmods: Name of the hwmods associated to the eDMA CC | ||
24 | - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, i.e. | ||
25 | these channels will be SW-triggered channels. The list must | ||
26 | contain 16-bit numbers; see the example. | ||
27 | - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by | ||
28 | the driver; they are allocated for use by, for example, the | ||
29 | DSP. See the example. | ||
30 | |||
31 | ------------------------------------------------------------------------------ | ||
32 | eDMA3 Transfer Controller | ||
33 | |||
34 | Required properties: | ||
35 | - compatible: "ti,edma3-tptc" for the transfer controller(s) | ||
36 | - reg: Memory map of eDMA TC | ||
37 | - interrupts: Interrupt number for TCerrint. | ||
38 | |||
39 | Optional properties: | ||
40 | - ti,hwmods: Name of the hwmods associated to the given eDMA TC | ||
41 | - interrupt-names: "edma3_tcerrint" | ||
42 | |||
43 | ------------------------------------------------------------------------------ | ||
44 | Example: | ||
45 | |||
46 | edma: edma@49000000 { | ||
47 | compatible = "ti,edma3-tpcc"; | ||
48 | ti,hwmods = "tpcc"; | ||
49 | reg = <0x49000000 0x10000>; | ||
50 | reg-names = "edma3_cc"; | ||
51 | interrupts = <12 13 14>; | ||
52 | interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; | ||
53 | dma-requests = <64>; | ||
54 | #dma-cells = <2>; | ||
55 | |||
56 | ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; | ||
57 | |||
58 | /* Channels 20 and 21 are allocated for memcpy */ | ||
59 | ti,edma-memcpy-channels = /bits/ 16 <20 21>; | ||
60 | /* The following PaRAM slots are reserved: 35-44 and 100-109 */ | ||
61 | ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, | ||
62 | /bits/ 16 <100 10>; | ||
63 | }; | ||
64 | |||
65 | edma_tptc0: tptc@49800000 { | ||
66 | compatible = "ti,edma3-tptc"; | ||
67 | ti,hwmods = "tptc0"; | ||
68 | reg = <0x49800000 0x100000>; | ||
69 | interrupts = <112>; | ||
70 | interrupt-names = "edma3_tcerrint"; | ||
71 | }; | ||
72 | |||
73 | edma_tptc1: tptc@49900000 { | ||
74 | compatible = "ti,edma3-tptc"; | ||
75 | ti,hwmods = "tptc1"; | ||
76 | reg = <0x49900000 0x100000>; | ||
77 | interrupts = <113>; | ||
78 | interrupt-names = "edma3_tcerrint"; | ||
79 | }; | ||
80 | |||
81 | edma_tptc2: tptc@49a00000 { | ||
82 | compatible = "ti,edma3-tptc"; | ||
83 | ti,hwmods = "tptc2"; | ||
84 | reg = <0x49a00000 0x100000>; | ||
85 | interrupts = <114>; | ||
86 | interrupt-names = "edma3_tcerrint"; | ||
87 | }; | ||
88 | |||
89 | sham: sham@53100000 { | ||
90 | compatible = "ti,omap4-sham"; | ||
91 | ti,hwmods = "sham"; | ||
92 | reg = <0x53100000 0x200>; | ||
93 | interrupts = <109>; | ||
94 | /* DMA channel 36 executed on eDMA TC0 - low priority queue */ | ||
95 | dmas = <&edma 36 0>; | ||
96 | dma-names = "rx"; | ||
97 | }; | ||
98 | |||
99 | mcasp0: mcasp@48038000 { | ||
100 | compatible = "ti,am33xx-mcasp-audio"; | ||
101 | ti,hwmods = "mcasp0"; | ||
102 | reg = <0x48038000 0x2000>, | ||
103 | <0x46000000 0x400000>; | ||
104 | reg-names = "mpu", "dat"; | ||
105 | interrupts = <80>, <81>; | ||
106 | interrupt-names = "tx", "rx"; | ||
107 | status = "disabled"; | ||
108 | /* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */ | ||
109 | dmas = <&edma 8 2>, | ||
110 | <&edma 9 2>; | ||
111 | dma-names = "tx", "rx"; | ||
112 | }; | ||
113 | |||
114 | ------------------------------------------------------------------------------ | ||
115 | DEPRECATED binding, new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc | ||
116 | binding. | ||
2 | 117 | ||
3 | Required properties: | 118 | Required properties: |
4 | - compatible : "ti,edma3" | 119 | - compatible : "ti,edma3" |
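For contrast with the split CC/TC binding above, a representative node for this deprecated single-node binding would look roughly like the following (a hedged sketch of the old ti,edma3 form; the property values are illustrative):

	edma: edma@49000000 {
		compatible = "ti,edma3";
		ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
		reg = <0x49000000 0x10000>;
		interrupts = <12 13 14>;
		#dma-cells = <1>;
		dma-channels = <64>;
		ti,edma-regions = <4>;
		ti,edma-slots = <256>;
	};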
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f1ed1109f488..9246bd7cc3cf 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -737,7 +737,6 @@ config ARCH_DAVINCI
737 | select GENERIC_CLOCKEVENTS | 737 | select GENERIC_CLOCKEVENTS |
738 | select GENERIC_IRQ_CHIP | 738 | select GENERIC_IRQ_CHIP |
739 | select HAVE_IDE | 739 | select HAVE_IDE |
740 | select TI_PRIV_EDMA | ||
741 | select USE_OF | 740 | select USE_OF |
742 | select ZONE_DMA | 741 | select ZONE_DMA |
743 | help | 742 | help |
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c3a4e9ceba34..9353184d730d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -17,6 +17,3 @@ config SHARP_PARAM
17 | 17 | ||
18 | config SHARP_SCOOP | 18 | config SHARP_SCOOP |
19 | bool | 19 | bool |
20 | |||
21 | config TI_PRIV_EDMA | ||
22 | bool | ||
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ee5959a813b..27f23b15b1ea 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -15,6 +15,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
15 | CFLAGS_REMOVE_mcpm_entry.o = -pg | 15 | CFLAGS_REMOVE_mcpm_entry.o = -pg |
16 | AFLAGS_mcpm_head.o := -march=armv7-a | 16 | AFLAGS_mcpm_head.o := -march=armv7-a |
17 | AFLAGS_vlock.o := -march=armv7-a | 17 | AFLAGS_vlock.o := -march=armv7-a |
18 | obj-$(CONFIG_TI_PRIV_EDMA) += edma.o | ||
19 | obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o | 18 | obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o |
20 | obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o | 19 | obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o |
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
deleted file mode 100644
index 873dbfcc7dc9..000000000000
--- a/arch/arm/common/edma.c
+++ /dev/null
@@ -1,1876 +0,0 @@
1 | /* | ||
2 | * EDMA3 support for DaVinci | ||
3 | * | ||
4 | * Copyright (C) 2006-2009 Texas Instruments. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/edma.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/of_address.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/of_dma.h> | ||
33 | #include <linux/of_irq.h> | ||
34 | #include <linux/pm_runtime.h> | ||
35 | |||
36 | #include <linux/platform_data/edma.h> | ||
37 | |||
38 | /* Offsets matching "struct edmacc_param" */ | ||
39 | #define PARM_OPT 0x00 | ||
40 | #define PARM_SRC 0x04 | ||
41 | #define PARM_A_B_CNT 0x08 | ||
42 | #define PARM_DST 0x0c | ||
43 | #define PARM_SRC_DST_BIDX 0x10 | ||
44 | #define PARM_LINK_BCNTRLD 0x14 | ||
45 | #define PARM_SRC_DST_CIDX 0x18 | ||
46 | #define PARM_CCNT 0x1c | ||
47 | |||
48 | #define PARM_SIZE 0x20 | ||
49 | |||
50 | /* Offsets for EDMA CC global channel registers and their shadows */ | ||
51 | #define SH_ER 0x00 /* 64 bits */ | ||
52 | #define SH_ECR 0x08 /* 64 bits */ | ||
53 | #define SH_ESR 0x10 /* 64 bits */ | ||
54 | #define SH_CER 0x18 /* 64 bits */ | ||
55 | #define SH_EER 0x20 /* 64 bits */ | ||
56 | #define SH_EECR 0x28 /* 64 bits */ | ||
57 | #define SH_EESR 0x30 /* 64 bits */ | ||
58 | #define SH_SER 0x38 /* 64 bits */ | ||
59 | #define SH_SECR 0x40 /* 64 bits */ | ||
60 | #define SH_IER 0x50 /* 64 bits */ | ||
61 | #define SH_IECR 0x58 /* 64 bits */ | ||
62 | #define SH_IESR 0x60 /* 64 bits */ | ||
63 | #define SH_IPR 0x68 /* 64 bits */ | ||
64 | #define SH_ICR 0x70 /* 64 bits */ | ||
65 | #define SH_IEVAL 0x78 | ||
66 | #define SH_QER 0x80 | ||
67 | #define SH_QEER 0x84 | ||
68 | #define SH_QEECR 0x88 | ||
69 | #define SH_QEESR 0x8c | ||
70 | #define SH_QSER 0x90 | ||
71 | #define SH_QSECR 0x94 | ||
72 | #define SH_SIZE 0x200 | ||
73 | |||
74 | /* Offsets for EDMA CC global registers */ | ||
75 | #define EDMA_REV 0x0000 | ||
76 | #define EDMA_CCCFG 0x0004 | ||
77 | #define EDMA_QCHMAP 0x0200 /* 8 registers */ | ||
78 | #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ | ||
79 | #define EDMA_QDMAQNUM 0x0260 | ||
80 | #define EDMA_QUETCMAP 0x0280 | ||
81 | #define EDMA_QUEPRI 0x0284 | ||
82 | #define EDMA_EMR 0x0300 /* 64 bits */ | ||
83 | #define EDMA_EMCR 0x0308 /* 64 bits */ | ||
84 | #define EDMA_QEMR 0x0310 | ||
85 | #define EDMA_QEMCR 0x0314 | ||
86 | #define EDMA_CCERR 0x0318 | ||
87 | #define EDMA_CCERRCLR 0x031c | ||
88 | #define EDMA_EEVAL 0x0320 | ||
89 | #define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ | ||
90 | #define EDMA_QRAE 0x0380 /* 4 registers */ | ||
91 | #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ | ||
92 | #define EDMA_QSTAT 0x0600 /* 2 registers */ | ||
93 | #define EDMA_QWMTHRA 0x0620 | ||
94 | #define EDMA_QWMTHRB 0x0624 | ||
95 | #define EDMA_CCSTAT 0x0640 | ||
96 | |||
97 | #define EDMA_M 0x1000 /* global channel registers */ | ||
98 | #define EDMA_ECR 0x1008 | ||
99 | #define EDMA_ECRH 0x100C | ||
100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ | ||
101 | #define EDMA_PARM 0x4000 /* 128 param entries */ | ||
102 | |||
103 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | ||
104 | |||
105 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | ||
106 | |||
107 | /* CCCFG register */ | ||
108 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | ||
109 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | ||
110 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | ||
111 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | ||
112 | #define CHMAP_EXIST BIT(24) | ||
113 | |||
114 | #define EDMA_MAX_DMACH 64 | ||
115 | #define EDMA_MAX_PARAMENTRY 512 | ||
116 | |||
117 | /*****************************************************************************/ | ||
118 | |||
119 | static void __iomem *edmacc_regs_base[EDMA_MAX_CC]; | ||
120 | |||
121 | static inline unsigned int edma_read(unsigned ctlr, int offset) | ||
122 | { | ||
123 | return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset); | ||
124 | } | ||
125 | |||
126 | static inline void edma_write(unsigned ctlr, int offset, int val) | ||
127 | { | ||
128 | __raw_writel(val, edmacc_regs_base[ctlr] + offset); | ||
129 | } | ||
130 | static inline void edma_modify(unsigned ctlr, int offset, unsigned and, | ||
131 | unsigned or) | ||
132 | { | ||
133 | unsigned val = edma_read(ctlr, offset); | ||
134 | val &= and; | ||
135 | val |= or; | ||
136 | edma_write(ctlr, offset, val); | ||
137 | } | ||
138 | static inline void edma_and(unsigned ctlr, int offset, unsigned and) | ||
139 | { | ||
140 | unsigned val = edma_read(ctlr, offset); | ||
141 | val &= and; | ||
142 | edma_write(ctlr, offset, val); | ||
143 | } | ||
144 | static inline void edma_or(unsigned ctlr, int offset, unsigned or) | ||
145 | { | ||
146 | unsigned val = edma_read(ctlr, offset); | ||
147 | val |= or; | ||
148 | edma_write(ctlr, offset, val); | ||
149 | } | ||
150 | static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i) | ||
151 | { | ||
152 | return edma_read(ctlr, offset + (i << 2)); | ||
153 | } | ||
154 | static inline void edma_write_array(unsigned ctlr, int offset, int i, | ||
155 | unsigned val) | ||
156 | { | ||
157 | edma_write(ctlr, offset + (i << 2), val); | ||
158 | } | ||
159 | static inline void edma_modify_array(unsigned ctlr, int offset, int i, | ||
160 | unsigned and, unsigned or) | ||
161 | { | ||
162 | edma_modify(ctlr, offset + (i << 2), and, or); | ||
163 | } | ||
164 | static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or) | ||
165 | { | ||
166 | edma_or(ctlr, offset + (i << 2), or); | ||
167 | } | ||
168 | static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j, | ||
169 | unsigned or) | ||
170 | { | ||
171 | edma_or(ctlr, offset + ((i*2 + j) << 2), or); | ||
172 | } | ||
173 | static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j, | ||
174 | unsigned val) | ||
175 | { | ||
176 | edma_write(ctlr, offset + ((i*2 + j) << 2), val); | ||
177 | } | ||
178 | static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset) | ||
179 | { | ||
180 | return edma_read(ctlr, EDMA_SHADOW0 + offset); | ||
181 | } | ||
182 | static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset, | ||
183 | int i) | ||
184 | { | ||
185 | return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2)); | ||
186 | } | ||
187 | static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val) | ||
188 | { | ||
189 | edma_write(ctlr, EDMA_SHADOW0 + offset, val); | ||
190 | } | ||
191 | static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i, | ||
192 | unsigned val) | ||
193 | { | ||
194 | edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val); | ||
195 | } | ||
196 | static inline unsigned int edma_parm_read(unsigned ctlr, int offset, | ||
197 | int param_no) | ||
198 | { | ||
199 | return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5)); | ||
200 | } | ||
201 | static inline void edma_parm_write(unsigned ctlr, int offset, int param_no, | ||
202 | unsigned val) | ||
203 | { | ||
204 | edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val); | ||
205 | } | ||
206 | static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no, | ||
207 | unsigned and, unsigned or) | ||
208 | { | ||
209 | edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or); | ||
210 | } | ||
211 | static inline void edma_parm_and(unsigned ctlr, int offset, int param_no, | ||
212 | unsigned and) | ||
213 | { | ||
214 | edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and); | ||
215 | } | ||
216 | static inline void edma_parm_or(unsigned ctlr, int offset, int param_no, | ||
217 | unsigned or) | ||
218 | { | ||
219 | edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or); | ||
220 | } | ||
221 | |||
222 | static inline void set_bits(int offset, int len, unsigned long *p) | ||
223 | { | ||
224 | for (; len > 0; len--) | ||
225 | set_bit(offset + (len - 1), p); | ||
226 | } | ||
227 | |||
228 | static inline void clear_bits(int offset, int len, unsigned long *p) | ||
229 | { | ||
230 | for (; len > 0; len--) | ||
231 | clear_bit(offset + (len - 1), p); | ||
232 | } | ||
233 | |||
234 | /*****************************************************************************/ | ||
235 | |||
236 | /* actual number of DMA channels and slots on this silicon */ | ||
237 | struct edma { | ||
238 | /* how many dma resources of each type */ | ||
239 | unsigned num_channels; | ||
240 | unsigned num_region; | ||
241 | unsigned num_slots; | ||
242 | unsigned num_tc; | ||
243 | enum dma_event_q default_queue; | ||
244 | |||
245 | /* list of channels with no event trigger; terminated by "-1" */ | ||
246 | const s8 *noevent; | ||
247 | |||
248 | struct edma_soc_info *info; | ||
249 | |||
250 | /* The edma_inuse bit for each PaRAM slot is clear unless the | ||
251 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | ||
252 | */ | ||
253 | DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
254 | |||
255 | /* The edma_unused bit for each channel is set unless the | ||
256 | * channel is in use on this platform. Determining this takes | ||
257 | * a bit of SOC-specific initialization code. | ||
258 | */ | ||
259 | DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH); | ||
260 | |||
261 | unsigned irq_res_start; | ||
262 | unsigned irq_res_end; | ||
263 | |||
264 | struct dma_interrupt_data { | ||
265 | void (*callback)(unsigned channel, unsigned short ch_status, | ||
266 | void *data); | ||
267 | void *data; | ||
268 | } intr_data[EDMA_MAX_DMACH]; | ||
269 | }; | ||
270 | |||
271 | static struct edma *edma_cc[EDMA_MAX_CC]; | ||
272 | static int arch_num_cc; | ||
273 | |||
274 | /* dummy param set used to (re)initialize parameter RAM slots */ | ||
275 | static const struct edmacc_param dummy_paramset = { | ||
276 | .link_bcntrld = 0xffff, | ||
277 | .ccnt = 1, | ||
278 | }; | ||
279 | |||
280 | static const struct of_device_id edma_of_ids[] = { | ||
281 | { .compatible = "ti,edma3", }, | ||
282 | {} | ||
283 | }; | ||
284 | |||
285 | /*****************************************************************************/ | ||
286 | |||
287 | static void map_dmach_queue(unsigned ctlr, unsigned ch_no, | ||
288 | enum dma_event_q queue_no) | ||
289 | { | ||
290 | int bit = (ch_no & 0x7) * 4; | ||
291 | |||
292 | /* default to low priority queue */ | ||
293 | if (queue_no == EVENTQ_DEFAULT) | ||
294 | queue_no = edma_cc[ctlr]->default_queue; | ||
295 | |||
296 | queue_no &= 7; | ||
297 | edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3), | ||
298 | ~(0x7 << bit), queue_no << bit); | ||
299 | } | ||
300 | |||
301 | static void assign_priority_to_queue(unsigned ctlr, int queue_no, | ||
302 | int priority) | ||
303 | { | ||
304 | int bit = queue_no * 4; | ||
305 | edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit), | ||
306 | ((priority & 0x7) << bit)); | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * map_dmach_param - Maps channel number to param entry number | ||
311 | * | ||
312 | * This maps the DMA channel number to a param entry number. In | ||
313 | * other words, using the DMA channel mapping registers, a param entry | ||
314 | * can be mapped to any channel. | ||
315 | * | ||
316 | * Callers are responsible for ensuring the channel mapping logic is | ||
317 | * included in that particular EDMA variant (e.g. dm646x). | ||
318 | * | ||
319 | */ | ||
320 | static void map_dmach_param(unsigned ctlr) | ||
321 | { | ||
322 | int i; | ||
323 | for (i = 0; i < EDMA_MAX_DMACH; i++) | ||
324 | edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5)); | ||
325 | } | ||
326 | |||
327 | static inline void | ||
328 | setup_dma_interrupt(unsigned lch, | ||
329 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
330 | void *data) | ||
331 | { | ||
332 | unsigned ctlr; | ||
333 | |||
334 | ctlr = EDMA_CTLR(lch); | ||
335 | lch = EDMA_CHAN_SLOT(lch); | ||
336 | |||
337 | if (!callback) | ||
338 | edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5, | ||
339 | BIT(lch & 0x1f)); | ||
340 | |||
341 | edma_cc[ctlr]->intr_data[lch].callback = callback; | ||
342 | edma_cc[ctlr]->intr_data[lch].data = data; | ||
343 | |||
344 | if (callback) { | ||
345 | edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5, | ||
346 | BIT(lch & 0x1f)); | ||
347 | edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5, | ||
348 | BIT(lch & 0x1f)); | ||
349 | } | ||
350 | } | ||
351 | |||
352 | static int irq2ctlr(int irq) | ||
353 | { | ||
354 | if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end) | ||
355 | return 0; | ||
356 | else if (irq >= edma_cc[1]->irq_res_start && | ||
357 | irq <= edma_cc[1]->irq_res_end) | ||
358 | return 1; | ||
359 | |||
360 | return -1; | ||
361 | } | ||
362 | |||
363 | /****************************************************************************** | ||
364 | * | ||
365 | * DMA interrupt handler | ||
366 | * | ||
367 | *****************************************************************************/ | ||
368 | static irqreturn_t dma_irq_handler(int irq, void *data) | ||
369 | { | ||
370 | int ctlr; | ||
371 | u32 sh_ier; | ||
372 | u32 sh_ipr; | ||
373 | u32 bank; | ||
374 | |||
375 | ctlr = irq2ctlr(irq); | ||
376 | if (ctlr < 0) | ||
377 | return IRQ_NONE; | ||
378 | |||
379 | dev_dbg(data, "dma_irq_handler\n"); | ||
380 | |||
381 | sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0); | ||
382 | if (!sh_ipr) { | ||
383 | sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1); | ||
384 | if (!sh_ipr) | ||
385 | return IRQ_NONE; | ||
386 | sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1); | ||
387 | bank = 1; | ||
388 | } else { | ||
389 | sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0); | ||
390 | bank = 0; | ||
391 | } | ||
392 | |||
393 | do { | ||
394 | u32 slot; | ||
395 | u32 channel; | ||
396 | |||
397 | dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr); | ||
398 | |||
399 | slot = __ffs(sh_ipr); | ||
400 | sh_ipr &= ~(BIT(slot)); | ||
401 | |||
402 | if (sh_ier & BIT(slot)) { | ||
403 | channel = (bank << 5) | slot; | ||
404 | /* Clear the corresponding IPR bits */ | ||
405 | edma_shadow0_write_array(ctlr, SH_ICR, bank, | ||
406 | BIT(slot)); | ||
407 | if (edma_cc[ctlr]->intr_data[channel].callback) | ||
408 | edma_cc[ctlr]->intr_data[channel].callback( | ||
409 | channel, EDMA_DMA_COMPLETE, | ||
410 | edma_cc[ctlr]->intr_data[channel].data); | ||
411 | } | ||
412 | } while (sh_ipr); | ||
413 | |||
414 | edma_shadow0_write(ctlr, SH_IEVAL, 1); | ||
415 | return IRQ_HANDLED; | ||
416 | } | ||
417 | |||
418 | /****************************************************************************** | ||
419 | * | ||
420 | * DMA error interrupt handler | ||
421 | * | ||
422 | *****************************************************************************/ | ||
423 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | ||
424 | { | ||
425 | int i; | ||
426 | int ctlr; | ||
427 | unsigned int cnt = 0; | ||
428 | |||
429 | ctlr = irq2ctlr(irq); | ||
430 | if (ctlr < 0) | ||
431 | return IRQ_NONE; | ||
432 | |||
433 | dev_dbg(data, "dma_ccerr_handler\n"); | ||
434 | |||
435 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) && | ||
436 | (edma_read_array(ctlr, EDMA_EMR, 1) == 0) && | ||
437 | (edma_read(ctlr, EDMA_QEMR) == 0) && | ||
438 | (edma_read(ctlr, EDMA_CCERR) == 0)) | ||
439 | return IRQ_NONE; | ||
440 | |||
441 | while (1) { | ||
442 | int j = -1; | ||
443 | if (edma_read_array(ctlr, EDMA_EMR, 0)) | ||
444 | j = 0; | ||
445 | else if (edma_read_array(ctlr, EDMA_EMR, 1)) | ||
446 | j = 1; | ||
447 | if (j >= 0) { | ||
448 | dev_dbg(data, "EMR%d %08x\n", j, | ||
449 | edma_read_array(ctlr, EDMA_EMR, j)); | ||
450 | for (i = 0; i < 32; i++) { | ||
451 | int k = (j << 5) + i; | ||
452 | if (edma_read_array(ctlr, EDMA_EMR, j) & | ||
453 | BIT(i)) { | ||
454 | /* Clear the corresponding EMR bits */ | ||
455 | edma_write_array(ctlr, EDMA_EMCR, j, | ||
456 | BIT(i)); | ||
457 | /* Clear any SER */ | ||
458 | edma_shadow0_write_array(ctlr, SH_SECR, | ||
459 | j, BIT(i)); | ||
460 | if (edma_cc[ctlr]->intr_data[k]. | ||
461 | callback) { | ||
462 | edma_cc[ctlr]->intr_data[k]. | ||
463 | callback(k, | ||
464 | EDMA_DMA_CC_ERROR, | ||
465 | edma_cc[ctlr]->intr_data | ||
466 | [k].data); | ||
467 | } | ||
468 | } | ||
469 | } | ||
470 | } else if (edma_read(ctlr, EDMA_QEMR)) { | ||
471 | dev_dbg(data, "QEMR %02x\n", | ||
472 | edma_read(ctlr, EDMA_QEMR)); | ||
473 | for (i = 0; i < 8; i++) { | ||
474 | if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) { | ||
475 | /* Clear the corresponding IPR bits */ | ||
476 | edma_write(ctlr, EDMA_QEMCR, BIT(i)); | ||
477 | edma_shadow0_write(ctlr, SH_QSECR, | ||
478 | BIT(i)); | ||
479 | |||
480 | /* NOTE: not reported!! */ | ||
481 | } | ||
482 | } | ||
483 | } else if (edma_read(ctlr, EDMA_CCERR)) { | ||
484 | dev_dbg(data, "CCERR %08x\n", | ||
485 | edma_read(ctlr, EDMA_CCERR)); | ||
486 | /* FIXME: CCERR.BIT(16) ignored! much better | ||
487 | * to just write CCERRCLR with CCERR value... | ||
488 | */ | ||
489 | for (i = 0; i < 8; i++) { | ||
490 | if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) { | ||
491 | /* Clear the corresponding IPR bits */ | ||
492 | edma_write(ctlr, EDMA_CCERRCLR, BIT(i)); | ||
493 | |||
494 | /* NOTE: not reported!! */ | ||
495 | } | ||
496 | } | ||
497 | } | ||
498 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) && | ||
499 | (edma_read_array(ctlr, EDMA_EMR, 1) == 0) && | ||
500 | (edma_read(ctlr, EDMA_QEMR) == 0) && | ||
501 | (edma_read(ctlr, EDMA_CCERR) == 0)) | ||
502 | break; | ||
503 | cnt++; | ||
504 | if (cnt > 10) | ||
505 | break; | ||
506 | } | ||
507 | edma_write(ctlr, EDMA_EEVAL, 1); | ||
508 | return IRQ_HANDLED; | ||
509 | } | ||
510 | |||
511 | static int reserve_contiguous_slots(int ctlr, unsigned int id, | ||
512 | unsigned int num_slots, | ||
513 | unsigned int start_slot) | ||
514 | { | ||
515 | int i, j; | ||
516 | unsigned int count = num_slots; | ||
517 | int stop_slot = start_slot; | ||
518 | DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY); | ||
519 | |||
520 | for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) { | ||
521 | j = EDMA_CHAN_SLOT(i); | ||
522 | if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) { | ||
523 | /* Record our current beginning slot */ | ||
524 | if (count == num_slots) | ||
525 | stop_slot = i; | ||
526 | |||
527 | count--; | ||
528 | set_bit(j, tmp_inuse); | ||
529 | |||
530 | if (count == 0) | ||
531 | break; | ||
532 | } else { | ||
533 | clear_bit(j, tmp_inuse); | ||
534 | |||
535 | if (id == EDMA_CONT_PARAMS_FIXED_EXACT) { | ||
536 | stop_slot = i; | ||
537 | break; | ||
538 | } else { | ||
539 | count = num_slots; | ||
540 | } | ||
541 | } | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * We have to clear any bits that we set | ||
546 | * if we run out of parameter RAM slots, i.e. we do find a set | ||
547 | * of contiguous parameter RAM slots but do not find the exact number | ||
548 | * requested, as we may reach the total number of parameter RAM slots. | ||
549 | */ | ||
550 | if (i == edma_cc[ctlr]->num_slots) | ||
551 | stop_slot = i; | ||
552 | |||
553 | j = start_slot; | ||
554 | for_each_set_bit_from(j, tmp_inuse, stop_slot) | ||
555 | clear_bit(j, edma_cc[ctlr]->edma_inuse); | ||
556 | |||
557 | if (count) | ||
558 | return -EBUSY; | ||
559 | |||
560 | for (j = i - num_slots + 1; j <= i; ++j) | ||
561 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j), | ||
562 | &dummy_paramset, PARM_SIZE); | ||
563 | |||
564 | return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1); | ||
565 | } | ||
566 | |||
567 | static int prepare_unused_channel_list(struct device *dev, void *data) | ||
568 | { | ||
569 | struct platform_device *pdev = to_platform_device(dev); | ||
570 | int i, count, ctlr; | ||
571 | struct of_phandle_args dma_spec; | ||
572 | |||
573 | if (dev->of_node) { | ||
574 | count = of_property_count_strings(dev->of_node, "dma-names"); | ||
575 | if (count < 0) | ||
576 | return 0; | ||
577 | for (i = 0; i < count; i++) { | ||
578 | if (of_parse_phandle_with_args(dev->of_node, "dmas", | ||
579 | "#dma-cells", i, | ||
580 | &dma_spec)) | ||
581 | continue; | ||
582 | |||
583 | if (!of_match_node(edma_of_ids, dma_spec.np)) { | ||
584 | of_node_put(dma_spec.np); | ||
585 | continue; | ||
586 | } | ||
587 | |||
588 | clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]), | ||
589 | edma_cc[0]->edma_unused); | ||
590 | of_node_put(dma_spec.np); | ||
591 | } | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | /* For non-OF case */ | ||
596 | for (i = 0; i < pdev->num_resources; i++) { | ||
597 | if ((pdev->resource[i].flags & IORESOURCE_DMA) && | ||
598 | (int)pdev->resource[i].start >= 0) { | ||
599 | ctlr = EDMA_CTLR(pdev->resource[i].start); | ||
600 | clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), | ||
601 | edma_cc[ctlr]->edma_unused); | ||
602 | } | ||
603 | } | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | /*-----------------------------------------------------------------------*/ | ||
609 | |||
610 | static bool unused_chan_list_done; | ||
611 | |||
612 | /* Resource alloc/free: dma channels, parameter RAM slots */ | ||
613 | |||
614 | /** | ||
615 | * edma_alloc_channel - allocate DMA channel and paired parameter RAM | ||
616 | * @channel: specific channel to allocate; negative for "any unmapped channel" | ||
617 | * @callback: optional; to be issued on DMA completion or errors | ||
618 | * @data: passed to callback | ||
619 | * @eventq_no: an EVENTQ_* constant, used to choose which Transfer | ||
620 | * Controller (TC) executes requests using this channel. Use | ||
621 | * EVENTQ_DEFAULT unless you really need a high priority queue. | ||
622 | * | ||
623 | * This allocates a DMA channel and its associated parameter RAM slot. | ||
624 | * The parameter RAM is initialized to hold a dummy transfer. | ||
625 | * | ||
626 | * Normal use is to pass a specific channel number as @channel, to make | ||
627 | * use of hardware events mapped to that channel. When the channel will | ||
628 | * be used only for software triggering or event chaining, channels not | ||
629 | * mapped to hardware events (or mapped to unused events) are preferable. | ||
630 | * | ||
631 | * DMA transfers start from a channel using edma_start(), or by | ||
632 | * chaining. When the transfer described in that channel's parameter RAM | ||
633 | * slot completes, that slot's data may be reloaded through a link. | ||
634 | * | ||
635 | * DMA errors are only reported to the @callback associated with the | ||
636 | * channel driving that transfer, but transfer completion callbacks can | ||
637 | * be sent to another channel under control of the TCC field in | ||
638 | * the option word of the transfer's parameter RAM set. Drivers must not | ||
639 | * use DMA transfer completion callbacks for channels they did not allocate. | ||
640 | * (The same applies to TCC codes used in transfer chaining.) | ||
641 | * | ||
642 | * Returns the number of the channel, else negative errno. | ||
643 | */ | ||
644 | int edma_alloc_channel(int channel, | ||
645 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
646 | void *data, | ||
647 | enum dma_event_q eventq_no) | ||
648 | { | ||
649 | unsigned i, done = 0, ctlr = 0; | ||
650 | int ret = 0; | ||
651 | |||
652 | if (!unused_chan_list_done) { | ||
653 | /* | ||
654 | * Scan all the platform devices to find out the EDMA channels | ||
655 | * used and clear them in the unused list, making the rest | ||
656 | * available for ARM usage. | ||
657 | */ | ||
658 | ret = bus_for_each_dev(&platform_bus_type, NULL, NULL, | ||
659 | prepare_unused_channel_list); | ||
660 | if (ret < 0) | ||
661 | return ret; | ||
662 | |||
663 | unused_chan_list_done = true; | ||
664 | } | ||
665 | |||
666 | if (channel >= 0) { | ||
667 | ctlr = EDMA_CTLR(channel); | ||
668 | channel = EDMA_CHAN_SLOT(channel); | ||
669 | } | ||
670 | |||
671 | if (channel < 0) { | ||
672 | for (i = 0; i < arch_num_cc; i++) { | ||
673 | channel = 0; | ||
674 | for (;;) { | ||
675 | channel = find_next_bit(edma_cc[i]->edma_unused, | ||
676 | edma_cc[i]->num_channels, | ||
677 | channel); | ||
678 | if (channel == edma_cc[i]->num_channels) | ||
679 | break; | ||
680 | if (!test_and_set_bit(channel, | ||
681 | edma_cc[i]->edma_inuse)) { | ||
682 | done = 1; | ||
683 | ctlr = i; | ||
684 | break; | ||
685 | } | ||
686 | channel++; | ||
687 | } | ||
688 | if (done) | ||
689 | break; | ||
690 | } | ||
691 | if (!done) | ||
692 | return -ENOMEM; | ||
693 | } else if (channel >= edma_cc[ctlr]->num_channels) { | ||
694 | return -EINVAL; | ||
695 | } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) { | ||
696 | return -EBUSY; | ||
697 | } | ||
698 | |||
699 | /* ensure access through shadow region 0 */ | ||
700 | edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); | ||
701 | |||
702 | /* ensure no events are pending */ | ||
703 | edma_stop(EDMA_CTLR_CHAN(ctlr, channel)); | ||
704 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), | ||
705 | &dummy_paramset, PARM_SIZE); | ||
706 | |||
707 | if (callback) | ||
708 | setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel), | ||
709 | callback, data); | ||
710 | |||
711 | map_dmach_queue(ctlr, channel, eventq_no); | ||
712 | |||
713 | return EDMA_CTLR_CHAN(ctlr, channel); | ||
714 | } | ||
715 | EXPORT_SYMBOL(edma_alloc_channel); | ||
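A usage illustration for the allocator just exported (a hedged sketch: the callback, its data pointer and the queue choice are invented for the example, and error handling is trimmed):

	/* ch_status is EDMA_DMA_COMPLETE or EDMA_DMA_CC_ERROR */
	static void my_dma_cb(unsigned channel, u16 ch_status, void *data)
	{
	}

	int ch = edma_alloc_channel(-1, my_dma_cb, NULL, EVENTQ_DEFAULT);
	if (ch < 0)
		return ch;		/* negative errno */
	/* ... program the PaRAM slot, then start or chain the transfer ... */
	edma_free_channel(ch);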
716 | |||
717 | |||
718 | /** | ||
719 | * edma_free_channel - deallocate DMA channel | ||
720 | * @channel: dma channel returned from edma_alloc_channel() | ||
721 | * | ||
722 | * This deallocates the DMA channel and associated parameter RAM slot | ||
723 | * allocated by edma_alloc_channel(). | ||
724 | * | ||
725 | * Callers are responsible for ensuring the channel is inactive, and | ||
726 | * will not be reactivated by linking, chaining, or software calls to | ||
727 | * edma_start(). | ||
728 | */ | ||
729 | void edma_free_channel(unsigned channel) | ||
730 | { | ||
731 | unsigned ctlr; | ||
732 | |||
733 | ctlr = EDMA_CTLR(channel); | ||
734 | channel = EDMA_CHAN_SLOT(channel); | ||
735 | |||
736 | if (channel >= edma_cc[ctlr]->num_channels) | ||
737 | return; | ||
738 | |||
739 | setup_dma_interrupt(channel, NULL, NULL); | ||
740 | /* REVISIT should probably take out of shadow region 0 */ | ||
741 | |||
742 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), | ||
743 | &dummy_paramset, PARM_SIZE); | ||
744 | clear_bit(channel, edma_cc[ctlr]->edma_inuse); | ||
745 | } | ||
746 | EXPORT_SYMBOL(edma_free_channel); | ||
747 | |||
748 | /** | ||
749 | * edma_alloc_slot - allocate DMA parameter RAM | ||
750 | * @slot: specific slot to allocate; negative for "any unused slot" | ||
751 | * | ||
752 | * This allocates a parameter RAM slot, initializing it to hold a | ||
753 | * dummy transfer. Slots allocated using this routine have not been | ||
754 | * mapped to a hardware DMA channel, and will normally be used by | ||
755 | * linking to them from a slot associated with a DMA channel. | ||
756 | * | ||
757 | * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific | ||
758 | * slots may be allocated on behalf of DSP firmware. | ||
759 | * | ||
760 | * Returns the number of the slot, else negative errno. | ||
761 | */ | ||
762 | int edma_alloc_slot(unsigned ctlr, int slot) | ||
763 | { | ||
764 | if (!edma_cc[ctlr]) | ||
765 | return -EINVAL; | ||
766 | |||
767 | if (slot >= 0) | ||
768 | slot = EDMA_CHAN_SLOT(slot); | ||
769 | |||
770 | if (slot < 0) { | ||
771 | slot = edma_cc[ctlr]->num_channels; | ||
772 | for (;;) { | ||
773 | slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse, | ||
774 | edma_cc[ctlr]->num_slots, slot); | ||
775 | if (slot == edma_cc[ctlr]->num_slots) | ||
776 | return -ENOMEM; | ||
777 | if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) | ||
778 | break; | ||
779 | } | ||
780 | } else if (slot < edma_cc[ctlr]->num_channels || | ||
781 | slot >= edma_cc[ctlr]->num_slots) { | ||
782 | return -EINVAL; | ||
783 | } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) { | ||
784 | return -EBUSY; | ||
785 | } | ||
786 | |||
787 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
788 | &dummy_paramset, PARM_SIZE); | ||
789 | |||
790 | return EDMA_CTLR_CHAN(ctlr, slot); | ||
791 | } | ||
792 | EXPORT_SYMBOL(edma_alloc_slot); | ||
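To show how such an unmapped slot is typically consumed (a hedged sketch using edma_write_slot() and edma_link() from later in this file; 'ch' and 'params' are assumed to be set up by the caller):

	int slot = edma_alloc_slot(ctlr, EDMA_SLOT_ANY);
	if (slot >= 0) {
		edma_write_slot(slot, &params);	/* whole PaRAM set at once */
		edma_link(ch, slot);	/* reload from 'slot' when 'ch' completes */
	}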
793 | |||
794 | /** | ||
795 | * edma_free_slot - deallocate DMA parameter RAM | ||
796 | * @slot: parameter RAM slot returned from edma_alloc_slot() | ||
797 | * | ||
798 | * This deallocates the parameter RAM slot allocated by edma_alloc_slot(). | ||
799 | * Callers are responsible for ensuring the slot is inactive, and will | ||
800 | * not be activated. | ||
801 | */ | ||
802 | void edma_free_slot(unsigned slot) | ||
803 | { | ||
804 | unsigned ctlr; | ||
805 | |||
806 | ctlr = EDMA_CTLR(slot); | ||
807 | slot = EDMA_CHAN_SLOT(slot); | ||
808 | |||
809 | if (slot < edma_cc[ctlr]->num_channels || | ||
810 | slot >= edma_cc[ctlr]->num_slots) | ||
811 | return; | ||
812 | |||
813 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
814 | &dummy_paramset, PARM_SIZE); | ||
815 | clear_bit(slot, edma_cc[ctlr]->edma_inuse); | ||
816 | } | ||
817 | EXPORT_SYMBOL(edma_free_slot); | ||
818 | |||
819 | |||
820 | /** | ||
821 | * edma_alloc_cont_slots - allocate contiguous parameter RAM slots | ||
822 | * The API will return the starting point of a set of | ||
823 | * contiguous parameter RAM slots that have been requested | ||
824 | * | ||
825 | * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT | ||
826 | * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
827 | * @count: number of contiguous Parameter RAM slots | ||
828 | * @slot: the start value of the Parameter RAM slot that should be passed if id | ||
829 | * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
830 | * | ||
831 | * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of | ||
832 | * contiguous Parameter RAM slots from parameter RAM 64 in the case of | ||
833 | * DaVinci SOCs and 32 in the case of DA8xx SOCs. | ||
834 | * | ||
835 | * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a | ||
836 | * set of contiguous parameter RAM slots from the "slot" that is passed as an | ||
837 | * argument to the API. | ||
838 | * | ||
839 | * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially | ||
840 | * starts looking for a set of contiguous parameter RAM slots from the "slot" | ||
841 | * that is passed as an argument to the API. On failure the API will try to | ||
842 | * find a set of contiguous Parameter RAM slots from the remaining Parameter | ||
843 | * RAM slots | ||
844 | */ | ||
845 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count) | ||
846 | { | ||
847 | /* | ||
848 | * The start slot requested should be greater than | ||
849 | * the number of channels and less than the total number | ||
850 | * of slots | ||
851 | */ | ||
852 | if ((id != EDMA_CONT_PARAMS_ANY) && | ||
853 | (slot < edma_cc[ctlr]->num_channels || | ||
854 | slot >= edma_cc[ctlr]->num_slots)) | ||
855 | return -EINVAL; | ||
856 | |||
857 | /* | ||
858 | * The number of parameter RAM slots requested cannot be less than 1 | ||
859 | * and cannot be more than the number of slots minus the number of | ||
860 | * channels | ||
861 | */ | ||
862 | if (count < 1 || count > | ||
863 | (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels)) | ||
864 | return -EINVAL; | ||
865 | |||
866 | switch (id) { | ||
867 | case EDMA_CONT_PARAMS_ANY: | ||
868 | return reserve_contiguous_slots(ctlr, id, count, | ||
869 | edma_cc[ctlr]->num_channels); | ||
870 | case EDMA_CONT_PARAMS_FIXED_EXACT: | ||
871 | case EDMA_CONT_PARAMS_FIXED_NOT_EXACT: | ||
872 | return reserve_contiguous_slots(ctlr, id, count, slot); | ||
873 | default: | ||
874 | return -EINVAL; | ||
875 | } | ||
876 | |||
877 | } | ||
878 | EXPORT_SYMBOL(edma_alloc_cont_slots); | ||
879 | |||
880 | /** | ||
881 | * edma_free_cont_slots - deallocate DMA parameter RAM slots | ||
882 | * @slot: first parameter RAM of a set of parameter RAM slots to be freed | ||
883 | * @count: the number of contiguous parameter RAM slots to be freed | ||
884 | * | ||
885 | * This deallocates the parameter RAM slots allocated by | ||
886 | * edma_alloc_cont_slots. | ||
887 | * Callers/applications need to keep track of sets of contiguous | ||
888 | * parameter RAM slots that have been allocated using the edma_alloc_cont_slots | ||
889 | * API. | ||
890 | * Callers are responsible for ensuring the slots are inactive, and will | ||
891 | * not be activated. | ||
892 | */ | ||
893 | int edma_free_cont_slots(unsigned slot, int count) | ||
894 | { | ||
895 | unsigned ctlr, slot_to_free; | ||
896 | int i; | ||
897 | |||
898 | ctlr = EDMA_CTLR(slot); | ||
899 | slot = EDMA_CHAN_SLOT(slot); | ||
900 | |||
901 | if (slot < edma_cc[ctlr]->num_channels || | ||
902 | slot >= edma_cc[ctlr]->num_slots || | ||
903 | count < 1) | ||
904 | return -EINVAL; | ||
905 | |||
906 | for (i = slot; i < slot + count; ++i) { | ||
907 | ctlr = EDMA_CTLR(i); | ||
908 | slot_to_free = EDMA_CHAN_SLOT(i); | ||
909 | |||
910 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free), | ||
911 | &dummy_paramset, PARM_SIZE); | ||
912 | clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse); | ||
913 | } | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | EXPORT_SYMBOL(edma_free_cont_slots); | ||
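A minimal round trip through the contiguous-slot API (a hedged sketch; controller 0 and a count of 4 are illustrative):

	int slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
	if (slot >= 0)
		edma_free_cont_slots(slot, 4);	/* free the same count */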
918 | |||
919 | /*-----------------------------------------------------------------------*/ | ||
920 | |||
921 | /* Parameter RAM operations (i) -- read/write partial slots */ | ||
922 | |||
923 | /** | ||
924 | * edma_set_src - set initial DMA source address in parameter RAM slot | ||
925 | * @slot: parameter RAM slot being configured | ||
926 | * @src_port: physical address of source (memory, controller FIFO, etc) | ||
927 | * @mode: INCR, except in very rare cases | ||
928 | * @width: ignored unless @mode is FIFO, else specifies the | ||
929 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
930 | * | ||
931 | * Note that the source address is modified during the DMA transfer | ||
932 | * according to edma_set_src_index(). | ||
933 | */ | ||
934 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
935 | enum address_mode mode, enum fifo_width width) | ||
936 | { | ||
937 | unsigned ctlr; | ||
938 | |||
939 | ctlr = EDMA_CTLR(slot); | ||
940 | slot = EDMA_CHAN_SLOT(slot); | ||
941 | |||
942 | if (slot < edma_cc[ctlr]->num_slots) { | ||
943 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
944 | |||
945 | if (mode) { | ||
946 | /* set SAM and program FWID */ | ||
947 | i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8)); | ||
948 | } else { | ||
949 | /* clear SAM */ | ||
950 | i &= ~SAM; | ||
951 | } | ||
952 | edma_parm_write(ctlr, PARM_OPT, slot, i); | ||
953 | |||
954 | /* set the source port address | ||
955 | in source register of param structure */ | ||
956 | edma_parm_write(ctlr, PARM_SRC, slot, src_port); | ||
957 | } | ||
958 | } | ||
959 | EXPORT_SYMBOL(edma_set_src); | ||
960 | |||
961 | /** | ||
962 | * edma_set_dest - set initial DMA destination address in parameter RAM slot | ||
963 | * @slot: parameter RAM slot being configured | ||
964 | * @dest_port: physical address of destination (memory, controller FIFO, etc) | ||
965 | * @mode: INCR, except in very rare cases | ||
966 | * @width: ignored unless @mode is FIFO, else specifies the | ||
967 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
968 | * | ||
969 | * Note that the destination address is modified during the DMA transfer | ||
970 | * according to edma_set_dest_index(). | ||
971 | */ | ||
972 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
973 | enum address_mode mode, enum fifo_width width) | ||
974 | { | ||
975 | unsigned ctlr; | ||
976 | |||
977 | ctlr = EDMA_CTLR(slot); | ||
978 | slot = EDMA_CHAN_SLOT(slot); | ||
979 | |||
980 | if (slot < edma_cc[ctlr]->num_slots) { | ||
981 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
982 | |||
983 | if (mode) { | ||
984 | /* set DAM and program FWID */ | ||
985 | i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8)); | ||
986 | } else { | ||
987 | /* clear DAM */ | ||
988 | i &= ~DAM; | ||
989 | } | ||
990 | edma_parm_write(ctlr, PARM_OPT, slot, i); | ||
991 | /* set the destination port address | ||
992 | in dest register of param structure */ | ||
993 | edma_parm_write(ctlr, PARM_DST, slot, dest_port); | ||
994 | } | ||
995 | } | ||
996 | EXPORT_SYMBOL(edma_set_dest); | ||
997 | |||
998 | /** | ||
999 | * edma_get_position - returns the current transfer point | ||
1000 | * @slot: parameter RAM slot being examined | ||
1001 | * @dst: true selects the dest position, false the source | ||
1002 | * | ||
1003 | * Returns the position of the current active slot | ||
1004 | */ | ||
1005 | dma_addr_t edma_get_position(unsigned slot, bool dst) | ||
1006 | { | ||
1007 | u32 offs, ctlr = EDMA_CTLR(slot); | ||
1008 | |||
1009 | slot = EDMA_CHAN_SLOT(slot); | ||
1010 | |||
1011 | offs = PARM_OFFSET(slot); | ||
1012 | offs += dst ? PARM_DST : PARM_SRC; | ||
1013 | |||
1014 | return edma_read(ctlr, offs); | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
1018 | * edma_set_src_index - configure DMA source address indexing | ||
1019 | * @slot: parameter RAM slot being configured | ||
1020 | * @src_bidx: byte offset between source arrays in a frame | ||
1021 | * @src_cidx: byte offset between source frames in a block | ||
1022 | * | ||
1023 | * Offsets are specified to support either contiguous or discontiguous | ||
1024 | * memory transfers, or repeated access to a hardware register, as needed. | ||
1025 | * When accessing hardware registers, both offsets are normally zero. | ||
1026 | */ | ||
1027 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) | ||
1028 | { | ||
1029 | unsigned ctlr; | ||
1030 | |||
1031 | ctlr = EDMA_CTLR(slot); | ||
1032 | slot = EDMA_CHAN_SLOT(slot); | ||
1033 | |||
1034 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1035 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
1036 | 0xffff0000, src_bidx); | ||
1037 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, | ||
1038 | 0xffff0000, src_cidx); | ||
1039 | } | ||
1040 | } | ||
1041 | EXPORT_SYMBOL(edma_set_src_index); | ||
1042 | |||
1043 | /** | ||
1044 | * edma_set_dest_index - configure DMA destination address indexing | ||
1045 | * @slot: parameter RAM slot being configured | ||
1046 | * @dest_bidx: byte offset between destination arrays in a frame | ||
1047 | * @dest_cidx: byte offset between destination frames in a block | ||
1048 | * | ||
1049 | * Offsets are specified to support either contiguous or discontiguous | ||
1050 | * memory transfers, or repeated access to a hardware register, as needed. | ||
1051 | * When accessing hardware registers, both offsets are normally zero. | ||
1052 | */ | ||
1053 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) | ||
1054 | { | ||
1055 | unsigned ctlr; | ||
1056 | |||
1057 | ctlr = EDMA_CTLR(slot); | ||
1058 | slot = EDMA_CHAN_SLOT(slot); | ||
1059 | |||
1060 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1061 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
1062 | 0x0000ffff, dest_bidx << 16); | ||
1063 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, | ||
1064 | 0x0000ffff, dest_cidx << 16); | ||
1065 | } | ||
1066 | } | ||
1067 | EXPORT_SYMBOL(edma_set_dest_index); | ||
1068 | |||
1069 | /** | ||
1070 | * edma_set_transfer_params - configure DMA transfer parameters | ||
1071 | * @slot: parameter RAM slot being configured | ||
1072 | * @acnt: how many bytes per array (at least one) | ||
1073 | * @bcnt: how many arrays per frame (at least one) | ||
1074 | * @ccnt: how many frames per block (at least one) | ||
1075 | * @bcnt_rld: used only for A-Synchronized transfers; this specifies | ||
1076 | * the value to reload into bcnt when it decrements to zero | ||
1077 | * @sync_mode: ASYNC or ABSYNC | ||
1078 | * | ||
1079 | * See the EDMA3 documentation to understand how to configure and link | ||
1080 | * transfers using the fields in PaRAM slots. If you are not doing it | ||
1081 | * all at once with edma_write_slot(), you will use this routine | ||
1082 | * plus two calls each for source and destination, setting the initial | ||
1083 | * address and saying how to index that address. | ||
1084 | * | ||
1085 | * An example of an A-Synchronized transfer is a serial link using a | ||
1086 | * single word shift register. In that case, @acnt would be equal to | ||
1087 | * that word size; the serial controller issues a DMA synchronization | ||
1088 | * event to transfer each word, and memory access by the DMA transfer | ||
1089 | * controller will be word-at-a-time. | ||
1090 | * | ||
1091 | * An example of an AB-Synchronized transfer is a device using a FIFO. | ||
1092 | * In that case, @acnt equals the FIFO width and @bcnt equals its depth. | ||
1093 | * The controller with the FIFO issues DMA synchronization events when | ||
1094 | * the FIFO threshold is reached, and the DMA transfer controller will | ||
1095 | * transfer one frame to (or from) the FIFO. It will probably use | ||
1096 | * efficient burst modes to access memory. | ||
1097 | */ | ||
1098 | void edma_set_transfer_params(unsigned slot, | ||
1099 | u16 acnt, u16 bcnt, u16 ccnt, | ||
1100 | u16 bcnt_rld, enum sync_dimension sync_mode) | ||
1101 | { | ||
1102 | unsigned ctlr; | ||
1103 | |||
1104 | ctlr = EDMA_CTLR(slot); | ||
1105 | slot = EDMA_CHAN_SLOT(slot); | ||
1106 | |||
1107 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1108 | edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot, | ||
1109 | 0x0000ffff, bcnt_rld << 16); | ||
1110 | if (sync_mode == ASYNC) | ||
1111 | edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM); | ||
1112 | else | ||
1113 | edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM); | ||
1114 | /* Set the acount, bcount, ccount registers */ | ||
1115 | edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt); | ||
1116 | edma_parm_write(ctlr, PARM_CCNT, slot, ccnt); | ||
1117 | } | ||
1118 | } | ||
1119 | EXPORT_SYMBOL(edma_set_transfer_params); | ||
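Putting the PaRAM setters together for the A-synchronized case described above (a hedged sketch: 'ch', 'buf_dma', 'fifo_dma' and 'n_words' are assumed; one 4-byte word moves per hardware event):

	edma_set_src(ch, buf_dma, INCR, W8BIT);	/* width ignored for INCR */
	edma_set_src_index(ch, 4, 0);		/* advance 4 bytes per event */
	edma_set_dest(ch, fifo_dma, FIFO, W32BIT);
	edma_set_dest_index(ch, 0, 0);		/* FIFO address does not move */
	edma_set_transfer_params(ch, 4, n_words, 1, 0, ASYNC);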
1120 | |||
1121 | /** | ||
1122 | * edma_link - link one parameter RAM slot to another | ||
1123 | * @from: parameter RAM slot originating the link | ||
1124 | * @to: parameter RAM slot which is the link target | ||
1125 | * | ||
1126 | * The originating slot should not be part of any active DMA transfer. | ||
1127 | */ | ||
1128 | void edma_link(unsigned from, unsigned to) | ||
1129 | { | ||
1130 | unsigned ctlr_from, ctlr_to; | ||
1131 | |||
1132 | ctlr_from = EDMA_CTLR(from); | ||
1133 | from = EDMA_CHAN_SLOT(from); | ||
1134 | ctlr_to = EDMA_CTLR(to); | ||
1135 | to = EDMA_CHAN_SLOT(to); | ||
1136 | |||
1137 | if (from >= edma_cc[ctlr_from]->num_slots) | ||
1138 | return; | ||
1139 | if (to >= edma_cc[ctlr_to]->num_slots) | ||
1140 | return; | ||
1141 | edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000, | ||
1142 | PARM_OFFSET(to)); | ||
1143 | } | ||
1144 | EXPORT_SYMBOL(edma_link); | ||
1145 | |||
1146 | /** | ||
1147 | * edma_unlink - cut link from one parameter RAM slot | ||
1148 | * @from: parameter RAM slot originating the link | ||
1149 | * | ||
1150 | * The originating slot should not be part of any active DMA transfer. | ||
1151 | * Its link is set to 0xffff. | ||
1152 | */ | ||
1153 | void edma_unlink(unsigned from) | ||
1154 | { | ||
1155 | unsigned ctlr; | ||
1156 | |||
1157 | ctlr = EDMA_CTLR(from); | ||
1158 | from = EDMA_CHAN_SLOT(from); | ||
1159 | |||
1160 | if (from >= edma_cc[ctlr]->num_slots) | ||
1161 | return; | ||
1162 | edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff); | ||
1163 | } | ||
1164 | EXPORT_SYMBOL(edma_unlink); | ||
1165 | |||
1166 | /*-----------------------------------------------------------------------*/ | ||
1167 | |||
1168 | /* Parameter RAM operations (ii) -- read/write whole parameter sets */ | ||
1169 | |||
1170 | /** | ||
1171 | * edma_write_slot - write parameter RAM data for slot | ||
1172 | * @slot: number of parameter RAM slot being modified | ||
1173 | * @param: data to be written into parameter RAM slot | ||
1174 | * | ||
1175 | * Use this to assign all parameters of a transfer at once. This | ||
1176 | * allows more efficient setup of transfers than issuing multiple | ||
1177 | * calls to set up those parameters in small pieces, and provides | ||
1178 | * complete control over all transfer options. | ||
1179 | */ | ||
1180 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) | ||
1181 | { | ||
1182 | unsigned ctlr; | ||
1183 | |||
1184 | ctlr = EDMA_CTLR(slot); | ||
1185 | slot = EDMA_CHAN_SLOT(slot); | ||
1186 | |||
1187 | if (slot >= edma_cc[ctlr]->num_slots) | ||
1188 | return; | ||
1189 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param, | ||
1190 | PARM_SIZE); | ||
1191 | } | ||
1192 | EXPORT_SYMBOL(edma_write_slot); | ||
1193 | |||
1194 | /** | ||
1195 | * edma_read_slot - read parameter RAM data from slot | ||
1196 | * @slot: number of parameter RAM slot being copied | ||
1197 | * @param: where to store copy of parameter RAM data | ||
1198 | * | ||
1199 | * Use this to read data from a parameter RAM slot, perhaps to | ||
1200 | * save it as a template for later reuse. | ||
1201 | */ | ||
1202 | void edma_read_slot(unsigned slot, struct edmacc_param *param) | ||
1203 | { | ||
1204 | unsigned ctlr; | ||
1205 | |||
1206 | ctlr = EDMA_CTLR(slot); | ||
1207 | slot = EDMA_CHAN_SLOT(slot); | ||
1208 | |||
1209 | if (slot >= edma_cc[ctlr]->num_slots) | ||
1210 | return; | ||
1211 | memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
1212 | PARM_SIZE); | ||
1213 | } | ||
1214 | EXPORT_SYMBOL(edma_read_slot); | ||
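/*
 * Editorial sketch: the read-modify-write template pattern these two
 * helpers enable. "cfg_slot", "xfer_slot" and "nframes" are assumed.
 */
	struct edmacc_param tmpl;

	edma_read_slot(cfg_slot, &tmpl);	/* snapshot a known-good set */
	tmpl.ccnt = nframes;			/* adjust a single field */
	edma_write_slot(xfer_slot, &tmpl);	/* program it all at once */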
1215 | |||
1216 | /*-----------------------------------------------------------------------*/ | ||
1217 | |||
1218 | /* Various EDMA channel control operations */ | ||
1219 | |||
1220 | /** | ||
1221 | * edma_pause - pause dma on a channel | ||
1222 | * @channel: on which edma_start() has been called | ||
1223 | * | ||
1224 | * This temporarily disables EDMA hardware events on the specified channel, | ||
1225 | * preventing them from triggering new transfers on the channel's behalf. | ||
1226 | */ | ||
1227 | void edma_pause(unsigned channel) | ||
1228 | { | ||
1229 | unsigned ctlr; | ||
1230 | |||
1231 | ctlr = EDMA_CTLR(channel); | ||
1232 | channel = EDMA_CHAN_SLOT(channel); | ||
1233 | |||
1234 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1235 | unsigned int mask = BIT(channel & 0x1f); | ||
1236 | |||
1237 | edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask); | ||
1238 | } | ||
1239 | } | ||
1240 | EXPORT_SYMBOL(edma_pause); | ||
1241 | |||
1242 | /** | ||
1243 | * edma_resume - resumes dma on a paused channel | ||
1244 | * @channel: on which edma_pause() has been called | ||
1245 | * | ||
1246 | * This re-enables EDMA hardware events on the specified channel. | ||
1247 | */ | ||
1248 | void edma_resume(unsigned channel) | ||
1249 | { | ||
1250 | unsigned ctlr; | ||
1251 | |||
1252 | ctlr = EDMA_CTLR(channel); | ||
1253 | channel = EDMA_CHAN_SLOT(channel); | ||
1254 | |||
1255 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1256 | unsigned int mask = BIT(channel & 0x1f); | ||
1257 | |||
1258 | edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask); | ||
1259 | } | ||
1260 | } | ||
1261 | EXPORT_SYMBOL(edma_resume); | ||
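/*
 * Editorial sketch (assumed channel handle "ch"): bracket a CPU-side
 * fix-up of the buffer with pause/resume so hardware events arriving
 * in between do not start new transfers mid-update.
 */
	edma_pause(ch);
	/* ... patch descriptors or the data buffer ... */
	edma_resume(ch);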
1262 | |||
1263 | int edma_trigger_channel(unsigned channel) | ||
1264 | { | ||
1265 | unsigned ctlr; | ||
1266 | unsigned int mask; | ||
1267 | |||
1268 | ctlr = EDMA_CTLR(channel); | ||
1269 | channel = EDMA_CHAN_SLOT(channel); | ||
1270 | mask = BIT(channel & 0x1f); | ||
1271 | |||
1272 | edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask); | ||
1273 | |||
1274 | pr_debug("EDMA: ESR%d %08x\n", (channel >> 5), | ||
1275 | edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5))); | ||
1276 | return 0; | ||
1277 | } | ||
1278 | EXPORT_SYMBOL(edma_trigger_channel); | ||
1279 | |||
1280 | /** | ||
1281 | * edma_start - start dma on a channel | ||
1282 | * @channel: channel being activated | ||
1283 | * | ||
1284 | * Channels with event associations will be triggered by their hardware | ||
1285 | * events, and channels without such associations will be triggered by | ||
1286 | * software. (At this writing there is no interface for using software | ||
1287 | * triggers except with channels that don't support hardware triggers.) | ||
1288 | * | ||
1289 | * Returns zero on success, else negative errno. | ||
1290 | */ | ||
1291 | int edma_start(unsigned channel) | ||
1292 | { | ||
1293 | unsigned ctlr; | ||
1294 | |||
1295 | ctlr = EDMA_CTLR(channel); | ||
1296 | channel = EDMA_CHAN_SLOT(channel); | ||
1297 | |||
1298 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1299 | int j = channel >> 5; | ||
1300 | unsigned int mask = BIT(channel & 0x1f); | ||
1301 | |||
1302 | /* EDMA channels without event association */ | ||
1303 | if (test_bit(channel, edma_cc[ctlr]->edma_unused)) { | ||
1304 | pr_debug("EDMA: ESR%d %08x\n", j, | ||
1305 | edma_shadow0_read_array(ctlr, SH_ESR, j)); | ||
1306 | edma_shadow0_write_array(ctlr, SH_ESR, j, mask); | ||
1307 | return 0; | ||
1308 | } | ||
1309 | |||
1310 | /* EDMA channel with event association */ | ||
1311 | pr_debug("EDMA: ER%d %08x\n", j, | ||
1312 | edma_shadow0_read_array(ctlr, SH_ER, j)); | ||
1313 | /* Clear any pending event or error */ | ||
1314 | edma_write_array(ctlr, EDMA_ECR, j, mask); | ||
1315 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1316 | /* Clear any SER */ | ||
1317 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1318 | edma_shadow0_write_array(ctlr, SH_EESR, j, mask); | ||
1319 | pr_debug("EDMA: EER%d %08x\n", j, | ||
1320 | edma_shadow0_read_array(ctlr, SH_EER, j)); | ||
1321 | return 0; | ||
1322 | } | ||
1323 | |||
1324 | return -EINVAL; | ||
1325 | } | ||
1326 | EXPORT_SYMBOL(edma_start); | ||
1327 | |||
1328 | /** | ||
1329 | * edma_stop - stops dma on the channel passed | ||
1330 | * @channel: channel being deactivated | ||
1331 | * | ||
1332 | * Any active transfer on @channel is paused and | ||
1333 | * all pending hardware events are cleared. The current transfer | ||
1334 | * may not be resumed, and the channel's Parameter RAM should be | ||
1335 | * reinitialized before being reused. | ||
1336 | */ | ||
1337 | void edma_stop(unsigned channel) | ||
1338 | { | ||
1339 | unsigned ctlr; | ||
1340 | |||
1341 | ctlr = EDMA_CTLR(channel); | ||
1342 | channel = EDMA_CHAN_SLOT(channel); | ||
1343 | |||
1344 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1345 | int j = channel >> 5; | ||
1346 | unsigned int mask = BIT(channel & 0x1f); | ||
1347 | |||
1348 | edma_shadow0_write_array(ctlr, SH_EECR, j, mask); | ||
1349 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); | ||
1350 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1351 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1352 | |||
1353 | /* clear possibly pending completion interrupt */ | ||
1354 | edma_shadow0_write_array(ctlr, SH_ICR, j, mask); | ||
1355 | |||
1356 | pr_debug("EDMA: EER%d %08x\n", j, | ||
1357 | edma_shadow0_read_array(ctlr, SH_EER, j)); | ||
1358 | |||
1359 | /* REVISIT: consider guarding against inappropriate event | ||
1360 | * chaining by overwriting with dummy_paramset. | ||
1361 | */ | ||
1362 | } | ||
1363 | } | ||
1364 | EXPORT_SYMBOL(edma_stop); | ||
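/*
 * Editorial sketch of the channel lifecycle built from the calls above,
 * with an assumed channel handle. For a channel without an event
 * association, edma_start() fires via ESR, and edma_trigger_channel()
 * can kick each subsequent transfer by hand.
 */
	if (edma_start(ch) == 0) {
		/* transfers run here, hardware- or software-triggered */
		edma_stop(ch);	/* then: disable events, clear pending state */
	}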
1365 | |||
1366 | /****************************************************************************** | ||
1367 | * | ||
1368 | * It cleans the channel's parameter RAM entry and brings the EDMA back to | ||
1369 | * its initial state if the media has been removed before the transfer has | ||
1370 | * finished. It is useful for removable media. | ||
1371 | * | ||
1372 | * Arguments: | ||
1373 | *	channel - channel number | ||
1374 | * | ||
1375 | * FIXME this should not be needed ... edma_stop() should suffice. | ||
1376 | * | ||
1377 | *****************************************************************************/ | ||
1378 | |||
1379 | void edma_clean_channel(unsigned channel) | ||
1380 | { | ||
1381 | unsigned ctlr; | ||
1382 | |||
1383 | ctlr = EDMA_CTLR(channel); | ||
1384 | channel = EDMA_CHAN_SLOT(channel); | ||
1385 | |||
1386 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1387 | int j = (channel >> 5); | ||
1388 | unsigned int mask = BIT(channel & 0x1f); | ||
1389 | |||
1390 | pr_debug("EDMA: EMR%d %08x\n", j, | ||
1391 | edma_read_array(ctlr, EDMA_EMR, j)); | ||
1392 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); | ||
1393 | /* Clear the corresponding EMR bits */ | ||
1394 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1395 | /* Clear any SER */ | ||
1396 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1397 | edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); | ||
1398 | } | ||
1399 | } | ||
1400 | EXPORT_SYMBOL(edma_clean_channel); | ||
1401 | |||
1402 | /* | ||
1403 | * edma_clear_event - clear an outstanding event on the DMA channel | ||
1404 | * Arguments: | ||
1405 | * channel - channel number | ||
1406 | */ | ||
1407 | void edma_clear_event(unsigned channel) | ||
1408 | { | ||
1409 | unsigned ctlr; | ||
1410 | |||
1411 | ctlr = EDMA_CTLR(channel); | ||
1412 | channel = EDMA_CHAN_SLOT(channel); | ||
1413 | |||
1414 | if (channel >= edma_cc[ctlr]->num_channels) | ||
1415 | return; | ||
1416 | if (channel < 32) | ||
1417 | edma_write(ctlr, EDMA_ECR, BIT(channel)); | ||
1418 | else | ||
1419 | edma_write(ctlr, EDMA_ECRH, BIT(channel - 32)); | ||
1420 | } | ||
1421 | EXPORT_SYMBOL(edma_clear_event); | ||
1422 | |||
1423 | /* | ||
1424 | * edma_assign_channel_eventq - move given channel to desired eventq | ||
1425 | * Arguments: | ||
1426 | * channel - channel number | ||
1427 | * eventq_no - queue to move the channel | ||
1428 | * | ||
1429 | * Can be used to move a channel to a selected event queue. | ||
1430 | */ | ||
1431 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no) | ||
1432 | { | ||
1433 | unsigned ctlr; | ||
1434 | |||
1435 | ctlr = EDMA_CTLR(channel); | ||
1436 | channel = EDMA_CHAN_SLOT(channel); | ||
1437 | |||
1438 | if (channel >= edma_cc[ctlr]->num_channels) | ||
1439 | return; | ||
1440 | |||
1441 | /* default to low priority queue */ | ||
1442 | if (eventq_no == EVENTQ_DEFAULT) | ||
1443 | eventq_no = edma_cc[ctlr]->default_queue; | ||
1444 | if (eventq_no >= edma_cc[ctlr]->num_tc) | ||
1445 | return; | ||
1446 | |||
1447 | map_dmach_queue(ctlr, channel, eventq_no); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL(edma_assign_channel_eventq); | ||
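/*
 * Editorial sketch (assumed channel handle): pin a latency-sensitive
 * channel, e.g. audio TX, to the highest-priority event queue instead
 * of the controller's default queue.
 */
	edma_assign_channel_eventq(ch, EVENTQ_0);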
1450 | |||
1451 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | ||
1452 | struct edma *edma_cc, int cc_id) | ||
1453 | { | ||
1454 | int i; | ||
1455 | u32 value, cccfg; | ||
1456 | s8 (*queue_priority_map)[2]; | ||
1457 | |||
1458 | /* Decode the eDMA3 configuration from CCCFG register */ | ||
1459 | cccfg = edma_read(cc_id, EDMA_CCCFG); | ||
1460 | |||
1461 | value = GET_NUM_REGN(cccfg); | ||
1462 | edma_cc->num_region = BIT(value); | ||
1463 | |||
1464 | value = GET_NUM_DMACH(cccfg); | ||
1465 | edma_cc->num_channels = BIT(value + 1); | ||
1466 | |||
1467 | value = GET_NUM_PAENTRY(cccfg); | ||
1468 | edma_cc->num_slots = BIT(value + 4); | ||
1469 | |||
1470 | value = GET_NUM_EVQUE(cccfg); | ||
1471 | edma_cc->num_tc = value + 1; | ||
1472 | |||
1473 | dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id, | ||
1474 | cccfg); | ||
1475 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); | ||
1476 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); | ||
1477 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); | ||
1478 | dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc); | ||
1479 | |||
1480 | /* Nothing needs to be done if the queue priority mapping is provided */ | ||
1481 | if (pdata->queue_priority_mapping) | ||
1482 | return 0; | ||
1483 | |||
1484 | /* | ||
1485 | * Configure TC/queue priority as follows: | ||
1486 | * Q0 - priority 0 | ||
1487 | * Q1 - priority 1 | ||
1488 | * Q2 - priority 2 | ||
1489 | * ... | ||
1490 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1491 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1492 | * the lowest priority. | ||
1493 | */ | ||
1494 | queue_priority_map = devm_kzalloc(dev, | ||
1495 | (edma_cc->num_tc + 1) * sizeof(s8), | ||
1496 | GFP_KERNEL); | ||
1497 | if (!queue_priority_map) | ||
1498 | return -ENOMEM; | ||
1499 | |||
1500 | for (i = 0; i < edma_cc->num_tc; i++) { | ||
1501 | queue_priority_map[i][0] = i; | ||
1502 | queue_priority_map[i][1] = i; | ||
1503 | } | ||
1504 | queue_priority_map[i][0] = -1; | ||
1505 | queue_priority_map[i][1] = -1; | ||
1506 | |||
1507 | pdata->queue_priority_mapping = queue_priority_map; | ||
1508 | /* Default queue has the lowest priority */ | ||
1509 | pdata->default_queue = i - 1; | ||
1510 | |||
1511 | return 0; | ||
1512 | } | ||
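/*
 * Worked example (editorial) of the CCCFG decode above: if the fields
 * read back as NUM_DMACH = 5, NUM_PAENTRY = 4, NUM_EVQUE = 2 and
 * NUM_REGN = 2, then
 *
 *	num_channels = BIT(5 + 1) = 64
 *	num_slots    = BIT(4 + 4) = 256
 *	num_tc       = 2 + 1      = 3
 *	num_region   = BIT(2)     = 4
 *
 * i.e. a 64-channel, 256-slot controller with three transfer
 * controllers, a typical eDMA3 instance on this class of SoC.
 */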
1513 | |||
1514 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) | ||
1515 | |||
1516 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, | ||
1517 | struct edma_soc_info *pdata, size_t sz) | ||
1518 | { | ||
1519 | const char pname[] = "ti,edma-xbar-event-map"; | ||
1520 | struct resource res; | ||
1521 | void __iomem *xbar; | ||
1522 | s16 (*xbar_chans)[2]; | ||
1523 | size_t nelm = sz / sizeof(s16); | ||
1524 | u32 shift, offset, mux; | ||
1525 | int ret, i; | ||
1526 | |||
1527 | xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL); | ||
1528 | if (!xbar_chans) | ||
1529 | return -ENOMEM; | ||
1530 | |||
1531 | ret = of_address_to_resource(node, 1, &res); | ||
1532 | if (ret) | ||
1533 | return -ENOMEM; | ||
1534 | |||
1535 | xbar = devm_ioremap(dev, res.start, resource_size(&res)); | ||
1536 | if (!xbar) | ||
1537 | return -ENOMEM; | ||
1538 | |||
1539 | ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm); | ||
1540 | if (ret) | ||
1541 | return -EIO; | ||
1542 | |||
1543 | /* Invalidate last entry for the other user of this mess */ | ||
1544 | nelm >>= 1; | ||
1545 | xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1; | ||
1546 | |||
1547 | for (i = 0; i < nelm; i++) { | ||
1548 | shift = (xbar_chans[i][1] & 0x03) << 3; | ||
1549 | offset = xbar_chans[i][1] & 0xfffffffc; | ||
1550 | mux = readl(xbar + offset); | ||
1551 | mux &= ~(0xff << shift); | ||
1552 | mux |= xbar_chans[i][0] << shift; | ||
1553 | writel(mux, (xbar + offset)); | ||
1554 | } | ||
1555 | |||
1556 | pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; | ||
1557 | return 0; | ||
1558 | } | ||
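/*
 * Worked example (editorial) of the mux arithmetic above, for an
 * assumed pair {xbar event 12 -> channel 7}: each 32-bit crossbar
 * register packs four 8-bit mux fields, so
 *
 *	shift  = (7 & 0x03) << 3 = 24	(byte lane 3)
 *	offset = 7 & ~0x03       = 4	(second register)
 *
 * and the readl/modify/writel sequence rewrites only byte lane 3 of
 * the register at xbar + 4 with the value 12.
 */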
1559 | |||
1560 | static int edma_of_parse_dt(struct device *dev, | ||
1561 | struct device_node *node, | ||
1562 | struct edma_soc_info *pdata) | ||
1563 | { | ||
1564 | int ret = 0; | ||
1565 | struct property *prop; | ||
1566 | size_t sz; | ||
1567 | struct edma_rsv_info *rsv_info; | ||
1568 | |||
1569 | rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); | ||
1570 | if (!rsv_info) | ||
1571 | return -ENOMEM; | ||
1572 | pdata->rsv = rsv_info; | ||
1573 | |||
1574 | prop = of_find_property(node, "ti,edma-xbar-event-map", &sz); | ||
1575 | if (prop) | ||
1576 | ret = edma_xbar_event_map(dev, node, pdata, sz); | ||
1577 | |||
1578 | return ret; | ||
1579 | } | ||
1580 | |||
1581 | static struct of_dma_filter_info edma_filter_info = { | ||
1582 | .filter_fn = edma_filter_fn, | ||
1583 | }; | ||
1584 | |||
1585 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1586 | struct device_node *node) | ||
1587 | { | ||
1588 | struct edma_soc_info *info; | ||
1589 | int ret; | ||
1590 | |||
1591 | info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); | ||
1592 | if (!info) | ||
1593 | return ERR_PTR(-ENOMEM); | ||
1594 | |||
1595 | ret = edma_of_parse_dt(dev, node, info); | ||
1596 | if (ret) | ||
1597 | return ERR_PTR(ret); | ||
1598 | |||
1599 | dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap); | ||
1600 | dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap); | ||
1601 | of_dma_controller_register(dev->of_node, of_dma_simple_xlate, | ||
1602 | &edma_filter_info); | ||
1603 | |||
1604 | return info; | ||
1605 | } | ||
1606 | #else | ||
1607 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1608 | struct device_node *node) | ||
1609 | { | ||
1610 | return ERR_PTR(-ENOSYS); | ||
1611 | } | ||
1612 | #endif | ||
1613 | |||
1614 | static int edma_probe(struct platform_device *pdev) | ||
1615 | { | ||
1616 | struct edma_soc_info **info = pdev->dev.platform_data; | ||
1617 | struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL}; | ||
1618 | s8 (*queue_priority_mapping)[2]; | ||
1619 | int i, j, off, ln, found = 0; | ||
1620 | int status = -1; | ||
1621 | const s16 (*rsv_chans)[2]; | ||
1622 | const s16 (*rsv_slots)[2]; | ||
1623 | const s16 (*xbar_chans)[2]; | ||
1624 | int irq[EDMA_MAX_CC] = {0, 0}; | ||
1625 | int err_irq[EDMA_MAX_CC] = {0, 0}; | ||
1626 | struct resource *r[EDMA_MAX_CC] = {NULL}; | ||
1627 | struct resource res[EDMA_MAX_CC]; | ||
1628 | char res_name[10]; | ||
1629 | struct device_node *node = pdev->dev.of_node; | ||
1630 | struct device *dev = &pdev->dev; | ||
1631 | int ret; | ||
1632 | struct platform_device_info edma_dev_info = { | ||
1633 | .name = "edma-dma-engine", | ||
1634 | .dma_mask = DMA_BIT_MASK(32), | ||
1635 | .parent = &pdev->dev, | ||
1636 | }; | ||
1637 | |||
1638 | if (node) { | ||
1639 | /* Check if this is a second instance registered */ | ||
1640 | if (arch_num_cc) { | ||
1641 | dev_err(dev, "only one EDMA instance is supported via DT\n"); | ||
1642 | return -ENODEV; | ||
1643 | } | ||
1644 | |||
1645 | ninfo[0] = edma_setup_info_from_dt(dev, node); | ||
1646 | if (IS_ERR(ninfo[0])) { | ||
1647 | dev_err(dev, "failed to get DT data\n"); | ||
1648 | return PTR_ERR(ninfo[0]); | ||
1649 | } | ||
1650 | |||
1651 | info = ninfo; | ||
1652 | } | ||
1653 | |||
1654 | if (!info) | ||
1655 | return -ENODEV; | ||
1656 | |||
1657 | pm_runtime_enable(dev); | ||
1658 | ret = pm_runtime_get_sync(dev); | ||
1659 | if (ret < 0) { | ||
1660 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
1661 | return ret; | ||
1662 | } | ||
1663 | |||
1664 | for (j = 0; j < EDMA_MAX_CC; j++) { | ||
1665 | if (!info[j]) { | ||
1666 | if (!found) | ||
1667 | return -ENODEV; | ||
1668 | break; | ||
1669 | } | ||
1670 | if (node) { | ||
1671 | ret = of_address_to_resource(node, j, &res[j]); | ||
1672 | if (!ret) | ||
1673 | r[j] = &res[j]; | ||
1674 | } else { | ||
1675 | sprintf(res_name, "edma_cc%d", j); | ||
1676 | r[j] = platform_get_resource_byname(pdev, | ||
1677 | IORESOURCE_MEM, | ||
1678 | res_name); | ||
1679 | } | ||
1680 | if (!r[j]) { | ||
1681 | if (found) | ||
1682 | break; | ||
1683 | else | ||
1684 | return -ENODEV; | ||
1685 | } else { | ||
1686 | found = 1; | ||
1687 | } | ||
1688 | |||
1689 | edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]); | ||
1690 | if (IS_ERR(edmacc_regs_base[j])) | ||
1691 | return PTR_ERR(edmacc_regs_base[j]); | ||
1692 | |||
1693 | edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma), | ||
1694 | GFP_KERNEL); | ||
1695 | if (!edma_cc[j]) | ||
1696 | return -ENOMEM; | ||
1697 | |||
1698 | /* Get eDMA3 configuration from IP */ | ||
1699 | ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j); | ||
1700 | if (ret) | ||
1701 | return ret; | ||
1702 | |||
1703 | edma_cc[j]->default_queue = info[j]->default_queue; | ||
1704 | |||
1705 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", | ||
1706 | edmacc_regs_base[j]); | ||
1707 | |||
1708 | for (i = 0; i < edma_cc[j]->num_slots; i++) | ||
1709 | memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i), | ||
1710 | &dummy_paramset, PARM_SIZE); | ||
1711 | |||
1712 | /* Mark all channels as unused */ | ||
1713 | memset(edma_cc[j]->edma_unused, 0xff, | ||
1714 | sizeof(edma_cc[j]->edma_unused)); | ||
1715 | |||
1716 | if (info[j]->rsv) { | ||
1717 | |||
1718 | /* Clear the reserved channels in unused list */ | ||
1719 | rsv_chans = info[j]->rsv->rsv_chans; | ||
1720 | if (rsv_chans) { | ||
1721 | for (i = 0; rsv_chans[i][0] != -1; i++) { | ||
1722 | off = rsv_chans[i][0]; | ||
1723 | ln = rsv_chans[i][1]; | ||
1724 | clear_bits(off, ln, | ||
1725 | edma_cc[j]->edma_unused); | ||
1726 | } | ||
1727 | } | ||
1728 | |||
1729 | /* Set the reserved slots in inuse list */ | ||
1730 | rsv_slots = info[j]->rsv->rsv_slots; | ||
1731 | if (rsv_slots) { | ||
1732 | for (i = 0; rsv_slots[i][0] != -1; i++) { | ||
1733 | off = rsv_slots[i][0]; | ||
1734 | ln = rsv_slots[i][1]; | ||
1735 | set_bits(off, ln, | ||
1736 | edma_cc[j]->edma_inuse); | ||
1737 | } | ||
1738 | } | ||
1739 | } | ||
1740 | |||
1741 | /* Clear the xbar mapped channels in unused list */ | ||
1742 | xbar_chans = info[j]->xbar_chans; | ||
1743 | if (xbar_chans) { | ||
1744 | for (i = 0; xbar_chans[i][1] != -1; i++) { | ||
1745 | off = xbar_chans[i][1]; | ||
1746 | clear_bits(off, 1, | ||
1747 | edma_cc[j]->edma_unused); | ||
1748 | } | ||
1749 | } | ||
1750 | |||
1751 | if (node) { | ||
1752 | irq[j] = irq_of_parse_and_map(node, 0); | ||
1753 | err_irq[j] = irq_of_parse_and_map(node, 2); | ||
1754 | } else { | ||
1755 | char irq_name[10]; | ||
1756 | |||
1757 | sprintf(irq_name, "edma%d", j); | ||
1758 | irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1759 | |||
1760 | sprintf(irq_name, "edma%d_err", j); | ||
1761 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1762 | } | ||
1763 | edma_cc[j]->irq_res_start = irq[j]; | ||
1764 | edma_cc[j]->irq_res_end = err_irq[j]; | ||
1765 | |||
1766 | status = devm_request_irq(dev, irq[j], dma_irq_handler, 0, | ||
1767 | "edma", dev); | ||
1768 | if (status < 0) { | ||
1769 | dev_dbg(&pdev->dev, | ||
1770 | "devm_request_irq %d failed --> %d\n", | ||
1771 | irq[j], status); | ||
1772 | return status; | ||
1773 | } | ||
1774 | |||
1775 | status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0, | ||
1776 | "edma_error", dev); | ||
1777 | if (status < 0) { | ||
1778 | dev_dbg(&pdev->dev, | ||
1779 | "devm_request_irq %d failed --> %d\n", | ||
1780 | err_irq[j], status); | ||
1781 | return status; | ||
1782 | } | ||
1783 | |||
1784 | for (i = 0; i < edma_cc[j]->num_channels; i++) | ||
1785 | map_dmach_queue(j, i, info[j]->default_queue); | ||
1786 | |||
1787 | queue_priority_mapping = info[j]->queue_priority_mapping; | ||
1788 | |||
1789 | /* Event queue priority mapping */ | ||
1790 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1791 | assign_priority_to_queue(j, | ||
1792 | queue_priority_mapping[i][0], | ||
1793 | queue_priority_mapping[i][1]); | ||
1794 | |||
1795 | /* Map the channel to param entry if channel mapping logic | ||
1796 | * exists | ||
1797 | */ | ||
1798 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1799 | map_dmach_param(j); | ||
1800 | |||
1801 | for (i = 0; i < edma_cc[j]->num_region; i++) { | ||
1802 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); | ||
1803 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); | ||
1804 | edma_write_array(j, EDMA_QRAE, i, 0x0); | ||
1805 | } | ||
1806 | edma_cc[j]->info = info[j]; | ||
1807 | arch_num_cc++; | ||
1808 | |||
1809 | edma_dev_info.id = j; | ||
1810 | platform_device_register_full(&edma_dev_info); | ||
1811 | } | ||
1812 | |||
1813 | return 0; | ||
1814 | } | ||
1815 | |||
1816 | #ifdef CONFIG_PM_SLEEP | ||
1817 | static int edma_pm_resume(struct device *dev) | ||
1818 | { | ||
1819 | int i, j; | ||
1820 | |||
1821 | for (j = 0; j < arch_num_cc; j++) { | ||
1822 | struct edma *cc = edma_cc[j]; | ||
1823 | |||
1824 | s8 (*queue_priority_mapping)[2]; | ||
1825 | |||
1826 | queue_priority_mapping = cc->info->queue_priority_mapping; | ||
1827 | |||
1828 | /* Event queue priority mapping */ | ||
1829 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1830 | assign_priority_to_queue(j, | ||
1831 | queue_priority_mapping[i][0], | ||
1832 | queue_priority_mapping[i][1]); | ||
1833 | |||
1834 | /* | ||
1835 | * Map the channel to param entry if channel mapping logic | ||
1836 | * exists | ||
1837 | */ | ||
1838 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1839 | map_dmach_param(j); | ||
1840 | |||
1841 | for (i = 0; i < cc->num_channels; i++) { | ||
1842 | if (test_bit(i, cc->edma_inuse)) { | ||
1843 | /* ensure access through shadow region 0 */ | ||
1844 | edma_or_array2(j, EDMA_DRAE, 0, i >> 5, | ||
1845 | BIT(i & 0x1f)); | ||
1846 | |||
1847 | setup_dma_interrupt(i, | ||
1848 | cc->intr_data[i].callback, | ||
1849 | cc->intr_data[i].data); | ||
1850 | } | ||
1851 | } | ||
1852 | } | ||
1853 | |||
1854 | return 0; | ||
1855 | } | ||
1856 | #endif | ||
1857 | |||
1858 | static const struct dev_pm_ops edma_pm_ops = { | ||
1859 | SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume) | ||
1860 | }; | ||
1861 | |||
1862 | static struct platform_driver edma_driver = { | ||
1863 | .driver = { | ||
1864 | .name = "edma", | ||
1865 | .pm = &edma_pm_ops, | ||
1866 | .of_match_table = edma_of_ids, | ||
1867 | }, | ||
1868 | .probe = edma_probe, | ||
1869 | }; | ||
1870 | |||
1871 | static int __init edma_init(void) | ||
1872 | { | ||
1873 | return platform_driver_probe(&edma_driver, edma_probe); | ||
1874 | } | ||
1875 | arch_initcall(edma_init); | ||
1876 | |||
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 29e08aac8294..28c90bc372bd 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -147,150 +147,118 @@ static s8 da850_queue_priority_mapping[][2] = { | |||
147 | {-1, -1} | 147 | {-1, -1} |
148 | }; | 148 | }; |
149 | 149 | ||
150 | static struct edma_soc_info da830_edma_cc0_info = { | 150 | static struct edma_soc_info da8xx_edma0_pdata = { |
151 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 151 | .queue_priority_mapping = da8xx_queue_priority_mapping, |
152 | .default_queue = EVENTQ_1, | 152 | .default_queue = EVENTQ_1, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = { | 155 | static struct edma_soc_info da850_edma1_pdata = { |
156 | &da830_edma_cc0_info, | 156 | .queue_priority_mapping = da850_queue_priority_mapping, |
157 | .default_queue = EVENTQ_0, | ||
157 | }; | 158 | }; |
158 | 159 | ||
159 | static struct edma_soc_info da850_edma_cc_info[] = { | 160 | static struct resource da8xx_edma0_resources[] = { |
160 | { | 161 | { |
161 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 162 | .name = "edma3_cc", |
162 | .default_queue = EVENTQ_1, | ||
163 | }, | ||
164 | { | ||
165 | .queue_priority_mapping = da850_queue_priority_mapping, | ||
166 | .default_queue = EVENTQ_0, | ||
167 | }, | ||
168 | }; | ||
169 | |||
170 | static struct edma_soc_info *da850_edma_info[EDMA_MAX_CC] = { | ||
171 | &da850_edma_cc_info[0], | ||
172 | &da850_edma_cc_info[1], | ||
173 | }; | ||
174 | |||
175 | static struct resource da830_edma_resources[] = { | ||
176 | { | ||
177 | .name = "edma_cc0", | ||
178 | .start = DA8XX_TPCC_BASE, | 163 | .start = DA8XX_TPCC_BASE, |
179 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, | 164 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, |
180 | .flags = IORESOURCE_MEM, | 165 | .flags = IORESOURCE_MEM, |
181 | }, | 166 | }, |
182 | { | 167 | { |
183 | .name = "edma_tc0", | 168 | .name = "edma3_tc0", |
184 | .start = DA8XX_TPTC0_BASE, | 169 | .start = DA8XX_TPTC0_BASE, |
185 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, | 170 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, |
186 | .flags = IORESOURCE_MEM, | 171 | .flags = IORESOURCE_MEM, |
187 | }, | 172 | }, |
188 | { | 173 | { |
189 | .name = "edma_tc1", | 174 | .name = "edma3_tc1", |
190 | .start = DA8XX_TPTC1_BASE, | 175 | .start = DA8XX_TPTC1_BASE, |
191 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, | 176 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, |
192 | .flags = IORESOURCE_MEM, | 177 | .flags = IORESOURCE_MEM, |
193 | }, | 178 | }, |
194 | { | 179 | { |
195 | .name = "edma0", | 180 | .name = "edma3_ccint", |
196 | .start = IRQ_DA8XX_CCINT0, | 181 | .start = IRQ_DA8XX_CCINT0, |
197 | .flags = IORESOURCE_IRQ, | 182 | .flags = IORESOURCE_IRQ, |
198 | }, | 183 | }, |
199 | { | 184 | { |
200 | .name = "edma0_err", | 185 | .name = "edma3_ccerrint", |
201 | .start = IRQ_DA8XX_CCERRINT, | 186 | .start = IRQ_DA8XX_CCERRINT, |
202 | .flags = IORESOURCE_IRQ, | 187 | .flags = IORESOURCE_IRQ, |
203 | }, | 188 | }, |
204 | }; | 189 | }; |
205 | 190 | ||
206 | static struct resource da850_edma_resources[] = { | 191 | static struct resource da850_edma1_resources[] = { |
207 | { | ||
208 | .name = "edma_cc0", | ||
209 | .start = DA8XX_TPCC_BASE, | ||
210 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, | ||
211 | .flags = IORESOURCE_MEM, | ||
212 | }, | ||
213 | { | ||
214 | .name = "edma_tc0", | ||
215 | .start = DA8XX_TPTC0_BASE, | ||
216 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, | ||
217 | .flags = IORESOURCE_MEM, | ||
218 | }, | ||
219 | { | ||
220 | .name = "edma_tc1", | ||
221 | .start = DA8XX_TPTC1_BASE, | ||
222 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, | ||
223 | .flags = IORESOURCE_MEM, | ||
224 | }, | ||
225 | { | 192 | { |
226 | .name = "edma_cc1", | 193 | .name = "edma3_cc", |
227 | .start = DA850_TPCC1_BASE, | 194 | .start = DA850_TPCC1_BASE, |
228 | .end = DA850_TPCC1_BASE + SZ_32K - 1, | 195 | .end = DA850_TPCC1_BASE + SZ_32K - 1, |
229 | .flags = IORESOURCE_MEM, | 196 | .flags = IORESOURCE_MEM, |
230 | }, | 197 | }, |
231 | { | 198 | { |
232 | .name = "edma_tc2", | 199 | .name = "edma3_tc0", |
233 | .start = DA850_TPTC2_BASE, | 200 | .start = DA850_TPTC2_BASE, |
234 | .end = DA850_TPTC2_BASE + SZ_1K - 1, | 201 | .end = DA850_TPTC2_BASE + SZ_1K - 1, |
235 | .flags = IORESOURCE_MEM, | 202 | .flags = IORESOURCE_MEM, |
236 | }, | 203 | }, |
237 | { | 204 | { |
238 | .name = "edma0", | 205 | .name = "edma3_ccint", |
239 | .start = IRQ_DA8XX_CCINT0, | ||
240 | .flags = IORESOURCE_IRQ, | ||
241 | }, | ||
242 | { | ||
243 | .name = "edma0_err", | ||
244 | .start = IRQ_DA8XX_CCERRINT, | ||
245 | .flags = IORESOURCE_IRQ, | ||
246 | }, | ||
247 | { | ||
248 | .name = "edma1", | ||
249 | .start = IRQ_DA850_CCINT1, | 206 | .start = IRQ_DA850_CCINT1, |
250 | .flags = IORESOURCE_IRQ, | 207 | .flags = IORESOURCE_IRQ, |
251 | }, | 208 | }, |
252 | { | 209 | { |
253 | .name = "edma1_err", | 210 | .name = "edma3_ccerrint", |
254 | .start = IRQ_DA850_CCERRINT1, | 211 | .start = IRQ_DA850_CCERRINT1, |
255 | .flags = IORESOURCE_IRQ, | 212 | .flags = IORESOURCE_IRQ, |
256 | }, | 213 | }, |
257 | }; | 214 | }; |
258 | 215 | ||
259 | static struct platform_device da830_edma_device = { | 216 | static const struct platform_device_info da8xx_edma0_device __initconst = { |
260 | .name = "edma", | 217 | .name = "edma", |
261 | .id = -1, | 218 | .id = 0, |
262 | .dev = { | 219 | .dma_mask = DMA_BIT_MASK(32), |
263 | .platform_data = da830_edma_info, | 220 | .res = da8xx_edma0_resources, |
264 | }, | 221 | .num_res = ARRAY_SIZE(da8xx_edma0_resources), |
265 | .num_resources = ARRAY_SIZE(da830_edma_resources), | 222 | .data = &da8xx_edma0_pdata, |
266 | .resource = da830_edma_resources, | 223 | .size_data = sizeof(da8xx_edma0_pdata), |
267 | }; | 224 | }; |
268 | 225 | ||
269 | static struct platform_device da850_edma_device = { | 226 | static const struct platform_device_info da850_edma1_device __initconst = { |
270 | .name = "edma", | 227 | .name = "edma", |
271 | .id = -1, | 228 | .id = 1, |
272 | .dev = { | 229 | .dma_mask = DMA_BIT_MASK(32), |
273 | .platform_data = da850_edma_info, | 230 | .res = da850_edma1_resources, |
274 | }, | 231 | .num_res = ARRAY_SIZE(da850_edma1_resources), |
275 | .num_resources = ARRAY_SIZE(da850_edma_resources), | 232 | .data = &da850_edma1_pdata, |
276 | .resource = da850_edma_resources, | 233 | .size_data = sizeof(da850_edma1_pdata), |
277 | }; | 234 | }; |
278 | 235 | ||
279 | int __init da830_register_edma(struct edma_rsv_info *rsv) | 236 | int __init da830_register_edma(struct edma_rsv_info *rsv) |
280 | { | 237 | { |
281 | da830_edma_cc0_info.rsv = rsv; | 238 | struct platform_device *edma_pdev; |
239 | |||
240 | da8xx_edma0_pdata.rsv = rsv; | ||
282 | 241 | ||
283 | return platform_device_register(&da830_edma_device); | 242 | edma_pdev = platform_device_register_full(&da8xx_edma0_device); |
243 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
284 | } | 244 | } |
285 | 245 | ||
286 | int __init da850_register_edma(struct edma_rsv_info *rsv[2]) | 246 | int __init da850_register_edma(struct edma_rsv_info *rsv[2]) |
287 | { | 247 | { |
248 | struct platform_device *edma_pdev; | ||
249 | |||
288 | if (rsv) { | 250 | if (rsv) { |
289 | da850_edma_cc_info[0].rsv = rsv[0]; | 251 | da8xx_edma0_pdata.rsv = rsv[0]; |
290 | da850_edma_cc_info[1].rsv = rsv[1]; | 252 | da850_edma1_pdata.rsv = rsv[1]; |
291 | } | 253 | } |
292 | 254 | ||
293 | return platform_device_register(&da850_edma_device); | 255 | edma_pdev = platform_device_register_full(&da8xx_edma0_device); |
256 | if (IS_ERR(edma_pdev)) { | ||
257 | pr_warn("%s: Failed to register eDMA0\n", __func__); | ||
258 | return PTR_ERR(edma_pdev); | ||
259 | } | ||
260 | edma_pdev = platform_device_register_full(&da850_edma1_device); | ||
261 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
294 | } | 262 | } |
295 | 263 | ||
296 | static struct resource da8xx_i2c_resources0[] = { | 264 | static struct resource da8xx_i2c_resources0[] = { |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 567dc56fe8cd..609950b8c191 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -569,61 +569,58 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
569 | 569 | ||
570 | /*----------------------------------------------------------------------*/ | 570 | /*----------------------------------------------------------------------*/ |
571 | 571 | ||
572 | static s8 | 572 | static s8 queue_priority_mapping[][2] = { |
573 | queue_priority_mapping[][2] = { | ||
574 | /* {event queue no, Priority} */ | 573 | /* {event queue no, Priority} */ |
575 | {0, 3}, | 574 | {0, 3}, |
576 | {1, 7}, | 575 | {1, 7}, |
577 | {-1, -1}, | 576 | {-1, -1}, |
578 | }; | 577 | }; |
579 | 578 | ||
580 | static struct edma_soc_info edma_cc0_info = { | 579 | static struct edma_soc_info dm355_edma_pdata = { |
581 | .queue_priority_mapping = queue_priority_mapping, | 580 | .queue_priority_mapping = queue_priority_mapping, |
582 | .default_queue = EVENTQ_1, | 581 | .default_queue = EVENTQ_1, |
583 | }; | 582 | }; |
584 | 583 | ||
585 | static struct edma_soc_info *dm355_edma_info[EDMA_MAX_CC] = { | ||
586 | &edma_cc0_info, | ||
587 | }; | ||
588 | |||
589 | static struct resource edma_resources[] = { | 584 | static struct resource edma_resources[] = { |
590 | { | 585 | { |
591 | .name = "edma_cc0", | 586 | .name = "edma3_cc", |
592 | .start = 0x01c00000, | 587 | .start = 0x01c00000, |
593 | .end = 0x01c00000 + SZ_64K - 1, | 588 | .end = 0x01c00000 + SZ_64K - 1, |
594 | .flags = IORESOURCE_MEM, | 589 | .flags = IORESOURCE_MEM, |
595 | }, | 590 | }, |
596 | { | 591 | { |
597 | .name = "edma_tc0", | 592 | .name = "edma3_tc0", |
598 | .start = 0x01c10000, | 593 | .start = 0x01c10000, |
599 | .end = 0x01c10000 + SZ_1K - 1, | 594 | .end = 0x01c10000 + SZ_1K - 1, |
600 | .flags = IORESOURCE_MEM, | 595 | .flags = IORESOURCE_MEM, |
601 | }, | 596 | }, |
602 | { | 597 | { |
603 | .name = "edma_tc1", | 598 | .name = "edma3_tc1", |
604 | .start = 0x01c10400, | 599 | .start = 0x01c10400, |
605 | .end = 0x01c10400 + SZ_1K - 1, | 600 | .end = 0x01c10400 + SZ_1K - 1, |
606 | .flags = IORESOURCE_MEM, | 601 | .flags = IORESOURCE_MEM, |
607 | }, | 602 | }, |
608 | { | 603 | { |
609 | .name = "edma0", | 604 | .name = "edma3_ccint", |
610 | .start = IRQ_CCINT0, | 605 | .start = IRQ_CCINT0, |
611 | .flags = IORESOURCE_IRQ, | 606 | .flags = IORESOURCE_IRQ, |
612 | }, | 607 | }, |
613 | { | 608 | { |
614 | .name = "edma0_err", | 609 | .name = "edma3_ccerrint", |
615 | .start = IRQ_CCERRINT, | 610 | .start = IRQ_CCERRINT, |
616 | .flags = IORESOURCE_IRQ, | 611 | .flags = IORESOURCE_IRQ, |
617 | }, | 612 | }, |
618 | /* not using (or muxing) TC*_ERR */ | 613 | /* not using (or muxing) TC*_ERR */ |
619 | }; | 614 | }; |
620 | 615 | ||
621 | static struct platform_device dm355_edma_device = { | 616 | static const struct platform_device_info dm355_edma_device __initconst = { |
622 | .name = "edma", | 617 | .name = "edma", |
623 | .id = 0, | 618 | .id = 0, |
624 | .dev.platform_data = dm355_edma_info, | 619 | .dma_mask = DMA_BIT_MASK(32), |
625 | .num_resources = ARRAY_SIZE(edma_resources), | 620 | .res = edma_resources, |
626 | .resource = edma_resources, | 621 | .num_res = ARRAY_SIZE(edma_resources), |
622 | .data = &dm355_edma_pdata, | ||
623 | .size_data = sizeof(dm355_edma_pdata), | ||
627 | }; | 624 | }; |
628 | 625 | ||
629 | static struct resource dm355_asp1_resources[] = { | 626 | static struct resource dm355_asp1_resources[] = { |
@@ -1062,13 +1059,18 @@ int __init dm355_init_video(struct vpfe_config *vpfe_cfg, | |||
1062 | 1059 | ||
1063 | static int __init dm355_init_devices(void) | 1060 | static int __init dm355_init_devices(void) |
1064 | { | 1061 | { |
1062 | struct platform_device *edma_pdev; | ||
1065 | int ret = 0; | 1063 | int ret = 0; |
1066 | 1064 | ||
1067 | if (!cpu_is_davinci_dm355()) | 1065 | if (!cpu_is_davinci_dm355()) |
1068 | return 0; | 1066 | return 0; |
1069 | 1067 | ||
1070 | davinci_cfg_reg(DM355_INT_EDMA_CC); | 1068 | davinci_cfg_reg(DM355_INT_EDMA_CC); |
1071 | platform_device_register(&dm355_edma_device); | 1069 | edma_pdev = platform_device_register_full(&dm355_edma_device); |
1070 | if (IS_ERR(edma_pdev)) { | ||
1071 | pr_warn("%s: Failed to register eDMA\n", __func__); | ||
1072 | return PTR_ERR(edma_pdev); | ||
1073 | } | ||
1072 | 1074 | ||
1073 | ret = davinci_init_wdt(); | 1075 | ret = davinci_init_wdt(); |
1074 | if (ret) | 1076 | if (ret) |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 6a890a8486d0..2068cbeaeb03 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -853,8 +853,7 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
853 | }; | 853 | }; |
854 | 854 | ||
855 | /* Four Transfer Controllers on DM365 */ | 855 | /* Four Transfer Controllers on DM365 */ |
856 | static s8 | 856 | static s8 dm365_queue_priority_mapping[][2] = { |
857 | dm365_queue_priority_mapping[][2] = { | ||
858 | /* {event queue no, Priority} */ | 857 | /* {event queue no, Priority} */ |
859 | {0, 7}, | 858 | {0, 7}, |
860 | {1, 7}, | 859 | {1, 7}, |
@@ -863,53 +862,49 @@ dm365_queue_priority_mapping[][2] = { | |||
863 | {-1, -1}, | 862 | {-1, -1}, |
864 | }; | 863 | }; |
865 | 864 | ||
866 | static struct edma_soc_info edma_cc0_info = { | 865 | static struct edma_soc_info dm365_edma_pdata = { |
867 | .queue_priority_mapping = dm365_queue_priority_mapping, | 866 | .queue_priority_mapping = dm365_queue_priority_mapping, |
868 | .default_queue = EVENTQ_3, | 867 | .default_queue = EVENTQ_3, |
869 | }; | 868 | }; |
870 | 869 | ||
871 | static struct edma_soc_info *dm365_edma_info[EDMA_MAX_CC] = { | ||
872 | &edma_cc0_info, | ||
873 | }; | ||
874 | |||
875 | static struct resource edma_resources[] = { | 870 | static struct resource edma_resources[] = { |
876 | { | 871 | { |
877 | .name = "edma_cc0", | 872 | .name = "edma3_cc", |
878 | .start = 0x01c00000, | 873 | .start = 0x01c00000, |
879 | .end = 0x01c00000 + SZ_64K - 1, | 874 | .end = 0x01c00000 + SZ_64K - 1, |
880 | .flags = IORESOURCE_MEM, | 875 | .flags = IORESOURCE_MEM, |
881 | }, | 876 | }, |
882 | { | 877 | { |
883 | .name = "edma_tc0", | 878 | .name = "edma3_tc0", |
884 | .start = 0x01c10000, | 879 | .start = 0x01c10000, |
885 | .end = 0x01c10000 + SZ_1K - 1, | 880 | .end = 0x01c10000 + SZ_1K - 1, |
886 | .flags = IORESOURCE_MEM, | 881 | .flags = IORESOURCE_MEM, |
887 | }, | 882 | }, |
888 | { | 883 | { |
889 | .name = "edma_tc1", | 884 | .name = "edma3_tc1", |
890 | .start = 0x01c10400, | 885 | .start = 0x01c10400, |
891 | .end = 0x01c10400 + SZ_1K - 1, | 886 | .end = 0x01c10400 + SZ_1K - 1, |
892 | .flags = IORESOURCE_MEM, | 887 | .flags = IORESOURCE_MEM, |
893 | }, | 888 | }, |
894 | { | 889 | { |
895 | .name = "edma_tc2", | 890 | .name = "edma3_tc2", |
896 | .start = 0x01c10800, | 891 | .start = 0x01c10800, |
897 | .end = 0x01c10800 + SZ_1K - 1, | 892 | .end = 0x01c10800 + SZ_1K - 1, |
898 | .flags = IORESOURCE_MEM, | 893 | .flags = IORESOURCE_MEM, |
899 | }, | 894 | }, |
900 | { | 895 | { |
901 | .name = "edma_tc3", | 896 | .name = "edma3_tc3", |
902 | .start = 0x01c10c00, | 897 | .start = 0x01c10c00, |
903 | .end = 0x01c10c00 + SZ_1K - 1, | 898 | .end = 0x01c10c00 + SZ_1K - 1, |
904 | .flags = IORESOURCE_MEM, | 899 | .flags = IORESOURCE_MEM, |
905 | }, | 900 | }, |
906 | { | 901 | { |
907 | .name = "edma0", | 902 | .name = "edma3_ccint", |
908 | .start = IRQ_CCINT0, | 903 | .start = IRQ_CCINT0, |
909 | .flags = IORESOURCE_IRQ, | 904 | .flags = IORESOURCE_IRQ, |
910 | }, | 905 | }, |
911 | { | 906 | { |
912 | .name = "edma0_err", | 907 | .name = "edma3_ccerrint", |
913 | .start = IRQ_CCERRINT, | 908 | .start = IRQ_CCERRINT, |
914 | .flags = IORESOURCE_IRQ, | 909 | .flags = IORESOURCE_IRQ, |
915 | }, | 910 | }, |
@@ -919,7 +914,7 @@ static struct resource edma_resources[] = { | |||
919 | static struct platform_device dm365_edma_device = { | 914 | static struct platform_device dm365_edma_device = { |
920 | .name = "edma", | 915 | .name = "edma", |
921 | .id = 0, | 916 | .id = 0, |
922 | .dev.platform_data = dm365_edma_info, | 917 | .dev.platform_data = &dm365_edma_pdata, |
923 | .num_resources = ARRAY_SIZE(edma_resources), | 918 | .num_resources = ARRAY_SIZE(edma_resources), |
924 | .resource = edma_resources, | 919 | .resource = edma_resources, |
925 | }; | 920 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index dc52657909c4..d38f5049d56e 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -498,61 +498,58 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
498 | 498 | ||
499 | /*----------------------------------------------------------------------*/ | 499 | /*----------------------------------------------------------------------*/ |
500 | 500 | ||
501 | static s8 | 501 | static s8 queue_priority_mapping[][2] = { |
502 | queue_priority_mapping[][2] = { | ||
503 | /* {event queue no, Priority} */ | 502 | /* {event queue no, Priority} */ |
504 | {0, 3}, | 503 | {0, 3}, |
505 | {1, 7}, | 504 | {1, 7}, |
506 | {-1, -1}, | 505 | {-1, -1}, |
507 | }; | 506 | }; |
508 | 507 | ||
509 | static struct edma_soc_info edma_cc0_info = { | 508 | static struct edma_soc_info dm644x_edma_pdata = { |
510 | .queue_priority_mapping = queue_priority_mapping, | 509 | .queue_priority_mapping = queue_priority_mapping, |
511 | .default_queue = EVENTQ_1, | 510 | .default_queue = EVENTQ_1, |
512 | }; | 511 | }; |
513 | 512 | ||
514 | static struct edma_soc_info *dm644x_edma_info[EDMA_MAX_CC] = { | ||
515 | &edma_cc0_info, | ||
516 | }; | ||
517 | |||
518 | static struct resource edma_resources[] = { | 513 | static struct resource edma_resources[] = { |
519 | { | 514 | { |
520 | .name = "edma_cc0", | 515 | .name = "edma3_cc", |
521 | .start = 0x01c00000, | 516 | .start = 0x01c00000, |
522 | .end = 0x01c00000 + SZ_64K - 1, | 517 | .end = 0x01c00000 + SZ_64K - 1, |
523 | .flags = IORESOURCE_MEM, | 518 | .flags = IORESOURCE_MEM, |
524 | }, | 519 | }, |
525 | { | 520 | { |
526 | .name = "edma_tc0", | 521 | .name = "edma3_tc0", |
527 | .start = 0x01c10000, | 522 | .start = 0x01c10000, |
528 | .end = 0x01c10000 + SZ_1K - 1, | 523 | .end = 0x01c10000 + SZ_1K - 1, |
529 | .flags = IORESOURCE_MEM, | 524 | .flags = IORESOURCE_MEM, |
530 | }, | 525 | }, |
531 | { | 526 | { |
532 | .name = "edma_tc1", | 527 | .name = "edma3_tc1", |
533 | .start = 0x01c10400, | 528 | .start = 0x01c10400, |
534 | .end = 0x01c10400 + SZ_1K - 1, | 529 | .end = 0x01c10400 + SZ_1K - 1, |
535 | .flags = IORESOURCE_MEM, | 530 | .flags = IORESOURCE_MEM, |
536 | }, | 531 | }, |
537 | { | 532 | { |
538 | .name = "edma0", | 533 | .name = "edma3_ccint", |
539 | .start = IRQ_CCINT0, | 534 | .start = IRQ_CCINT0, |
540 | .flags = IORESOURCE_IRQ, | 535 | .flags = IORESOURCE_IRQ, |
541 | }, | 536 | }, |
542 | { | 537 | { |
543 | .name = "edma0_err", | 538 | .name = "edma3_ccerrint", |
544 | .start = IRQ_CCERRINT, | 539 | .start = IRQ_CCERRINT, |
545 | .flags = IORESOURCE_IRQ, | 540 | .flags = IORESOURCE_IRQ, |
546 | }, | 541 | }, |
547 | /* not using TC*_ERR */ | 542 | /* not using TC*_ERR */ |
548 | }; | 543 | }; |
549 | 544 | ||
550 | static struct platform_device dm644x_edma_device = { | 545 | static const struct platform_device_info dm644x_edma_device __initconst = { |
551 | .name = "edma", | 546 | .name = "edma", |
552 | .id = 0, | 547 | .id = 0, |
553 | .dev.platform_data = dm644x_edma_info, | 548 | .dma_mask = DMA_BIT_MASK(32), |
554 | .num_resources = ARRAY_SIZE(edma_resources), | 549 | .res = edma_resources, |
555 | .resource = edma_resources, | 550 | .num_res = ARRAY_SIZE(edma_resources), |
551 | .data = &dm644x_edma_pdata, | ||
552 | .size_data = sizeof(dm644x_edma_pdata), | ||
556 | }; | 553 | }; |
557 | 554 | ||
558 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ | 555 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ |
@@ -950,12 +947,17 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg, | |||
950 | 947 | ||
951 | static int __init dm644x_init_devices(void) | 948 | static int __init dm644x_init_devices(void) |
952 | { | 949 | { |
950 | struct platform_device *edma_pdev; | ||
953 | int ret = 0; | 951 | int ret = 0; |
954 | 952 | ||
955 | if (!cpu_is_davinci_dm644x()) | 953 | if (!cpu_is_davinci_dm644x()) |
956 | return 0; | 954 | return 0; |
957 | 955 | ||
958 | platform_device_register(&dm644x_edma_device); | 956 | edma_pdev = platform_device_register_full(&dm644x_edma_device); |
957 | if (IS_ERR(edma_pdev)) { | ||
958 | pr_warn("%s: Failed to register eDMA\n", __func__); | ||
959 | return PTR_ERR(edma_pdev); | ||
960 | } | ||
959 | 961 | ||
960 | platform_device_register(&dm644x_mdio_device); | 962 | platform_device_register(&dm644x_mdio_device); |
961 | platform_device_register(&dm644x_emac_device); | 963 | platform_device_register(&dm644x_emac_device); |
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 3f842bb266d6..70eb42725eec 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
@@ -531,8 +531,7 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
531 | /*----------------------------------------------------------------------*/ | 531 | /*----------------------------------------------------------------------*/ |
532 | 532 | ||
533 | /* Four Transfer Controllers on DM646x */ | 533 | /* Four Transfer Controllers on DM646x */ |
534 | static s8 | 534 | static s8 dm646x_queue_priority_mapping[][2] = { |
535 | dm646x_queue_priority_mapping[][2] = { | ||
536 | /* {event queue no, Priority} */ | 535 | /* {event queue no, Priority} */ |
537 | {0, 4}, | 536 | {0, 4}, |
538 | {1, 0}, | 537 | {1, 0}, |
@@ -541,65 +540,63 @@ dm646x_queue_priority_mapping[][2] = { | |||
541 | {-1, -1}, | 540 | {-1, -1}, |
542 | }; | 541 | }; |
543 | 542 | ||
544 | static struct edma_soc_info edma_cc0_info = { | 543 | static struct edma_soc_info dm646x_edma_pdata = { |
545 | .queue_priority_mapping = dm646x_queue_priority_mapping, | 544 | .queue_priority_mapping = dm646x_queue_priority_mapping, |
546 | .default_queue = EVENTQ_1, | 545 | .default_queue = EVENTQ_1, |
547 | }; | 546 | }; |
548 | 547 | ||
549 | static struct edma_soc_info *dm646x_edma_info[EDMA_MAX_CC] = { | ||
550 | &edma_cc0_info, | ||
551 | }; | ||
552 | |||
553 | static struct resource edma_resources[] = { | 548 | static struct resource edma_resources[] = { |
554 | { | 549 | { |
555 | .name = "edma_cc0", | 550 | .name = "edma3_cc", |
556 | .start = 0x01c00000, | 551 | .start = 0x01c00000, |
557 | .end = 0x01c00000 + SZ_64K - 1, | 552 | .end = 0x01c00000 + SZ_64K - 1, |
558 | .flags = IORESOURCE_MEM, | 553 | .flags = IORESOURCE_MEM, |
559 | }, | 554 | }, |
560 | { | 555 | { |
561 | .name = "edma_tc0", | 556 | .name = "edma3_tc0", |
562 | .start = 0x01c10000, | 557 | .start = 0x01c10000, |
563 | .end = 0x01c10000 + SZ_1K - 1, | 558 | .end = 0x01c10000 + SZ_1K - 1, |
564 | .flags = IORESOURCE_MEM, | 559 | .flags = IORESOURCE_MEM, |
565 | }, | 560 | }, |
566 | { | 561 | { |
567 | .name = "edma_tc1", | 562 | .name = "edma3_tc1", |
568 | .start = 0x01c10400, | 563 | .start = 0x01c10400, |
569 | .end = 0x01c10400 + SZ_1K - 1, | 564 | .end = 0x01c10400 + SZ_1K - 1, |
570 | .flags = IORESOURCE_MEM, | 565 | .flags = IORESOURCE_MEM, |
571 | }, | 566 | }, |
572 | { | 567 | { |
573 | .name = "edma_tc2", | 568 | .name = "edma3_tc2", |
574 | .start = 0x01c10800, | 569 | .start = 0x01c10800, |
575 | .end = 0x01c10800 + SZ_1K - 1, | 570 | .end = 0x01c10800 + SZ_1K - 1, |
576 | .flags = IORESOURCE_MEM, | 571 | .flags = IORESOURCE_MEM, |
577 | }, | 572 | }, |
578 | { | 573 | { |
579 | .name = "edma_tc3", | 574 | .name = "edma3_tc3", |
580 | .start = 0x01c10c00, | 575 | .start = 0x01c10c00, |
581 | .end = 0x01c10c00 + SZ_1K - 1, | 576 | .end = 0x01c10c00 + SZ_1K - 1, |
582 | .flags = IORESOURCE_MEM, | 577 | .flags = IORESOURCE_MEM, |
583 | }, | 578 | }, |
584 | { | 579 | { |
585 | .name = "edma0", | 580 | .name = "edma3_ccint", |
586 | .start = IRQ_CCINT0, | 581 | .start = IRQ_CCINT0, |
587 | .flags = IORESOURCE_IRQ, | 582 | .flags = IORESOURCE_IRQ, |
588 | }, | 583 | }, |
589 | { | 584 | { |
590 | .name = "edma0_err", | 585 | .name = "edma3_ccerrint", |
591 | .start = IRQ_CCERRINT, | 586 | .start = IRQ_CCERRINT, |
592 | .flags = IORESOURCE_IRQ, | 587 | .flags = IORESOURCE_IRQ, |
593 | }, | 588 | }, |
594 | /* not using TC*_ERR */ | 589 | /* not using TC*_ERR */ |
595 | }; | 590 | }; |
596 | 591 | ||
597 | static struct platform_device dm646x_edma_device = { | 592 | static const struct platform_device_info dm646x_edma_device __initconst = { |
598 | .name = "edma", | 593 | .name = "edma", |
599 | .id = 0, | 594 | .id = 0, |
600 | .dev.platform_data = dm646x_edma_info, | 595 | .dma_mask = DMA_BIT_MASK(32), |
601 | .num_resources = ARRAY_SIZE(edma_resources), | 596 | .res = edma_resources, |
602 | .resource = edma_resources, | 597 | .num_res = ARRAY_SIZE(edma_resources), |
598 | .data = &dm646x_edma_pdata, | ||
599 | .size_data = sizeof(dm646x_edma_pdata), | ||
603 | }; | 600 | }; |
604 | 601 | ||
605 | static struct resource dm646x_mcasp0_resources[] = { | 602 | static struct resource dm646x_mcasp0_resources[] = { |
@@ -936,9 +933,12 @@ void dm646x_setup_vpif(struct vpif_display_config *display_config, | |||
936 | 933 | ||
937 | int __init dm646x_init_edma(struct edma_rsv_info *rsv) | 934 | int __init dm646x_init_edma(struct edma_rsv_info *rsv) |
938 | { | 935 | { |
939 | edma_cc0_info.rsv = rsv; | 936 | struct platform_device *edma_pdev; |
937 | |||
938 | dm646x_edma_pdata.rsv = rsv; | ||
940 | 939 | ||
941 | return platform_device_register(&dm646x_edma_device); | 940 | edma_pdev = platform_device_register_full(&dm646x_edma_device); |
941 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
942 | } | 942 | } |
943 | 943 | ||
944 | void __init dm646x_init(void) | 944 | void __init dm646x_init(void) |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 33d1460a5639..ddf912406ce8 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -96,7 +96,6 @@ config ARCH_OMAP2PLUS | |||
96 | select OMAP_GPMC | 96 | select OMAP_GPMC |
97 | select PINCTRL | 97 | select PINCTRL |
98 | select SOC_BUS | 98 | select SOC_BUS |
99 | select TI_PRIV_EDMA | ||
100 | select OMAP_IRQCHIP | 99 | select OMAP_IRQCHIP |
101 | help | 100 | help |
102 | Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 | 101 | Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 1d8b147282cf..b4cb3bd89d8a 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
@@ -603,18 +603,11 @@ static void __init genclk_init_parent(struct clk *clk) | |||
603 | clk->parent = parent; | 603 | clk->parent = parent; |
604 | } | 604 | } |
605 | 605 | ||
606 | static struct dw_dma_platform_data dw_dmac0_data = { | ||
607 | .nr_channels = 3, | ||
608 | .block_size = 4095U, | ||
609 | .nr_masters = 2, | ||
610 | .data_width = { 2, 2 }, | ||
611 | }; | ||
612 | |||
613 | static struct resource dw_dmac0_resource[] = { | 606 | static struct resource dw_dmac0_resource[] = { |
614 | PBMEM(0xff200000), | 607 | PBMEM(0xff200000), |
615 | IRQ(2), | 608 | IRQ(2), |
616 | }; | 609 | }; |
617 | DEFINE_DEV_DATA(dw_dmac, 0); | 610 | DEFINE_DEV(dw_dmac, 0); |
618 | DEV_CLK(hclk, dw_dmac0, hsb, 10); | 611 | DEV_CLK(hclk, dw_dmac0, hsb, 10); |
619 | 612 | ||
620 | /* -------------------------------------------------------------------- | 613 | /* -------------------------------------------------------------------- |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b4584757dae0..e6cd1a32025a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -229,7 +229,7 @@ config IMX_SDMA | |||
229 | Support the i.MX SDMA engine. This engine is integrated into | 229 | Support the i.MX SDMA engine. This engine is integrated into |
230 | Freescale i.MX25/31/35/51/53/6 chips. | 230 | Freescale i.MX25/31/35/51/53/6 chips. |
231 | 231 | ||
232 | config IDMA64 | 232 | config INTEL_IDMA64 |
233 | tristate "Intel integrated DMA 64-bit support" | 233 | tristate "Intel integrated DMA 64-bit support" |
234 | select DMA_ENGINE | 234 | select DMA_ENGINE |
235 | select DMA_VIRTUAL_CHANNELS | 235 | select DMA_VIRTUAL_CHANNELS |
@@ -486,7 +486,7 @@ config TI_EDMA | |||
486 | depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE | 486 | depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE |
487 | select DMA_ENGINE | 487 | select DMA_ENGINE |
488 | select DMA_VIRTUAL_CHANNELS | 488 | select DMA_VIRTUAL_CHANNELS |
489 | select TI_PRIV_EDMA | 489 | select TI_DMA_CROSSBAR if ARCH_OMAP |
490 | default n | 490 | default n |
491 | help | 491 | help |
492 | Enable support for the TI EDMA controller. This DMA | 492 | Enable support for the TI EDMA controller. This DMA |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 7711a7180726..ef9c099bd2b6 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -34,7 +34,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/ | |||
34 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o | 34 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o |
35 | obj-$(CONFIG_IMX_DMA) += imx-dma.o | 35 | obj-$(CONFIG_IMX_DMA) += imx-dma.o |
36 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 36 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
37 | obj-$(CONFIG_IDMA64) += idma64.o | 37 | obj-$(CONFIG_INTEL_IDMA64) += idma64.o |
38 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ | 38 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
39 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 39 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
40 | obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o | 40 | obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o |
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 981a38fc4cb8..16d0daa058a5 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c | |||
@@ -161,10 +161,8 @@ int acpi_dma_controller_register(struct device *dev, | |||
161 | return -EINVAL; | 161 | return -EINVAL; |
162 | 162 | ||
163 | /* Check if the device was enumerated by ACPI */ | 163 | /* Check if the device was enumerated by ACPI */ |
164 | if (!ACPI_HANDLE(dev)) | 164 | adev = ACPI_COMPANION(dev); |
165 | return -EINVAL; | 165 | if (!adev) |
166 | |||
167 | if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) | ||
168 | return -EINVAL; | 166 | return -EINVAL; |
169 | 167 | ||
170 | adma = kzalloc(sizeof(*adma), GFP_KERNEL); | 168 | adma = kzalloc(sizeof(*adma), GFP_KERNEL); |
@@ -359,10 +357,11 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, | |||
359 | int found; | 357 | int found; |
360 | 358 | ||
361 | /* Check if the device was enumerated by ACPI */ | 359 | /* Check if the device was enumerated by ACPI */ |
362 | if (!dev || !ACPI_HANDLE(dev)) | 360 | if (!dev) |
363 | return ERR_PTR(-ENODEV); | 361 | return ERR_PTR(-ENODEV); |
364 | 362 | ||
365 | if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) | 363 | adev = ACPI_COMPANION(dev); |
364 | if (!adev) | ||
366 | return ERR_PTR(-ENODEV); | 365 | return ERR_PTR(-ENODEV); |
367 | 366 | ||
368 | memset(&pdata, 0, sizeof(pdata)); | 367 | memset(&pdata, 0, sizeof(pdata)); |
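Both acpi-dma.c hunks apply the same simplification: ACPI_COMPANION() hands back the device's struct acpi_device directly (or NULL when the device was not enumerated by ACPI), so the separate ACPI_HANDLE() test plus acpi_bus_get_device() lookup collapse into a single call. A minimal sketch of the resulting pattern:

	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);	/* NULL if not ACPI-enumerated */
	if (!adev)
		return -EINVAL;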
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 58d406230d89..4e55239c7a30 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -458,10 +458,10 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
458 | dma_cookie_complete(txd); | 458 | dma_cookie_complete(txd); |
459 | 459 | ||
460 | /* If the transfer was a memset, free our temporary buffer */ | 460 | /* If the transfer was a memset, free our temporary buffer */ |
461 | if (desc->memset) { | 461 | if (desc->memset_buffer) { |
462 | dma_pool_free(atdma->memset_pool, desc->memset_vaddr, | 462 | dma_pool_free(atdma->memset_pool, desc->memset_vaddr, |
463 | desc->memset_paddr); | 463 | desc->memset_paddr); |
464 | desc->memset = false; | 464 | desc->memset_buffer = false; |
465 | } | 465 | } |
466 | 466 | ||
467 | /* move children to free_list */ | 467 | /* move children to free_list */ |
@@ -881,6 +881,46 @@ err_desc_get: | |||
881 | return NULL; | 881 | return NULL; |
882 | } | 882 | } |
883 | 883 | ||
884 | static struct at_desc *atc_create_memset_desc(struct dma_chan *chan, | ||
885 | dma_addr_t psrc, | ||
886 | dma_addr_t pdst, | ||
887 | size_t len) | ||
888 | { | ||
889 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
890 | struct at_desc *desc; | ||
891 | size_t xfer_count; | ||
892 | |||
893 | u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2); | ||
894 | u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | | ||
895 | ATC_SRC_ADDR_MODE_FIXED | | ||
896 | ATC_DST_ADDR_MODE_INCR | | ||
897 | ATC_FC_MEM2MEM; | ||
898 | |||
899 | xfer_count = len >> 2; | ||
900 | if (xfer_count > ATC_BTSIZE_MAX) { | ||
901 | dev_err(chan2dev(chan), "%s: buffer is too big\n", | ||
902 | __func__); | ||
903 | return NULL; | ||
904 | } | ||
905 | |||
906 | desc = atc_desc_get(atchan); | ||
907 | if (!desc) { | ||
908 | dev_err(chan2dev(chan), "%s: can't get a descriptor\n", | ||
909 | __func__); | ||
910 | return NULL; | ||
911 | } | ||
912 | |||
913 | desc->lli.saddr = psrc; | ||
914 | desc->lli.daddr = pdst; | ||
915 | desc->lli.ctrla = ctrla | xfer_count; | ||
916 | desc->lli.ctrlb = ctrlb; | ||
917 | |||
918 | desc->txd.cookie = 0; | ||
919 | desc->len = len; | ||
920 | |||
921 | return desc; | ||
922 | } | ||
923 | |||
884 | /** | 924 | /** |
885 | * atc_prep_dma_memset - prepare a memset operation | 925 | * atc_prep_dma_memset - prepare a memset operation |
886 | * @chan: the channel to prepare operation on | 926 | * @chan: the channel to prepare operation on |
@@ -893,12 +933,10 @@ static struct dma_async_tx_descriptor * | |||
893 | atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | 933 | atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, |
894 | size_t len, unsigned long flags) | 934 | size_t len, unsigned long flags) |
895 | { | 935 | { |
896 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
897 | struct at_dma *atdma = to_at_dma(chan->device); | 936 | struct at_dma *atdma = to_at_dma(chan->device); |
898 | struct at_desc *desc = NULL; | 937 | struct at_desc *desc; |
899 | size_t xfer_count; | 938 | void __iomem *vaddr; |
900 | u32 ctrla; | 939 | dma_addr_t paddr; |
901 | u32 ctrlb; | ||
902 | 940 | ||
903 | dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, | 941 | dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, |
904 | dest, value, len, flags); | 942 | dest, value, len, flags); |
@@ -914,61 +952,117 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
914 | return NULL; | 952 | return NULL; |
915 | } | 953 | } |
916 | 954 | ||
917 | xfer_count = len >> 2; | 955 | vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr); |
918 | if (xfer_count > ATC_BTSIZE_MAX) { | 956 | if (!vaddr) { |
919 | dev_err(chan2dev(chan), "%s: buffer is too big\n", | 957 | dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", |
920 | __func__); | 958 | __func__); |
921 | return NULL; | 959 | return NULL; |
922 | } | 960 | } |
961 | *(u32*)vaddr = value; | ||
923 | 962 | ||
924 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | 963 | desc = atc_create_memset_desc(chan, paddr, dest, len); |
925 | | ATC_SRC_ADDR_MODE_FIXED | 964 | if (!desc) { |
926 | | ATC_DST_ADDR_MODE_INCR | 965 | dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n", |
927 | | ATC_FC_MEM2MEM; | 966 | __func__); |
967 | goto err_free_buffer; | ||
968 | } | ||
928 | 969 | ||
929 | ctrla = ATC_SRC_WIDTH(2) | | 970 | desc->memset_paddr = paddr; |
930 | ATC_DST_WIDTH(2); | 971 | desc->memset_vaddr = vaddr; |
972 | desc->memset_buffer = true; | ||
931 | 973 | ||
932 | desc = atc_desc_get(atchan); | 974 | desc->txd.cookie = -EBUSY; |
933 | if (!desc) { | 975 | desc->total_len = len; |
934 | dev_err(chan2dev(chan), "%s: can't get a descriptor\n", | 976 | |
977 | /* set end-of-link on the descriptor */ | ||
978 | set_desc_eol(desc); | ||
979 | |||
980 | desc->txd.flags = flags; | ||
981 | |||
982 | return &desc->txd; | ||
983 | |||
984 | err_free_buffer: | ||
985 | dma_pool_free(atdma->memset_pool, vaddr, paddr); | ||
986 | return NULL; | ||
987 | } | ||
988 | |||
989 | static struct dma_async_tx_descriptor * | ||
990 | atc_prep_dma_memset_sg(struct dma_chan *chan, | ||
991 | struct scatterlist *sgl, | ||
992 | unsigned int sg_len, int value, | ||
993 | unsigned long flags) | ||
994 | { | ||
995 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
996 | struct at_dma *atdma = to_at_dma(chan->device); | ||
997 | struct at_desc *desc = NULL, *first = NULL, *prev = NULL; | ||
998 | struct scatterlist *sg; | ||
999 | void __iomem *vaddr; | ||
1000 | dma_addr_t paddr; | ||
1001 | size_t total_len = 0; | ||
1002 | int i; | ||
1003 | |||
1004 | dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__, | ||
1005 | value, sg_len, flags); | ||
1006 | |||
1007 | if (unlikely(!sgl || !sg_len)) { | ||
1008 | dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n", | ||
935 | __func__); | 1009 | __func__); |
936 | return NULL; | 1010 | return NULL; |
937 | } | 1011 | } |
938 | 1012 | ||
939 | desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, | 1013 | vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr); |
940 | &desc->memset_paddr); | 1014 | if (!vaddr) { |
941 | if (!desc->memset_vaddr) { | ||
942 | dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", | 1015 | dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", |
943 | __func__); | 1016 | __func__); |
944 | goto err_put_desc; | 1017 | return NULL; |
945 | } | 1018 | } |
1019 | *(u32*)vaddr = value; | ||
946 | 1020 | ||
947 | *desc->memset_vaddr = value; | 1021 | for_each_sg(sgl, sg, sg_len, i) { |
948 | desc->memset = true; | 1022 | dma_addr_t dest = sg_dma_address(sg); |
1023 | size_t len = sg_dma_len(sg); | ||
949 | 1024 | ||
950 | desc->lli.saddr = desc->memset_paddr; | 1025 | dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", |
951 | desc->lli.daddr = dest; | 1026 | __func__, dest, len); |
952 | desc->lli.ctrla = ctrla | xfer_count; | ||
953 | desc->lli.ctrlb = ctrlb; | ||
954 | 1027 | ||
955 | desc->txd.cookie = -EBUSY; | 1028 | if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { |
956 | desc->len = len; | 1029 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n", |
957 | desc->total_len = len; | 1030 | __func__); |
1031 | goto err_put_desc; | ||
1032 | } | ||
1033 | |||
1034 | desc = atc_create_memset_desc(chan, paddr, dest, len); | ||
1035 | if (!desc) | ||
1036 | goto err_put_desc; | ||
1037 | |||
1038 | atc_desc_chain(&first, &prev, desc); | ||
1039 | |||
1040 | total_len += len; | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * Only set the buffer pointers on the last descriptor to | ||
1045 | * avoid freeing while we have our transfer still going | ||
1046 | */ | ||
1047 | desc->memset_paddr = paddr; | ||
1048 | desc->memset_vaddr = vaddr; | ||
1049 | desc->memset_buffer = true; | ||
1050 | |||
1051 | first->txd.cookie = -EBUSY; | ||
1052 | first->total_len = total_len; | ||
958 | 1053 | ||
959 | /* set end-of-link on the descriptor */ | 1054 | /* set end-of-link on the descriptor */ |
960 | set_desc_eol(desc); | 1055 | set_desc_eol(desc); |
961 | 1056 | ||
962 | desc->txd.flags = flags; | 1057 | first->txd.flags = flags; |
963 | 1058 | ||
964 | return &desc->txd; | 1059 | return &first->txd; |
965 | 1060 | ||
966 | err_put_desc: | 1061 | err_put_desc: |
967 | atc_desc_put(atchan, desc); | 1062 | atc_desc_put(atchan, first); |
968 | return NULL; | 1063 | return NULL; |
969 | } | 1064 | } |
970 | 1065 | ||
971 | |||
972 | /** | 1066 | /** |
973 | * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | 1067 | * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
974 | * @chan: DMA channel | 1068 | * @chan: DMA channel |
@@ -1851,6 +1945,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1851 | dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); | 1945 | dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); |
1852 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); | 1946 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); |
1853 | dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); | 1947 | dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); |
1948 | dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); | ||
1854 | dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); | 1949 | dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); |
1855 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); | 1950 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); |
1856 | dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); | 1951 | dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); |
@@ -1972,6 +2067,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1972 | 2067 | ||
1973 | if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { | 2068 | if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { |
1974 | atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; | 2069 | atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; |
2070 | atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; | ||
1975 | atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; | 2071 | atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; |
1976 | } | 2072 | } |
1977 | 2073 | ||
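For context, a consumer reaches the new scatter-gathered memset through the device_prep_dma_memset_sg hook registered above. A hypothetical usage sketch, assuming chan, sgl and sg_len have already been set up (none of this is part of the patch):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memset_sg(chan, sgl, sg_len,
						     0x00 /* fill value */,
						     DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;	/* hypothetical error handling */

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);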
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index c3bebbe899ac..d1cfc8c876f9 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -202,7 +202,7 @@ struct at_desc { | |||
202 | size_t src_hole; | 202 | size_t src_hole; |
203 | 203 | ||
204 | /* Memset temporary buffer */ | 204 | /* Memset temporary buffer */ |
205 | bool memset; | 205 | bool memset_buffer; |
206 | dma_addr_t memset_paddr; | 206 | dma_addr_t memset_paddr; |
207 | int *memset_vaddr; | 207 | int *memset_vaddr; |
208 | }; | 208 | }; |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index dd24375b76dd..b5e132d4bae5 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -938,13 +938,19 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
938 | { | 938 | { |
939 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | 939 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
940 | struct at_xdmac_desc *prev = NULL, *first = NULL; | 940 | struct at_xdmac_desc *prev = NULL, *first = NULL; |
941 | struct data_chunk *chunk, *prev_chunk = NULL; | ||
942 | dma_addr_t dst_addr, src_addr; | 941 | dma_addr_t dst_addr, src_addr; |
943 | size_t dst_skip, src_skip, len = 0; | 942 | size_t src_skip = 0, dst_skip = 0, len = 0; |
944 | size_t prev_dst_icg = 0, prev_src_icg = 0; | 943 | struct data_chunk *chunk; |
945 | int i; | 944 | int i; |
946 | 945 | ||
947 | if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM)) | 946 | if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) |
947 | return NULL; | ||
948 | |||
949 | /* | ||
950 | * TODO: Handle the case where we have to repeat a chain of | ||
951 | * descriptors... | ||
952 | */ | ||
953 | if ((xt->numf > 1) && (xt->frame_size > 1)) | ||
948 | return NULL; | 954 | return NULL; |
949 | 955 | ||
950 | dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", | 956 | dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", |
@@ -954,66 +960,60 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
954 | src_addr = xt->src_start; | 960 | src_addr = xt->src_start; |
955 | dst_addr = xt->dst_start; | 961 | dst_addr = xt->dst_start; |
956 | 962 | ||
957 | for (i = 0; i < xt->frame_size; i++) { | 963 | if (xt->numf > 1) { |
958 | struct at_xdmac_desc *desc; | 964 | first = at_xdmac_interleaved_queue_desc(chan, atchan, |
959 | size_t src_icg, dst_icg; | 965 | NULL, |
966 | src_addr, dst_addr, | ||
967 | xt, xt->sgl); | ||
968 | for (i = 0; i < xt->numf; i++) | ||
969 | at_xdmac_increment_block_count(chan, first); | ||
960 | 970 | ||
961 | chunk = xt->sgl + i; | 971 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", |
972 | __func__, first, first); | ||
973 | list_add_tail(&first->desc_node, &first->descs_list); | ||
974 | } else { | ||
975 | for (i = 0; i < xt->frame_size; i++) { | ||
976 | size_t src_icg = 0, dst_icg = 0; | ||
977 | struct at_xdmac_desc *desc; | ||
962 | 978 | ||
963 | dst_icg = dmaengine_get_dst_icg(xt, chunk); | 979 | chunk = xt->sgl + i; |
964 | src_icg = dmaengine_get_src_icg(xt, chunk); | ||
965 | 980 | ||
966 | src_skip = chunk->size + src_icg; | 981 | dst_icg = dmaengine_get_dst_icg(xt, chunk); |
967 | dst_skip = chunk->size + dst_icg; | 982 | src_icg = dmaengine_get_src_icg(xt, chunk); |
968 | 983 | ||
969 | dev_dbg(chan2dev(chan), | 984 | src_skip = chunk->size + src_icg; |
970 | "%s: chunk size=%d, src icg=%d, dst icg=%d\n", | 985 | dst_skip = chunk->size + dst_icg; |
971 | __func__, chunk->size, src_icg, dst_icg); | ||
972 | 986 | ||
973 | /* | ||
974 | * Handle the case where we just have the same | ||
975 | * transfer to setup, we can just increase the | ||
976 | * block number and reuse the same descriptor. | ||
977 | */ | ||
978 | if (prev_chunk && prev && | ||
979 | (prev_chunk->size == chunk->size) && | ||
980 | (prev_src_icg == src_icg) && | ||
981 | (prev_dst_icg == dst_icg)) { | ||
982 | dev_dbg(chan2dev(chan), | 987 | dev_dbg(chan2dev(chan), |
983 | "%s: same configuration that the previous chunk, merging the descriptors...\n", | 988 | "%s: chunk size=%d, src icg=%d, dst icg=%d\n", |
984 | __func__); | 989 | __func__, chunk->size, src_icg, dst_icg); |
985 | at_xdmac_increment_block_count(chan, prev); | 990 | |
986 | continue; | 991 | desc = at_xdmac_interleaved_queue_desc(chan, atchan, |
987 | } | 992 | prev, |
988 | 993 | src_addr, dst_addr, | |
989 | desc = at_xdmac_interleaved_queue_desc(chan, atchan, | 994 | xt, chunk); |
990 | prev, | 995 | if (!desc) { |
991 | src_addr, dst_addr, | 996 | list_splice_init(&first->descs_list, |
992 | xt, chunk); | 997 | &atchan->free_descs_list); |
993 | if (!desc) { | 998 | return NULL; |
994 | list_splice_init(&first->descs_list, | 999 | } |
995 | &atchan->free_descs_list); | ||
996 | return NULL; | ||
997 | } | ||
998 | 1000 | ||
999 | if (!first) | 1001 | if (!first) |
1000 | first = desc; | 1002 | first = desc; |
1001 | 1003 | ||
1002 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | 1004 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", |
1003 | __func__, desc, first); | 1005 | __func__, desc, first); |
1004 | list_add_tail(&desc->desc_node, &first->descs_list); | 1006 | list_add_tail(&desc->desc_node, &first->descs_list); |
1005 | 1007 | ||
1006 | if (xt->src_sgl) | 1008 | if (xt->src_sgl) |
1007 | src_addr += src_skip; | 1009 | src_addr += src_skip; |
1008 | 1010 | ||
1009 | if (xt->dst_sgl) | 1011 | if (xt->dst_sgl) |
1010 | dst_addr += dst_skip; | 1012 | dst_addr += dst_skip; |
1011 | 1013 | ||
1012 | len += chunk->size; | 1014 | len += chunk->size; |
1013 | prev_chunk = chunk; | 1015 | prev = desc; |
1014 | prev_dst_icg = dst_icg; | 1016 | } |
1015 | prev_src_icg = src_icg; | ||
1016 | prev = desc; | ||
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | first->tx_dma_desc.cookie = -EBUSY; | 1019 | first->tx_dma_desc.cookie = -EBUSY; |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 09479d4be4db..3ecec1445adf 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -1074,11 +1074,9 @@ static void dmaengine_destroy_unmap_pool(void) | |||
1074 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | 1074 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
1075 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | 1075 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; |
1076 | 1076 | ||
1077 | if (p->pool) | 1077 | mempool_destroy(p->pool); |
1078 | mempool_destroy(p->pool); | ||
1079 | p->pool = NULL; | 1078 | p->pool = NULL; |
1080 | if (p->cache) | 1079 | kmem_cache_destroy(p->cache); |
1081 | kmem_cache_destroy(p->cache); | ||
1082 | p->cache = NULL; | 1080 | p->cache = NULL; |
1083 | } | 1081 | } |
1084 | } | 1082 | } |
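mempool_destroy() and kmem_cache_destroy() both return immediately when handed a NULL pointer, so the dropped if-checks were redundant; this hunk is a pure cleanup with no behaviour change.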
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index bedce038c6e2..7067b6ddc1db 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -163,7 +163,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
163 | 163 | ||
164 | /*----------------------------------------------------------------------*/ | 164 | /*----------------------------------------------------------------------*/ |
165 | 165 | ||
166 | static inline unsigned int dwc_fast_fls(unsigned long long v) | 166 | static inline unsigned int dwc_fast_ffs(unsigned long long v) |
167 | { | 167 | { |
168 | /* | 168 | /* |
169 | * We can be a lot more clever here, but this should take care | 169 | * We can be a lot more clever here, but this should take care |
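The rename reflects what the helper really computes: the index of the lowest set bit, i.e. the coarsest alignment its arguments all share, which is an ffs rather than an fls operation. Worked example with illustrative values: for src = 0x1004, dest = 0x2008 and len = 0x40, src | dest | len = 0x304c, whose lowest set bit is bit 2, so 4-byte (1 << 2) accesses are the widest that stay aligned for all three; the callers below cap the source and destination width accordingly.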
@@ -712,7 +712,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
712 | dw->data_width[dwc->dst_master]); | 712 | dw->data_width[dwc->dst_master]); |
713 | 713 | ||
714 | src_width = dst_width = min_t(unsigned int, data_width, | 714 | src_width = dst_width = min_t(unsigned int, data_width, |
715 | dwc_fast_fls(src | dest | len)); | 715 | dwc_fast_ffs(src | dest | len)); |
716 | 716 | ||
717 | ctllo = DWC_DEFAULT_CTLLO(chan) | 717 | ctllo = DWC_DEFAULT_CTLLO(chan) |
718 | | DWC_CTLL_DST_WIDTH(dst_width) | 718 | | DWC_CTLL_DST_WIDTH(dst_width) |
@@ -791,7 +791,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
791 | 791 | ||
792 | switch (direction) { | 792 | switch (direction) { |
793 | case DMA_MEM_TO_DEV: | 793 | case DMA_MEM_TO_DEV: |
794 | reg_width = __fls(sconfig->dst_addr_width); | 794 | reg_width = __ffs(sconfig->dst_addr_width); |
795 | reg = sconfig->dst_addr; | 795 | reg = sconfig->dst_addr; |
796 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 796 | ctllo = (DWC_DEFAULT_CTLLO(chan) |
797 | | DWC_CTLL_DST_WIDTH(reg_width) | 797 | | DWC_CTLL_DST_WIDTH(reg_width) |
@@ -811,7 +811,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
811 | len = sg_dma_len(sg); | 811 | len = sg_dma_len(sg); |
812 | 812 | ||
813 | mem_width = min_t(unsigned int, | 813 | mem_width = min_t(unsigned int, |
814 | data_width, dwc_fast_fls(mem | len)); | 814 | data_width, dwc_fast_ffs(mem | len)); |
815 | 815 | ||
816 | slave_sg_todev_fill_desc: | 816 | slave_sg_todev_fill_desc: |
817 | desc = dwc_desc_get(dwc); | 817 | desc = dwc_desc_get(dwc); |
@@ -848,7 +848,7 @@ slave_sg_todev_fill_desc: | |||
848 | } | 848 | } |
849 | break; | 849 | break; |
850 | case DMA_DEV_TO_MEM: | 850 | case DMA_DEV_TO_MEM: |
851 | reg_width = __fls(sconfig->src_addr_width); | 851 | reg_width = __ffs(sconfig->src_addr_width); |
852 | reg = sconfig->src_addr; | 852 | reg = sconfig->src_addr; |
853 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 853 | ctllo = (DWC_DEFAULT_CTLLO(chan) |
854 | | DWC_CTLL_SRC_WIDTH(reg_width) | 854 | | DWC_CTLL_SRC_WIDTH(reg_width) |
@@ -868,7 +868,7 @@ slave_sg_todev_fill_desc: | |||
868 | len = sg_dma_len(sg); | 868 | len = sg_dma_len(sg); |
869 | 869 | ||
870 | mem_width = min_t(unsigned int, | 870 | mem_width = min_t(unsigned int, |
871 | data_width, dwc_fast_fls(mem | len)); | 871 | data_width, dwc_fast_ffs(mem | len)); |
872 | 872 | ||
873 | slave_sg_fromdev_fill_desc: | 873 | slave_sg_fromdev_fill_desc: |
874 | desc = dwc_desc_get(dwc); | 874 | desc = dwc_desc_get(dwc); |
@@ -1499,9 +1499,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); | |||
1499 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | 1499 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) |
1500 | { | 1500 | { |
1501 | struct dw_dma *dw; | 1501 | struct dw_dma *dw; |
1502 | bool autocfg; | 1502 | bool autocfg = false; |
1503 | unsigned int dw_params; | 1503 | unsigned int dw_params; |
1504 | unsigned int nr_channels; | ||
1505 | unsigned int max_blk_size = 0; | 1504 | unsigned int max_blk_size = 0; |
1506 | int err; | 1505 | int err; |
1507 | int i; | 1506 | int i; |
@@ -1515,33 +1514,42 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1515 | 1514 | ||
1516 | pm_runtime_get_sync(chip->dev); | 1515 | pm_runtime_get_sync(chip->dev); |
1517 | 1516 | ||
1518 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | 1517 | if (!pdata) { |
1519 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | 1518 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); |
1519 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | ||
1520 | 1520 | ||
1521 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | 1521 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
1522 | if (!autocfg) { | ||
1523 | err = -EINVAL; | ||
1524 | goto err_pdata; | ||
1525 | } | ||
1522 | 1526 | ||
1523 | if (!pdata && autocfg) { | ||
1524 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); | 1527 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); |
1525 | if (!pdata) { | 1528 | if (!pdata) { |
1526 | err = -ENOMEM; | 1529 | err = -ENOMEM; |
1527 | goto err_pdata; | 1530 | goto err_pdata; |
1528 | } | 1531 | } |
1529 | 1532 | ||
1533 | /* Get hardware configuration parameters */ | ||
1534 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | ||
1535 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | ||
1536 | for (i = 0; i < pdata->nr_masters; i++) { | ||
1537 | pdata->data_width[i] = | ||
1538 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | ||
1539 | } | ||
1540 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | ||
1541 | |||
1530 | /* Fill platform data with the default values */ | 1542 | /* Fill platform data with the default values */ |
1531 | pdata->is_private = true; | 1543 | pdata->is_private = true; |
1544 | pdata->is_memcpy = true; | ||
1532 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1545 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1533 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1546 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
1534 | } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { | 1547 | } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
1535 | err = -EINVAL; | 1548 | err = -EINVAL; |
1536 | goto err_pdata; | 1549 | goto err_pdata; |
1537 | } | 1550 | } |
1538 | 1551 | ||
1539 | if (autocfg) | 1552 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
1540 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | ||
1541 | else | ||
1542 | nr_channels = pdata->nr_channels; | ||
1543 | |||
1544 | dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan), | ||
1545 | GFP_KERNEL); | 1553 | GFP_KERNEL); |
1546 | if (!dw->chan) { | 1554 | if (!dw->chan) { |
1547 | err = -ENOMEM; | 1555 | err = -ENOMEM; |
@@ -1549,22 +1557,12 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1549 | } | 1557 | } |
1550 | 1558 | ||
1551 | /* Get hardware configuration parameters */ | 1559 | /* Get hardware configuration parameters */ |
1552 | if (autocfg) { | 1560 | dw->nr_masters = pdata->nr_masters; |
1553 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | 1561 | for (i = 0; i < dw->nr_masters; i++) |
1554 | 1562 | dw->data_width[i] = pdata->data_width[i]; | |
1555 | dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | ||
1556 | for (i = 0; i < dw->nr_masters; i++) { | ||
1557 | dw->data_width[i] = | ||
1558 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | ||
1559 | } | ||
1560 | } else { | ||
1561 | dw->nr_masters = pdata->nr_masters; | ||
1562 | for (i = 0; i < dw->nr_masters; i++) | ||
1563 | dw->data_width[i] = pdata->data_width[i]; | ||
1564 | } | ||
1565 | 1563 | ||
1566 | /* Calculate all channel mask before DMA setup */ | 1564 | /* Calculate all channel mask before DMA setup */ |
1567 | dw->all_chan_mask = (1 << nr_channels) - 1; | 1565 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1568 | 1566 | ||
1569 | /* Force dma off, just in case */ | 1567 | /* Force dma off, just in case */ |
1570 | dw_dma_off(dw); | 1568 | dw_dma_off(dw); |
@@ -1589,7 +1587,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1589 | goto err_pdata; | 1587 | goto err_pdata; |
1590 | 1588 | ||
1591 | INIT_LIST_HEAD(&dw->dma.channels); | 1589 | INIT_LIST_HEAD(&dw->dma.channels); |
1592 | for (i = 0; i < nr_channels; i++) { | 1590 | for (i = 0; i < pdata->nr_channels; i++) { |
1593 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1591 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1594 | 1592 | ||
1595 | dwc->chan.device = &dw->dma; | 1593 | dwc->chan.device = &dw->dma; |
@@ -1602,7 +1600,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1602 | 1600 | ||
1603 | /* 7 is highest priority & 0 is lowest. */ | 1601 | /* 7 is highest priority & 0 is lowest. */ |
1604 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1602 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1605 | dwc->priority = nr_channels - i - 1; | 1603 | dwc->priority = pdata->nr_channels - i - 1; |
1606 | else | 1604 | else |
1607 | dwc->priority = i; | 1605 | dwc->priority = i; |
1608 | 1606 | ||
@@ -1656,10 +1654,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1656 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 1654 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1657 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 1655 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1658 | 1656 | ||
1659 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1657 | /* Set capabilities */ |
1660 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1658 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1661 | if (pdata->is_private) | 1659 | if (pdata->is_private) |
1662 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | 1660 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); |
1661 | if (pdata->is_memcpy) | ||
1662 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | ||
1663 | |||
1663 | dw->dma.dev = chip->dev; | 1664 | dw->dma.dev = chip->dev; |
1664 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1665 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1665 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1666 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
@@ -1687,7 +1688,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1687 | goto err_dma_register; | 1688 | goto err_dma_register; |
1688 | 1689 | ||
1689 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", | 1690 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
1690 | nr_channels); | 1691 | pdata->nr_channels); |
1691 | 1692 | ||
1692 | pm_runtime_put_sync_suspend(chip->dev); | 1693 | pm_runtime_put_sync_suspend(chip->dev); |
1693 | 1694 | ||
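Net effect of the probe rework: caller-supplied platform data is now authoritative, and the DW_PARAMS auto-configuration register is consulted only when no pdata is given (where it also becomes mandatory, the probe failing with -EINVAL if the hardware does not advertise it). Previously, whenever autocfg was available it silently overrode the pdata-provided nr_channels, nr_masters and data_width. DMA_MEMCPY is likewise no longer set unconditionally but gated on the new is_memcpy flag.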
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index b144706b3d85..4c30fdd092b3 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -15,12 +15,6 @@ | |||
15 | 15 | ||
16 | #include "internal.h" | 16 | #include "internal.h" |
17 | 17 | ||
18 | static struct dw_dma_platform_data dw_pci_pdata = { | ||
19 | .is_private = 1, | ||
20 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | ||
21 | .chan_priority = CHAN_PRIORITY_ASCENDING, | ||
22 | }; | ||
23 | |||
24 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 18 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
25 | { | 19 | { |
26 | struct dw_dma_chip *chip; | 20 | struct dw_dma_chip *chip; |
@@ -101,19 +95,19 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { | |||
101 | 95 | ||
102 | static const struct pci_device_id dw_pci_id_table[] = { | 96 | static const struct pci_device_id dw_pci_id_table[] = { |
103 | /* Medfield */ | 97 | /* Medfield */ |
104 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, | 98 | { PCI_VDEVICE(INTEL, 0x0827) }, |
105 | { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, | 99 | { PCI_VDEVICE(INTEL, 0x0830) }, |
106 | 100 | ||
107 | /* BayTrail */ | 101 | /* BayTrail */ |
108 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, | 102 | { PCI_VDEVICE(INTEL, 0x0f06) }, |
109 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, | 103 | { PCI_VDEVICE(INTEL, 0x0f40) }, |
110 | 104 | ||
111 | /* Braswell */ | 105 | /* Braswell */ |
112 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata }, | 106 | { PCI_VDEVICE(INTEL, 0x2286) }, |
113 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata }, | 107 | { PCI_VDEVICE(INTEL, 0x22c0) }, |
114 | 108 | ||
115 | /* Haswell */ | 109 | /* Haswell */ |
116 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata }, | 110 | { PCI_VDEVICE(INTEL, 0x9c60) }, |
117 | { } | 111 | { } |
118 | }; | 112 | }; |
119 | MODULE_DEVICE_TABLE(pci, dw_pci_id_table); | 113 | MODULE_DEVICE_TABLE(pci, dw_pci_id_table); |
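With the shared dw_pci_pdata table removed, these PCI IDs carry no platform data, which steers dw_dma_probe() into the auto-configuration path above; the defaults generated there (is_private, is_memcpy, ascending channel allocation and priority) cover what the static table used to provide.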
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index b2c3ae071429..68a4815750b5 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -155,6 +155,7 @@ static int dw_probe(struct platform_device *pdev) | |||
155 | struct dw_dma_chip *chip; | 155 | struct dw_dma_chip *chip; |
156 | struct device *dev = &pdev->dev; | 156 | struct device *dev = &pdev->dev; |
157 | struct resource *mem; | 157 | struct resource *mem; |
158 | const struct acpi_device_id *id; | ||
158 | struct dw_dma_platform_data *pdata; | 159 | struct dw_dma_platform_data *pdata; |
159 | int err; | 160 | int err; |
160 | 161 | ||
@@ -178,6 +179,11 @@ static int dw_probe(struct platform_device *pdev) | |||
178 | pdata = dev_get_platdata(dev); | 179 | pdata = dev_get_platdata(dev); |
179 | if (!pdata) | 180 | if (!pdata) |
180 | pdata = dw_dma_parse_dt(pdev); | 181 | pdata = dw_dma_parse_dt(pdev); |
182 | if (!pdata && has_acpi_companion(dev)) { | ||
183 | id = acpi_match_device(dev->driver->acpi_match_table, dev); | ||
184 | if (id) | ||
185 | pdata = (struct dw_dma_platform_data *)id->driver_data; | ||
186 | } | ||
181 | 187 | ||
182 | chip->dev = dev; | 188 | chip->dev = dev; |
183 | 189 | ||
@@ -246,8 +252,17 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); | |||
246 | #endif | 252 | #endif |
247 | 253 | ||
248 | #ifdef CONFIG_ACPI | 254 | #ifdef CONFIG_ACPI |
255 | static struct dw_dma_platform_data dw_dma_acpi_pdata = { | ||
256 | .nr_channels = 8, | ||
257 | .is_private = true, | ||
258 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | ||
259 | .chan_priority = CHAN_PRIORITY_ASCENDING, | ||
260 | .block_size = 4095, | ||
261 | .nr_masters = 2, | ||
262 | }; | ||
263 | |||
249 | static const struct acpi_device_id dw_dma_acpi_id_table[] = { | 264 | static const struct acpi_device_id dw_dma_acpi_id_table[] = { |
250 | { "INTL9C60", 0 }, | 265 | { "INTL9C60", (kernel_ulong_t)&dw_dma_acpi_pdata }, |
251 | { } | 266 | { } |
252 | }; | 267 | }; |
253 | MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); | 268 | MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3e5d4f193005..6b03e4e84e6b 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -25,28 +25,93 @@ | |||
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_dma.h> | ||
29 | #include <linux/of_irq.h> | ||
30 | #include <linux/of_address.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
28 | 33 | ||
29 | #include <linux/platform_data/edma.h> | 34 | #include <linux/platform_data/edma.h> |
30 | 35 | ||
31 | #include "dmaengine.h" | 36 | #include "dmaengine.h" |
32 | #include "virt-dma.h" | 37 | #include "virt-dma.h" |
33 | 38 | ||
34 | /* | 39 | /* Offsets matching "struct edmacc_param" */ |
35 | * This will go away when the private EDMA API is folded | 40 | #define PARM_OPT 0x00 |
36 | * into this driver and the platform device(s) are | 41 | #define PARM_SRC 0x04 |
37 | * instantiated in the arch code. We can only get away | 42 | #define PARM_A_B_CNT 0x08 |
38 | * with this simplification because DA8XX may not be built | 43 | #define PARM_DST 0x0c |
39 | * in the same kernel image with other DaVinci parts. This | 44 | #define PARM_SRC_DST_BIDX 0x10 |
40 | * avoids having to sprinkle dmaengine driver platform devices | 45 | #define PARM_LINK_BCNTRLD 0x14 |
41 | * and data throughout all the existing board files. | 46 | #define PARM_SRC_DST_CIDX 0x18 |
42 | */ | 47 | #define PARM_CCNT 0x1c |
43 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | 48 | |
44 | #define EDMA_CTLRS 2 | 49 | #define PARM_SIZE 0x20 |
45 | #define EDMA_CHANS 32 | 50 | |
46 | #else | 51 | /* Offsets for EDMA CC global channel registers and their shadows */ |
47 | #define EDMA_CTLRS 1 | 52 | #define SH_ER 0x00 /* 64 bits */ |
48 | #define EDMA_CHANS 64 | 53 | #define SH_ECR 0x08 /* 64 bits */ |
49 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | 54 | #define SH_ESR 0x10 /* 64 bits */ |
55 | #define SH_CER 0x18 /* 64 bits */ | ||
56 | #define SH_EER 0x20 /* 64 bits */ | ||
57 | #define SH_EECR 0x28 /* 64 bits */ | ||
58 | #define SH_EESR 0x30 /* 64 bits */ | ||
59 | #define SH_SER 0x38 /* 64 bits */ | ||
60 | #define SH_SECR 0x40 /* 64 bits */ | ||
61 | #define SH_IER 0x50 /* 64 bits */ | ||
62 | #define SH_IECR 0x58 /* 64 bits */ | ||
63 | #define SH_IESR 0x60 /* 64 bits */ | ||
64 | #define SH_IPR 0x68 /* 64 bits */ | ||
65 | #define SH_ICR 0x70 /* 64 bits */ | ||
66 | #define SH_IEVAL 0x78 | ||
67 | #define SH_QER 0x80 | ||
68 | #define SH_QEER 0x84 | ||
69 | #define SH_QEECR 0x88 | ||
70 | #define SH_QEESR 0x8c | ||
71 | #define SH_QSER 0x90 | ||
72 | #define SH_QSECR 0x94 | ||
73 | #define SH_SIZE 0x200 | ||
74 | |||
75 | /* Offsets for EDMA CC global registers */ | ||
76 | #define EDMA_REV 0x0000 | ||
77 | #define EDMA_CCCFG 0x0004 | ||
78 | #define EDMA_QCHMAP 0x0200 /* 8 registers */ | ||
79 | #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ | ||
80 | #define EDMA_QDMAQNUM 0x0260 | ||
81 | #define EDMA_QUETCMAP 0x0280 | ||
82 | #define EDMA_QUEPRI 0x0284 | ||
83 | #define EDMA_EMR 0x0300 /* 64 bits */ | ||
84 | #define EDMA_EMCR 0x0308 /* 64 bits */ | ||
85 | #define EDMA_QEMR 0x0310 | ||
86 | #define EDMA_QEMCR 0x0314 | ||
87 | #define EDMA_CCERR 0x0318 | ||
88 | #define EDMA_CCERRCLR 0x031c | ||
89 | #define EDMA_EEVAL 0x0320 | ||
90 | #define EDMA_DRAE 0x0340 /* 4 x 64 bits */ | ||
91 | #define EDMA_QRAE 0x0380 /* 4 registers */ | ||
92 | #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ | ||
93 | #define EDMA_QSTAT 0x0600 /* 2 registers */ | ||
94 | #define EDMA_QWMTHRA 0x0620 | ||
95 | #define EDMA_QWMTHRB 0x0624 | ||
96 | #define EDMA_CCSTAT 0x0640 | ||
97 | |||
98 | #define EDMA_M 0x1000 /* global channel registers */ | ||
99 | #define EDMA_ECR 0x1008 | ||
100 | #define EDMA_ECRH 0x100C | ||
101 | #define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ | ||
102 | #define EDMA_PARM 0x4000 /* PaRAM entries */ | ||
103 | |||
104 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | ||
105 | |||
106 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | ||
107 | |||
108 | /* CCCFG register */ | ||
109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | ||
110 | #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ | ||
111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | ||
112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | ||
113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | ||
114 | #define CHMAP_EXIST BIT(24) | ||
50 | 115 | ||
51 | /* | 116 | /* |
52 | * Max of 20 segments per channel to conserve PaRAM slots | 117 | * Max of 20 segments per channel to conserve PaRAM slots |
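The GET_NUM_* fields in CCCFG are log2-style encodings; a sketch of roughly how the driver's probe path later expands them into actual resource counts (based on the eDMA3 CC register layout, not code from this hunk):

	u32 cccfg = edma_read(ecc, EDMA_CCCFG);	/* accessor defined below */

	ecc->num_channels  = BIT(GET_NUM_DMACH(cccfg) + 1);	/* 2 .. 64 */
	ecc->num_qchannels = GET_NUM_QDMACH(cccfg) * 2;
	ecc->num_slots     = BIT(GET_NUM_PAENTRY(cccfg) + 4);	/* 16 .. 512 */
	ecc->num_tc        = GET_NUM_EVQUE(cccfg) + 1;
	ecc->num_region    = BIT(GET_NUM_REGN(cccfg));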
@@ -59,6 +124,37 @@ | |||
59 | #define EDMA_MAX_SLOTS MAX_NR_SG | 124 | #define EDMA_MAX_SLOTS MAX_NR_SG |
60 | #define EDMA_DESCRIPTORS 16 | 125 | #define EDMA_DESCRIPTORS 16 |
61 | 126 | ||
127 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
128 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
129 | #define EDMA_CONT_PARAMS_ANY 1001 | ||
130 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 | ||
131 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 | ||
132 | |||
133 | /* PaRAM slots are laid out like this */ | ||
134 | struct edmacc_param { | ||
135 | u32 opt; | ||
136 | u32 src; | ||
137 | u32 a_b_cnt; | ||
138 | u32 dst; | ||
139 | u32 src_dst_bidx; | ||
140 | u32 link_bcntrld; | ||
141 | u32 src_dst_cidx; | ||
142 | u32 ccnt; | ||
143 | } __packed; | ||
144 | |||
145 | /* fields in edmacc_param.opt */ | ||
146 | #define SAM BIT(0) | ||
147 | #define DAM BIT(1) | ||
148 | #define SYNCDIM BIT(2) | ||
149 | #define STATIC BIT(3) | ||
150 | #define EDMA_FWID (0x07 << 8) | ||
151 | #define TCCMODE BIT(11) | ||
152 | #define EDMA_TCC(t) ((t) << 12) | ||
153 | #define TCINTEN BIT(20) | ||
154 | #define ITCINTEN BIT(21) | ||
155 | #define TCCHEN BIT(22) | ||
156 | #define ITCCHEN BIT(23) | ||
157 | |||
62 | struct edma_pset { | 158 | struct edma_pset { |
63 | u32 len; | 159 | u32 len; |
64 | dma_addr_t addr; | 160 | dma_addr_t addr; |
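The edmacc_param layout mirrors a PaRAM slot, and the OPT bits above control triggering and completion reporting. As an illustration (tcc is a hypothetical transfer-completion code, typically the channel number), an OPT word requesting an interrupt on completion would be composed as:

	param.opt = TCINTEN | EDMA_TCC(tcc);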
@@ -105,26 +201,524 @@ struct edma_desc { | |||
105 | 201 | ||
106 | struct edma_cc; | 202 | struct edma_cc; |
107 | 203 | ||
204 | struct edma_tc { | ||
205 | struct device_node *node; | ||
206 | u16 id; | ||
207 | }; | ||
208 | |||
108 | struct edma_chan { | 209 | struct edma_chan { |
109 | struct virt_dma_chan vchan; | 210 | struct virt_dma_chan vchan; |
110 | struct list_head node; | 211 | struct list_head node; |
111 | struct edma_desc *edesc; | 212 | struct edma_desc *edesc; |
112 | struct edma_cc *ecc; | 213 | struct edma_cc *ecc; |
214 | struct edma_tc *tc; | ||
113 | int ch_num; | 215 | int ch_num; |
114 | bool alloced; | 216 | bool alloced; |
217 | bool hw_triggered; | ||
115 | int slot[EDMA_MAX_SLOTS]; | 218 | int slot[EDMA_MAX_SLOTS]; |
116 | int missed; | 219 | int missed; |
117 | struct dma_slave_config cfg; | 220 | struct dma_slave_config cfg; |
118 | }; | 221 | }; |
119 | 222 | ||
120 | struct edma_cc { | 223 | struct edma_cc { |
121 | int ctlr; | 224 | struct device *dev; |
225 | struct edma_soc_info *info; | ||
226 | void __iomem *base; | ||
227 | int id; | ||
228 | bool legacy_mode; | ||
229 | |||
230 | /* eDMA3 resource information */ | ||
231 | unsigned num_channels; | ||
232 | unsigned num_qchannels; | ||
233 | unsigned num_region; | ||
234 | unsigned num_slots; | ||
235 | unsigned num_tc; | ||
236 | bool chmap_exist; | ||
237 | enum dma_event_q default_queue; | ||
238 | |||
239 | /* | ||
240 | * The slot_inuse bit for each PaRAM slot is clear unless the slot is | ||
241 | * in use by Linux or allocated for use by a DSP. | ||
242 | */ | ||
243 | unsigned long *slot_inuse; | ||
244 | |||
122 | struct dma_device dma_slave; | 245 | struct dma_device dma_slave; |
123 | struct edma_chan slave_chans[EDMA_CHANS]; | 246 | struct dma_device *dma_memcpy; |
124 | int num_slave_chans; | 247 | struct edma_chan *slave_chans; |
248 | struct edma_tc *tc_list; | ||
125 | int dummy_slot; | 249 | int dummy_slot; |
126 | }; | 250 | }; |
127 | 251 | ||
252 | /* dummy param set used to (re)initialize parameter RAM slots */ | ||
253 | static const struct edmacc_param dummy_paramset = { | ||
254 | .link_bcntrld = 0xffff, | ||
255 | .ccnt = 1, | ||
256 | }; | ||
257 | |||
258 | #define EDMA_BINDING_LEGACY 0 | ||
259 | #define EDMA_BINDING_TPCC 1 | ||
260 | static const struct of_device_id edma_of_ids[] = { | ||
261 | { | ||
262 | .compatible = "ti,edma3", | ||
263 | .data = (void *)EDMA_BINDING_LEGACY, | ||
264 | }, | ||
265 | { | ||
266 | .compatible = "ti,edma3-tpcc", | ||
267 | .data = (void *)EDMA_BINDING_TPCC, | ||
268 | }, | ||
269 | {} | ||
270 | }; | ||
271 | |||
272 | static const struct of_device_id edma_tptc_of_ids[] = { | ||
273 | { .compatible = "ti,edma3-tptc", }, | ||
274 | {} | ||
275 | }; | ||
276 | |||
277 | static inline unsigned int edma_read(struct edma_cc *ecc, int offset) | ||
278 | { | ||
279 | return (unsigned int)__raw_readl(ecc->base + offset); | ||
280 | } | ||
281 | |||
282 | static inline void edma_write(struct edma_cc *ecc, int offset, int val) | ||
283 | { | ||
284 | __raw_writel(val, ecc->base + offset); | ||
285 | } | ||
286 | |||
287 | static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, | ||
288 | unsigned or) | ||
289 | { | ||
290 | unsigned val = edma_read(ecc, offset); | ||
291 | |||
292 | val &= and; | ||
293 | val |= or; | ||
294 | edma_write(ecc, offset, val); | ||
295 | } | ||
296 | |||
297 | static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and) | ||
298 | { | ||
299 | unsigned val = edma_read(ecc, offset); | ||
300 | |||
301 | val &= and; | ||
302 | edma_write(ecc, offset, val); | ||
303 | } | ||
304 | |||
305 | static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) | ||
306 | { | ||
307 | unsigned val = edma_read(ecc, offset); | ||
308 | |||
309 | val |= or; | ||
310 | edma_write(ecc, offset, val); | ||
311 | } | ||
312 | |||
313 | static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, | ||
314 | int i) | ||
315 | { | ||
316 | return edma_read(ecc, offset + (i << 2)); | ||
317 | } | ||
318 | |||
319 | static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, | ||
320 | unsigned val) | ||
321 | { | ||
322 | edma_write(ecc, offset + (i << 2), val); | ||
323 | } | ||
324 | |||
325 | static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, | ||
326 | unsigned and, unsigned or) | ||
327 | { | ||
328 | edma_modify(ecc, offset + (i << 2), and, or); | ||
329 | } | ||
330 | |||
331 | static inline void edma_or_array(struct edma_cc *ecc, int offset, int i, | ||
332 | unsigned or) | ||
333 | { | ||
334 | edma_or(ecc, offset + (i << 2), or); | ||
335 | } | ||
336 | |||
337 | static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, | ||
338 | unsigned or) | ||
339 | { | ||
340 | edma_or(ecc, offset + ((i * 2 + j) << 2), or); | ||
341 | } | ||
342 | |||
343 | static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, | ||
344 | int j, unsigned val) | ||
345 | { | ||
346 | edma_write(ecc, offset + ((i * 2 + j) << 2), val); | ||
347 | } | ||
348 | |||
349 | static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset) | ||
350 | { | ||
351 | return edma_read(ecc, EDMA_SHADOW0 + offset); | ||
352 | } | ||
353 | |||
354 | static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, | ||
355 | int offset, int i) | ||
356 | { | ||
357 | return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); | ||
358 | } | ||
359 | |||
360 | static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, | ||
361 | unsigned val) | ||
362 | { | ||
363 | edma_write(ecc, EDMA_SHADOW0 + offset, val); | ||
364 | } | ||
365 | |||
366 | static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, | ||
367 | int i, unsigned val) | ||
368 | { | ||
369 | edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); | ||
370 | } | ||
371 | |||
372 | static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset, | ||
373 | int param_no) | ||
374 | { | ||
375 | return edma_read(ecc, EDMA_PARM + offset + (param_no << 5)); | ||
376 | } | ||
377 | |||
378 | static inline void edma_param_write(struct edma_cc *ecc, int offset, | ||
379 | int param_no, unsigned val) | ||
380 | { | ||
381 | edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val); | ||
382 | } | ||
383 | |||
384 | static inline void edma_param_modify(struct edma_cc *ecc, int offset, | ||
385 | int param_no, unsigned and, unsigned or) | ||
386 | { | ||
387 | edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); | ||
388 | } | ||
389 | |||
390 | static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no, | ||
391 | unsigned and) | ||
392 | { | ||
393 | edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and); | ||
394 | } | ||
395 | |||
396 | static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no, | ||
397 | unsigned or) | ||
398 | { | ||
399 | edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); | ||
400 | } | ||
401 | |||
402 | static inline void set_bits(int offset, int len, unsigned long *p) | ||
403 | { | ||
404 | for (; len > 0; len--) | ||
405 | set_bit(offset + (len - 1), p); | ||
406 | } | ||
407 | |||
408 | static inline void clear_bits(int offset, int len, unsigned long *p) | ||
409 | { | ||
410 | for (; len > 0; len--) | ||
411 | clear_bit(offset + (len - 1), p); | ||
412 | } | ||
413 | |||
414 | static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, | ||
415 | int priority) | ||
416 | { | ||
417 | int bit = queue_no * 4; | ||
418 | |||
419 | edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); | ||
420 | } | ||
421 | |||
422 | static void edma_set_chmap(struct edma_chan *echan, int slot) | ||
423 | { | ||
424 | struct edma_cc *ecc = echan->ecc; | ||
425 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
426 | |||
427 | if (ecc->chmap_exist) { | ||
428 | slot = EDMA_CHAN_SLOT(slot); | ||
429 | edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | static void edma_setup_interrupt(struct edma_chan *echan, bool enable) | ||
434 | { | ||
435 | struct edma_cc *ecc = echan->ecc; | ||
436 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
437 | |||
438 | if (enable) { | ||
439 | edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, | ||
440 | BIT(channel & 0x1f)); | ||
441 | edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, | ||
442 | BIT(channel & 0x1f)); | ||
443 | } else { | ||
444 | edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, | ||
445 | BIT(channel & 0x1f)); | ||
446 | } | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * paRAM slot management functions | ||
451 | */ | ||
452 | static void edma_write_slot(struct edma_cc *ecc, unsigned slot, | ||
453 | const struct edmacc_param *param) | ||
454 | { | ||
455 | slot = EDMA_CHAN_SLOT(slot); | ||
456 | if (slot >= ecc->num_slots) | ||
457 | return; | ||
458 | memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); | ||
459 | } | ||
460 | |||
461 | static void edma_read_slot(struct edma_cc *ecc, unsigned slot, | ||
462 | struct edmacc_param *param) | ||
463 | { | ||
464 | slot = EDMA_CHAN_SLOT(slot); | ||
465 | if (slot >= ecc->num_slots) | ||
466 | return; | ||
467 | memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); | ||
468 | } | ||
469 | |||
470 | /** | ||
471 | * edma_alloc_slot - allocate DMA parameter RAM | ||
472 | * @ecc: pointer to edma_cc struct | ||
473 | * @slot: specific slot to allocate; negative for "any unused slot" | ||
474 | * | ||
475 | * This allocates a parameter RAM slot, initializing it to hold a | ||
476 | * dummy transfer. Slots allocated using this routine have not been | ||
477 | * mapped to a hardware DMA channel, and will normally be used by | ||
478 | * linking to them from a slot associated with a DMA channel. | ||
479 | * | ||
480 | * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific | ||
481 | * slots may be allocated on behalf of DSP firmware. | ||
482 | * | ||
483 | * Returns the number of the slot, else negative errno. | ||
484 | */ | ||
485 | static int edma_alloc_slot(struct edma_cc *ecc, int slot) | ||
486 | { | ||
487 | if (slot > 0) { | ||
488 | slot = EDMA_CHAN_SLOT(slot); | ||
489 | /* Requesting entry paRAM slot for a HW triggered channel. */ | ||
490 | if (ecc->chmap_exist && slot < ecc->num_channels) | ||
491 | slot = EDMA_SLOT_ANY; | ||
492 | } | ||
493 | |||
494 | if (slot < 0) { | ||
495 | if (ecc->chmap_exist) | ||
496 | slot = 0; | ||
497 | else | ||
498 | slot = ecc->num_channels; | ||
499 | for (;;) { | ||
500 | slot = find_next_zero_bit(ecc->slot_inuse, | ||
501 | ecc->num_slots, | ||
502 | slot); | ||
503 | if (slot == ecc->num_slots) | ||
504 | return -ENOMEM; | ||
505 | if (!test_and_set_bit(slot, ecc->slot_inuse)) | ||
506 | break; | ||
507 | } | ||
508 | } else if (slot >= ecc->num_slots) { | ||
509 | return -EINVAL; | ||
510 | } else if (test_and_set_bit(slot, ecc->slot_inuse)) { | ||
511 | return -EBUSY; | ||
512 | } | ||
513 | |||
514 | edma_write_slot(ecc, slot, &dummy_paramset); | ||
515 | |||
516 | return EDMA_CTLR_CHAN(ecc->id, slot); | ||
517 | } | ||
518 | |||
519 | static void edma_free_slot(struct edma_cc *ecc, unsigned slot) | ||
520 | { | ||
521 | slot = EDMA_CHAN_SLOT(slot); | ||
522 | if (slot >= ecc->num_slots) | ||
523 | return; | ||
524 | |||
525 | edma_write_slot(ecc, slot, &dummy_paramset); | ||
526 | clear_bit(slot, ecc->slot_inuse); | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * edma_link - link one parameter RAM slot to another | ||
531 | * @ecc: pointer to edma_cc struct | ||
532 | * @from: parameter RAM slot originating the link | ||
533 | * @to: parameter RAM slot which is the link target | ||
534 | * | ||
535 | * The originating slot should not be part of any active DMA transfer. | ||
536 | */ | ||
537 | static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) | ||
538 | { | ||
539 | if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) | ||
540 | dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); | ||
541 | |||
542 | from = EDMA_CHAN_SLOT(from); | ||
543 | to = EDMA_CHAN_SLOT(to); | ||
544 | if (from >= ecc->num_slots || to >= ecc->num_slots) | ||
545 | return; | ||
546 | |||
547 | edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, | ||
548 | PARM_OFFSET(to)); | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * edma_get_position - returns the current transfer point | ||
553 | * @ecc: pointer to edma_cc struct | ||
554 | * @slot: parameter RAM slot being examined | ||
555 | * @dst: true selects the dest position, false the source | ||
556 | * | ||
557 | * Returns the position of the current active slot | ||
558 | */ | ||
559 | static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, | ||
560 | bool dst) | ||
561 | { | ||
562 | u32 offs; | ||
563 | |||
564 | slot = EDMA_CHAN_SLOT(slot); | ||
565 | offs = PARM_OFFSET(slot); | ||
566 | offs += dst ? PARM_DST : PARM_SRC; | ||
567 | |||
568 | return edma_read(ecc, offs); | ||
569 | } | ||
570 | |||
571 | /* | ||
572 | * Channels with event associations will be triggered by their hardware | ||
573 | * events, and channels without such associations will be triggered by | ||
574 | * software. (At this writing there is no interface for using software | ||
575 | * triggers except with channels that don't support hardware triggers.) | ||
576 | */ | ||
577 | static void edma_start(struct edma_chan *echan) | ||
578 | { | ||
579 | struct edma_cc *ecc = echan->ecc; | ||
580 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
581 | int j = (channel >> 5); | ||
582 | unsigned int mask = BIT(channel & 0x1f); | ||
583 | |||
584 | if (!echan->hw_triggered) { | ||
585 | /* EDMA channels without event association */ | ||
586 | dev_dbg(ecc->dev, "ESR%d %08x\n", j, | ||
587 | edma_shadow0_read_array(ecc, SH_ESR, j)); | ||
588 | edma_shadow0_write_array(ecc, SH_ESR, j, mask); | ||
589 | } else { | ||
590 | /* EDMA channel with event association */ | ||
591 | dev_dbg(ecc->dev, "ER%d %08x\n", j, | ||
592 | edma_shadow0_read_array(ecc, SH_ER, j)); | ||
593 | /* Clear any pending event or error */ | ||
594 | edma_write_array(ecc, EDMA_ECR, j, mask); | ||
595 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
596 | /* Clear any SER */ | ||
597 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
598 | edma_shadow0_write_array(ecc, SH_EESR, j, mask); | ||
599 | dev_dbg(ecc->dev, "EER%d %08x\n", j, | ||
600 | edma_shadow0_read_array(ecc, SH_EER, j)); | ||
601 | } | ||
602 | } | ||
603 | |||
604 | static void edma_stop(struct edma_chan *echan) | ||
605 | { | ||
606 | struct edma_cc *ecc = echan->ecc; | ||
607 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
608 | int j = (channel >> 5); | ||
609 | unsigned int mask = BIT(channel & 0x1f); | ||
610 | |||
611 | edma_shadow0_write_array(ecc, SH_EECR, j, mask); | ||
612 | edma_shadow0_write_array(ecc, SH_ECR, j, mask); | ||
613 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
614 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
615 | |||
616 | /* clear possibly pending completion interrupt */ | ||
617 | edma_shadow0_write_array(ecc, SH_ICR, j, mask); | ||
618 | |||
619 | dev_dbg(ecc->dev, "EER%d %08x\n", j, | ||
620 | edma_shadow0_read_array(ecc, SH_EER, j)); | ||
621 | |||
622 | /* REVISIT: consider guarding against inappropriate event | ||
623 | * chaining by overwriting with dummy_paramset. | ||
624 | */ | ||
625 | } | ||
626 | |||
627 | /* | ||
628 | * Temporarily disable EDMA hardware events on the specified channel, | ||
629 | * preventing them from triggering new transfers | ||
630 | */ | ||
631 | static void edma_pause(struct edma_chan *echan) | ||
632 | { | ||
633 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
634 | unsigned int mask = BIT(channel & 0x1f); | ||
635 | |||
636 | edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); | ||
637 | } | ||
638 | |||
639 | /* Re-enable EDMA hardware events on the specified channel. */ | ||
640 | static void edma_resume(struct edma_chan *echan) | ||
641 | { | ||
642 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
643 | unsigned int mask = BIT(channel & 0x1f); | ||
644 | |||
645 | edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); | ||
646 | } | ||
647 | |||
648 | static void edma_trigger_channel(struct edma_chan *echan) | ||
649 | { | ||
650 | struct edma_cc *ecc = echan->ecc; | ||
651 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
652 | unsigned int mask = BIT(channel & 0x1f); | ||
653 | |||
654 | edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); | ||
655 | |||
656 | dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), | ||
657 | edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); | ||
658 | } | ||
659 | |||
660 | static void edma_clean_channel(struct edma_chan *echan) | ||
661 | { | ||
662 | struct edma_cc *ecc = echan->ecc; | ||
663 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
664 | int j = (channel >> 5); | ||
665 | unsigned int mask = BIT(channel & 0x1f); | ||
666 | |||
667 | dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); | ||
668 | edma_shadow0_write_array(ecc, SH_ECR, j, mask); | ||
669 | /* Clear the corresponding EMR bits */ | ||
670 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
671 | /* Clear any SER */ | ||
672 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
673 | edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); | ||
674 | } | ||
675 | |||
676 | /* Move channel to a specific event queue */ | ||
677 | static void edma_assign_channel_eventq(struct edma_chan *echan, | ||
678 | enum dma_event_q eventq_no) | ||
679 | { | ||
680 | struct edma_cc *ecc = echan->ecc; | ||
681 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
682 | int bit = (channel & 0x7) * 4; | ||
683 | |||
684 | /* default to low priority queue */ | ||
685 | if (eventq_no == EVENTQ_DEFAULT) | ||
686 | eventq_no = ecc->default_queue; | ||
687 | if (eventq_no >= ecc->num_tc) | ||
688 | return; | ||
689 | |||
690 | eventq_no &= 7; | ||
691 | edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), | ||
692 | eventq_no << bit); | ||
693 | } | ||
694 | |||
695 | static int edma_alloc_channel(struct edma_chan *echan, | ||
696 | enum dma_event_q eventq_no) | ||
697 | { | ||
698 | struct edma_cc *ecc = echan->ecc; | ||
699 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
700 | |||
701 | /* ensure access through shadow region 0 */ | ||
702 | edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); | ||
703 | |||
704 | /* ensure no events are pending */ | ||
705 | edma_stop(echan); | ||
706 | |||
707 | edma_setup_interrupt(echan, true); | ||
708 | |||
709 | edma_assign_channel_eventq(echan, eventq_no); | ||
710 | |||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | static void edma_free_channel(struct edma_chan *echan) | ||
715 | { | ||
716 | /* ensure no events are pending */ | ||
717 | edma_stop(echan); | ||
718 | /* REVISIT should probably take out of shadow region 0 */ | ||
719 | edma_setup_interrupt(echan, false); | ||
720 | } | ||
721 | |||
128 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | 722 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) |
129 | { | 723 | { |
130 | return container_of(d, struct edma_cc, dma_slave); | 724 | return container_of(d, struct edma_cc, dma_slave); |
@@ -135,8 +729,7 @@ static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | |||
135 | return container_of(c, struct edma_chan, vchan.chan); | 729 | return container_of(c, struct edma_chan, vchan.chan); |
136 | } | 730 | } |
137 | 731 | ||
138 | static inline struct edma_desc | 732 | static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) |
139 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
140 | { | 733 | { |
141 | return container_of(tx, struct edma_desc, vdesc.tx); | 734 | return container_of(tx, struct edma_desc, vdesc.tx); |
142 | } | 735 | } |
@@ -149,20 +742,17 @@ static void edma_desc_free(struct virt_dma_desc *vdesc) | |||
149 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | 742 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ |
150 | static void edma_execute(struct edma_chan *echan) | 743 | static void edma_execute(struct edma_chan *echan) |
151 | { | 744 | { |
745 | struct edma_cc *ecc = echan->ecc; | ||
152 | struct virt_dma_desc *vdesc; | 746 | struct virt_dma_desc *vdesc; |
153 | struct edma_desc *edesc; | 747 | struct edma_desc *edesc; |
154 | struct device *dev = echan->vchan.chan.device->dev; | 748 | struct device *dev = echan->vchan.chan.device->dev; |
155 | int i, j, left, nslots; | 749 | int i, j, left, nslots; |
156 | 750 | ||
157 | /* If either we processed all psets or we're still not started */ | 751 | if (!echan->edesc) { |
158 | if (!echan->edesc || | 752 | /* Setup is needed for the first transfer */ |
159 | echan->edesc->pset_nr == echan->edesc->processed) { | ||
160 | /* Get next vdesc */ | ||
161 | vdesc = vchan_next_desc(&echan->vchan); | 753 | vdesc = vchan_next_desc(&echan->vchan); |
162 | if (!vdesc) { | 754 | if (!vdesc) |
163 | echan->edesc = NULL; | ||
164 | return; | 755 | return; |
165 | } | ||
166 | list_del(&vdesc->node); | 756 | list_del(&vdesc->node); |
167 | echan->edesc = to_edma_desc(&vdesc->tx); | 757 | echan->edesc = to_edma_desc(&vdesc->tx); |
168 | } | 758 | } |
@@ -177,32 +767,32 @@ static void edma_execute(struct edma_chan *echan) | |||
177 | /* Write descriptor PaRAM set(s) */ | 767 | /* Write descriptor PaRAM set(s) */ |
178 | for (i = 0; i < nslots; i++) { | 768 | for (i = 0; i < nslots; i++) { |
179 | j = i + edesc->processed; | 769 | j = i + edesc->processed; |
180 | edma_write_slot(echan->slot[i], &edesc->pset[j].param); | 770 | edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); |
181 | edesc->sg_len += edesc->pset[j].len; | 771 | edesc->sg_len += edesc->pset[j].len; |
182 | dev_vdbg(echan->vchan.chan.device->dev, | 772 | dev_vdbg(dev, |
183 | "\n pset[%d]:\n" | 773 | "\n pset[%d]:\n" |
184 | " chnum\t%d\n" | 774 | " chnum\t%d\n" |
185 | " slot\t%d\n" | 775 | " slot\t%d\n" |
186 | " opt\t%08x\n" | 776 | " opt\t%08x\n" |
187 | " src\t%08x\n" | 777 | " src\t%08x\n" |
188 | " dst\t%08x\n" | 778 | " dst\t%08x\n" |
189 | " abcnt\t%08x\n" | 779 | " abcnt\t%08x\n" |
190 | " ccnt\t%08x\n" | 780 | " ccnt\t%08x\n" |
191 | " bidx\t%08x\n" | 781 | " bidx\t%08x\n" |
192 | " cidx\t%08x\n" | 782 | " cidx\t%08x\n" |
193 | " lkrld\t%08x\n", | 783 | " lkrld\t%08x\n", |
194 | j, echan->ch_num, echan->slot[i], | 784 | j, echan->ch_num, echan->slot[i], |
195 | edesc->pset[j].param.opt, | 785 | edesc->pset[j].param.opt, |
196 | edesc->pset[j].param.src, | 786 | edesc->pset[j].param.src, |
197 | edesc->pset[j].param.dst, | 787 | edesc->pset[j].param.dst, |
198 | edesc->pset[j].param.a_b_cnt, | 788 | edesc->pset[j].param.a_b_cnt, |
199 | edesc->pset[j].param.ccnt, | 789 | edesc->pset[j].param.ccnt, |
200 | edesc->pset[j].param.src_dst_bidx, | 790 | edesc->pset[j].param.src_dst_bidx, |
201 | edesc->pset[j].param.src_dst_cidx, | 791 | edesc->pset[j].param.src_dst_cidx, |
202 | edesc->pset[j].param.link_bcntrld); | 792 | edesc->pset[j].param.link_bcntrld); |
203 | /* Link to the previous slot if not the last set */ | 793 | /* Link to the previous slot if not the last set */ |
204 | if (i != (nslots - 1)) | 794 | if (i != (nslots - 1)) |
205 | edma_link(echan->slot[i], echan->slot[i+1]); | 795 | edma_link(ecc, echan->slot[i], echan->slot[i + 1]); |
206 | } | 796 | } |
207 | 797 | ||
208 | edesc->processed += nslots; | 798 | edesc->processed += nslots; |
@@ -214,34 +804,32 @@ static void edma_execute(struct edma_chan *echan) | |||
214 | */ | 804 | */ |
215 | if (edesc->processed == edesc->pset_nr) { | 805 | if (edesc->processed == edesc->pset_nr) { |
216 | if (edesc->cyclic) | 806 | if (edesc->cyclic) |
217 | edma_link(echan->slot[nslots-1], echan->slot[1]); | 807 | edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); |
218 | else | 808 | else |
219 | edma_link(echan->slot[nslots-1], | 809 | edma_link(ecc, echan->slot[nslots - 1], |
220 | echan->ecc->dummy_slot); | 810 | echan->ecc->dummy_slot); |
221 | } | 811 | } |
222 | 812 | ||
223 | if (edesc->processed <= MAX_NR_SG) { | 813 | if (echan->missed) { |
814 | /* | ||
815 | * This happens due to setup times between intermediate | ||
816 | * transfers in long SG lists which have to be broken up into | ||
817 | * transfers of MAX_NR_SG | ||
818 | */ | ||
819 | dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); | ||
820 | edma_clean_channel(echan); | ||
821 | edma_stop(echan); | ||
822 | edma_start(echan); | ||
823 | edma_trigger_channel(echan); | ||
824 | echan->missed = 0; | ||
825 | } else if (edesc->processed <= MAX_NR_SG) { | ||
224 | dev_dbg(dev, "first transfer starting on channel %d\n", | 826 | dev_dbg(dev, "first transfer starting on channel %d\n", |
225 | echan->ch_num); | 827 | echan->ch_num); |
226 | edma_start(echan->ch_num); | 828 | edma_start(echan); |
227 | } else { | 829 | } else { |
228 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", | 830 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", |
229 | echan->ch_num, edesc->processed); | 831 | echan->ch_num, edesc->processed); |
230 | edma_resume(echan->ch_num); | 832 | edma_resume(echan); |
231 | } | ||
232 | |||
233 | /* | ||
234 | * This happens due to setup times between intermediate transfers | ||
235 | * in long SG lists which have to be broken up into transfers of | ||
236 | * MAX_NR_SG | ||
237 | */ | ||
238 | if (echan->missed) { | ||
239 | dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); | ||
240 | edma_clean_channel(echan->ch_num); | ||
241 | edma_stop(echan->ch_num); | ||
242 | edma_start(echan->ch_num); | ||
243 | edma_trigger_channel(echan->ch_num); | ||
244 | echan->missed = 0; | ||
245 | } | 833 | } |
246 | } | 834 | } |
247 | 835 | ||
@@ -259,20 +847,16 @@ static int edma_terminate_all(struct dma_chan *chan) | |||
259 | * echan->edesc is NULL and exit.) | 847 | * echan->edesc is NULL and exit.) |
260 | */ | 848 | */ |
261 | if (echan->edesc) { | 849 | if (echan->edesc) { |
262 | int cyclic = echan->edesc->cyclic; | 850 | edma_stop(echan); |
263 | 851 | /* Move the cyclic channel back to default queue */ | |
852 | if (!echan->tc && echan->edesc->cyclic) | ||
853 | edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); | ||
264 | /* | 854 | /* |
265 | * free the running request descriptor | 855 | * free the running request descriptor |
266 | * since it is not in any of the vdesc lists | 856 | * since it is not in any of the vdesc lists |
267 | */ | 857 | */ |
268 | edma_desc_free(&echan->edesc->vdesc); | 858 | edma_desc_free(&echan->edesc->vdesc); |
269 | |||
270 | echan->edesc = NULL; | 859 | echan->edesc = NULL; |
271 | edma_stop(echan->ch_num); | ||
272 | /* Move the cyclic channel back to default queue */ | ||
273 | if (cyclic) | ||
274 | edma_assign_channel_eventq(echan->ch_num, | ||
275 | EVENTQ_DEFAULT); | ||
276 | } | 860 | } |
277 | 861 | ||
278 | vchan_get_all_descriptors(&echan->vchan, &head); | 862 | vchan_get_all_descriptors(&echan->vchan, &head); |
@@ -303,7 +887,7 @@ static int edma_dma_pause(struct dma_chan *chan) | |||
303 | if (!echan->edesc) | 887 | if (!echan->edesc) |
304 | return -EINVAL; | 888 | return -EINVAL; |
305 | 889 | ||
306 | edma_pause(echan->ch_num); | 890 | edma_pause(echan); |
307 | return 0; | 891 | return 0; |
308 | } | 892 | } |
309 | 893 | ||
@@ -311,7 +895,7 @@ static int edma_dma_resume(struct dma_chan *chan) | |||
311 | { | 895 | { |
312 | struct edma_chan *echan = to_edma_chan(chan); | 896 | struct edma_chan *echan = to_edma_chan(chan); |
313 | 897 | ||
314 | edma_resume(echan->ch_num); | 898 | edma_resume(echan); |
315 | return 0; | 899 | return 0; |
316 | } | 900 | } |
317 | 901 | ||
@@ -327,19 +911,17 @@ static int edma_dma_resume(struct dma_chan *chan) | |||
327 | * @direction: Direction of the transfer | 911 | * @direction: Direction of the transfer |
328 | */ | 912 | */ |
329 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, | 913 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, |
330 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, | 914 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, |
331 | enum dma_slave_buswidth dev_width, unsigned int dma_length, | 915 | unsigned int acnt, unsigned int dma_length, |
332 | enum dma_transfer_direction direction) | 916 | enum dma_transfer_direction direction) |
333 | { | 917 | { |
334 | struct edma_chan *echan = to_edma_chan(chan); | 918 | struct edma_chan *echan = to_edma_chan(chan); |
335 | struct device *dev = chan->device->dev; | 919 | struct device *dev = chan->device->dev; |
336 | struct edmacc_param *param = &epset->param; | 920 | struct edmacc_param *param = &epset->param; |
337 | int acnt, bcnt, ccnt, cidx; | 921 | int bcnt, ccnt, cidx; |
338 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | 922 | int src_bidx, dst_bidx, src_cidx, dst_cidx; |
339 | int absync; | 923 | int absync; |
340 | 924 | ||
341 | acnt = dev_width; | ||
342 | |||
343 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ | 925 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ |
344 | if (!burst) | 926 | if (!burst) |
345 | burst = 1; | 927 | burst = 1; |
@@ -475,8 +1057,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
475 | return NULL; | 1057 | return NULL; |
476 | } | 1058 | } |
477 | 1059 | ||
478 | edesc = kzalloc(sizeof(*edesc) + sg_len * | 1060 | edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), |
479 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 1061 | GFP_ATOMIC); |
480 | if (!edesc) { | 1062 | if (!edesc) { |
481 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); | 1063 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
482 | return NULL; | 1064 | return NULL; |
@@ -493,8 +1075,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
493 | for (i = 0; i < nslots; i++) { | 1075 | for (i = 0; i < nslots; i++) { |
494 | if (echan->slot[i] < 0) { | 1076 | if (echan->slot[i] < 0) { |
495 | echan->slot[i] = | 1077 | echan->slot[i] = |
496 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | 1078 | edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); |
497 | EDMA_SLOT_ANY); | ||
498 | if (echan->slot[i] < 0) { | 1079 | if (echan->slot[i] < 0) { |
499 | kfree(edesc); | 1080 | kfree(edesc); |
500 | dev_err(dev, "%s: Failed to allocate slot\n", | 1081 | dev_err(dev, "%s: Failed to allocate slot\n", |
@@ -541,36 +1122,98 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
541 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 1122 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
542 | size_t len, unsigned long tx_flags) | 1123 | size_t len, unsigned long tx_flags) |
543 | { | 1124 | { |
544 | int ret; | 1125 | int ret, nslots; |
545 | struct edma_desc *edesc; | 1126 | struct edma_desc *edesc; |
546 | struct device *dev = chan->device->dev; | 1127 | struct device *dev = chan->device->dev; |
547 | struct edma_chan *echan = to_edma_chan(chan); | 1128 | struct edma_chan *echan = to_edma_chan(chan); |
1129 | unsigned int width, pset_len; | ||
548 | 1130 | ||
549 | if (unlikely(!echan || !len)) | 1131 | if (unlikely(!echan || !len)) |
550 | return NULL; | 1132 | return NULL; |
551 | 1133 | ||
552 | edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); | 1134 | if (len < SZ_64K) { |
1135 | /* | ||
1136 | * Transfer size less than 64K can be handled with one paRAM | ||
1137 | * slot and with one burst. | ||
1138 | * ACNT = length | ||
1139 | */ | ||
1140 | width = len; | ||
1141 | pset_len = len; | ||
1142 | nslots = 1; | ||
1143 | } else { | ||
1144 | /* | ||
1145 | * Transfer size bigger than 64K will be handled with a maximum of | ||
1146 | * two paRAM slots. | ||
1147 | * slot1: (full_length / 32767) times 32767-byte bursts. | ||
1148 | * ACNT = 32767, length1: (full_length / 32767) * 32767 | ||
1149 | * slot2: the remaining amount of data after slot1. | ||
1150 | * ACNT = full_length - length1, length2 = ACNT | ||
1151 | * | ||
1152 | * When the full_length is a multiple of 32767, one slot can be | ||
1153 | * used to complete the transfer. | ||
1154 | */ | ||
1155 | width = SZ_32K - 1; | ||
1156 | pset_len = rounddown(len, width); | ||
1157 | /* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */ | ||
1158 | if (unlikely(pset_len == len)) | ||
1159 | nslots = 1; | ||
1160 | else | ||
1161 | nslots = 2; | ||
1162 | } | ||
1163 | |||
1164 | edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), | ||
1165 | GFP_ATOMIC); | ||
553 | if (!edesc) { | 1166 | if (!edesc) { |
554 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | 1167 | dev_dbg(dev, "Failed to allocate a descriptor\n"); |
555 | return NULL; | 1168 | return NULL; |
556 | } | 1169 | } |
557 | 1170 | ||
558 | edesc->pset_nr = 1; | 1171 | edesc->pset_nr = nslots; |
1172 | edesc->residue = edesc->residue_stat = len; | ||
1173 | edesc->direction = DMA_MEM_TO_MEM; | ||
1174 | edesc->echan = echan; | ||
559 | 1175 | ||
560 | ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, | 1176 | ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, |
561 | DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM); | 1177 | width, pset_len, DMA_MEM_TO_MEM); |
562 | if (ret < 0) | 1178 | if (ret < 0) { |
1179 | kfree(edesc); | ||
563 | return NULL; | 1180 | return NULL; |
1181 | } | ||
564 | 1182 | ||
565 | edesc->absync = ret; | 1183 | edesc->absync = ret; |
566 | 1184 | ||
567 | /* | ||
568 | * Enable intermediate transfer chaining to re-trigger channel | ||
569 | * on completion of every TR, and enable transfer-completion | ||
570 | * interrupt on completion of the whole transfer. | ||
571 | */ | ||
572 | edesc->pset[0].param.opt |= ITCCHEN; | 1185 | edesc->pset[0].param.opt |= ITCCHEN; |
573 | edesc->pset[0].param.opt |= TCINTEN; | 1186 | if (nslots == 1) { |
1187 | /* Enable transfer complete interrupt */ | ||
1188 | edesc->pset[0].param.opt |= TCINTEN; | ||
1189 | } else { | ||
1190 | /* Enable transfer complete chaining for the first slot */ | ||
1191 | edesc->pset[0].param.opt |= TCCHEN; | ||
1192 | |||
1193 | if (echan->slot[1] < 0) { | ||
1194 | echan->slot[1] = edma_alloc_slot(echan->ecc, | ||
1195 | EDMA_SLOT_ANY); | ||
1196 | if (echan->slot[1] < 0) { | ||
1197 | kfree(edesc); | ||
1198 | dev_err(dev, "%s: Failed to allocate slot\n", | ||
1199 | __func__); | ||
1200 | return NULL; | ||
1201 | } | ||
1202 | } | ||
1203 | dest += pset_len; | ||
1204 | src += pset_len; | ||
1205 | pset_len = width = len % (SZ_32K - 1); | ||
1206 | |||
1207 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | ||
1208 | width, pset_len, DMA_MEM_TO_MEM); | ||
1209 | if (ret < 0) { | ||
1210 | kfree(edesc); | ||
1211 | return NULL; | ||
1212 | } | ||
1213 | |||
1214 | edesc->pset[1].param.opt |= ITCCHEN; | ||
1215 | edesc->pset[1].param.opt |= TCINTEN; | ||
1216 | } | ||
574 | 1217 | ||
575 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 1218 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
576 | } | 1219 | } |
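The split logic in edma_prep_dma_memcpy() is easiest to follow with concrete numbers: slot 0 moves pset_len bytes as full 32767-byte bursts and chains into slot 1 via TCCHEN, while slot 1 moves the remainder and raises the completion interrupt via TCINTEN. A worked sketch of the same arithmetic, using len = 100000 as an arbitrary example:

size_t len = 100000;				/* example length >= SZ_64K */
unsigned int width = SZ_32K - 1;		/* 32767, the maximum ACNT */
unsigned int pset_len = rounddown(len, width);	/* 98301 -> 3 bursts in slot 0 */
unsigned int tail = len % width;		/* 1699 -> one burst in slot 1 */
int nslots = (pset_len == len) ? 1 : 2;		/* 2 slots needed here */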
@@ -629,8 +1272,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
629 | if (nslots > MAX_NR_SG) | 1272 | if (nslots > MAX_NR_SG) |
630 | return NULL; | 1273 | return NULL; |
631 | 1274 | ||
632 | edesc = kzalloc(sizeof(*edesc) + nslots * | 1275 | edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), |
633 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 1276 | GFP_ATOMIC); |
634 | if (!edesc) { | 1277 | if (!edesc) { |
635 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); | 1278 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
636 | return NULL; | 1279 | return NULL; |
@@ -649,8 +1292,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
649 | /* Allocate a PaRAM slot, if needed */ | 1292 | /* Allocate a PaRAM slot, if needed */ |
650 | if (echan->slot[i] < 0) { | 1293 | if (echan->slot[i] < 0) { |
651 | echan->slot[i] = | 1294 | echan->slot[i] = |
652 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | 1295 | edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); |
653 | EDMA_SLOT_ANY); | ||
654 | if (echan->slot[i] < 0) { | 1296 | if (echan->slot[i] < 0) { |
655 | kfree(edesc); | 1297 | kfree(edesc); |
656 | dev_err(dev, "%s: Failed to allocate slot\n", | 1298 | dev_err(dev, "%s: Failed to allocate slot\n", |
@@ -711,128 +1353,281 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
711 | } | 1353 | } |
712 | 1354 | ||
713 | /* Place the cyclic channel to highest priority queue */ | 1355 | /* Place the cyclic channel to highest priority queue */ |
714 | edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); | 1356 | if (!echan->tc) |
1357 | edma_assign_channel_eventq(echan, EVENTQ_0); | ||
715 | 1358 | ||
716 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 1359 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
717 | } | 1360 | } |
718 | 1361 | ||
719 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | 1362 | static void edma_completion_handler(struct edma_chan *echan) |
720 | { | 1363 | { |
721 | struct edma_chan *echan = data; | ||
722 | struct device *dev = echan->vchan.chan.device->dev; | 1364 | struct device *dev = echan->vchan.chan.device->dev; |
723 | struct edma_desc *edesc; | 1365 | struct edma_desc *edesc = echan->edesc; |
724 | struct edmacc_param p; | ||
725 | 1366 | ||
726 | edesc = echan->edesc; | 1367 | if (!edesc) |
1368 | return; | ||
727 | 1369 | ||
728 | /* Pause the channel for non-cyclic */ | 1370 | spin_lock(&echan->vchan.lock); |
729 | if (!edesc || (edesc && !edesc->cyclic)) | 1371 | if (edesc->cyclic) { |
730 | edma_pause(echan->ch_num); | 1372 | vchan_cyclic_callback(&edesc->vdesc); |
731 | 1373 | spin_unlock(&echan->vchan.lock); | |
732 | switch (ch_status) { | 1374 | return; |
733 | case EDMA_DMA_COMPLETE: | 1375 | } else if (edesc->processed == edesc->pset_nr) { |
734 | spin_lock(&echan->vchan.lock); | 1376 | edesc->residue = 0; |
735 | 1377 | edma_stop(echan); | |
736 | if (edesc) { | 1378 | vchan_cookie_complete(&edesc->vdesc); |
737 | if (edesc->cyclic) { | 1379 | echan->edesc = NULL; |
738 | vchan_cyclic_callback(&edesc->vdesc); | 1380 | |
739 | } else if (edesc->processed == edesc->pset_nr) { | 1381 | dev_dbg(dev, "Transfer completed on channel %d\n", |
740 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | 1382 | echan->ch_num); |
741 | edesc->residue = 0; | 1383 | } else { |
742 | edma_stop(echan->ch_num); | 1384 | dev_dbg(dev, "Sub transfer completed on channel %d\n", |
743 | vchan_cookie_complete(&edesc->vdesc); | 1385 | echan->ch_num); |
744 | edma_execute(echan); | 1386 | |
745 | } else { | 1387 | edma_pause(echan); |
746 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | 1388 | |
747 | 1389 | /* Update statistics for tx_status */ | |
748 | /* Update statistics for tx_status */ | 1390 | edesc->residue -= edesc->sg_len; |
749 | edesc->residue -= edesc->sg_len; | 1391 | edesc->residue_stat = edesc->residue; |
750 | edesc->residue_stat = edesc->residue; | 1392 | edesc->processed_stat = edesc->processed; |
751 | edesc->processed_stat = edesc->processed; | 1393 | } |
752 | 1394 | edma_execute(echan); | |
753 | edma_execute(echan); | 1395 | |
754 | } | 1396 | spin_unlock(&echan->vchan.lock); |
1397 | } | ||
1398 | |||
1399 | /* eDMA interrupt handler */ | ||
1400 | static irqreturn_t dma_irq_handler(int irq, void *data) | ||
1401 | { | ||
1402 | struct edma_cc *ecc = data; | ||
1403 | int ctlr; | ||
1404 | u32 sh_ier; | ||
1405 | u32 sh_ipr; | ||
1406 | u32 bank; | ||
1407 | |||
1408 | ctlr = ecc->id; | ||
1409 | if (ctlr < 0) | ||
1410 | return IRQ_NONE; | ||
1411 | |||
1412 | dev_vdbg(ecc->dev, "dma_irq_handler\n"); | ||
1413 | |||
1414 | sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); | ||
1415 | if (!sh_ipr) { | ||
1416 | sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); | ||
1417 | if (!sh_ipr) | ||
1418 | return IRQ_NONE; | ||
1419 | sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); | ||
1420 | bank = 1; | ||
1421 | } else { | ||
1422 | sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); | ||
1423 | bank = 0; | ||
1424 | } | ||
1425 | |||
1426 | do { | ||
1427 | u32 slot; | ||
1428 | u32 channel; | ||
1429 | |||
1430 | slot = __ffs(sh_ipr); | ||
1431 | sh_ipr &= ~(BIT(slot)); | ||
1432 | |||
1433 | if (sh_ier & BIT(slot)) { | ||
1434 | channel = (bank << 5) | slot; | ||
1435 | /* Clear the corresponding IPR bits */ | ||
1436 | edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); | ||
1437 | edma_completion_handler(&ecc->slave_chans[channel]); | ||
755 | } | 1438 | } |
1439 | } while (sh_ipr); | ||
756 | 1440 | ||
757 | spin_unlock(&echan->vchan.lock); | 1441 | edma_shadow0_write(ecc, SH_IEVAL, 1); |
1442 | return IRQ_HANDLED; | ||
1443 | } | ||
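dma_irq_handler() reconstructs the channel number from the bank that had pending bits and the bit position within it. A minimal sketch of the scan, assuming a 64-channel CC; handle_channel() is a placeholder standing in for edma_completion_handler():

u32 pending = sh_ipr;			/* pending bits of the chosen bank */
while (pending) {
	u32 slot = __ffs(pending);	/* lowest pending bit */
	pending &= ~BIT(slot);
	if (sh_ier & BIT(slot))		/* service only enabled interrupts */
		handle_channel((bank << 5) | slot);
}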
1444 | |||
1445 | static void edma_error_handler(struct edma_chan *echan) | ||
1446 | { | ||
1447 | struct edma_cc *ecc = echan->ecc; | ||
1448 | struct device *dev = echan->vchan.chan.device->dev; | ||
1449 | struct edmacc_param p; | ||
758 | 1450 | ||
759 | break; | 1451 | if (!echan->edesc) |
760 | case EDMA_DMA_CC_ERROR: | 1452 | return; |
761 | spin_lock(&echan->vchan.lock); | ||
762 | 1453 | ||
763 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | 1454 | spin_lock(&echan->vchan.lock); |
764 | 1455 | ||
1456 | edma_read_slot(ecc, echan->slot[0], &p); | ||
1457 | /* | ||
1458 | * Issue later based on missed flag which will be sure | ||
1459 | * to happen as: | ||
1460 | * (1) we finished transmitting an intermediate slot and | ||
1461 | * edma_execute is coming up. | ||
1462 | * (2) or we finished current transfer and issue will | ||
1463 | * call edma_execute. | ||
1464 | * | ||
1465 | * Important note: issuing can be dangerous here and | ||
1466 | * lead to some nasty recursion when we are in a NULL | ||
1467 | * slot. So we avoid doing so and set the missed flag. | ||
1468 | */ | ||
1469 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | ||
1470 | dev_dbg(dev, "Error on null slot, setting miss\n"); | ||
1471 | echan->missed = 1; | ||
1472 | } else { | ||
765 | /* | 1473 | /* |
766 | * Issue later based on missed flag which will be sure | 1474 | * The slot is already programmed but the event got |
767 | * to happen as: | 1475 | * missed, so it's safe to issue it here. |
768 | * (1) we finished transmitting an intermediate slot and | ||
769 | * edma_execute is coming up. | ||
770 | * (2) or we finished current transfer and issue will | ||
771 | * call edma_execute. | ||
772 | * | ||
773 | * Important note: issuing can be dangerous here and | ||
774 | * lead to some nasty recursion when we are in a NULL | ||
775 | * slot. So we avoid doing so and set the missed flag. | ||
776 | */ | 1476 | */ |
777 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | 1477 | dev_dbg(dev, "Missed event, TRIGGERING\n"); |
778 | dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n"); | 1478 | edma_clean_channel(echan); |
779 | echan->missed = 1; | 1479 | edma_stop(echan); |
780 | } else { | 1480 | edma_start(echan); |
781 | /* | 1481 | edma_trigger_channel(echan); |
782 | * The slot is already programmed but the event got | 1482 | } |
783 | * missed, so it's safe to issue it here. | 1483 | spin_unlock(&echan->vchan.lock); |
784 | */ | 1484 | } |
785 | dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); | 1485 | |
786 | edma_clean_channel(echan->ch_num); | 1486 | static inline bool edma_error_pending(struct edma_cc *ecc) |
787 | edma_stop(echan->ch_num); | 1487 | { |
788 | edma_start(echan->ch_num); | 1488 | if (edma_read_array(ecc, EDMA_EMR, 0) || |
789 | edma_trigger_channel(echan->ch_num); | 1489 | edma_read_array(ecc, EDMA_EMR, 1) || |
1490 | edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) | ||
1491 | return true; | ||
1492 | |||
1493 | return false; | ||
1494 | } | ||
1495 | |||
1496 | /* eDMA error interrupt handler */ | ||
1497 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | ||
1498 | { | ||
1499 | struct edma_cc *ecc = data; | ||
1500 | int i, j; | ||
1501 | int ctlr; | ||
1502 | unsigned int cnt = 0; | ||
1503 | unsigned int val; | ||
1504 | |||
1505 | ctlr = ecc->id; | ||
1506 | if (ctlr < 0) | ||
1507 | return IRQ_NONE; | ||
1508 | |||
1509 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); | ||
1510 | |||
1511 | if (!edma_error_pending(ecc)) | ||
1512 | return IRQ_NONE; | ||
1513 | |||
1514 | while (1) { | ||
1515 | /* Event missed register(s) */ | ||
1516 | for (j = 0; j < 2; j++) { | ||
1517 | unsigned long emr; | ||
1518 | |||
1519 | val = edma_read_array(ecc, EDMA_EMR, j); | ||
1520 | if (!val) | ||
1521 | continue; | ||
1522 | |||
1523 | dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); | ||
1524 | emr = val; | ||
1525 | for (i = find_next_bit(&emr, 32, 0); i < 32; | ||
1526 | i = find_next_bit(&emr, 32, i + 1)) { | ||
1527 | int k = (j << 5) + i; | ||
1528 | |||
1529 | /* Clear the corresponding EMR bits */ | ||
1530 | edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); | ||
1531 | /* Clear any SER */ | ||
1532 | edma_shadow0_write_array(ecc, SH_SECR, j, | ||
1533 | BIT(i)); | ||
1534 | edma_error_handler(&ecc->slave_chans[k]); | ||
1535 | } | ||
790 | } | 1536 | } |
791 | 1537 | ||
792 | spin_unlock(&echan->vchan.lock); | 1538 | val = edma_read(ecc, EDMA_QEMR); |
1539 | if (val) { | ||
1540 | dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); | ||
1541 | /* Not reported, just clear the interrupt reason. */ | ||
1542 | edma_write(ecc, EDMA_QEMCR, val); | ||
1543 | edma_shadow0_write(ecc, SH_QSECR, val); | ||
1544 | } | ||
1545 | |||
1546 | val = edma_read(ecc, EDMA_CCERR); | ||
1547 | if (val) { | ||
1548 | dev_warn(ecc->dev, "CCERR 0x%08x\n", val); | ||
1549 | /* Not reported, just clear the interrupt reason. */ | ||
1550 | edma_write(ecc, EDMA_CCERRCLR, val); | ||
1551 | } | ||
793 | 1552 | ||
794 | break; | 1553 | if (!edma_error_pending(ecc)) |
795 | default: | 1554 | break; |
796 | break; | 1555 | cnt++; |
1556 | if (cnt > 10) | ||
1557 | break; | ||
797 | } | 1558 | } |
1559 | edma_write(ecc, EDMA_EEVAL, 1); | ||
1560 | return IRQ_HANDLED; | ||
1561 | } | ||
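Note the shape of dma_ccerr_handler(): it keeps clearing and re-reading the error sources, but gives up after ten passes so a stuck source cannot livelock the CPU in hard-IRQ context. Schematically, with the per-source clearing elided:

for (cnt = 0; edma_error_pending(ecc); cnt++) {
	if (cnt > 10)
		break;		/* bail out rather than spin forever */
	/* ...clear EMR, QEMR and CCERR sources as in the loop body... */
}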
1562 | |||
1563 | static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) | ||
1564 | { | ||
1565 | struct platform_device *tc_pdev; | ||
1566 | int ret; | ||
1567 | |||
1568 | if (!tc) | ||
1569 | return; | ||
1570 | |||
1571 | tc_pdev = of_find_device_by_node(tc->node); | ||
1572 | if (!tc_pdev) { | ||
1573 | pr_err("%s: TPTC device is not found\n", __func__); | ||
1574 | return; | ||
1575 | } | ||
1576 | if (!pm_runtime_enabled(&tc_pdev->dev)) | ||
1577 | pm_runtime_enable(&tc_pdev->dev); | ||
1578 | |||
1579 | if (enable) | ||
1580 | ret = pm_runtime_get_sync(&tc_pdev->dev); | ||
1581 | else | ||
1582 | ret = pm_runtime_put_sync(&tc_pdev->dev); | ||
1583 | |||
1584 | if (ret < 0) | ||
1585 | pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__, | ||
1586 | enable ? "get" : "put", dev_name(&tc_pdev->dev)); | ||
798 | } | 1587 | } |
799 | 1588 | ||
800 | /* Alloc channel resources */ | 1589 | /* Alloc channel resources */ |
801 | static int edma_alloc_chan_resources(struct dma_chan *chan) | 1590 | static int edma_alloc_chan_resources(struct dma_chan *chan) |
802 | { | 1591 | { |
803 | struct edma_chan *echan = to_edma_chan(chan); | 1592 | struct edma_chan *echan = to_edma_chan(chan); |
804 | struct device *dev = chan->device->dev; | 1593 | struct edma_cc *ecc = echan->ecc; |
1594 | struct device *dev = ecc->dev; | ||
1595 | enum dma_event_q eventq_no = EVENTQ_DEFAULT; | ||
805 | int ret; | 1596 | int ret; |
806 | int a_ch_num; | ||
807 | LIST_HEAD(descs); | ||
808 | 1597 | ||
809 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | 1598 | if (echan->tc) { |
810 | echan, EVENTQ_DEFAULT); | 1599 | eventq_no = echan->tc->id; |
811 | 1600 | } else if (ecc->tc_list) { | |
812 | if (a_ch_num < 0) { | 1601 | /* memcpy channel */ |
813 | ret = -ENODEV; | 1602 | echan->tc = &ecc->tc_list[ecc->info->default_queue]; |
814 | goto err_no_chan; | 1603 | eventq_no = echan->tc->id; |
815 | } | 1604 | } |
816 | 1605 | ||
817 | if (a_ch_num != echan->ch_num) { | 1606 | ret = edma_alloc_channel(echan, eventq_no); |
818 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | 1607 | if (ret) |
819 | EDMA_CTLR(echan->ch_num), | 1608 | return ret; |
1609 | |||
1610 | echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); | ||
1611 | if (echan->slot[0] < 0) { | ||
1612 | dev_err(dev, "Entry slot allocation failed for channel %u\n", | ||
820 | EDMA_CHAN_SLOT(echan->ch_num)); | 1613 | EDMA_CHAN_SLOT(echan->ch_num)); |
821 | ret = -ENODEV; | 1614 | goto err_slot; |
822 | goto err_wrong_chan; | ||
823 | } | 1615 | } |
824 | 1616 | ||
1617 | /* Set up channel -> slot mapping for the entry slot */ | ||
1618 | edma_set_chmap(echan, echan->slot[0]); | ||
825 | echan->alloced = true; | 1619 | echan->alloced = true; |
826 | echan->slot[0] = echan->ch_num; | ||
827 | 1620 | ||
828 | dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, | 1621 | dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", |
829 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | 1622 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, |
1623 | echan->hw_triggered ? "HW" : "SW"); | ||
1624 | |||
1625 | edma_tc_set_pm_state(echan->tc, true); | ||
830 | 1626 | ||
831 | return 0; | 1627 | return 0; |
832 | 1628 | ||
833 | err_wrong_chan: | 1629 | err_slot: |
834 | edma_free_channel(a_ch_num); | 1630 | edma_free_channel(echan); |
835 | err_no_chan: | ||
836 | return ret; | 1631 | return ret; |
837 | } | 1632 | } |
838 | 1633 | ||
@@ -840,29 +1635,37 @@ err_no_chan: | |||
840 | static void edma_free_chan_resources(struct dma_chan *chan) | 1635 | static void edma_free_chan_resources(struct dma_chan *chan) |
841 | { | 1636 | { |
842 | struct edma_chan *echan = to_edma_chan(chan); | 1637 | struct edma_chan *echan = to_edma_chan(chan); |
843 | struct device *dev = chan->device->dev; | 1638 | struct device *dev = echan->ecc->dev; |
844 | int i; | 1639 | int i; |
845 | 1640 | ||
846 | /* Terminate transfers */ | 1641 | /* Terminate transfers */ |
847 | edma_stop(echan->ch_num); | 1642 | edma_stop(echan); |
848 | 1643 | ||
849 | vchan_free_chan_resources(&echan->vchan); | 1644 | vchan_free_chan_resources(&echan->vchan); |
850 | 1645 | ||
851 | /* Free EDMA PaRAM slots */ | 1646 | /* Free EDMA PaRAM slots */ |
852 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | 1647 | for (i = 0; i < EDMA_MAX_SLOTS; i++) { |
853 | if (echan->slot[i] >= 0) { | 1648 | if (echan->slot[i] >= 0) { |
854 | edma_free_slot(echan->slot[i]); | 1649 | edma_free_slot(echan->ecc, echan->slot[i]); |
855 | echan->slot[i] = -1; | 1650 | echan->slot[i] = -1; |
856 | } | 1651 | } |
857 | } | 1652 | } |
858 | 1653 | ||
1654 | /* Set entry slot to the dummy slot */ | ||
1655 | edma_set_chmap(echan, echan->ecc->dummy_slot); | ||
1656 | |||
859 | /* Free EDMA channel */ | 1657 | /* Free EDMA channel */ |
860 | if (echan->alloced) { | 1658 | if (echan->alloced) { |
861 | edma_free_channel(echan->ch_num); | 1659 | edma_free_channel(echan); |
862 | echan->alloced = false; | 1660 | echan->alloced = false; |
863 | } | 1661 | } |
864 | 1662 | ||
865 | dev_dbg(dev, "freeing channel for %u\n", echan->ch_num); | 1663 | edma_tc_set_pm_state(echan->tc, false); |
1664 | echan->tc = NULL; | ||
1665 | echan->hw_triggered = false; | ||
1666 | |||
1667 | dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", | ||
1668 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); | ||
866 | } | 1669 | } |
867 | 1670 | ||
868 | /* Send pending descriptor to hardware */ | 1671 | /* Send pending descriptor to hardware */ |
@@ -888,7 +1691,7 @@ static u32 edma_residue(struct edma_desc *edesc) | |||
888 | * We always read the dst/src position from the first PaRAM | 1691 | * We always read the dst/src position from the first PaRAM |
889 | * pset. That's the one which is active now. | 1692 | * pset. That's the one which is active now. |
890 | */ | 1693 | */ |
891 | pos = edma_get_position(edesc->echan->slot[0], dst); | 1694 | pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); |
892 | 1695 | ||
893 | /* | 1696 | /* |
894 | * Cyclic is simple. Just subtract pset[0].addr from pos. | 1697 | * Cyclic is simple. Just subtract pset[0].addr from pos. |
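For cyclic descriptors the residue is pure pointer arithmetic against the active PaRAM set, as the comment says. A hedged sketch of the step this hunk cuts off; pos is the position read from slot 0 above:

u32 done = pos - edesc->pset[0].addr;	/* bytes consumed in this period */
u32 residue = edesc->residue - done;	/* bytes still outstanding */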
@@ -949,19 +1752,101 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
949 | return ret; | 1752 | return ret; |
950 | } | 1753 | } |
951 | 1754 | ||
952 | static void __init edma_chan_init(struct edma_cc *ecc, | 1755 | static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) |
953 | struct dma_device *dma, | ||
954 | struct edma_chan *echans) | ||
955 | { | 1756 | { |
1757 | s16 *memcpy_ch = memcpy_channels; | ||
1758 | |||
1759 | if (!memcpy_channels) | ||
1760 | return false; | ||
1761 | while (*memcpy_ch != -1) { | ||
1762 | if (*memcpy_ch == ch_num) | ||
1763 | return true; | ||
1764 | memcpy_ch++; | ||
1765 | } | ||
1766 | return false; | ||
1767 | } | ||
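edma_is_memcpy_channel() walks a -1 terminated s16 list, the same sentinel convention the DT parser below appends to "ti,edma-memcpy-channels". An illustrative list reserving two channels for memcpy; the channel numbers are only an example:

static s16 memcpy_channels[] = { 20, 21, -1 };	/* -1 terminates the scan */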
1768 | |||
1769 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
1770 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
1771 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
1772 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
1773 | |||
1774 | static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | ||
1775 | { | ||
1776 | struct dma_device *s_ddev = &ecc->dma_slave; | ||
1777 | struct dma_device *m_ddev = NULL; | ||
1778 | s16 *memcpy_channels = ecc->info->memcpy_channels; | ||
956 | int i, j; | 1779 | int i, j; |
957 | 1780 | ||
958 | for (i = 0; i < EDMA_CHANS; i++) { | 1781 | dma_cap_zero(s_ddev->cap_mask); |
959 | struct edma_chan *echan = &echans[i]; | 1782 | dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); |
960 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | 1783 | dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); |
1784 | if (ecc->legacy_mode && !memcpy_channels) { | ||
1785 | dev_warn(ecc->dev, | ||
1786 | "Legacy memcpy is enabled, things might not work\n"); | ||
1787 | |||
1788 | dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); | ||
1789 | s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
1790 | s_ddev->directions = BIT(DMA_MEM_TO_MEM); | ||
1791 | } | ||
1792 | |||
1793 | s_ddev->device_prep_slave_sg = edma_prep_slave_sg; | ||
1794 | s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; | ||
1795 | s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
1796 | s_ddev->device_free_chan_resources = edma_free_chan_resources; | ||
1797 | s_ddev->device_issue_pending = edma_issue_pending; | ||
1798 | s_ddev->device_tx_status = edma_tx_status; | ||
1799 | s_ddev->device_config = edma_slave_config; | ||
1800 | s_ddev->device_pause = edma_dma_pause; | ||
1801 | s_ddev->device_resume = edma_dma_resume; | ||
1802 | s_ddev->device_terminate_all = edma_terminate_all; | ||
1803 | |||
1804 | s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1805 | s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1806 | s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); | ||
1807 | s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1808 | |||
1809 | s_ddev->dev = ecc->dev; | ||
1810 | INIT_LIST_HEAD(&s_ddev->channels); | ||
1811 | |||
1812 | if (memcpy_channels) { | ||
1813 | m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); | ||
1814 | ecc->dma_memcpy = m_ddev; | ||
1815 | |||
1816 | dma_cap_zero(m_ddev->cap_mask); | ||
1817 | dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); | ||
1818 | |||
1819 | m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
1820 | m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
1821 | m_ddev->device_free_chan_resources = edma_free_chan_resources; | ||
1822 | m_ddev->device_issue_pending = edma_issue_pending; | ||
1823 | m_ddev->device_tx_status = edma_tx_status; | ||
1824 | m_ddev->device_config = edma_slave_config; | ||
1825 | m_ddev->device_pause = edma_dma_pause; | ||
1826 | m_ddev->device_resume = edma_dma_resume; | ||
1827 | m_ddev->device_terminate_all = edma_terminate_all; | ||
1828 | |||
1829 | m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1830 | m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1831 | m_ddev->directions = BIT(DMA_MEM_TO_MEM); | ||
1832 | m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1833 | |||
1834 | m_ddev->dev = ecc->dev; | ||
1835 | INIT_LIST_HEAD(&m_ddev->channels); | ||
1836 | } else if (!ecc->legacy_mode) { | ||
1837 | dev_info(ecc->dev, "memcpy is disabled\n"); | ||
1838 | } | ||
1839 | |||
1840 | for (i = 0; i < ecc->num_channels; i++) { | ||
1841 | struct edma_chan *echan = &ecc->slave_chans[i]; | ||
1842 | echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); | ||
961 | echan->ecc = ecc; | 1843 | echan->ecc = ecc; |
962 | echan->vchan.desc_free = edma_desc_free; | 1844 | echan->vchan.desc_free = edma_desc_free; |
963 | 1845 | ||
964 | vchan_init(&echan->vchan, dma); | 1846 | if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) |
1847 | vchan_init(&echan->vchan, m_ddev); | ||
1848 | else | ||
1849 | vchan_init(&echan->vchan, s_ddev); | ||
965 | 1850 | ||
966 | INIT_LIST_HEAD(&echan->node); | 1851 | INIT_LIST_HEAD(&echan->node); |
967 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | 1852 | for (j = 0; j < EDMA_MAX_SLOTS; j++) |
@@ -969,85 +1854,474 @@ static void __init edma_chan_init(struct edma_cc *ecc, | |||
969 | } | 1854 | } |
970 | } | 1855 | } |
971 | 1856 | ||
972 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | 1857 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, |
973 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | 1858 | struct edma_cc *ecc) |
974 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
975 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
976 | |||
977 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
978 | struct device *dev) | ||
979 | { | 1859 | { |
980 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 1860 | int i; |
981 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | 1861 | u32 value, cccfg; |
982 | dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; | 1862 | s8 (*queue_priority_map)[2]; |
983 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 1863 | |
984 | dma->device_free_chan_resources = edma_free_chan_resources; | 1864 | /* Decode the eDMA3 configuration from CCCFG register */ |
985 | dma->device_issue_pending = edma_issue_pending; | 1865 | cccfg = edma_read(ecc, EDMA_CCCFG); |
986 | dma->device_tx_status = edma_tx_status; | 1866 | |
987 | dma->device_config = edma_slave_config; | 1867 | value = GET_NUM_REGN(cccfg); |
988 | dma->device_pause = edma_dma_pause; | 1868 | ecc->num_region = BIT(value); |
989 | dma->device_resume = edma_dma_resume; | 1869 | |
990 | dma->device_terminate_all = edma_terminate_all; | 1870 | value = GET_NUM_DMACH(cccfg); |
1871 | ecc->num_channels = BIT(value + 1); | ||
991 | 1872 | ||
992 | dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; | 1873 | value = GET_NUM_QDMACH(cccfg); |
993 | dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | 1874 | ecc->num_qchannels = value * 2; |
994 | dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
995 | dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
996 | 1875 | ||
997 | dma->dev = dev; | 1876 | value = GET_NUM_PAENTRY(cccfg); |
1877 | ecc->num_slots = BIT(value + 4); | ||
1878 | |||
1879 | value = GET_NUM_EVQUE(cccfg); | ||
1880 | ecc->num_tc = value + 1; | ||
1881 | |||
1882 | ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; | ||
1883 | |||
1884 | dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); | ||
1885 | dev_dbg(dev, "num_region: %u\n", ecc->num_region); | ||
1886 | dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); | ||
1887 | dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); | ||
1888 | dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); | ||
1889 | dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); | ||
1890 | dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); | ||
1891 | |||
1892 | /* Nothing needs to be done if queue priority is provided */ | ||
1893 | if (pdata->queue_priority_mapping) | ||
1894 | return 0; | ||
998 | 1895 | ||
999 | /* | 1896 | /* |
1000 | * code using dma memcpy must make sure alignment of | 1897 | * Configure TC/queue priority as follows: |
1001 | * length is at dma->copy_align boundary. | 1898 | * Q0 - priority 0 |
1899 | * Q1 - priority 1 | ||
1900 | * Q2 - priority 2 | ||
1901 | * ... | ||
1902 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1903 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1904 | * the lowest priority. | ||
1002 | */ | 1905 | */ |
1003 | dma->copy_align = DMAENGINE_ALIGN_4_BYTES; | 1906 | queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), |
1907 | GFP_KERNEL); | ||
1908 | if (!queue_priority_map) | ||
1909 | return -ENOMEM; | ||
1910 | |||
1911 | for (i = 0; i < ecc->num_tc; i++) { | ||
1912 | queue_priority_map[i][0] = i; | ||
1913 | queue_priority_map[i][1] = i; | ||
1914 | } | ||
1915 | queue_priority_map[i][0] = -1; | ||
1916 | queue_priority_map[i][1] = -1; | ||
1917 | |||
1918 | pdata->queue_priority_mapping = queue_priority_map; | ||
1919 | /* Default queue has the lowest priority */ | ||
1920 | pdata->default_queue = i - 1; | ||
1921 | |||
1922 | return 0; | ||
1923 | } | ||
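Since edma_setup_from_hw() stores powers of two decoded from CCCFG, a worked example helps. Assuming the register reports GET_NUM_DMACH = 5, GET_NUM_PAENTRY = 4 and GET_NUM_EVQUE = 2 (plausible for an AM335x-class CC, not values taken from this patch):

ecc->num_channels = BIT(5 + 1);	/* 64 DMA channels */
ecc->num_slots    = BIT(4 + 4);	/* 256 PaRAM slots */
ecc->num_tc       = 2 + 1;	/* 3 event queues / transfer controllers */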
1924 | |||
1925 | #if IS_ENABLED(CONFIG_OF) | ||
1926 | static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, | ||
1927 | size_t sz) | ||
1928 | { | ||
1929 | const char pname[] = "ti,edma-xbar-event-map"; | ||
1930 | struct resource res; | ||
1931 | void __iomem *xbar; | ||
1932 | s16 (*xbar_chans)[2]; | ||
1933 | size_t nelm = sz / sizeof(s16); | ||
1934 | u32 shift, offset, mux; | ||
1935 | int ret, i; | ||
1936 | |||
1937 | xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); | ||
1938 | if (!xbar_chans) | ||
1939 | return -ENOMEM; | ||
1940 | |||
1941 | ret = of_address_to_resource(dev->of_node, 1, &res); | ||
1942 | if (ret) | ||
1943 | return -ENOMEM; | ||
1944 | |||
1945 | xbar = devm_ioremap(dev, res.start, resource_size(&res)); | ||
1946 | if (!xbar) | ||
1947 | return -ENOMEM; | ||
1948 | |||
1949 | ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, | ||
1950 | nelm); | ||
1951 | if (ret) | ||
1952 | return -EIO; | ||
1953 | |||
1954 | /* Invalidate last entry for the other user of this mess */ | ||
1955 | nelm >>= 1; | ||
1956 | xbar_chans[nelm][0] = -1; | ||
1957 | xbar_chans[nelm][1] = -1; | ||
1958 | |||
1959 | for (i = 0; i < nelm; i++) { | ||
1960 | shift = (xbar_chans[i][1] & 0x03) << 3; | ||
1961 | offset = xbar_chans[i][1] & 0xfffffffc; | ||
1962 | mux = readl(xbar + offset); | ||
1963 | mux &= ~(0xff << shift); | ||
1964 | mux |= xbar_chans[i][0] << shift; | ||
1965 | writel(mux, (xbar + offset)); | ||
1966 | } | ||
1004 | 1967 | ||
1005 | INIT_LIST_HEAD(&dma->channels); | 1968 | pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; |
1969 | return 0; | ||
1970 | } | ||
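The crossbar write in edma_xbar_event_map() packs four 8-bit event-mux fields into each 32-bit register, which is what the shift/offset math encodes. A sketch of one mapping, where ch and event stand for xbar_chans[i][1] and xbar_chans[i][0]:

u32 shift  = (ch & 0x03) << 3;	/* byte lane: 0, 8, 16 or 24 */
u32 offset = ch & ~0x03;	/* owning 32-bit register */
u32 mux    = readl(xbar + offset);

mux &= ~(0xff << shift);	/* clear the previous event number */
mux |= event << shift;		/* route 'event' to this channel */
writel(mux, xbar + offset);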
1971 | |||
1972 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1973 | bool legacy_mode) | ||
1974 | { | ||
1975 | struct edma_soc_info *info; | ||
1976 | struct property *prop; | ||
1977 | size_t sz; | ||
1978 | int ret; | ||
1979 | |||
1980 | info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); | ||
1981 | if (!info) | ||
1982 | return ERR_PTR(-ENOMEM); | ||
1983 | |||
1984 | if (legacy_mode) { | ||
1985 | prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", | ||
1986 | &sz); | ||
1987 | if (prop) { | ||
1988 | ret = edma_xbar_event_map(dev, info, sz); | ||
1989 | if (ret) | ||
1990 | return ERR_PTR(ret); | ||
1991 | } | ||
1992 | return info; | ||
1993 | } | ||
1994 | |||
1995 | /* Get the list of channels allocated to be used for memcpy */ | ||
1996 | prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); | ||
1997 | if (prop) { | ||
1998 | const char pname[] = "ti,edma-memcpy-channels"; | ||
1999 | size_t nelm = sz / sizeof(s16); | ||
2000 | s16 *memcpy_ch; | ||
2001 | |||
2002 | memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), | ||
2003 | GFP_KERNEL); | ||
2004 | if (!memcpy_ch) | ||
2005 | return ERR_PTR(-ENOMEM); | ||
2006 | |||
2007 | ret = of_property_read_u16_array(dev->of_node, pname, | ||
2008 | (u16 *)memcpy_ch, nelm); | ||
2009 | if (ret) | ||
2010 | return ERR_PTR(ret); | ||
2011 | |||
2012 | memcpy_ch[nelm] = -1; | ||
2013 | info->memcpy_channels = memcpy_ch; | ||
2014 | } | ||
2015 | |||
2016 | prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", | ||
2017 | &sz); | ||
2018 | if (prop) { | ||
2019 | const char pname[] = "ti,edma-reserved-slot-ranges"; | ||
2020 | s16 (*rsv_slots)[2]; | ||
2021 | size_t nelm = sz / sizeof(*rsv_slots); | ||
2022 | struct edma_rsv_info *rsv_info; | ||
2023 | |||
2024 | if (!nelm) | ||
2025 | return info; | ||
2026 | |||
2027 | rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); | ||
2028 | if (!rsv_info) | ||
2029 | return ERR_PTR(-ENOMEM); | ||
2030 | |||
2031 | rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), | ||
2032 | GFP_KERNEL); | ||
2033 | if (!rsv_slots) | ||
2034 | return ERR_PTR(-ENOMEM); | ||
2035 | |||
2036 | ret = of_property_read_u16_array(dev->of_node, pname, | ||
2037 | (u16 *)rsv_slots, nelm * 2); | ||
2038 | if (ret) | ||
2039 | return ERR_PTR(ret); | ||
2040 | |||
2041 | rsv_slots[nelm][0] = -1; | ||
2042 | rsv_slots[nelm][1] = -1; | ||
2043 | info->rsv = rsv_info; | ||
2044 | info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; | ||
2045 | } | ||
2046 | |||
2047 | return info; | ||
2048 | } | ||
2049 | |||
2050 | static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, | ||
2051 | struct of_dma *ofdma) | ||
2052 | { | ||
2053 | struct edma_cc *ecc = ofdma->of_dma_data; | ||
2054 | struct dma_chan *chan = NULL; | ||
2055 | struct edma_chan *echan; | ||
2056 | int i; | ||
2057 | |||
2058 | if (!ecc || dma_spec->args_count < 1) | ||
2059 | return NULL; | ||
2060 | |||
2061 | for (i = 0; i < ecc->num_channels; i++) { | ||
2062 | echan = &ecc->slave_chans[i]; | ||
2063 | if (echan->ch_num == dma_spec->args[0]) { | ||
2064 | chan = &echan->vchan.chan; | ||
2065 | break; | ||
2066 | } | ||
2067 | } | ||
2068 | |||
2069 | if (!chan) | ||
2070 | return NULL; | ||
2071 | |||
2072 | if (echan->ecc->legacy_mode && dma_spec->args_count == 1) | ||
2073 | goto out; | ||
2074 | |||
2075 | if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && | ||
2076 | dma_spec->args[1] < echan->ecc->num_tc) { | ||
2077 | echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; | ||
2078 | goto out; | ||
2079 | } | ||
2080 | |||
2081 | return NULL; | ||
2082 | out: | ||
2083 | /* The channel is going to be used as HW synchronized */ | ||
2084 | echan->hw_triggered = true; | ||
2085 | return dma_get_slave_channel(chan); | ||
2086 | } | ||
2087 | #else | ||
2088 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
2089 | bool legacy_mode) | ||
2090 | { | ||
2091 | return ERR_PTR(-EINVAL); | ||
2092 | } | ||
2093 | |||
2094 | static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, | ||
2095 | struct of_dma *ofdma) | ||
2096 | { | ||
2097 | return NULL; | ||
1006 | } | 2098 | } |
2099 | #endif | ||
1007 | 2100 | ||
1008 | static int edma_probe(struct platform_device *pdev) | 2101 | static int edma_probe(struct platform_device *pdev) |
1009 | { | 2102 | { |
1010 | struct edma_cc *ecc; | 2103 | struct edma_soc_info *info = pdev->dev.platform_data; |
2104 | s8 (*queue_priority_mapping)[2]; | ||
2105 | int i, off, ln; | ||
2106 | const s16 (*rsv_slots)[2]; | ||
2107 | const s16 (*xbar_chans)[2]; | ||
2108 | int irq; | ||
2109 | char *irq_name; | ||
2110 | struct resource *mem; | ||
2111 | struct device_node *node = pdev->dev.of_node; | ||
2112 | struct device *dev = &pdev->dev; | ||
2113 | struct edma_cc *ecc; | ||
2114 | bool legacy_mode = true; | ||
1011 | int ret; | 2115 | int ret; |
1012 | 2116 | ||
1013 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | 2117 | if (node) { |
2118 | const struct of_device_id *match; | ||
2119 | |||
2120 | match = of_match_node(edma_of_ids, node); | ||
2121 | if (match && (u32)match->data == EDMA_BINDING_TPCC) | ||
2122 | legacy_mode = false; | ||
2123 | |||
2124 | info = edma_setup_info_from_dt(dev, legacy_mode); | ||
2125 | if (IS_ERR(info)) { | ||
2126 | dev_err(dev, "failed to get DT data\n"); | ||
2127 | return PTR_ERR(info); | ||
2128 | } | ||
2129 | } | ||
2130 | |||
2131 | if (!info) | ||
2132 | return -ENODEV; | ||
2133 | |||
2134 | pm_runtime_enable(dev); | ||
2135 | ret = pm_runtime_get_sync(dev); | ||
2136 | if (ret < 0) { | ||
2137 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
2138 | return ret; | ||
2139 | } | ||
2140 | |||
2141 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
1014 | if (ret) | 2142 | if (ret) |
1015 | return ret; | 2143 | return ret; |
1016 | 2144 | ||
1017 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | 2145 | ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); |
1018 | if (!ecc) { | 2146 | if (!ecc) { |
1019 | dev_err(&pdev->dev, "Can't allocate controller\n"); | 2147 | dev_err(dev, "Can't allocate controller\n"); |
1020 | return -ENOMEM; | 2148 | return -ENOMEM; |
1021 | } | 2149 | } |
1022 | 2150 | ||
1023 | ecc->ctlr = pdev->id; | 2151 | ecc->dev = dev; |
1024 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | 2152 | ecc->id = pdev->id; |
2153 | ecc->legacy_mode = legacy_mode; | ||
2154 | /* When booting with DT the pdev->id is -1 */ | ||
2155 | if (ecc->id < 0) | ||
2156 | ecc->id = 0; | ||
2157 | |||
2158 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); | ||
2159 | if (!mem) { | ||
2160 | dev_dbg(dev, "mem resource not found, using index 0\n"); | ||
2161 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2162 | if (!mem) { | ||
2163 | dev_err(dev, "no mem resource?\n"); | ||
2164 | return -ENODEV; | ||
2165 | } | ||
2166 | } | ||
2167 | ecc->base = devm_ioremap_resource(dev, mem); | ||
2168 | if (IS_ERR(ecc->base)) | ||
2169 | return PTR_ERR(ecc->base); | ||
2170 | |||
2171 | platform_set_drvdata(pdev, ecc); | ||
2172 | |||
2173 | /* Get eDMA3 configuration from IP */ | ||
2174 | ret = edma_setup_from_hw(dev, info, ecc); | ||
2175 | if (ret) | ||
2176 | return ret; | ||
2177 | |||
2178 | /* Allocate memory based on the information we got from the IP */ | ||
2179 | ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, | ||
2180 | sizeof(*ecc->slave_chans), GFP_KERNEL); | ||
2181 | if (!ecc->slave_chans) | ||
2182 | return -ENOMEM; | ||
2183 | |||
2184 | ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), | ||
2185 | sizeof(unsigned long), GFP_KERNEL); | ||
2186 | if (!ecc->slot_inuse) | ||
2187 | return -ENOMEM; | ||
2188 | |||
2189 | ecc->default_queue = info->default_queue; | ||
2190 | |||
2191 | for (i = 0; i < ecc->num_slots; i++) | ||
2192 | edma_write_slot(ecc, i, &dummy_paramset); | ||
2193 | |||
2194 | if (info->rsv) { | ||
2195 | /* Set the reserved slots in inuse list */ | ||
2196 | rsv_slots = info->rsv->rsv_slots; | ||
2197 | if (rsv_slots) { | ||
2198 | for (i = 0; rsv_slots[i][0] != -1; i++) { | ||
2199 | off = rsv_slots[i][0]; | ||
2200 | ln = rsv_slots[i][1]; | ||
2201 | set_bits(off, ln, ecc->slot_inuse); | ||
2202 | } | ||
2203 | } | ||
2204 | } | ||
2205 | |||
2206 | /* Clear the xbar mapped channels in unused list */ | ||
2207 | xbar_chans = info->xbar_chans; | ||
2208 | if (xbar_chans) { | ||
2209 | for (i = 0; xbar_chans[i][1] != -1; i++) { | ||
2210 | off = xbar_chans[i][1]; | ||
2211 | } | ||
2212 | } | ||
2213 | |||
2214 | irq = platform_get_irq_byname(pdev, "edma3_ccint"); | ||
2215 | if (irq < 0 && node) | ||
2216 | irq = irq_of_parse_and_map(node, 0); | ||
2217 | |||
2218 | if (irq >= 0) { | ||
2219 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", | ||
2220 | dev_name(dev)); | ||
2221 | ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, | ||
2222 | ecc); | ||
2223 | if (ret) { | ||
2224 | dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); | ||
2225 | return ret; | ||
2226 | } | ||
2227 | } | ||
2228 | |||
2229 | irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); | ||
2230 | if (irq < 0 && node) | ||
2231 | irq = irq_of_parse_and_map(node, 2); | ||
2232 | |||
2233 | if (irq >= 0) { | ||
2234 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", | ||
2235 | dev_name(dev)); | ||
2236 | ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, | ||
2237 | ecc); | ||
2238 | if (ret) { | ||
2239 | dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); | ||
2240 | return ret; | ||
2241 | } | ||
2242 | } | ||
2243 | |||
2244 | ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); | ||
1025 | if (ecc->dummy_slot < 0) { | 2245 | if (ecc->dummy_slot < 0) { |
1026 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | 2246 | dev_err(dev, "Can't allocate PaRAM dummy slot\n"); |
1027 | return ecc->dummy_slot; | 2247 | return ecc->dummy_slot; |
1028 | } | 2248 | } |
1029 | 2249 | ||
1030 | dma_cap_zero(ecc->dma_slave.cap_mask); | 2250 | queue_priority_mapping = info->queue_priority_mapping; |
1031 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | 2251 | |
1032 | dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); | 2252 | if (!ecc->legacy_mode) { |
1033 | dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); | 2253 | int lowest_priority = 0; |
2254 | struct of_phandle_args tc_args; | ||
2255 | |||
2256 | ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, | ||
2257 | sizeof(*ecc->tc_list), GFP_KERNEL); | ||
2258 | if (!ecc->tc_list) | ||
2259 | return -ENOMEM; | ||
2260 | |||
2261 | for (i = 0;; i++) { | ||
2262 | ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", | ||
2263 | 1, i, &tc_args); | ||
2264 | if (ret || i == ecc->num_tc) | ||
2265 | break; | ||
2266 | |||
2267 | ecc->tc_list[i].node = tc_args.np; | ||
2268 | ecc->tc_list[i].id = i; | ||
2269 | queue_priority_mapping[i][1] = tc_args.args[0]; | ||
2270 | if (queue_priority_mapping[i][1] > lowest_priority) { | ||
2271 | lowest_priority = queue_priority_mapping[i][1]; | ||
2272 | info->default_queue = i; | ||
2273 | } | ||
2274 | } | ||
2275 | } | ||
2276 | |||
2277 | /* Event queue priority mapping */ | ||
2278 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
2279 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | ||
2280 | queue_priority_mapping[i][1]); | ||
2281 | |||
2282 | for (i = 0; i < ecc->num_region; i++) { | ||
2283 | edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); | ||
2284 | edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); | ||
2285 | edma_write_array(ecc, EDMA_QRAE, i, 0x0); | ||
2286 | } | ||
2287 | ecc->info = info; | ||
1034 | 2288 | ||
1035 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | 2289 | /* Init the dma device and channels */ |
2290 | edma_dma_init(ecc, legacy_mode); | ||
1036 | 2291 | ||
1037 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | 2292 | for (i = 0; i < ecc->num_channels; i++) { |
2293 | /* Assign all channels to the default queue */ | ||
2294 | edma_assign_channel_eventq(&ecc->slave_chans[i], | ||
2295 | info->default_queue); | ||
2296 | /* Set entry slot to the dummy slot */ | ||
2297 | edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); | ||
2298 | } | ||
1038 | 2299 | ||
1039 | ret = dma_async_device_register(&ecc->dma_slave); | 2300 | ret = dma_async_device_register(&ecc->dma_slave); |
1040 | if (ret) | 2301 | if (ret) { |
2302 | dev_err(dev, "slave ddev registration failed (%d)\n", ret); | ||
1041 | goto err_reg1; | 2303 | goto err_reg1; |
2304 | } | ||
1042 | 2305 | ||
1043 | platform_set_drvdata(pdev, ecc); | 2306 | if (ecc->dma_memcpy) { |
2307 | ret = dma_async_device_register(ecc->dma_memcpy); | ||
2308 | if (ret) { | ||
2309 | dev_err(dev, "memcpy ddev registration failed (%d)\n", | ||
2310 | ret); | ||
2311 | dma_async_device_unregister(&ecc->dma_slave); | ||
2312 | goto err_reg1; | ||
2313 | } | ||
2314 | } | ||
2315 | |||
2316 | if (node) | ||
2317 | of_dma_controller_register(node, of_edma_xlate, ecc); | ||
1044 | 2318 | ||
1045 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | 2319 | dev_info(dev, "TI EDMA DMA engine driver\n"); |
1046 | 2320 | ||
1047 | return 0; | 2321 | return 0; |
1048 | 2322 | ||
1049 | err_reg1: | 2323 | err_reg1: |
1050 | edma_free_slot(ecc->dummy_slot); | 2324 | edma_free_slot(ecc, ecc->dummy_slot); |
1051 | return ret; | 2325 | return ret; |
1052 | } | 2326 | } |
1053 | 2327 | ||
@@ -1056,33 +2330,112 @@ static int edma_remove(struct platform_device *pdev) | |||
1056 | struct device *dev = &pdev->dev; | 2330 | struct device *dev = &pdev->dev; |
1057 | struct edma_cc *ecc = dev_get_drvdata(dev); | 2331 | struct edma_cc *ecc = dev_get_drvdata(dev); |
1058 | 2332 | ||
2333 | if (dev->of_node) | ||
2334 | of_dma_controller_free(dev->of_node); | ||
1059 | dma_async_device_unregister(&ecc->dma_slave); | 2335 | dma_async_device_unregister(&ecc->dma_slave); |
1060 | edma_free_slot(ecc->dummy_slot); | 2336 | if (ecc->dma_memcpy) |
2337 | dma_async_device_unregister(ecc->dma_memcpy); | ||
2338 | edma_free_slot(ecc, ecc->dummy_slot); | ||
1061 | 2339 | ||
1062 | return 0; | 2340 | return 0; |
1063 | } | 2341 | } |
1064 | 2342 | ||
2343 | #ifdef CONFIG_PM_SLEEP | ||
2344 | static int edma_pm_suspend(struct device *dev) | ||
2345 | { | ||
2346 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
2347 | struct edma_chan *echan = ecc->slave_chans; | ||
2348 | int i; | ||
2349 | |||
2350 | for (i = 0; i < ecc->num_channels; i++) { | ||
2351 | if (echan[i].alloced) { | ||
2352 | edma_setup_interrupt(&echan[i], false); | ||
2353 | edma_tc_set_pm_state(echan[i].tc, false); | ||
2354 | } | ||
2355 | } | ||
2356 | |||
2357 | return 0; | ||
2358 | } | ||
2359 | |||
2360 | static int edma_pm_resume(struct device *dev) | ||
2361 | { | ||
2362 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
2363 | struct edma_chan *echan = ecc->slave_chans; | ||
2364 | int i; | ||
2365 | s8 (*queue_priority_mapping)[2]; | ||
2366 | |||
2367 | queue_priority_mapping = ecc->info->queue_priority_mapping; | ||
2368 | |||
2369 | /* Event queue priority mapping */ | ||
2370 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
2371 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | ||
2372 | queue_priority_mapping[i][1]); | ||
2373 | |||
2374 | for (i = 0; i < ecc->num_channels; i++) { | ||
2375 | if (echan[i].alloced) { | ||
2376 | /* ensure access through shadow region 0 */ | ||
2377 | edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, | ||
2378 | BIT(i & 0x1f)); | ||
2379 | |||
2380 | edma_setup_interrupt(&echan[i], true); | ||
2381 | |||
2382 | /* Set up channel -> slot mapping for the entry slot */ | ||
2383 | edma_set_chmap(&echan[i], echan[i].slot[0]); | ||
2384 | |||
2385 | edma_tc_set_pm_state(echan[i].tc, true); | ||
2386 | } | ||
2387 | } | ||
2388 | |||
2389 | return 0; | ||
2390 | } | ||
2391 | #endif | ||
2392 | |||
2393 | static const struct dev_pm_ops edma_pm_ops = { | ||
2394 | SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) | ||
2395 | }; | ||
2396 | |||
1065 | static struct platform_driver edma_driver = { | 2397 | static struct platform_driver edma_driver = { |
1066 | .probe = edma_probe, | 2398 | .probe = edma_probe, |
1067 | .remove = edma_remove, | 2399 | .remove = edma_remove, |
1068 | .driver = { | 2400 | .driver = { |
1069 | .name = "edma-dma-engine", | 2401 | .name = "edma", |
2402 | .pm = &edma_pm_ops, | ||
2403 | .of_match_table = edma_of_ids, | ||
2404 | }, | ||
2405 | }; | ||
2406 | |||
2407 | static struct platform_driver edma_tptc_driver = { | ||
2408 | .driver = { | ||
2409 | .name = "edma3-tptc", | ||
2410 | .of_match_table = edma_tptc_of_ids, | ||
1070 | }, | 2411 | }, |
1071 | }; | 2412 | }; |
1072 | 2413 | ||
1073 | bool edma_filter_fn(struct dma_chan *chan, void *param) | 2414 | bool edma_filter_fn(struct dma_chan *chan, void *param) |
1074 | { | 2415 | { |
2416 | bool match = false; | ||
2417 | |||
1075 | if (chan->device->dev->driver == &edma_driver.driver) { | 2418 | if (chan->device->dev->driver == &edma_driver.driver) { |
1076 | struct edma_chan *echan = to_edma_chan(chan); | 2419 | struct edma_chan *echan = to_edma_chan(chan); |
1077 | unsigned ch_req = *(unsigned *)param; | 2420 | unsigned ch_req = *(unsigned *)param; |
1078 | return ch_req == echan->ch_num; | 2421 | if (ch_req == echan->ch_num) { |
2422 | /* The channel is going to be used as HW synchronized */ | ||
2423 | echan->hw_triggered = true; | ||
2424 | match = true; | ||
2425 | } | ||
1079 | } | 2426 | } |
1080 | return false; | 2427 | return match; |
1081 | } | 2428 | } |
1082 | EXPORT_SYMBOL(edma_filter_fn); | 2429 | EXPORT_SYMBOL(edma_filter_fn); |
1083 | 2430 | ||
1084 | static int edma_init(void) | 2431 | static int edma_init(void) |
1085 | { | 2432 | { |
2433 | int ret; | ||
2434 | |||
2435 | ret = platform_driver_register(&edma_tptc_driver); | ||
2436 | if (ret) | ||
2437 | return ret; | ||
2438 | |||
1086 | return platform_driver_register(&edma_driver); | 2439 | return platform_driver_register(&edma_driver); |
1087 | } | 2440 | } |
1088 | subsys_initcall(edma_init); | 2441 | subsys_initcall(edma_init); |
@@ -1090,6 +2443,7 @@ subsys_initcall(edma_init); | |||
1090 | static void __exit edma_exit(void) | 2443 | static void __exit edma_exit(void) |
1091 | { | 2444 | { |
1092 | platform_driver_unregister(&edma_driver); | 2445 | platform_driver_unregister(&edma_driver); |
2446 | platform_driver_unregister(&edma_tptc_driver); | ||
1093 | } | 2447 | } |
1094 | module_exit(edma_exit); | 2448 | module_exit(edma_exit); |
1095 | 2449 | ||
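The edma_filter_fn change above means a successful match now also tags the channel as hardware-triggered. A minimal sketch of the client side, assuming a made-up request line number; only edma_filter_fn itself comes from the hunk above:

#include <linux/dmaengine.h>
#include <linux/platform_data/edma.h>  /* declares edma_filter_fn */

static struct dma_chan *example_request_edma_chan(void)
{
        dma_cap_mask_t mask;
        unsigned int req = 20;          /* hypothetical eDMA event number */

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* On a match the filter now also sets echan->hw_triggered. */
        return dma_request_channel(mask, edma_filter_fn, &req);
}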
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 300f821f1890..2209f75fdf05 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1512,6 +1512,7 @@ static const struct of_device_id fsldma_of_ids[] = { | |||
1512 | { .compatible = "fsl,elo-dma", }, | 1512 | { .compatible = "fsl,elo-dma", }, |
1513 | {} | 1513 | {} |
1514 | }; | 1514 | }; |
1515 | MODULE_DEVICE_TABLE(of, fsldma_of_ids); | ||
1515 | 1516 | ||
1516 | static struct platform_driver fsldma_of_driver = { | 1517 | static struct platform_driver fsldma_of_driver = { |
1517 | .driver = { | 1518 | .driver = { |
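MODULE_DEVICE_TABLE() additions like the fsldma one recur across several drivers in this section (moxart, mpc512x, sirf, sun6i, xilinx): without the macro, the OF match table never reaches the module alias list, so udev/modprobe cannot autoload the driver when its device-tree node appears. A generic sketch with placeholder names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id mydrv_of_ids[] = {
        { .compatible = "vendor,mydrv" },       /* placeholder compatible */
        { /* sentinel */ }
};
/* Emits "of:N*T*Cvendor,mydrv" aliases into the .ko so modprobe can
 * load the module when a matching DT node is probed. */
MODULE_DEVICE_TABLE(of, mydrv_of_ids);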
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 48d6d9e94f67..7d56b47e4fcf 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c | |||
@@ -65,9 +65,6 @@ static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c) | |||
65 | u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0); | 65 | u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0); |
66 | u32 cfglo = 0; | 66 | u32 cfglo = 0; |
67 | 67 | ||
68 | /* Enforce FIFO drain when channel is suspended */ | ||
69 | cfglo |= IDMA64C_CFGL_CH_DRAIN; | ||
70 | |||
71 | /* Set default burst alignment */ | 68 | /* Set default burst alignment */ |
72 | cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN; | 69 | cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN; |
73 | 70 | ||
@@ -257,15 +254,15 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, | |||
257 | dar = config->dst_addr; | 254 | dar = config->dst_addr; |
258 | ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC | | 255 | ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC | |
259 | IDMA64C_CTLL_FC_M2P; | 256 | IDMA64C_CTLL_FC_M2P; |
260 | src_width = min_t(u32, 2, __fls(sar | hw->len)); | 257 | src_width = __ffs(sar | hw->len | 4); |
261 | dst_width = __fls(config->dst_addr_width); | 258 | dst_width = __ffs(config->dst_addr_width); |
262 | } else { /* DMA_DEV_TO_MEM */ | 259 | } else { /* DMA_DEV_TO_MEM */ |
263 | sar = config->src_addr; | 260 | sar = config->src_addr; |
264 | dar = hw->phys; | 261 | dar = hw->phys; |
265 | ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX | | 262 | ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX | |
266 | IDMA64C_CTLL_FC_P2M; | 263 | IDMA64C_CTLL_FC_P2M; |
267 | src_width = __fls(config->src_addr_width); | 264 | src_width = __ffs(config->src_addr_width); |
268 | dst_width = min_t(u32, 2, __fls(dar | hw->len)); | 265 | dst_width = __ffs(dar | hw->len | 4); |
269 | } | 266 | } |
270 | 267 | ||
271 | lli->sar = sar; | 268 | lli->sar = sar; |
@@ -428,12 +425,17 @@ static int idma64_slave_config(struct dma_chan *chan, | |||
428 | return 0; | 425 | return 0; |
429 | } | 426 | } |
430 | 427 | ||
431 | static void idma64_chan_deactivate(struct idma64_chan *idma64c) | 428 | static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain) |
432 | { | 429 | { |
433 | unsigned short count = 100; | 430 | unsigned short count = 100; |
434 | u32 cfglo; | 431 | u32 cfglo; |
435 | 432 | ||
436 | cfglo = channel_readl(idma64c, CFG_LO); | 433 | cfglo = channel_readl(idma64c, CFG_LO); |
434 | if (drain) | ||
435 | cfglo |= IDMA64C_CFGL_CH_DRAIN; | ||
436 | else | ||
437 | cfglo &= ~IDMA64C_CFGL_CH_DRAIN; | ||
438 | |||
437 | channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP); | 439 | channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP); |
438 | do { | 440 | do { |
439 | udelay(1); | 441 | udelay(1); |
@@ -456,7 +458,7 @@ static int idma64_pause(struct dma_chan *chan) | |||
456 | 458 | ||
457 | spin_lock_irqsave(&idma64c->vchan.lock, flags); | 459 | spin_lock_irqsave(&idma64c->vchan.lock, flags); |
458 | if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) { | 460 | if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) { |
459 | idma64_chan_deactivate(idma64c); | 461 | idma64_chan_deactivate(idma64c, false); |
460 | idma64c->desc->status = DMA_PAUSED; | 462 | idma64c->desc->status = DMA_PAUSED; |
461 | } | 463 | } |
462 | spin_unlock_irqrestore(&idma64c->vchan.lock, flags); | 464 | spin_unlock_irqrestore(&idma64c->vchan.lock, flags); |
@@ -486,7 +488,7 @@ static int idma64_terminate_all(struct dma_chan *chan) | |||
486 | LIST_HEAD(head); | 488 | LIST_HEAD(head); |
487 | 489 | ||
488 | spin_lock_irqsave(&idma64c->vchan.lock, flags); | 490 | spin_lock_irqsave(&idma64c->vchan.lock, flags); |
489 | idma64_chan_deactivate(idma64c); | 491 | idma64_chan_deactivate(idma64c, true); |
490 | idma64_stop_transfer(idma64c); | 492 | idma64_stop_transfer(idma64c); |
491 | if (idma64c->desc) { | 493 | if (idma64c->desc) { |
492 | idma64_vdesc_free(&idma64c->desc->vdesc); | 494 | idma64_vdesc_free(&idma64c->desc->vdesc); |
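The idma64 change moves FIFO draining from channel setup into the deactivate path: a pause must preserve FIFO contents so the transfer can resume, while terminate discards them. A generic sketch of the suspend-and-poll pattern, with hypothetical register offsets and bit names (not the idma64 ones):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>

#define CH_CFG_LO       0x18            /* hypothetical config register */
#define CH_SUSP         BIT(8)          /* hypothetical suspend request */
#define CH_DRAIN        BIT(10)         /* hypothetical FIFO-drain enable */
#define CH_FIFO_EMPTY   BIT(9)          /* hypothetical suspend-done flag */

static void example_chan_deactivate(void __iomem *regs, bool drain)
{
        unsigned short count = 100;
        u32 cfg = readl(regs + CH_CFG_LO);

        if (drain)
                cfg |= CH_DRAIN;        /* terminate: flush the FIFO */
        else
                cfg &= ~CH_DRAIN;       /* pause: keep data for resume */

        writel(cfg | CH_SUSP, regs + CH_CFG_LO);
        do {
                udelay(1);
                cfg = readl(regs + CH_CFG_LO);
        } while (!(cfg & CH_FIFO_EMPTY) && --count);
}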
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index a4d99685a7c4..f6aeff0af8a5 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | 18 | ||
19 | #include <asm-generic/io-64-nonatomic-lo-hi.h> | ||
20 | |||
19 | #include "virt-dma.h" | 21 | #include "virt-dma.h" |
20 | 22 | ||
21 | /* Channel registers */ | 23 | /* Channel registers */ |
@@ -166,19 +168,13 @@ static inline void idma64c_writel(struct idma64_chan *idma64c, int offset, | |||
166 | 168 | ||
167 | static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset) | 169 | static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset) |
168 | { | 170 | { |
169 | u64 l, h; | 171 | return lo_hi_readq(idma64c->regs + offset); |
170 | |||
171 | l = idma64c_readl(idma64c, offset); | ||
172 | h = idma64c_readl(idma64c, offset + 4); | ||
173 | |||
174 | return l | (h << 32); | ||
175 | } | 172 | } |
176 | 173 | ||
177 | static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset, | 174 | static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset, |
178 | u64 value) | 175 | u64 value) |
179 | { | 176 | { |
180 | idma64c_writel(idma64c, offset, value); | 177 | lo_hi_writeq(value, idma64c->regs + offset); |
181 | idma64c_writel(idma64c, offset + 4, value >> 32); | ||
182 | } | 178 | } |
183 | 179 | ||
184 | #define channel_readq(idma64c, reg) \ | 180 | #define channel_readq(idma64c, reg) \ |
@@ -217,7 +213,7 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) | |||
217 | idma64_writel(idma64, IDMA64_##reg, (value)) | 213 | idma64_writel(idma64, IDMA64_##reg, (value)) |
218 | 214 | ||
219 | /** | 215 | /** |
220 | * struct idma64_chip - representation of DesignWare DMA controller hardware | 216 | * struct idma64_chip - representation of iDMA 64-bit controller hardware |
221 | * @dev: struct device of the DMA controller | 217 | * @dev: struct device of the DMA controller |
222 | * @irq: irq line | 218 | * @irq: irq line |
223 | * @regs: memory mapped I/O space | 219 | * @regs: memory mapped I/O space |
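The idma64.h hunks swap open-coded 32-bit halves for the generic lo-hi helpers. On targets without a native readq/writeq, those helpers reduce to roughly the following — a sketch of the semantics, not the header's exact code:

#include <linux/io.h>
#include <linux/types.h>

static inline u64 example_lo_hi_readq(void __iomem *addr)
{
        u32 lo = readl(addr);           /* low word is read first */
        u32 hi = readl(addr + 4);

        return ((u64)hi << 32) | lo;
}

static inline void example_lo_hi_writeq(u64 val, void __iomem *addr)
{
        writel((u32)val, addr);         /* low word is written first */
        writel((u32)(val >> 32), addr + 4);
}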
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 9d375bc7590a..7058d58ba588 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1478,7 +1478,7 @@ static int __init sdma_event_remap(struct sdma_engine *sdma) | |||
1478 | event_remap = of_find_property(np, propname, NULL); | 1478 | event_remap = of_find_property(np, propname, NULL); |
1479 | num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0; | 1479 | num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0; |
1480 | if (!num_map) { | 1480 | if (!num_map) { |
1481 | dev_warn(sdma->dev, "no event needs to be remapped\n"); | 1481 | dev_dbg(sdma->dev, "no event needs to be remapped\n"); |
1482 | goto out; | 1482 | goto out; |
1483 | } else if (num_map % EVENT_REMAP_CELLS) { | 1483 | } else if (num_map % EVENT_REMAP_CELLS) { |
1484 | dev_err(sdma->dev, "the property %s must modulo %d\n", | 1484 | dev_err(sdma->dev, "the property %s must modulo %d\n", |
@@ -1826,8 +1826,6 @@ static int sdma_probe(struct platform_device *pdev) | |||
1826 | of_node_put(spba_bus); | 1826 | of_node_put(spba_bus); |
1827 | } | 1827 | } |
1828 | 1828 | ||
1829 | dev_info(sdma->dev, "initialized\n"); | ||
1830 | |||
1831 | return 0; | 1829 | return 0; |
1832 | 1830 | ||
1833 | err_register: | 1831 | err_register: |
@@ -1852,7 +1850,6 @@ static int sdma_remove(struct platform_device *pdev) | |||
1852 | } | 1850 | } |
1853 | 1851 | ||
1854 | platform_set_drvdata(pdev, NULL); | 1852 | platform_set_drvdata(pdev, NULL); |
1855 | dev_info(&pdev->dev, "Removed...\n"); | ||
1856 | return 0; | 1853 | return 0; |
1857 | } | 1854 | } |
1858 | 1855 | ||
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f66b7e640610..1d5df2ef148b 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan) | |||
197 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) | 197 | void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) |
198 | { | 198 | { |
199 | spin_lock_bh(&ioat_chan->prep_lock); | 199 | spin_lock_bh(&ioat_chan->prep_lock); |
200 | __ioat_start_null_desc(ioat_chan); | 200 | if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) |
201 | __ioat_start_null_desc(ioat_chan); | ||
201 | spin_unlock_bh(&ioat_chan->prep_lock); | 202 | spin_unlock_bh(&ioat_chan->prep_lock); |
202 | } | 203 | } |
203 | 204 | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1bc084986646..8f4e607d5817 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -82,8 +82,9 @@ struct ioatdma_device { | |||
82 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | 82 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; |
83 | struct dma_device dma_dev; | 83 | struct dma_device dma_dev; |
84 | u8 version; | 84 | u8 version; |
85 | struct msix_entry msix_entries[4]; | 85 | #define IOAT_MAX_CHANS 4 |
86 | struct ioatdma_chan *idx[4]; | 86 | struct msix_entry msix_entries[IOAT_MAX_CHANS]; |
87 | struct ioatdma_chan *idx[IOAT_MAX_CHANS]; | ||
87 | struct dca_provider *dca; | 88 | struct dca_provider *dca; |
88 | enum ioat_irq_mode irq_mode; | 89 | enum ioat_irq_mode irq_mode; |
89 | u32 cap; | 90 | u32 cap; |
@@ -95,6 +96,7 @@ struct ioatdma_chan { | |||
95 | dma_addr_t last_completion; | 96 | dma_addr_t last_completion; |
96 | spinlock_t cleanup_lock; | 97 | spinlock_t cleanup_lock; |
97 | unsigned long state; | 98 | unsigned long state; |
99 | #define IOAT_CHAN_DOWN 0 | ||
98 | #define IOAT_COMPLETION_ACK 1 | 100 | #define IOAT_COMPLETION_ACK 1 |
99 | #define IOAT_RESET_PENDING 2 | 101 | #define IOAT_RESET_PENDING 2 |
100 | #define IOAT_KOBJ_INIT_FAIL 3 | 102 | #define IOAT_KOBJ_INIT_FAIL 3 |
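IOAT_CHAN_DOWN extends the existing per-channel state bitmap; the bit is flipped under prep_lock and then checked by every descriptor-prep path (see the prep.c hunks below). A condensed sketch of the lifecycle, with placeholder function names:

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define IOAT_CHAN_DOWN  0       /* as defined in the hunk above */

static void example_chan_down(unsigned long *state, spinlock_t *prep_lock)
{
        spin_lock_bh(prep_lock);
        set_bit(IOAT_CHAN_DOWN, state);         /* block new submissions */
        spin_unlock_bh(prep_lock);
}

static void example_chan_up(unsigned long *state, spinlock_t *prep_lock)
{
        spin_lock_bh(prep_lock);
        clear_bit(IOAT_CHAN_DOWN, state);       /* accept work again */
        spin_unlock_bh(prep_lock);
}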
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 1c3c9b0abf4e..4ef0c5e07912 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/prefetch.h> | 28 | #include <linux/prefetch.h> |
29 | #include <linux/dca.h> | 29 | #include <linux/dca.h> |
30 | #include <linux/aer.h> | ||
30 | #include "dma.h" | 31 | #include "dma.h" |
31 | #include "registers.h" | 32 | #include "registers.h" |
32 | #include "hw.h" | 33 | #include "hw.h" |
@@ -1186,13 +1187,116 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1186 | return 0; | 1187 | return 0; |
1187 | } | 1188 | } |
1188 | 1189 | ||
1190 | static void ioat_shutdown(struct pci_dev *pdev) | ||
1191 | { | ||
1192 | struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev); | ||
1193 | struct ioatdma_chan *ioat_chan; | ||
1194 | int i; | ||
1195 | |||
1196 | if (!ioat_dma) | ||
1197 | return; | ||
1198 | |||
1199 | for (i = 0; i < IOAT_MAX_CHANS; i++) { | ||
1200 | ioat_chan = ioat_dma->idx[i]; | ||
1201 | if (!ioat_chan) | ||
1202 | continue; | ||
1203 | |||
1204 | spin_lock_bh(&ioat_chan->prep_lock); | ||
1205 | set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); | ||
1206 | del_timer_sync(&ioat_chan->timer); | ||
1207 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
1208 | /* this should quiesce then reset */ | ||
1209 | ioat_reset_hw(ioat_chan); | ||
1210 | } | ||
1211 | |||
1212 | ioat_disable_interrupts(ioat_dma); | ||
1213 | } | ||
1214 | |||
1215 | void ioat_resume(struct ioatdma_device *ioat_dma) | ||
1216 | { | ||
1217 | struct ioatdma_chan *ioat_chan; | ||
1218 | u32 chanerr; | ||
1219 | int i; | ||
1220 | |||
1221 | for (i = 0; i < IOAT_MAX_CHANS; i++) { | ||
1222 | ioat_chan = ioat_dma->idx[i]; | ||
1223 | if (!ioat_chan) | ||
1224 | continue; | ||
1225 | |||
1226 | spin_lock_bh(&ioat_chan->prep_lock); | ||
1227 | clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); | ||
1228 | spin_unlock_bh(&ioat_chan->prep_lock); | ||
1229 | |||
1230 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
1231 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
1232 | |||
1233 | /* no need to reset as shutdown already did that */ | ||
1234 | } | ||
1235 | } | ||
1236 | |||
1189 | #define DRV_NAME "ioatdma" | 1237 | #define DRV_NAME "ioatdma" |
1190 | 1238 | ||
1239 | static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev, | ||
1240 | enum pci_channel_state error) | ||
1241 | { | ||
1242 | dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error); | ||
1243 | |||
1244 | /* quiesce and block I/O */ | ||
1245 | ioat_shutdown(pdev); | ||
1246 | |||
1247 | return PCI_ERS_RESULT_NEED_RESET; | ||
1248 | } | ||
1249 | |||
1250 | static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev) | ||
1251 | { | ||
1252 | pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; | ||
1253 | int err; | ||
1254 | |||
1255 | dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME); | ||
1256 | |||
1257 | if (pci_enable_device_mem(pdev) < 0) { | ||
1258 | dev_err(&pdev->dev, | ||
1259 | "Failed to enable PCIe device after reset.\n"); | ||
1260 | result = PCI_ERS_RESULT_DISCONNECT; | ||
1261 | } else { | ||
1262 | pci_set_master(pdev); | ||
1263 | pci_restore_state(pdev); | ||
1264 | pci_save_state(pdev); | ||
1265 | pci_wake_from_d3(pdev, false); | ||
1266 | } | ||
1267 | |||
1268 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | ||
1269 | if (err) { | ||
1270 | dev_err(&pdev->dev, | ||
1271 | "AER uncorrect error status clear failed: %#x\n", err); | ||
1272 | } | ||
1273 | |||
1274 | return result; | ||
1275 | } | ||
1276 | |||
1277 | static void ioat_pcie_error_resume(struct pci_dev *pdev) | ||
1278 | { | ||
1279 | struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev); | ||
1280 | |||
1281 | dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME); | ||
1282 | |||
1283 | /* initialize and bring everything back */ | ||
1284 | ioat_resume(ioat_dma); | ||
1285 | } | ||
1286 | |||
1287 | static const struct pci_error_handlers ioat_err_handler = { | ||
1288 | .error_detected = ioat_pcie_error_detected, | ||
1289 | .slot_reset = ioat_pcie_error_slot_reset, | ||
1290 | .resume = ioat_pcie_error_resume, | ||
1291 | }; | ||
1292 | |||
1191 | static struct pci_driver ioat_pci_driver = { | 1293 | static struct pci_driver ioat_pci_driver = { |
1192 | .name = DRV_NAME, | 1294 | .name = DRV_NAME, |
1193 | .id_table = ioat_pci_tbl, | 1295 | .id_table = ioat_pci_tbl, |
1194 | .probe = ioat_pci_probe, | 1296 | .probe = ioat_pci_probe, |
1195 | .remove = ioat_remove, | 1297 | .remove = ioat_remove, |
1298 | .shutdown = ioat_shutdown, | ||
1299 | .err_handler = &ioat_err_handler, | ||
1196 | }; | 1300 | }; |
1197 | 1301 | ||
1198 | static struct ioatdma_device * | 1302 | static struct ioatdma_device * |
@@ -1245,13 +1349,17 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1245 | pci_set_drvdata(pdev, device); | 1349 | pci_set_drvdata(pdev, device); |
1246 | 1350 | ||
1247 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | 1351 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); |
1248 | if (device->version >= IOAT_VER_3_0) | 1352 | if (device->version >= IOAT_VER_3_0) { |
1249 | err = ioat3_dma_probe(device, ioat_dca_enabled); | 1353 | err = ioat3_dma_probe(device, ioat_dca_enabled); |
1250 | else | 1354 | |
1355 | if (device->version >= IOAT_VER_3_3) | ||
1356 | pci_enable_pcie_error_reporting(pdev); | ||
1357 | } else | ||
1251 | return -ENODEV; | 1358 | return -ENODEV; |
1252 | 1359 | ||
1253 | if (err) { | 1360 | if (err) { |
1254 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); | 1361 | dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); |
1362 | pci_disable_pcie_error_reporting(pdev); | ||
1255 | return -ENODEV; | 1363 | return -ENODEV; |
1256 | } | 1364 | } |
1257 | 1365 | ||
@@ -1271,6 +1379,8 @@ static void ioat_remove(struct pci_dev *pdev) | |||
1271 | free_dca_provider(device->dca); | 1379 | free_dca_provider(device->dca); |
1272 | device->dca = NULL; | 1380 | device->dca = NULL; |
1273 | } | 1381 | } |
1382 | |||
1383 | pci_disable_pcie_error_reporting(pdev); | ||
1274 | ioat_dma_remove(device); | 1384 | ioat_dma_remove(device); |
1275 | } | 1385 | } |
1276 | 1386 | ||
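The new ioat shutdown and AER plumbing follows the standard PCI error-recovery contract: quiesce in error_detected and ask for a reset, bring the device back in slot_reset, restart work in resume. A minimal skeleton with placeholder callback names:

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                          enum pci_channel_state state)
{
        /* quiesce the device here (as ioat_shutdown() does above) */
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device_mem(pdev) < 0)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void my_error_resume(struct pci_dev *pdev)
{
        /* re-enable channels and restart I/O */
}

static const struct pci_error_handlers my_err_handler = {
        .error_detected = my_error_detected,
        .slot_reset     = my_slot_reset,
        .resume         = my_error_resume,
};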
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c index ad4fb41cd23b..6bb4a13a8fbd 100644 --- a/drivers/dma/ioat/prep.c +++ b/drivers/dma/ioat/prep.c | |||
@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | |||
121 | size_t total_len = len; | 121 | size_t total_len = len; |
122 | int num_descs, idx, i; | 122 | int num_descs, idx, i; |
123 | 123 | ||
124 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
125 | return NULL; | ||
126 | |||
124 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); | 127 | num_descs = ioat_xferlen_to_descs(ioat_chan, len); |
125 | if (likely(num_descs) && | 128 | if (likely(num_descs) && |
126 | ioat_check_space_lock(ioat_chan, num_descs) == 0) | 129 | ioat_check_space_lock(ioat_chan, num_descs) == 0) |
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor * | |||
254 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 257 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
255 | unsigned int src_cnt, size_t len, unsigned long flags) | 258 | unsigned int src_cnt, size_t len, unsigned long flags) |
256 | { | 259 | { |
260 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
261 | |||
262 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
263 | return NULL; | ||
264 | |||
257 | return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); | 265 | return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); |
258 | } | 266 | } |
259 | 267 | ||
@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | |||
262 | unsigned int src_cnt, size_t len, | 270 | unsigned int src_cnt, size_t len, |
263 | enum sum_check_flags *result, unsigned long flags) | 271 | enum sum_check_flags *result, unsigned long flags) |
264 | { | 272 | { |
273 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
274 | |||
275 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
276 | return NULL; | ||
277 | |||
265 | /* the cleanup routine only sets bits on validate failure, it | 278 | /* the cleanup routine only sets bits on validate failure, it |
266 | * does not clear bits on validate success... so clear it here | 279 | * does not clear bits on validate success... so clear it here |
267 | */ | 280 | */ |
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
574 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 587 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
575 | unsigned long flags) | 588 | unsigned long flags) |
576 | { | 589 | { |
590 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
591 | |||
592 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
593 | return NULL; | ||
594 | |||
577 | /* specify valid address for disabled result */ | 595 | /* specify valid address for disabled result */ |
578 | if (flags & DMA_PREP_PQ_DISABLE_P) | 596 | if (flags & DMA_PREP_PQ_DISABLE_P) |
579 | dst[0] = dst[1]; | 597 | dst[0] = dst[1]; |
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
614 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 632 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
615 | enum sum_check_flags *pqres, unsigned long flags) | 633 | enum sum_check_flags *pqres, unsigned long flags) |
616 | { | 634 | { |
635 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
636 | |||
637 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
638 | return NULL; | ||
639 | |||
617 | /* specify valid address for disabled result */ | 640 | /* specify valid address for disabled result */ |
618 | if (flags & DMA_PREP_PQ_DISABLE_P) | 641 | if (flags & DMA_PREP_PQ_DISABLE_P) |
619 | pq[0] = pq[1]; | 642 | pq[0] = pq[1]; |
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |||
638 | { | 661 | { |
639 | unsigned char scf[MAX_SCF]; | 662 | unsigned char scf[MAX_SCF]; |
640 | dma_addr_t pq[2]; | 663 | dma_addr_t pq[2]; |
664 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
665 | |||
666 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
667 | return NULL; | ||
641 | 668 | ||
642 | if (src_cnt > MAX_SCF) | 669 | if (src_cnt > MAX_SCF) |
643 | return NULL; | 670 | return NULL; |
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
661 | { | 688 | { |
662 | unsigned char scf[MAX_SCF]; | 689 | unsigned char scf[MAX_SCF]; |
663 | dma_addr_t pq[2]; | 690 | dma_addr_t pq[2]; |
691 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | ||
692 | |||
693 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
694 | return NULL; | ||
664 | 695 | ||
665 | if (src_cnt > MAX_SCF) | 696 | if (src_cnt > MAX_SCF) |
666 | return NULL; | 697 | return NULL; |
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) | |||
689 | struct ioat_ring_ent *desc; | 720 | struct ioat_ring_ent *desc; |
690 | struct ioat_dma_descriptor *hw; | 721 | struct ioat_dma_descriptor *hw; |
691 | 722 | ||
723 | if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) | ||
724 | return NULL; | ||
725 | |||
692 | if (ioat_check_space_lock(ioat_chan, 1) == 0) | 726 | if (ioat_check_space_lock(ioat_chan, 1) == 0) |
693 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); | 727 | desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); |
694 | else | 728 | else |
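Each prep hunk above adds the same early-out; returning NULL from a device_prep_* callback is how a dmaengine provider reports that no descriptor can be issued, so clients must check for it. A sketch of the caller's side (addresses and flags are placeholders):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_submit_memcpy(struct dma_chan *chan, dma_addr_t dst,
                                 dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                   DMA_PREP_INTERRUPT);
        if (!tx)                /* channel down or out of descriptors */
                return -ENOMEM;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}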
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index b4634109e010..631c4435e075 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c | |||
@@ -652,6 +652,7 @@ static const struct of_device_id moxart_dma_match[] = { | |||
652 | { .compatible = "moxa,moxart-dma" }, | 652 | { .compatible = "moxa,moxart-dma" }, |
653 | { } | 653 | { } |
654 | }; | 654 | }; |
655 | MODULE_DEVICE_TABLE(of, moxart_dma_match); | ||
655 | 656 | ||
656 | static struct platform_driver moxart_driver = { | 657 | static struct platform_driver moxart_driver = { |
657 | .probe = moxart_probe, | 658 | .probe = moxart_probe, |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index e6281e7aa46e..aae76fb39adc 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -1073,6 +1073,7 @@ static const struct of_device_id mpc_dma_match[] = { | |||
1073 | { .compatible = "fsl,mpc8308-dma", }, | 1073 | { .compatible = "fsl,mpc8308-dma", }, |
1074 | {}, | 1074 | {}, |
1075 | }; | 1075 | }; |
1076 | MODULE_DEVICE_TABLE(of, mpc_dma_match); | ||
1076 | 1077 | ||
1077 | static struct platform_driver mpc_dma_driver = { | 1078 | static struct platform_driver mpc_dma_driver = { |
1078 | .probe = mpc_dma_probe, | 1079 | .probe = mpc_dma_probe, |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 249445c8a4c6..1dfc71c90123 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -935,8 +935,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | |||
935 | else | 935 | else |
936 | d->ccr |= CCR_SYNC_ELEMENT; | 936 | d->ccr |= CCR_SYNC_ELEMENT; |
937 | 937 | ||
938 | if (dir == DMA_DEV_TO_MEM) | 938 | if (dir == DMA_DEV_TO_MEM) { |
939 | d->ccr |= CCR_TRIGGER_SRC; | 939 | d->ccr |= CCR_TRIGGER_SRC; |
940 | d->csdp |= CSDP_DST_PACKED; | ||
941 | } else { | ||
942 | d->csdp |= CSDP_SRC_PACKED; | ||
943 | } | ||
940 | 944 | ||
941 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; | 945 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
942 | 946 | ||
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 7d5598d874e1..22ea2419ee56 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -1149,6 +1149,7 @@ static const struct of_device_id sirfsoc_dma_match[] = { | |||
1149 | { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, | 1149 | { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, |
1150 | {}, | 1150 | {}, |
1151 | }; | 1151 | }; |
1152 | MODULE_DEVICE_TABLE(of, sirfsoc_dma_match); | ||
1152 | 1153 | ||
1153 | static struct platform_driver sirfsoc_dma_driver = { | 1154 | static struct platform_driver sirfsoc_dma_driver = { |
1154 | .probe = sirfsoc_dma_probe, | 1155 | .probe = sirfsoc_dma_probe, |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 750d1b313684..dd3e7ba273ad 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -2907,7 +2907,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2907 | 2907 | ||
2908 | if (err) { | 2908 | if (err) { |
2909 | d40_err(base->dev, | 2909 | d40_err(base->dev, |
2910 | "Failed to regsiter memcpy only channels\n"); | 2910 | "Failed to register memcpy only channels\n"); |
2911 | goto failure2; | 2911 | goto failure2; |
2912 | } | 2912 | } |
2913 | 2913 | ||
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 73e0be6e2100..2db12e493c53 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -908,6 +908,7 @@ static const struct of_device_id sun6i_dma_match[] = { | |||
908 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, | 908 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, |
909 | { /* sentinel */ } | 909 | { /* sentinel */ } |
910 | }; | 910 | }; |
911 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); | ||
911 | 912 | ||
912 | static int sun6i_dma_probe(struct platform_device *pdev) | 913 | static int sun6i_dma_probe(struct platform_device *pdev) |
913 | { | 914 | { |
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 5cce8c9d0026..a415edbe61b1 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -17,13 +17,184 @@ | |||
17 | #include <linux/of_device.h> | 17 | #include <linux/of_device.h> |
18 | #include <linux/of_dma.h> | 18 | #include <linux/of_dma.h> |
19 | 19 | ||
20 | #define TI_XBAR_OUTPUTS 127 | 20 | #define TI_XBAR_DRA7 0 |
21 | #define TI_XBAR_INPUTS 256 | 21 | #define TI_XBAR_AM335X 1 |
22 | |||
23 | static const struct of_device_id ti_dma_xbar_match[] = { | ||
24 | { | ||
25 | .compatible = "ti,dra7-dma-crossbar", | ||
26 | .data = (void *)TI_XBAR_DRA7, | ||
27 | }, | ||
28 | { | ||
29 | .compatible = "ti,am335x-edma-crossbar", | ||
30 | .data = (void *)TI_XBAR_AM335X, | ||
31 | }, | ||
32 | {}, | ||
33 | }; | ||
34 | |||
35 | /* Crossbar on AM335x/AM437x family */ | ||
36 | #define TI_AM335X_XBAR_LINES 64 | ||
37 | |||
38 | struct ti_am335x_xbar_data { | ||
39 | void __iomem *iomem; | ||
40 | |||
41 | struct dma_router dmarouter; | ||
42 | |||
43 | u32 xbar_events; /* maximum number of events to select in xbar */ | ||
44 | u32 dma_requests; /* number of DMA requests on eDMA */ | ||
45 | }; | ||
46 | |||
47 | struct ti_am335x_xbar_map { | ||
48 | u16 dma_line; | ||
49 | u16 mux_val; | ||
50 | }; | ||
51 | |||
52 | static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) | ||
53 | { | ||
54 | writeb_relaxed(val & 0x1f, iomem + event); | ||
55 | } | ||
56 | |||
57 | static void ti_am335x_xbar_free(struct device *dev, void *route_data) | ||
58 | { | ||
59 | struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); | ||
60 | struct ti_am335x_xbar_map *map = route_data; | ||
61 | |||
62 | dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", | ||
63 | map->mux_val, map->dma_line); | ||
64 | |||
65 | ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); | ||
66 | kfree(map); | ||
67 | } | ||
68 | |||
69 | static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, | ||
70 | struct of_dma *ofdma) | ||
71 | { | ||
72 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | ||
73 | struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev); | ||
74 | struct ti_am335x_xbar_map *map; | ||
75 | |||
76 | if (dma_spec->args_count != 3) | ||
77 | return ERR_PTR(-EINVAL); | ||
78 | |||
79 | if (dma_spec->args[2] >= xbar->xbar_events) { | ||
80 | dev_err(&pdev->dev, "Invalid XBAR event number: %d\n", | ||
81 | dma_spec->args[2]); | ||
82 | return ERR_PTR(-EINVAL); | ||
83 | } | ||
84 | |||
85 | if (dma_spec->args[0] >= xbar->dma_requests) { | ||
86 | dev_err(&pdev->dev, "Invalid DMA request line number: %d\n", | ||
87 | dma_spec->args[0]); | ||
88 | return ERR_PTR(-EINVAL); | ||
89 | } | ||
90 | |||
91 | /* The of_node_put() will be done in the core for the node */ | ||
92 | dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); | ||
93 | if (!dma_spec->np) { | ||
94 | dev_err(&pdev->dev, "Can't get DMA master\n"); | ||
95 | return ERR_PTR(-EINVAL); | ||
96 | } | ||
97 | |||
98 | map = kzalloc(sizeof(*map), GFP_KERNEL); | ||
99 | if (!map) { | ||
100 | of_node_put(dma_spec->np); | ||
101 | return ERR_PTR(-ENOMEM); | ||
102 | } | ||
103 | |||
104 | map->dma_line = (u16)dma_spec->args[0]; | ||
105 | map->mux_val = (u16)dma_spec->args[2]; | ||
106 | |||
107 | dma_spec->args[2] = 0; | ||
108 | dma_spec->args_count = 2; | ||
109 | |||
110 | dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n", | ||
111 | map->mux_val, map->dma_line); | ||
112 | |||
113 | ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); | ||
114 | |||
115 | return map; | ||
116 | } | ||
117 | |||
118 | static const struct of_device_id ti_am335x_master_match[] = { | ||
119 | { .compatible = "ti,edma3-tpcc", }, | ||
120 | {}, | ||
121 | }; | ||
122 | |||
123 | static int ti_am335x_xbar_probe(struct platform_device *pdev) | ||
124 | { | ||
125 | struct device_node *node = pdev->dev.of_node; | ||
126 | const struct of_device_id *match; | ||
127 | struct device_node *dma_node; | ||
128 | struct ti_am335x_xbar_data *xbar; | ||
129 | struct resource *res; | ||
130 | void __iomem *iomem; | ||
131 | int i, ret; | ||
132 | |||
133 | if (!node) | ||
134 | return -ENODEV; | ||
135 | |||
136 | xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); | ||
137 | if (!xbar) | ||
138 | return -ENOMEM; | ||
139 | |||
140 | dma_node = of_parse_phandle(node, "dma-masters", 0); | ||
141 | if (!dma_node) { | ||
142 | dev_err(&pdev->dev, "Can't get DMA master node\n"); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | |||
146 | match = of_match_node(ti_am335x_master_match, dma_node); | ||
147 | if (!match) { | ||
148 | dev_err(&pdev->dev, "DMA master is not supported\n"); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | |||
152 | if (of_property_read_u32(dma_node, "dma-requests", | ||
153 | &xbar->dma_requests)) { | ||
154 | dev_info(&pdev->dev, | ||
155 | "Missing XBAR output information, using %u.\n", | ||
156 | TI_AM335X_XBAR_LINES); | ||
157 | xbar->dma_requests = TI_AM335X_XBAR_LINES; | ||
158 | } | ||
159 | of_node_put(dma_node); | ||
160 | |||
161 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { | ||
162 | dev_info(&pdev->dev, | ||
163 | "Missing XBAR input information, using %u.\n", | ||
164 | TI_AM335X_XBAR_LINES); | ||
165 | xbar->xbar_events = TI_AM335X_XBAR_LINES; | ||
166 | } | ||
167 | |||
168 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
169 | iomem = devm_ioremap_resource(&pdev->dev, res); | ||
170 | if (IS_ERR(iomem)) | ||
171 | return PTR_ERR(iomem); | ||
172 | |||
173 | xbar->iomem = iomem; | ||
174 | |||
175 | xbar->dmarouter.dev = &pdev->dev; | ||
176 | xbar->dmarouter.route_free = ti_am335x_xbar_free; | ||
177 | |||
178 | platform_set_drvdata(pdev, xbar); | ||
179 | |||
180 | /* Reset the crossbar */ | ||
181 | for (i = 0; i < xbar->dma_requests; i++) | ||
182 | ti_am335x_xbar_write(xbar->iomem, i, 0); | ||
183 | |||
184 | ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, | ||
185 | &xbar->dmarouter); | ||
186 | |||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | /* Crossbar on DRA7xx family */ | ||
191 | #define TI_DRA7_XBAR_OUTPUTS 127 | ||
192 | #define TI_DRA7_XBAR_INPUTS 256 | ||
22 | 193 | ||
23 | #define TI_XBAR_EDMA_OFFSET 0 | 194 | #define TI_XBAR_EDMA_OFFSET 0 |
24 | #define TI_XBAR_SDMA_OFFSET 1 | 195 | #define TI_XBAR_SDMA_OFFSET 1 |
25 | 196 | ||
26 | struct ti_dma_xbar_data { | 197 | struct ti_dra7_xbar_data { |
27 | void __iomem *iomem; | 198 | void __iomem *iomem; |
28 | 199 | ||
29 | struct dma_router dmarouter; | 200 | struct dma_router dmarouter; |
@@ -35,35 +206,35 @@ struct ti_dma_xbar_data { | |||
35 | u32 dma_offset; | 206 | u32 dma_offset; |
36 | }; | 207 | }; |
37 | 208 | ||
38 | struct ti_dma_xbar_map { | 209 | struct ti_dra7_xbar_map { |
39 | u16 xbar_in; | 210 | u16 xbar_in; |
40 | int xbar_out; | 211 | int xbar_out; |
41 | }; | 212 | }; |
42 | 213 | ||
43 | static inline void ti_dma_xbar_write(void __iomem *iomem, int xbar, u16 val) | 214 | static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) |
44 | { | 215 | { |
45 | writew_relaxed(val, iomem + (xbar * 2)); | 216 | writew_relaxed(val, iomem + (xbar * 2)); |
46 | } | 217 | } |
47 | 218 | ||
48 | static void ti_dma_xbar_free(struct device *dev, void *route_data) | 219 | static void ti_dra7_xbar_free(struct device *dev, void *route_data) |
49 | { | 220 | { |
50 | struct ti_dma_xbar_data *xbar = dev_get_drvdata(dev); | 221 | struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); |
51 | struct ti_dma_xbar_map *map = route_data; | 222 | struct ti_dra7_xbar_map *map = route_data; |
52 | 223 | ||
53 | dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", | 224 | dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", |
54 | map->xbar_in, map->xbar_out); | 225 | map->xbar_in, map->xbar_out); |
55 | 226 | ||
56 | ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); | 227 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); |
57 | idr_remove(&xbar->map_idr, map->xbar_out); | 228 | idr_remove(&xbar->map_idr, map->xbar_out); |
58 | kfree(map); | 229 | kfree(map); |
59 | } | 230 | } |
60 | 231 | ||
61 | static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, | 232 | static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, |
62 | struct of_dma *ofdma) | 233 | struct of_dma *ofdma) |
63 | { | 234 | { |
64 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | 235 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); |
65 | struct ti_dma_xbar_data *xbar = platform_get_drvdata(pdev); | 236 | struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); |
66 | struct ti_dma_xbar_map *map; | 237 | struct ti_dra7_xbar_map *map; |
67 | 238 | ||
68 | if (dma_spec->args[0] >= xbar->xbar_requests) { | 239 | if (dma_spec->args[0] >= xbar->xbar_requests) { |
69 | dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", | 240 | dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", |
@@ -93,12 +264,12 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
93 | dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", | 264 | dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", |
94 | map->xbar_in, map->xbar_out); | 265 | map->xbar_in, map->xbar_out); |
95 | 266 | ||
96 | ti_dma_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); | 267 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); |
97 | 268 | ||
98 | return map; | 269 | return map; |
99 | } | 270 | } |
100 | 271 | ||
101 | static const struct of_device_id ti_dma_master_match[] = { | 272 | static const struct of_device_id ti_dra7_master_match[] = { |
102 | { | 273 | { |
103 | .compatible = "ti,omap4430-sdma", | 274 | .compatible = "ti,omap4430-sdma", |
104 | .data = (void *)TI_XBAR_SDMA_OFFSET, | 275 | .data = (void *)TI_XBAR_SDMA_OFFSET, |
@@ -110,12 +281,12 @@ static const struct of_device_id ti_dma_master_match[] = { | |||
110 | {}, | 281 | {}, |
111 | }; | 282 | }; |
112 | 283 | ||
113 | static int ti_dma_xbar_probe(struct platform_device *pdev) | 284 | static int ti_dra7_xbar_probe(struct platform_device *pdev) |
114 | { | 285 | { |
115 | struct device_node *node = pdev->dev.of_node; | 286 | struct device_node *node = pdev->dev.of_node; |
116 | const struct of_device_id *match; | 287 | const struct of_device_id *match; |
117 | struct device_node *dma_node; | 288 | struct device_node *dma_node; |
118 | struct ti_dma_xbar_data *xbar; | 289 | struct ti_dra7_xbar_data *xbar; |
119 | struct resource *res; | 290 | struct resource *res; |
120 | u32 safe_val; | 291 | u32 safe_val; |
121 | void __iomem *iomem; | 292 | void __iomem *iomem; |
@@ -136,7 +307,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
136 | return -ENODEV; | 307 | return -ENODEV; |
137 | } | 308 | } |
138 | 309 | ||
139 | match = of_match_node(ti_dma_master_match, dma_node); | 310 | match = of_match_node(ti_dra7_master_match, dma_node); |
140 | if (!match) { | 311 | if (!match) { |
141 | dev_err(&pdev->dev, "DMA master is not supported\n"); | 312 | dev_err(&pdev->dev, "DMA master is not supported\n"); |
142 | return -EINVAL; | 313 | return -EINVAL; |
@@ -146,16 +317,16 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
146 | &xbar->dma_requests)) { | 317 | &xbar->dma_requests)) { |
147 | dev_info(&pdev->dev, | 318 | dev_info(&pdev->dev, |
148 | "Missing XBAR output information, using %u.\n", | 319 | "Missing XBAR output information, using %u.\n", |
149 | TI_XBAR_OUTPUTS); | 320 | TI_DRA7_XBAR_OUTPUTS); |
150 | xbar->dma_requests = TI_XBAR_OUTPUTS; | 321 | xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; |
151 | } | 322 | } |
152 | of_node_put(dma_node); | 323 | of_node_put(dma_node); |
153 | 324 | ||
154 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { | 325 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { |
155 | dev_info(&pdev->dev, | 326 | dev_info(&pdev->dev, |
156 | "Missing XBAR input information, using %u.\n", | 327 | "Missing XBAR input information, using %u.\n", |
157 | TI_XBAR_INPUTS); | 328 | TI_DRA7_XBAR_INPUTS); |
158 | xbar->xbar_requests = TI_XBAR_INPUTS; | 329 | xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; |
159 | } | 330 | } |
160 | 331 | ||
161 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) | 332 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) |
@@ -169,30 +340,50 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
169 | xbar->iomem = iomem; | 340 | xbar->iomem = iomem; |
170 | 341 | ||
171 | xbar->dmarouter.dev = &pdev->dev; | 342 | xbar->dmarouter.dev = &pdev->dev; |
172 | xbar->dmarouter.route_free = ti_dma_xbar_free; | 343 | xbar->dmarouter.route_free = ti_dra7_xbar_free; |
173 | xbar->dma_offset = (u32)match->data; | 344 | xbar->dma_offset = (u32)match->data; |
174 | 345 | ||
175 | platform_set_drvdata(pdev, xbar); | 346 | platform_set_drvdata(pdev, xbar); |
176 | 347 | ||
177 | /* Reset the crossbar */ | 348 | /* Reset the crossbar */ |
178 | for (i = 0; i < xbar->dma_requests; i++) | 349 | for (i = 0; i < xbar->dma_requests; i++) |
179 | ti_dma_xbar_write(xbar->iomem, i, xbar->safe_val); | 350 | ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); |
180 | 351 | ||
181 | ret = of_dma_router_register(node, ti_dma_xbar_route_allocate, | 352 | ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, |
182 | &xbar->dmarouter); | 353 | &xbar->dmarouter); |
183 | if (ret) { | 354 | if (ret) { |
184 | /* Restore the defaults for the crossbar */ | 355 | /* Restore the defaults for the crossbar */ |
185 | for (i = 0; i < xbar->dma_requests; i++) | 356 | for (i = 0; i < xbar->dma_requests; i++) |
186 | ti_dma_xbar_write(xbar->iomem, i, i); | 357 | ti_dra7_xbar_write(xbar->iomem, i, i); |
187 | } | 358 | } |
188 | 359 | ||
189 | return ret; | 360 | return ret; |
190 | } | 361 | } |
191 | 362 | ||
192 | static const struct of_device_id ti_dma_xbar_match[] = { | 363 | static int ti_dma_xbar_probe(struct platform_device *pdev) |
193 | { .compatible = "ti,dra7-dma-crossbar" }, | 364 | { |
194 | {}, | 365 | const struct of_device_id *match; |
195 | }; | 366 | int ret; |
367 | |||
368 | match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node); | ||
369 | if (unlikely(!match)) | ||
370 | return -EINVAL; | ||
371 | |||
372 | switch ((u32)match->data) { | ||
373 | case TI_XBAR_DRA7: | ||
374 | ret = ti_dra7_xbar_probe(pdev); | ||
375 | break; | ||
376 | case TI_XBAR_AM335X: | ||
377 | ret = ti_am335x_xbar_probe(pdev); | ||
378 | break; | ||
379 | default: | ||
380 | dev_err(&pdev->dev, "Unsupported crossbar\n"); | ||
381 | ret = -ENODEV; | ||
382 | break; | ||
383 | } | ||
384 | |||
385 | return ret; | ||
386 | } | ||
196 | 387 | ||
197 | static struct platform_driver ti_dma_xbar_driver = { | 388 | static struct platform_driver ti_dma_xbar_driver = { |
198 | .driver = { | 389 | .driver = { |
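The crossbar rework keys the probe on of_device_id.data so one platform driver can serve both crossbar generations. A generic sketch of that dispatch pattern, with placeholder names and stubbed variant probes:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum my_xbar_type { MY_XBAR_A, MY_XBAR_B };

static int my_xbar_a_probe(struct platform_device *pdev) { return 0; }
static int my_xbar_b_probe(struct platform_device *pdev) { return 0; }

static const struct of_device_id my_xbar_match[] = {
        { .compatible = "vendor,xbar-a", .data = (void *)MY_XBAR_A },
        { .compatible = "vendor,xbar-b", .data = (void *)MY_XBAR_B },
        {},
};

static int my_xbar_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;

        match = of_match_node(my_xbar_match, pdev->dev.of_node);
        if (!match)
                return -EINVAL;

        switch ((uintptr_t)match->data) {       /* variant stored in .data */
        case MY_XBAR_A:
                return my_xbar_a_probe(pdev);
        case MY_XBAR_B:
                return my_xbar_b_probe(pdev);
        default:
                return -ENODEV;
        }
}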
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 181b95267866..2fa47745a41f 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h | |||
@@ -47,9 +47,9 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); | |||
47 | 47 | ||
48 | /** | 48 | /** |
49 | * vchan_tx_prep - prepare a descriptor | 49 | * vchan_tx_prep - prepare a descriptor |
50 | * vc: virtual channel allocating this descriptor | 50 | * @vc: virtual channel allocating this descriptor |
51 | * vd: virtual descriptor to prepare | 51 | * @vd: virtual descriptor to prepare |
52 | * tx_flags: flags argument passed in to prepare function | 52 | * @tx_flags: flags argument passed in to prepare function |
53 | */ | 53 | */ |
54 | static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, | 54 | static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, |
55 | struct virt_dma_desc *vd, unsigned long tx_flags) | 55 | struct virt_dma_desc *vd, unsigned long tx_flags) |
@@ -65,7 +65,7 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan | |||
65 | 65 | ||
66 | /** | 66 | /** |
67 | * vchan_issue_pending - move submitted descriptors to issued list | 67 | * vchan_issue_pending - move submitted descriptors to issued list |
68 | * vc: virtual channel to update | 68 | * @vc: virtual channel to update |
69 | * | 69 | * |
70 | * vc.lock must be held by caller | 70 | * vc.lock must be held by caller |
71 | */ | 71 | */ |
@@ -77,7 +77,7 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc) | |||
77 | 77 | ||
78 | /** | 78 | /** |
79 | * vchan_cookie_complete - report completion of a descriptor | 79 | * vchan_cookie_complete - report completion of a descriptor |
80 | * vd: virtual descriptor to update | 80 | * @vd: virtual descriptor to update |
81 | * | 81 | * |
82 | * vc.lock must be held by caller | 82 | * vc.lock must be held by caller |
83 | */ | 83 | */ |
@@ -97,7 +97,7 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) | |||
97 | 97 | ||
98 | /** | 98 | /** |
99 | * vchan_cyclic_callback - report the completion of a period | 99 | * vchan_cyclic_callback - report the completion of a period |
100 | * vd: virtual descriptor | 100 | * @vd: virtual descriptor |
101 | */ | 101 | */ |
102 | static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) | 102 | static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) |
103 | { | 103 | { |
@@ -109,7 +109,7 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) | |||
109 | 109 | ||
110 | /** | 110 | /** |
111 | * vchan_next_desc - peek at the next descriptor to be processed | 111 | * vchan_next_desc - peek at the next descriptor to be processed |
112 | * vc: virtual channel to obtain descriptor from | 112 | * @vc: virtual channel to obtain descriptor from |
113 | * | 113 | * |
114 | * vc.lock must be held by caller | 114 | * vc.lock must be held by caller |
115 | */ | 115 | */ |
@@ -123,8 +123,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | |||
123 | 123 | ||
124 | /** | 124 | /** |
125 | * vchan_get_all_descriptors - obtain all submitted and issued descriptors | 125 | * vchan_get_all_descriptors - obtain all submitted and issued descriptors |
126 | * vc: virtual channel to get descriptors from | 126 | * @vc: virtual channel to get descriptors from |
127 | * head: list of descriptors found | 127 | * @head: list of descriptors found |
128 | * | 128 | * |
129 | * vc.lock must be held by caller | 129 | * vc.lock must be held by caller |
130 | * | 130 | * |
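The virt-dma.h hunks are pure kernel-doc hygiene: parameter lines need a leading '@' for scripts/kernel-doc to pair the description with the argument. The corrected shape, for reference (vchan_example is a placeholder name):

/**
 * vchan_example - one-line summary of the helper
 * @vc: the '@' prefix is what marks this line as documenting
 *      the 'vc' parameter
 *
 * Free-form description follows after a blank comment line.
 */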
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 8d57b1b12e41..9dfa2b0fa5da 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -547,14 +547,12 @@ static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor( | |||
547 | struct xgene_dma_desc_sw *desc; | 547 | struct xgene_dma_desc_sw *desc; |
548 | dma_addr_t phys; | 548 | dma_addr_t phys; |
549 | 549 | ||
550 | desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys); | 550 | desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys); |
551 | if (!desc) { | 551 | if (!desc) { |
552 | chan_err(chan, "Failed to allocate LDs\n"); | 552 | chan_err(chan, "Failed to allocate LDs\n"); |
553 | return NULL; | 553 | return NULL; |
554 | } | 554 | } |
555 | 555 | ||
556 | memset(desc, 0, sizeof(*desc)); | ||
557 | |||
558 | INIT_LIST_HEAD(&desc->tx_list); | 556 | INIT_LIST_HEAD(&desc->tx_list); |
559 | desc->tx.phys = phys; | 557 | desc->tx.phys = phys; |
560 | desc->tx.tx_submit = xgene_dma_tx_submit; | 558 | desc->tx.tx_submit = xgene_dma_tx_submit; |
@@ -894,60 +892,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan) | |||
894 | chan->desc_pool = NULL; | 892 | chan->desc_pool = NULL; |
895 | } | 893 | } |
896 | 894 | ||
897 | static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy( | ||
898 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, | ||
899 | size_t len, unsigned long flags) | ||
900 | { | ||
901 | struct xgene_dma_desc_sw *first = NULL, *new; | ||
902 | struct xgene_dma_chan *chan; | ||
903 | size_t copy; | ||
904 | |||
905 | if (unlikely(!dchan || !len)) | ||
906 | return NULL; | ||
907 | |||
908 | chan = to_dma_chan(dchan); | ||
909 | |||
910 | do { | ||
911 | /* Allocate the link descriptor from DMA pool */ | ||
912 | new = xgene_dma_alloc_descriptor(chan); | ||
913 | if (!new) | ||
914 | goto fail; | ||
915 | |||
916 | /* Create the largest transaction possible */ | ||
917 | copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); | ||
918 | |||
919 | /* Prepare DMA descriptor */ | ||
920 | xgene_dma_prep_cpy_desc(chan, new, dst, src, copy); | ||
921 | |||
922 | if (!first) | ||
923 | first = new; | ||
924 | |||
925 | new->tx.cookie = 0; | ||
926 | async_tx_ack(&new->tx); | ||
927 | |||
928 | /* Update metadata */ | ||
929 | len -= copy; | ||
930 | dst += copy; | ||
931 | src += copy; | ||
932 | |||
933 | /* Insert the link descriptor to the LD ring */ | ||
934 | list_add_tail(&new->node, &first->tx_list); | ||
935 | } while (len); | ||
936 | |||
937 | new->tx.flags = flags; /* client is in control of this ack */ | ||
938 | new->tx.cookie = -EBUSY; | ||
939 | list_splice(&first->tx_list, &new->tx_list); | ||
940 | |||
941 | return &new->tx; | ||
942 | |||
943 | fail: | ||
944 | if (!first) | ||
945 | return NULL; | ||
946 | |||
947 | xgene_dma_free_desc_list(chan, &first->tx_list); | ||
948 | return NULL; | ||
949 | } | ||
950 | |||
951 | static struct dma_async_tx_descriptor *xgene_dma_prep_sg( | 895 | static struct dma_async_tx_descriptor *xgene_dma_prep_sg( |
952 | struct dma_chan *dchan, struct scatterlist *dst_sg, | 896 | struct dma_chan *dchan, struct scatterlist *dst_sg, |
953 | u32 dst_nents, struct scatterlist *src_sg, | 897 | u32 dst_nents, struct scatterlist *src_sg, |
@@ -1707,7 +1651,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |||
1707 | dma_cap_zero(dma_dev->cap_mask); | 1651 | dma_cap_zero(dma_dev->cap_mask); |
1708 | 1652 | ||
1709 | /* Set DMA device capability */ | 1653 | /* Set DMA device capability */ |
1710 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
1711 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | 1654 | dma_cap_set(DMA_SG, dma_dev->cap_mask); |
1712 | 1655 | ||
1713 | /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR | 1656 | /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR |
@@ -1734,7 +1677,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |||
1734 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; | 1677 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; |
1735 | dma_dev->device_issue_pending = xgene_dma_issue_pending; | 1678 | dma_dev->device_issue_pending = xgene_dma_issue_pending; |
1736 | dma_dev->device_tx_status = xgene_dma_tx_status; | 1679 | dma_dev->device_tx_status = xgene_dma_tx_status; |
1737 | dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy; | ||
1738 | dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; | 1680 | dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; |
1739 | 1681 | ||
1740 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1682 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
@@ -1787,8 +1729,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id) | |||
1787 | 1729 | ||
1788 | /* DMA capability info */ | 1730 | /* DMA capability info */ |
1789 | dev_info(pdma->dev, | 1731 | dev_info(pdma->dev, |
1790 | "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan), | 1732 | "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan), |
1791 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "", | ||
1792 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", | 1733 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", |
1793 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", | 1734 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", |
1794 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); | 1735 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); |
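With DMA_MEMCPY and the device_prep_dma_memcpy hook removed above, xgene-dma no longer matches clients that request memcpy channels. A hedged sketch of the standard dmaengine client side, which discovers capabilities through the mask rather than by driver name:

	#include <linux/dmaengine.h>

	/* Sketch of a dmaengine client requesting a channel by capability;
	 * after this commit an xgene-dma channel matches DMA_SG (and
	 * XOR/PQ where supported), but no longer DMA_MEMCPY. */
	static struct dma_chan *get_sg_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SG, mask);
		/* NULL filter: any controller advertising the capability will do */
		return dma_request_channel(mask, NULL, NULL);
	}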
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index d8434d465885..6f4b5017ca3b 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -1349,6 +1349,7 @@ static const struct of_device_id xilinx_vdma_of_ids[] = { | |||
1349 | { .compatible = "xlnx,axi-vdma-1.00.a",}, | 1349 | { .compatible = "xlnx,axi-vdma-1.00.a",}, |
1350 | {} | 1350 | {} |
1351 | }; | 1351 | }; |
1352 | MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids); | ||
1352 | 1353 | ||
1353 | static struct platform_driver xilinx_vdma_driver = { | 1354 | static struct platform_driver xilinx_vdma_driver = { |
1354 | .driver = { | 1355 | .driver = { |
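The one-line xilinx_vdma change is one of this series' module-autoload fixes: MODULE_DEVICE_TABLE(of, ...) embeds the compatible strings in the module's alias table so udev can load the driver when a matching device-tree node appears. A minimal sketch with hypothetical names:

	#include <linux/module.h>
	#include <linux/of.h>

	/* Hypothetical driver: the match table itself already existed; only
	 * the MODULE_DEVICE_TABLE() export was missing. */
	static const struct of_device_id foo_dma_of_ids[] = {
		{ .compatible = "vendor,foo-dma-1.00.a" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, foo_dma_of_ids);

Without the export the driver still binds when built in or loaded by hand, but is never autoloaded from the DT compatible string.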
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index c017fcd8e07c..245d759d5ffc 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c | |||
@@ -441,7 +441,7 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num, | |||
441 | kfree(ds); | 441 | kfree(ds); |
442 | return NULL; | 442 | return NULL; |
443 | } | 443 | } |
444 | memset(ds->desc_hw, sizeof(struct zx_desc_hw) * num, 0); | 444 | memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num); |
445 | ds->desc_num = num; | 445 | ds->desc_num = num; |
446 | return ds; | 446 | return ds; |
447 | } | 447 | } |
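The zx296702 change fixes swapped memset() arguments: the signature is memset(void *s, int c, size_t n), so the old call passed a length of zero and cleared nothing, leaving the hardware descriptors uninitialized. The same bug and fix in isolation:

	#include <string.h>

	struct desc_hw { unsigned int cfg; };	/* illustrative stand-in */

	static void init_descs(struct desc_hw *d, int num)
	{
		/* Buggy: arguments reversed, the length is 0, nothing is written. */
		/* memset(d, sizeof(*d) * num, 0); */

		/* Fixed: fill value first, byte count last. */
		memset(d, 0, sizeof(*d) * num);
	}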
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 98ba7525929e..36112cdd665a 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
@@ -34,7 +34,7 @@ struct of_dma_filter_info { | |||
34 | dma_filter_fn filter_fn; | 34 | dma_filter_fn filter_fn; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | #ifdef CONFIG_OF | 37 | #ifdef CONFIG_DMA_OF |
38 | extern int of_dma_controller_register(struct device_node *np, | 38 | extern int of_dma_controller_register(struct device_node *np, |
39 | struct dma_chan *(*of_dma_xlate) | 39 | struct dma_chan *(*of_dma_xlate) |
40 | (struct of_phandle_args *, struct of_dma *), | 40 | (struct of_phandle_args *, struct of_dma *), |
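The of_dma.h guard now matches the Kconfig symbol that actually builds drivers/dma/of-dma.c: with CONFIG_OF=y but CONFIG_DMA_OF=n, the old #ifdef declared externs for functions that were never compiled, so callers hit link errors instead of the inline stubs. The usual header pattern, sketched with a hypothetical helper:

	#ifdef CONFIG_DMA_OF
	extern int foo_dma_register(struct device_node *np);	/* hypothetical */
	#else
	/* Inline stub keeps callers building when the code is configured out. */
	static inline int foo_dma_register(struct device_node *np)
	{
		return -ENODEV;
	}
	#endif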
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 87ac14c584f2..03b6095d3b18 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -37,6 +37,7 @@ struct dw_dma_slave { | |||
37 | * @nr_channels: Number of channels supported by hardware (max 8) | 37 | * @nr_channels: Number of channels supported by hardware (max 8) |
38 | * @is_private: The device channels should be marked as private and not for | 38 | * @is_private: The device channels should be marked as private and not for |
39 | * by the general purpose DMA channel allocator. | 39 | * by the general purpose DMA channel allocator. |
40 | * @is_memcpy: The device channels do support memory-to-memory transfers. | ||
40 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 41 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
41 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 42 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
42 | * @block_size: Maximum block size supported by the controller | 43 | * @block_size: Maximum block size supported by the controller |
@@ -47,6 +48,7 @@ struct dw_dma_slave { | |||
47 | struct dw_dma_platform_data { | 48 | struct dw_dma_platform_data { |
48 | unsigned int nr_channels; | 49 | unsigned int nr_channels; |
49 | bool is_private; | 50 | bool is_private; |
51 | bool is_memcpy; | ||
50 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 52 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
51 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 53 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
52 | unsigned char chan_allocation_order; | 54 | unsigned char chan_allocation_order; |
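The new is_memcpy flag accompanies the series' "dw: some Intel devices has no memcpy support" fix: platform data, not autoconfiguration, now decides whether the channels are advertised for memory-to-memory work. A sketch of how probe code might consume it; the surrounding names (pdata, dw) are assumptions, not quoted from the driver:

	/* Sketch: advertise DMA_MEMCPY only when platform data allows it. */
	if (pdata->is_memcpy)
		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);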
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index bdb2710e2aab..e2878baeb90e 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -41,51 +41,6 @@ | |||
41 | #ifndef EDMA_H_ | 41 | #ifndef EDMA_H_ |
42 | #define EDMA_H_ | 42 | #define EDMA_H_ |
43 | 43 | ||
44 | /* PaRAM slots are laid out like this */ | ||
45 | struct edmacc_param { | ||
46 | u32 opt; | ||
47 | u32 src; | ||
48 | u32 a_b_cnt; | ||
49 | u32 dst; | ||
50 | u32 src_dst_bidx; | ||
51 | u32 link_bcntrld; | ||
52 | u32 src_dst_cidx; | ||
53 | u32 ccnt; | ||
54 | } __packed; | ||
55 | |||
56 | /* fields in edmacc_param.opt */ | ||
57 | #define SAM BIT(0) | ||
58 | #define DAM BIT(1) | ||
59 | #define SYNCDIM BIT(2) | ||
60 | #define STATIC BIT(3) | ||
61 | #define EDMA_FWID (0x07 << 8) | ||
62 | #define TCCMODE BIT(11) | ||
63 | #define EDMA_TCC(t) ((t) << 12) | ||
64 | #define TCINTEN BIT(20) | ||
65 | #define ITCINTEN BIT(21) | ||
66 | #define TCCHEN BIT(22) | ||
67 | #define ITCCHEN BIT(23) | ||
68 | |||
69 | /*ch_status paramater of callback function possible values*/ | ||
70 | #define EDMA_DMA_COMPLETE 1 | ||
71 | #define EDMA_DMA_CC_ERROR 2 | ||
72 | #define EDMA_DMA_TC1_ERROR 3 | ||
73 | #define EDMA_DMA_TC2_ERROR 4 | ||
74 | |||
75 | enum address_mode { | ||
76 | INCR = 0, | ||
77 | FIFO = 1 | ||
78 | }; | ||
79 | |||
80 | enum fifo_width { | ||
81 | W8BIT = 0, | ||
82 | W16BIT = 1, | ||
83 | W32BIT = 2, | ||
84 | W64BIT = 3, | ||
85 | W128BIT = 4, | ||
86 | W256BIT = 5 | ||
87 | }; | ||
88 | |||
89 | enum dma_event_q { | 44 | enum dma_event_q { |
90 | EVENTQ_0 = 0, | 45 | EVENTQ_0 = 0, |
91 | EVENTQ_1 = 1, | 46 | EVENTQ_1 = 1, |
@@ -94,64 +49,10 @@ enum dma_event_q { | |||
94 | EVENTQ_DEFAULT = -1 | 49 | EVENTQ_DEFAULT = -1 |
95 | }; | 50 | }; |
96 | 51 | ||
97 | enum sync_dimension { | ||
98 | ASYNC = 0, | ||
99 | ABSYNC = 1 | ||
100 | }; | ||
101 | |||
102 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) | 52 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) |
103 | #define EDMA_CTLR(i) ((i) >> 16) | 53 | #define EDMA_CTLR(i) ((i) >> 16) |
104 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) | 54 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) |
105 | 55 | ||
106 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
107 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
108 | #define EDMA_CONT_PARAMS_ANY 1001 | ||
109 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 | ||
110 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 | ||
111 | |||
112 | #define EDMA_MAX_CC 2 | ||
113 | |||
114 | /* alloc/free DMA channels and their dedicated parameter RAM slots */ | ||
115 | int edma_alloc_channel(int channel, | ||
116 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
117 | void *data, enum dma_event_q); | ||
118 | void edma_free_channel(unsigned channel); | ||
119 | |||
120 | /* alloc/free parameter RAM slots */ | ||
121 | int edma_alloc_slot(unsigned ctlr, int slot); | ||
122 | void edma_free_slot(unsigned slot); | ||
123 | |||
124 | /* alloc/free a set of contiguous parameter RAM slots */ | ||
125 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count); | ||
126 | int edma_free_cont_slots(unsigned slot, int count); | ||
127 | |||
128 | /* calls that operate on part of a parameter RAM slot */ | ||
129 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
130 | enum address_mode mode, enum fifo_width); | ||
131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
132 | enum address_mode mode, enum fifo_width); | ||
133 | dma_addr_t edma_get_position(unsigned slot, bool dst); | ||
134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | ||
135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | ||
136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | ||
137 | u16 bcnt_rld, enum sync_dimension sync_mode); | ||
138 | void edma_link(unsigned from, unsigned to); | ||
139 | void edma_unlink(unsigned from); | ||
140 | |||
141 | /* calls that operate on an entire parameter RAM slot */ | ||
142 | void edma_write_slot(unsigned slot, const struct edmacc_param *params); | ||
143 | void edma_read_slot(unsigned slot, struct edmacc_param *params); | ||
144 | |||
145 | /* channel control operations */ | ||
146 | int edma_start(unsigned channel); | ||
147 | void edma_stop(unsigned channel); | ||
148 | void edma_clean_channel(unsigned channel); | ||
149 | void edma_clear_event(unsigned channel); | ||
150 | void edma_pause(unsigned channel); | ||
151 | void edma_resume(unsigned channel); | ||
152 | |||
153 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); | ||
154 | |||
155 | struct edma_rsv_info { | 56 | struct edma_rsv_info { |
156 | 57 | ||
157 | const s16 (*rsv_chans)[2]; | 58 | const s16 (*rsv_chans)[2]; |
@@ -170,10 +71,11 @@ struct edma_soc_info { | |||
170 | /* Resource reservation for other cores */ | 71 | /* Resource reservation for other cores */ |
171 | struct edma_rsv_info *rsv; | 72 | struct edma_rsv_info *rsv; |
172 | 73 | ||
74 | /* List of channels allocated for memcpy, terminated with -1 */ | ||
75 | s16 *memcpy_channels; | ||
76 | |||
173 | s8 (*queue_priority_mapping)[2]; | 77 | s8 (*queue_priority_mapping)[2]; |
174 | const s16 (*xbar_chans)[2]; | 78 | const s16 (*xbar_chans)[2]; |
175 | }; | 79 | }; |
176 | 80 | ||
177 | int edma_trigger_channel(unsigned); | ||
178 | |||
179 | #endif | 81 | #endif |
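After this cleanup, edma.h carries only what the dmaengine driver and board files consume; the removed PaRAM and channel-control API now lives inside the driver. The new memcpy_channels field is documented as a -1-terminated list, which board code would populate and the driver walk roughly like this (values and the helper are hypothetical):

	typedef short s16;	/* stand-in for the kernel type */

	extern void reserve_channel(int ch);	/* assumed helper */

	/* Hypothetical board data: channels 20 and 21 set aside for memcpy,
	 * terminated with -1 as the header comment specifies. */
	static const s16 memcpy_channels[] = { 20, 21, -1 };

	static void reserve_memcpy_channels(void)
	{
		int i;

		for (i = 0; memcpy_channels[i] != -1; i++)
			reserve_channel(memcpy_channels[i]);
	}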