-rw-r--r-- Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt  |  111
-rw-r--r-- Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt |  232
-rw-r--r-- MAINTAINERS                                                          |    9
-rw-r--r-- drivers/Kconfig                                                      |    2
-rw-r--r-- drivers/soc/Kconfig                                                  |    1
-rw-r--r-- drivers/soc/Makefile                                                 |    1
-rw-r--r-- drivers/soc/ti/Kconfig                                               |   31
-rw-r--r-- drivers/soc/ti/Makefile                                              |    5
-rw-r--r-- drivers/soc/ti/knav_dma.c                                            |  815
-rw-r--r-- drivers/soc/ti/knav_qmss.h                                           |  386
-rw-r--r-- drivers/soc/ti/knav_qmss_acc.c                                       |  591
-rw-r--r-- drivers/soc/ti/knav_qmss_queue.c                                     | 1816
-rw-r--r-- include/linux/soc/ti/knav_dma.h                                      |  175
-rw-r--r-- include/linux/soc/ti/knav_qmss.h                                     |   90
14 files changed, 4265 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt
new file mode 100644
index 000000000000..337c4ea5c57b
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt
@@ -0,0 +1,111 @@
1 | Keystone Navigator DMA Controller | ||
2 | |||
3 | This document explains the device tree bindings for the packet DMA
4 | on Keystone devices. The Keystone Navigator DMA driver sets up the DMA
5 | channels and flows for the QMSS (Queue Manager SubSystem), which triggers
6 | the actual data movements across clients using destination queues. Every
7 | client module, such as NETCP (Network Coprocessor), SRIO (Serial Rapid IO)
8 | and the CRYPTO engines, has its own instance of the DMA hardware. QMSS
9 | also has an internal packet DMA module, which is used as an infrastructure
10 | DMA with zero copy.
11 | |||
12 | Navigator DMA cloud layout: | ||
13 | ------------------ | ||
14 | | Navigator DMAs | | ||
15 | ------------------ | ||
16 | | | ||
17 | |-> DMA instance #0 | ||
18 | | | ||
19 | |-> DMA instance #1 | ||
20 | . | ||
21 | . | ||
22 | | | ||
23 | |-> DMA instance #n | ||
24 | |||
25 | Navigator DMA properties: | ||
26 | Required properties: | ||
27 | - compatible: Should be "ti,keystone-navigator-dma" | ||
28 | - clocks: phandle to dma instances clocks. The clock handles can be as | ||
29 | many as the dma instances. The order should be maintained as per | ||
30 | the dma instances. | ||
31 | - ti,navigator-cloud-address: Should contain the base addresses for the
32 | multi-core navigator cloud; the number of addresses depends on the SoC
33 | integration configuration. The navigator cloud global addresses need to be
34 | programmed into the DMA, which uses them as the physical addresses to reach
35 | the queue managers. Note that although these addresses point to queue
36 | managers, they are relevant only from the DMA's perspective. The QMSS may
37 | not use them, since it has a different address space view to reach all
38 | its components.
39 | |||
40 | DMA instance properties: | ||
41 | Required properties: | ||
42 | - reg: Should contain register location and length of the following dma | ||
43 | register regions. Register regions should be specified in the following | ||
44 | order. | ||
45 | - Global control register region (global). | ||
46 | - Tx DMA channel configuration register region (txchan). | ||
47 | - Rx DMA channel configuration register region (rxchan). | ||
48 | - Tx DMA channel Scheduler configuration register region (txsched). | ||
49 | - Rx DMA flow configuration register region (rxflow). | ||
50 | |||
51 | Optional properties: | ||
52 | - reg-names: Names for the register regions. | ||
53 | - ti,enable-all: Enable all DMA channels, instead of having clients open
54 | only the specific channels they need. This property is useful for the
55 | userspace fast-path case, where the Linux driver enables the channels
56 | used by the userland stack.
57 | - ti,loop-back: To loopback Tx streaming I/F to Rx streaming I/F. Used for | ||
58 | infrastructure transfers. | ||
59 | - ti,rx-retry-timeout: Number of dma cycles to wait before retry on buffer | ||
60 | starvation. | ||
61 | |||
62 | Example: | ||
63 | |||
64 | knav_dmas: knav_dmas@0 { | ||
65 | compatible = "ti,keystone-navigator-dma"; | ||
66 | clocks = <&papllclk>, <&clkxge>; | ||
67 | #address-cells = <1>; | ||
68 | #size-cells = <1>; | ||
69 | ranges; | ||
70 | ti,navigator-cloud-address = <0x23a80000 0x23a90000 | ||
71 | 0x23aa0000 0x23ab0000>; | ||
72 | |||
73 | dma_gbe: dma_gbe@0 { | ||
74 | reg = <0x2004000 0x100>, | ||
75 | <0x2004400 0x120>, | ||
76 | <0x2004800 0x300>, | ||
77 | <0x2004c00 0x120>, | ||
78 | <0x2005000 0x400>; | ||
79 | reg-names = "global", "txchan", "rxchan", | ||
80 | "txsched", "rxflow"; | ||
81 | }; | ||
82 | |||
83 | dma_xgbe: dma_xgbe@0 { | ||
84 | reg = <0x2fa1000 0x100>, | ||
85 | <0x2fa1400 0x200>, | ||
86 | <0x2fa1800 0x200>, | ||
87 | <0x2fa1c00 0x200>, | ||
88 | <0x2fa2000 0x400>; | ||
89 | reg-names = "global", "txchan", "rxchan", | ||
90 | "txsched", "rxflow"; | ||
91 | }; | ||
92 | }; | ||
93 | |||
94 | Navigator DMA client: | ||
95 | Required properties: | ||
96 | - ti,navigator-dmas: List of one or more DMA specifiers, each consisting of | ||
97 | - A phandle pointing to DMA instance node | ||
98 | - A DMA channel number as a phandle arg. | ||
99 | - ti,navigator-dma-names: Contains dma channel name for each DMA specifier in | ||
100 | the 'ti,navigator-dmas' property. | ||
101 | |||
102 | Example: | ||
103 | |||
104 | netcp: netcp@2090000 { | ||
105 | .. | ||
106 | ti,navigator-dmas = <&dma_gbe 22>, | ||
107 | <&dma_gbe 23>, | ||
108 | <&dma_gbe 8>; | ||
109 | ti,navigator-dma-names = "netrx0", "netrx1", "nettx"; | ||
110 | .. | ||
111 | }; | ||
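
As an illustration of how a client consumes these bindings at runtime, here is a
minimal sketch using the knav_dma API added by this patch
(include/linux/soc/ti/knav_dma.h). The queue numbers and the error handling
policy are assumptions chosen for the example; only the "netrx0" name is taken
from the binding example above.

	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/soc/ti/knav_dma.h>

	/* Hypothetical client: open the "netrx0" flow declared above */
	static void *client_open_rx_flow(struct device *dev)
	{
		struct knav_dma_cfg config;

		memset(&config, 0, sizeof(config));
		config.direction = DMA_DEV_TO_MEM;
		config.u.rx.einfo_present = true;   /* descriptors carry EPIB */
		config.u.rx.psinfo_present = true;  /* and PS words */
		config.u.rx.err_mode = DMA_RETRY;   /* retry on buffer starvation */
		config.u.rx.dst_q = 8704;           /* completion queue (assumed) */
		config.u.rx.fdq[0] = 4001;          /* free descriptor queue (assumed) */

		/* "netrx0" must match an entry in ti,navigator-dma-names */
		return knav_dma_open_channel(dev, "netrx0", &config);
	}

On success, the returned handle is later passed to knav_dma_close_channel();
on failure the driver returns an error-valued pointer, so check it with IS_ERR().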
diff --git a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
new file mode 100644
index 000000000000..d8e8cdb733f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
@@ -0,0 +1,232 @@
1 | * Texas Instruments Keystone Navigator Queue Management SubSystem driver | ||
2 | |||
3 | The QMSS (Queue Manager Sub System) found on Keystone SoCs is one of
4 | the main hardware subsystems, forming the backbone of the Keystone
5 | multi-core Navigator. QMSS consists of queue managers, packed-data structure
6 | processors (PDSPs), linking RAM, descriptor pools and an infrastructure
7 | Packet DMA.
8 | The Queue Manager is a hardware module that is responsible for accelerating
9 | management of the packet queues. Packets are queued/de-queued by writing or
10 | reading a descriptor address at a particular memory mapped location. The PDSPs
11 | perform QMSS-related functions like accumulation, QoS, or event management.
12 | Linking RAM registers are used to link the descriptors which are stored in
13 | descriptor RAM. Descriptor RAM is configurable as internal or external memory.
14 | The QMSS driver manages the PDSP setup, linking RAM regions,
15 | queue pool management (allocation, push, pop and notify) and descriptor
16 | pool management.
17 | |||
18 | |||
19 | Required properties: | ||
20 | - compatible : Must be "ti,keystone-navigator-qmss"; | ||
21 | - clocks : phandle to the reference clock for this device. | ||
22 | - queue-range : <start number> total range of queue numbers for the device. | ||
23 | - linkram0 : <address size> for internal link ram, where size is the total | ||
24 | link ram entries. | ||
25 | - linkram1 : <address size> for external link ram, where size is the total | ||
26 | external link ram entries. If the address is specified as "0",
27 | the driver will allocate the memory.
28 | - qmgrs : child node describing the individual queue managers on the | ||
29 | SoC. On keystone 1 devices there should be only one node. | ||
30 | On keystone 2 devices there can be more than 1 node. | ||
31 | -- managed-queues : the actual queues managed by each queue manager | ||
32 | instance, specified as <"base queue #" "# of queues">. | ||
33 | -- reg : Address and size of the register set for the device. | ||
34 | Register regions should be specified in the following | ||
35 | order | ||
36 | - Queue Peek region. | ||
37 | - Queue status RAM. | ||
38 | - Queue configuration region. | ||
39 | - Descriptor memory setup region. | ||
40 | - Queue Management/Queue Proxy region for queue Push. | ||
41 | - Queue Management/Queue Proxy region for queue Pop. | ||
42 | - queue-pools : child node classifying the queue ranges into pools. | ||
43 | Queue ranges are grouped into 3 type of pools: | ||
44 | - qpend : pool of qpend(interruptible) queues | ||
45 | - general-purpose : pool of general queues, primarily used
46 | as free descriptor queues or the | ||
47 | transmit DMA queues. | ||
48 | - accumulator : pool of queues on PDSP accumulator channel | ||
49 | Each range can have the following properties: | ||
50 | -- qrange : number of queues to use per queue range, specified as | ||
51 | <"base queue #" "# of queues">. | ||
52 | -- interrupts : Optional property to specify the interrupt mapping | ||
53 | for interruptible queues. The driver additionally sets
54 | the interrupt affinity hint based on the cpu mask. | ||
55 | -- qalloc-by-id : Optional property to specify that the queues in this | ||
56 | range can only be allocated by queue id. | ||
57 | -- accumulator : Accumulator channel specification. Any of the PDSPs in | ||
58 | QMSS can be loaded with the accumulator firmware. The | ||
59 | accumulator firmware’s job is to poll a select number of | ||
60 | queues looking for descriptors that have been pushed | ||
61 | into them. Descriptors are popped from the queue and | ||
62 | placed in a buffer provided by the host. When the list | ||
63 | becomes full or a programmed time period expires, the | ||
64 | accumulator triggers an interrupt to the host to read | ||
65 | the buffer for descriptor information. This firmware | ||
66 | comes in 16, 32, and 48 channel builds. Each of these | ||
67 | channels can be configured to monitor 32 contiguous | ||
68 | queues. Accumulator channel property is specified as: | ||
69 | <pdsp-id, channel, entries, pacing mode, latency> | ||
70 | pdsp-id : QMSS PDSP running accumulator firmware | ||
71 | on which the channel has to be | ||
72 | configured | ||
73 | channel : Accumulator channel number | ||
74 | entries : Size of the accumulator descriptor list | ||
75 | pacing mode : Interrupt pacing mode | ||
76 | 0 : None, i.e. interrupt on list full only
77 | 1 : Time delay since last interrupt | ||
78 | 2 : Time delay since first new packet | ||
79 | 3 : Time delay since last new packet | ||
80 | latency : time to delay the interrupt, specified | ||
81 | in microseconds. | ||
82 | -- multi-queue : Optional property to specify that the channel has to | ||
83 | monitor up to 32 queues starting at the base queue #.
84 | - descriptor-regions : child node describing the memory regions for keystone | ||
85 | navigator packet DMA descriptors. The memory for | ||
86 | descriptors will be allocated by the driver. | ||
87 | -- id : region number in QMSS. | ||
88 | -- region-spec : specifies the number of descriptors in the | ||
89 | region, specified as | ||
90 | <"# of descriptors" "descriptor size">. | ||
91 | -- link-index : start index, i.e. index of the first | ||
92 | descriptor in the region. | ||
93 | |||
94 | Optional properties: | ||
95 | - dma-coherent : Present if DMA operations are coherent. | ||
96 | - pdsps : child node describing the PDSP configuration. | ||
97 | -- firmware : firmware to be loaded on the PDSP. | ||
98 | -- id : the qmss pdsp that will run the firmware. | ||
99 | -- reg : Address and size of the register set for the PDSP. | ||
100 | Register regions should be specified in the following | ||
101 | order | ||
102 | - PDSP internal RAM region. | ||
103 | - PDSP control/status region registers. | ||
104 | - QMSS interrupt distributor registers. | ||
105 | - PDSP command interface region. | ||
106 | |||
107 | Example: | ||
108 | |||
109 | qmss: qmss@2a40000 { | ||
110 | compatible = "ti,keystone-navigator-qmss";
111 | dma-coherent; | ||
112 | #address-cells = <1>; | ||
113 | #size-cells = <1>; | ||
114 | clocks = <&chipclk13>; | ||
115 | ranges; | ||
116 | queue-range = <0 0x4000>; | ||
117 | linkram0 = <0x100000 0x8000>; | ||
118 | linkram1 = <0x0 0x10000>; | ||
119 | |||
120 | qmgrs { | ||
121 | #address-cells = <1>; | ||
122 | #size-cells = <1>; | ||
123 | ranges; | ||
124 | qmgr0 { | ||
125 | managed-queues = <0 0x2000>; | ||
126 | reg = <0x2a40000 0x20000>, | ||
127 | <0x2a06000 0x400>, | ||
128 | <0x2a02000 0x1000>, | ||
129 | <0x2a03000 0x1000>, | ||
130 | <0x23a80000 0x20000>, | ||
131 | <0x2a80000 0x20000>; | ||
132 | }; | ||
133 | |||
134 | qmgr1 { | ||
135 | managed-queues = <0x2000 0x2000>; | ||
136 | reg = <0x2a60000 0x20000>, | ||
137 | <0x2a06400 0x400>, | ||
138 | <0x2a04000 0x1000>, | ||
139 | <0x2a05000 0x1000>, | ||
140 | <0x23aa0000 0x20000>, | ||
141 | <0x2aa0000 0x20000>; | ||
142 | }; | ||
143 | }; | ||
144 | queue-pools { | ||
145 | qpend { | ||
146 | qpend-0 { | ||
147 | qrange = <658 8>; | ||
148 | interrupts =<0 40 0xf04 0 41 0xf04 0 42 0xf04 | ||
149 | 0 43 0xf04 0 44 0xf04 0 45 0xf04 | ||
150 | 0 46 0xf04 0 47 0xf04>; | ||
151 | }; | ||
152 | qpend-1 { | ||
153 | qrange = <8704 16>; | ||
154 | interrupts = <0 48 0xf04 0 49 0xf04 0 50 0xf04 | ||
155 | 0 51 0xf04 0 52 0xf04 0 53 0xf04 | ||
156 | 0 54 0xf04 0 55 0xf04 0 56 0xf04 | ||
157 | 0 57 0xf04 0 58 0xf04 0 59 0xf04 | ||
158 | 0 60 0xf04 0 61 0xf04 0 62 0xf04 | ||
159 | 0 63 0xf04>; | ||
160 | qalloc-by-id; | ||
161 | }; | ||
162 | qpend-2 { | ||
163 | qrange = <8720 16>; | ||
164 | interrupts = <0 64 0xf04 0 65 0xf04 0 66 0xf04 | ||
165 | 0 67 0xf04 0 68 0xf04 0 69 0xf04
166 | 0 70 0xf04 0 71 0xf04 0 72 0xf04 | ||
167 | 0 73 0xf04 0 74 0xf04 0 75 0xf04 | ||
168 | 0 76 0xf04 0 77 0xf04 0 78 0xf04 | ||
169 | 0 79 0xf04>; | ||
170 | }; | ||
171 | }; | ||
172 | general-purpose { | ||
173 | gp-0 { | ||
174 | qrange = <4000 64>; | ||
175 | }; | ||
176 | netcp-tx { | ||
177 | qrange = <640 9>; | ||
178 | qalloc-by-id; | ||
179 | }; | ||
180 | }; | ||
181 | accumulator { | ||
182 | acc-0 { | ||
183 | qrange = <128 32>; | ||
184 | accumulator = <0 36 16 2 50>; | ||
185 | interrupts = <0 215 0xf01>; | ||
186 | multi-queue; | ||
187 | qalloc-by-id; | ||
188 | }; | ||
189 | acc-1 { | ||
190 | qrange = <160 32>; | ||
191 | accumulator = <0 37 16 2 50>; | ||
192 | interrupts = <0 216 0xf01>; | ||
193 | multi-queue; | ||
194 | }; | ||
195 | acc-2 { | ||
196 | qrange = <192 32>; | ||
197 | accumulator = <0 38 16 2 50>; | ||
198 | interrupts = <0 217 0xf01>; | ||
199 | multi-queue; | ||
200 | }; | ||
201 | acc-3 { | ||
202 | qrange = <224 32>; | ||
203 | accumulator = <0 39 16 2 50>; | ||
204 | interrupts = <0 218 0xf01>; | ||
205 | multi-queue; | ||
206 | }; | ||
207 | }; | ||
208 | }; | ||
209 | descriptor-regions { | ||
210 | #address-cells = <1>; | ||
211 | #size-cells = <1>; | ||
212 | ranges; | ||
213 | region-12 { | ||
214 | id = <12>; | ||
215 | region-spec = <8192 128>; /* num_desc desc_size */ | ||
216 | link-index = <0x4000>; | ||
217 | }; | ||
218 | }; | ||
219 | pdsps { | ||
220 | #address-cells = <1>; | ||
221 | #size-cells = <1>; | ||
222 | ranges; | ||
223 | pdsp0@2a10000 {
224 | firmware = "keystone/qmss_pdsp_acc48_k2_le_1_0_0_8.fw"; | ||
225 | reg = <0x2a10000 0x1000>, | ||
226 | <0x2a0f000 0x100>, | ||
227 | <0x2a0c000 0x3c8>, | ||
228 | <0x2a20000 0x4000>; | ||
229 | id = <0>; | ||
230 | }; | ||
231 | }; | ||
232 | }; /* qmss */ | ||
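
To connect the binding to the software interface it ultimately serves, below is
a minimal usage sketch against the exported knav_qmss API from
include/linux/soc/ti/knav_qmss.h in this patch. The queue name and descriptor
size are illustrative; the queue number is borrowed from the qpend-1 range in
the example above.

	#include <linux/err.h>
	#include <linux/soc/ti/knav_qmss.h>

	/* Push one descriptor through queue 8704 and pop it back */
	static int example_queue_io(dma_addr_t desc_dma)
	{
		unsigned size;
		void *q;

		q = knav_queue_open("example-rx", 8704, 0);
		if (IS_ERR(q))
			return PTR_ERR(q);

		/* queuing is a single write of the descriptor address... */
		knav_queue_push(q, desc_dma, 64, 0);

		/* ...and de-queuing is a single read */
		desc_dma = knav_queue_pop(q, &size);

		knav_queue_close(q);
		return 0;
	}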
diff --git a/MAINTAINERS b/MAINTAINERS
index 93e6a92a951d..08f6277a7df3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9138,6 +9138,15 @@ F: drivers/misc/tifm*
9138 | F: drivers/mmc/host/tifm_sd.c
9139 | F: include/linux/tifm.h
9140 |
9141 | TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
9142 | M: Santosh Shilimkar <santosh.shilimkar@ti.com>
9143 | L: linux-kernel@vger.kernel.org
9144 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9145 | S: Maintained
9146 | F: drivers/soc/ti/*
9147 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
9148 |
9149 |
9150 | TI LM49xxx FAMILY ASoC CODEC DRIVERS
9151 | M: M R Swami Reddy <mr.swami.reddy@ti.com>
9152 | M: Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 622fa266b29e..1a693d3f9d51 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -148,6 +148,8 @@ source "drivers/remoteproc/Kconfig"
148 |
149 | source "drivers/rpmsg/Kconfig"
150 |
151 | source "drivers/soc/Kconfig"
152 |
153 | source "drivers/devfreq/Kconfig"
154 |
155 | source "drivers/extcon/Kconfig"
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c8543855aa82..49e3f0cc71af 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,5 +1,6 @@
1 | menu "SOC (System On Chip) specific Drivers"
2 |
3 | source "drivers/soc/qcom/Kconfig"
4 | source "drivers/soc/ti/Kconfig"
5 |
6 | endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 3b1b95d932d1..0d6e35dfea8c 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,3 +4,4 @@
4 |
5 | obj-$(CONFIG_ARCH_QCOM) += qcom/
6 | obj-$(CONFIG_ARCH_TEGRA) += tegra/
7 | obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 000000000000..7266b2165183
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,31 @@
1 | # | ||
2 | # TI SOC drivers | ||
3 | # | ||
4 | menuconfig SOC_TI | ||
5 | bool "TI SOC drivers support" | ||
6 | |||
7 | if SOC_TI | ||
8 | |||
9 | config KEYSTONE_NAVIGATOR_QMSS | ||
10 | tristate "Keystone Queue Manager Sub System" | ||
11 | depends on ARCH_KEYSTONE | ||
12 | help | ||
13 | Say y here to enable support for the Keystone multicore Navigator
14 | Queue Manager. The Queue Manager is a hardware module that
15 | is responsible for accelerating management of the packet queues.
16 | Packets are queued/de-queued by writing/reading a descriptor address
17 | to/from a particular memory mapped location in the Queue Manager module.
18 | |||
19 | If unsure, say N. | ||
20 | |||
21 | config KEYSTONE_NAVIGATOR_DMA | ||
22 | tristate "TI Keystone Navigator Packet DMA support" | ||
23 | depends on ARCH_KEYSTONE | ||
24 | help | ||
25 | Say y to enable support for the Keystone Navigator Packet DMA
26 | on the Keystone family of devices. It sets up the DMA channels
27 | for the Queue Manager Sub System.
28 | |||
29 | If unsure, say N. | ||
30 | |||
31 | endif # SOC_TI | ||
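
For reference, a Keystone board configuration would typically select the
menuconfig gate together with both drivers; the two Navigator options are
tristate, so =m also works:

	CONFIG_SOC_TI=y
	CONFIG_KEYSTONE_NAVIGATOR_QMSS=y
	CONFIG_KEYSTONE_NAVIGATOR_DMA=y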
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 000000000000..6bed611e1934
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,5 @@
1 | # | ||
2 | # TI Keystone SOC drivers | ||
3 | # | ||
4 | obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o | ||
5 | obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o | ||
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 000000000000..17264275f32b
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,815 @@
1 | /* | ||
2 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
3 | * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
4 | * Sandeep Nair <sandeep_n@ti.com> | ||
5 | * Cyril Chemparathy <cyril@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation version 2. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/io.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/dma-direction.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | #include <linux/of_dma.h> | ||
24 | #include <linux/of_address.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/soc/ti/knav_dma.h> | ||
27 | #include <linux/debugfs.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | |||
30 | #define REG_MASK 0xffffffff | ||
31 | |||
32 | #define DMA_LOOPBACK BIT(31) | ||
33 | #define DMA_ENABLE BIT(31) | ||
34 | #define DMA_TEARDOWN BIT(30) | ||
35 | |||
36 | #define DMA_TX_FILT_PSWORDS BIT(29) | ||
37 | #define DMA_TX_FILT_EINFO BIT(30) | ||
38 | #define DMA_TX_PRIO_SHIFT 0 | ||
39 | #define DMA_RX_PRIO_SHIFT 16 | ||
40 | #define DMA_PRIO_MASK GENMASK(3, 0) | ||
41 | #define DMA_PRIO_DEFAULT 0 | ||
42 | #define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */ | ||
43 | #define DMA_RX_TIMEOUT_MASK GENMASK(16, 0) | ||
44 | #define DMA_RX_TIMEOUT_SHIFT 0 | ||
45 | |||
46 | #define CHAN_HAS_EPIB BIT(30) | ||
47 | #define CHAN_HAS_PSINFO BIT(29) | ||
48 | #define CHAN_ERR_RETRY BIT(28) | ||
49 | #define CHAN_PSINFO_AT_SOP BIT(25) | ||
50 | #define CHAN_SOP_OFF_SHIFT 16 | ||
51 | #define CHAN_SOP_OFF_MASK GENMASK(9, 0) | ||
52 | #define DESC_TYPE_SHIFT 26 | ||
53 | #define DESC_TYPE_MASK GENMASK(2, 0) | ||
54 | |||
55 | /* | ||
56 | * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical | ||
57 | * navigator cloud mapping scheme. | ||
58 | Using the 14-bit physical queue numbers directly maps into this scheme.
59 | */ | ||
60 | #define CHAN_QNUM_MASK GENMASK(14, 0) | ||
61 | #define DMA_MAX_QMS 4 | ||
62 | #define DMA_TIMEOUT 1 /* msecs */ | ||
63 | #define DMA_INVALID_ID 0xffff | ||
64 | |||
65 | struct reg_global { | ||
66 | u32 revision; | ||
67 | u32 perf_control; | ||
68 | u32 emulation_control; | ||
69 | u32 priority_control; | ||
70 | u32 qm_base_address[DMA_MAX_QMS]; | ||
71 | }; | ||
72 | |||
73 | struct reg_chan { | ||
74 | u32 control; | ||
75 | u32 mode; | ||
76 | u32 __rsvd[6]; | ||
77 | }; | ||
78 | |||
79 | struct reg_tx_sched { | ||
80 | u32 prio; | ||
81 | }; | ||
82 | |||
83 | struct reg_rx_flow { | ||
84 | u32 control; | ||
85 | u32 tags; | ||
86 | u32 tag_sel; | ||
87 | u32 fdq_sel[2]; | ||
88 | u32 thresh[3]; | ||
89 | }; | ||
90 | |||
91 | struct knav_dma_pool_device { | ||
92 | struct device *dev; | ||
93 | struct list_head list; | ||
94 | }; | ||
95 | |||
96 | struct knav_dma_device { | ||
97 | bool loopback, enable_all; | ||
98 | unsigned tx_priority, rx_priority, rx_timeout; | ||
99 | unsigned logical_queue_managers; | ||
100 | unsigned qm_base_address[DMA_MAX_QMS]; | ||
101 | struct reg_global __iomem *reg_global; | ||
102 | struct reg_chan __iomem *reg_tx_chan; | ||
103 | struct reg_rx_flow __iomem *reg_rx_flow; | ||
104 | struct reg_chan __iomem *reg_rx_chan; | ||
105 | struct reg_tx_sched __iomem *reg_tx_sched; | ||
106 | unsigned max_rx_chan, max_tx_chan; | ||
107 | unsigned max_rx_flow; | ||
108 | char name[32]; | ||
109 | atomic_t ref_count; | ||
110 | struct list_head list; | ||
111 | struct list_head chan_list; | ||
112 | spinlock_t lock; | ||
113 | }; | ||
114 | |||
115 | struct knav_dma_chan { | ||
116 | enum dma_transfer_direction direction; | ||
117 | struct knav_dma_device *dma; | ||
118 | atomic_t ref_count; | ||
119 | |||
120 | /* registers */ | ||
121 | struct reg_chan __iomem *reg_chan; | ||
122 | struct reg_tx_sched __iomem *reg_tx_sched; | ||
123 | struct reg_rx_flow __iomem *reg_rx_flow; | ||
124 | |||
125 | /* configuration stuff */ | ||
126 | unsigned channel, flow; | ||
127 | struct knav_dma_cfg cfg; | ||
128 | struct list_head list; | ||
129 | spinlock_t lock; | ||
130 | }; | ||
131 | |||
132 | #define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \ | ||
133 | ch->channel : ch->flow) | ||
134 | |||
135 | static struct knav_dma_pool_device *kdev; | ||
136 | |||
137 | static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg) | ||
138 | { | ||
139 | if (!memcmp(&chan->cfg, cfg, sizeof(*cfg))) | ||
140 | return true; | ||
141 | else | ||
142 | return false; | ||
143 | } | ||
144 | |||
145 | static int chan_start(struct knav_dma_chan *chan, | ||
146 | struct knav_dma_cfg *cfg) | ||
147 | { | ||
148 | u32 v = 0; | ||
149 | |||
150 | spin_lock(&chan->lock); | ||
151 | if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) { | ||
152 | if (cfg->u.tx.filt_pswords) | ||
153 | v |= DMA_TX_FILT_PSWORDS; | ||
154 | if (cfg->u.tx.filt_einfo) | ||
155 | v |= DMA_TX_FILT_EINFO; | ||
156 | writel_relaxed(v, &chan->reg_chan->mode); | ||
157 | writel_relaxed(DMA_ENABLE, &chan->reg_chan->control); | ||
158 | } | ||
159 | |||
160 | if (chan->reg_tx_sched) | ||
161 | writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio); | ||
162 | |||
163 | if (chan->reg_rx_flow) { | ||
164 | v = 0; | ||
165 | |||
166 | if (cfg->u.rx.einfo_present) | ||
167 | v |= CHAN_HAS_EPIB; | ||
168 | if (cfg->u.rx.psinfo_present) | ||
169 | v |= CHAN_HAS_PSINFO; | ||
170 | if (cfg->u.rx.err_mode == DMA_RETRY) | ||
171 | v |= CHAN_ERR_RETRY; | ||
172 | v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT; | ||
173 | if (cfg->u.rx.psinfo_at_sop) | ||
174 | v |= CHAN_PSINFO_AT_SOP; | ||
175 | v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK) | ||
176 | << CHAN_SOP_OFF_SHIFT; | ||
177 | v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK; | ||
178 | |||
179 | writel_relaxed(v, &chan->reg_rx_flow->control); | ||
180 | writel_relaxed(0, &chan->reg_rx_flow->tags); | ||
181 | writel_relaxed(0, &chan->reg_rx_flow->tag_sel); | ||
182 | |||
183 | v = cfg->u.rx.fdq[0] << 16; | ||
184 | v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK; | ||
185 | writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]); | ||
186 | |||
187 | v = cfg->u.rx.fdq[2] << 16; | ||
188 | v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK; | ||
189 | writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]); | ||
190 | |||
191 | writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); | ||
192 | writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); | ||
193 | writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); | ||
194 | } | ||
195 | |||
196 | /* Keep a copy of the cfg */ | ||
197 | memcpy(&chan->cfg, cfg, sizeof(*cfg)); | ||
198 | spin_unlock(&chan->lock); | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static int chan_teardown(struct knav_dma_chan *chan) | ||
204 | { | ||
205 | unsigned long end, value; | ||
206 | |||
207 | if (!chan->reg_chan) | ||
208 | return 0; | ||
209 | |||
210 | /* indicate teardown */ | ||
211 | writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control); | ||
212 | |||
213 | /* wait for the dma to shut itself down */ | ||
214 | end = jiffies + msecs_to_jiffies(DMA_TIMEOUT); | ||
215 | do { | ||
216 | value = readl_relaxed(&chan->reg_chan->control); | ||
217 | if ((value & DMA_ENABLE) == 0) | ||
218 | break; | ||
219 | } while (time_after(end, jiffies)); | ||
220 | |||
221 | if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) { | ||
222 | dev_err(kdev->dev, "timeout waiting for teardown\n"); | ||
223 | return -ETIMEDOUT; | ||
224 | } | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static void chan_stop(struct knav_dma_chan *chan) | ||
230 | { | ||
231 | spin_lock(&chan->lock); | ||
232 | if (chan->reg_rx_flow) { | ||
233 | /* first detach fdqs, starve out the flow */ | ||
234 | writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]); | ||
235 | writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]); | ||
236 | writel_relaxed(0, &chan->reg_rx_flow->thresh[0]); | ||
237 | writel_relaxed(0, &chan->reg_rx_flow->thresh[1]); | ||
238 | writel_relaxed(0, &chan->reg_rx_flow->thresh[2]); | ||
239 | } | ||
240 | |||
241 | /* teardown the dma channel */ | ||
242 | chan_teardown(chan); | ||
243 | |||
244 | /* then disconnect the completion side */ | ||
245 | if (chan->reg_rx_flow) { | ||
246 | writel_relaxed(0, &chan->reg_rx_flow->control); | ||
247 | writel_relaxed(0, &chan->reg_rx_flow->tags); | ||
248 | writel_relaxed(0, &chan->reg_rx_flow->tag_sel); | ||
249 | } | ||
250 | |||
251 | memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg)); | ||
252 | spin_unlock(&chan->lock); | ||
253 | |||
254 | dev_dbg(kdev->dev, "channel stopped\n"); | ||
255 | } | ||
256 | |||
257 | static void dma_hw_enable_all(struct knav_dma_device *dma) | ||
258 | { | ||
259 | int i; | ||
260 | |||
261 | for (i = 0; i < dma->max_tx_chan; i++) { | ||
262 | writel_relaxed(0, &dma->reg_tx_chan[i].mode); | ||
263 | writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | |||
268 | static void knav_dma_hw_init(struct knav_dma_device *dma) | ||
269 | { | ||
270 | unsigned v; | ||
271 | int i; | ||
272 | |||
273 | spin_lock(&dma->lock); | ||
274 | v = dma->loopback ? DMA_LOOPBACK : 0; | ||
275 | writel_relaxed(v, &dma->reg_global->emulation_control); | ||
276 | |||
277 | v = readl_relaxed(&dma->reg_global->perf_control); | ||
278 | v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT); | ||
279 | writel_relaxed(v, &dma->reg_global->perf_control); | ||
280 | |||
281 | v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) | | ||
282 | (dma->rx_priority << DMA_RX_PRIO_SHIFT)); | ||
283 | |||
284 | writel_relaxed(v, &dma->reg_global->priority_control); | ||
285 | |||
286 | /* Always enable all Rx channels. Rx paths are managed using flows */ | ||
287 | for (i = 0; i < dma->max_rx_chan; i++) | ||
288 | writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control); | ||
289 | |||
290 | for (i = 0; i < dma->logical_queue_managers; i++) | ||
291 | writel_relaxed(dma->qm_base_address[i], | ||
292 | &dma->reg_global->qm_base_address[i]); | ||
293 | spin_unlock(&dma->lock); | ||
294 | } | ||
295 | |||
296 | static void knav_dma_hw_destroy(struct knav_dma_device *dma) | ||
297 | { | ||
298 | int i; | ||
299 | unsigned v; | ||
300 | |||
301 | spin_lock(&dma->lock); | ||
302 | v = ~DMA_ENABLE & REG_MASK; | ||
303 | |||
304 | for (i = 0; i < dma->max_rx_chan; i++) | ||
305 | writel_relaxed(v, &dma->reg_rx_chan[i].control); | ||
306 | |||
307 | for (i = 0; i < dma->max_tx_chan; i++) | ||
308 | writel_relaxed(v, &dma->reg_tx_chan[i].control); | ||
309 | spin_unlock(&dma->lock); | ||
310 | } | ||
311 | |||
312 | static void dma_debug_show_channels(struct seq_file *s, | ||
313 | struct knav_dma_chan *chan) | ||
314 | { | ||
315 | int i; | ||
316 | |||
317 | seq_printf(s, "\t%s %d:\t", | ||
318 | ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"), | ||
319 | chan_number(chan)); | ||
320 | |||
321 | if (chan->direction == DMA_MEM_TO_DEV) { | ||
322 | seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n", | ||
323 | chan->cfg.u.tx.filt_einfo, | ||
324 | chan->cfg.u.tx.filt_pswords, | ||
325 | chan->cfg.u.tx.priority); | ||
326 | } else { | ||
327 | seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n", | ||
328 | chan->cfg.u.rx.einfo_present, | ||
329 | chan->cfg.u.rx.psinfo_present, | ||
330 | chan->cfg.u.rx.desc_type); | ||
331 | seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ", | ||
332 | chan->cfg.u.rx.dst_q, | ||
333 | chan->cfg.u.rx.thresh); | ||
334 | for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++) | ||
335 | seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]); | ||
336 | seq_printf(s, "\n"); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | static void dma_debug_show_devices(struct seq_file *s, | ||
341 | struct knav_dma_device *dma) | ||
342 | { | ||
343 | struct knav_dma_chan *chan; | ||
344 | |||
345 | list_for_each_entry(chan, &dma->chan_list, list) { | ||
346 | if (atomic_read(&chan->ref_count)) | ||
347 | dma_debug_show_channels(s, chan); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | static int dma_debug_show(struct seq_file *s, void *v) | ||
352 | { | ||
353 | struct knav_dma_device *dma; | ||
354 | |||
355 | list_for_each_entry(dma, &kdev->list, list) { | ||
356 | if (atomic_read(&dma->ref_count)) { | ||
357 | seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n", | ||
358 | dma->name, dma->max_tx_chan, dma->max_rx_flow); | ||
359 | dma_debug_show_devices(s, dma); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int knav_dma_debug_open(struct inode *inode, struct file *file) | ||
367 | { | ||
368 | return single_open(file, dma_debug_show, NULL); | ||
369 | } | ||
370 | |||
371 | static const struct file_operations knav_dma_debug_ops = { | ||
372 | .open = knav_dma_debug_open, | ||
373 | .read = seq_read, | ||
374 | .llseek = seq_lseek, | ||
375 | .release = single_release, | ||
376 | }; | ||
377 | |||
378 | static int of_channel_match_helper(struct device_node *np, const char *name, | ||
379 | const char **dma_instance) | ||
380 | { | ||
381 | struct of_phandle_args args; | ||
382 | struct device_node *dma_node; | ||
383 | int index; | ||
384 | |||
385 | dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0); | ||
386 | if (!dma_node) | ||
387 | return -ENODEV; | ||
388 | |||
389 | *dma_instance = dma_node->name; | ||
390 | index = of_property_match_string(np, "ti,navigator-dma-names", name); | ||
391 | if (index < 0) { | ||
392 | dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
393 | return -ENODEV; | ||
394 | } | ||
395 | |||
396 | if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas", | ||
397 | 1, index, &args)) { | ||
398 | dev_err(kdev->dev, "Missing the phandle args for name %s\n", name);
399 | return -ENODEV; | ||
400 | } | ||
401 | |||
402 | if (args.args[0] < 0) { | ||
403 | dev_err(kdev->dev, "Missing args for %s\n", name); | ||
404 | return -ENODEV; | ||
405 | } | ||
406 | |||
407 | return args.args[0]; | ||
408 | } | ||
409 | |||
410 | /** | ||
411 | * knav_dma_open_channel() - try to setup an exclusive slave channel | ||
412 | * @dev: pointer to client device structure | ||
413 | * @name: slave channel name | ||
414 | * @config: dma configuration parameters | ||
415 | * | ||
416 | * Returns pointer to appropriate DMA channel on success or NULL. | ||
417 | */ | ||
418 | void *knav_dma_open_channel(struct device *dev, const char *name, | ||
419 | struct knav_dma_cfg *config) | ||
420 | { | ||
421 | struct knav_dma_chan *chan; | ||
422 | struct knav_dma_device *dma; | ||
423 | bool found = false; | ||
424 | int chan_num = -1; | ||
425 | const char *instance; | ||
426 | |||
427 | if (!kdev) { | ||
428 | pr_err("keystone-navigator-dma driver not registered\n"); | ||
429 | return (void *)-EINVAL; | ||
430 | } | ||
431 | |||
432 | chan_num = of_channel_match_helper(dev->of_node, name, &instance); | ||
433 | if (chan_num < 0) { | ||
434 | dev_err(kdev->dev, "No DMA instance with name %s\n", name);
435 | return (void *)-EINVAL; | ||
436 | } | ||
437 | |||
438 | dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n", | ||
439 | config->direction == DMA_MEM_TO_DEV ? "transmit" : | ||
440 | config->direction == DMA_DEV_TO_MEM ? "receive" : | ||
441 | "unknown", chan_num, instance); | ||
442 | |||
443 | if (config->direction != DMA_MEM_TO_DEV && | ||
444 | config->direction != DMA_DEV_TO_MEM) { | ||
445 | dev_err(kdev->dev, "bad direction\n"); | ||
446 | return (void *)-EINVAL; | ||
447 | } | ||
448 | |||
449 | /* Look for correct dma instance */ | ||
450 | list_for_each_entry(dma, &kdev->list, list) { | ||
451 | if (!strcmp(dma->name, instance)) { | ||
452 | found = true; | ||
453 | break; | ||
454 | } | ||
455 | } | ||
456 | if (!found) { | ||
457 | dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
458 | return (void *)-EINVAL; | ||
459 | } | ||
460 | |||
461 | /* Look for correct dma channel from dma instance */ | ||
462 | found = false; | ||
463 | list_for_each_entry(chan, &dma->chan_list, list) { | ||
464 | if (config->direction == DMA_MEM_TO_DEV) { | ||
465 | if (chan->channel == chan_num) { | ||
466 | found = true; | ||
467 | break; | ||
468 | } | ||
469 | } else { | ||
470 | if (chan->flow == chan_num) { | ||
471 | found = true; | ||
472 | break; | ||
473 | } | ||
474 | } | ||
475 | } | ||
476 | if (!found) { | ||
477 | dev_err(kdev->dev, "channel %d is not in DMA %s\n", | ||
478 | chan_num, instance); | ||
479 | return (void *)-EINVAL; | ||
480 | } | ||
481 | |||
482 | if (atomic_read(&chan->ref_count) >= 1) { | ||
483 | if (!check_config(chan, config)) { | ||
484 | dev_err(kdev->dev, "channel %d config mismatch\n",
485 | chan_num); | ||
486 | return (void *)-EINVAL; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | if (atomic_inc_return(&chan->dma->ref_count) <= 1) | ||
491 | knav_dma_hw_init(chan->dma); | ||
492 | |||
493 | if (atomic_inc_return(&chan->ref_count) <= 1) | ||
494 | chan_start(chan, config); | ||
495 | |||
496 | dev_dbg(kdev->dev, "channel %d opened from DMA %s\n", | ||
497 | chan_num, instance); | ||
498 | |||
499 | return chan; | ||
500 | } | ||
501 | EXPORT_SYMBOL_GPL(knav_dma_open_channel); | ||
502 | |||
503 | /** | ||
504 | * knav_dma_close_channel() - Destroy a dma channel | ||
505 | * | ||
506 | * @channel: dma channel handle
507 | * | ||
508 | */ | ||
509 | void knav_dma_close_channel(void *channel) | ||
510 | { | ||
511 | struct knav_dma_chan *chan = channel; | ||
512 | |||
513 | if (!kdev) { | ||
514 | pr_err("keystone-navigator-dma driver not registered\n"); | ||
515 | return; | ||
516 | } | ||
517 | |||
518 | if (atomic_dec_return(&chan->ref_count) <= 0) | ||
519 | chan_stop(chan); | ||
520 | |||
521 | if (atomic_dec_return(&chan->dma->ref_count) <= 0) | ||
522 | knav_dma_hw_destroy(chan->dma); | ||
523 | |||
524 | dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n", | ||
525 | chan->channel, chan->flow, chan->dma->name); | ||
526 | } | ||
527 | EXPORT_SYMBOL_GPL(knav_dma_close_channel); | ||
528 | |||
529 | static void __iomem *pktdma_get_regs(struct knav_dma_device *dma, | ||
530 | struct device_node *node, | ||
531 | unsigned index, resource_size_t *_size) | ||
532 | { | ||
533 | struct device *dev = kdev->dev; | ||
534 | struct resource res; | ||
535 | void __iomem *regs; | ||
536 | int ret; | ||
537 | |||
538 | ret = of_address_to_resource(node, index, &res); | ||
539 | if (ret) { | ||
540 | dev_err(dev, "Can't translate of node(%s) address for index(%d)\n", | ||
541 | node->name, index); | ||
542 | return ERR_PTR(ret); | ||
543 | } | ||
544 | |||
545 | regs = devm_ioremap_resource(kdev->dev, &res); | ||
546 | if (IS_ERR(regs)) | ||
547 | dev_err(dev, "Failed to map register base for index(%d) node(%s)\n", | ||
548 | index, node->name); | ||
549 | if (_size) | ||
550 | *_size = resource_size(&res); | ||
551 | |||
552 | return regs; | ||
553 | } | ||
554 | |||
555 | static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow) | ||
556 | { | ||
557 | struct knav_dma_device *dma = chan->dma; | ||
558 | |||
559 | chan->flow = flow; | ||
560 | chan->reg_rx_flow = dma->reg_rx_flow + flow; | ||
561 | chan->channel = DMA_INVALID_ID; | ||
562 | dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow); | ||
563 | |||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel) | ||
568 | { | ||
569 | struct knav_dma_device *dma = chan->dma; | ||
570 | |||
571 | chan->channel = channel; | ||
572 | chan->reg_chan = dma->reg_tx_chan + channel; | ||
573 | chan->reg_tx_sched = dma->reg_tx_sched + channel; | ||
574 | chan->flow = DMA_INVALID_ID; | ||
575 | dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan); | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | static int pktdma_init_chan(struct knav_dma_device *dma, | ||
581 | enum dma_transfer_direction dir, | ||
582 | unsigned chan_num) | ||
583 | { | ||
584 | struct device *dev = kdev->dev; | ||
585 | struct knav_dma_chan *chan; | ||
586 | int ret = -EINVAL; | ||
587 | |||
588 | chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); | ||
589 | if (!chan) | ||
590 | return -ENOMEM; | ||
591 | |||
592 | INIT_LIST_HEAD(&chan->list); | ||
593 | chan->dma = dma; | ||
594 | chan->direction = DMA_NONE; | ||
595 | atomic_set(&chan->ref_count, 0); | ||
596 | spin_lock_init(&chan->lock); | ||
597 | |||
598 | if (dir == DMA_MEM_TO_DEV) { | ||
599 | chan->direction = dir; | ||
600 | ret = pktdma_init_tx_chan(chan, chan_num); | ||
601 | } else if (dir == DMA_DEV_TO_MEM) { | ||
602 | chan->direction = dir; | ||
603 | ret = pktdma_init_rx_chan(chan, chan_num); | ||
604 | } else { | ||
605 | dev_err(dev, "channel(%d) direction unknown\n", chan_num); | ||
606 | } | ||
607 | |||
608 | list_add_tail(&chan->list, &dma->chan_list); | ||
609 | |||
610 | return ret; | ||
611 | } | ||
612 | |||
613 | static int dma_init(struct device_node *cloud, struct device_node *dma_node) | ||
614 | { | ||
615 | unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched; | ||
616 | struct device_node *node = dma_node; | ||
617 | struct knav_dma_device *dma; | ||
618 | int ret, len, num_chan = 0; | ||
619 | resource_size_t size; | ||
620 | u32 timeout; | ||
621 | u32 i; | ||
622 | |||
623 | dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL); | ||
624 | if (!dma) { | ||
625 | dev_err(kdev->dev, "could not allocate driver mem\n"); | ||
626 | return -ENOMEM; | ||
627 | } | ||
628 | INIT_LIST_HEAD(&dma->list); | ||
629 | INIT_LIST_HEAD(&dma->chan_list); | ||
630 | |||
631 | if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) { | ||
632 | dev_err(kdev->dev, "unspecified navigator cloud addresses\n"); | ||
633 | return -ENODEV; | ||
634 | } | ||
635 | |||
636 | dma->logical_queue_managers = len / sizeof(u32); | ||
637 | if (dma->logical_queue_managers > DMA_MAX_QMS) { | ||
638 | dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n", | ||
639 | dma->logical_queue_managers); | ||
640 | dma->logical_queue_managers = DMA_MAX_QMS; | ||
641 | } | ||
642 | |||
643 | ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address", | ||
644 | dma->qm_base_address, | ||
645 | dma->logical_queue_managers); | ||
646 | if (ret) { | ||
647 | dev_err(kdev->dev, "invalid navigator cloud addresses\n"); | ||
648 | return -ENODEV; | ||
649 | } | ||
650 | |||
651 | dma->reg_global = pktdma_get_regs(dma, node, 0, &size); | ||
652 | if (IS_ERR(dma->reg_global))
653 | return -ENODEV; | ||
654 | if (size < sizeof(struct reg_global)) { | ||
655 | dev_err(kdev->dev, "bad size %pa for global regs\n", &size); | ||
656 | return -ENODEV; | ||
657 | } | ||
658 | |||
659 | dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size); | ||
660 | if (IS_ERR(dma->reg_tx_chan))
661 | return -ENODEV; | ||
662 | |||
663 | max_tx_chan = size / sizeof(struct reg_chan); | ||
664 | dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size); | ||
665 | if (IS_ERR(dma->reg_rx_chan))
666 | return -ENODEV; | ||
667 | |||
668 | max_rx_chan = size / sizeof(struct reg_chan); | ||
669 | dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size); | ||
670 | if (IS_ERR(dma->reg_tx_sched))
671 | return -ENODEV; | ||
672 | |||
673 | max_tx_sched = size / sizeof(struct reg_tx_sched); | ||
674 | dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size); | ||
675 | if (IS_ERR(dma->reg_rx_flow))
676 | return -ENODEV; | ||
677 | |||
678 | max_rx_flow = size / sizeof(struct reg_rx_flow); | ||
679 | dma->rx_priority = DMA_PRIO_DEFAULT; | ||
680 | dma->tx_priority = DMA_PRIO_DEFAULT; | ||
681 | |||
682 | dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL); | ||
683 | dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL); | ||
684 | |||
685 | ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout); | ||
686 | if (ret < 0) { | ||
687 | dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n", | ||
688 | DMA_RX_TIMEOUT_DEFAULT); | ||
689 | timeout = DMA_RX_TIMEOUT_DEFAULT; | ||
690 | } | ||
691 | |||
692 | dma->rx_timeout = timeout; | ||
693 | dma->max_rx_chan = max_rx_chan; | ||
694 | dma->max_rx_flow = max_rx_flow; | ||
695 | dma->max_tx_chan = min(max_tx_chan, max_tx_sched); | ||
696 | atomic_set(&dma->ref_count, 0); | ||
697 | strcpy(dma->name, node->name); | ||
698 | spin_lock_init(&dma->lock); | ||
699 | |||
700 | for (i = 0; i < dma->max_tx_chan; i++) { | ||
701 | if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0) | ||
702 | num_chan++; | ||
703 | } | ||
704 | |||
705 | for (i = 0; i < dma->max_rx_flow; i++) { | ||
706 | if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0) | ||
707 | num_chan++; | ||
708 | } | ||
709 | |||
710 | list_add_tail(&dma->list, &kdev->list); | ||
711 | |||
712 | /* | ||
713 | * For DSP software use cases or userspace transport software, set up all
714 | * the DMA hardware resources. | ||
715 | */ | ||
716 | if (dma->enable_all) { | ||
717 | atomic_inc(&dma->ref_count); | ||
718 | knav_dma_hw_init(dma); | ||
719 | dma_hw_enable_all(dma); | ||
720 | } | ||
721 | |||
722 | dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n", | ||
723 | dma->name, num_chan, dma->max_rx_flow, | ||
724 | dma->max_tx_chan, dma->max_rx_chan, | ||
725 | dma->loopback ? ", loopback" : ""); | ||
726 | |||
727 | return 0; | ||
728 | } | ||
729 | |||
730 | static int knav_dma_probe(struct platform_device *pdev) | ||
731 | { | ||
732 | struct device *dev = &pdev->dev; | ||
733 | struct device_node *node = pdev->dev.of_node; | ||
734 | struct device_node *child; | ||
735 | int ret = 0; | ||
736 | |||
737 | if (!node) { | ||
738 | dev_err(&pdev->dev, "could not find device info\n"); | ||
739 | return -EINVAL; | ||
740 | } | ||
741 | |||
742 | kdev = devm_kzalloc(dev, | ||
743 | sizeof(struct knav_dma_pool_device), GFP_KERNEL); | ||
744 | if (!kdev) { | ||
745 | dev_err(dev, "could not allocate driver mem\n"); | ||
746 | return -ENOMEM; | ||
747 | } | ||
748 | |||
749 | kdev->dev = dev; | ||
750 | INIT_LIST_HEAD(&kdev->list); | ||
751 | |||
752 | pm_runtime_enable(kdev->dev); | ||
753 | ret = pm_runtime_get_sync(kdev->dev); | ||
754 | if (ret < 0) { | ||
755 | dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); | ||
756 | return ret; | ||
757 | } | ||
758 | |||
759 | /* Initialise all packet dmas */ | ||
760 | for_each_child_of_node(node, child) { | ||
761 | ret = dma_init(node, child); | ||
762 | if (ret) { | ||
763 | dev_err(&pdev->dev, "init failed with %d\n", ret); | ||
764 | break; | ||
765 | } | ||
766 | } | ||
767 | |||
768 | if (list_empty(&kdev->list)) { | ||
769 | dev_err(dev, "no valid dma instance\n"); | ||
770 | return -ENODEV; | ||
771 | } | ||
772 | |||
773 | debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, | ||
774 | &knav_dma_debug_ops); | ||
775 | |||
776 | return ret; | ||
777 | } | ||
778 | |||
779 | static int knav_dma_remove(struct platform_device *pdev) | ||
780 | { | ||
781 | struct knav_dma_device *dma; | ||
782 | |||
783 | list_for_each_entry(dma, &kdev->list, list) { | ||
784 | if (atomic_dec_return(&dma->ref_count) == 0) | ||
785 | knav_dma_hw_destroy(dma); | ||
786 | } | ||
787 | |||
788 | pm_runtime_put_sync(&pdev->dev); | ||
789 | pm_runtime_disable(&pdev->dev); | ||
790 | |||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | static struct of_device_id of_match[] = { | ||
795 | { .compatible = "ti,keystone-navigator-dma", }, | ||
796 | {}, | ||
797 | }; | ||
798 | |||
799 | MODULE_DEVICE_TABLE(of, of_match); | ||
800 | |||
801 | static struct platform_driver knav_dma_driver = { | ||
802 | .probe = knav_dma_probe, | ||
803 | .remove = knav_dma_remove, | ||
804 | .driver = { | ||
805 | .name = "keystone-navigator-dma", | ||
806 | .owner = THIS_MODULE, | ||
807 | .of_match_table = of_match, | ||
808 | }, | ||
809 | }; | ||
810 | module_platform_driver(knav_dma_driver); | ||
811 | |||
812 | MODULE_LICENSE("GPL v2"); | ||
813 | MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver"); | ||
814 | MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); | ||
815 | MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); | ||
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 000000000000..bc9dcc8cc3ce
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,386 @@
1 | /* | ||
2 | * Keystone Navigator QMSS driver internal header | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #ifndef __KNAV_QMSS_H__ | ||
20 | #define __KNAV_QMSS_H__ | ||
21 | |||
22 | #define THRESH_GTE BIT(7) | ||
23 | #define THRESH_LT 0 | ||
24 | |||
25 | #define PDSP_CTRL_PC_MASK 0xffff0000 | ||
26 | #define PDSP_CTRL_SOFT_RESET BIT(0) | ||
27 | #define PDSP_CTRL_ENABLE BIT(1) | ||
28 | #define PDSP_CTRL_RUNNING BIT(15) | ||
29 | |||
30 | #define ACC_MAX_CHANNEL 48 | ||
31 | #define ACC_DEFAULT_PERIOD 25 /* usecs */ | ||
32 | |||
33 | #define ACC_CHANNEL_INT_BASE 2 | ||
34 | |||
35 | #define ACC_LIST_ENTRY_TYPE 1 | ||
36 | #define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE) | ||
37 | #define ACC_LIST_ENTRY_QUEUE_IDX 0 | ||
38 | #define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1) | ||
39 | |||
40 | #define ACC_CMD_DISABLE_CHANNEL 0x80 | ||
41 | #define ACC_CMD_ENABLE_CHANNEL 0x81 | ||
42 | #define ACC_CFG_MULTI_QUEUE BIT(21) | ||
43 | |||
44 | #define ACC_INTD_OFFSET_EOI (0x0010) | ||
45 | #define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch)) | ||
46 | #define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32)) | ||
47 | |||
48 | #define RANGE_MAX_IRQS 64 | ||
49 | |||
50 | #define ACC_DESCS_MAX SZ_1K | ||
51 | #define ACC_DESCS_MASK (ACC_DESCS_MAX - 1) | ||
52 | #define DESC_SIZE_MASK 0xful | ||
53 | #define DESC_PTR_MASK (~DESC_SIZE_MASK) | ||
54 | |||
55 | #define KNAV_NAME_SIZE 32 | ||
56 | |||
57 | enum knav_acc_result { | ||
58 | ACC_RET_IDLE, | ||
59 | ACC_RET_SUCCESS, | ||
60 | ACC_RET_INVALID_COMMAND, | ||
61 | ACC_RET_INVALID_CHANNEL, | ||
62 | ACC_RET_INACTIVE_CHANNEL, | ||
63 | ACC_RET_ACTIVE_CHANNEL, | ||
64 | ACC_RET_INVALID_QUEUE, | ||
65 | ACC_RET_INVALID_RET, | ||
66 | }; | ||
67 | |||
68 | struct knav_reg_config { | ||
69 | u32 revision; | ||
70 | u32 __pad1; | ||
71 | u32 divert; | ||
72 | u32 link_ram_base0; | ||
73 | u32 link_ram_size0; | ||
74 | u32 link_ram_base1; | ||
75 | u32 __pad2[2]; | ||
76 | u32 starvation[0]; | ||
77 | }; | ||
78 | |||
79 | struct knav_reg_region { | ||
80 | u32 base; | ||
81 | u32 start_index; | ||
82 | u32 size_count; | ||
83 | u32 __pad; | ||
84 | }; | ||
85 | |||
86 | struct knav_reg_pdsp_regs { | ||
87 | u32 control; | ||
88 | u32 status; | ||
89 | u32 cycle_count; | ||
90 | u32 stall_count; | ||
91 | }; | ||
92 | |||
93 | struct knav_reg_acc_command { | ||
94 | u32 command; | ||
95 | u32 queue_mask; | ||
96 | u32 list_phys; | ||
97 | u32 queue_num; | ||
98 | u32 timer_config; | ||
99 | }; | ||
100 | |||
101 | struct knav_link_ram_block { | ||
102 | dma_addr_t phys; | ||
103 | void *virt; | ||
104 | size_t size; | ||
105 | }; | ||
106 | |||
107 | struct knav_acc_info { | ||
108 | u32 pdsp_id; | ||
109 | u32 start_channel; | ||
110 | u32 list_entries; | ||
111 | u32 pacing_mode; | ||
112 | u32 timer_count; | ||
113 | int mem_size; | ||
114 | int list_size; | ||
115 | struct knav_pdsp_info *pdsp; | ||
116 | }; | ||
117 | |||
118 | struct knav_acc_channel { | ||
119 | u32 channel; | ||
120 | u32 list_index; | ||
121 | u32 open_mask; | ||
122 | u32 *list_cpu[2]; | ||
123 | dma_addr_t list_dma[2]; | ||
124 | char name[KNAV_NAME_SIZE]; | ||
125 | atomic_t retrigger_count; | ||
126 | }; | ||
127 | |||
128 | struct knav_pdsp_info { | ||
129 | const char *name; | ||
130 | struct knav_reg_pdsp_regs __iomem *regs; | ||
131 | union { | ||
132 | void __iomem *command; | ||
133 | struct knav_reg_acc_command __iomem *acc_command; | ||
134 | u32 __iomem *qos_command; | ||
135 | }; | ||
136 | void __iomem *intd; | ||
137 | u32 __iomem *iram; | ||
138 | const char *firmware; | ||
139 | u32 id; | ||
140 | struct list_head list; | ||
141 | }; | ||
142 | |||
143 | struct knav_qmgr_info { | ||
144 | unsigned start_queue; | ||
145 | unsigned num_queues; | ||
146 | struct knav_reg_config __iomem *reg_config; | ||
147 | struct knav_reg_region __iomem *reg_region; | ||
148 | struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; | ||
149 | void __iomem *reg_status; | ||
150 | struct list_head list; | ||
151 | }; | ||
152 | |||
153 | #define KNAV_NUM_LINKRAM 2 | ||
154 | |||
155 | /** | ||
156 | * struct knav_queue_stats: queue statistics | ||
157 | * pushes: number of push operations | ||
158 | * pops: number of pop operations | ||
159 | * push_errors: number of push errors | ||
160 | * pop_errors: number of pop errors | ||
161 | * notifies: notifier counts | ||
162 | */ | ||
163 | struct knav_queue_stats { | ||
164 | atomic_t pushes; | ||
165 | atomic_t pops; | ||
166 | atomic_t push_errors; | ||
167 | atomic_t pop_errors; | ||
168 | atomic_t notifies; | ||
169 | }; | ||
170 | |||
171 | /** | ||
172 | * struct knav_reg_queue: queue registers | ||
173 | * @entry_count: valid entries in the queue | ||
174 | * @byte_count: total byte count in the queue
175 | * @packet_size: packet size for the queue | ||
176 | * @ptr_size_thresh: packet pointer size threshold | ||
177 | */ | ||
178 | struct knav_reg_queue { | ||
179 | u32 entry_count; | ||
180 | u32 byte_count; | ||
181 | u32 packet_size; | ||
182 | u32 ptr_size_thresh; | ||
183 | }; | ||
184 | |||
185 | /** | ||
186 | * struct knav_region: qmss region info | ||
187 | * @dma_start, dma_end: start and end dma address | ||
188 | * @virt_start, virt_end: start and end virtual address | ||
189 | * @desc_size: descriptor size | ||
190 | * @used_desc: consumed descriptors | ||
191 | * @id: region number | ||
192 | * @num_desc: total descriptors | ||
193 | * @link_index: index of the first descriptor | ||
194 | * @name: region name | ||
195 | * @list: instance in the device's region list | ||
196 | * @pools: list of descriptor pools in the region | ||
197 | */ | ||
198 | struct knav_region { | ||
199 | dma_addr_t dma_start, dma_end; | ||
200 | void *virt_start, *virt_end; | ||
201 | unsigned desc_size; | ||
202 | unsigned used_desc; | ||
203 | unsigned id; | ||
204 | unsigned num_desc; | ||
205 | unsigned link_index; | ||
206 | const char *name; | ||
207 | struct list_head list; | ||
208 | struct list_head pools; | ||
209 | }; | ||
210 | |||
211 | /** | ||
212 | * struct knav_pool: qmss pools | ||
213 | * @dev: device pointer | ||
214 | * @region: qmss region info | ||
215 | * @queue: queue registers | ||
216 | * @kdev: qmss device pointer | ||
217 | * @region_offset: offset from the base | ||
218 | * @num_desc: total descriptors | ||
219 | * @desc_size: descriptor size | ||
220 | * @region_id: region number | ||
221 | * @name: pool name | ||
222 | * @list: list head | ||
223 | * @region_inst: instance in the region's pool list | ||
224 | */ | ||
225 | struct knav_pool { | ||
226 | struct device *dev; | ||
227 | struct knav_region *region; | ||
228 | struct knav_queue *queue; | ||
229 | struct knav_device *kdev; | ||
230 | int region_offset; | ||
231 | int num_desc; | ||
232 | int desc_size; | ||
233 | int region_id; | ||
234 | const char *name; | ||
235 | struct list_head list; | ||
236 | struct list_head region_inst; | ||
237 | }; | ||
238 | |||
239 | /** | ||
240 | * struct knav_queue_inst: qmss queue instance properties
241 | * @descs: descriptor pointer | ||
242 | * @desc_head, desc_tail, desc_count: descriptor counters | ||
243 | * @acc: accumulator channel pointer | ||
244 | * @kdev: qmss device pointer | ||
245 | * @range: range info | ||
246 | * @qmgr: queue manager info | ||
247 | * @id: queue instance id | ||
248 | * @irq_num: irq line number | ||
249 | * @notify_needed: notifier needed based on queue type | ||
250 | * @num_notifiers: total notifiers | ||
251 | * @handles: list of open handles on this queue | ||
252 | * @name: queue instance name | ||
253 | * @irq_name: irq line name | ||
254 | */ | ||
255 | struct knav_queue_inst { | ||
256 | u32 *descs; | ||
257 | atomic_t desc_head, desc_tail, desc_count; | ||
258 | struct knav_acc_channel *acc; | ||
259 | struct knav_device *kdev; | ||
260 | struct knav_range_info *range; | ||
261 | struct knav_qmgr_info *qmgr; | ||
262 | u32 id; | ||
263 | int irq_num; | ||
264 | int notify_needed; | ||
265 | atomic_t num_notifiers; | ||
266 | struct list_head handles; | ||
267 | const char *name; | ||
268 | const char *irq_name; | ||
269 | }; | ||
270 | |||
271 | /** | ||
272 | * struct knav_queue: qmss queue properties | ||
273 | * @reg_push, reg_pop, reg_peek: push, pop queue registers | ||
274 | * @inst: qmss queue instance properties | ||
275 | * @notifier_fn: notifier function | ||
276 | * @notifier_fn_arg: notifier function argument | ||
277 | * @notifier_enabled: notifier enabled for a given queue | ||
278 | * @rcu: rcu head | ||
279 | * @flags: queue flags | ||
280 | * @list: list head | ||
281 | */ | ||
282 | struct knav_queue { | ||
283 | struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; | ||
284 | struct knav_queue_inst *inst; | ||
285 | struct knav_queue_stats stats; | ||
286 | knav_queue_notify_fn notifier_fn; | ||
287 | void *notifier_fn_arg; | ||
288 | atomic_t notifier_enabled; | ||
289 | struct rcu_head rcu; | ||
290 | unsigned flags; | ||
291 | struct list_head list; | ||
292 | }; | ||
293 | |||
294 | struct knav_device { | ||
295 | struct device *dev; | ||
296 | unsigned base_id; | ||
297 | unsigned num_queues; | ||
298 | unsigned num_queues_in_use; | ||
299 | unsigned inst_shift; | ||
300 | struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM]; | ||
301 | void *instances; | ||
302 | struct list_head regions; | ||
303 | struct list_head queue_ranges; | ||
304 | struct list_head pools; | ||
305 | struct list_head pdsps; | ||
306 | struct list_head qmgrs; | ||
307 | }; | ||
308 | |||
309 | struct knav_range_ops { | ||
310 | int (*init_range)(struct knav_range_info *range); | ||
311 | int (*free_range)(struct knav_range_info *range); | ||
312 | int (*init_queue)(struct knav_range_info *range, | ||
313 | struct knav_queue_inst *inst); | ||
314 | int (*open_queue)(struct knav_range_info *range, | ||
315 | struct knav_queue_inst *inst, unsigned flags); | ||
316 | int (*close_queue)(struct knav_range_info *range, | ||
317 | struct knav_queue_inst *inst); | ||
318 | int (*set_notify)(struct knav_range_info *range, | ||
319 | struct knav_queue_inst *inst, bool enabled); | ||
320 | }; | ||
321 | |||
322 | struct knav_irq_info { | ||
323 | int irq; | ||
324 | u32 cpu_map; | ||
325 | }; | ||
326 | |||
327 | struct knav_range_info { | ||
328 | const char *name; | ||
329 | struct knav_device *kdev; | ||
330 | unsigned queue_base; | ||
331 | unsigned num_queues; | ||
332 | void *queue_base_inst; | ||
333 | unsigned flags; | ||
334 | struct list_head list; | ||
335 | struct knav_range_ops *ops; | ||
336 | struct knav_acc_info acc_info; | ||
337 | struct knav_acc_channel *acc; | ||
338 | unsigned num_irqs; | ||
339 | struct knav_irq_info irqs[RANGE_MAX_IRQS]; | ||
340 | }; | ||
341 | |||
342 | #define RANGE_RESERVED BIT(0) | ||
343 | #define RANGE_HAS_IRQ BIT(1) | ||
344 | #define RANGE_HAS_ACCUMULATOR BIT(2) | ||
345 | #define RANGE_MULTI_QUEUE BIT(3) | ||
346 | |||
347 | #define for_each_region(kdev, region) \ | ||
348 | list_for_each_entry(region, &kdev->regions, list) | ||
349 | |||
350 | #define first_region(kdev) \ | ||
351 | list_first_entry(&kdev->regions, \ | ||
352 | struct knav_region, list) | ||
353 | |||
354 | #define for_each_queue_range(kdev, range) \ | ||
355 | list_for_each_entry(range, &kdev->queue_ranges, list) | ||
356 | |||
357 | #define first_queue_range(kdev) \ | ||
358 | list_first_entry(&kdev->queue_ranges, \ | ||
359 | struct knav_range_info, list) | ||
360 | |||
361 | #define for_each_pool(kdev, pool) \ | ||
362 | list_for_each_entry(pool, &kdev->pools, list) | ||
363 | |||
364 | #define for_each_pdsp(kdev, pdsp) \ | ||
365 | list_for_each_entry(pdsp, &kdev->pdsps, list) | ||
366 | |||
367 | #define for_each_qmgr(kdev, qmgr) \ | ||
368 | list_for_each_entry(qmgr, &kdev->qmgrs, list) | ||
369 | |||
370 | static inline struct knav_pdsp_info * | ||
371 | knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id) | ||
372 | { | ||
373 | struct knav_pdsp_info *pdsp; | ||
374 | |||
375 | for_each_pdsp(kdev, pdsp) | ||
376 | if (pdsp_id == pdsp->id) | ||
377 | return pdsp; | ||
378 | return NULL; | ||
379 | } | ||
380 | |||
381 | extern int knav_init_acc_range(struct knav_device *kdev, | ||
382 | struct device_node *node, | ||
383 | struct knav_range_info *range); | ||
384 | extern void knav_queue_notify(struct knav_queue_inst *inst); | ||
385 | |||
386 | #endif /* __KNAV_QMSS_H__ */ | ||
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c new file mode 100644 index 000000000000..6fbfde6e748f --- /dev/null +++ b/drivers/soc/ti/knav_qmss_acc.c | |||
@@ -0,0 +1,591 @@ | |||
1 | /* | ||
2 | * Keystone accumulator queue manager | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/bitops.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/soc/ti/knav_qmss.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/of.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/firmware.h> | ||
34 | |||
35 | #include "knav_qmss.h" | ||
36 | |||
37 | #define knav_range_offset_to_inst(kdev, range, q) \ | ||
38 | (range->queue_base_inst + (q << kdev->inst_shift)) | ||
39 | |||
40 | static void __knav_acc_notify(struct knav_range_info *range, | ||
41 | struct knav_acc_channel *acc) | ||
42 | { | ||
43 | struct knav_device *kdev = range->kdev; | ||
44 | struct knav_queue_inst *inst; | ||
45 | int range_base, queue; | ||
46 | |||
47 | range_base = kdev->base_id + range->queue_base; | ||
48 | |||
49 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
50 | for (queue = 0; queue < range->num_queues; queue++) { | ||
51 | inst = knav_range_offset_to_inst(kdev, range, | ||
52 | queue); | ||
53 | if (inst->notify_needed) { | ||
54 | inst->notify_needed = 0; | ||
55 | dev_dbg(kdev->dev, "acc-irq: notifying %d\n", | ||
56 | range_base + queue); | ||
57 | knav_queue_notify(inst); | ||
58 | } | ||
59 | } | ||
60 | } else { | ||
61 | queue = acc->channel - range->acc_info.start_channel; | ||
62 | inst = knav_range_offset_to_inst(kdev, range, queue); | ||
63 | dev_dbg(kdev->dev, "acc-irq: notifying %d\n", | ||
64 | range_base + queue); | ||
65 | knav_queue_notify(inst); | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static int knav_acc_set_notify(struct knav_range_info *range, | ||
70 | struct knav_queue_inst *kq, | ||
71 | bool enabled) | ||
72 | { | ||
73 | struct knav_pdsp_info *pdsp = range->acc_info.pdsp; | ||
74 | struct knav_device *kdev = range->kdev; | ||
75 | u32 mask, offset; | ||
76 | |||
77 | /* | ||
78 | * when enabling, we need to re-trigger an interrupt if we | ||
79 | * have descriptors pending | ||
80 | */ | ||
81 | if (!enabled || atomic_read(&kq->desc_count) <= 0) | ||
82 | return 0; | ||
83 | |||
84 | kq->notify_needed = 1; | ||
85 | atomic_inc(&kq->acc->retrigger_count); | ||
86 | mask = BIT(kq->acc->channel % 32); | ||
87 | offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel); | ||
88 | dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n", | ||
89 | kq->acc->name); | ||
90 | writel_relaxed(mask, pdsp->intd + offset); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static irqreturn_t knav_acc_int_handler(int irq, void *_instdata) | ||
95 | { | ||
96 | struct knav_acc_channel *acc; | ||
97 | struct knav_queue_inst *kq = NULL; | ||
98 | struct knav_range_info *range; | ||
99 | struct knav_pdsp_info *pdsp; | ||
100 | struct knav_acc_info *info; | ||
101 | struct knav_device *kdev; | ||
102 | |||
103 | u32 *list, *list_cpu, val, idx, notifies; | ||
104 | int range_base, channel, queue = 0; | ||
105 | dma_addr_t list_dma; | ||
106 | |||
107 | range = _instdata; | ||
108 | info = &range->acc_info; | ||
109 | kdev = range->kdev; | ||
110 | pdsp = range->acc_info.pdsp; | ||
111 | acc = range->acc; | ||
112 | |||
113 | range_base = kdev->base_id + range->queue_base; | ||
114 | if ((range->flags & RANGE_MULTI_QUEUE) == 0) { | ||
115 | for (queue = 0; queue < range->num_irqs; queue++) | ||
116 | if (range->irqs[queue].irq == irq) | ||
117 | break; | ||
118 | kq = knav_range_offset_to_inst(kdev, range, queue); | ||
119 | acc += queue; | ||
120 | } | ||
121 | |||
122 | channel = acc->channel; | ||
123 | list_dma = acc->list_dma[acc->list_index]; | ||
124 | list_cpu = acc->list_cpu[acc->list_index]; | ||
125 | dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n", | ||
126 | channel, acc->list_index, list_cpu, list_dma); | ||
127 | if (atomic_read(&acc->retrigger_count)) { | ||
128 | atomic_dec(&acc->retrigger_count); | ||
129 | __knav_acc_notify(range, acc); | ||
130 | writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
131 | /* ack the interrupt */ | ||
132 | writel_relaxed(ACC_CHANNEL_INT_BASE + channel, | ||
133 | pdsp->intd + ACC_INTD_OFFSET_EOI); | ||
134 | |||
135 | return IRQ_HANDLED; | ||
136 | } | ||
137 | |||
138 | notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
139 | WARN_ON(!notifies); | ||
140 | dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size, | ||
141 | DMA_FROM_DEVICE); | ||
142 | |||
143 | for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32)); | ||
144 | list += ACC_LIST_ENTRY_WORDS) { | ||
145 | if (ACC_LIST_ENTRY_WORDS == 1) { | ||
146 | dev_dbg(kdev->dev, | ||
147 | "acc-irq: list %d, entry @%p, %08x\n", | ||
148 | acc->list_index, list, list[0]); | ||
149 | } else if (ACC_LIST_ENTRY_WORDS == 2) { | ||
150 | dev_dbg(kdev->dev, | ||
151 | "acc-irq: list %d, entry @%p, %08x %08x\n", | ||
152 | acc->list_index, list, list[0], list[1]); | ||
153 | } else if (ACC_LIST_ENTRY_WORDS == 4) { | ||
154 | dev_dbg(kdev->dev, | ||
155 | "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n", | ||
156 | acc->list_index, list, list[0], list[1], | ||
157 | list[2], list[3]); | ||
158 | } | ||
159 | |||
160 | val = list[ACC_LIST_ENTRY_DESC_IDX]; | ||
161 | if (!val) | ||
162 | break; | ||
163 | |||
164 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
165 | queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16; | ||
166 | if (queue < range_base || | ||
167 | queue >= range_base + range->num_queues) { | ||
168 | dev_err(kdev->dev, | ||
169 | "bad queue %d, expecting %d-%d\n", | ||
170 | queue, range_base, | ||
171 | range_base + range->num_queues); | ||
172 | break; | ||
173 | } | ||
174 | queue -= range_base; | ||
175 | kq = knav_range_offset_to_inst(kdev, range, | ||
176 | queue); | ||
177 | } | ||
178 | |||
179 | if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) { | ||
180 | atomic_dec(&kq->desc_count); | ||
181 | dev_err(kdev->dev, | ||
182 | "acc-irq: queue %d full, entry dropped\n", | ||
183 | queue + range_base); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK; | ||
188 | kq->descs[idx] = val; | ||
189 | kq->notify_needed = 1; | ||
190 | dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n", | ||
191 | val, idx, queue + range_base); | ||
192 | } | ||
193 | |||
194 | __knav_acc_notify(range, acc); | ||
195 | memset(list_cpu, 0, info->list_size); | ||
196 | dma_sync_single_for_device(kdev->dev, list_dma, info->list_size, | ||
197 | DMA_TO_DEVICE); | ||
198 | |||
199 | /* flip to the other list */ | ||
200 | acc->list_index ^= 1; | ||
201 | |||
202 | /* reset the interrupt counter */ | ||
203 | writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
204 | |||
205 | /* ack the interrupt */ | ||
206 | writel_relaxed(ACC_CHANNEL_INT_BASE + channel, | ||
207 | pdsp->intd + ACC_INTD_OFFSET_EOI); | ||
208 | |||
209 | return IRQ_HANDLED; | ||
210 | } | ||
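
Each accumulator channel works a ping-pong pair of lists out of one DMA mapping: the PDSP firmware fills one half while the CPU drains the other, and the handler above flips list_index once a half is consumed. A minimal sketch of that layout, mirroring the allocation done later in knav_init_acc_range (list_mem, list_dma and list_size as defined there):

	/* one mapping of 2 * list_size bytes backs both halves */
	acc->list_cpu[0] = list_mem;
	acc->list_cpu[1] = list_mem + list_size;
	acc->list_dma[0] = list_dma;
	acc->list_dma[1] = list_dma + list_size;

	/* per interrupt: drain list_cpu[list_index], zero it, flip */
	acc->list_index ^= 1;
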
211 | |||
212 | int knav_range_setup_acc_irq(struct knav_range_info *range, | ||
213 | int queue, bool enabled) | ||
214 | { | ||
215 | struct knav_device *kdev = range->kdev; | ||
216 | struct knav_acc_channel *acc; | ||
217 | unsigned long cpu_map; | ||
218 | int ret = 0, irq; | ||
219 | u32 old, new; | ||
220 | |||
221 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
222 | acc = range->acc; | ||
223 | irq = range->irqs[0].irq; | ||
224 | cpu_map = range->irqs[0].cpu_map; | ||
225 | } else { | ||
226 | acc = range->acc + queue; | ||
227 | irq = range->irqs[queue].irq; | ||
228 | cpu_map = range->irqs[queue].cpu_map; | ||
229 | } | ||
230 | |||
231 | old = acc->open_mask; | ||
232 | if (enabled) | ||
233 | new = old | BIT(queue); | ||
234 | else | ||
235 | new = old & ~BIT(queue); | ||
236 | acc->open_mask = new; | ||
237 | |||
238 | dev_dbg(kdev->dev, | ||
239 | "setup-acc-irq: open mask old %08x, new %08x, channel %s\n", | ||
240 | old, new, acc->name); | ||
241 | |||
242 | if (likely(new == old)) | ||
243 | return 0; | ||
244 | |||
245 | if (new && !old) { | ||
246 | dev_dbg(kdev->dev, | ||
247 | "setup-acc-irq: requesting %s for channel %s\n", | ||
248 | acc->name, acc->name); | ||
249 | ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, | ||
250 | range); | ||
251 | if (!ret && cpu_map) { | ||
252 | ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); | ||
253 | if (ret) { | ||
254 | dev_warn(range->kdev->dev, | ||
255 | "Failed to set IRQ affinity\n"); | ||
256 | return ret; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | |||
261 | if (old && !new) { | ||
262 | dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n", | ||
263 | acc->name, acc->name); | ||
264 | free_irq(irq, range); | ||
265 | } | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static const char *knav_acc_result_str(enum knav_acc_result result) | ||
271 | { | ||
272 | static const char * const result_str[] = { | ||
273 | [ACC_RET_IDLE] = "idle", | ||
274 | [ACC_RET_SUCCESS] = "success", | ||
275 | [ACC_RET_INVALID_COMMAND] = "invalid command", | ||
276 | [ACC_RET_INVALID_CHANNEL] = "invalid channel", | ||
277 | [ACC_RET_INACTIVE_CHANNEL] = "inactive channel", | ||
278 | [ACC_RET_ACTIVE_CHANNEL] = "active channel", | ||
279 | [ACC_RET_INVALID_QUEUE] = "invalid queue", | ||
280 | [ACC_RET_INVALID_RET] = "invalid return code", | ||
281 | }; | ||
282 | |||
283 | if (result >= ARRAY_SIZE(result_str)) | ||
284 | return result_str[ACC_RET_INVALID_RET]; | ||
285 | else | ||
286 | return result_str[result]; | ||
287 | } | ||
288 | |||
289 | static enum knav_acc_result | ||
290 | knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp, | ||
291 | struct knav_reg_acc_command *cmd) | ||
292 | { | ||
293 | u32 result; | ||
294 | |||
295 | dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n", | ||
296 | cmd->command, cmd->queue_mask, cmd->list_phys, | ||
297 | cmd->queue_num, cmd->timer_config); | ||
298 | |||
299 | writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config); | ||
300 | writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num); | ||
301 | writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys); | ||
302 | writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask); | ||
303 | writel_relaxed(cmd->command, &pdsp->acc_command->command); | ||
304 | |||
305 | /* wait for the command to clear */ | ||
306 | do { | ||
307 | result = readl_relaxed(&pdsp->acc_command->command); | ||
308 | } while ((result >> 8) & 0xff); | ||
309 | |||
310 | return (result >> 24) & 0xff; | ||
311 | } | ||
312 | |||
313 | static void knav_acc_setup_cmd(struct knav_device *kdev, | ||
314 | struct knav_range_info *range, | ||
315 | struct knav_reg_acc_command *cmd, | ||
316 | int queue) | ||
317 | { | ||
318 | struct knav_acc_info *info = &range->acc_info; | ||
319 | struct knav_acc_channel *acc; | ||
320 | int queue_base; | ||
321 | u32 queue_mask; | ||
322 | |||
323 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
324 | acc = range->acc; | ||
325 | queue_base = range->queue_base; | ||
326 | queue_mask = BIT(range->num_queues) - 1; | ||
327 | } else { | ||
328 | acc = range->acc + queue; | ||
329 | queue_base = range->queue_base + queue; | ||
330 | queue_mask = 0; | ||
331 | } | ||
332 | |||
333 | memset(cmd, 0, sizeof(*cmd)); | ||
334 | cmd->command = acc->channel; | ||
335 | cmd->queue_mask = queue_mask; | ||
336 | cmd->list_phys = acc->list_dma[0]; | ||
337 | cmd->queue_num = info->list_entries << 16; | ||
338 | cmd->queue_num |= queue_base; | ||
339 | |||
340 | cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18; | ||
341 | if (range->flags & RANGE_MULTI_QUEUE) | ||
342 | cmd->timer_config |= ACC_CFG_MULTI_QUEUE; | ||
343 | cmd->timer_config |= info->pacing_mode << 16; | ||
344 | cmd->timer_config |= info->timer_count; | ||
345 | } | ||
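
As a worked example of the packing above (all values hypothetical): a multi-queue range based at queue 704 with 16 queues, 128 list entries, pacing mode 2 and a timer count of 4 would be encoded as:

	cmd->queue_mask   = BIT(16) - 1;          /* 0x0000ffff     */
	cmd->queue_num    = (128 << 16) | 704;    /* entries | base */
	cmd->timer_config = (ACC_LIST_ENTRY_TYPE << 18)
	                  | ACC_CFG_MULTI_QUEUE
	                  | (2 << 16)             /* pacing mode    */
	                  | 4;                    /* timer count    */
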
346 | |||
347 | static void knav_acc_stop(struct knav_device *kdev, | ||
348 | struct knav_range_info *range, | ||
349 | int queue) | ||
350 | { | ||
351 | struct knav_reg_acc_command cmd; | ||
352 | struct knav_acc_channel *acc; | ||
353 | enum knav_acc_result result; | ||
354 | |||
355 | acc = range->acc + queue; | ||
356 | |||
357 | knav_acc_setup_cmd(kdev, range, &cmd, queue); | ||
358 | cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8; | ||
359 | result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); | ||
360 | |||
361 | dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n", | ||
362 | acc->name, knav_acc_result_str(result)); | ||
363 | } | ||
364 | |||
365 | static enum knav_acc_result knav_acc_start(struct knav_device *kdev, | ||
366 | struct knav_range_info *range, | ||
367 | int queue) | ||
368 | { | ||
369 | struct knav_reg_acc_command cmd; | ||
370 | struct knav_acc_channel *acc; | ||
371 | enum knav_acc_result result; | ||
372 | |||
373 | acc = range->acc + queue; | ||
374 | |||
375 | knav_acc_setup_cmd(kdev, range, &cmd, queue); | ||
376 | cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8; | ||
377 | result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); | ||
378 | |||
379 | dev_dbg(kdev->dev, "started acc channel %s, result %s\n", | ||
380 | acc->name, knav_acc_result_str(result)); | ||
381 | |||
382 | return result; | ||
383 | } | ||
384 | |||
385 | static int knav_acc_init_range(struct knav_range_info *range) | ||
386 | { | ||
387 | struct knav_device *kdev = range->kdev; | ||
388 | struct knav_acc_channel *acc; | ||
389 | enum knav_acc_result result; | ||
390 | int queue; | ||
391 | |||
392 | for (queue = 0; queue < range->num_queues; queue++) { | ||
393 | acc = range->acc + queue; | ||
394 | |||
395 | knav_acc_stop(kdev, range, queue); | ||
396 | acc->list_index = 0; | ||
397 | result = knav_acc_start(kdev, range, queue); | ||
398 | |||
399 | if (result != ACC_RET_SUCCESS) | ||
400 | return -EIO; | ||
401 | |||
402 | if (range->flags & RANGE_MULTI_QUEUE) | ||
403 | return 0; | ||
404 | } | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int knav_acc_init_queue(struct knav_range_info *range, | ||
409 | struct knav_queue_inst *kq) | ||
410 | { | ||
411 | unsigned id = kq->id - range->queue_base; | ||
412 | |||
413 | kq->descs = devm_kzalloc(range->kdev->dev, | ||
414 | ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL); | ||
415 | if (!kq->descs) | ||
416 | return -ENOMEM; | ||
417 | |||
418 | kq->acc = range->acc; | ||
419 | if ((range->flags & RANGE_MULTI_QUEUE) == 0) | ||
420 | kq->acc += id; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int knav_acc_open_queue(struct knav_range_info *range, | ||
425 | struct knav_queue_inst *inst, unsigned flags) | ||
426 | { | ||
427 | unsigned id = inst->id - range->queue_base; | ||
428 | |||
429 | return knav_range_setup_acc_irq(range, id, true); | ||
430 | } | ||
431 | |||
432 | static int knav_acc_close_queue(struct knav_range_info *range, | ||
433 | struct knav_queue_inst *inst) | ||
434 | { | ||
435 | unsigned id = inst->id - range->queue_base; | ||
436 | |||
437 | return knav_range_setup_acc_irq(range, id, false); | ||
438 | } | ||
439 | |||
440 | static int knav_acc_free_range(struct knav_range_info *range) | ||
441 | { | ||
442 | struct knav_device *kdev = range->kdev; | ||
443 | struct knav_acc_channel *acc; | ||
444 | struct knav_acc_info *info; | ||
445 | int channel, channels; | ||
446 | |||
447 | info = &range->acc_info; | ||
448 | |||
449 | if (range->flags & RANGE_MULTI_QUEUE) | ||
450 | channels = 1; | ||
451 | else | ||
452 | channels = range->num_queues; | ||
453 | |||
454 | for (channel = 0; channel < channels; channel++) { | ||
455 | acc = range->acc + channel; | ||
456 | if (!acc->list_cpu[0]) | ||
457 | continue; | ||
458 | dma_unmap_single(kdev->dev, acc->list_dma[0], | ||
459 | info->mem_size, DMA_BIDIRECTIONAL); | ||
460 | free_pages_exact(acc->list_cpu[0], info->mem_size); | ||
461 | } | ||
462 | devm_kfree(range->kdev->dev, range->acc); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | struct knav_range_ops knav_acc_range_ops = { | ||
467 | .set_notify = knav_acc_set_notify, | ||
468 | .init_queue = knav_acc_init_queue, | ||
469 | .open_queue = knav_acc_open_queue, | ||
470 | .close_queue = knav_acc_close_queue, | ||
471 | .init_range = knav_acc_init_range, | ||
472 | .free_range = knav_acc_free_range, | ||
473 | }; | ||
474 | |||
475 | /** | ||
476 | * knav_init_acc_range: Initialise accumulator ranges | ||
477 | * | ||
478 | * @kdev: qmss device | ||
479 | * @node: device node | ||
480 | * @range: qmss range information | ||
481 | * | ||
482 | * Returns 0 on success, errno otherwise | ||
483 | */ | ||
484 | int knav_init_acc_range(struct knav_device *kdev, | ||
485 | struct device_node *node, | ||
486 | struct knav_range_info *range) | ||
487 | { | ||
488 | struct knav_acc_channel *acc; | ||
489 | struct knav_pdsp_info *pdsp; | ||
490 | struct knav_acc_info *info; | ||
491 | int ret, channel, channels; | ||
492 | int list_size, mem_size; | ||
493 | dma_addr_t list_dma; | ||
494 | void *list_mem; | ||
495 | u32 config[5]; | ||
496 | |||
497 | range->flags |= RANGE_HAS_ACCUMULATOR; | ||
498 | info = &range->acc_info; | ||
499 | |||
500 | ret = of_property_read_u32_array(node, "accumulator", config, 5); | ||
501 | if (ret) | ||
502 | return ret; | ||
503 | |||
504 | info->pdsp_id = config[0]; | ||
505 | info->start_channel = config[1]; | ||
506 | info->list_entries = config[2]; | ||
507 | info->pacing_mode = config[3]; | ||
508 | info->timer_count = config[4] / ACC_DEFAULT_PERIOD; | ||
509 | |||
510 | if (info->start_channel > ACC_MAX_CHANNEL) { | ||
511 | dev_err(kdev->dev, "channel %d invalid for range %s\n", | ||
512 | info->start_channel, range->name); | ||
513 | return -EINVAL; | ||
514 | } | ||
515 | |||
516 | if (info->pacing_mode > 3) { | ||
517 | dev_err(kdev->dev, "pacing mode %d invalid for range %s\n", | ||
518 | info->pacing_mode, range->name); | ||
519 | return -EINVAL; | ||
520 | } | ||
521 | |||
522 | pdsp = knav_find_pdsp(kdev, info->pdsp_id); | ||
523 | if (!pdsp) { | ||
524 | dev_err(kdev->dev, "pdsp id %d not found for range %s\n", | ||
525 | info->pdsp_id, range->name); | ||
526 | return -EINVAL; | ||
527 | } | ||
528 | |||
529 | info->pdsp = pdsp; | ||
530 | channels = range->num_queues; | ||
531 | if (of_get_property(node, "multi-queue", NULL)) { | ||
532 | range->flags |= RANGE_MULTI_QUEUE; | ||
533 | channels = 1; | ||
534 | if (range->queue_base & (32 - 1)) { | ||
535 | dev_err(kdev->dev, | ||
536 | "misaligned multi-queue accumulator range %s\n", | ||
537 | range->name); | ||
538 | return -EINVAL; | ||
539 | } | ||
540 | if (range->num_queues > 32) { | ||
541 | dev_err(kdev->dev, | ||
542 | "too many queues in accumulator range %s\n", | ||
543 | range->name); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | } | ||
547 | |||
548 | /* figure out list size */ | ||
549 | list_size = info->list_entries; | ||
550 | list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32); | ||
551 | info->list_size = list_size; | ||
552 | mem_size = PAGE_ALIGN(list_size * 2); | ||
553 | info->mem_size = mem_size; | ||
554 | range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc), | ||
555 | GFP_KERNEL); | ||
556 | if (!range->acc) | ||
557 | return -ENOMEM; | ||
558 | |||
559 | for (channel = 0; channel < channels; channel++) { | ||
560 | acc = range->acc + channel; | ||
561 | acc->channel = info->start_channel + channel; | ||
562 | |||
563 | /* allocate memory for the two lists */ | ||
564 | list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA); | ||
565 | if (!list_mem) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | list_dma = dma_map_single(kdev->dev, list_mem, mem_size, | ||
569 | DMA_BIDIRECTIONAL); | ||
570 | if (dma_mapping_error(kdev->dev, list_dma)) { | ||
571 | free_pages_exact(list_mem, mem_size); | ||
572 | return -ENOMEM; | ||
573 | } | ||
574 | |||
575 | memset(list_mem, 0, mem_size); | ||
576 | dma_sync_single_for_device(kdev->dev, list_dma, mem_size, | ||
577 | DMA_TO_DEVICE); | ||
578 | scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d", | ||
579 | acc->channel); | ||
580 | acc->list_cpu[0] = list_mem; | ||
581 | acc->list_cpu[1] = list_mem + list_size; | ||
582 | acc->list_dma[0] = list_dma; | ||
583 | acc->list_dma[1] = list_dma + list_size; | ||
584 | dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n", | ||
585 | acc->name, acc->channel, list_dma, list_mem); | ||
586 | } | ||
587 | |||
588 | range->ops = &knav_acc_range_ops; | ||
589 | return 0; | ||
590 | } | ||
591 | EXPORT_SYMBOL_GPL(knav_init_acc_range); | ||
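
For reference, the five-word "accumulator" property consumed above decodes as shown below; the numbers are purely illustrative and not taken from any board file, and ACC_DEFAULT_PERIOD is the firmware's base timer period defined in knav_qmss.h:

	/* accumulator = <pdsp-id start-channel list-entries pacing-mode timeout>; */
	u32 config[5] = { 0, 36, 128, 2, 50 };

	info->pdsp_id       = config[0];  /* PDSP running the firmware  */
	info->start_channel = config[1];  /* first accumulator channel  */
	info->list_entries  = config[2];  /* entries per ping/pong list */
	info->pacing_mode   = config[3];  /* interrupt pacing mode, 0-3 */
	info->timer_count   = config[4] / ACC_DEFAULT_PERIOD;
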
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c new file mode 100644 index 000000000000..0a2c8634c48b --- /dev/null +++ b/drivers/soc/ti/knav_qmss_queue.c | |||
@@ -0,0 +1,1816 @@ | |||
1 | /* | ||
2 | * Keystone Queue Manager subsystem driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/of.h> | ||
31 | #include <linux/of_irq.h> | ||
32 | #include <linux/of_device.h> | ||
33 | #include <linux/of_address.h> | ||
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/firmware.h> | ||
36 | #include <linux/debugfs.h> | ||
37 | #include <linux/seq_file.h> | ||
38 | #include <linux/string.h> | ||
39 | #include <linux/soc/ti/knav_qmss.h> | ||
40 | |||
41 | #include "knav_qmss.h" | ||
42 | |||
43 | static struct knav_device *kdev; | ||
44 | static DEFINE_MUTEX(knav_dev_lock); | ||
45 | |||
46 | /* Queue manager register indices in DTS */ | ||
47 | #define KNAV_QUEUE_PEEK_REG_INDEX 0 | ||
48 | #define KNAV_QUEUE_STATUS_REG_INDEX 1 | ||
49 | #define KNAV_QUEUE_CONFIG_REG_INDEX 2 | ||
50 | #define KNAV_QUEUE_REGION_REG_INDEX 3 | ||
51 | #define KNAV_QUEUE_PUSH_REG_INDEX 4 | ||
52 | #define KNAV_QUEUE_POP_REG_INDEX 5 | ||
53 | |||
54 | /* PDSP register indices in DTS */ | ||
55 | #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0 | ||
56 | #define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1 | ||
57 | #define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2 | ||
58 | #define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3 | ||
59 | |||
60 | #define knav_queue_idx_to_inst(kdev, idx) \ | ||
61 | (kdev->instances + (idx << kdev->inst_shift)) | ||
62 | |||
63 | #define for_each_handle_rcu(qh, inst) \ | ||
64 | list_for_each_entry_rcu(qh, &inst->handles, list) | ||
65 | |||
66 | #define for_each_instance(idx, inst, kdev) \ | ||
67 | for (idx = 0, inst = kdev->instances; \ | ||
68 | idx < (kdev)->num_queues_in_use; \ | ||
69 | idx++, inst = knav_queue_idx_to_inst(kdev, idx)) | ||
70 | |||
71 | /** | ||
72 | * knav_queue_notify: qmss queue notifier call | ||
73 | * | ||
74 | * @inst: qmss queue instance, e.g. an accumulated queue | ||
75 | */ | ||
76 | void knav_queue_notify(struct knav_queue_inst *inst) | ||
77 | { | ||
78 | struct knav_queue *qh; | ||
79 | |||
80 | if (!inst) | ||
81 | return; | ||
82 | |||
83 | rcu_read_lock(); | ||
84 | for_each_handle_rcu(qh, inst) { | ||
85 | if (atomic_read(&qh->notifier_enabled) <= 0) | ||
86 | continue; | ||
87 | if (WARN_ON(!qh->notifier_fn)) | ||
88 | continue; | ||
89 | atomic_inc(&qh->stats.notifies); | ||
90 | qh->notifier_fn(qh->notifier_fn_arg); | ||
91 | } | ||
92 | rcu_read_unlock(); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(knav_queue_notify); | ||
95 | |||
96 | static irqreturn_t knav_queue_int_handler(int irq, void *_instdata) | ||
97 | { | ||
98 | struct knav_queue_inst *inst = _instdata; | ||
99 | |||
100 | knav_queue_notify(inst); | ||
101 | return IRQ_HANDLED; | ||
102 | } | ||
103 | |||
104 | static int knav_queue_setup_irq(struct knav_range_info *range, | ||
105 | struct knav_queue_inst *inst) | ||
106 | { | ||
107 | unsigned queue = inst->id - range->queue_base; | ||
108 | unsigned long cpu_map; | ||
109 | int ret = 0, irq; | ||
110 | |||
111 | if (range->flags & RANGE_HAS_IRQ) { | ||
112 | irq = range->irqs[queue].irq; | ||
113 | cpu_map = range->irqs[queue].cpu_map; | ||
114 | ret = request_irq(irq, knav_queue_int_handler, 0, | ||
115 | inst->irq_name, inst); | ||
116 | if (ret) | ||
117 | return ret; | ||
118 | disable_irq(irq); | ||
119 | if (cpu_map) { | ||
120 | ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); | ||
121 | if (ret) { | ||
122 | dev_warn(range->kdev->dev, | ||
123 | "Failed to set IRQ affinity\n"); | ||
124 | return ret; | ||
125 | } | ||
126 | } | ||
127 | } | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static void knav_queue_free_irq(struct knav_queue_inst *inst) | ||
132 | { | ||
133 | struct knav_range_info *range = inst->range; | ||
134 | unsigned queue = inst->id - inst->range->queue_base; | ||
135 | int irq; | ||
136 | |||
137 | if (range->flags & RANGE_HAS_IRQ) { | ||
138 | irq = range->irqs[queue].irq; | ||
139 | irq_set_affinity_hint(irq, NULL); | ||
140 | free_irq(irq, inst); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static inline bool knav_queue_is_busy(struct knav_queue_inst *inst) | ||
145 | { | ||
146 | return !list_empty(&inst->handles); | ||
147 | } | ||
148 | |||
149 | static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst) | ||
150 | { | ||
151 | return inst->range->flags & RANGE_RESERVED; | ||
152 | } | ||
153 | |||
154 | static inline bool knav_queue_is_shared(struct knav_queue_inst *inst) | ||
155 | { | ||
156 | struct knav_queue *tmp; | ||
157 | |||
158 | rcu_read_lock(); | ||
159 | for_each_handle_rcu(tmp, inst) { | ||
160 | if (tmp->flags & KNAV_QUEUE_SHARED) { | ||
161 | rcu_read_unlock(); | ||
162 | return true; | ||
163 | } | ||
164 | } | ||
165 | rcu_read_unlock(); | ||
166 | return false; | ||
167 | } | ||
168 | |||
169 | static inline bool knav_queue_match_type(struct knav_queue_inst *inst, | ||
170 | unsigned type) | ||
171 | { | ||
172 | if ((type == KNAV_QUEUE_QPEND) && | ||
173 | (inst->range->flags & RANGE_HAS_IRQ)) { | ||
174 | return true; | ||
175 | } else if ((type == KNAV_QUEUE_ACC) && | ||
176 | (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { | ||
177 | return true; | ||
178 | } else if ((type == KNAV_QUEUE_GP) && | ||
179 | !(inst->range->flags & | ||
180 | (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) { | ||
181 | return true; | ||
182 | } | ||
183 | return false; | ||
184 | } | ||
185 | |||
186 | static inline struct knav_queue_inst * | ||
187 | knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id) | ||
188 | { | ||
189 | struct knav_queue_inst *inst; | ||
190 | int idx; | ||
191 | |||
192 | for_each_instance(idx, inst, kdev) { | ||
193 | if (inst->id == id) | ||
194 | return inst; | ||
195 | } | ||
196 | return NULL; | ||
197 | } | ||
198 | |||
199 | static inline struct knav_queue_inst *knav_queue_find_by_id(int id) | ||
200 | { | ||
201 | if (kdev->base_id <= id && | ||
202 | kdev->base_id + kdev->num_queues > id) { | ||
203 | id -= kdev->base_id; | ||
204 | return knav_queue_match_id_to_inst(kdev, id); | ||
205 | } | ||
206 | return NULL; | ||
207 | } | ||
208 | |||
209 | static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, | ||
210 | const char *name, unsigned flags) | ||
211 | { | ||
212 | struct knav_queue *qh; | ||
213 | unsigned id; | ||
214 | int ret = 0; | ||
215 | |||
216 | qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); | ||
217 | if (!qh) | ||
218 | return ERR_PTR(-ENOMEM); | ||
219 | |||
220 | qh->flags = flags; | ||
221 | qh->inst = inst; | ||
222 | id = inst->id - inst->qmgr->start_queue; | ||
223 | qh->reg_push = &inst->qmgr->reg_push[id]; | ||
224 | qh->reg_pop = &inst->qmgr->reg_pop[id]; | ||
225 | qh->reg_peek = &inst->qmgr->reg_peek[id]; | ||
226 | |||
227 | /* first opener? */ | ||
228 | if (!knav_queue_is_busy(inst)) { | ||
229 | struct knav_range_info *range = inst->range; | ||
230 | |||
231 | inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); | ||
232 | if (range->ops && range->ops->open_queue) | ||
233 | ret = range->ops->open_queue(range, inst, flags); | ||
234 | |||
235 | if (ret) { | ||
236 | devm_kfree(inst->kdev->dev, qh); | ||
237 | return ERR_PTR(ret); | ||
238 | } | ||
239 | } | ||
240 | list_add_tail_rcu(&qh->list, &inst->handles); | ||
241 | return qh; | ||
242 | } | ||
243 | |||
244 | static struct knav_queue * | ||
245 | knav_queue_open_by_id(const char *name, unsigned id, unsigned flags) | ||
246 | { | ||
247 | struct knav_queue_inst *inst; | ||
248 | struct knav_queue *qh; | ||
249 | |||
250 | mutex_lock(&knav_dev_lock); | ||
251 | |||
252 | qh = ERR_PTR(-ENODEV); | ||
253 | inst = knav_queue_find_by_id(id); | ||
254 | if (!inst) | ||
255 | goto unlock_ret; | ||
256 | |||
257 | qh = ERR_PTR(-EEXIST); | ||
258 | if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst)) | ||
259 | goto unlock_ret; | ||
260 | |||
261 | qh = ERR_PTR(-EBUSY); | ||
262 | if ((flags & KNAV_QUEUE_SHARED) && | ||
263 | (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst))) | ||
264 | goto unlock_ret; | ||
265 | |||
266 | qh = __knav_queue_open(inst, name, flags); | ||
267 | |||
268 | unlock_ret: | ||
269 | mutex_unlock(&knav_dev_lock); | ||
270 | |||
271 | return qh; | ||
272 | } | ||
273 | |||
274 | static struct knav_queue *knav_queue_open_by_type(const char *name, | ||
275 | unsigned type, unsigned flags) | ||
276 | { | ||
277 | struct knav_queue_inst *inst; | ||
278 | struct knav_queue *qh = ERR_PTR(-EINVAL); | ||
279 | int idx; | ||
280 | |||
281 | mutex_lock(&knav_dev_lock); | ||
282 | |||
283 | for_each_instance(idx, inst, kdev) { | ||
284 | if (knav_queue_is_reserved(inst)) | ||
285 | continue; | ||
286 | if (!knav_queue_match_type(inst, type)) | ||
287 | continue; | ||
288 | if (knav_queue_is_busy(inst)) | ||
289 | continue; | ||
290 | qh = __knav_queue_open(inst, name, flags); | ||
291 | goto unlock_ret; | ||
292 | } | ||
293 | |||
294 | unlock_ret: | ||
295 | mutex_unlock(&knav_dev_lock); | ||
296 | return qh; | ||
297 | } | ||
298 | |||
299 | static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled) | ||
300 | { | ||
301 | struct knav_range_info *range = inst->range; | ||
302 | |||
303 | if (range->ops && range->ops->set_notify) | ||
304 | range->ops->set_notify(range, inst, enabled); | ||
305 | } | ||
306 | |||
307 | static int knav_queue_enable_notifier(struct knav_queue *qh) | ||
308 | { | ||
309 | struct knav_queue_inst *inst = qh->inst; | ||
310 | bool first; | ||
311 | |||
312 | if (WARN_ON(!qh->notifier_fn)) | ||
313 | return -EINVAL; | ||
314 | |||
315 | /* Adjust the per handle notifier count */ | ||
316 | first = (atomic_inc_return(&qh->notifier_enabled) == 1); | ||
317 | if (!first) | ||
318 | return 0; /* nothing to do */ | ||
319 | |||
320 | /* Now adjust the per instance notifier count */ | ||
321 | first = (atomic_inc_return(&inst->num_notifiers) == 1); | ||
322 | if (first) | ||
323 | knav_queue_set_notify(inst, true); | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static int knav_queue_disable_notifier(struct knav_queue *qh) | ||
329 | { | ||
330 | struct knav_queue_inst *inst = qh->inst; | ||
331 | bool last; | ||
332 | |||
333 | last = (atomic_dec_return(&qh->notifier_enabled) == 0); | ||
334 | if (!last) | ||
335 | return 0; /* nothing to do */ | ||
336 | |||
337 | last = (atomic_dec_return(&inst->num_notifiers) == 0); | ||
338 | if (last) | ||
339 | knav_queue_set_notify(inst, false); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static int knav_queue_set_notifier(struct knav_queue *qh, | ||
345 | struct knav_queue_notify_config *cfg) | ||
346 | { | ||
347 | knav_queue_notify_fn old_fn = qh->notifier_fn; | ||
348 | |||
349 | if (!cfg) | ||
350 | return -EINVAL; | ||
351 | |||
352 | if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) | ||
353 | return -ENOTSUPP; | ||
354 | |||
355 | if (!cfg->fn && old_fn) | ||
356 | knav_queue_disable_notifier(qh); | ||
357 | |||
358 | qh->notifier_fn = cfg->fn; | ||
359 | qh->notifier_fn_arg = cfg->fn_arg; | ||
360 | |||
361 | if (cfg->fn && !old_fn) | ||
362 | knav_queue_enable_notifier(qh); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static int knav_gp_set_notify(struct knav_range_info *range, | ||
368 | struct knav_queue_inst *inst, | ||
369 | bool enabled) | ||
370 | { | ||
371 | unsigned queue; | ||
372 | |||
373 | if (range->flags & RANGE_HAS_IRQ) { | ||
374 | queue = inst->id - range->queue_base; | ||
375 | if (enabled) | ||
376 | enable_irq(range->irqs[queue].irq); | ||
377 | else | ||
378 | disable_irq_nosync(range->irqs[queue].irq); | ||
379 | } | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int knav_gp_open_queue(struct knav_range_info *range, | ||
384 | struct knav_queue_inst *inst, unsigned flags) | ||
385 | { | ||
386 | return knav_queue_setup_irq(range, inst); | ||
387 | } | ||
388 | |||
389 | static int knav_gp_close_queue(struct knav_range_info *range, | ||
390 | struct knav_queue_inst *inst) | ||
391 | { | ||
392 | knav_queue_free_irq(inst); | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | struct knav_range_ops knav_gp_range_ops = { | ||
397 | .set_notify = knav_gp_set_notify, | ||
398 | .open_queue = knav_gp_open_queue, | ||
399 | .close_queue = knav_gp_close_queue, | ||
400 | }; | ||
401 | |||
402 | |||
403 | static int knav_queue_get_count(void *qhandle) | ||
404 | { | ||
405 | struct knav_queue *qh = qhandle; | ||
406 | struct knav_queue_inst *inst = qh->inst; | ||
407 | |||
408 | return readl_relaxed(&qh->reg_peek[0].entry_count) + | ||
409 | atomic_read(&inst->desc_count); | ||
410 | } | ||
411 | |||
412 | static void knav_queue_debug_show_instance(struct seq_file *s, | ||
413 | struct knav_queue_inst *inst) | ||
414 | { | ||
415 | struct knav_device *kdev = inst->kdev; | ||
416 | struct knav_queue *qh; | ||
417 | |||
418 | if (!knav_queue_is_busy(inst)) | ||
419 | return; | ||
420 | |||
421 | seq_printf(s, "\tqueue id %d (%s)\n", | ||
422 | kdev->base_id + inst->id, inst->name); | ||
423 | for_each_handle_rcu(qh, inst) { | ||
424 | seq_printf(s, "\t\thandle %p: ", qh); | ||
425 | seq_printf(s, "pushes %8d, ", | ||
426 | atomic_read(&qh->stats.pushes)); | ||
427 | seq_printf(s, "pops %8d, ", | ||
428 | atomic_read(&qh->stats.pops)); | ||
429 | seq_printf(s, "count %8d, ", | ||
430 | knav_queue_get_count(qh)); | ||
431 | seq_printf(s, "notifies %8d, ", | ||
432 | atomic_read(&qh->stats.notifies)); | ||
433 | seq_printf(s, "push errors %8d, ", | ||
434 | atomic_read(&qh->stats.push_errors)); | ||
435 | seq_printf(s, "pop errors %8d\n", | ||
436 | atomic_read(&qh->stats.pop_errors)); | ||
437 | } | ||
438 | } | ||
439 | |||
440 | static int knav_queue_debug_show(struct seq_file *s, void *v) | ||
441 | { | ||
442 | struct knav_queue_inst *inst; | ||
443 | int idx; | ||
444 | |||
445 | mutex_lock(&knav_dev_lock); | ||
446 | seq_printf(s, "%s: %u-%u\n", | ||
447 | dev_name(kdev->dev), kdev->base_id, | ||
448 | kdev->base_id + kdev->num_queues - 1); | ||
449 | for_each_instance(idx, inst, kdev) | ||
450 | knav_queue_debug_show_instance(s, inst); | ||
451 | mutex_unlock(&knav_dev_lock); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int knav_queue_debug_open(struct inode *inode, struct file *file) | ||
457 | { | ||
458 | return single_open(file, knav_queue_debug_show, NULL); | ||
459 | } | ||
460 | |||
461 | static const struct file_operations knav_queue_debug_ops = { | ||
462 | .open = knav_queue_debug_open, | ||
463 | .read = seq_read, | ||
464 | .llseek = seq_lseek, | ||
465 | .release = single_release, | ||
466 | }; | ||
467 | |||
468 | static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout, | ||
469 | u32 flags) | ||
470 | { | ||
471 | unsigned long end; | ||
472 | u32 val = 0; | ||
473 | |||
474 | end = jiffies + msecs_to_jiffies(timeout); | ||
475 | while (time_after(end, jiffies)) { | ||
476 | val = readl_relaxed(addr); | ||
477 | if (flags) | ||
478 | val &= flags; | ||
479 | if (!val) | ||
480 | break; | ||
481 | cpu_relax(); | ||
482 | } | ||
483 | return val ? -ETIMEDOUT : 0; | ||
484 | } | ||
485 | |||
486 | |||
487 | static int knav_queue_flush(struct knav_queue *qh) | ||
488 | { | ||
489 | struct knav_queue_inst *inst = qh->inst; | ||
490 | unsigned id = inst->id - inst->qmgr->start_queue; | ||
491 | |||
492 | atomic_set(&inst->desc_count, 0); | ||
493 | writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh); | ||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | /** | ||
498 | * knav_queue_open() - open a hardware queue | ||
499 | * @name - name to give the queue handle | ||
500 | * @id - desired queue number if any, or specifies the type | ||
501 | * of queue (KNAV_QUEUE_QPEND, KNAV_QUEUE_ACC or KNAV_QUEUE_GP) | ||
502 | * @flags - the following flags are applicable to queues: | ||
503 | * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are | ||
504 | * exclusive by default. | ||
505 | * Subsequent attempts to open a shared queue should | ||
506 | * also have this flag. | ||
507 | * | ||
508 | * Returns a handle to the open hardware queue if successful. Use IS_ERR() | ||
509 | * to check the returned value for error codes. | ||
510 | */ | ||
511 | void *knav_queue_open(const char *name, unsigned id, | ||
512 | unsigned flags) | ||
513 | { | ||
514 | struct knav_queue *qh = ERR_PTR(-EINVAL); | ||
515 | |||
516 | switch (id) { | ||
517 | case KNAV_QUEUE_QPEND: | ||
518 | case KNAV_QUEUE_ACC: | ||
519 | case KNAV_QUEUE_GP: | ||
520 | qh = knav_queue_open_by_type(name, id, flags); | ||
521 | break; | ||
522 | |||
523 | default: | ||
524 | qh = knav_queue_open_by_id(name, id, flags); | ||
525 | break; | ||
526 | } | ||
527 | return qh; | ||
528 | } | ||
529 | EXPORT_SYMBOL_GPL(knav_queue_open); | ||
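
A minimal usage sketch (caller code; the names and the queue number are hypothetical): open any free accumulated queue by type, then a specific queue by number as shared:

	void *rxq, *txq;

	/* any free accumulator-backed queue, exclusive by default */
	rxq = knav_queue_open("my-rx", KNAV_QUEUE_ACC, 0);
	if (IS_ERR(rxq))
		return PTR_ERR(rxq);

	/* a specific queue number, shareable between openers */
	txq = knav_queue_open("my-tx", 650, KNAV_QUEUE_SHARED);
	if (IS_ERR(txq)) {
		knav_queue_close(rxq);
		return PTR_ERR(txq);
	}
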
530 | |||
531 | /** | ||
532 | * knav_queue_close() - close a hardware queue handle | ||
533 | * @qh - handle to close | ||
534 | */ | ||
535 | void knav_queue_close(void *qhandle) | ||
536 | { | ||
537 | struct knav_queue *qh = qhandle; | ||
538 | struct knav_queue_inst *inst = qh->inst; | ||
539 | |||
540 | while (atomic_read(&qh->notifier_enabled) > 0) | ||
541 | knav_queue_disable_notifier(qh); | ||
542 | |||
543 | mutex_lock(&knav_dev_lock); | ||
544 | list_del_rcu(&qh->list); | ||
545 | mutex_unlock(&knav_dev_lock); | ||
546 | synchronize_rcu(); | ||
547 | if (!knav_queue_is_busy(inst)) { | ||
548 | struct knav_range_info *range = inst->range; | ||
549 | |||
550 | if (range->ops && range->ops->close_queue) | ||
551 | range->ops->close_queue(range, inst); | ||
552 | } | ||
553 | devm_kfree(inst->kdev->dev, qh); | ||
554 | } | ||
555 | EXPORT_SYMBOL_GPL(knav_queue_close); | ||
556 | |||
557 | /** | ||
558 | * knav_queue_device_control() - Perform control operations on a queue | ||
559 | * @qh - queue handle | ||
560 | * @cmd - control commands | ||
561 | * @arg - command argument | ||
562 | * | ||
563 | * Returns 0 on success, errno otherwise. | ||
564 | */ | ||
565 | int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd, | ||
566 | unsigned long arg) | ||
567 | { | ||
568 | struct knav_queue *qh = qhandle; | ||
569 | struct knav_queue_notify_config *cfg; | ||
570 | int ret; | ||
571 | |||
572 | switch ((int)cmd) { | ||
573 | case KNAV_QUEUE_GET_ID: | ||
574 | ret = qh->inst->kdev->base_id + qh->inst->id; | ||
575 | break; | ||
576 | |||
577 | case KNAV_QUEUE_FLUSH: | ||
578 | ret = knav_queue_flush(qh); | ||
579 | break; | ||
580 | |||
581 | case KNAV_QUEUE_SET_NOTIFIER: | ||
582 | cfg = (void *)arg; | ||
583 | ret = knav_queue_set_notifier(qh, cfg); | ||
584 | break; | ||
585 | |||
586 | case KNAV_QUEUE_ENABLE_NOTIFY: | ||
587 | ret = knav_queue_enable_notifier(qh); | ||
588 | break; | ||
589 | |||
590 | case KNAV_QUEUE_DISABLE_NOTIFY: | ||
591 | ret = knav_queue_disable_notifier(qh); | ||
592 | break; | ||
593 | |||
594 | case KNAV_QUEUE_GET_COUNT: | ||
595 | ret = knav_queue_get_count(qh); | ||
596 | break; | ||
597 | |||
598 | default: | ||
599 | ret = -ENOTSUPP; | ||
600 | break; | ||
601 | } | ||
602 | return ret; | ||
603 | } | ||
604 | EXPORT_SYMBOL_GPL(knav_queue_device_control); | ||
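
A sketch of hooking up a notification callback through the control interface; my_rx_notify and priv are hypothetical caller code, while the fn/fn_arg fields match the assignments in knav_queue_set_notifier above:

	static void my_rx_notify(void *arg)
	{
		/* e.g. schedule NAPI or a tasklet to drain the queue */
	}

	struct knav_queue_notify_config cfg = {
		.fn	= my_rx_notify,
		.fn_arg	= priv,
	};
	ret = knav_queue_device_control(rxq, KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&cfg);
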
605 | |||
606 | |||
607 | |||
608 | /** | ||
609 | * knav_queue_push() - push data (or descriptor) to the tail of a queue | ||
610 | * @qh - hardware queue handle | ||
611 | * @dma - DMA address of the data (or descriptor) to push | ||
612 | * @size - size of data to push | ||
613 | * @flags - can be used to pass additional information | ||
614 | * | ||
615 | * Returns 0 on success, errno otherwise. | ||
616 | */ | ||
617 | int knav_queue_push(void *qhandle, dma_addr_t dma, | ||
618 | unsigned size, unsigned flags) | ||
619 | { | ||
620 | struct knav_queue *qh = qhandle; | ||
621 | u32 val; | ||
622 | |||
623 | val = (u32)dma | ((size / 16) - 1); | ||
624 | writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh); | ||
625 | |||
626 | atomic_inc(&qh->stats.pushes); | ||
627 | return 0; | ||
628 | } | ||
629 | |||
630 | /** | ||
631 | * knav_queue_pop() - pop data (or descriptor) from the head of a queue | ||
632 | * @qh - hardware queue handle | ||
633 | * @size - (optional) size of the data popped. | ||
634 | * | ||
635 | * Returns a DMA address on success, 0 on failure. | ||
636 | */ | ||
637 | dma_addr_t knav_queue_pop(void *qhandle, unsigned *size) | ||
638 | { | ||
639 | struct knav_queue *qh = qhandle; | ||
640 | struct knav_queue_inst *inst = qh->inst; | ||
641 | dma_addr_t dma; | ||
642 | u32 val, idx; | ||
643 | |||
644 | /* are we accumulated? */ | ||
645 | if (inst->descs) { | ||
646 | if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) { | ||
647 | atomic_inc(&inst->desc_count); | ||
648 | return 0; | ||
649 | } | ||
650 | idx = atomic_inc_return(&inst->desc_head); | ||
651 | idx &= ACC_DESCS_MASK; | ||
652 | val = inst->descs[idx]; | ||
653 | } else { | ||
654 | val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh); | ||
655 | if (unlikely(!val)) | ||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | dma = val & DESC_PTR_MASK; | ||
660 | if (size) | ||
661 | *size = ((val & DESC_SIZE_MASK) + 1) * 16; | ||
662 | |||
663 | atomic_inc(&qh->stats.pops); | ||
664 | return dma; | ||
665 | } | ||
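
Push packs (size / 16) - 1 into the low bits of the register word (descriptors are at least 16-byte aligned, so those bits are free), and pop reverses the encoding. A round trip, assuming a 64-byte descriptor at desc_dma; process_desc is a hypothetical consumer:

	unsigned size;
	dma_addr_t dma;

	/* 64 bytes encodes as (64 / 16) - 1 == 3 in the low bits */
	knav_queue_push(rxq, desc_dma, 64, 0);

	dma = knav_queue_pop(rxq, &size);
	if (dma)
		process_desc(dma, size);	/* size comes back as 64 */
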
666 | |||
667 | /* carve out descriptors and push into queue */ | ||
668 | static void kdesc_fill_pool(struct knav_pool *pool) | ||
669 | { | ||
670 | struct knav_region *region; | ||
671 | int i; | ||
672 | |||
673 | region = pool->region; | ||
674 | pool->desc_size = region->desc_size; | ||
675 | for (i = 0; i < pool->num_desc; i++) { | ||
676 | int index = pool->region_offset + i; | ||
677 | dma_addr_t dma_addr; | ||
678 | unsigned dma_size; | ||
679 | dma_addr = region->dma_start + (region->desc_size * index); | ||
680 | dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES); | ||
681 | dma_sync_single_for_device(pool->dev, dma_addr, dma_size, | ||
682 | DMA_TO_DEVICE); | ||
683 | knav_queue_push(pool->queue, dma_addr, dma_size, 0); | ||
684 | } | ||
685 | } | ||
686 | |||
687 | /* pop out descriptors and close the queue */ | ||
688 | static void kdesc_empty_pool(struct knav_pool *pool) | ||
689 | { | ||
690 | dma_addr_t dma; | ||
691 | unsigned size; | ||
692 | void *desc; | ||
693 | int i; | ||
694 | |||
695 | if (!pool->queue) | ||
696 | return; | ||
697 | |||
698 | for (i = 0;; i++) { | ||
699 | dma = knav_queue_pop(pool->queue, &size); | ||
700 | if (!dma) | ||
701 | break; | ||
702 | desc = knav_pool_desc_dma_to_virt(pool, dma); | ||
703 | if (!desc) { | ||
704 | dev_dbg(pool->kdev->dev, | ||
705 | "couldn't unmap desc, continuing\n"); | ||
706 | continue; | ||
707 | } | ||
708 | } | ||
709 | WARN_ON(i != pool->num_desc); | ||
710 | knav_queue_close(pool->queue); | ||
711 | } | ||
712 | |||
713 | |||
714 | /* Get the DMA address of a descriptor */ | ||
715 | dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt) | ||
716 | { | ||
717 | struct knav_pool *pool = ph; | ||
718 | return pool->region->dma_start + (virt - pool->region->virt_start); | ||
719 | } | ||
720 | |||
721 | void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma) | ||
722 | { | ||
723 | struct knav_pool *pool = ph; | ||
724 | return pool->region->virt_start + (dma - pool->region->dma_start); | ||
725 | } | ||
726 | |||
727 | /** | ||
728 | * knav_pool_create() - Create a pool of descriptors | ||
729 | * @name - name to give the pool handle | ||
730 | * @num_desc - numbers of descriptors in the pool | ||
731 | * @region_id - QMSS region id from which the descriptors are to be | ||
732 | * allocated. | ||
733 | * | ||
734 | * Returns a pool handle on success. | ||
735 | * Use IS_ERR_OR_NULL() to identify error values on return. | ||
736 | */ | ||
737 | void *knav_pool_create(const char *name, | ||
738 | int num_desc, int region_id) | ||
739 | { | ||
740 | struct knav_region *reg_itr, *region = NULL; | ||
741 | struct knav_pool *pool, *pi; | ||
742 | struct list_head *node; | ||
743 | unsigned last_offset; | ||
744 | bool slot_found; | ||
745 | int ret; | ||
746 | |||
747 | if (!kdev->dev) | ||
748 | return ERR_PTR(-ENODEV); | ||
749 | |||
750 | pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); | ||
751 | if (!pool) { | ||
752 | dev_err(kdev->dev, "out of memory allocating pool\n"); | ||
753 | return ERR_PTR(-ENOMEM); | ||
754 | } | ||
755 | |||
756 | for_each_region(kdev, reg_itr) { | ||
757 | if (reg_itr->id != region_id) | ||
758 | continue; | ||
759 | region = reg_itr; | ||
760 | break; | ||
761 | } | ||
762 | |||
763 | if (!region) { | ||
764 | dev_err(kdev->dev, "region-id(%d) not found\n", region_id); | ||
765 | ret = -EINVAL; | ||
766 | goto err; | ||
767 | } | ||
768 | |||
769 | pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0); | ||
770 | if (IS_ERR_OR_NULL(pool->queue)) { | ||
771 | dev_err(kdev->dev, | ||
772 | "failed to open queue for pool(%s), error %ld\n", | ||
773 | name, PTR_ERR(pool->queue)); | ||
774 | ret = PTR_ERR(pool->queue); | ||
775 | goto err; | ||
776 | } | ||
777 | |||
778 | pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); | ||
779 | pool->kdev = kdev; | ||
780 | pool->dev = kdev->dev; | ||
781 | |||
782 | mutex_lock(&knav_dev_lock); | ||
783 | |||
784 | if (num_desc > (region->num_desc - region->used_desc)) { | ||
785 | dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n", | ||
786 | region_id, name); | ||
787 | ret = -ENOMEM; | ||
788 | goto err; | ||
789 | } | ||
790 | |||
791 | /* Region maintains a sorted (by region offset) list of pools | ||
792 | * use the first free slot that is large enough to accommodate | ||
793 | * the request | ||
794 | */ | ||
795 | last_offset = 0; | ||
796 | slot_found = false; | ||
797 | node = ®ion->pools; | ||
798 | list_for_each_entry(pi, ®ion->pools, region_inst) { | ||
799 | if ((pi->region_offset - last_offset) >= num_desc) { | ||
800 | slot_found = true; | ||
801 | break; | ||
802 | } | ||
803 | last_offset = pi->region_offset + pi->num_desc; | ||
804 | } | ||
805 | node = &pi->region_inst; | ||
806 | |||
807 | if (slot_found) { | ||
808 | pool->region = region; | ||
809 | pool->num_desc = num_desc; | ||
810 | pool->region_offset = last_offset; | ||
811 | region->used_desc += num_desc; | ||
812 | list_add_tail(&pool->list, &kdev->pools); | ||
813 | list_add_tail(&pool->region_inst, node); | ||
814 | } else { | ||
815 | dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n", | ||
816 | name, region_id); | ||
817 | ret = -ENOMEM; | ||
818 | goto err; | ||
819 | } | ||
820 | |||
821 | mutex_unlock(&knav_dev_lock); | ||
822 | kdesc_fill_pool(pool); | ||
823 | return pool; | ||
824 | |||
825 | err: | ||
826 | mutex_unlock(&knav_dev_lock); | ||
827 | kfree(pool->name); | ||
828 | devm_kfree(kdev->dev, pool); | ||
829 | return ERR_PTR(ret); | ||
830 | } | ||
831 | EXPORT_SYMBOL_GPL(knav_pool_create); | ||
832 | |||
833 | /** | ||
834 | * knav_pool_destroy() - Free a pool of descriptors | ||
835 | * @pool - pool handle | ||
836 | */ | ||
837 | void knav_pool_destroy(void *ph) | ||
838 | { | ||
839 | struct knav_pool *pool = ph; | ||
840 | |||
841 | if (!pool) | ||
842 | return; | ||
843 | |||
844 | if (!pool->region) | ||
845 | return; | ||
846 | |||
847 | kdesc_empty_pool(pool); | ||
848 | mutex_lock(&knav_dev_lock); | ||
849 | |||
850 | pool->region->used_desc -= pool->num_desc; | ||
851 | list_del(&pool->region_inst); | ||
852 | list_del(&pool->list); | ||
853 | |||
854 | mutex_unlock(&knav_dev_lock); | ||
855 | kfree(pool->name); | ||
856 | devm_kfree(kdev->dev, pool); | ||
857 | } | ||
858 | EXPORT_SYMBOL_GPL(knav_pool_destroy); | ||
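
A usage sketch with a hypothetical name, descriptor count and region id: carve 512 descriptors out of QMSS region 12, then release them:

	void *pool;

	pool = knav_pool_create("my-pool", 512, 12);
	if (IS_ERR_OR_NULL(pool))
		return PTR_ERR(pool);

	/* ... allocate, map and use descriptors ... */

	knav_pool_destroy(pool);
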
859 | |||
860 | |||
861 | /** | ||
862 | * knav_pool_desc_get() - Get a descriptor from the pool | ||
863 | * @pool - pool handle | ||
864 | * | ||
865 | * Returns a descriptor from the pool, or an error pointer on failure. | ||
866 | */ | ||
867 | void *knav_pool_desc_get(void *ph) | ||
868 | { | ||
869 | struct knav_pool *pool = ph; | ||
870 | dma_addr_t dma; | ||
871 | unsigned size; | ||
872 | void *data; | ||
873 | |||
874 | dma = knav_queue_pop(pool->queue, &size); | ||
875 | if (unlikely(!dma)) | ||
876 | return ERR_PTR(-ENOMEM); | ||
877 | data = knav_pool_desc_dma_to_virt(pool, dma); | ||
878 | return data; | ||
879 | } | ||
880 | |||
881 | /** | ||
882 | * knav_pool_desc_put() - return a descriptor to the pool | ||
883 | * @pool - pool handle | ||
884 | */ | ||
885 | void knav_pool_desc_put(void *ph, void *desc) | ||
886 | { | ||
887 | struct knav_pool *pool = ph; | ||
888 | dma_addr_t dma; | ||
889 | dma = knav_pool_desc_virt_to_dma(pool, desc); | ||
890 | knav_queue_push(pool->queue, dma, pool->region->desc_size, 0); | ||
891 | } | ||
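
Get and put form the pool's allocation primitive; a minimal sketch of the round trip:

	void *desc = knav_pool_desc_get(pool);
	if (IS_ERR(desc))
		return -ENOMEM;

	/* ... fill in the descriptor ... */

	knav_pool_desc_put(pool, desc);	/* back onto the free queue */
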
892 | |||
893 | /** | ||
894 | * knav_pool_desc_map() - Map descriptor for DMA transfer | ||
895 | * @pool - pool handle | ||
896 | * @desc - address of descriptor to map | ||
897 | * @size - size of descriptor to map | ||
898 | * @dma - DMA address return pointer | ||
899 | * @dma_sz - adjusted (cache-line aligned) size return pointer | ||
900 | * | ||
901 | * Returns 0 on success, errno otherwise. | ||
902 | */ | ||
903 | int knav_pool_desc_map(void *ph, void *desc, unsigned size, | ||
904 | dma_addr_t *dma, unsigned *dma_sz) | ||
905 | { | ||
906 | struct knav_pool *pool = ph; | ||
907 | *dma = knav_pool_desc_virt_to_dma(pool, desc); | ||
908 | size = min(size, pool->region->desc_size); | ||
909 | size = ALIGN(size, SMP_CACHE_BYTES); | ||
910 | *dma_sz = size; | ||
911 | dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE); | ||
912 | |||
913 | /* Ensure the descriptor writes reach memory before the hardware sees it */ | ||
914 | __iowmb(); | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | /** | ||
920 | * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer | ||
921 | * @ph: pool handle | ||
922 | * @dma: DMA address of descriptor to unmap | ||
923 | * @dma_sz: size of descriptor to unmap | ||
924 | * | ||
925 | * Returns the descriptor address on success; use IS_ERR_OR_NULL() to | ||
926 | * identify error values on return. | ||
927 | */ | ||
928 | void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz) | ||
929 | { | ||
930 | struct knav_pool *pool = ph; | ||
931 | unsigned desc_sz; | ||
932 | void *desc; | ||
933 | |||
934 | desc_sz = min(dma_sz, pool->region->desc_size); | ||
935 | desc = knav_pool_desc_dma_to_virt(pool, dma); | ||
936 | dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE); | ||
937 | prefetch(desc); | ||
938 | return desc; | ||
939 | } | ||
940 | |||
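The map/unmap pair above exists because region memory is a streaming DMA mapping: the CPU must sync a descriptor for the device before pushing it to hardware, and back for the CPU after popping it. A hedged sketch of the handoff; `tx_queue` and `done_queue` stand in for queue handles a real client would have opened:

	struct knav_dma_desc *desc = knav_pool_desc_get(pool);
	dma_addr_t dma;
	unsigned dma_sz;

	/* CPU fills the descriptor, then hands ownership to the hardware */
	if (!knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz))
		knav_queue_push(tx_queue, dma, dma_sz, 0);

	/* later, when hardware recycles it via a completion queue */
	dma = knav_queue_pop(done_queue, &dma_sz);
	desc = knav_pool_desc_unmap(pool, dma, dma_sz);	/* CPU owns it again */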
941 | /** | ||
942 | * knav_pool_count() - Get the number of descriptors in pool. | ||
943 | * @ph: pool handle | ||
944 | * Returns the number of descriptors currently in the pool. | ||
945 | */ | ||
946 | int knav_pool_count(void *ph) | ||
947 | { | ||
948 | struct knav_pool *pool = ph; | ||
949 | return knav_queue_get_count(pool->queue); | ||
950 | } | ||
951 | |||
952 | static void knav_queue_setup_region(struct knav_device *kdev, | ||
953 | struct knav_region *region) | ||
954 | { | ||
955 | unsigned hw_num_desc, hw_desc_size, size; | ||
956 | struct knav_reg_region __iomem *regs; | ||
957 | struct knav_qmgr_info *qmgr; | ||
958 | struct knav_pool *pool; | ||
959 | int id = region->id; | ||
960 | struct page *page; | ||
961 | |||
962 | /* unused region? */ | ||
963 | if (!region->num_desc) { | ||
964 | dev_warn(kdev->dev, "unused region %s\n", region->name); | ||
965 | return; | ||
966 | } | ||
967 | |||
968 | /* did we force fit ourselves into nothingness? */ | ||
969 | if (region->num_desc < 32) { | ||
970 | region->num_desc = 0; | ||
971 | dev_warn(kdev->dev, "too few descriptors in region %s\n", | ||
972 | region->name); | ||
973 | return; | ||
974 | } | ||
975 | |||
976 | /* get hardware descriptor value (num_desc >= 32 here, so ilog2() is safe) */ | ||
977 | hw_num_desc = ilog2(region->num_desc - 1) + 1; | ||
978 | |||
979 | size = region->num_desc * region->desc_size; | ||
980 | region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA | | ||
981 | GFP_DMA32); | ||
982 | if (!region->virt_start) { | ||
983 | region->num_desc = 0; | ||
984 | dev_err(kdev->dev, "memory alloc failed for region %s\n", | ||
985 | region->name); | ||
986 | return; | ||
987 | } | ||
988 | region->virt_end = region->virt_start + size; | ||
989 | page = virt_to_page(region->virt_start); | ||
990 | |||
991 | region->dma_start = dma_map_page(kdev->dev, page, 0, size, | ||
992 | DMA_BIDIRECTIONAL); | ||
993 | if (dma_mapping_error(kdev->dev, region->dma_start)) { | ||
994 | dev_err(kdev->dev, "dma map failed for region %s\n", | ||
995 | region->name); | ||
996 | goto fail; | ||
997 | } | ||
998 | region->dma_end = region->dma_start + size; | ||
999 | |||
1000 | pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); | ||
1001 | if (!pool) { | ||
1002 | dev_err(kdev->dev, "out of memory allocating dummy pool\n"); | ||
1003 | goto fail; | ||
1004 | } | ||
1005 | pool->num_desc = 0; | ||
1006 | pool->region_offset = region->num_desc; | ||
1007 | list_add(&pool->region_inst, ®ion->pools); | ||
1008 | |||
1009 | dev_dbg(kdev->dev, | ||
1010 | "region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n", | ||
1011 | region->name, id, region->desc_size, region->num_desc, | ||
1012 | region->link_index, region->dma_start, region->dma_end, | ||
1013 | region->virt_start, region->virt_end); | ||
1014 | |||
1015 | hw_desc_size = (region->desc_size / 16) - 1; | ||
1016 | hw_num_desc -= 5; | ||
1017 | |||
1018 | for_each_qmgr(kdev, qmgr) { | ||
1019 | regs = qmgr->reg_region + id; | ||
1020 | writel_relaxed(region->dma_start, ®s->base); | ||
1021 | writel_relaxed(region->link_index, ®s->start_index); | ||
1022 | writel_relaxed(hw_desc_size << 16 | hw_num_desc, | ||
1023 | ®s->size_count); | ||
1024 | } | ||
1025 | return; | ||
1026 | |||
1027 | fail: | ||
1028 | if (region->dma_start) | ||
1029 | dma_unmap_page(kdev->dev, region->dma_start, size, | ||
1030 | DMA_BIDIRECTIONAL); | ||
1031 | if (region->virt_start) | ||
1032 | free_pages_exact(region->virt_start, size); | ||
1033 | region->num_desc = 0; | ||
1034 | return; | ||
1035 | } | ||
1036 | |||
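The size_count value written above packs the descriptor size in 16-byte units (minus one) into the upper half, and the descriptor count as a power-of-two exponent (minus five) into the lower half. A worked example for a hypothetical region of 4096 descriptors of 128 bytes each:

	hw_num_desc  = ilog2(4096 - 1) + 1 = 12;  12 - 5 = 7
	hw_desc_size = (128 / 16) - 1 = 7
	size_count   = (7 << 16) | 7

i.e. the queue manager sees 2^(7 + 5) = 4096 descriptors of 16 * (7 + 1) = 128 bytes. This also shows why regions below 32 descriptors are rejected: the exponent field cannot encode fewer than 2^5 entries.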
1037 | static const char *knav_queue_find_name(struct device_node *node) | ||
1038 | { | ||
1039 | const char *name; | ||
1040 | |||
1041 | if (of_property_read_string(node, "label", &name) < 0) | ||
1042 | name = node->name; | ||
1043 | if (!name) | ||
1044 | name = "unknown"; | ||
1045 | return name; | ||
1046 | } | ||
1047 | |||
1048 | static int knav_queue_setup_regions(struct knav_device *kdev, | ||
1049 | struct device_node *regions) | ||
1050 | { | ||
1051 | struct device *dev = kdev->dev; | ||
1052 | struct knav_region *region; | ||
1053 | struct device_node *child; | ||
1054 | u32 temp[2]; | ||
1055 | int ret; | ||
1056 | |||
1057 | for_each_child_of_node(regions, child) { | ||
1058 | region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); | ||
1059 | if (!region) { | ||
1060 | dev_err(dev, "out of memory allocating region\n"); | ||
1061 | return -ENOMEM; | ||
1062 | } | ||
1063 | |||
1064 | region->name = knav_queue_find_name(child); | ||
1065 | of_property_read_u32(child, "id", ®ion->id); | ||
1066 | ret = of_property_read_u32_array(child, "region-spec", temp, 2); | ||
1067 | if (!ret) { | ||
1068 | region->num_desc = temp[0]; | ||
1069 | region->desc_size = temp[1]; | ||
1070 | } else { | ||
1071 | dev_err(dev, "invalid region info %s\n", region->name); | ||
1072 | devm_kfree(dev, region); | ||
1073 | continue; | ||
1074 | } | ||
1075 | |||
1076 | if (!of_get_property(child, "link-index", NULL)) { | ||
1077 | dev_err(dev, "No link info for %s\n", region->name); | ||
1078 | devm_kfree(dev, region); | ||
1079 | continue; | ||
1080 | } | ||
1081 | ret = of_property_read_u32(child, "link-index", | ||
1082 | ®ion->link_index); | ||
1083 | if (ret) { | ||
1084 | dev_err(dev, "link index not found for %s\n", | ||
1085 | region->name); | ||
1086 | devm_kfree(dev, region); | ||
1087 | continue; | ||
1088 | } | ||
1089 | |||
1090 | INIT_LIST_HEAD(®ion->pools); | ||
1091 | list_add_tail(®ion->list, &kdev->regions); | ||
1092 | } | ||
1093 | if (list_empty(&kdev->regions)) { | ||
1094 | dev_err(dev, "no valid region information found\n"); | ||
1095 | return -ENODEV; | ||
1096 | } | ||
1097 | |||
1098 | /* Next, we run through the regions and set things up */ | ||
1099 | for_each_region(kdev, region) | ||
1100 | knav_queue_setup_region(kdev, region); | ||
1101 | |||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | static int knav_get_link_ram(struct knav_device *kdev, | ||
1106 | const char *name, | ||
1107 | struct knav_link_ram_block *block) | ||
1108 | { | ||
1109 | struct platform_device *pdev = to_platform_device(kdev->dev); | ||
1110 | struct device_node *node = pdev->dev.of_node; | ||
1111 | u32 temp[2]; | ||
1112 | |||
1113 | /* | ||
1114 | * Note: link ram resources are specified in "entry" sized units. In | ||
1115 | * reality, although entries are ~40 bits in hardware, we treat them as | ||
1116 | * 64-bit entities here. | ||
1117 | * | ||
1118 | * For example, to specify the internal link ram for Keystone-I class | ||
1119 | * devices, we would set the linkram0 resource to 0x80000-0x83fff. | ||
1120 | * | ||
1121 | * This gets a bit weird when other link rams are used. For example, | ||
1122 | * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries | ||
1123 | * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000, | ||
1124 | * which accounts for 64-bits per entry, for 16K entries. | ||
1125 | */ | ||
1126 | if (!of_property_read_u32_array(node, name, temp, 2)) { | ||
1127 | if (temp[0]) { | ||
1128 | /* | ||
1129 | * queue_base specified => using internal or onchip | ||
1130 | * link ram.  WARNING: we do not "reserve" this block | ||
1131 | */ | ||
1132 | block->phys = (dma_addr_t)temp[0]; | ||
1133 | block->virt = NULL; | ||
1134 | block->size = temp[1]; | ||
1135 | } else { | ||
1136 | block->size = temp[1]; | ||
1137 | /* queue_base not specified => allocate requested size */ | ||
1138 | block->virt = dmam_alloc_coherent(kdev->dev, | ||
1139 | 8 * block->size, &block->phys, | ||
1140 | GFP_KERNEL); | ||
1141 | if (!block->virt) { | ||
1142 | dev_err(kdev->dev, "failed to alloc linkram\n"); | ||
1143 | return -ENOMEM; | ||
1144 | } | ||
1145 | } | ||
1146 | } else { | ||
1147 | return -ENODEV; | ||
1148 | } | ||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
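To make the entry-sized units concrete, here are two hypothetical "linkram0" property values and what the code above does with them (addresses follow the comment's Keystone-I example; they are illustrative only):

	temp = { 0x00080000, 0x4000 }:  base given -> point the hardware at
	        16K entries of internal link RAM at 0x80000; nothing is
	        allocated or reserved by the driver.
	temp = { 0x0, 0x4000 }:         base zero  -> dmam_alloc_coherent()
	        of 8 * 0x4000 = 128 KiB backs the link RAM, and its DMA
	        address becomes the base programmed into the hardware.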
1152 | static int knav_queue_setup_link_ram(struct knav_device *kdev) | ||
1153 | { | ||
1154 | struct knav_link_ram_block *block; | ||
1155 | struct knav_qmgr_info *qmgr; | ||
1156 | |||
1157 | for_each_qmgr(kdev, qmgr) { | ||
1158 | block = &kdev->link_rams[0]; | ||
1159 | dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n", | ||
1160 | block->phys, block->virt, block->size); | ||
1161 | writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0); | ||
1162 | writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0); | ||
1163 | |||
1164 | block++; | ||
1165 | if (!block->size) | ||
1166 | return 0; | ||
1167 | |||
1168 | dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", | ||
1169 | block->phys, block->virt, block->size); | ||
1170 | writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1); | ||
1171 | } | ||
1172 | |||
1173 | return 0; | ||
1174 | } | ||
1175 | |||
1176 | static int knav_setup_queue_range(struct knav_device *kdev, | ||
1177 | struct device_node *node) | ||
1178 | { | ||
1179 | struct device *dev = kdev->dev; | ||
1180 | struct knav_range_info *range; | ||
1181 | struct knav_qmgr_info *qmgr; | ||
1182 | u32 temp[2], start, end, id, index; | ||
1183 | int ret, i; | ||
1184 | |||
1185 | range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL); | ||
1186 | if (!range) { | ||
1187 | dev_err(dev, "out of memory allocating range\n"); | ||
1188 | return -ENOMEM; | ||
1189 | } | ||
1190 | |||
1191 | range->kdev = kdev; | ||
1192 | range->name = knav_queue_find_name(node); | ||
1193 | ret = of_property_read_u32_array(node, "qrange", temp, 2); | ||
1194 | if (!ret) { | ||
1195 | range->queue_base = temp[0] - kdev->base_id; | ||
1196 | range->num_queues = temp[1]; | ||
1197 | } else { | ||
1198 | dev_err(dev, "invalid queue range %s\n", range->name); | ||
1199 | devm_kfree(dev, range); | ||
1200 | return -EINVAL; | ||
1201 | } | ||
1202 | |||
1203 | for (i = 0; i < RANGE_MAX_IRQS; i++) { | ||
1204 | struct of_phandle_args oirq; | ||
1205 | |||
1206 | if (of_irq_parse_one(node, i, &oirq)) | ||
1207 | break; | ||
1208 | |||
1209 | range->irqs[i].irq = irq_create_of_mapping(&oirq); | ||
1210 | if (range->irqs[i].irq == IRQ_NONE) | ||
1211 | break; | ||
1212 | |||
1213 | range->num_irqs++; | ||
1214 | |||
1215 | if (oirq.args_count == 3) | ||
1216 | range->irqs[i].cpu_map = | ||
1217 | (oirq.args[2] & 0x0000ff00) >> 8; | ||
1218 | } | ||
1219 | |||
1220 | range->num_irqs = min(range->num_irqs, range->num_queues); | ||
1221 | if (range->num_irqs) | ||
1222 | range->flags |= RANGE_HAS_IRQ; | ||
1223 | |||
1224 | if (of_get_property(node, "qalloc-by-id", NULL)) | ||
1225 | range->flags |= RANGE_RESERVED; | ||
1226 | |||
1227 | if (of_get_property(node, "accumulator", NULL)) { | ||
1228 | ret = knav_init_acc_range(kdev, node, range); | ||
1229 | if (ret < 0) { | ||
1230 | devm_kfree(dev, range); | ||
1231 | return ret; | ||
1232 | } | ||
1233 | } else { | ||
1234 | range->ops = &knav_gp_range_ops; | ||
1235 | } | ||
1236 | |||
1237 | /* set threshold to 1, and flush out the queues */ | ||
1238 | for_each_qmgr(kdev, qmgr) { | ||
1239 | start = max(qmgr->start_queue, range->queue_base); | ||
1240 | end = min(qmgr->start_queue + qmgr->num_queues, | ||
1241 | range->queue_base + range->num_queues); | ||
1242 | for (id = start; id < end; id++) { | ||
1243 | index = id - qmgr->start_queue; | ||
1244 | writel_relaxed(THRESH_GTE | 1, | ||
1245 | &qmgr->reg_peek[index].ptr_size_thresh); | ||
1246 | writel_relaxed(0, | ||
1247 | &qmgr->reg_push[index].ptr_size_thresh); | ||
1248 | } | ||
1249 | } | ||
1250 | |||
1251 | list_add_tail(&range->list, &kdev->queue_ranges); | ||
1252 | dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n", | ||
1253 | range->name, range->queue_base, | ||
1254 | range->queue_base + range->num_queues - 1, | ||
1255 | range->num_irqs, | ||
1256 | (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "", | ||
1257 | (range->flags & RANGE_RESERVED) ? ", reserved" : "", | ||
1258 | (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : ""); | ||
1259 | kdev->num_queues_in_use += range->num_queues; | ||
1260 | return 0; | ||
1261 | } | ||
1262 | |||
1263 | static int knav_setup_queue_pools(struct knav_device *kdev, | ||
1264 | struct device_node *queue_pools) | ||
1265 | { | ||
1266 | struct device_node *type, *range; | ||
1267 | int ret; | ||
1268 | |||
1269 | for_each_child_of_node(queue_pools, type) { | ||
1270 | for_each_child_of_node(type, range) { | ||
1271 | ret = knav_setup_queue_range(kdev, range); | ||
1272 | /* return value ignored, we init the rest... */ | ||
1273 | } | ||
1274 | } | ||
1275 | |||
1276 | /* ... and barf if they all failed! */ | ||
1277 | if (list_empty(&kdev->queue_ranges)) { | ||
1278 | dev_err(kdev->dev, "no valid queue range found\n"); | ||
1279 | return -ENODEV; | ||
1280 | } | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | static void knav_free_queue_range(struct knav_device *kdev, | ||
1285 | struct knav_range_info *range) | ||
1286 | { | ||
1287 | if (range->ops && range->ops->free_range) | ||
1288 | range->ops->free_range(range); | ||
1289 | list_del(&range->list); | ||
1290 | devm_kfree(kdev->dev, range); | ||
1291 | } | ||
1292 | |||
1293 | static void knav_free_queue_ranges(struct knav_device *kdev) | ||
1294 | { | ||
1295 | struct knav_range_info *range; | ||
1296 | |||
1297 | for (;;) { | ||
1298 | range = first_queue_range(kdev); | ||
1299 | if (!range) | ||
1300 | break; | ||
1301 | knav_free_queue_range(kdev, range); | ||
1302 | } | ||
1303 | } | ||
1304 | |||
1305 | static void knav_queue_free_regions(struct knav_device *kdev) | ||
1306 | { | ||
1307 | struct knav_region *region; | ||
1308 | struct knav_pool *pool; | ||
1309 | unsigned size; | ||
1310 | |||
1311 | for (;;) { | ||
1312 | region = first_region(kdev); | ||
1313 | if (!region) | ||
1314 | break; | ||
1315 | list_for_each_entry(pool, ®ion->pools, region_inst) | ||
1316 | knav_pool_destroy(pool); | ||
1317 | |||
1318 | size = region->virt_end - region->virt_start; | ||
1319 | if (size) | ||
1320 | free_pages_exact(region->virt_start, size); | ||
1321 | list_del(®ion->list); | ||
1322 | devm_kfree(kdev->dev, region); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1326 | static void __iomem *knav_queue_map_reg(struct knav_device *kdev, | ||
1327 | struct device_node *node, int index) | ||
1328 | { | ||
1329 | struct resource res; | ||
1330 | void __iomem *regs; | ||
1331 | int ret; | ||
1332 | |||
1333 | ret = of_address_to_resource(node, index, &res); | ||
1334 | if (ret) { | ||
1335 | dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n", | ||
1336 | node->name, index); | ||
1337 | return ERR_PTR(ret); | ||
1338 | } | ||
1339 | |||
1340 | regs = devm_ioremap_resource(kdev->dev, &res); | ||
1341 | if (IS_ERR(regs)) | ||
1342 | dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n", | ||
1343 | index, node->name); | ||
1344 | return regs; | ||
1345 | } | ||
1346 | |||
1347 | static int knav_queue_init_qmgrs(struct knav_device *kdev, | ||
1348 | struct device_node *qmgrs) | ||
1349 | { | ||
1350 | struct device *dev = kdev->dev; | ||
1351 | struct knav_qmgr_info *qmgr; | ||
1352 | struct device_node *child; | ||
1353 | u32 temp[2]; | ||
1354 | int ret; | ||
1355 | |||
1356 | for_each_child_of_node(qmgrs, child) { | ||
1357 | qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL); | ||
1358 | if (!qmgr) { | ||
1359 | dev_err(dev, "out of memory allocating qmgr\n"); | ||
1360 | return -ENOMEM; | ||
1361 | } | ||
1362 | |||
1363 | ret = of_property_read_u32_array(child, "managed-queues", | ||
1364 | temp, 2); | ||
1365 | if (!ret) { | ||
1366 | qmgr->start_queue = temp[0]; | ||
1367 | qmgr->num_queues = temp[1]; | ||
1368 | } else { | ||
1369 | dev_err(dev, "invalid qmgr queue range\n"); | ||
1370 | devm_kfree(dev, qmgr); | ||
1371 | continue; | ||
1372 | } | ||
1373 | |||
1374 | dev_info(dev, "qmgr start queue %d, number of queues %d\n", | ||
1375 | qmgr->start_queue, qmgr->num_queues); | ||
1376 | |||
1377 | qmgr->reg_peek = | ||
1378 | knav_queue_map_reg(kdev, child, | ||
1379 | KNAV_QUEUE_PEEK_REG_INDEX); | ||
1380 | qmgr->reg_status = | ||
1381 | knav_queue_map_reg(kdev, child, | ||
1382 | KNAV_QUEUE_STATUS_REG_INDEX); | ||
1383 | qmgr->reg_config = | ||
1384 | knav_queue_map_reg(kdev, child, | ||
1385 | KNAV_QUEUE_CONFIG_REG_INDEX); | ||
1386 | qmgr->reg_region = | ||
1387 | knav_queue_map_reg(kdev, child, | ||
1388 | KNAV_QUEUE_REGION_REG_INDEX); | ||
1389 | qmgr->reg_push = | ||
1390 | knav_queue_map_reg(kdev, child, | ||
1391 | KNAV_QUEUE_PUSH_REG_INDEX); | ||
1392 | qmgr->reg_pop = | ||
1393 | knav_queue_map_reg(kdev, child, | ||
1394 | KNAV_QUEUE_POP_REG_INDEX); | ||
1395 | |||
1396 | if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) || | ||
1397 | IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) || | ||
1398 | IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) { | ||
1399 | dev_err(dev, "failed to map qmgr regs\n"); | ||
1400 | if (!IS_ERR(qmgr->reg_peek)) | ||
1401 | devm_iounmap(dev, qmgr->reg_peek); | ||
1402 | if (!IS_ERR(qmgr->reg_status)) | ||
1403 | devm_iounmap(dev, qmgr->reg_status); | ||
1404 | if (!IS_ERR(qmgr->reg_config)) | ||
1405 | devm_iounmap(dev, qmgr->reg_config); | ||
1406 | if (!IS_ERR(qmgr->reg_region)) | ||
1407 | devm_iounmap(dev, qmgr->reg_region); | ||
1408 | if (!IS_ERR(qmgr->reg_push)) | ||
1409 | devm_iounmap(dev, qmgr->reg_push); | ||
1410 | if (!IS_ERR(qmgr->reg_pop)) | ||
1411 | devm_iounmap(dev, qmgr->reg_pop); | ||
1412 | devm_kfree(dev, qmgr); | ||
1413 | continue; | ||
1414 | } | ||
1415 | |||
1416 | list_add_tail(&qmgr->list, &kdev->qmgrs); | ||
1417 | dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n", | ||
1418 | qmgr->start_queue, qmgr->num_queues, | ||
1419 | qmgr->reg_peek, qmgr->reg_status, | ||
1420 | qmgr->reg_config, qmgr->reg_region, | ||
1421 | qmgr->reg_push, qmgr->reg_pop); | ||
1422 | } | ||
1423 | return 0; | ||
1424 | } | ||
1425 | |||
1426 | static int knav_queue_init_pdsps(struct knav_device *kdev, | ||
1427 | struct device_node *pdsps) | ||
1428 | { | ||
1429 | struct device *dev = kdev->dev; | ||
1430 | struct knav_pdsp_info *pdsp; | ||
1431 | struct device_node *child; | ||
1432 | int ret; | ||
1433 | |||
1434 | for_each_child_of_node(pdsps, child) { | ||
1435 | pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL); | ||
1436 | if (!pdsp) { | ||
1437 | dev_err(dev, "out of memory allocating pdsp\n"); | ||
1438 | return -ENOMEM; | ||
1439 | } | ||
1440 | pdsp->name = knav_queue_find_name(child); | ||
1441 | ret = of_property_read_string(child, "firmware", | ||
1442 | &pdsp->firmware); | ||
1443 | if (ret < 0 || !pdsp->firmware) { | ||
1444 | dev_err(dev, "unknown firmware for pdsp %s\n", | ||
1445 | pdsp->name); | ||
1446 | devm_kfree(dev, pdsp); | ||
1447 | continue; | ||
1448 | } | ||
1449 | dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name, | ||
1450 | pdsp->firmware); | ||
1451 | |||
1452 | pdsp->iram = | ||
1453 | knav_queue_map_reg(kdev, child, | ||
1454 | KNAV_QUEUE_PDSP_IRAM_REG_INDEX); | ||
1455 | pdsp->regs = | ||
1456 | knav_queue_map_reg(kdev, child, | ||
1457 | KNAV_QUEUE_PDSP_REGS_REG_INDEX); | ||
1458 | pdsp->intd = | ||
1459 | knav_queue_map_reg(kdev, child, | ||
1460 | KNAV_QUEUE_PDSP_INTD_REG_INDEX); | ||
1461 | pdsp->command = | ||
1462 | knav_queue_map_reg(kdev, child, | ||
1463 | KNAV_QUEUE_PDSP_CMD_REG_INDEX); | ||
1464 | |||
1465 | if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) || | ||
1466 | IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) { | ||
1467 | dev_err(dev, "failed to map pdsp %s regs\n", | ||
1468 | pdsp->name); | ||
1469 | if (!IS_ERR(pdsp->command)) | ||
1470 | devm_iounmap(dev, pdsp->command); | ||
1471 | if (!IS_ERR(pdsp->iram)) | ||
1472 | devm_iounmap(dev, pdsp->iram); | ||
1473 | if (!IS_ERR(pdsp->regs)) | ||
1474 | devm_iounmap(dev, pdsp->regs); | ||
1475 | if (!IS_ERR(pdsp->intd)) | ||
1476 | devm_iounmap(dev, pdsp->intd); | ||
1477 | devm_kfree(dev, pdsp); | ||
1478 | continue; | ||
1479 | } | ||
1480 | of_property_read_u32(child, "id", &pdsp->id); | ||
1481 | list_add_tail(&pdsp->list, &kdev->pdsps); | ||
1482 | dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n", | ||
1483 | pdsp->name, pdsp->command, pdsp->iram, pdsp->regs, | ||
1484 | pdsp->intd, pdsp->firmware); | ||
1485 | } | ||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | static int knav_queue_stop_pdsp(struct knav_device *kdev, | ||
1490 | struct knav_pdsp_info *pdsp) | ||
1491 | { | ||
1492 | u32 val, timeout = 1000; | ||
1493 | int ret; | ||
1494 | |||
1495 | val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE; | ||
1496 | writel_relaxed(val, &pdsp->regs->control); | ||
1497 | ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout, | ||
1498 | PDSP_CTRL_RUNNING); | ||
1499 | if (ret < 0) { | ||
1500 | dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name); | ||
1501 | return ret; | ||
1502 | } | ||
1503 | return 0; | ||
1504 | } | ||
1505 | |||
1506 | static int knav_queue_load_pdsp(struct knav_device *kdev, | ||
1507 | struct knav_pdsp_info *pdsp) | ||
1508 | { | ||
1509 | int i, ret, fwlen; | ||
1510 | const struct firmware *fw; | ||
1511 | u32 *fwdata; | ||
1512 | |||
1513 | ret = request_firmware(&fw, pdsp->firmware, kdev->dev); | ||
1514 | if (ret) { | ||
1515 | dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n", | ||
1516 | pdsp->firmware, pdsp->name); | ||
1517 | return ret; | ||
1518 | } | ||
1519 | writel_relaxed(pdsp->id + 1, pdsp->command + 0x18); | ||
1520 | /* download the firmware */ | ||
1521 | fwdata = (u32 *)fw->data; | ||
1522 | fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32); | ||
1523 | for (i = 0; i < fwlen; i++) | ||
1524 | writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i); | ||
1525 | |||
1526 | release_firmware(fw); | ||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static int knav_queue_start_pdsp(struct knav_device *kdev, | ||
1531 | struct knav_pdsp_info *pdsp) | ||
1532 | { | ||
1533 | u32 val, timeout = 1000; | ||
1534 | int ret; | ||
1535 | |||
1536 | /* write a command for sync */ | ||
1537 | writel_relaxed(0xffffffff, pdsp->command); | ||
1538 | while (readl_relaxed(pdsp->command) != 0xffffffff) | ||
1539 | cpu_relax(); | ||
1540 | |||
1541 | /* soft reset the PDSP */ | ||
1542 | val = readl_relaxed(&pdsp->regs->control); | ||
1543 | val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET); | ||
1544 | writel_relaxed(val, &pdsp->regs->control); | ||
1545 | |||
1546 | /* enable pdsp */ | ||
1547 | val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE; | ||
1548 | writel_relaxed(val, &pdsp->regs->control); | ||
1549 | |||
1550 | /* wait for command register to clear */ | ||
1551 | ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0); | ||
1552 | if (ret < 0) { | ||
1553 | dev_err(kdev->dev, | ||
1554 | "timed out on pdsp %s command register wait\n", | ||
1555 | pdsp->name); | ||
1556 | return ret; | ||
1557 | } | ||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
1561 | static void knav_queue_stop_pdsps(struct knav_device *kdev) | ||
1562 | { | ||
1563 | struct knav_pdsp_info *pdsp; | ||
1564 | |||
1565 | /* disable all pdsps */ | ||
1566 | for_each_pdsp(kdev, pdsp) | ||
1567 | knav_queue_stop_pdsp(kdev, pdsp); | ||
1568 | } | ||
1569 | |||
1570 | static int knav_queue_start_pdsps(struct knav_device *kdev) | ||
1571 | { | ||
1572 | struct knav_pdsp_info *pdsp; | ||
1573 | int ret; | ||
1574 | |||
1575 | knav_queue_stop_pdsps(kdev); | ||
1576 | /* now load them all */ | ||
1577 | for_each_pdsp(kdev, pdsp) { | ||
1578 | ret = knav_queue_load_pdsp(kdev, pdsp); | ||
1579 | if (ret < 0) | ||
1580 | return ret; | ||
1581 | } | ||
1582 | |||
1583 | for_each_pdsp(kdev, pdsp) { | ||
1584 | ret = knav_queue_start_pdsp(kdev, pdsp); | ||
1585 | WARN_ON(ret); | ||
1586 | } | ||
1587 | return 0; | ||
1588 | } | ||
1589 | |||
1590 | static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id) | ||
1591 | { | ||
1592 | struct knav_qmgr_info *qmgr; | ||
1593 | |||
1594 | for_each_qmgr(kdev, qmgr) { | ||
1595 | if ((id >= qmgr->start_queue) && | ||
1596 | (id < qmgr->start_queue + qmgr->num_queues)) | ||
1597 | return qmgr; | ||
1598 | } | ||
1599 | return NULL; | ||
1600 | } | ||
1601 | |||
1602 | static int knav_queue_init_queue(struct knav_device *kdev, | ||
1603 | struct knav_range_info *range, | ||
1604 | struct knav_queue_inst *inst, | ||
1605 | unsigned id) | ||
1606 | { | ||
1607 | char irq_name[KNAV_NAME_SIZE]; | ||
1608 | inst->qmgr = knav_find_qmgr(id); | ||
1609 | if (!inst->qmgr) | ||
1610 | return -ENODEV; | ||
1611 | |||
1612 | INIT_LIST_HEAD(&inst->handles); | ||
1613 | inst->kdev = kdev; | ||
1614 | inst->range = range; | ||
1615 | inst->irq_num = -1; | ||
1616 | inst->id = id; | ||
1617 | scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id); | ||
1618 | inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); | ||
1619 | |||
1620 | if (range->ops && range->ops->init_queue) | ||
1621 | return range->ops->init_queue(range, inst); | ||
1622 | else | ||
1623 | return 0; | ||
1624 | } | ||
1625 | |||
1626 | static int knav_queue_init_queues(struct knav_device *kdev) | ||
1627 | { | ||
1628 | struct knav_range_info *range; | ||
1629 | int size, id, base_idx; | ||
1630 | int idx = 0, ret = 0; | ||
1631 | |||
1632 | /* how much do we need for instance data? */ | ||
1633 | size = sizeof(struct knav_queue_inst); | ||
1634 | |||
1635 | /* round this up to a power of 2 to keep the index-to-instance | ||
1636 | * arithmetic fast. | ||
1637 | */ | ||
1638 | kdev->inst_shift = order_base_2(size); | ||
1639 | size = (1 << kdev->inst_shift) * kdev->num_queues_in_use; | ||
1640 | kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL); | ||
1641 | if (!kdev->instances) | ||
1642 | return -ENOMEM; | ||
1643 | |||
1644 | for_each_queue_range(kdev, range) { | ||
1645 | if (range->ops && range->ops->init_range) | ||
1646 | range->ops->init_range(range); | ||
1647 | base_idx = idx; | ||
1648 | for (id = range->queue_base; | ||
1649 | id < range->queue_base + range->num_queues; id++, idx++) { | ||
1650 | ret = knav_queue_init_queue(kdev, range, | ||
1651 | knav_queue_idx_to_inst(kdev, idx), id); | ||
1652 | if (ret < 0) | ||
1653 | return ret; | ||
1654 | } | ||
1655 | range->queue_base_inst = | ||
1656 | knav_queue_idx_to_inst(kdev, base_idx); | ||
1657 | } | ||
1658 | return 0; | ||
1659 | } | ||
1660 | |||
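The power-of-two rounding of the instance size means an index-to-instance lookup is a shift and an add instead of a multiply. The helper lives in knav_qmss.h (not shown in this hunk); a sketch consistent with the code above would be:

	static inline struct knav_queue_inst *
	knav_queue_idx_to_inst(struct knav_device *kdev, int idx)
	{
		return kdev->instances + (idx << kdev->inst_shift);
	}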
1661 | static int knav_queue_probe(struct platform_device *pdev) | ||
1662 | { | ||
1663 | struct device_node *node = pdev->dev.of_node; | ||
1664 | struct device_node *qmgrs, *queue_pools, *regions, *pdsps; | ||
1665 | struct device *dev = &pdev->dev; | ||
1666 | u32 temp[2]; | ||
1667 | int ret; | ||
1668 | |||
1669 | if (!node) { | ||
1670 | dev_err(dev, "device tree info unavailable\n"); | ||
1671 | return -ENODEV; | ||
1672 | } | ||
1673 | |||
1674 | kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL); | ||
1675 | if (!kdev) { | ||
1676 | dev_err(dev, "memory allocation failed\n"); | ||
1677 | return -ENOMEM; | ||
1678 | } | ||
1679 | |||
1680 | platform_set_drvdata(pdev, kdev); | ||
1681 | kdev->dev = dev; | ||
1682 | INIT_LIST_HEAD(&kdev->queue_ranges); | ||
1683 | INIT_LIST_HEAD(&kdev->qmgrs); | ||
1684 | INIT_LIST_HEAD(&kdev->pools); | ||
1685 | INIT_LIST_HEAD(&kdev->regions); | ||
1686 | INIT_LIST_HEAD(&kdev->pdsps); | ||
1687 | |||
1688 | pm_runtime_enable(&pdev->dev); | ||
1689 | ret = pm_runtime_get_sync(&pdev->dev); | ||
1690 | if (ret < 0) { | ||
1691 | dev_err(dev, "Failed to enable QMSS\n"); | ||
1692 | pm_runtime_put_noidle(&pdev->dev); | ||
1693 | pm_runtime_disable(&pdev->dev); | ||
1694 | return ret; | ||
1695 | } | ||
1694 | |||
1695 | if (of_property_read_u32_array(node, "queue-range", temp, 2)) { | ||
1696 | dev_err(dev, "queue-range not specified\n"); | ||
1697 | ret = -ENODEV; | ||
1698 | goto err; | ||
1699 | } | ||
1700 | kdev->base_id = temp[0]; | ||
1701 | kdev->num_queues = temp[1]; | ||
1702 | |||
1703 | /* Initialize queue managers using device tree configuration */ | ||
1704 | qmgrs = of_get_child_by_name(node, "qmgrs"); | ||
1705 | if (!qmgrs) { | ||
1706 | dev_err(dev, "queue manager info not specified\n"); | ||
1707 | ret = -ENODEV; | ||
1708 | goto err; | ||
1709 | } | ||
1710 | ret = knav_queue_init_qmgrs(kdev, qmgrs); | ||
1711 | of_node_put(qmgrs); | ||
1712 | if (ret) | ||
1713 | goto err; | ||
1714 | |||
1715 | /* get pdsp configuration values from device tree */ | ||
1716 | pdsps = of_get_child_by_name(node, "pdsps"); | ||
1717 | if (pdsps) { | ||
1718 | ret = knav_queue_init_pdsps(kdev, pdsps); | ||
1719 | if (ret) | ||
1720 | goto err; | ||
1721 | |||
1722 | ret = knav_queue_start_pdsps(kdev); | ||
1723 | if (ret) | ||
1724 | goto err; | ||
1725 | } | ||
1726 | of_node_put(pdsps); | ||
1727 | |||
1728 | /* get usable queue range values from device tree */ | ||
1729 | queue_pools = of_get_child_by_name(node, "queue-pools"); | ||
1730 | if (!queue_pools) { | ||
1731 | dev_err(dev, "queue-pools not specified\n"); | ||
1732 | ret = -ENODEV; | ||
1733 | goto err; | ||
1734 | } | ||
1735 | ret = knav_setup_queue_pools(kdev, queue_pools); | ||
1736 | of_node_put(queue_pools); | ||
1737 | if (ret) | ||
1738 | goto err; | ||
1739 | |||
1740 | ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]); | ||
1741 | if (ret) { | ||
1742 | dev_err(kdev->dev, "could not setup linking ram\n"); | ||
1743 | goto err; | ||
1744 | } | ||
1745 | |||
1746 | ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]); | ||
1747 | if (ret) { | ||
1748 | /* | ||
1749 | * nothing really, we have one linking ram already, so we just | ||
1750 | * live within our means | ||
1751 | */ | ||
1752 | } | ||
1753 | |||
1754 | ret = knav_queue_setup_link_ram(kdev); | ||
1755 | if (ret) | ||
1756 | goto err; | ||
1757 | |||
1758 | regions = of_get_child_by_name(node, "descriptor-regions"); | ||
1759 | if (!regions) { | ||
1760 | dev_err(dev, "descriptor-regions not specified\n"); | ||
1761 | ret = -ENODEV; | ||
1762 | goto err; | ||
1762 | } | ||
1763 | ret = knav_queue_setup_regions(kdev, regions); | ||
1764 | of_node_put(regions); | ||
1765 | if (ret) | ||
1766 | goto err; | ||
1767 | |||
1768 | ret = knav_queue_init_queues(kdev); | ||
1769 | if (ret < 0) { | ||
1770 | dev_err(dev, "hwqueue initialization failed\n"); | ||
1771 | goto err; | ||
1772 | } | ||
1773 | |||
1774 | debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL, | ||
1775 | &knav_queue_debug_ops); | ||
1776 | return 0; | ||
1777 | |||
1778 | err: | ||
1779 | knav_queue_stop_pdsps(kdev); | ||
1780 | knav_queue_free_regions(kdev); | ||
1781 | knav_free_queue_ranges(kdev); | ||
1782 | pm_runtime_put_sync(&pdev->dev); | ||
1783 | pm_runtime_disable(&pdev->dev); | ||
1784 | return ret; | ||
1785 | } | ||
1786 | |||
1787 | static int knav_queue_remove(struct platform_device *pdev) | ||
1788 | { | ||
1789 | /* TODO: Free resources */ | ||
1790 | pm_runtime_put_sync(&pdev->dev); | ||
1791 | pm_runtime_disable(&pdev->dev); | ||
1792 | return 0; | ||
1793 | } | ||
1794 | |||
1795 | /* Match table for of_platform binding */ | ||
1796 | static const struct of_device_id keystone_qmss_of_match[] = { | ||
1797 | { .compatible = "ti,keystone-navigator-qmss", }, | ||
1798 | {}, | ||
1799 | }; | ||
1800 | MODULE_DEVICE_TABLE(of, keystone_qmss_of_match); | ||
1801 | |||
1802 | static struct platform_driver keystone_qmss_driver = { | ||
1803 | .probe = knav_queue_probe, | ||
1804 | .remove = knav_queue_remove, | ||
1805 | .driver = { | ||
1806 | .name = "keystone-navigator-qmss", | ||
1807 | .owner = THIS_MODULE, | ||
1808 | .of_match_table = keystone_qmss_of_match, | ||
1809 | }, | ||
1810 | }; | ||
1811 | module_platform_driver(keystone_qmss_driver); | ||
1812 | |||
1813 | MODULE_LICENSE("GPL v2"); | ||
1814 | MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs"); | ||
1815 | MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); | ||
1816 | MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); | ||
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h new file mode 100644 index 000000000000..e864a3eb9ac4 --- /dev/null +++ b/include/linux/soc/ti/knav_dma.h | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Texas Instruments Incorporated | ||
3 | * Authors:	Sandeep Nair <sandeep_n@ti.com> | ||
4 | *		Cyril Chemparathy <cyril@ti.com> | ||
5 | *		Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation version 2. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ | ||
18 | #define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ | ||
19 | |||
20 | /* | ||
21 | * PKTDMA descriptor manipulation macros for host packet descriptor | ||
22 | */ | ||
23 | #define MASK(x) (BIT(x) - 1) | ||
24 | #define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22) | ||
25 | #define KNAV_DMA_DESC_PKT_LEN_SHIFT 0 | ||
26 | #define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22) | ||
27 | #define KNAV_DMA_DESC_PS_INFO_IN_DESC 0 | ||
28 | #define KNAV_DMA_DESC_TAG_MASK MASK(8) | ||
29 | #define KNAV_DMA_DESC_STAG_HI_SHIFT 24 | ||
30 | #define KNAV_DMA_DESC_STAG_LO_SHIFT 16 | ||
31 | #define KNAV_DMA_DESC_DTAG_HI_SHIFT 8 | ||
32 | #define KNAV_DMA_DESC_DTAG_LO_SHIFT 0 | ||
33 | #define KNAV_DMA_DESC_HAS_EPIB BIT(31) | ||
34 | #define KNAV_DMA_DESC_NO_EPIB 0 | ||
35 | #define KNAV_DMA_DESC_PSLEN_SHIFT 24 | ||
36 | #define KNAV_DMA_DESC_PSLEN_MASK MASK(6) | ||
37 | #define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20 | ||
38 | #define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4) | ||
39 | #define KNAV_DMA_DESC_PSFLAG_SHIFT 16 | ||
40 | #define KNAV_DMA_DESC_PSFLAG_MASK MASK(4) | ||
41 | #define KNAV_DMA_DESC_RETQ_SHIFT 0 | ||
42 | #define KNAV_DMA_DESC_RETQ_MASK MASK(14) | ||
43 | #define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) | ||
44 | |||
45 | #define KNAV_DMA_NUM_EPIB_WORDS 4 | ||
46 | #define KNAV_DMA_NUM_PS_WORDS 16 | ||
47 | #define KNAV_DMA_FDQ_PER_CHAN 4 | ||
48 | |||
49 | /* Tx channel scheduling priority */ | ||
50 | enum knav_dma_tx_priority { | ||
51 | DMA_PRIO_HIGH = 0, | ||
52 | DMA_PRIO_MED_H, | ||
53 | DMA_PRIO_MED_L, | ||
54 | DMA_PRIO_LOW | ||
55 | }; | ||
56 | |||
57 | /* Rx channel error handling mode during buffer starvation */ | ||
58 | enum knav_dma_rx_err_mode { | ||
59 | DMA_DROP = 0, | ||
60 | DMA_RETRY | ||
61 | }; | ||
62 | |||
63 | /* Rx flow size threshold configuration */ | ||
64 | enum knav_dma_rx_thresholds { | ||
65 | DMA_THRESH_NONE = 0, | ||
66 | DMA_THRESH_0 = 1, | ||
67 | DMA_THRESH_0_1 = 3, | ||
68 | DMA_THRESH_0_1_2 = 7 | ||
69 | }; | ||
70 | |||
71 | /* Descriptor type */ | ||
72 | enum knav_dma_desc_type { | ||
73 | DMA_DESC_HOST = 0, | ||
74 | DMA_DESC_MONOLITHIC = 2 | ||
75 | }; | ||
76 | |||
77 | /** | ||
78 | * struct knav_dma_tx_cfg: Tx channel configuration | ||
79 | * @filt_einfo: Filter extended packet info | ||
80 | * @filt_pswords: Filter PS words present | ||
81 | * @priority: Tx channel scheduling priority | ||
82 | */ | ||
83 | struct knav_dma_tx_cfg { | ||
84 | bool filt_einfo; | ||
85 | bool filt_pswords; | ||
86 | enum knav_dma_tx_priority priority; | ||
87 | }; | ||
88 | |||
89 | /** | ||
90 | * struct knav_dma_rx_cfg: Rx flow configuration | ||
91 | * @einfo_present: Extended packet info present | ||
92 | * @psinfo_present: PS words present | ||
93 | * @err_mode: Error handling mode during buffer starvation | ||
94 | * @desc_type: Host or Monolithic desc | ||
95 | * @psinfo_at_sop: PS word located at start of packet | ||
96 | * @sop_offset: Start of packet offset | ||
97 | * @dst_q: Destination queue for a given flow | ||
98 | * @thresh: Rx flow size threshold | ||
99 | * @fdq: Free desc Queue array | ||
100 | * @sz_thresh0: RX packet size threshold 0 | ||
101 | * @sz_thresh1: RX packet size threshold 1 | ||
102 | * @sz_thresh2: RX packet size threshold 2 | ||
103 | */ | ||
104 | struct knav_dma_rx_cfg { | ||
105 | bool einfo_present; | ||
106 | bool psinfo_present; | ||
107 | enum knav_dma_rx_err_mode err_mode; | ||
108 | enum knav_dma_desc_type desc_type; | ||
109 | bool psinfo_at_sop; | ||
110 | unsigned int sop_offset; | ||
111 | unsigned int dst_q; | ||
112 | enum knav_dma_rx_thresholds thresh; | ||
113 | unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN]; | ||
114 | unsigned int sz_thresh0; | ||
115 | unsigned int sz_thresh1; | ||
116 | unsigned int sz_thresh2; | ||
117 | }; | ||
118 | |||
119 | /** | ||
120 | * struct knav_dma_cfg: Pktdma channel configuration | ||
121 | * @direction: DMA transfer direction (DMA_MEM_TO_DEV for tx, DMA_DEV_TO_MEM for rx) | ||
122 | * @u.tx: Tx channel configuration | ||
123 | * @u.rx: Rx flow configuration | ||
124 | */ | ||
125 | struct knav_dma_cfg { | ||
126 | enum dma_transfer_direction direction; | ||
127 | union { | ||
128 | struct knav_dma_tx_cfg tx; | ||
129 | struct knav_dma_rx_cfg rx; | ||
130 | } u; | ||
131 | }; | ||
132 | |||
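The direction selects which half of the union is meaningful. A hedged sketch of opening an rx flow with this structure; the channel name and queue numbers are illustrative, not values defined by this patch:

	struct knav_dma_cfg cfg = {
		.direction = DMA_DEV_TO_MEM,
		.u.rx = {
			.einfo_present	= true,
			.psinfo_present	= true,
			.err_mode	= DMA_RETRY,
			.desc_type	= DMA_DESC_HOST,
			.dst_q		= 8704,		/* rx completion queue */
			.thresh		= DMA_THRESH_NONE,
			.fdq		= { 8705, 8705, 8705, 8705 },
		},
	};
	void *chan = knav_dma_open_channel(dev, "netrx0", &cfg);

	if (IS_ERR_OR_NULL(chan))
		return -ENODEV;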
133 | /** | ||
134 | * struct knav_dma_desc: Host packet descriptor layout | ||
135 | * @desc_info: Descriptor information like id, type, length | ||
136 | * @tag_info: Flow tag info written in during RX | ||
137 | * @packet_info: Queue manager, policy, flags, etc. | ||
138 | * @buff_len: Buffer length in bytes | ||
139 | * @buff: Buffer pointer | ||
140 | * @next_desc: For chaining the descriptors | ||
141 | * @orig_len: length since 'buff_len' can be overwritten | ||
142 | * @orig_buff: buff pointer since 'buff' can be overwritten | ||
143 | * @epib: Extended packet info block | ||
144 | * @psdata: Protocol specific | ||
145 | */ | ||
146 | struct knav_dma_desc { | ||
147 | u32 desc_info; | ||
148 | u32 tag_info; | ||
149 | u32 packet_info; | ||
150 | u32 buff_len; | ||
151 | u32 buff; | ||
152 | u32 next_desc; | ||
153 | u32 orig_len; | ||
154 | u32 orig_buff; | ||
155 | u32 epib[KNAV_DMA_NUM_EPIB_WORDS]; | ||
156 | u32 psdata[KNAV_DMA_NUM_PS_WORDS]; | ||
157 | u32 pad[4]; | ||
158 | } ____cacheline_aligned; | ||
159 | |||
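Putting the masks and the layout together, here is a sketch of filling the first words of a host descriptor for a buffer of len bytes; the helper name and the choice of four PS words are hypothetical:

	static void example_desc_fill(struct knav_dma_desc *desc, dma_addr_t buf,
				      u32 len, u32 retq)
	{
		desc->desc_info   = len & KNAV_DMA_DESC_PKT_LEN_MASK;
		desc->packet_info = KNAV_DMA_DESC_HAS_EPIB |
				    (4 << KNAV_DMA_DESC_PSLEN_SHIFT) |	/* 4 PS words */
				    (retq & KNAV_DMA_DESC_RETQ_MASK);	/* return queue */
		desc->buff_len    = len & KNAV_DMA_DESC_BUF_LEN_MASK;
		desc->buff        = (u32)buf;
		desc->next_desc   = 0;		/* single-buffer packet */
		desc->orig_len    = len;	/* hardware may rewrite buff_len */
		desc->orig_buff   = (u32)buf;	/* hardware may rewrite buff */
	}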
160 | #ifdef CONFIG_KEYSTONE_NAVIGATOR_DMA | ||
161 | void *knav_dma_open_channel(struct device *dev, const char *name, | ||
162 | struct knav_dma_cfg *config); | ||
163 | void knav_dma_close_channel(void *channel); | ||
164 | #else | ||
165 | static inline void *knav_dma_open_channel(struct device *dev, const char *name, | ||
166 | struct knav_dma_cfg *config) | ||
167 | { | ||
168 | return NULL; | ||
169 | } | ||
170 | static inline void knav_dma_close_channel(void *channel) | ||
171 | {} | ||
172 | |||
173 | #endif | ||
174 | |||
175 | #endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */ | ||
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h new file mode 100644 index 000000000000..9f0ebb3bad27 --- /dev/null +++ b/include/linux/soc/ti/knav_qmss.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Keystone Navigator Queue Management Sub-System header | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation version 2. | ||
12 | * | ||
13 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
14 | * kind, whether express or implied; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #ifndef __SOC_TI_KNAV_QMSS_H__ | ||
20 | #define __SOC_TI_KNAV_QMSS_H__ | ||
21 | |||
22 | #include <linux/err.h> | ||
23 | #include <linux/time.h> | ||
24 | #include <linux/atomic.h> | ||
25 | #include <linux/device.h> | ||
26 | #include <linux/fcntl.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | |||
29 | /* queue types */ | ||
30 | #define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */ | ||
31 | #define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */ | ||
32 | #define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */ | ||
33 | |||
34 | /* queue flags */ | ||
35 | #define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */ | ||
36 | |||
37 | /** | ||
38 | * enum knav_queue_ctrl_cmd - queue operations. | ||
39 | * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue | ||
40 | * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible | ||
41 | * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle. | ||
42 | * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle. | ||
43 | * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle. | ||
44 | * @KNAV_QUEUE_GET_COUNT: Get number of queues. | ||
45 | */ | ||
46 | enum knav_queue_ctrl_cmd { | ||
47 | KNAV_QUEUE_GET_ID, | ||
48 | KNAV_QUEUE_FLUSH, | ||
49 | KNAV_QUEUE_SET_NOTIFIER, | ||
50 | KNAV_QUEUE_ENABLE_NOTIFY, | ||
51 | KNAV_QUEUE_DISABLE_NOTIFY, | ||
52 | KNAV_QUEUE_GET_COUNT | ||
53 | }; | ||
54 | |||
55 | /* Queue notifier callback prototype */ | ||
56 | typedef void (*knav_queue_notify_fn)(void *arg); | ||
57 | |||
58 | /** | ||
59 | * struct knav_queue_notify_config: Notifier configuration | ||
60 | * @fn: Notifier function | ||
61 | * @fn_arg: Notifier function arguments | ||
62 | */ | ||
63 | struct knav_queue_notify_config { | ||
64 | knav_queue_notify_fn fn; | ||
65 | void *fn_arg; | ||
66 | }; | ||
67 | |||
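A sketch of wiring a notifier to an open queue through knav_queue_device_control(); the handler and its `priv` argument are hypothetical client state:

	static void example_notify(void *arg)
	{
		/* typically: disable notify, then schedule NAPI/work */
	}

	struct knav_queue_notify_config ncfg = {
		.fn	= example_notify,
		.fn_arg	= priv,
	};

	knav_queue_device_control(qhandle, KNAV_QUEUE_SET_NOTIFIER,
				  (unsigned long)&ncfg);
	knav_queue_device_control(qhandle, KNAV_QUEUE_ENABLE_NOTIFY, 0);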
68 | void *knav_queue_open(const char *name, unsigned id, | ||
69 | unsigned flags); | ||
70 | void knav_queue_close(void *qhandle); | ||
71 | int knav_queue_device_control(void *qhandle, | ||
72 | enum knav_queue_ctrl_cmd cmd, | ||
73 | unsigned long arg); | ||
74 | dma_addr_t knav_queue_pop(void *qhandle, unsigned *size); | ||
75 | int knav_queue_push(void *qhandle, dma_addr_t dma, | ||
76 | unsigned size, unsigned flags); | ||
77 | |||
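These five calls are the whole queue-side API. A minimal open/push/pop/close sketch, assuming dma and dma_sz came from knav_pool_desc_map(); the queue name is illustrative:

	void *q = knav_queue_open("example-gp", KNAV_QUEUE_GP, 0);

	if (IS_ERR(q))
		return PTR_ERR(q);

	knav_queue_push(q, dma, dma_sz, 0);	/* enqueue a descriptor */
	dma = knav_queue_pop(q, &dma_sz);	/* and take it back off */

	knav_queue_close(q);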
78 | void *knav_pool_create(const char *name, | ||
79 | int num_desc, int region_id); | ||
80 | void knav_pool_destroy(void *ph); | ||
81 | int knav_pool_count(void *ph); | ||
82 | void *knav_pool_desc_get(void *ph); | ||
83 | void knav_pool_desc_put(void *ph, void *desc); | ||
84 | int knav_pool_desc_map(void *ph, void *desc, unsigned size, | ||
85 | dma_addr_t *dma, unsigned *dma_sz); | ||
86 | void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz); | ||
87 | dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt); | ||
88 | void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma); | ||
89 | |||
90 | #endif /* __SOC_TI_KNAV_QMSS_H__ */ | ||