diff options
author | Sebastian Reichel <sre@kernel.org> | 2013-12-15 17:38:58 -0500 |
---|---|---|
committer | Sebastian Reichel <sre@kernel.org> | 2014-05-15 18:54:51 -0400 |
commit | b209e047bc743247f74ce79e8827ae1ed556bae0 (patch) | |
tree | c12e55da83c2ce9a73d8ed22f46bd3c25ab051d6 /drivers/hsi | |
parent | a2aa24734d9dbbd3b9062c2459936c336278fa6a (diff) |
HSI: Introduce OMAP SSI driver
Add OMAP SSI driver to the HSI subsystem.
The Synchronous Serial Interface (SSI) is a legacy version
of HSI. As in the case of HSI, it is mainly used to connect
Application engines (APE) with cellular modem engines (CMT)
in cellular handsets.
It provides a multichannel, full-duplex, multi-core communication
with no reference clock. The OMAP SSI block is capable of reaching
speeds of 110 Mbit/s.
Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
Signed-off-by: Sebastian Reichel <sre@kernel.org>
Tested-By: Ivaylo Dimitrov <ivo.g.dimitrov.75@gmail.com>
Diffstat (limited to 'drivers/hsi')
-rw-r--r-- | drivers/hsi/Kconfig | 1 | ||||
-rw-r--r-- | drivers/hsi/Makefile | 1 | ||||
-rw-r--r-- | drivers/hsi/controllers/Kconfig | 19 | ||||
-rw-r--r-- | drivers/hsi/controllers/Makefile | 6 | ||||
-rw-r--r-- | drivers/hsi/controllers/omap_ssi.c | 625 | ||||
-rw-r--r-- | drivers/hsi/controllers/omap_ssi.h | 166 | ||||
-rw-r--r-- | drivers/hsi/controllers/omap_ssi_port.c | 1399 | ||||
-rw-r--r-- | drivers/hsi/controllers/omap_ssi_regs.h | 171 |
8 files changed, 2388 insertions, 0 deletions
diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig index d94e38dd80c7..2c76de438eb1 100644 --- a/drivers/hsi/Kconfig +++ b/drivers/hsi/Kconfig | |||
@@ -14,6 +14,7 @@ config HSI_BOARDINFO | |||
14 | bool | 14 | bool |
15 | default y | 15 | default y |
16 | 16 | ||
17 | source "drivers/hsi/controllers/Kconfig" | ||
17 | source "drivers/hsi/clients/Kconfig" | 18 | source "drivers/hsi/clients/Kconfig" |
18 | 19 | ||
19 | endif # HSI | 20 | endif # HSI |
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 9d5d33f90de2..360371e134f1 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile | |||
@@ -3,4 +3,5 @@ | |||
3 | # | 3 | # |
4 | obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o | 4 | obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o |
5 | obj-$(CONFIG_HSI) += hsi.o | 5 | obj-$(CONFIG_HSI) += hsi.o |
6 | obj-y += controllers/ | ||
6 | obj-y += clients/ | 7 | obj-y += clients/ |
diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 000000000000..6aba27808172 --- /dev/null +++ b/drivers/hsi/controllers/Kconfig | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # HSI controllers configuration | ||
3 | # | ||
4 | comment "HSI controllers" | ||
5 | |||
6 | config OMAP_SSI | ||
7 | tristate "OMAP SSI hardware driver" | ||
8 | depends on HSI && OF && (ARCH_OMAP3 || (ARM && COMPILE_TEST)) | ||
9 | ---help--- | ||
10 | SSI is a legacy version of HSI. It is usually used to connect | ||
11 | an application engine with a cellular modem. | ||
12 | If you say Y here, you will enable the OMAP SSI hardware driver. | ||
13 | |||
14 | If unsure, say N. | ||
15 | |||
16 | config OMAP_SSI_PORT | ||
17 | tristate | ||
18 | default m if OMAP_SSI=m | ||
19 | default y if OMAP_SSI=y | ||
diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 000000000000..d2665cf9c545 --- /dev/null +++ b/drivers/hsi/controllers/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # | ||
2 | # Makefile for HSI controllers drivers | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_OMAP_SSI) += omap_ssi.o | ||
6 | obj-$(CONFIG_OMAP_SSI_PORT) += omap_ssi_port.o | ||
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c new file mode 100644 index 000000000000..0fc7a7fd0140 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.c | |||
@@ -0,0 +1,625 @@ | |||
1 | /* OMAP SSI driver. | ||
2 | * | ||
3 | * Copyright (C) 2010 Nokia Corporation. All rights reserved. | ||
4 | * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> | ||
5 | * | ||
6 | * Contact: Carlos Chinea <carlos.chinea@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/compiler.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/gpio.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/device.h> | ||
30 | #include <linux/platform_device.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/dmaengine.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include <linux/scatterlist.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/debugfs.h> | ||
39 | #include <linux/pm_runtime.h> | ||
40 | #include <linux/of_platform.h> | ||
41 | #include <linux/hsi/hsi.h> | ||
42 | #include <linux/idr.h> | ||
43 | |||
44 | #include "omap_ssi_regs.h" | ||
45 | #include "omap_ssi.h" | ||
46 | |||
47 | /* For automatically allocated device IDs */ | ||
48 | static DEFINE_IDA(platform_omap_ssi_ida); | ||
49 | |||
50 | #ifdef CONFIG_DEBUG_FS | ||
51 | static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) | ||
52 | { | ||
53 | struct hsi_controller *ssi = m->private; | ||
54 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
55 | void __iomem *sys = omap_ssi->sys; | ||
56 | |||
57 | pm_runtime_get_sync(ssi->device.parent); | ||
58 | seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); | ||
59 | seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); | ||
60 | seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); | ||
61 | pm_runtime_put_sync(ssi->device.parent); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) | ||
67 | { | ||
68 | struct hsi_controller *ssi = m->private; | ||
69 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
70 | void __iomem *gdd = omap_ssi->gdd; | ||
71 | void __iomem *sys = omap_ssi->sys; | ||
72 | int lch; | ||
73 | |||
74 | pm_runtime_get_sync(ssi->device.parent); | ||
75 | |||
76 | seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", | ||
77 | readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG)); | ||
78 | seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", | ||
79 | readl(sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); | ||
80 | seq_printf(m, "HW_ID\t\t: 0x%08x\n", | ||
81 | readl(gdd + SSI_GDD_HW_ID_REG)); | ||
82 | seq_printf(m, "PPORT_ID\t: 0x%08x\n", | ||
83 | readl(gdd + SSI_GDD_PPORT_ID_REG)); | ||
84 | seq_printf(m, "MPORT_ID\t: 0x%08x\n", | ||
85 | readl(gdd + SSI_GDD_MPORT_ID_REG)); | ||
86 | seq_printf(m, "TEST\t\t: 0x%08x\n", | ||
87 | readl(gdd + SSI_GDD_TEST_REG)); | ||
88 | seq_printf(m, "GCR\t\t: 0x%08x\n", | ||
89 | readl(gdd + SSI_GDD_GCR_REG)); | ||
90 | |||
91 | for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { | ||
92 | seq_printf(m, "\nGDD LCH %d\n=========\n", lch); | ||
93 | seq_printf(m, "CSDP\t\t: 0x%04x\n", | ||
94 | readw(gdd + SSI_GDD_CSDP_REG(lch))); | ||
95 | seq_printf(m, "CCR\t\t: 0x%04x\n", | ||
96 | readw(gdd + SSI_GDD_CCR_REG(lch))); | ||
97 | seq_printf(m, "CICR\t\t: 0x%04x\n", | ||
98 | readw(gdd + SSI_GDD_CICR_REG(lch))); | ||
99 | seq_printf(m, "CSR\t\t: 0x%04x\n", | ||
100 | readw(gdd + SSI_GDD_CSR_REG(lch))); | ||
101 | seq_printf(m, "CSSA\t\t: 0x%08x\n", | ||
102 | readl(gdd + SSI_GDD_CSSA_REG(lch))); | ||
103 | seq_printf(m, "CDSA\t\t: 0x%08x\n", | ||
104 | readl(gdd + SSI_GDD_CDSA_REG(lch))); | ||
105 | seq_printf(m, "CEN\t\t: 0x%04x\n", | ||
106 | readw(gdd + SSI_GDD_CEN_REG(lch))); | ||
107 | seq_printf(m, "CSAC\t\t: 0x%04x\n", | ||
108 | readw(gdd + SSI_GDD_CSAC_REG(lch))); | ||
109 | seq_printf(m, "CDAC\t\t: 0x%04x\n", | ||
110 | readw(gdd + SSI_GDD_CDAC_REG(lch))); | ||
111 | seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", | ||
112 | readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); | ||
113 | } | ||
114 | |||
115 | pm_runtime_put_sync(ssi->device.parent); | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static int ssi_regs_open(struct inode *inode, struct file *file) | ||
121 | { | ||
122 | return single_open(file, ssi_debug_show, inode->i_private); | ||
123 | } | ||
124 | |||
125 | static int ssi_gdd_regs_open(struct inode *inode, struct file *file) | ||
126 | { | ||
127 | return single_open(file, ssi_debug_gdd_show, inode->i_private); | ||
128 | } | ||
129 | |||
130 | static const struct file_operations ssi_regs_fops = { | ||
131 | .open = ssi_regs_open, | ||
132 | .read = seq_read, | ||
133 | .llseek = seq_lseek, | ||
134 | .release = single_release, | ||
135 | }; | ||
136 | |||
137 | static const struct file_operations ssi_gdd_regs_fops = { | ||
138 | .open = ssi_gdd_regs_open, | ||
139 | .read = seq_read, | ||
140 | .llseek = seq_lseek, | ||
141 | .release = single_release, | ||
142 | }; | ||
143 | |||
144 | static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi) | ||
145 | { | ||
146 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
147 | struct dentry *dir; | ||
148 | |||
149 | /* SSI controller */ | ||
150 | omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); | ||
151 | if (IS_ERR(omap_ssi->dir)) | ||
152 | return PTR_ERR(omap_ssi->dir); | ||
153 | |||
154 | debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, | ||
155 | &ssi_regs_fops); | ||
156 | /* SSI GDD (DMA) */ | ||
157 | dir = debugfs_create_dir("gdd", omap_ssi->dir); | ||
158 | if (IS_ERR(dir)) | ||
159 | goto rback; | ||
160 | debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); | ||
161 | |||
162 | return 0; | ||
163 | rback: | ||
164 | debugfs_remove_recursive(omap_ssi->dir); | ||
165 | |||
166 | return PTR_ERR(dir); | ||
167 | } | ||
168 | |||
169 | static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) | ||
170 | { | ||
171 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
172 | |||
173 | debugfs_remove_recursive(omap_ssi->dir); | ||
174 | } | ||
175 | #endif /* CONFIG_DEBUG_FS */ | ||
176 | |||
177 | /* | ||
178 | * FIXME: Horrible HACK needed until we remove the useless wakeline test | ||
179 | * in the CMT. To be removed !!!! | ||
180 | */ | ||
181 | void ssi_waketest(struct hsi_client *cl, unsigned int enable) | ||
182 | { | ||
183 | struct hsi_port *port = hsi_get_port(cl); | ||
184 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
185 | struct hsi_controller *ssi = to_hsi_controller(port->device.parent); | ||
186 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
187 | |||
188 | omap_port->wktest = !!enable; | ||
189 | if (omap_port->wktest) { | ||
190 | pm_runtime_get_sync(ssi->device.parent); | ||
191 | writel_relaxed(SSI_WAKE(0), | ||
192 | omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); | ||
193 | } else { | ||
194 | writel_relaxed(SSI_WAKE(0), | ||
195 | omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); | ||
196 | pm_runtime_put_sync(ssi->device.parent); | ||
197 | } | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(ssi_waketest); | ||
200 | |||
201 | static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) | ||
202 | { | ||
203 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
204 | struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; | ||
205 | struct hsi_port *port = to_hsi_port(msg->cl->device.parent); | ||
206 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
207 | unsigned int dir; | ||
208 | u32 csr; | ||
209 | u32 val; | ||
210 | |||
211 | spin_lock(&omap_ssi->lock); | ||
212 | |||
213 | val = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); | ||
214 | val &= ~SSI_GDD_LCH(lch); | ||
215 | writel_relaxed(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); | ||
216 | |||
217 | if (msg->ttype == HSI_MSG_READ) { | ||
218 | dir = DMA_FROM_DEVICE; | ||
219 | val = SSI_DATAAVAILABLE(msg->channel); | ||
220 | pm_runtime_put_sync(ssi->device.parent); | ||
221 | } else { | ||
222 | dir = DMA_TO_DEVICE; | ||
223 | val = SSI_DATAACCEPT(msg->channel); | ||
224 | /* Keep clocks reference for write pio event */ | ||
225 | } | ||
226 | dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); | ||
227 | csr = readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); | ||
228 | omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ | ||
229 | dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", | ||
230 | msg->channel, msg->ttype); | ||
231 | spin_unlock(&omap_ssi->lock); | ||
232 | if (csr & SSI_CSR_TOUR) { /* Timeout error */ | ||
233 | msg->status = HSI_STATUS_ERROR; | ||
234 | msg->actual_len = 0; | ||
235 | spin_lock(&omap_port->lock); | ||
236 | list_del(&msg->link); /* Dequeue msg */ | ||
237 | spin_unlock(&omap_port->lock); | ||
238 | msg->complete(msg); | ||
239 | return; | ||
240 | } | ||
241 | spin_lock(&omap_port->lock); | ||
242 | val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); | ||
243 | writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); | ||
244 | spin_unlock(&omap_port->lock); | ||
245 | |||
246 | msg->status = HSI_STATUS_COMPLETED; | ||
247 | msg->actual_len = sg_dma_len(msg->sgt.sgl); | ||
248 | } | ||
249 | |||
250 | static void ssi_gdd_tasklet(unsigned long dev) | ||
251 | { | ||
252 | struct hsi_controller *ssi = (struct hsi_controller *)dev; | ||
253 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
254 | void __iomem *sys = omap_ssi->sys; | ||
255 | unsigned int lch; | ||
256 | u32 status_reg; | ||
257 | |||
258 | pm_runtime_get_sync(ssi->device.parent); | ||
259 | |||
260 | status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); | ||
261 | for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { | ||
262 | if (status_reg & SSI_GDD_LCH(lch)) | ||
263 | ssi_gdd_complete(ssi, lch); | ||
264 | } | ||
265 | writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); | ||
266 | status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); | ||
267 | |||
268 | pm_runtime_put_sync(ssi->device.parent); | ||
269 | |||
270 | if (status_reg) | ||
271 | tasklet_hi_schedule(&omap_ssi->gdd_tasklet); | ||
272 | else | ||
273 | enable_irq(omap_ssi->gdd_irq); | ||
274 | |||
275 | } | ||
276 | |||
277 | static irqreturn_t ssi_gdd_isr(int irq, void *ssi) | ||
278 | { | ||
279 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
280 | |||
281 | tasklet_hi_schedule(&omap_ssi->gdd_tasklet); | ||
282 | disable_irq_nosync(irq); | ||
283 | |||
284 | return IRQ_HANDLED; | ||
285 | } | ||
286 | |||
287 | static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi) | ||
288 | { | ||
289 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
290 | unsigned long rate = clk_get_rate(omap_ssi->fck); | ||
291 | return rate; | ||
292 | } | ||
293 | |||
294 | static int __init ssi_get_iomem(struct platform_device *pd, | ||
295 | const char *name, void __iomem **pbase, dma_addr_t *phy) | ||
296 | { | ||
297 | struct resource *mem; | ||
298 | struct resource *ioarea; | ||
299 | void __iomem *base; | ||
300 | struct hsi_controller *ssi = platform_get_drvdata(pd); | ||
301 | |||
302 | mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); | ||
303 | if (!mem) { | ||
304 | dev_err(&pd->dev, "IO memory region missing (%s)\n", name); | ||
305 | return -ENXIO; | ||
306 | } | ||
307 | ioarea = devm_request_mem_region(&ssi->device, mem->start, | ||
308 | resource_size(mem), dev_name(&pd->dev)); | ||
309 | if (!ioarea) { | ||
310 | dev_err(&pd->dev, "%s IO memory region request failed\n", | ||
311 | mem->name); | ||
312 | return -ENXIO; | ||
313 | } | ||
314 | base = devm_ioremap(&ssi->device, mem->start, resource_size(mem)); | ||
315 | if (!base) { | ||
316 | dev_err(&pd->dev, "%s IO remap failed\n", mem->name); | ||
317 | return -ENXIO; | ||
318 | } | ||
319 | *pbase = base; | ||
320 | |||
321 | if (phy) | ||
322 | *phy = mem->start; | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int __init ssi_add_controller(struct hsi_controller *ssi, | ||
328 | struct platform_device *pd) | ||
329 | { | ||
330 | struct omap_ssi_controller *omap_ssi; | ||
331 | int err; | ||
332 | |||
333 | omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL); | ||
334 | if (!omap_ssi) { | ||
335 | dev_err(&pd->dev, "not enough memory for omap ssi\n"); | ||
336 | return -ENOMEM; | ||
337 | } | ||
338 | |||
339 | ssi->id = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); | ||
340 | if (ssi->id < 0) { | ||
341 | err = ssi->id; | ||
342 | goto out_err; | ||
343 | } | ||
344 | |||
345 | ssi->owner = THIS_MODULE; | ||
346 | ssi->device.parent = &pd->dev; | ||
347 | dev_set_name(&ssi->device, "ssi%d", ssi->id); | ||
348 | hsi_controller_set_drvdata(ssi, omap_ssi); | ||
349 | omap_ssi->dev = &ssi->device; | ||
350 | err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL); | ||
351 | if (err < 0) | ||
352 | goto out_err; | ||
353 | err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL); | ||
354 | if (err < 0) | ||
355 | goto out_err; | ||
356 | omap_ssi->gdd_irq = platform_get_irq_byname(pd, "gdd_mpu"); | ||
357 | if (omap_ssi->gdd_irq < 0) { | ||
358 | dev_err(&pd->dev, "GDD IRQ resource missing\n"); | ||
359 | err = omap_ssi->gdd_irq; | ||
360 | goto out_err; | ||
361 | } | ||
362 | tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet, | ||
363 | (unsigned long)ssi); | ||
364 | err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr, | ||
365 | 0, "gdd_mpu", ssi); | ||
366 | if (err < 0) { | ||
367 | dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)", | ||
368 | omap_ssi->gdd_irq, err); | ||
369 | goto out_err; | ||
370 | } | ||
371 | |||
372 | omap_ssi->port = devm_kzalloc(&ssi->device, | ||
373 | sizeof(struct omap_ssi_port *) * ssi->num_ports, GFP_KERNEL); | ||
374 | if (!omap_ssi->port) { | ||
375 | err = -ENOMEM; | ||
376 | goto out_err; | ||
377 | } | ||
378 | |||
379 | omap_ssi->fck = devm_clk_get(&ssi->device, "ssi_ssr_fck"); | ||
380 | if (IS_ERR(omap_ssi->fck)) { | ||
381 | dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n", | ||
382 | PTR_ERR(omap_ssi->fck)); | ||
383 | err = -ENODEV; | ||
384 | goto out_err; | ||
385 | } | ||
386 | |||
387 | /* TODO: find register, which can be used to detect context loss */ | ||
388 | omap_ssi->get_loss = NULL; | ||
389 | |||
390 | omap_ssi->max_speed = UINT_MAX; | ||
391 | spin_lock_init(&omap_ssi->lock); | ||
392 | err = hsi_register_controller(ssi); | ||
393 | |||
394 | if (err < 0) | ||
395 | goto out_err; | ||
396 | |||
397 | return 0; | ||
398 | |||
399 | out_err: | ||
400 | ida_simple_remove(&platform_omap_ssi_ida, ssi->id); | ||
401 | return err; | ||
402 | } | ||
403 | |||
404 | static int __init ssi_hw_init(struct hsi_controller *ssi) | ||
405 | { | ||
406 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
407 | unsigned int i; | ||
408 | u32 val; | ||
409 | int err; | ||
410 | |||
411 | err = pm_runtime_get_sync(ssi->device.parent); | ||
412 | if (err < 0) { | ||
413 | dev_err(&ssi->device, "runtime PM failed %d\n", err); | ||
414 | return err; | ||
415 | } | ||
416 | /* Reseting SSI controller */ | ||
417 | writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG); | ||
418 | val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); | ||
419 | for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) { | ||
420 | msleep(20); | ||
421 | val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); | ||
422 | } | ||
423 | if (!(val & SSI_RESETDONE)) { | ||
424 | dev_err(&ssi->device, "SSI HW reset failed\n"); | ||
425 | pm_runtime_put_sync(ssi->device.parent); | ||
426 | return -EIO; | ||
427 | } | ||
428 | /* Reseting GDD */ | ||
429 | writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); | ||
430 | /* Get FCK rate in KHz */ | ||
431 | omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); | ||
432 | dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate); | ||
433 | /* Set default PM settings */ | ||
434 | val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART; | ||
435 | writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG); | ||
436 | omap_ssi->sysconfig = val; | ||
437 | writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); | ||
438 | omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; | ||
439 | pm_runtime_put_sync(ssi->device.parent); | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static void ssi_remove_controller(struct hsi_controller *ssi) | ||
445 | { | ||
446 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
447 | int id = ssi->id; | ||
448 | tasklet_kill(&omap_ssi->gdd_tasklet); | ||
449 | hsi_unregister_controller(ssi); | ||
450 | ida_simple_remove(&platform_omap_ssi_ida, id); | ||
451 | } | ||
452 | |||
453 | static inline int ssi_of_get_available_ports_count(const struct device_node *np) | ||
454 | { | ||
455 | struct device_node *child; | ||
456 | int num = 0; | ||
457 | |||
458 | for_each_available_child_of_node(np, child) | ||
459 | if (of_device_is_compatible(child, "ti,omap3-ssi-port")) | ||
460 | num++; | ||
461 | |||
462 | return num; | ||
463 | } | ||
464 | |||
465 | static int ssi_remove_ports(struct device *dev, void *c) | ||
466 | { | ||
467 | struct platform_device *pdev = to_platform_device(dev); | ||
468 | |||
469 | of_device_unregister(pdev); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static int __init ssi_probe(struct platform_device *pd) | ||
475 | { | ||
476 | struct platform_device *childpdev; | ||
477 | struct device_node *np = pd->dev.of_node; | ||
478 | struct device_node *child; | ||
479 | struct hsi_controller *ssi; | ||
480 | int err; | ||
481 | int num_ports; | ||
482 | |||
483 | if (!np) { | ||
484 | dev_err(&pd->dev, "missing device tree data\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | num_ports = ssi_of_get_available_ports_count(np); | ||
489 | |||
490 | ssi = hsi_alloc_controller(num_ports, GFP_KERNEL); | ||
491 | if (!ssi) { | ||
492 | dev_err(&pd->dev, "No memory for controller\n"); | ||
493 | return -ENOMEM; | ||
494 | } | ||
495 | |||
496 | platform_set_drvdata(pd, ssi); | ||
497 | |||
498 | err = ssi_add_controller(ssi, pd); | ||
499 | if (err < 0) | ||
500 | goto out1; | ||
501 | |||
502 | pm_runtime_irq_safe(&pd->dev); | ||
503 | pm_runtime_enable(&pd->dev); | ||
504 | |||
505 | err = ssi_hw_init(ssi); | ||
506 | if (err < 0) | ||
507 | goto out2; | ||
508 | #ifdef CONFIG_DEBUG_FS | ||
509 | err = ssi_debug_add_ctrl(ssi); | ||
510 | if (err < 0) | ||
511 | goto out2; | ||
512 | #endif | ||
513 | |||
514 | for_each_available_child_of_node(np, child) { | ||
515 | if (!of_device_is_compatible(child, "ti,omap3-ssi-port")) | ||
516 | continue; | ||
517 | |||
518 | childpdev = of_platform_device_create(child, NULL, &pd->dev); | ||
519 | if (!childpdev) { | ||
520 | err = -ENODEV; | ||
521 | dev_err(&pd->dev, "failed to create ssi controller port\n"); | ||
522 | goto out3; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n", | ||
527 | ssi->id, num_ports); | ||
528 | return err; | ||
529 | out3: | ||
530 | device_for_each_child(&pd->dev, NULL, ssi_remove_ports); | ||
531 | out2: | ||
532 | ssi_remove_controller(ssi); | ||
533 | out1: | ||
534 | platform_set_drvdata(pd, NULL); | ||
535 | pm_runtime_disable(&pd->dev); | ||
536 | |||
537 | return err; | ||
538 | } | ||
539 | |||
540 | static int __exit ssi_remove(struct platform_device *pd) | ||
541 | { | ||
542 | struct hsi_controller *ssi = platform_get_drvdata(pd); | ||
543 | |||
544 | #ifdef CONFIG_DEBUG_FS | ||
545 | ssi_debug_remove_ctrl(ssi); | ||
546 | #endif | ||
547 | ssi_remove_controller(ssi); | ||
548 | platform_set_drvdata(pd, NULL); | ||
549 | |||
550 | pm_runtime_disable(&pd->dev); | ||
551 | |||
552 | /* cleanup of of_platform_populate() call */ | ||
553 | device_for_each_child(&pd->dev, NULL, ssi_remove_ports); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | #ifdef CONFIG_PM_RUNTIME | ||
559 | static int omap_ssi_runtime_suspend(struct device *dev) | ||
560 | { | ||
561 | struct hsi_controller *ssi = dev_get_drvdata(dev); | ||
562 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
563 | |||
564 | dev_dbg(dev, "runtime suspend!\n"); | ||
565 | |||
566 | if (omap_ssi->get_loss) | ||
567 | omap_ssi->loss_count = | ||
568 | omap_ssi->get_loss(ssi->device.parent); | ||
569 | |||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | static int omap_ssi_runtime_resume(struct device *dev) | ||
574 | { | ||
575 | struct hsi_controller *ssi = dev_get_drvdata(dev); | ||
576 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
577 | |||
578 | dev_dbg(dev, "runtime resume!\n"); | ||
579 | |||
580 | if ((omap_ssi->get_loss) && (omap_ssi->loss_count == | ||
581 | omap_ssi->get_loss(ssi->device.parent))) | ||
582 | return 0; | ||
583 | |||
584 | writel_relaxed(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static const struct dev_pm_ops omap_ssi_pm_ops = { | ||
590 | SET_RUNTIME_PM_OPS(omap_ssi_runtime_suspend, omap_ssi_runtime_resume, | ||
591 | NULL) | ||
592 | }; | ||
593 | |||
594 | #define DEV_PM_OPS (&omap_ssi_pm_ops) | ||
595 | #else | ||
596 | #define DEV_PM_OPS NULL | ||
597 | #endif | ||
598 | |||
599 | #ifdef CONFIG_OF | ||
600 | static const struct of_device_id omap_ssi_of_match[] = { | ||
601 | { .compatible = "ti,omap3-ssi", }, | ||
602 | {}, | ||
603 | }; | ||
604 | MODULE_DEVICE_TABLE(of, omap_ssi_of_match); | ||
605 | #else | ||
606 | #define omap_ssi_of_match NULL | ||
607 | #endif | ||
608 | |||
609 | static struct platform_driver ssi_pdriver = { | ||
610 | .remove = __exit_p(ssi_remove), | ||
611 | .driver = { | ||
612 | .name = "omap_ssi", | ||
613 | .owner = THIS_MODULE, | ||
614 | .pm = DEV_PM_OPS, | ||
615 | .of_match_table = omap_ssi_of_match, | ||
616 | }, | ||
617 | }; | ||
618 | |||
619 | module_platform_driver_probe(ssi_pdriver, ssi_probe); | ||
620 | |||
621 | MODULE_ALIAS("platform:omap_ssi"); | ||
622 | MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); | ||
623 | MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); | ||
624 | MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); | ||
625 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h new file mode 100644 index 000000000000..9d056417d88c --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.h | |||
@@ -0,0 +1,166 @@ | |||
1 | /* OMAP SSI internal interface. | ||
2 | * | ||
3 | * Copyright (C) 2010 Nokia Corporation. All rights reserved. | ||
4 | * Copyright (C) 2013 Sebastian Reichel | ||
5 | * | ||
6 | * Contact: Carlos Chinea <carlos.chinea@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __LINUX_HSI_OMAP_SSI_H__ | ||
24 | #define __LINUX_HSI_OMAP_SSI_H__ | ||
25 | |||
26 | #include <linux/device.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/hsi/hsi.h> | ||
29 | #include <linux/gpio.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/io.h> | ||
32 | |||
33 | #define SSI_MAX_CHANNELS 8 | ||
34 | #define SSI_MAX_GDD_LCH 8 | ||
35 | #define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) | ||
36 | |||
37 | /** | ||
38 | * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context | ||
39 | * @mode: Bit transmission mode | ||
40 | * @channels: Number of channels | ||
41 | * @framesize: Frame size in bits | ||
42 | * @timeout: RX frame timeout | ||
43 | * @divisor: TX divider | ||
44 | * @arb_mode: Arbitration mode for TX frame (Round robin, priority) | ||
45 | */ | ||
46 | struct omap_ssm_ctx { | ||
47 | u32 mode; | ||
48 | u32 channels; | ||
49 | u32 frame_size; | ||
50 | union { | ||
51 | u32 timeout; /* Rx Only */ | ||
52 | struct { | ||
53 | u32 arb_mode; | ||
54 | u32 divisor; | ||
55 | }; /* Tx only */ | ||
56 | }; | ||
57 | }; | ||
58 | |||
59 | /** | ||
60 | * struct omap_ssi_port - OMAP SSI port data | ||
61 | * @dev: device associated to the port (HSI port) | ||
62 | * @pdev: platform device associated to the port | ||
63 | * @sst_dma: SSI transmitter physical base address | ||
64 | * @ssr_dma: SSI receiver physical base address | ||
65 | * @sst_base: SSI transmitter base address | ||
66 | * @ssr_base: SSI receiver base address | ||
67 | * @wk_lock: spin lock to serialize access to the wake lines | ||
68 | * @lock: Spin lock to serialize access to the SSI port | ||
69 | * @channels: Current number of channels configured (1,2,4 or 8) | ||
70 | * @txqueue: TX message queues | ||
71 | * @rxqueue: RX message queues | ||
72 | * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) | ||
73 | * @irq: IRQ number | ||
74 | * @wake_irq: IRQ number for incoming wake line (-1 if none) | ||
75 | * @wake_gpio: GPIO number for incoming wake line (-1 if none) | ||
76 | * @pio_tasklet: Bottom half for PIO transfers and events | ||
77 | * @wake_tasklet: Bottom half for incoming wake events | ||
78 | * @wkin_cken: Keep track of clock references due to the incoming wake line | ||
79 | * @wk_refcount: Reference count for output wake line | ||
80 | * @sys_mpu_enable: Context for the interrupt enable register for irq 0 | ||
81 | * @sst: Context for the synchronous serial transmitter | ||
82 | * @ssr: Context for the synchronous serial receiver | ||
83 | */ | ||
84 | struct omap_ssi_port { | ||
85 | struct device *dev; | ||
86 | struct device *pdev; | ||
87 | dma_addr_t sst_dma; | ||
88 | dma_addr_t ssr_dma; | ||
89 | void __iomem *sst_base; | ||
90 | void __iomem *ssr_base; | ||
91 | spinlock_t wk_lock; | ||
92 | spinlock_t lock; | ||
93 | unsigned int channels; | ||
94 | struct list_head txqueue[SSI_MAX_CHANNELS]; | ||
95 | struct list_head rxqueue[SSI_MAX_CHANNELS]; | ||
96 | struct list_head brkqueue; | ||
97 | unsigned int irq; | ||
98 | int wake_irq; | ||
99 | int wake_gpio; | ||
100 | struct tasklet_struct pio_tasklet; | ||
101 | struct tasklet_struct wake_tasklet; | ||
102 | bool wktest:1; /* FIXME: HACK to be removed */ | ||
103 | bool wkin_cken:1; /* Workaround */ | ||
104 | unsigned int wk_refcount; | ||
105 | /* OMAP SSI port context */ | ||
106 | u32 sys_mpu_enable; /* We use only one irq */ | ||
107 | struct omap_ssm_ctx sst; | ||
108 | struct omap_ssm_ctx ssr; | ||
109 | u32 loss_count; | ||
110 | u32 port_id; | ||
111 | #ifdef CONFIG_DEBUG_FS | ||
112 | struct dentry *dir; | ||
113 | #endif | ||
114 | }; | ||
115 | |||
116 | /** | ||
117 | * struct gdd_trn - GDD transaction data | ||
118 | * @msg: Pointer to the HSI message being served | ||
119 | * @sg: Pointer to the current sg entry being served | ||
120 | */ | ||
121 | struct gdd_trn { | ||
122 | struct hsi_msg *msg; | ||
123 | struct scatterlist *sg; | ||
124 | }; | ||
125 | |||
126 | /** | ||
127 | * struct omap_ssi_controller - OMAP SSI controller data | ||
128 | * @dev: device associated to the controller (HSI controller) | ||
129 | * @sys: SSI I/O base address | ||
130 | * @gdd: GDD I/O base address | ||
131 | * @fck: SSI functional clock | ||
132 | * @gdd_irq: IRQ line for GDD | ||
133 | * @gdd_tasklet: bottom half for DMA transfers | ||
134 | * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers | ||
135 | * @lock: lock to serialize access to GDD | ||
136 | * @loss_count: To follow if we need to restore context or not | ||
137 | * @max_speed: Maximum TX speed (Kb/s) set by the clients. | ||
138 | * @sysconfig: SSI controller saved context | ||
139 | * @gdd_gcr: SSI GDD saved context | ||
140 | * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any | ||
141 | * @port: Array of pointers of the ports of the controller | ||
142 | * @dir: Debugfs SSI root directory | ||
143 | */ | ||
144 | struct omap_ssi_controller { | ||
145 | struct device *dev; | ||
146 | void __iomem *sys; | ||
147 | void __iomem *gdd; | ||
148 | struct clk *fck; | ||
149 | unsigned int gdd_irq; | ||
150 | struct tasklet_struct gdd_tasklet; | ||
151 | struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH]; | ||
152 | spinlock_t lock; | ||
153 | unsigned long fck_rate; | ||
154 | u32 loss_count; | ||
155 | u32 max_speed; | ||
156 | /* OMAP SSI Controller context */ | ||
157 | u32 sysconfig; | ||
158 | u32 gdd_gcr; | ||
159 | int (*get_loss)(struct device *dev); | ||
160 | struct omap_ssi_port **port; | ||
161 | #ifdef CONFIG_DEBUG_FS | ||
162 | struct dentry *dir; | ||
163 | #endif | ||
164 | }; | ||
165 | |||
166 | #endif /* __LINUX_HSI_OMAP_SSI_H__ */ | ||
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c new file mode 100644 index 000000000000..b8693f0b27fe --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_port.c | |||
@@ -0,0 +1,1399 @@ | |||
1 | /* OMAP SSI port driver. | ||
2 | * | ||
3 | * Copyright (C) 2010 Nokia Corporation. All rights reserved. | ||
4 | * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> | ||
5 | * | ||
6 | * Contact: Carlos Chinea <carlos.chinea@nokia.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/pm_runtime.h> | ||
26 | |||
27 | #include <linux/of_gpio.h> | ||
28 | #include <linux/debugfs.h> | ||
29 | |||
30 | #include "omap_ssi_regs.h" | ||
31 | #include "omap_ssi.h" | ||
32 | |||
/* No-op message handler: default stand-in for optional HSI msg callbacks. */
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}
37 | |||
/* No-op client handler: default stand-in for optional HSI client callbacks. */
static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}
42 | |||
/* Sample the current level of the incoming (CAWAKE) wake GPIO line. */
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpio_get_value(omap_port->wake_gpio);
}
48 | |||
49 | #ifdef CONFIG_DEBUG_FS | ||
/* Tear down the per-port debugfs tree created by ssi_debug_add_port(). */
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}
56 | |||
/*
 * Dump the port-level, SST (TX) and SSR (RX) register state to debugfs.
 * Clocks are held via pm_runtime for the duration of the register reads.
 */
static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	/* CAWAKE level is only meaningful when a wake IRQ is wired up */
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
			readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
			readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
			readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
			readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
			readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
131 | |||
/* debugfs open: bind the "regs" file to ssi_debug_port_show(). */
static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}
136 | |||
/* File operations for the read-only per-port "regs" debugfs file. */
static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
143 | |||
/* debugfs getter: read back the current SST divisor register. */
static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
155 | |||
156 | static int ssi_div_set(void *data, u64 val) | ||
157 | { | ||
158 | struct hsi_port *port = data; | ||
159 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
160 | |||
161 | if (val > 127) | ||
162 | return -EINVAL; | ||
163 | |||
164 | pm_runtime_get_sync(omap_port->pdev); | ||
165 | writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); | ||
166 | omap_port->sst.divisor = val; | ||
167 | pm_runtime_put_sync(omap_port->pdev); | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); | ||
173 | |||
/*
 * Create the per-port debugfs tree: <port>/regs and <port>/sst/divisor.
 * NOTE(review): the IS_ERR() checks assume debugfs_create_dir() returns an
 * ERR_PTR on failure; on some kernels it returns NULL instead (e.g. on
 * allocation failure) — confirm against the target debugfs API.
 */
static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
192 | #endif | ||
193 | |||
194 | static int ssi_claim_lch(struct hsi_msg *msg) | ||
195 | { | ||
196 | |||
197 | struct hsi_port *port = hsi_get_port(msg->cl); | ||
198 | struct hsi_controller *ssi = to_hsi_controller(port->device.parent); | ||
199 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
200 | int lch; | ||
201 | |||
202 | for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) | ||
203 | if (!omap_ssi->gdd_trn[lch].msg) { | ||
204 | omap_ssi->gdd_trn[lch].msg = msg; | ||
205 | omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; | ||
206 | return lch; | ||
207 | } | ||
208 | |||
209 | return -EBUSY; | ||
210 | } | ||
211 | |||
212 | static int ssi_start_dma(struct hsi_msg *msg, int lch) | ||
213 | { | ||
214 | struct hsi_port *port = hsi_get_port(msg->cl); | ||
215 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
216 | struct hsi_controller *ssi = to_hsi_controller(port->device.parent); | ||
217 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
218 | void __iomem *gdd = omap_ssi->gdd; | ||
219 | int err; | ||
220 | u16 csdp; | ||
221 | u16 ccr; | ||
222 | u32 s_addr; | ||
223 | u32 d_addr; | ||
224 | u32 tmp; | ||
225 | |||
226 | if (msg->ttype == HSI_MSG_READ) { | ||
227 | err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, | ||
228 | DMA_FROM_DEVICE); | ||
229 | if (err < 0) { | ||
230 | dev_dbg(&ssi->device, "DMA map SG failed !\n"); | ||
231 | return err; | ||
232 | } | ||
233 | csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | | ||
234 | SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | | ||
235 | SSI_DATA_TYPE_S32; | ||
236 | ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ | ||
237 | ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | | ||
238 | SSI_CCR_ENABLE; | ||
239 | s_addr = omap_port->ssr_dma + | ||
240 | SSI_SSR_BUFFER_CH_REG(msg->channel); | ||
241 | d_addr = sg_dma_address(msg->sgt.sgl); | ||
242 | } else { | ||
243 | err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, | ||
244 | DMA_TO_DEVICE); | ||
245 | if (err < 0) { | ||
246 | dev_dbg(&ssi->device, "DMA map SG failed !\n"); | ||
247 | return err; | ||
248 | } | ||
249 | csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | | ||
250 | SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | | ||
251 | SSI_DATA_TYPE_S32; | ||
252 | ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ | ||
253 | ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | | ||
254 | SSI_CCR_ENABLE; | ||
255 | s_addr = sg_dma_address(msg->sgt.sgl); | ||
256 | d_addr = omap_port->sst_dma + | ||
257 | SSI_SST_BUFFER_CH_REG(msg->channel); | ||
258 | } | ||
259 | dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n", | ||
260 | lch, csdp, ccr, s_addr, d_addr); | ||
261 | |||
262 | /* Hold clocks during the transfer */ | ||
263 | pm_runtime_get_sync(omap_port->pdev); | ||
264 | |||
265 | writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); | ||
266 | writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); | ||
267 | writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); | ||
268 | writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); | ||
269 | writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), | ||
270 | gdd + SSI_GDD_CEN_REG(lch)); | ||
271 | |||
272 | spin_lock_bh(&omap_ssi->lock); | ||
273 | tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); | ||
274 | tmp |= SSI_GDD_LCH(lch); | ||
275 | writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); | ||
276 | spin_unlock_bh(&omap_ssi->lock); | ||
277 | writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); | ||
278 | msg->status = HSI_STATUS_PROCEEDING; | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
/*
 * Arm the per-channel MPU interrupt for a PIO transfer; the IRQ handler
 * then moves the data word by word. For writes an extra pm_runtime
 * reference is taken so clocks stay on until the write completes
 * (NOTE(review): the matching put is presumably in the completion path,
 * not visible here — confirm the reference is balanced).
 */
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
309 | |||
310 | static int ssi_start_transfer(struct list_head *queue) | ||
311 | { | ||
312 | struct hsi_msg *msg; | ||
313 | int lch = -1; | ||
314 | |||
315 | if (list_empty(queue)) | ||
316 | return 0; | ||
317 | msg = list_first_entry(queue, struct hsi_msg, link); | ||
318 | if (msg->status != HSI_STATUS_QUEUED) | ||
319 | return 0; | ||
320 | if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) | ||
321 | lch = ssi_claim_lch(msg); | ||
322 | if (lch >= 0) | ||
323 | return ssi_start_dma(msg, lch); | ||
324 | else | ||
325 | return ssi_start_pio(msg); | ||
326 | } | ||
327 | |||
/*
 * Handle a break-frame request. TX breaks are emitted immediately and
 * completed synchronously; RX break requests arm the break-detect
 * interrupt and are queued on brkqueue until the break arrives.
 * Both directions require FRAME mode; -EINVAL otherwise.
 */
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
365 | |||
366 | static int ssi_async(struct hsi_msg *msg) | ||
367 | { | ||
368 | struct hsi_port *port = hsi_get_port(msg->cl); | ||
369 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
370 | struct list_head *queue; | ||
371 | int err = 0; | ||
372 | |||
373 | BUG_ON(!msg); | ||
374 | |||
375 | if (msg->sgt.nents > 1) | ||
376 | return -ENOSYS; /* TODO: Add sg support */ | ||
377 | |||
378 | if (msg->break_frame) | ||
379 | return ssi_async_break(msg); | ||
380 | |||
381 | if (msg->ttype) { | ||
382 | BUG_ON(msg->channel >= omap_port->sst.channels); | ||
383 | queue = &omap_port->txqueue[msg->channel]; | ||
384 | } else { | ||
385 | BUG_ON(msg->channel >= omap_port->ssr.channels); | ||
386 | queue = &omap_port->rxqueue[msg->channel]; | ||
387 | } | ||
388 | msg->status = HSI_STATUS_QUEUED; | ||
389 | spin_lock_bh(&omap_port->lock); | ||
390 | list_add_tail(&msg->link, queue); | ||
391 | err = ssi_start_transfer(queue); | ||
392 | if (err < 0) { | ||
393 | list_del(&msg->link); | ||
394 | msg->status = HSI_STATUS_ERROR; | ||
395 | } | ||
396 | spin_unlock_bh(&omap_port->lock); | ||
397 | dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", | ||
398 | msg->status, msg->ttype, msg->channel); | ||
399 | |||
400 | return err; | ||
401 | } | ||
402 | |||
/*
 * Compute the SST divisor needed to reach the highest TX speed requested
 * by the clients (omap_ssi->max_speed, Kb/s) from the functional clock
 * rate (kHz). NOTE(review): assumes max_speed != 0 — it is set from
 * cl->tx_cfg.speed in ssi_setup(); confirm a default exists before any
 * client setup.
 */
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}
418 | |||
419 | static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) | ||
420 | { | ||
421 | struct list_head *node, *tmp; | ||
422 | struct hsi_msg *msg; | ||
423 | |||
424 | list_for_each_safe(node, tmp, queue) { | ||
425 | msg = list_entry(node, struct hsi_msg, link); | ||
426 | if ((cl) && (cl != msg->cl)) | ||
427 | continue; | ||
428 | list_del(node); | ||
429 | pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", | ||
430 | msg->channel, msg, msg->sgt.sgl->length, | ||
431 | msg->ttype, msg->context); | ||
432 | if (msg->destructor) | ||
433 | msg->destructor(msg); | ||
434 | else | ||
435 | hsi_free_msg(msg); | ||
436 | } | ||
437 | } | ||
438 | |||
/*
 * Apply a client's TX/RX configuration to the port hardware.
 * Sequence matters: both directions are first forced to SLEEP (with a
 * posted-write flush) so no traffic runs while the registers are
 * reprogrammed; TX is configured before RX; finally shadow copies of all
 * programmed values are recorded so the context can be restored after an
 * OFF-mode power loss.
 */
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
502 | |||
/*
 * Abort everything in flight on the client's port: stop all GDD DMA
 * transfers touching this port, flush the SST/SSR FIFO state, ack all
 * pending errors/breaks/interrupts and drop every queued request.
 * pm_runtime references held for in-flight DMA reads and PIO writes are
 * released here to keep the clock refcount balanced.
 */
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		/* DMA reads hold a clock reference taken in ssi_start_dma */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
558 | |||
/*
 * Raise the outgoing (ACWAKE) wake line. The line is refcounted: only
 * the first caller touches the hardware and grabs a clock reference;
 * subsequent callers just bump the count.
 * Note: the dev_dbg reads wk_refcount outside wk_lock — debug-only, may
 * report a stale value.
 */
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
579 | |||
/*
 * Drop one reference on the outgoing (ACWAKE) wake line; when the count
 * hits zero the line is lowered and the clock reference taken by
 * ssi_start_tx() is released. BUG_ON catches unbalanced stop/start pairs.
 */
static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
601 | |||
/*
 * Keep trying to start the transfer at the head of @queue; each failed
 * head is completed with HSI_STATUS_ERROR and removed so the next one
 * gets a chance. The port lock is dropped around the complete() callback
 * because clients may re-enter the driver from it.
 */
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}
623 | |||
/*
 * Remove all of @cl's messages from the break/TX/RX queues, then clear
 * the matching hardware buffer state and disarm/ack the interrupts for
 * channels whose head transfer was in flight. Clock references held for
 * proceeding writes (taken in ssi_start_pio / ssi_start_dma) are dropped
 * here; DMA read references are handled in ssi_cleanup_gdd().
 */
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
}
683 | |||
/*
 * Cancel every ongoing GDD DMA transaction belonging to @cl: stop each
 * channel, release the clock reference held for DMA reads, and disarm
 * plus ack the corresponding GDD interrupts.
 */
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}
713 | |||
/*
 * Program both the SST (TX) and SSR (RX) mode registers to @mode; the
 * trailing read back serves as an OCP barrier flushing the posted
 * writes. Always returns 0.
 */
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
723 | |||
/*
 * Client release callback: abort the client's DMA transactions and
 * queued requests. When this is the last client on the port, also drop
 * the clock reference kept for a still-raised incoming wake line, force
 * the port to SLEEP mode and verify the TX wake refcount is balanced.
 */
static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}
759 | |||
760 | |||
761 | |||
/*
 * Port error handler: ack the SSR error, cancel every GDD read transfer
 * and all pending PIO reads, then complete each in-flight read request
 * with HSI_STATUS_ERROR. The port lock is dropped around complete() and
 * ssi_transfer() since clients may re-enter the driver from the callback.
 */
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}
819 | |||
/*
 * ssi_break_complete - handle a detected HW BREAK on @port
 *
 * Masks and acknowledges break detection on the receiver, then completes
 * every message queued on the break queue. Called from tasklet context.
 */
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	/* Mask further break interrupts, clear the break and ack the status. */
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
	       omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	/* Complete all queued break requests; drop the lock per callback. */
	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}

}
849 | |||
/*
 * ssi_pio_complete - move one 32-bit word of the head message on @queue
 *
 * Each DATAACCEPT/DATAAVAILABLE interrupt allows one word to be written to
 * (TX) or read from (RX) the channel buffer. When the whole buffer has been
 * transferred the message is completed and the next queued transfer on the
 * same channel is started. Called from tasklet context.
 */
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	/* A zero-length transfer completes without moving any data. */
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
			msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			/* Ack the interrupt and wait for the next one. */
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}

	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	/* Mask and ack the channel interrupt, then complete the message. */
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	/* Kick off the next queued transfer on this channel, if any. */
	ssi_transfer(omap_port, queue);
}
912 | |||
/*
 * ssi_pio_tasklet - bottom half for the port MPU interrupt
 *
 * Services all pending enabled events (per-channel data accept/available,
 * HW break, SSR error). If new events arrived meanwhile the tasklet
 * reschedules itself; otherwise it re-enables the IRQ that was masked in
 * ssi_pio_isr().
 */
static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	/* Keep the module clocked while touching its registers. */
	pm_runtime_get_sync(omap_port->pdev);
	/* Only look at events that are both pending and enabled. */
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	/* Re-check: events may have become pending while we serviced. */
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}
947 | |||
/*
 * ssi_pio_isr - hard IRQ handler for the port MPU interrupt
 *
 * Defers all work to ssi_pio_tasklet() and keeps the IRQ masked until the
 * tasklet has drained the status register (it re-enables it when done).
 */
static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
957 | |||
/*
 * ssi_wake_tasklet - bottom half for the CAWAKE GPIO interrupt
 *
 * Samples the incoming wake line and translates its edges into
 * HSI_EVENT_START_RX/HSI_EVENT_STOP_RX events, while holding a runtime PM
 * reference for as long as the line is high (tracked via wkin_cken).
 */
static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/**
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		/* Drop the clock reference taken on the rising edge. */
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}
1000 | |||
/*
 * ssi_wake_isr - hard IRQ handler for CAWAKE edges; defers to the wake
 * tasklet, which samples the line level itself (both edges trigger here).
 */
static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}
1009 | |||
1010 | static int __init ssi_port_irq(struct hsi_port *port, | ||
1011 | struct platform_device *pd) | ||
1012 | { | ||
1013 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
1014 | int err; | ||
1015 | |||
1016 | omap_port->irq = platform_get_irq(pd, 0); | ||
1017 | if (omap_port->irq < 0) { | ||
1018 | dev_err(&port->device, "Port IRQ resource missing\n"); | ||
1019 | return omap_port->irq; | ||
1020 | } | ||
1021 | tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, | ||
1022 | (unsigned long)port); | ||
1023 | err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr, | ||
1024 | 0, "mpu_irq0", port); | ||
1025 | if (err < 0) | ||
1026 | dev_err(&port->device, "Request IRQ %d failed (%d)\n", | ||
1027 | omap_port->irq, err); | ||
1028 | return err; | ||
1029 | } | ||
1030 | |||
1031 | static int __init ssi_wake_irq(struct hsi_port *port, | ||
1032 | struct platform_device *pd) | ||
1033 | { | ||
1034 | struct omap_ssi_port *omap_port = hsi_port_drvdata(port); | ||
1035 | int cawake_irq; | ||
1036 | int err; | ||
1037 | |||
1038 | if (omap_port->wake_gpio == -1) { | ||
1039 | omap_port->wake_irq = -1; | ||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
1043 | cawake_irq = gpio_to_irq(omap_port->wake_gpio); | ||
1044 | |||
1045 | omap_port->wake_irq = cawake_irq; | ||
1046 | tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, | ||
1047 | (unsigned long)port); | ||
1048 | err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr, | ||
1049 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
1050 | "cawake", port); | ||
1051 | if (err < 0) | ||
1052 | dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", | ||
1053 | cawake_irq, err); | ||
1054 | err = enable_irq_wake(cawake_irq); | ||
1055 | if (err < 0) | ||
1056 | dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", | ||
1057 | cawake_irq, err); | ||
1058 | |||
1059 | return err; | ||
1060 | } | ||
1061 | |||
1062 | static void __init ssi_queues_init(struct omap_ssi_port *omap_port) | ||
1063 | { | ||
1064 | unsigned int ch; | ||
1065 | |||
1066 | for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { | ||
1067 | INIT_LIST_HEAD(&omap_port->txqueue[ch]); | ||
1068 | INIT_LIST_HEAD(&omap_port->rxqueue[ch]); | ||
1069 | } | ||
1070 | INIT_LIST_HEAD(&omap_port->brkqueue); | ||
1071 | } | ||
1072 | |||
1073 | static int __init ssi_port_get_iomem(struct platform_device *pd, | ||
1074 | const char *name, void __iomem **pbase, dma_addr_t *phy) | ||
1075 | { | ||
1076 | struct hsi_port *port = platform_get_drvdata(pd); | ||
1077 | struct resource *mem; | ||
1078 | struct resource *ioarea; | ||
1079 | void __iomem *base; | ||
1080 | |||
1081 | mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); | ||
1082 | if (!mem) { | ||
1083 | dev_err(&pd->dev, "IO memory region missing (%s)\n", name); | ||
1084 | return -ENXIO; | ||
1085 | } | ||
1086 | ioarea = devm_request_mem_region(&port->device, mem->start, | ||
1087 | resource_size(mem), dev_name(&pd->dev)); | ||
1088 | if (!ioarea) { | ||
1089 | dev_err(&pd->dev, "%s IO memory region request failed\n", | ||
1090 | mem->name); | ||
1091 | return -ENXIO; | ||
1092 | } | ||
1093 | base = devm_ioremap(&port->device, mem->start, resource_size(mem)); | ||
1094 | if (!base) { | ||
1095 | dev_err(&pd->dev, "%s IO remap failed\n", mem->name); | ||
1096 | return -ENXIO; | ||
1097 | } | ||
1098 | *pbase = base; | ||
1099 | |||
1100 | if (phy) | ||
1101 | *phy = mem->start; | ||
1102 | |||
1103 | return 0; | ||
1104 | } | ||
1105 | |||
1106 | static int __init ssi_port_probe(struct platform_device *pd) | ||
1107 | { | ||
1108 | struct device_node *np = pd->dev.of_node; | ||
1109 | struct hsi_port *port; | ||
1110 | struct omap_ssi_port *omap_port; | ||
1111 | struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); | ||
1112 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
1113 | u32 cawake_gpio = 0; | ||
1114 | u32 port_id; | ||
1115 | int err; | ||
1116 | |||
1117 | dev_dbg(&pd->dev, "init ssi port...\n"); | ||
1118 | |||
1119 | err = ref_module(THIS_MODULE, ssi->owner); | ||
1120 | if (err) { | ||
1121 | dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n", | ||
1122 | err); | ||
1123 | return -ENODEV; | ||
1124 | } | ||
1125 | |||
1126 | if (!ssi->port || !omap_ssi->port) { | ||
1127 | dev_err(&pd->dev, "ssi controller not initialized!\n"); | ||
1128 | err = -ENODEV; | ||
1129 | goto error; | ||
1130 | } | ||
1131 | |||
1132 | /* get id of first uninitialized port in controller */ | ||
1133 | for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; | ||
1134 | port_id++) | ||
1135 | ; | ||
1136 | |||
1137 | if (port_id >= ssi->num_ports) { | ||
1138 | dev_err(&pd->dev, "port id out of range!\n"); | ||
1139 | err = -ENODEV; | ||
1140 | goto error; | ||
1141 | } | ||
1142 | |||
1143 | port = ssi->port[port_id]; | ||
1144 | |||
1145 | if (!np) { | ||
1146 | dev_err(&pd->dev, "missing device tree data\n"); | ||
1147 | err = -EINVAL; | ||
1148 | goto error; | ||
1149 | } | ||
1150 | |||
1151 | cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0); | ||
1152 | if (cawake_gpio < 0) { | ||
1153 | dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n", | ||
1154 | cawake_gpio); | ||
1155 | err = -ENODEV; | ||
1156 | goto error; | ||
1157 | } | ||
1158 | |||
1159 | err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN, | ||
1160 | "cawake"); | ||
1161 | if (err) { | ||
1162 | dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n", | ||
1163 | err); | ||
1164 | err = -ENXIO; | ||
1165 | goto error; | ||
1166 | } | ||
1167 | |||
1168 | omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); | ||
1169 | if (!omap_port) { | ||
1170 | err = -ENOMEM; | ||
1171 | goto error; | ||
1172 | } | ||
1173 | omap_port->wake_gpio = cawake_gpio; | ||
1174 | omap_port->pdev = &pd->dev; | ||
1175 | omap_port->port_id = port_id; | ||
1176 | |||
1177 | /* initialize HSI port */ | ||
1178 | port->async = ssi_async; | ||
1179 | port->setup = ssi_setup; | ||
1180 | port->flush = ssi_flush; | ||
1181 | port->start_tx = ssi_start_tx; | ||
1182 | port->stop_tx = ssi_stop_tx; | ||
1183 | port->release = ssi_release; | ||
1184 | hsi_port_set_drvdata(port, omap_port); | ||
1185 | omap_ssi->port[port_id] = omap_port; | ||
1186 | |||
1187 | platform_set_drvdata(pd, port); | ||
1188 | |||
1189 | err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, | ||
1190 | &omap_port->sst_dma); | ||
1191 | if (err < 0) | ||
1192 | goto error; | ||
1193 | err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, | ||
1194 | &omap_port->ssr_dma); | ||
1195 | if (err < 0) | ||
1196 | goto error; | ||
1197 | |||
1198 | err = ssi_port_irq(port, pd); | ||
1199 | if (err < 0) | ||
1200 | goto error; | ||
1201 | err = ssi_wake_irq(port, pd); | ||
1202 | if (err < 0) | ||
1203 | goto error; | ||
1204 | |||
1205 | ssi_queues_init(omap_port); | ||
1206 | spin_lock_init(&omap_port->lock); | ||
1207 | spin_lock_init(&omap_port->wk_lock); | ||
1208 | omap_port->dev = &port->device; | ||
1209 | |||
1210 | pm_runtime_irq_safe(omap_port->pdev); | ||
1211 | pm_runtime_enable(omap_port->pdev); | ||
1212 | |||
1213 | #ifdef CONFIG_DEBUG_FS | ||
1214 | err = ssi_debug_add_port(omap_port, omap_ssi->dir); | ||
1215 | if (err < 0) { | ||
1216 | pm_runtime_disable(omap_port->pdev); | ||
1217 | goto error; | ||
1218 | } | ||
1219 | #endif | ||
1220 | |||
1221 | hsi_add_clients_from_dt(port, np); | ||
1222 | |||
1223 | dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n", | ||
1224 | port_id, cawake_gpio); | ||
1225 | |||
1226 | return 0; | ||
1227 | |||
1228 | error: | ||
1229 | return err; | ||
1230 | } | ||
1231 | |||
/*
 * ssi_port_remove - tear down one OMAP SSI port
 *
 * Removes debugfs entries, unregisters DT clients, kills the tasklets,
 * resets the port callbacks to dummies, releases the controller slot and
 * disables runtime PM.
 */
static int __exit ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->wake_tasklet);
	tasklet_kill(&omap_port->pio_tasklet);

	/* Neutralize the port ops so late callers get harmless stubs. */
	port->async = hsi_dummy_msg;
	port->setup = hsi_dummy_cl;
	port->flush = hsi_dummy_cl;
	port->start_tx = hsi_dummy_cl;
	port->stop_tx = hsi_dummy_cl;
	port->release = hsi_dummy_cl;

	/* Free the slot so a re-probed port can claim this id again. */
	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	pm_runtime_disable(&pd->dev);

	return 0;
}
1261 | |||
1262 | #ifdef CONFIG_PM_RUNTIME | ||
1263 | static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) | ||
1264 | { | ||
1265 | struct hsi_port *port = to_hsi_port(omap_port->dev); | ||
1266 | struct hsi_controller *ssi = to_hsi_controller(port->device.parent); | ||
1267 | struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); | ||
1268 | |||
1269 | omap_port->sys_mpu_enable = readl(omap_ssi->sys + | ||
1270 | SSI_MPU_ENABLE_REG(port->num, 0)); | ||
1271 | |||
1272 | return 0; | ||
1273 | } | ||
1274 | |||
/*
 * ssi_restore_port_ctx - reprogram saved port registers after context loss
 *
 * Restores the MPU interrupt-enable mask and the SST/SSR configuration
 * registers. The mode and divisor registers are restored separately (see
 * ssi_restore_port_mode() and ssi_restore_divisor()).
 */
static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
		       omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}
1299 | |||
1300 | static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) | ||
1301 | { | ||
1302 | u32 mode; | ||
1303 | |||
1304 | writel_relaxed(omap_port->sst.mode, | ||
1305 | omap_port->sst_base + SSI_SST_MODE_REG); | ||
1306 | writel_relaxed(omap_port->ssr.mode, | ||
1307 | omap_port->ssr_base + SSI_SSR_MODE_REG); | ||
1308 | /* OCP barrier */ | ||
1309 | mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); | ||
1310 | |||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | static int ssi_restore_divisor(struct omap_ssi_port *omap_port) | ||
1315 | { | ||
1316 | writel_relaxed(omap_port->sst.divisor, | ||
1317 | omap_port->sst_base + SSI_SST_DIVISOR_REG); | ||
1318 | |||
1319 | return 0; | ||
1320 | } | ||
1321 | |||
/*
 * omap_ssi_port_runtime_suspend - runtime-PM suspend hook for a port
 *
 * Puts both directions in SLEEP mode, records the platform context-loss
 * counter (if available) and saves the registers that may be lost.
 */
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	/* Remember the loss count so resume can skip a needless restore. */
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}
1339 | |||
/*
 * omap_ssi_port_runtime_resume - runtime-PM resume hook for a port
 *
 * Restores the saved register context only when the platform reports that
 * context was actually lost; the mode and TX divisor are restored
 * unconditionally.
 */
static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}
1361 | |||
/* Runtime-PM only: no system sleep ops are provided. */
static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif


#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

/* Probe is registered via module_platform_driver_probe() below. */
static struct platform_driver ssi_port_pdriver = {
	.remove = __exit_p(ssi_port_remove),
	.driver = {
		.name	= "omap_ssi_port",
		.owner	= THIS_MODULE,
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};

module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);

MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/controllers/omap_ssi_regs.h b/drivers/hsi/controllers/omap_ssi_regs.h new file mode 100644 index 000000000000..08f98dd1d01f --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_regs.h | |||
@@ -0,0 +1,171 @@ | |||
1 | /* Hardware definitions for SSI. | ||
2 | * | ||
3 | * Copyright (C) 2010 Nokia Corporation. All rights reserved. | ||
4 | * | ||
5 | * Contact: Carlos Chinea <carlos.chinea@nokia.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
19 | * 02110-1301 USA | ||
20 | */ | ||
21 | |||
#ifndef __OMAP_SSI_REGS_H__
#define __OMAP_SSI_REGS_H__

/*
 * SSI SYS registers
 */
#define SSI_REVISION_REG	0
# define SSI_REV_MAJOR		0xf0
# define SSI_REV_MINOR		0xf
#define SSI_SYSCONFIG_REG	0x10
# define SSI_AUTOIDLE		(1 << 0)
# define SSI_SOFTRESET		(1 << 1)
# define SSI_SIDLEMODE_FORCE	0
# define SSI_SIDLEMODE_NO	(1 << 3)
# define SSI_SIDLEMODE_SMART	(1 << 4)
# define SSI_SIDLEMODE_MASK	0x18
# define SSI_MIDLEMODE_FORCE	0
# define SSI_MIDLEMODE_NO	(1 << 12)
# define SSI_MIDLEMODE_SMART	(1 << 13)
# define SSI_MIDLEMODE_MASK	0x3000
#define SSI_SYSSTATUS_REG	0x14
# define SSI_RESETDONE		1
/*
 * NOTE(review): status stride is *2 while enable stride is *8 for the irq
 * index — only irq 0 is used by this driver; confirm against the TRM before
 * using a second MPU irq.
 */
#define SSI_MPU_STATUS_REG(port, irq)	(0x808 + ((port) * 0x10) + ((irq) * 2))
#define SSI_MPU_ENABLE_REG(port, irq)	(0x80c + ((port) * 0x10) + ((irq) * 8))
# define SSI_DATAACCEPT(channel)	(1 << (channel))
# define SSI_DATAAVAILABLE(channel)	(1 << ((channel) + 8))
# define SSI_DATAOVERRUN(channel)	(1 << ((channel) + 16))
# define SSI_ERROROCCURED		(1 << 24)
# define SSI_BREAKDETECTED		(1 << 25)
#define SSI_GDD_MPU_IRQ_STATUS_REG	0x0800
#define SSI_GDD_MPU_IRQ_ENABLE_REG	0x0804
# define SSI_GDD_LCH(channel)		(1 << (channel))
#define SSI_WAKE_REG(port)		(0xc00 + ((port) * 0x10))
#define SSI_CLEAR_WAKE_REG(port)	(0xc04 + ((port) * 0x10))
#define SSI_SET_WAKE_REG(port)		(0xc08 + ((port) * 0x10))
# define SSI_WAKE(channel)		(1 << (channel))
# define SSI_WAKE_MASK			0xff

/*
 * SSI SST (transmitter) registers
 */
#define SSI_SST_ID_REG			0
#define SSI_SST_MODE_REG		4
# define SSI_MODE_VAL_MASK		3
# define SSI_MODE_SLEEP			0
# define SSI_MODE_STREAM		1
# define SSI_MODE_FRAME			2
# define SSI_MODE_MULTIPOINTS		3
#define SSI_SST_FRAMESIZE_REG		8
# define SSI_FRAMESIZE_DEFAULT		31
#define SSI_SST_TXSTATE_REG		0xc
# define SSI_TXSTATE_IDLE		0
#define SSI_SST_BUFSTATE_REG		0x10
# define SSI_FULL(channel)		(1 << (channel))
#define SSI_SST_DIVISOR_REG		0x18
# define SSI_MAX_DIVISOR		127
#define SSI_SST_BREAK_REG		0x20
#define SSI_SST_CHANNELS_REG		0x24
# define SSI_CHANNELS_DEFAULT		4
#define SSI_SST_ARBMODE_REG		0x28
# define SSI_ARBMODE_ROUNDROBIN		0
# define SSI_ARBMODE_PRIORITY		1
#define SSI_SST_BUFFER_CH_REG(channel)	(0x80 + ((channel) * 4))
#define SSI_SST_SWAPBUF_CH_REG(channel)	(0xc0 + ((channel) * 4))

/*
 * SSI SSR (receiver) registers
 */
#define SSI_SSR_ID_REG			0
#define SSI_SSR_MODE_REG		4
#define SSI_SSR_FRAMESIZE_REG		8
#define SSI_SSR_RXSTATE_REG		0xc
#define SSI_SSR_BUFSTATE_REG		0x10
# define SSI_NOTEMPTY(channel)		(1 << (channel))
#define SSI_SSR_BREAK_REG		0x1c
#define SSI_SSR_ERROR_REG		0x20
#define SSI_SSR_ERRORACK_REG		0x24
#define SSI_SSR_OVERRUN_REG		0x2c
#define SSI_SSR_OVERRUNACK_REG		0x30
#define SSI_SSR_TIMEOUT_REG		0x34
# define SSI_TIMEOUT_DEFAULT		0
#define SSI_SSR_CHANNELS_REG		0x28
#define SSI_SSR_BUFFER_CH_REG(channel)	(0x80 + ((channel) * 4))
#define SSI_SSR_SWAPBUF_CH_REG(channel)	(0xc0 + ((channel) * 4))

/*
 * SSI GDD (DMA) registers
 */
#define SSI_GDD_HW_ID_REG		0
#define SSI_GDD_PPORT_ID_REG		0x10
#define SSI_GDD_MPORT_ID_REG		0x14
#define SSI_GDD_PPORT_SR_REG		0x20
#define SSI_GDD_MPORT_SR_REG		0x24
# define SSI_ACTIVE_LCH_NUM_MASK	0xff
#define SSI_GDD_TEST_REG		0x40
# define SSI_TEST			1
#define SSI_GDD_GCR_REG			0x100
# define SSI_CLK_AUTOGATING_ON		(1 << 3)
# define SSI_FREE			(1 << 2)
# define SSI_SWITCH_OFF			(1 << 0)
#define SSI_GDD_GRST_REG		0x200
# define SSI_SWRESET			1
#define SSI_GDD_CSDP_REG(channel)	(0x800 + ((channel) * 0x40))
# define SSI_DST_BURST_EN_MASK		0xc000
# define SSI_DST_SINGLE_ACCESS0		0
# define SSI_DST_SINGLE_ACCESS		(1 << 14)
# define SSI_DST_BURST_4x32_BIT		(2 << 14)
# define SSI_DST_BURST_8x32_BIT		(3 << 14)
# define SSI_DST_MASK			0x1e00
# define SSI_DST_MEMORY_PORT		(8 << 9)
# define SSI_DST_PERIPHERAL_PORT	(9 << 9)
# define SSI_SRC_BURST_EN_MASK		0x180
# define SSI_SRC_SINGLE_ACCESS0		0
# define SSI_SRC_SINGLE_ACCESS		(1 << 7)
# define SSI_SRC_BURST_4x32_BIT		(2 << 7)
# define SSI_SRC_BURST_8x32_BIT		(3 << 7)
# define SSI_SRC_MASK			0x3c
# define SSI_SRC_MEMORY_PORT		(8 << 2)
# define SSI_SRC_PERIPHERAL_PORT	(9 << 2)
# define SSI_DATA_TYPE_MASK		3
# define SSI_DATA_TYPE_S32		2
#define SSI_GDD_CCR_REG(channel)	(0x802 + ((channel) * 0x40))
# define SSI_DST_AMODE_MASK		(3 << 14)
# define SSI_DST_AMODE_CONST		0
/*
 * NOTE(review): (1 << 12) does not fall within SSI_DST_AMODE_MASK
 * (bits 15:14) — looks like it should be (1 << 14); verify against the
 * OMAP TRM before changing, the driver works with the current value.
 */
# define SSI_DST_AMODE_POSTINC		(1 << 12)
# define SSI_SRC_AMODE_MASK		(3 << 12)
# define SSI_SRC_AMODE_CONST		0
# define SSI_SRC_AMODE_POSTINC		(1 << 12)
# define SSI_CCR_ENABLE			(1 << 7)
# define SSI_CCR_SYNC_MASK		0x1f
#define SSI_GDD_CICR_REG(channel)	(0x804 + ((channel) * 0x40))
# define SSI_BLOCK_IE			(1 << 5)
# define SSI_HALF_IE			(1 << 2)
# define SSI_TOUT_IE			(1 << 0)
#define SSI_GDD_CSR_REG(channel)	(0x806 + ((channel) * 0x40))
# define SSI_CSR_SYNC			(1 << 6)
# define SSI_CSR_BLOCK			(1 << 5)
# define SSI_CSR_HALF			(1 << 2)
# define SSI_CSR_TOUR			(1 << 0)
#define SSI_GDD_CSSA_REG(channel)	(0x808 + ((channel) * 0x40))
#define SSI_GDD_CDSA_REG(channel)	(0x80c + ((channel) * 0x40))
#define SSI_GDD_CEN_REG(channel)	(0x810 + ((channel) * 0x40))
#define SSI_GDD_CSAC_REG(channel)	(0x818 + ((channel) * 0x40))
#define SSI_GDD_CDAC_REG(channel)	(0x81a + ((channel) * 0x40))
#define SSI_GDD_CLNK_CTRL_REG(channel)	(0x828 + ((channel) * 0x40))
# define SSI_ENABLE_LNK			(1 << 15)
# define SSI_STOP_LNK			(1 << 14)
# define SSI_NEXT_CH_ID_MASK		0xf

#endif /* __OMAP_SSI_REGS_H__ */