-rw-r--r--  arch/arm/mach-sa1100/generic.c |   25
-rw-r--r--  drivers/dma/Kconfig            |    9
-rw-r--r--  drivers/dma/Makefile           |    1
-rw-r--r--  drivers/dma/sa11x0-dma.c       | 1109
-rw-r--r--  drivers/net/irda/Kconfig       |    2
-rw-r--r--  drivers/net/irda/sa1100_ir.c   |  953
-rw-r--r--  include/linux/sa11x0-dma.h     |   24
7 files changed, 1700 insertions(+), 423 deletions(-)
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 2b33b4597468..1752686e9abc 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/ioport.h>
@@ -289,6 +290,29 @@ static struct platform_device sa11x0rtc_device = {
 	.id = -1,
 };
 
+static struct resource sa11x0dma_resources[] = {
+	DEFINE_RES_MEM(__PREG(DDAR(0)), 6 * DMASp),
+	DEFINE_RES_IRQ(IRQ_DMA0),
+	DEFINE_RES_IRQ(IRQ_DMA1),
+	DEFINE_RES_IRQ(IRQ_DMA2),
+	DEFINE_RES_IRQ(IRQ_DMA3),
+	DEFINE_RES_IRQ(IRQ_DMA4),
+	DEFINE_RES_IRQ(IRQ_DMA5),
+};
+
+static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device sa11x0dma_device = {
+	.name = "sa11x0-dma",
+	.id = -1,
+	.dev = {
+		.dma_mask = &sa11x0dma_dma_mask,
+		.coherent_dma_mask = 0xffffffff,
+	},
+	.num_resources = ARRAY_SIZE(sa11x0dma_resources),
+	.resource = sa11x0dma_resources,
+};
+
 static struct platform_device *sa11x0_devices[] __initdata = {
 	&sa11x0udc_device,
 	&sa11x0uart1_device,
@@ -297,6 +321,7 @@ static struct platform_device *sa11x0_devices[] __initdata = {
 	&sa11x0pcmcia_device,
 	&sa11x0fb_device,
 	&sa11x0rtc_device,
+	&sa11x0dma_device,
 };
 
 static int __init sa1100_init(void)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f1a274994bb1..4a6c46dea8a0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -252,6 +252,15 @@ config EP93XX_DMA
 	help
 	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
 
+config DMA_SA11X0
+	tristate "SA-11x0 DMA support"
+	depends on ARCH_SA1100
+	select DMA_ENGINE
+	help
+	  Support the DMA engine found on Intel StrongARM SA-1100 and
+	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
+	  devices.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 009a222e8283..86b795baba98 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
new file mode 100644
index 000000000000..16a6b48883cf
--- /dev/null
+++ b/drivers/dma/sa11x0-dma.c
@@ -0,0 +1,1109 @@
+/*
+ * SA11x0 DMAengine support
+ *
+ * Copyright (C) 2012 Russell King
+ *   Derived in part from arch/arm/mach-sa1100/dma.c,
+ *   Copyright (C) 2000, 2001 by Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sa11x0-dma.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define NR_PHY_CHAN	6
+#define DMA_ALIGN	3
+#define DMA_MAX_SIZE	0x1fff
+#define DMA_CHUNK_SIZE	0x1000
+
+#define DMA_DDAR	0x00
+#define DMA_DCSR_S	0x04
+#define DMA_DCSR_C	0x08
+#define DMA_DCSR_R	0x0c
+#define DMA_DBSA	0x10
+#define DMA_DBTA	0x14
+#define DMA_DBSB	0x18
+#define DMA_DBTB	0x1c
+#define DMA_SIZE	0x20
+
+#define DCSR_RUN	(1 << 0)
+#define DCSR_IE		(1 << 1)
+#define DCSR_ERROR	(1 << 2)
+#define DCSR_DONEA	(1 << 3)
+#define DCSR_STRTA	(1 << 4)
+#define DCSR_DONEB	(1 << 5)
+#define DCSR_STRTB	(1 << 6)
+#define DCSR_BIU	(1 << 7)
+
+#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
+#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
+#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
+#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
+#define DDAR_Ser0UDCTr	(0x0 << 4)
+#define DDAR_Ser0UDCRc	(0x1 << 4)
+#define DDAR_Ser1SDLCTr	(0x2 << 4)
+#define DDAR_Ser1SDLCRc	(0x3 << 4)
+#define DDAR_Ser1UARTTr	(0x4 << 4)
+#define DDAR_Ser1UARTRc	(0x5 << 4)
+#define DDAR_Ser2ICPTr	(0x6 << 4)
+#define DDAR_Ser2ICPRc	(0x7 << 4)
+#define DDAR_Ser3UARTTr	(0x8 << 4)
+#define DDAR_Ser3UARTRc	(0x9 << 4)
+#define DDAR_Ser4MCP0Tr	(0xa << 4)
+#define DDAR_Ser4MCP0Rc	(0xb << 4)
+#define DDAR_Ser4MCP1Tr	(0xc << 4)
+#define DDAR_Ser4MCP1Rc	(0xd << 4)
+#define DDAR_Ser4SSPTr	(0xe << 4)
+#define DDAR_Ser4SSPRc	(0xf << 4)
+
+struct sa11x0_dma_sg {
+	u32 addr;
+	u32 len;
+};
+
+struct sa11x0_dma_desc {
+	struct dma_async_tx_descriptor tx;
+	u32 ddar;
+	size_t size;
+
+	/* maybe protected by c->lock */
+	struct list_head node;
+	unsigned sglen;
+	struct sa11x0_dma_sg sg[0];
+};
+
+struct sa11x0_dma_phy;
+
+struct sa11x0_dma_chan {
+	struct dma_chan chan;
+	spinlock_t lock;
+	dma_cookie_t lc;
+
+	/* protected by c->lock */
+	struct sa11x0_dma_phy *phy;
+	enum dma_status status;
+	struct list_head desc_submitted;
+	struct list_head desc_issued;
+
+	/* protected by d->lock */
+	struct list_head node;
+
+	u32 ddar;
+	const char *name;
+};
+
+struct sa11x0_dma_phy {
+	void __iomem *base;
+	struct sa11x0_dma_dev *dev;
+	unsigned num;
+
+	struct sa11x0_dma_chan *vchan;
+
+	/* Protected by c->lock */
+	unsigned sg_load;
+	struct sa11x0_dma_desc *txd_load;
+	unsigned sg_done;
+	struct sa11x0_dma_desc *txd_done;
+#ifdef CONFIG_PM_SLEEP
+	u32 dbs[2];
+	u32 dbt[2];
+	u32 dcsr;
+#endif
+};
+
+struct sa11x0_dma_dev {
+	struct dma_device slave;
+	void __iomem *base;
+	spinlock_t lock;
+	struct tasklet_struct task;
+	struct list_head chan_pending;
+	struct list_head desc_complete;
+	struct sa11x0_dma_phy phy[NR_PHY_CHAN];
+};
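+
+/*
+ * Editorial note, not part of the original patch: each of the sixteen
+ * DDAR_Ser* device/direction combinations above is exposed as a named
+ * virtual channel (struct sa11x0_dma_chan), while the six physical
+ * channels (struct sa11x0_dma_phy) are handed out on demand by the
+ * tasklet below.  A virtual channel therefore only occupies a physical
+ * channel while it has issued descriptors outstanding.
+ */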
+
+static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sa11x0_dma_chan, chan);
+}
+
+static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
+{
+	return container_of(dmadev, struct sa11x0_dma_dev, slave);
+}
+
+static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct sa11x0_dma_desc, tx);
+}
+
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+{
+	if (list_empty(&c->desc_issued))
+		return NULL;
+
+	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+}
+
+static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
+{
+	list_del(&txd->node);
+	p->txd_load = txd;
+	p->sg_load = 0;
+
+	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
+		p->num, txd, txd->tx.cookie, txd->ddar);
+}
+
+static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
+	struct sa11x0_dma_chan *c)
+{
+	struct sa11x0_dma_desc *txd = p->txd_load;
+	struct sa11x0_dma_sg *sg;
+	void __iomem *base = p->base;
+	unsigned dbsx, dbtx;
+	u32 dcsr;
+
+	if (!txd)
+		return;
+
+	dcsr = readl_relaxed(base + DMA_DCSR_R);
+
+	/* Don't try to load the next transfer if both buffers are started */
+	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
+		return;
+
+	if (p->sg_load == txd->sglen) {
+		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+
+		/*
+		 * We have reached the end of the current descriptor.
+		 * Peek at the next descriptor, and if compatible with
+		 * the current, start processing it.
+		 */
+		if (txn && txn->ddar == txd->ddar) {
+			txd = txn;
+			sa11x0_dma_start_desc(p, txn);
+		} else {
+			p->txd_load = NULL;
+			return;
+		}
+	}
+
+	sg = &txd->sg[p->sg_load++];
+
+	/* Select buffer to load according to channel status */
+	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
+	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
+		dbsx = DMA_DBSA;
+		dbtx = DMA_DBTA;
+		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
+	} else {
+		dbsx = DMA_DBSB;
+		dbtx = DMA_DBTB;
+		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
+	}
+
+	writel_relaxed(sg->addr, base + dbsx);
+	writel_relaxed(sg->len, base + dbtx);
+	writel(dcsr, base + DMA_DCSR_S);
+
+	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
+		p->num, dcsr,
+		'A' + (dbsx == DMA_DBSB), sg->addr,
+		'A' + (dbtx == DMA_DBTB), sg->len);
+}
+
+static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
+	struct sa11x0_dma_chan *c)
+{
+	struct sa11x0_dma_desc *txd = p->txd_done;
+
+	if (++p->sg_done == txd->sglen) {
+		struct sa11x0_dma_dev *d = p->dev;
+
+		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
+			p->num, p->txd_done, p->txd_done->tx.cookie);
+
+		c->lc = txd->tx.cookie;
+
+		spin_lock(&d->lock);
+		list_add_tail(&txd->node, &d->desc_complete);
+		spin_unlock(&d->lock);
+
+		p->sg_done = 0;
+		p->txd_done = p->txd_load;
+
+		tasklet_schedule(&d->task);
+	}
+
+	sa11x0_dma_start_sg(p, c);
+}
+
+static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
+{
+	struct sa11x0_dma_phy *p = dev_id;
+	struct sa11x0_dma_dev *d = p->dev;
+	struct sa11x0_dma_chan *c;
+	u32 dcsr;
+
+	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
+		return IRQ_NONE;
+
+	/* Clear reported status bits */
+	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
+		p->base + DMA_DCSR_C);
+
+	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
+
+	if (dcsr & DCSR_ERROR) {
+		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
+			p->num, dcsr,
+			readl_relaxed(p->base + DMA_DDAR),
+			readl_relaxed(p->base + DMA_DBSA),
+			readl_relaxed(p->base + DMA_DBTA),
+			readl_relaxed(p->base + DMA_DBSB),
+			readl_relaxed(p->base + DMA_DBTB));
+	}
+
+	c = p->vchan;
+	if (c) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&c->lock, flags);
+		/*
+		 * Now that we're holding the lock, check that the vchan
+		 * really is associated with this pchan before touching the
+		 * hardware.  This should always succeed, because we won't
+		 * change p->vchan or c->phy while the channel is actively
+		 * transferring.
+		 */
+		if (c->phy == p) {
+			if (dcsr & DCSR_DONEA)
+				sa11x0_dma_complete(p, c);
+			if (dcsr & DCSR_DONEB)
+				sa11x0_dma_complete(p, c);
+		}
+		spin_unlock_irqrestore(&c->lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
+{
+	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
+
+	/* If the issued list is empty, we have no further txds to process */
+	if (txd) {
+		struct sa11x0_dma_phy *p = c->phy;
+
+		sa11x0_dma_start_desc(p, txd);
+		p->txd_done = txd;
+		p->sg_done = 0;
+
+		/* The channel should not have any transfers started */
+		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
+			(DCSR_STRTA | DCSR_STRTB));
+
+		/* Clear the run and start bits before changing DDAR */
+		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
+			p->base + DMA_DCSR_C);
+		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
+
+		/* Try to start both buffers */
+		sa11x0_dma_start_sg(p, c);
+		sa11x0_dma_start_sg(p, c);
+	}
+}
+
+static void sa11x0_dma_tasklet(unsigned long arg)
+{
+	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
+	struct sa11x0_dma_phy *p;
+	struct sa11x0_dma_chan *c;
+	struct sa11x0_dma_desc *txd, *txn;
+	LIST_HEAD(head);
+	unsigned pch, pch_alloc = 0;
+
+	dev_dbg(d->slave.dev, "tasklet enter\n");
+
+	/* Get the completed tx descriptors */
+	spin_lock_irq(&d->lock);
+	list_splice_init(&d->desc_complete, &head);
+	spin_unlock_irq(&d->lock);
+
+	list_for_each_entry(txd, &head, node) {
+		c = to_sa11x0_dma_chan(txd->tx.chan);
+
+		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
+			c, txd, txd->tx.cookie);
+
+		spin_lock_irq(&c->lock);
+		p = c->phy;
+		if (p) {
+			if (!p->txd_done)
+				sa11x0_dma_start_txd(c);
+			if (!p->txd_done) {
+				/* No current txd associated with this channel */
+				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
+
+				/* Mark this channel free */
+				c->phy = NULL;
+				p->vchan = NULL;
+			}
+		}
+		spin_unlock_irq(&c->lock);
+	}
+
+	spin_lock_irq(&d->lock);
+	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+		p = &d->phy[pch];
+
+		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+			c = list_first_entry(&d->chan_pending,
+				struct sa11x0_dma_chan, node);
+			list_del_init(&c->node);
+
+			pch_alloc |= 1 << pch;
+
+			/* Mark this channel allocated */
+			p->vchan = c;
+
+			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+		}
+	}
+	spin_unlock_irq(&d->lock);
+
+	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+		if (pch_alloc & (1 << pch)) {
+			p = &d->phy[pch];
+			c = p->vchan;
+
+			spin_lock_irq(&c->lock);
+			c->phy = p;
+
+			sa11x0_dma_start_txd(c);
+			spin_unlock_irq(&c->lock);
+		}
+	}
+
+	/* Now free the completed tx descriptor, and call their callbacks */
+	list_for_each_entry_safe(txd, txn, &head, node) {
+		dma_async_tx_callback callback = txd->tx.callback;
+		void *callback_param = txd->tx.callback_param;
+
+		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
+			txd, txd->tx.cookie);
+
+		kfree(txd);
+
+		if (callback)
+			callback(callback_param);
+	}
+
+	dev_dbg(d->slave.dev, "tasklet exit\n");
+}
+
+
+static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
+{
+	struct sa11x0_dma_desc *txd, *txn;
+
+	list_for_each_entry_safe(txd, txn, head, node) {
+		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
+		kfree(txd);
+	}
+}
+
+static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->lock, flags);
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	list_splice_tail_init(&c->desc_submitted, &head);
+	list_splice_tail_init(&c->desc_issued, &head);
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	sa11x0_dma_desc_free(d, &head);
+}
+
+static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
+{
+	unsigned reg;
+	u32 dcsr;
+
+	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+
+	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
+	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
+		reg = DMA_DBSA;
+	else
+		reg = DMA_DBSB;
+
+	return readl_relaxed(p->base + reg);
+}
+
+static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	struct sa11x0_dma_phy *p;
+	struct sa11x0_dma_desc *txd;
+	dma_cookie_t last_used, last_complete;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	last_used = c->chan.cookie;
+	last_complete = c->lc;
+
+	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	if (ret == DMA_SUCCESS) {
+		dma_set_tx_state(state, last_complete, last_used, 0);
+		return ret;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	p = c->phy;
+	ret = c->status;
+	if (p) {
+		dma_addr_t addr = sa11x0_dma_pos(p);
+
+		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
+		txd = p->txd_done;
+		if (txd) {
+			unsigned i;
+
+			for (i = 0; i < txd->sglen; i++) {
+				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
+					i, txd->sg[i].addr, txd->sg[i].len);
+				if (addr >= txd->sg[i].addr &&
+				    addr < txd->sg[i].addr + txd->sg[i].len) {
+					unsigned len;
+
+					len = txd->sg[i].len -
+						(addr - txd->sg[i].addr);
+					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
+						i, len);
+					bytes += len;
+					i++;
+					break;
+				}
+			}
+			for (; i < txd->sglen; i++) {
+				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
+					i, txd->sg[i].addr, txd->sg[i].len);
+				bytes += txd->sg[i].len;
+			}
+		}
+		if (txd != p->txd_load && p->txd_load)
+			bytes += p->txd_load->size;
+	}
+	list_for_each_entry(txd, &c->desc_issued, node) {
+		bytes += txd->size;
+	}
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	dma_set_tx_state(state, last_complete, last_used, bytes);
+
+	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+
+	return ret;
+}
+
+/*
+ * Move pending txds to the issued list, and re-init pending list.
+ * If not already pending, add this channel to the list of pending
+ * channels and trigger the tasklet to run.
+ */
+static void sa11x0_dma_issue_pending(struct dma_chan *chan)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->lock, flags);
+	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
+	if (!list_empty(&c->desc_issued)) {
+		spin_lock(&d->lock);
+		if (!c->phy && list_empty(&c->node)) {
+			list_add_tail(&c->node, &d->chan_pending);
+			tasklet_schedule(&d->task);
+			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+		}
+		spin_unlock(&d->lock);
+	} else
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
+	spin_unlock_irqrestore(&c->lock, flags);
+}
+
+static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
+	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->lock, flags);
+	c->chan.cookie += 1;
+	if (c->chan.cookie < 0)
+		c->chan.cookie = 1;
+	txd->tx.cookie = c->chan.cookie;
+
+	list_add_tail(&txd->node, &c->desc_submitted);
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
+		c, txd, txd->tx.cookie);
+
+	return txd->tx.cookie;
+}
+
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
+	enum dma_transfer_direction dir, unsigned long flags)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_desc *txd;
+	struct scatterlist *sgent;
+	unsigned i, j = sglen;
+	size_t size = 0;
+
+	/* SA11x0 channels can only operate in their native direction */
+	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+			c, c->ddar, dir);
+		return NULL;
+	}
+
+	/* Do not allow zero-sized txds */
+	if (sglen == 0)
+		return NULL;
+
+	for_each_sg(sg, sgent, sglen, i) {
+		dma_addr_t addr = sg_dma_address(sgent);
+		unsigned int len = sg_dma_len(sgent);
+
+		if (len > DMA_MAX_SIZE)
+			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
+		if (addr & DMA_ALIGN) {
+			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
+				c, addr);
+			return NULL;
+		}
+	}
+
+	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
+	if (!txd) {
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+		return NULL;
+	}
+
+	j = 0;
+	for_each_sg(sg, sgent, sglen, i) {
+		dma_addr_t addr = sg_dma_address(sgent);
+		unsigned len = sg_dma_len(sgent);
+
+		size += len;
+
+		do {
+			unsigned tlen = len;
+
+			/*
+			 * Check whether the transfer will fit.  If not, try
+			 * to split the transfer up such that we end up with
+			 * equal chunks - but make sure that we preserve the
+			 * alignment.  This avoids small segments.
+			 */
+			if (tlen > DMA_MAX_SIZE) {
+				unsigned mult = DIV_ROUND_UP(tlen,
+					DMA_MAX_SIZE & ~DMA_ALIGN);
+
+				tlen = (tlen / mult) & ~DMA_ALIGN;
+			}
+
+			txd->sg[j].addr = addr;
+			txd->sg[j].len = tlen;
+
+			addr += tlen;
+			len -= tlen;
+			j++;
+		} while (len);
+	}
+
+	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
+	txd->tx.flags = flags;
+	txd->tx.tx_submit = sa11x0_dma_tx_submit;
+	txd->ddar = c->ddar;
+	txd->size = size;
+	txd->sglen = j;
+
+	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
+		c, txd, txd->size, txd->sglen);
+
+	return &txd->tx;
+}
+
+static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
+{
+	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
+	dma_addr_t addr;
+	enum dma_slave_buswidth width;
+	u32 maxburst;
+
+	if (ddar & DDAR_RW) {
+		addr = cfg->src_addr;
+		width = cfg->src_addr_width;
+		maxburst = cfg->src_maxburst;
+	} else {
+		addr = cfg->dst_addr;
+		width = cfg->dst_addr_width;
+		maxburst = cfg->dst_maxburst;
+	}
+
+	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
+	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
+	    (maxburst != 4 && maxburst != 8))
+		return -EINVAL;
+
+	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+		ddar |= DDAR_DW;
+	if (maxburst == 8)
+		ddar |= DDAR_BS;
+
+	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+		c, addr, width, maxburst);
+
+	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
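+	/*
+	 * Editorial note, derived from the assignment above: the device
+	 * address is packed into DDAR with address bits 31-28 kept in
+	 * place and address bits 21-2 (addr & 0x003ffffc) shifted up by
+	 * six into DDAR bits 27-8.
+	 */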
+
+	return 0;
+}
+
+static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+	struct sa11x0_dma_phy *p;
+	LIST_HEAD(head);
+	unsigned long flags;
+	int ret;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
+
+	case DMA_TERMINATE_ALL:
+		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+		/* Clear the tx descriptor lists */
+		spin_lock_irqsave(&c->lock, flags);
+		list_splice_tail_init(&c->desc_submitted, &head);
+		list_splice_tail_init(&c->desc_issued, &head);
+
+		p = c->phy;
+		if (p) {
+			struct sa11x0_dma_desc *txd, *txn;
+
+			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
+			/* vchan is assigned to a pchan - stop the channel */
+			writel(DCSR_RUN | DCSR_IE |
+				DCSR_STRTA | DCSR_DONEA |
+				DCSR_STRTB | DCSR_DONEB,
+				p->base + DMA_DCSR_C);
+
+			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
+				if (txd->tx.chan == &c->chan)
+					list_move(&txd->node, &head);
+
+			if (p->txd_load) {
+				if (p->txd_load != p->txd_done)
+					list_add_tail(&p->txd_load->node, &head);
+				p->txd_load = NULL;
+			}
+			if (p->txd_done) {
+				list_add_tail(&p->txd_done->node, &head);
+				p->txd_done = NULL;
+			}
+			c->phy = NULL;
+			spin_lock(&d->lock);
+			p->vchan = NULL;
+			spin_unlock(&d->lock);
+			tasklet_schedule(&d->task);
+		}
+		spin_unlock_irqrestore(&c->lock, flags);
+		sa11x0_dma_desc_free(d, &head);
+		ret = 0;
+		break;
+
+	case DMA_PAUSE:
+		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
+		spin_lock_irqsave(&c->lock, flags);
+		if (c->status == DMA_IN_PROGRESS) {
+			c->status = DMA_PAUSED;
+
+			p = c->phy;
+			if (p) {
+				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+			} else {
+				spin_lock(&d->lock);
+				list_del_init(&c->node);
+				spin_unlock(&d->lock);
+			}
+		}
+		spin_unlock_irqrestore(&c->lock, flags);
+		ret = 0;
+		break;
+
+	case DMA_RESUME:
+		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
+		spin_lock_irqsave(&c->lock, flags);
+		if (c->status == DMA_PAUSED) {
+			c->status = DMA_IN_PROGRESS;
+
+			p = c->phy;
+			if (p) {
+				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
+			} else if (!list_empty(&c->desc_issued)) {
+				spin_lock(&d->lock);
+				list_add_tail(&c->node, &d->chan_pending);
+				spin_unlock(&d->lock);
+			}
+		}
+		spin_unlock_irqrestore(&c->lock, flags);
+		ret = 0;
+		break;
+
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+struct sa11x0_dma_channel_desc {
+	u32 ddar;
+	const char *name;
+};
+
+#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
+static const struct sa11x0_dma_channel_desc chan_desc[] = {
+	CD(Ser0UDCTr, 0),
+	CD(Ser0UDCRc, DDAR_RW),
+	CD(Ser1SDLCTr, 0),
+	CD(Ser1SDLCRc, DDAR_RW),
+	CD(Ser1UARTTr, 0),
+	CD(Ser1UARTRc, DDAR_RW),
+	CD(Ser2ICPTr, 0),
+	CD(Ser2ICPRc, DDAR_RW),
+	CD(Ser3UARTTr, 0),
+	CD(Ser3UARTRc, DDAR_RW),
+	CD(Ser4MCP0Tr, 0),
+	CD(Ser4MCP0Rc, DDAR_RW),
+	CD(Ser4MCP1Tr, 0),
+	CD(Ser4MCP1Rc, DDAR_RW),
+	CD(Ser4SSPTr, 0),
+	CD(Ser4SSPRc, DDAR_RW),
+};
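+
+/*
+ * Editorial note: the CD() names above ("Ser0UDCTr", "Ser2ICPRc", ...)
+ * are the strings a client driver passes to sa11x0_dma_filter_fn()
+ * below when requesting one of these virtual channels.
+ */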
+
+static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
+	struct device *dev)
+{
+	unsigned i;
+
+	dmadev->chancnt = ARRAY_SIZE(chan_desc);
+	INIT_LIST_HEAD(&dmadev->channels);
+	dmadev->dev = dev;
+	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
+	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
+	dmadev->device_control = sa11x0_dma_control;
+	dmadev->device_tx_status = sa11x0_dma_tx_status;
+	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
+
+	for (i = 0; i < dmadev->chancnt; i++) {
+		struct sa11x0_dma_chan *c;
+
+		c = kzalloc(sizeof(*c), GFP_KERNEL);
+		if (!c) {
+			dev_err(dev, "no memory for channel %u\n", i);
+			return -ENOMEM;
+		}
+
+		c->chan.device = dmadev;
+		c->status = DMA_IN_PROGRESS;
+		c->ddar = chan_desc[i].ddar;
+		c->name = chan_desc[i].name;
+		spin_lock_init(&c->lock);
+		INIT_LIST_HEAD(&c->desc_submitted);
+		INIT_LIST_HEAD(&c->desc_issued);
+		INIT_LIST_HEAD(&c->node);
+		list_add_tail(&c->chan.device_node, &dmadev->channels);
+	}
+
+	return dma_async_device_register(dmadev);
+}
+
+static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
+	void *data)
+{
+	int irq = platform_get_irq(pdev, nr);
+
+	if (irq <= 0)
+		return -ENXIO;
+
+	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
+}
+
+static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
+	void *data)
+{
+	int irq = platform_get_irq(pdev, nr);
+	if (irq > 0)
+		free_irq(irq, data);
+}
+
+static void sa11x0_dma_free_channels(struct dma_device *dmadev)
+{
+	struct sa11x0_dma_chan *c, *cn;
+
+	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
+		list_del(&c->chan.device_node);
+		kfree(c);
+	}
+}
+
+static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
+{
+	struct sa11x0_dma_dev *d;
+	struct resource *res;
+	unsigned i;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	spin_lock_init(&d->lock);
+	INIT_LIST_HEAD(&d->chan_pending);
+	INIT_LIST_HEAD(&d->desc_complete);
+
+	d->base = ioremap(res->start, resource_size(res));
+	if (!d->base) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
+
+	for (i = 0; i < NR_PHY_CHAN; i++) {
+		struct sa11x0_dma_phy *p = &d->phy[i];
+
+		p->dev = d;
+		p->num = i;
+		p->base = d->base + i * DMA_SIZE;
+		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
+			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
+			p->base + DMA_DCSR_C);
+		writel_relaxed(0, p->base + DMA_DDAR);
+
+		ret = sa11x0_dma_request_irq(pdev, i, p);
+		if (ret) {
+			while (i) {
+				i--;
+				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
+			}
+			goto err_irq;
+		}
+	}
+
+	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
+	if (ret) {
+		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
+			ret);
+		goto err_slave_reg;
+	}
+
+	platform_set_drvdata(pdev, d);
+	return 0;
+
+ err_slave_reg:
+	sa11x0_dma_free_channels(&d->slave);
+	for (i = 0; i < NR_PHY_CHAN; i++)
+		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
+ err_irq:
+	tasklet_kill(&d->task);
+	iounmap(d->base);
+ err_ioremap:
+	kfree(d);
+ err_alloc:
+	return ret;
+}
+
+static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
+{
+	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
+	unsigned pch;
+
+	dma_async_device_unregister(&d->slave);
+
+	sa11x0_dma_free_channels(&d->slave);
+	for (pch = 0; pch < NR_PHY_CHAN; pch++)
+		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
+	tasklet_kill(&d->task);
+	iounmap(d->base);
+	kfree(d);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sa11x0_dma_suspend(struct device *dev)
+{
+	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
+	unsigned pch;
+
+	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+		struct sa11x0_dma_phy *p = &d->phy[pch];
+		u32 dcsr, saved_dcsr;
+
+		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+		if (dcsr & DCSR_RUN) {
+			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+		}
+
+		saved_dcsr &= DCSR_RUN | DCSR_IE;
+		if (dcsr & DCSR_BIU) {
+			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
+			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
+			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
+			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
+			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
+				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
+		} else {
+			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
+			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
+			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
+			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
+			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
+		}
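+		/*
+		 * Editorial note: saving the currently-active buffer's
+		 * registers into slot 0 (and swapping the STRTA/STRTB
+		 * flags above) appears to normalise the state so that,
+		 * after the BIU bit resets, buffer A restarts first;
+		 * sa11x0_dma_resume() reloads dbs/dbt in A, B order.
+		 */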
+		p->dcsr = saved_dcsr;
+
+		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
+	}
+
+	return 0;
+}
+
+static int sa11x0_dma_resume(struct device *dev)
+{
+	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
+	unsigned pch;
+
+	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+		struct sa11x0_dma_phy *p = &d->phy[pch];
+		struct sa11x0_dma_desc *txd = NULL;
+		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+
+		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
+
+		if (p->txd_done)
+			txd = p->txd_done;
+		else if (p->txd_load)
+			txd = p->txd_load;
+
+		if (!txd)
+			continue;
+
+		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
+
+		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
+		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
+		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
+		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
+		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops sa11x0_dma_pm_ops = {
+	.suspend_noirq = sa11x0_dma_suspend,
+	.resume_noirq = sa11x0_dma_resume,
+	.freeze_noirq = sa11x0_dma_suspend,
+	.thaw_noirq = sa11x0_dma_resume,
+	.poweroff_noirq = sa11x0_dma_suspend,
+	.restore_noirq = sa11x0_dma_resume,
+};
+
+static struct platform_driver sa11x0_dma_driver = {
+	.driver = {
+		.name = "sa11x0-dma",
+		.owner = THIS_MODULE,
+		.pm = &sa11x0_dma_pm_ops,
+	},
+	.probe = sa11x0_dma_probe,
+	.remove = __devexit_p(sa11x0_dma_remove),
+};
+
+bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
+		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+		const char *p = param;
+
+		return !strcmp(c->name, p);
+	}
+	return false;
+}
+EXPORT_SYMBOL(sa11x0_dma_filter_fn);
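+
+/*
+ * Illustrative usage sketch (editorial, not part of this patch); the
+ * channel name follows the chan_desc[] table above:
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser2ICPRc");
+ *
+ * followed by dmaengine_slave_config(), device_prep_slave_sg() and
+ * dma_async_issue_pending() - the sequence the sa1100_ir conversion
+ * below uses.
+ */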
+
+static int __init sa11x0_dma_init(void)
+{
+	return platform_driver_register(&sa11x0_dma_driver);
+}
+subsys_initcall(sa11x0_dma_init);
+
+static void __exit sa11x0_dma_exit(void)
+{
+	platform_driver_unregister(&sa11x0_dma_driver);
+}
+module_exit(sa11x0_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("SA-11x0 DMA driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sa11x0-dma");
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index e535137eb2d0..468047866c8c 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -356,7 +356,7 @@ config VLSI_FIR
 
 config SA1100_FIR
 	tristate "SA1100 Internal IR"
-	depends on ARCH_SA1100 && IRDA
+	depends on ARCH_SA1100 && IRDA && DMA_SA11X0
 
 config VIA_FIR
 	tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index da2705061a60..a0d1913a58d3 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -15,7 +15,7 @@
  * This driver takes one kernel command line parameter, sa1100ir=, with
  * the following options:
  *	max_rate:baudrate	- set the maximum baud rate
- *	power_leve:level	- set the transmitter power level
+ *	power_level:level	- set the transmitter power level
  *	tx_lpm:0|1		- set transmit low power mode
  */
 #include <linux/module.h>
@@ -30,13 +30,13 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/wrapper.h>
 #include <net/irda/irda_device.h>
 
-#include <asm/irq.h>
-#include <mach/dma.h>
 #include <mach/hardware.h>
 #include <asm/mach/irda.h>
 
@@ -44,8 +44,15 @@ static int power_level = 3;
 static int tx_lpm;
 static int max_rate = 4000000;
 
+struct sa1100_buf {
+	struct device *dev;
+	struct sk_buff *skb;
+	struct scatterlist sg;
+	struct dma_chan *chan;
+	dma_cookie_t cookie;
+};
+
 struct sa1100_irda {
-	unsigned char hscr0;
 	unsigned char utcr4;
 	unsigned char power;
 	unsigned char open;
@@ -53,12 +60,8 @@
 	int speed;
 	int newspeed;
 
-	struct sk_buff *txskb;
-	struct sk_buff *rxskb;
-	dma_addr_t txbuf_dma;
-	dma_addr_t rxbuf_dma;
-	dma_regs_t *txdma;
-	dma_regs_t *rxdma;
+	struct sa1100_buf dma_rx;
+	struct sa1100_buf dma_tx;
 
 	struct device *dev;
 	struct irda_platform_data *pdata;
@@ -67,23 +70,103 @@
 
 	iobuff_t tx_buff;
 	iobuff_t rx_buff;
+
+	int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
+	irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
 };
 
+static int sa1100_irda_set_speed(struct sa1100_irda *, int);
+
 #define IS_FIR(si)		((si)->speed >= 4000000)
 
 #define HPSIR_MAX_RXLEN		2047
 
+static struct dma_slave_config sa1100_irda_sir_tx = {
+	.direction = DMA_TO_DEVICE,
+	.dst_addr = __PREG(Ser2UTDR),
+	.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst = 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+	.direction = DMA_FROM_DEVICE,
+	.src_addr = __PREG(Ser2HSDR),
+	.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.src_maxburst = 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+	.direction = DMA_TO_DEVICE,
+	.dst_addr = __PREG(Ser2HSDR),
+	.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst = 8,
+};
+
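+/*
+ * Editorial note: these settings match what sa11x0_dma_slave_config()
+ * accepts - a 1- or 2-byte register width and a maxburst of 4 or 8.
+ * SIR transmit uses 4-byte bursts; the FIR paths use 8-byte bursts.
+ */
+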
105 | static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf) | ||
106 | { | ||
107 | struct dma_chan *chan = buf->chan; | ||
108 | struct dma_tx_state state; | ||
109 | enum dma_status status; | ||
110 | |||
111 | status = chan->device->device_tx_status(chan, buf->cookie, &state); | ||
112 | if (status != DMA_PAUSED) | ||
113 | return 0; | ||
114 | |||
115 | return sg_dma_len(&buf->sg) - state.residue; | ||
116 | } | ||
117 | |||
118 | static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf, | ||
119 | const char *name, struct dma_slave_config *cfg) | ||
120 | { | ||
121 | dma_cap_mask_t m; | ||
122 | int ret; | ||
123 | |||
124 | dma_cap_zero(m); | ||
125 | dma_cap_set(DMA_SLAVE, m); | ||
126 | |||
127 | buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name); | ||
128 | if (!buf->chan) { | ||
129 | dev_err(dev, "unable to request DMA channel for %s\n", | ||
130 | name); | ||
131 | return -ENOENT; | ||
132 | } | ||
133 | |||
134 | ret = dmaengine_slave_config(buf->chan, cfg); | ||
135 | if (ret) | ||
136 | dev_warn(dev, "DMA slave_config for %s returned %d\n", | ||
137 | name, ret); | ||
138 | |||
139 | buf->dev = buf->chan->device->dev; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static void sa1100_irda_dma_start(struct sa1100_buf *buf, | ||
145 | enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p) | ||
146 | { | ||
147 | struct dma_async_tx_descriptor *desc; | ||
148 | struct dma_chan *chan = buf->chan; | ||
149 | |||
150 | desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir, | ||
151 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
152 | if (desc) { | ||
153 | desc->callback = cb; | ||
154 | desc->callback_param = cb_p; | ||
155 | buf->cookie = dmaengine_submit(desc); | ||
156 | dma_async_issue_pending(chan); | ||
157 | } | ||
158 | } | ||
159 | |||
76 | /* | 160 | /* |
77 | * Allocate and map the receive buffer, unless it is already allocated. | 161 | * Allocate and map the receive buffer, unless it is already allocated. |
78 | */ | 162 | */ |
79 | static int sa1100_irda_rx_alloc(struct sa1100_irda *si) | 163 | static int sa1100_irda_rx_alloc(struct sa1100_irda *si) |
80 | { | 164 | { |
81 | if (si->rxskb) | 165 | if (si->dma_rx.skb) |
82 | return 0; | 166 | return 0; |
83 | 167 | ||
84 | si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); | 168 | si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); |
85 | 169 | if (!si->dma_rx.skb) { | |
86 | if (!si->rxskb) { | ||
87 | printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); | 170 | printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); |
88 | return -ENOMEM; | 171 | return -ENOMEM; |
89 | } | 172 | } |
@@ -92,11 +175,14 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) | |||
92 | * Align any IP headers that may be contained | 175 | * Align any IP headers that may be contained |
93 | * within the frame. | 176 | * within the frame. |
94 | */ | 177 | */ |
95 | skb_reserve(si->rxskb, 1); | 178 | skb_reserve(si->dma_rx.skb, 1); |
179 | |||
180 | sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN); | ||
181 | if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { | ||
182 | dev_kfree_skb_any(si->dma_rx.skb); | ||
183 | return -ENOMEM; | ||
184 | } | ||
96 | 185 | ||
97 | si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, | ||
98 | HPSIR_MAX_RXLEN, | ||
99 | DMA_FROM_DEVICE); | ||
100 | return 0; | 186 | return 0; |
101 | } | 187 | } |
102 | 188 | ||
@@ -106,7 +192,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) | |||
106 | */ | 192 | */ |
107 | static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) | 193 | static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) |
108 | { | 194 | { |
109 | if (!si->rxskb) { | 195 | if (!si->dma_rx.skb) { |
110 | printk(KERN_ERR "sa1100_ir: rx buffer went missing\n"); | 196 | printk(KERN_ERR "sa1100_ir: rx buffer went missing\n"); |
111 | return; | 197 | return; |
112 | } | 198 | } |
@@ -114,254 +200,87 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) | |||
114 | /* | 200 | /* |
115 | * First empty receive FIFO | 201 | * First empty receive FIFO |
116 | */ | 202 | */ |
117 | Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; | 203 | Ser2HSCR0 = HSCR0_HSSP; |
118 | 204 | ||
119 | /* | 205 | /* |
120 | * Enable the DMA, receiver and receive interrupt. | 206 | * Enable the DMA, receiver and receive interrupt. |
121 | */ | 207 | */ |
122 | sa1100_clear_dma(si->rxdma); | 208 | dmaengine_terminate_all(si->dma_rx.chan); |
123 | sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN); | 209 | sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL); |
124 | Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; | 210 | |
211 | Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE; | ||
125 | } | 212 | } |
126 | 213 | ||
127 | /* | 214 | static void sa1100_irda_check_speed(struct sa1100_irda *si) |
128 | * Set the IrDA communications speed. | ||
129 | */ | ||
130 | static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) | ||
131 | { | 215 | { |
132 | unsigned long flags; | 216 | if (si->newspeed) { |
133 | int brd, ret = -EINVAL; | 217 | sa1100_irda_set_speed(si, si->newspeed); |
134 | 218 | si->newspeed = 0; | |
135 | switch (speed) { | ||
136 | case 9600: case 19200: case 38400: | ||
137 | case 57600: case 115200: | ||
138 | brd = 3686400 / (16 * speed) - 1; | ||
139 | |||
140 | /* | ||
141 | * Stop the receive DMA. | ||
142 | */ | ||
143 | if (IS_FIR(si)) | ||
144 | sa1100_stop_dma(si->rxdma); | ||
145 | |||
146 | local_irq_save(flags); | ||
147 | |||
148 | Ser2UTCR3 = 0; | ||
149 | Ser2HSCR0 = HSCR0_UART; | ||
150 | |||
151 | Ser2UTCR1 = brd >> 8; | ||
152 | Ser2UTCR2 = brd; | ||
153 | |||
154 | /* | ||
155 | * Clear status register | ||
156 | */ | ||
157 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; | ||
158 | Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; | ||
159 | |||
160 | if (si->pdata->set_speed) | ||
161 | si->pdata->set_speed(si->dev, speed); | ||
162 | |||
163 | si->speed = speed; | ||
164 | |||
165 | local_irq_restore(flags); | ||
166 | ret = 0; | ||
167 | break; | ||
168 | |||
169 | case 4000000: | ||
170 | local_irq_save(flags); | ||
171 | |||
172 | si->hscr0 = 0; | ||
173 | |||
174 | Ser2HSSR0 = 0xff; | ||
175 | Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; | ||
176 | Ser2UTCR3 = 0; | ||
177 | |||
178 | si->speed = speed; | ||
179 | |||
180 | if (si->pdata->set_speed) | ||
181 | si->pdata->set_speed(si->dev, speed); | ||
182 | |||
183 | sa1100_irda_rx_alloc(si); | ||
184 | sa1100_irda_rx_dma_start(si); | ||
185 | |||
186 | local_irq_restore(flags); | ||
187 | |||
188 | break; | ||
189 | |||
190 | default: | ||
191 | break; | ||
192 | } | 219 | } |
193 | |||
194 | return ret; | ||
195 | } | 220 | } |
196 | 221 | ||
197 | /* | 222 | /* |
198 | * Control the power state of the IrDA transmitter. | 223 | * HP-SIR format support. |
199 | * State: | ||
200 | * 0 - off | ||
201 | * 1 - short range, lowest power | ||
202 | * 2 - medium range, medium power | ||
203 | * 3 - maximum range, high power | ||
204 | * | ||
205 | * Currently, only assabet is known to support this. | ||
206 | */ | 224 | */ |
207 | static int | 225 | static void sa1100_irda_sirtxdma_irq(void *id) |
208 | __sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) | ||
209 | { | 226 | { |
210 | int ret = 0; | 227 | struct net_device *dev = id; |
211 | if (si->pdata->set_power) | 228 | struct sa1100_irda *si = netdev_priv(dev); |
212 | ret = si->pdata->set_power(si->dev, state); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | static inline int | ||
217 | sa1100_set_power(struct sa1100_irda *si, unsigned int state) | ||
218 | { | ||
219 | int ret; | ||
220 | |||
221 | ret = __sa1100_irda_set_power(si, state); | ||
222 | if (ret == 0) | ||
223 | si->power = state; | ||
224 | 229 | ||
225 | return ret; | 230 | dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE); |
226 | } | 231 | dev_kfree_skb(si->dma_tx.skb); |
232 | si->dma_tx.skb = NULL; | ||
227 | 233 | ||
228 | static int sa1100_irda_startup(struct sa1100_irda *si) | 234 | dev->stats.tx_packets++; |
229 | { | 235 | dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg); |
230 | int ret; | ||
231 | 236 | ||
232 | /* | 237 | /* We need to ensure that the transmitter has finished. */ |
233 | * Ensure that the ports for this device are setup correctly. | 238 | do |
234 | */ | 239 | rmb(); |
235 | if (si->pdata->startup) { | 240 | while (Ser2UTSR1 & UTSR1_TBY); |
236 | ret = si->pdata->startup(si->dev); | ||
237 | if (ret) | ||
238 | return ret; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Configure PPC for IRDA - we want to drive TXD2 low. | ||
243 | * We also want to drive this pin low during sleep. | ||
244 | */ | ||
245 | PPSR &= ~PPC_TXD2; | ||
246 | PSDR &= ~PPC_TXD2; | ||
247 | PPDR |= PPC_TXD2; | ||
248 | |||
249 | /* | ||
250 | * Enable HP-SIR modulation, and ensure that the port is disabled. | ||
251 | */ | ||
252 | Ser2UTCR3 = 0; | ||
253 | Ser2HSCR0 = HSCR0_UART; | ||
254 | Ser2UTCR4 = si->utcr4; | ||
255 | Ser2UTCR0 = UTCR0_8BitData; | ||
256 | Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL; | ||
257 | 241 | ||
258 | /* | 242 | /* |
259 | * Clear status register | 243 | * Ok, we've finished transmitting. Now enable the receiver. |
244 | * Sometimes we get a receive IRQ immediately after a transmit... | ||
260 | */ | 245 | */ |
261 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; | 246 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; |
247 | Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; | ||
262 | 248 | ||
263 | ret = sa1100_irda_set_speed(si, si->speed = 9600); | 249 | sa1100_irda_check_speed(si); |
264 | if (ret) { | ||
265 | Ser2UTCR3 = 0; | ||
266 | Ser2HSCR0 = 0; | ||
267 | |||
268 | if (si->pdata->shutdown) | ||
269 | si->pdata->shutdown(si->dev); | ||
270 | } | ||
271 | |||
272 | return ret; | ||
273 | } | ||
274 | |||
275 | static void sa1100_irda_shutdown(struct sa1100_irda *si) | ||
276 | { | ||
277 | /* | ||
278 | * Stop all DMA activity. | ||
279 | */ | ||
280 | sa1100_stop_dma(si->rxdma); | ||
281 | sa1100_stop_dma(si->txdma); | ||
282 | |||
283 | /* Disable the port. */ | ||
284 | Ser2UTCR3 = 0; | ||
285 | Ser2HSCR0 = 0; | ||
286 | 250 | ||
287 | if (si->pdata->shutdown) | 251 | /* I'm hungry! */ |
288 | si->pdata->shutdown(si->dev); | 252 | netif_wake_queue(dev); |
289 | } | 253 | } |
290 | 254 | ||
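sa1100_irda_check_speed() is introduced by this patch but defined outside the hunks shown here. Judging from its call sites, where it replaces the repeated "if (si->newspeed) ..." blocks of the old code, it presumably just applies any deferred speed change. A sketch inferred from the surrounding code, not quoted from the patch:

    /* Inferred helper: apply a speed change deferred by hard_xmit(). */
    static void sa1100_irda_check_speed(struct sa1100_irda *si)
    {
            if (si->newspeed) {
                    sa1100_irda_set_speed(si, si->newspeed);
                    si->newspeed = 0;
            }
    }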
291 | #ifdef CONFIG_PM | 255 | static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev, |
292 | /* | 256 | struct sa1100_irda *si) |
293 | * Suspend the IrDA interface. | ||
294 | */ | ||
295 | static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) | ||
296 | { | 257 | { |
297 | struct net_device *dev = platform_get_drvdata(pdev); | 258 | si->tx_buff.data = si->tx_buff.head; |
298 | struct sa1100_irda *si; | 259 | si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, |
299 | 260 | si->tx_buff.truesize); | |
300 | if (!dev) | 261 | |
301 | return 0; | 262 | si->dma_tx.skb = skb; |
302 | 263 | sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len); | |
303 | si = netdev_priv(dev); | 264 | if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { |
304 | if (si->open) { | 265 | si->dma_tx.skb = NULL; |
305 | /* | 266 | netif_wake_queue(dev); |
306 | * Stop the transmit queue | 267 | dev->stats.tx_dropped++; |
307 | */ | 268 | return NETDEV_TX_OK; |
308 | netif_device_detach(dev); | ||
309 | disable_irq(dev->irq); | ||
310 | sa1100_irda_shutdown(si); | ||
311 | __sa1100_irda_set_power(si, 0); | ||
312 | } | 269 | } |
313 | 270 | ||
314 | return 0; | 271 | sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev); |
315 | } | ||
316 | |||
317 | /* | ||
318 | * Resume the IrDA interface. | ||
319 | */ | ||
320 | static int sa1100_irda_resume(struct platform_device *pdev) | ||
321 | { | ||
322 | struct net_device *dev = platform_get_drvdata(pdev); | ||
323 | struct sa1100_irda *si; | ||
324 | |||
325 | if (!dev) | ||
326 | return 0; | ||
327 | 272 | ||
328 | si = netdev_priv(dev); | 273 | /* |
329 | if (si->open) { | 274 | * The mean turn-around time is enforced by XBOF padding, |
330 | /* | 275 | * so we don't have to do anything special here. |
331 | * If we missed a speed change, initialise at the new speed | 276 | */ |
332 | * directly. It is debatable whether this is actually | 277 | Ser2UTCR3 = UTCR3_TXE; |
333 | * required, but in the interests of continuing from where | ||
334 | * we left off it is desirable. The converse argument is | ||
335 | * that we should re-negotiate at 9600 baud again. | ||
336 | */ | ||
337 | if (si->newspeed) { | ||
338 | si->speed = si->newspeed; | ||
339 | si->newspeed = 0; | ||
340 | } | ||
341 | |||
342 | sa1100_irda_startup(si); | ||
343 | __sa1100_irda_set_power(si, si->power); | ||
344 | enable_irq(dev->irq); | ||
345 | |||
346 | /* | ||
347 | * This automatically wakes up the queue | ||
348 | */ | ||
349 | netif_device_attach(dev); | ||
350 | } | ||
351 | 278 | ||
352 | return 0; | 279 | return NETDEV_TX_OK; |
353 | } | 280 | } |
354 | #else | ||
355 | #define sa1100_irda_suspend NULL | ||
356 | #define sa1100_irda_resume NULL | ||
357 | #endif | ||
358 | 281 | ||
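Both transmit paths funnel one contiguous buffer through a single-entry scatterlist so the dmaengine slave API can consume it. Reduced to its essentials, the pattern looks like the following; buf, len and dma_dev are placeholders here, not names from the driver:

    /* Map one contiguous buffer for a single slave-DMA transfer. */
    struct scatterlist sg;

    sg_init_table(&sg, 1);
    sg_set_buf(&sg, buf, len);
    if (dma_map_sg(dma_dev, &sg, 1, DMA_TO_DEVICE) == 0)
            return -ENOMEM;         /* mapping failed, drop the frame */

    /* ... submit &sg through the dmaengine slave API; on completion: */
    dma_unmap_sg(dma_dev, &sg, 1, DMA_TO_DEVICE);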
359 | /* | 282 | static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si) |
360 | * HP-SIR format interrupt service routines. | ||
361 | */ | ||
362 | static void sa1100_irda_hpsir_irq(struct net_device *dev) | ||
363 | { | 283 | { |
364 | struct sa1100_irda *si = netdev_priv(dev); | ||
365 | int status; | 284 | int status; |
366 | 285 | ||
367 | status = Ser2UTSR0; | 286 | status = Ser2UTSR0; |
@@ -414,51 +333,96 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev) | |||
414 | 333 | ||
415 | } | 334 | } |
416 | 335 | ||
417 | if (status & UTSR0_TFS && si->tx_buff.len) { | 336 | return IRQ_HANDLED; |
418 | /* | 337 | } |
419 | * Transmitter FIFO is not full | ||
420 | */ | ||
421 | do { | ||
422 | Ser2UTDR = *si->tx_buff.data++; | ||
423 | si->tx_buff.len -= 1; | ||
424 | } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len); | ||
425 | 338 | ||
426 | if (si->tx_buff.len == 0) { | 339 | /* |
427 | dev->stats.tx_packets++; | 340 | * FIR format support. |
428 | dev->stats.tx_bytes += si->tx_buff.data - | 341 | */ |
429 | si->tx_buff.head; | 342 | static void sa1100_irda_firtxdma_irq(void *id) |
343 | { | ||
344 | struct net_device *dev = id; | ||
345 | struct sa1100_irda *si = netdev_priv(dev); | ||
346 | struct sk_buff *skb; | ||
430 | 347 | ||
431 | /* | 348 | /* |
432 | * We need to ensure that the transmitter has | 349 | * Wait for the transmission to complete. Unfortunately, |
433 | * finished. | 350 | * the hardware doesn't give us an interrupt to indicate |
434 | */ | 351 | * "end of frame". |
435 | do | 352 | */ |
436 | rmb(); | 353 | do |
437 | while (Ser2UTSR1 & UTSR1_TBY); | 354 | rmb(); |
355 | while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY); | ||
438 | 356 | ||
439 | /* | 357 | /* |
440 | * Ok, we've finished transmitting. Now enable | 358 | * Clear the transmit underrun bit. |
441 | * the receiver. Sometimes we get a receive IRQ | 359 | */ |
442 | * immediately after a transmit... | 360 | Ser2HSSR0 = HSSR0_TUR; |
443 | */ | ||
444 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; | ||
445 | Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; | ||
446 | 361 | ||
447 | if (si->newspeed) { | 362 | /* |
448 | sa1100_irda_set_speed(si, si->newspeed); | 363 | * Do we need to change speed? Note that we're lazy |
449 | si->newspeed = 0; | 364 | * here - we don't free the old dma_rx.skb. We don't need |
450 | } | 365 | * to allocate a buffer either. |
366 | */ | ||
367 | sa1100_irda_check_speed(si); | ||
451 | 368 | ||
452 | /* I'm hungry! */ | 369 | /* |
453 | netif_wake_queue(dev); | 370 | * Start reception. This disables the transmitter for |
454 | } | 371 | * us. This will be using the existing RX buffer. |
372 | */ | ||
373 | sa1100_irda_rx_dma_start(si); | ||
374 | |||
375 | /* Account and free the packet. */ | ||
376 | skb = si->dma_tx.skb; | ||
377 | if (skb) { | ||
378 | dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, | ||
379 | DMA_TO_DEVICE); | ||
380 | dev->stats.tx_packets++; | ||
381 | dev->stats.tx_bytes += skb->len; | ||
382 | dev_kfree_skb_irq(skb); | ||
383 | si->dma_tx.skb = NULL; | ||
455 | } | 384 | } |
385 | |||
386 | /* | ||
387 | * Make sure that the TX queue is available for sending | ||
388 | * (for retries). TX has priority over RX at all times. | ||
389 | */ | ||
390 | netif_wake_queue(dev); | ||
391 | } | ||
392 | |||
393 | static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev, | ||
394 | struct sa1100_irda *si) | ||
395 | { | ||
396 | int mtt = irda_get_mtt(skb); | ||
397 | |||
398 | si->dma_tx.skb = skb; | ||
399 | sg_set_buf(&si->dma_tx.sg, skb->data, skb->len); | ||
400 | if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { | ||
401 | si->dma_tx.skb = NULL; | ||
402 | netif_wake_queue(dev); | ||
403 | dev->stats.tx_dropped++; | ||
404 | dev_kfree_skb(skb); | ||
405 | return NETDEV_TX_OK; | ||
406 | } | ||
407 | |||
408 | sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev); | ||
409 | |||
410 | /* | ||
411 | * If we have a mean turn-around time, impose the specified | ||
412 | * delay. We could shorten this by timing from | ||
413 | * the point we received the packet. | ||
414 | */ | ||
415 | if (mtt) | ||
416 | udelay(mtt); | ||
417 | |||
418 | Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE; | ||
419 | |||
420 | return NETDEV_TX_OK; | ||
456 | } | 421 | } |
457 | 422 | ||
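The comment in sa1100_irda_fir_tx_start() notes that the full mean turn-around delay could be shortened by timing from the moment the previous frame arrived. That refinement is not part of this patch; a rough sketch of the idea, assuming a hypothetical rx_time field stamped in the receive path:

    /* Hypothetical: wait out only the remainder of the turn-around time. */
    if (mtt) {
            s64 elapsed = ktime_us_delta(ktime_get(), si->rx_time);

            if (elapsed < mtt)
                    udelay(mtt - elapsed);
    }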
458 | static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) | 423 | static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) |
459 | { | 424 | { |
460 | struct sk_buff *skb = si->rxskb; | 425 | struct sk_buff *skb = si->dma_rx.skb; |
461 | dma_addr_t dma_addr; | ||
462 | unsigned int len, stat, data; | 426 | unsigned int len, stat, data; |
463 | 427 | ||
464 | if (!skb) { | 428 | if (!skb) { |
@@ -469,11 +433,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev | |||
469 | /* | 433 | /* |
470 | * Get the current data position. | 434 | * Get the current data position. |
471 | */ | 435 | */ |
472 | dma_addr = sa1100_get_dma_pos(si->rxdma); | 436 | len = sa1100_irda_dma_xferred(&si->dma_rx); |
473 | len = dma_addr - si->rxbuf_dma; | ||
474 | if (len > HPSIR_MAX_RXLEN) | 437 | if (len > HPSIR_MAX_RXLEN) |
475 | len = HPSIR_MAX_RXLEN; | 438 | len = HPSIR_MAX_RXLEN; |
476 | dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE); | 439 | dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); |
477 | 440 | ||
478 | do { | 441 | do { |
479 | /* | 442 | /* |
@@ -501,7 +464,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev | |||
501 | } while (Ser2HSSR0 & HSSR0_EIF); | 464 | } while (Ser2HSSR0 & HSSR0_EIF); |
502 | 465 | ||
503 | if (stat & HSSR1_EOF) { | 466 | if (stat & HSSR1_EOF) { |
504 | si->rxskb = NULL; | 467 | si->dma_rx.skb = NULL; |
505 | 468 | ||
506 | skb_put(skb, len); | 469 | skb_put(skb, len); |
507 | skb->dev = dev; | 470 | skb->dev = dev; |
@@ -518,28 +481,23 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev | |||
518 | netif_rx(skb); | 481 | netif_rx(skb); |
519 | } else { | 482 | } else { |
520 | /* | 483 | /* |
521 | * Remap the buffer. | 484 | * Remap the buffer - it was previously mapped, and we |
485 | * hope that this succeeds. | ||
522 | */ | 486 | */ |
523 | si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, | 487 | dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); |
524 | HPSIR_MAX_RXLEN, | ||
525 | DMA_FROM_DEVICE); | ||
526 | } | 488 | } |
527 | } | 489 | } |
528 | 490 | ||
529 | /* | 491 | /* |
530 | * FIR format interrupt service routine. We only have to | 492 | * We only have to handle RX events here; transmit events go via the TX |
531 | * handle RX events; transmit events go via the TX DMA handler. | 493 | * DMA handler. We disable RX, process, and then restart RX. |
532 | * | ||
533 | * No matter what, we disable RX, process, and the restart RX. | ||
534 | */ | 494 | */ |
535 | static void sa1100_irda_fir_irq(struct net_device *dev) | 495 | static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si) |
536 | { | 496 | { |
537 | struct sa1100_irda *si = netdev_priv(dev); | ||
538 | |||
539 | /* | 497 | /* |
540 | * Stop RX DMA | 498 | * Stop RX DMA |
541 | */ | 499 | */ |
542 | sa1100_stop_dma(si->rxdma); | 500 | dmaengine_pause(si->dma_rx.chan); |
543 | 501 | ||
544 | /* | 502 | /* |
545 | * Framing error - we throw away the packet completely. | 503 | * Framing error - we throw away the packet completely. |
@@ -555,7 +513,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev) | |||
555 | /* | 513 | /* |
556 | * Clear out the DMA... | 514 | * Clear out the DMA... |
557 | */ | 515 | */ |
558 | Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; | 516 | Ser2HSCR0 = HSCR0_HSSP; |
559 | 517 | ||
560 | /* | 518 | /* |
561 | * Clear selected status bits now, so we | 519 | * Clear selected status bits now, so we |
@@ -577,74 +535,124 @@ static void sa1100_irda_fir_irq(struct net_device *dev) | |||
577 | * No matter what happens, we must restart reception. | 535 | * No matter what happens, we must restart reception. |
578 | */ | 536 | */ |
579 | sa1100_irda_rx_dma_start(si); | 537 | sa1100_irda_rx_dma_start(si); |
580 | } | ||
581 | 538 | ||
582 | static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) | ||
583 | { | ||
584 | struct net_device *dev = dev_id; | ||
585 | if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev)))) | ||
586 | sa1100_irda_fir_irq(dev); | ||
587 | else | ||
588 | sa1100_irda_hpsir_irq(dev); | ||
589 | return IRQ_HANDLED; | 539 | return IRQ_HANDLED; |
590 | } | 540 | } |
591 | 541 | ||
592 | /* | 542 | /* |
593 | * TX DMA completion handler. | 543 | * Set the IrDA communications speed. |
594 | */ | 544 | */ |
595 | static void sa1100_irda_txdma_irq(void *id) | 545 | static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) |
596 | { | 546 | { |
597 | struct net_device *dev = id; | 547 | unsigned long flags; |
598 | struct sa1100_irda *si = netdev_priv(dev); | 548 | int brd, ret = -EINVAL; |
599 | struct sk_buff *skb = si->txskb; | ||
600 | 549 | ||
601 | si->txskb = NULL; | 550 | switch (speed) { |
551 | case 9600: case 19200: case 38400: | ||
552 | case 57600: case 115200: | ||
553 | brd = 3686400 / (16 * speed) - 1; | ||
602 | 554 | ||
603 | /* | 555 | /* Stop the receive DMA, and configure transmit. */ |
604 | * Wait for the transmission to complete. Unfortunately, | 556 | if (IS_FIR(si)) { |
605 | * the hardware doesn't give us an interrupt to indicate | 557 | dmaengine_terminate_all(si->dma_rx.chan); |
606 | * "end of frame". | 558 | dmaengine_slave_config(si->dma_tx.chan, |
607 | */ | 559 | &sa1100_irda_sir_tx); |
608 | do | 560 | } |
609 | rmb(); | ||
610 | while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY); | ||
611 | 561 | ||
612 | /* | 562 | local_irq_save(flags); |
613 | * Clear the transmit underrun bit. | ||
614 | */ | ||
615 | Ser2HSSR0 = HSSR0_TUR; | ||
616 | 563 | ||
617 | /* | 564 | Ser2UTCR3 = 0; |
618 | * Do we need to change speed? Note that we're lazy | 565 | Ser2HSCR0 = HSCR0_UART; |
619 | * here - we don't free the old rxskb. We don't need | ||
620 | * to allocate a buffer either. | ||
621 | */ | ||
622 | if (si->newspeed) { | ||
623 | sa1100_irda_set_speed(si, si->newspeed); | ||
624 | si->newspeed = 0; | ||
625 | } | ||
626 | 566 | ||
627 | /* | 567 | Ser2UTCR1 = brd >> 8; |
628 | * Start reception. This disables the transmitter for | 568 | Ser2UTCR2 = brd; |
629 | * us. This will be using the existing RX buffer. | ||
630 | */ | ||
631 | sa1100_irda_rx_dma_start(si); | ||
632 | 569 | ||
633 | /* | 570 | /* |
634 | * Account and free the packet. | 571 | * Clear status register |
635 | */ | 572 | */ |
636 | if (skb) { | 573 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; |
637 | dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE); | 574 | Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; |
638 | dev->stats.tx_packets ++; | 575 | |
639 | dev->stats.tx_bytes += skb->len; | 576 | if (si->pdata->set_speed) |
640 | dev_kfree_skb_irq(skb); | 577 | si->pdata->set_speed(si->dev, speed); |
578 | |||
579 | si->speed = speed; | ||
580 | si->tx_start = sa1100_irda_sir_tx_start; | ||
581 | si->irq = sa1100_irda_sir_irq; | ||
582 | |||
583 | local_irq_restore(flags); | ||
584 | ret = 0; | ||
585 | break; | ||
586 | |||
587 | case 4000000: | ||
588 | if (!IS_FIR(si)) | ||
589 | dmaengine_slave_config(si->dma_tx.chan, | ||
590 | &sa1100_irda_fir_tx); | ||
591 | |||
592 | local_irq_save(flags); | ||
593 | |||
594 | Ser2HSSR0 = 0xff; | ||
595 | Ser2HSCR0 = HSCR0_HSSP; | ||
596 | Ser2UTCR3 = 0; | ||
597 | |||
598 | si->speed = speed; | ||
599 | si->tx_start = sa1100_irda_fir_tx_start; | ||
600 | si->irq = sa1100_irda_fir_irq; | ||
601 | |||
602 | if (si->pdata->set_speed) | ||
603 | si->pdata->set_speed(si->dev, speed); | ||
604 | |||
605 | sa1100_irda_rx_alloc(si); | ||
606 | sa1100_irda_rx_dma_start(si); | ||
607 | |||
608 | local_irq_restore(flags); | ||
609 | |||
610 | break; | ||
611 | |||
612 | default: | ||
613 | break; | ||
641 | } | 614 | } |
642 | 615 | ||
643 | /* | 616 | return ret; |
644 | * Make sure that the TX queue is available for sending | 617 | } |
645 | * (for retries). TX has priority over RX at all times. | 618 | |
646 | */ | 619 | /* |
647 | netif_wake_queue(dev); | 620 | * Control the power state of the IrDA transmitter. |
621 | * State: | ||
622 | * 0 - off | ||
623 | * 1 - short range, lowest power | ||
624 | * 2 - medium range, medium power | ||
625 | * 3 - maximum range, high power | ||
626 | * | ||
627 | * Currently, only assabet is known to support this. | ||
628 | */ | ||
629 | static int | ||
630 | __sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) | ||
631 | { | ||
632 | int ret = 0; | ||
633 | if (si->pdata->set_power) | ||
634 | ret = si->pdata->set_power(si->dev, state); | ||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | static inline int | ||
639 | sa1100_set_power(struct sa1100_irda *si, unsigned int state) | ||
640 | { | ||
641 | int ret; | ||
642 | |||
643 | ret = __sa1100_irda_set_power(si, state); | ||
644 | if (ret == 0) | ||
645 | si->power = state; | ||
646 | |||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) | ||
651 | { | ||
652 | struct net_device *dev = dev_id; | ||
653 | struct sa1100_irda *si = netdev_priv(dev); | ||
654 | |||
655 | return si->irq(dev, si); | ||
648 | } | 656 | } |
649 | 657 | ||
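The SIR divisor in sa1100_irda_set_speed() comes straight from Ser2's 3.6864 MHz UART clock: brd = 3686400 / (16 * speed) - 1, with the result split across UTCR1 (high byte) and UTCR2 (low byte). Worked values for the supported rates:

    brd = 3686400 / (16 *   9600) - 1;      /* = 23 */
    brd = 3686400 / (16 *  19200) - 1;      /* = 11 */
    brd = 3686400 / (16 *  38400) - 1;      /* =  5 */
    brd = 3686400 / (16 *  57600) - 1;      /* =  3 */
    brd = 3686400 / (16 * 115200) - 1;      /* =  1 */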
650 | static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | 658 | static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -660,62 +668,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
660 | if (speed != si->speed && speed != -1) | 668 | if (speed != si->speed && speed != -1) |
661 | si->newspeed = speed; | 669 | si->newspeed = speed; |
662 | 670 | ||
663 | /* | 671 | /* If this is an empty frame, we can bypass a lot. */ |
664 | * If this is an empty frame, we can bypass a lot. | ||
665 | */ | ||
666 | if (skb->len == 0) { | 672 | if (skb->len == 0) { |
667 | if (si->newspeed) { | 673 | sa1100_irda_check_speed(si); |
668 | si->newspeed = 0; | ||
669 | sa1100_irda_set_speed(si, speed); | ||
670 | } | ||
671 | dev_kfree_skb(skb); | 674 | dev_kfree_skb(skb); |
672 | return NETDEV_TX_OK; | 675 | return NETDEV_TX_OK; |
673 | } | 676 | } |
674 | 677 | ||
675 | if (!IS_FIR(si)) { | 678 | netif_stop_queue(dev); |
676 | netif_stop_queue(dev); | ||
677 | |||
678 | si->tx_buff.data = si->tx_buff.head; | ||
679 | si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, | ||
680 | si->tx_buff.truesize); | ||
681 | |||
682 | /* | ||
683 | * Set the transmit interrupt enable. This will fire | ||
684 | * off an interrupt immediately. Note that we disable | ||
685 | * the receiver so we won't get spurious characteres | ||
686 | * received. | ||
687 | */ | ||
688 | Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; | ||
689 | |||
690 | dev_kfree_skb(skb); | ||
691 | } else { | ||
692 | int mtt = irda_get_mtt(skb); | ||
693 | |||
694 | /* | ||
695 | * We must not be transmitting... | ||
696 | */ | ||
697 | BUG_ON(si->txskb); | ||
698 | |||
699 | netif_stop_queue(dev); | ||
700 | |||
701 | si->txskb = skb; | ||
702 | si->txbuf_dma = dma_map_single(si->dev, skb->data, | ||
703 | skb->len, DMA_TO_DEVICE); | ||
704 | |||
705 | sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len); | ||
706 | |||
707 | /* | ||
708 | * If we have a mean turn-around time, impose the specified | ||
709 | * specified delay. We could shorten this by timing from | ||
710 | * the point we received the packet. | ||
711 | */ | ||
712 | if (mtt) | ||
713 | udelay(mtt); | ||
714 | 679 | ||
715 | Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; | 680 | /* We must not already have a skb to transmit... */ |
716 | } | 681 | BUG_ON(si->dma_tx.skb); |
717 | 682 | ||
718 | return NETDEV_TX_OK; | 683 | return si->tx_start(skb, dev, si); |
719 | } | 684 | } |
720 | 685 | ||
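After this rewrite, neither hard_xmit() nor the interrupt entry point branches on IS_FIR(); sa1100_irda_set_speed() installs per-mode methods instead. The struct sa1100_irda fields implied by the calls above (the struct definition itself is outside these hunks) would look roughly like:

    /* Inferred per-mode dispatch members of struct sa1100_irda. */
    int             (*tx_start)(struct sk_buff *skb, struct net_device *dev,
                                struct sa1100_irda *si);
    irqreturn_t     (*irq)(struct net_device *dev, struct sa1100_irda *si);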
721 | static int | 686 | static int |
@@ -762,6 +727,69 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) | |||
762 | return ret; | 727 | return ret; |
763 | } | 728 | } |
764 | 729 | ||
730 | static int sa1100_irda_startup(struct sa1100_irda *si) | ||
731 | { | ||
732 | int ret; | ||
733 | |||
734 | /* | ||
735 | * Ensure that the ports for this device are set up correctly. | ||
736 | */ | ||
737 | if (si->pdata->startup) { | ||
738 | ret = si->pdata->startup(si->dev); | ||
739 | if (ret) | ||
740 | return ret; | ||
741 | } | ||
742 | |||
743 | /* | ||
744 | * Configure PPC for IRDA - we want to drive TXD2 low. | ||
745 | * We also want to drive this pin low during sleep. | ||
746 | */ | ||
747 | PPSR &= ~PPC_TXD2; | ||
748 | PSDR &= ~PPC_TXD2; | ||
749 | PPDR |= PPC_TXD2; | ||
750 | |||
751 | /* | ||
752 | * Enable HP-SIR modulation, and ensure that the port is disabled. | ||
753 | */ | ||
754 | Ser2UTCR3 = 0; | ||
755 | Ser2HSCR0 = HSCR0_UART; | ||
756 | Ser2UTCR4 = si->utcr4; | ||
757 | Ser2UTCR0 = UTCR0_8BitData; | ||
758 | Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL; | ||
759 | |||
760 | /* | ||
761 | * Clear status register | ||
762 | */ | ||
763 | Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; | ||
764 | |||
765 | ret = sa1100_irda_set_speed(si, si->speed = 9600); | ||
766 | if (ret) { | ||
767 | Ser2UTCR3 = 0; | ||
768 | Ser2HSCR0 = 0; | ||
769 | |||
770 | if (si->pdata->shutdown) | ||
771 | si->pdata->shutdown(si->dev); | ||
772 | } | ||
773 | |||
774 | return ret; | ||
775 | } | ||
776 | |||
777 | static void sa1100_irda_shutdown(struct sa1100_irda *si) | ||
778 | { | ||
779 | /* | ||
780 | * Stop all DMA activity. | ||
781 | */ | ||
782 | dmaengine_terminate_all(si->dma_rx.chan); | ||
783 | dmaengine_terminate_all(si->dma_tx.chan); | ||
784 | |||
785 | /* Disable the port. */ | ||
786 | Ser2UTCR3 = 0; | ||
787 | Ser2HSCR0 = 0; | ||
788 | |||
789 | if (si->pdata->shutdown) | ||
790 | si->pdata->shutdown(si->dev); | ||
791 | } | ||
792 | |||
765 | static int sa1100_irda_start(struct net_device *dev) | 793 | static int sa1100_irda_start(struct net_device *dev) |
766 | { | 794 | { |
767 | struct sa1100_irda *si = netdev_priv(dev); | 795 | struct sa1100_irda *si = netdev_priv(dev); |
@@ -769,26 +797,17 @@ static int sa1100_irda_start(struct net_device *dev) | |||
769 | 797 | ||
770 | si->speed = 9600; | 798 | si->speed = 9600; |
771 | 799 | ||
772 | err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); | 800 | err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc", |
773 | if (err) | 801 | &sa1100_irda_fir_rx); |
774 | goto err_irq; | ||
775 | |||
776 | err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive", | ||
777 | NULL, NULL, &si->rxdma); | ||
778 | if (err) | 802 | if (err) |
779 | goto err_rx_dma; | 803 | goto err_rx_dma; |
780 | 804 | ||
781 | err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", | 805 | err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr", |
782 | sa1100_irda_txdma_irq, dev, &si->txdma); | 806 | &sa1100_irda_sir_tx); |
783 | if (err) | 807 | if (err) |
784 | goto err_tx_dma; | 808 | goto err_tx_dma; |
785 | 809 | ||
786 | /* | 810 | /* |
787 | * The interrupt must remain disabled for now. | ||
788 | */ | ||
789 | disable_irq(dev->irq); | ||
790 | |||
791 | /* | ||
792 | * Setup the serial port for the specified speed. | 811 | * Setup the serial port for the specified speed. |
793 | */ | 812 | */ |
794 | err = sa1100_irda_startup(si); | 813 | err = sa1100_irda_startup(si); |
@@ -803,44 +822,60 @@ static int sa1100_irda_start(struct net_device *dev) | |||
803 | if (!si->irlap) | 822 | if (!si->irlap) |
804 | goto err_irlap; | 823 | goto err_irlap; |
805 | 824 | ||
825 | err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); | ||
826 | if (err) | ||
827 | goto err_irq; | ||
828 | |||
806 | /* | 829 | /* |
807 | * Now enable the interrupt and start the queue | 830 | * Now enable the interrupt and start the queue |
808 | */ | 831 | */ |
809 | si->open = 1; | 832 | si->open = 1; |
810 | sa1100_set_power(si, power_level); /* low power mode */ | 833 | sa1100_set_power(si, power_level); /* low power mode */ |
811 | enable_irq(dev->irq); | 834 | |
812 | netif_start_queue(dev); | 835 | netif_start_queue(dev); |
813 | return 0; | 836 | return 0; |
814 | 837 | ||
838 | err_irq: | ||
839 | irlap_close(si->irlap); | ||
815 | err_irlap: | 840 | err_irlap: |
816 | si->open = 0; | 841 | si->open = 0; |
817 | sa1100_irda_shutdown(si); | 842 | sa1100_irda_shutdown(si); |
818 | err_startup: | 843 | err_startup: |
819 | sa1100_free_dma(si->txdma); | 844 | dma_release_channel(si->dma_tx.chan); |
820 | err_tx_dma: | 845 | err_tx_dma: |
821 | sa1100_free_dma(si->rxdma); | 846 | dma_release_channel(si->dma_rx.chan); |
822 | err_rx_dma: | 847 | err_rx_dma: |
823 | free_irq(dev->irq, dev); | ||
824 | err_irq: | ||
825 | return err; | 848 | return err; |
826 | } | 849 | } |
827 | 850 | ||
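sa1100_irda_dma_request() is another helper that falls outside the hunks shown. Given the channel names it is passed ("Ser2ICPRc", "Ser2ICPTr") and the filter exported by the new sa11x0-dma driver, it plausibly wraps dma_request_channel() plus the slave configuration, along these lines (a sketch, not the verbatim implementation):

    /* Plausible sketch: claim a named sa11x0 channel and configure it. */
    static int sa1100_irda_dma_request(struct device *dev,
            struct sa1100_buf *buf, const char *name,
            struct dma_slave_config *cfg)
    {
            dma_cap_mask_t m;

            dma_cap_zero(m);
            dma_cap_set(DMA_SLAVE, m);

            buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn,
                                            (void *)name);
            if (!buf->chan) {
                    dev_err(dev, "unable to request DMA channel for %s\n",
                            name);
                    return -ENOENT;
            }

            return dmaengine_slave_config(buf->chan, cfg);
    }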
828 | static int sa1100_irda_stop(struct net_device *dev) | 851 | static int sa1100_irda_stop(struct net_device *dev) |
829 | { | 852 | { |
830 | struct sa1100_irda *si = netdev_priv(dev); | 853 | struct sa1100_irda *si = netdev_priv(dev); |
854 | struct sk_buff *skb; | ||
855 | |||
856 | netif_stop_queue(dev); | ||
831 | 857 | ||
832 | disable_irq(dev->irq); | 858 | si->open = 0; |
833 | sa1100_irda_shutdown(si); | 859 | sa1100_irda_shutdown(si); |
834 | 860 | ||
835 | /* | 861 | /* |
836 | * If we have been doing DMA receive, make sure we | 862 | * If we have been doing any DMA activity, make sure we |
837 | * tidy that up cleanly. | 863 | * tidy that up cleanly. |
838 | */ | 864 | */ |
839 | if (si->rxskb) { | 865 | skb = si->dma_rx.skb; |
840 | dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN, | 866 | if (skb) { |
841 | DMA_FROM_DEVICE); | 867 | dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, |
842 | dev_kfree_skb(si->rxskb); | 868 | DMA_FROM_DEVICE); |
843 | si->rxskb = NULL; | 869 | dev_kfree_skb(skb); |
870 | si->dma_rx.skb = NULL; | ||
871 | } | ||
872 | |||
873 | skb = si->dma_tx.skb; | ||
874 | if (skb) { | ||
875 | dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, | ||
876 | DMA_TO_DEVICE); | ||
877 | dev_kfree_skb(skb); | ||
878 | si->dma_tx.skb = NULL; | ||
844 | } | 879 | } |
845 | 880 | ||
846 | /* Stop IrLAP */ | 881 | /* Stop IrLAP */ |
@@ -849,14 +884,11 @@ static int sa1100_irda_stop(struct net_device *dev) | |||
849 | si->irlap = NULL; | 884 | si->irlap = NULL; |
850 | } | 885 | } |
851 | 886 | ||
852 | netif_stop_queue(dev); | ||
853 | si->open = 0; | ||
854 | |||
855 | /* | 887 | /* |
856 | * Free resources | 888 | * Free resources |
857 | */ | 889 | */ |
858 | sa1100_free_dma(si->txdma); | 890 | dma_release_channel(si->dma_tx.chan); |
859 | sa1100_free_dma(si->rxdma); | 891 | dma_release_channel(si->dma_rx.chan); |
860 | free_irq(dev->irq, dev); | 892 | free_irq(dev->irq, dev); |
861 | 893 | ||
862 | sa1100_set_power(si, 0); | 894 | sa1100_set_power(si, 0); |
@@ -888,11 +920,15 @@ static int sa1100_irda_probe(struct platform_device *pdev) | |||
888 | struct net_device *dev; | 920 | struct net_device *dev; |
889 | struct sa1100_irda *si; | 921 | struct sa1100_irda *si; |
890 | unsigned int baudrate_mask; | 922 | unsigned int baudrate_mask; |
891 | int err; | 923 | int err, irq; |
892 | 924 | ||
893 | if (!pdev->dev.platform_data) | 925 | if (!pdev->dev.platform_data) |
894 | return -EINVAL; | 926 | return -EINVAL; |
895 | 927 | ||
928 | irq = platform_get_irq(pdev, 0); | ||
929 | if (irq <= 0) | ||
930 | return irq < 0 ? irq : -ENXIO; | ||
931 | |||
896 | err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; | 932 | err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; |
897 | if (err) | 933 | if (err) |
898 | goto err_mem_1; | 934 | goto err_mem_1; |
@@ -907,22 +943,27 @@ static int sa1100_irda_probe(struct platform_device *pdev) | |||
907 | if (!dev) | 943 | if (!dev) |
908 | goto err_mem_4; | 944 | goto err_mem_4; |
909 | 945 | ||
946 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
947 | |||
910 | si = netdev_priv(dev); | 948 | si = netdev_priv(dev); |
911 | si->dev = &pdev->dev; | 949 | si->dev = &pdev->dev; |
912 | si->pdata = pdev->dev.platform_data; | 950 | si->pdata = pdev->dev.platform_data; |
913 | 951 | ||
952 | sg_init_table(&si->dma_rx.sg, 1); | ||
953 | sg_init_table(&si->dma_tx.sg, 1); | ||
954 | |||
914 | /* | 955 | /* |
915 | * Initialise the HP-SIR buffers | 956 | * Initialise the HP-SIR buffers |
916 | */ | 957 | */ |
917 | err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); | 958 | err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); |
918 | if (err) | 959 | if (err) |
919 | goto err_mem_5; | 960 | goto err_mem_5; |
920 | err = sa1100_irda_init_iobuf(&si->tx_buff, 4000); | 961 | err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME); |
921 | if (err) | 962 | if (err) |
922 | goto err_mem_5; | 963 | goto err_mem_5; |
923 | 964 | ||
924 | dev->netdev_ops = &sa1100_irda_netdev_ops; | 965 | dev->netdev_ops = &sa1100_irda_netdev_ops; |
925 | dev->irq = IRQ_Ser2ICP; | 966 | dev->irq = irq; |
926 | 967 | ||
927 | irda_init_max_qos_capabilies(&si->qos); | 968 | irda_init_max_qos_capabilies(&si->qos); |
928 | 969 | ||
@@ -996,6 +1037,74 @@ static int sa1100_irda_remove(struct platform_device *pdev) | |||
996 | return 0; | 1037 | return 0; |
997 | } | 1038 | } |
998 | 1039 | ||
1040 | #ifdef CONFIG_PM | ||
1041 | /* | ||
1042 | * Suspend the IrDA interface. | ||
1043 | */ | ||
1044 | static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) | ||
1045 | { | ||
1046 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1047 | struct sa1100_irda *si; | ||
1048 | |||
1049 | if (!dev) | ||
1050 | return 0; | ||
1051 | |||
1052 | si = netdev_priv(dev); | ||
1053 | if (si->open) { | ||
1054 | /* | ||
1055 | * Stop the transmit queue | ||
1056 | */ | ||
1057 | netif_device_detach(dev); | ||
1058 | disable_irq(dev->irq); | ||
1059 | sa1100_irda_shutdown(si); | ||
1060 | __sa1100_irda_set_power(si, 0); | ||
1061 | } | ||
1062 | |||
1063 | return 0; | ||
1064 | } | ||
1065 | |||
1066 | /* | ||
1067 | * Resume the IrDA interface. | ||
1068 | */ | ||
1069 | static int sa1100_irda_resume(struct platform_device *pdev) | ||
1070 | { | ||
1071 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1072 | struct sa1100_irda *si; | ||
1073 | |||
1074 | if (!dev) | ||
1075 | return 0; | ||
1076 | |||
1077 | si = netdev_priv(dev); | ||
1078 | if (si->open) { | ||
1079 | /* | ||
1080 | * If we missed a speed change, initialise at the new speed | ||
1081 | * directly. It is debatable whether this is actually | ||
1082 | * required, but in the interests of continuing from where | ||
1083 | * we left off, it is desirable. The converse argument is | ||
1084 | * that we should re-negotiate at 9600 baud again. | ||
1085 | */ | ||
1086 | if (si->newspeed) { | ||
1087 | si->speed = si->newspeed; | ||
1088 | si->newspeed = 0; | ||
1089 | } | ||
1090 | |||
1091 | sa1100_irda_startup(si); | ||
1092 | __sa1100_irda_set_power(si, si->power); | ||
1093 | enable_irq(dev->irq); | ||
1094 | |||
1095 | /* | ||
1096 | * This automatically wakes up the queue | ||
1097 | */ | ||
1098 | netif_device_attach(dev); | ||
1099 | } | ||
1100 | |||
1101 | return 0; | ||
1102 | } | ||
1103 | #else | ||
1104 | #define sa1100_irda_suspend NULL | ||
1105 | #define sa1100_irda_resume NULL | ||
1106 | #endif | ||
1107 | |||
999 | static struct platform_driver sa1100ir_driver = { | 1108 | static struct platform_driver sa1100ir_driver = { |
1000 | .probe = sa1100_irda_probe, | 1109 | .probe = sa1100_irda_probe, |
1001 | .remove = sa1100_irda_remove, | 1110 | .remove = sa1100_irda_remove, |
diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h new file mode 100644 index 000000000000..65839a58b8e5 --- /dev/null +++ b/include/linux/sa11x0-dma.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * SA11x0 DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef __LINUX_SA11X0_DMA_H | ||
11 | #define __LINUX_SA11X0_DMA_H | ||
12 | |||
13 | struct dma_chan; | ||
14 | |||
15 | #if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) | ||
16 | bool sa11x0_dma_filter_fn(struct dma_chan *, void *); | ||
17 | #else | ||
18 | static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) | ||
19 | { | ||
20 | return false; | ||
21 | } | ||
22 | #endif | ||
23 | |||
24 | #endif | ||
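A dmaengine filter function returns true to claim a candidate channel on behalf of dma_request_channel(). Here the parameter is a channel name string (e.g. "Ser2ICPRc" in the IrDA driver above), so the implementation in drivers/dma/sa11x0-dma.c presumably reduces to a name comparison, roughly as follows; to_sa11x0_dma_chan() and the name field are assumed driver internals:

    /* Sketch of the filter's likely semantics (actual code not shown here). */
    bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
    {
            struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
            const char *name = param;

            return strcmp(c->name, name) == 0;
    }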