author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-27 21:17:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-27 21:17:02 -0400
commit    9e4db1c3eed55c22328d8022c2c80adb3093833f (patch)
tree      9643545e6bd182f1d3e19942f590a6a1e3198320 /drivers
parent    de8856d2c11f562c60ed9340a83db4a4f829a6e6 (diff)
parent    aae528d9a8ad79d4b21b1b723abc9447fdb0d200 (diff)
Merge branch 'platforms' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM platform updates from Russell King:
 "This covers platform stuff for platforms I have a direct interest in
  (iow, I have the hardware).  Essentially:

   - as we no longer support any other Acorn platforms other than RiscPC
     anymore, we can collect all that code into mach-rpc.

   - convert Acorn expansion card stuff to use IRQ allocation functions,
     and get rid of NO_IRQ from there.

   - cleanups to the ebsa110 platform to move some private stuff out of
     its header files.

   - large amount of SA11x0 updates:

     - conversion of private DMA implementation to DMA engine support
       (this actually gives us greater flexibility in drivers over the
       old API.)

     - re-worked ucb1x00 updates - convert to genirq, remove sa11x0
       dependencies, fix various minor issues

     - move platform specific sa11x0 framebuffer data into platform files
       in arch/arm instead of keeping this in the driver itself

     - update sa11x0 IrDA driver for DMA engine, and allow it to use DMA
       for SIR transmissions as well as FIR

     - rework sa1111 support for genirq, and irq allocation

     - fix sa1111 IRQ support so it works again

     - use sparse IRQ support

  After this, I have one more pull request remaining from my current set,
  which I think is going to be the most problematical as it generates 8
  conflicts."

Fixed up the trivial conflict in arch/arm/mach-rpc/Makefile as per Russell.

* 'platforms' of git://git.linaro.org/people/rmk/linux-arm: (125 commits)
  ARM: 7343/1: sa11x0: convert to sparse IRQ
  ARM: 7342/2: sa1100: prepare for sparse irq conversion
  ARM: 7341/1: input: prepare jornada720 keyboard and ts for sa11x0 sparse irq
  ARM: 7340/1: rtc: sa1100: include mach/irqs.h instead of asm/irq.h
  ARM: sa11x0: remove unused DMA controller definitions
  ARM: sa11x0: remove old SoC private DMA driver
  USB: sa1111: add hcd .reset method
  USB: sa1111: add OHCI shutdown methods
  USB: sa1111: reorganize ohci-sa1111.c
  USB: sa1111: get rid of nasty printk(KERN_DEBUG "%s: ...", __FILE__)
  USB: sa1111: sparse and checkpatch cleanups
  ARM: sa11x0: don't static map sa1111
  ARM: sa1111: use dev_err() rather than printk()
  ARM: sa1111: cleanup sub-device registration and unregistration
  ARM: sa1111: only setup DMA for DMA capable devices
  ARM: sa1111: register sa1111 devices with dmabounce in bus notifier
  ARM: sa1111: move USB interface register definitions to ohci-sa1111.c
  ARM: sa1111: move PCMCIA interface register definitions to sa1111_generic.c
  ARM: sa1111: move PS/2 interface register definitions to sa1111p2.c
  ARM: sa1111: delete unused physical GPIO register definitions
  ...
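The headline SA11x0 item above is the move from the old mach-sa1100 private DMA API to a dmaengine slave driver, added below as drivers/dma/sa11x0-dma.c. As a rough, hedged sketch of what converted clients such as the sa1100 IrDA driver gain, the fragment below walks the generic dmaengine slave flow against the new driver: request a named channel via the exported sa11x0_dma_filter_fn, prepare a scatter-gather transfer, submit it, and kick the engine. The function name, the "Ser2ICPRc" channel choice, and the error handling are illustrative assumptions, not code from this merge.

/*
 * Illustrative sketch only: a minimal dmaengine slave transfer as a
 * converted SA11x0 client might issue it.  The function name and the
 * "Ser2ICPRc" channel choice are assumptions, not code from this merge.
 * Assumes the scatterlist has already been dma_map_sg()'d and that the
 * channel receives a dmaengine_slave_config() call (see the sketch after
 * the new driver below) before transfers are prepared.
 */
#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>
#include <linux/scatterlist.h>

static int example_start_rx(struct scatterlist *sg, unsigned int nents,
			    void (*done)(void *), void *done_arg)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Match one of the named channels in the driver's chan_desc[] table */
	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser2ICPRc");
	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = done;
	desc->callback_param = done_arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}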
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/sa11x0-dma.c | 1109
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 1
-rw-r--r--  drivers/input/keyboard/jornada720_kbd.c | 1
-rw-r--r--  drivers/input/serio/rpckbd.c | 44
-rw-r--r--  drivers/input/serio/sa1111ps2.c | 59
-rw-r--r--  drivers/input/touchscreen/jornada720_ts.c | 1
-rw-r--r--  drivers/mfd/Kconfig | 5
-rw-r--r--  drivers/mfd/mcp-core.c | 49
-rw-r--r--  drivers/mfd/mcp-sa11x0.c | 198
-rw-r--r--  drivers/mfd/ucb1x00-assabet.c | 46
-rw-r--r--  drivers/mfd/ucb1x00-core.c | 433
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 39
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 112
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/irda/Kconfig | 2
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 953
-rw-r--r--  drivers/pcmcia/sa1111_generic.c | 55
-rw-r--r--  drivers/pcmcia/sa1111_neponset.c | 7
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 2
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 4
-rw-r--r--  drivers/scsi/arm/fas216.h | 4
-rw-r--r--  drivers/tty/serial/sa1100.c | 1
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 297
-rw-r--r--  drivers/video/sa1100fb.c | 493
-rw-r--r--  drivers/video/sa1100fb.h | 76
28 files changed, 2531 insertions, 1474 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f1a274994bb1..4a6c46dea8a0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -252,6 +252,15 @@ config EP93XX_DMA
252 help 252 help
253 Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. 253 Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
254 254
255config DMA_SA11X0
256 tristate "SA-11x0 DMA support"
257 depends on ARCH_SA1100
258 select DMA_ENGINE
259 help
260 Support the DMA engine found on Intel StrongARM SA-1100 and
261 SA-1110 SoCs. This DMA engine can only be used with on-chip
262 devices.
263
255config DMA_ENGINE 264config DMA_ENGINE
256 bool 265 bool
257 266
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 009a222e8283..86b795baba98 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
27obj-$(CONFIG_PCH_DMA) += pch_dma.o 27obj-$(CONFIG_PCH_DMA) += pch_dma.o
28obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o 28obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
29obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o 29obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
30obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
new file mode 100644
index 000000000000..16a6b48883cf
--- /dev/null
+++ b/drivers/dma/sa11x0-dma.c
@@ -0,0 +1,1109 @@
1/*
2 * SA11x0 DMAengine support
3 *
4 * Copyright (C) 2012 Russell King
5 * Derived in part from arch/arm/mach-sa1100/dma.c,
6 * Copyright (C) 2000, 2001 by Nicolas Pitre
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/sched.h>
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/sa11x0-dma.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24#define NR_PHY_CHAN 6
25#define DMA_ALIGN 3
26#define DMA_MAX_SIZE 0x1fff
27#define DMA_CHUNK_SIZE 0x1000
28
29#define DMA_DDAR 0x00
30#define DMA_DCSR_S 0x04
31#define DMA_DCSR_C 0x08
32#define DMA_DCSR_R 0x0c
33#define DMA_DBSA 0x10
34#define DMA_DBTA 0x14
35#define DMA_DBSB 0x18
36#define DMA_DBTB 0x1c
37#define DMA_SIZE 0x20
38
39#define DCSR_RUN (1 << 0)
40#define DCSR_IE (1 << 1)
41#define DCSR_ERROR (1 << 2)
42#define DCSR_DONEA (1 << 3)
43#define DCSR_STRTA (1 << 4)
44#define DCSR_DONEB (1 << 5)
45#define DCSR_STRTB (1 << 6)
46#define DCSR_BIU (1 << 7)
47
48#define DDAR_RW (1 << 0) /* 0 = W, 1 = R */
49#define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */
50#define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */
51#define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */
52#define DDAR_Ser0UDCTr (0x0 << 4)
53#define DDAR_Ser0UDCRc (0x1 << 4)
54#define DDAR_Ser1SDLCTr (0x2 << 4)
55#define DDAR_Ser1SDLCRc (0x3 << 4)
56#define DDAR_Ser1UARTTr (0x4 << 4)
57#define DDAR_Ser1UARTRc (0x5 << 4)
58#define DDAR_Ser2ICPTr (0x6 << 4)
59#define DDAR_Ser2ICPRc (0x7 << 4)
60#define DDAR_Ser3UARTTr (0x8 << 4)
61#define DDAR_Ser3UARTRc (0x9 << 4)
62#define DDAR_Ser4MCP0Tr (0xa << 4)
63#define DDAR_Ser4MCP0Rc (0xb << 4)
64#define DDAR_Ser4MCP1Tr (0xc << 4)
65#define DDAR_Ser4MCP1Rc (0xd << 4)
66#define DDAR_Ser4SSPTr (0xe << 4)
67#define DDAR_Ser4SSPRc (0xf << 4)
68
69struct sa11x0_dma_sg {
70 u32 addr;
71 u32 len;
72};
73
74struct sa11x0_dma_desc {
75 struct dma_async_tx_descriptor tx;
76 u32 ddar;
77 size_t size;
78
79 /* maybe protected by c->lock */
80 struct list_head node;
81 unsigned sglen;
82 struct sa11x0_dma_sg sg[0];
83};
84
85struct sa11x0_dma_phy;
86
87struct sa11x0_dma_chan {
88 struct dma_chan chan;
89 spinlock_t lock;
90 dma_cookie_t lc;
91
92 /* protected by c->lock */
93 struct sa11x0_dma_phy *phy;
94 enum dma_status status;
95 struct list_head desc_submitted;
96 struct list_head desc_issued;
97
98 /* protected by d->lock */
99 struct list_head node;
100
101 u32 ddar;
102 const char *name;
103};
104
105struct sa11x0_dma_phy {
106 void __iomem *base;
107 struct sa11x0_dma_dev *dev;
108 unsigned num;
109
110 struct sa11x0_dma_chan *vchan;
111
112 /* Protected by c->lock */
113 unsigned sg_load;
114 struct sa11x0_dma_desc *txd_load;
115 unsigned sg_done;
116 struct sa11x0_dma_desc *txd_done;
117#ifdef CONFIG_PM_SLEEP
118 u32 dbs[2];
119 u32 dbt[2];
120 u32 dcsr;
121#endif
122};
123
124struct sa11x0_dma_dev {
125 struct dma_device slave;
126 void __iomem *base;
127 spinlock_t lock;
128 struct tasklet_struct task;
129 struct list_head chan_pending;
130 struct list_head desc_complete;
131 struct sa11x0_dma_phy phy[NR_PHY_CHAN];
132};
133
134static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
135{
136 return container_of(chan, struct sa11x0_dma_chan, chan);
137}
138
139static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
140{
141 return container_of(dmadev, struct sa11x0_dma_dev, slave);
142}
143
144static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
145{
146 return container_of(tx, struct sa11x0_dma_desc, tx);
147}
148
149static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
150{
151 if (list_empty(&c->desc_issued))
152 return NULL;
153
154 return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
155}
156
157static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
158{
159 list_del(&txd->node);
160 p->txd_load = txd;
161 p->sg_load = 0;
162
163 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
164 p->num, txd, txd->tx.cookie, txd->ddar);
165}
166
167static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
168 struct sa11x0_dma_chan *c)
169{
170 struct sa11x0_dma_desc *txd = p->txd_load;
171 struct sa11x0_dma_sg *sg;
172 void __iomem *base = p->base;
173 unsigned dbsx, dbtx;
174 u32 dcsr;
175
176 if (!txd)
177 return;
178
179 dcsr = readl_relaxed(base + DMA_DCSR_R);
180
181 /* Don't try to load the next transfer if both buffers are started */
182 if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
183 return;
184
185 if (p->sg_load == txd->sglen) {
186 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
187
188 /*
189 * We have reached the end of the current descriptor.
190 * Peek at the next descriptor, and if compatible with
191 * the current, start processing it.
192 */
193 if (txn && txn->ddar == txd->ddar) {
194 txd = txn;
195 sa11x0_dma_start_desc(p, txn);
196 } else {
197 p->txd_load = NULL;
198 return;
199 }
200 }
201
202 sg = &txd->sg[p->sg_load++];
203
204 /* Select buffer to load according to channel status */
205 if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
206 ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
207 dbsx = DMA_DBSA;
208 dbtx = DMA_DBTA;
209 dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
210 } else {
211 dbsx = DMA_DBSB;
212 dbtx = DMA_DBTB;
213 dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
214 }
215
216 writel_relaxed(sg->addr, base + dbsx);
217 writel_relaxed(sg->len, base + dbtx);
218 writel(dcsr, base + DMA_DCSR_S);
219
220 dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
221 p->num, dcsr,
222 'A' + (dbsx == DMA_DBSB), sg->addr,
223 'A' + (dbtx == DMA_DBTB), sg->len);
224}
225
226static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
227 struct sa11x0_dma_chan *c)
228{
229 struct sa11x0_dma_desc *txd = p->txd_done;
230
231 if (++p->sg_done == txd->sglen) {
232 struct sa11x0_dma_dev *d = p->dev;
233
234 dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
235 p->num, p->txd_done, p->txd_done->tx.cookie);
236
237 c->lc = txd->tx.cookie;
238
239 spin_lock(&d->lock);
240 list_add_tail(&txd->node, &d->desc_complete);
241 spin_unlock(&d->lock);
242
243 p->sg_done = 0;
244 p->txd_done = p->txd_load;
245
246 tasklet_schedule(&d->task);
247 }
248
249 sa11x0_dma_start_sg(p, c);
250}
251
252static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
253{
254 struct sa11x0_dma_phy *p = dev_id;
255 struct sa11x0_dma_dev *d = p->dev;
256 struct sa11x0_dma_chan *c;
257 u32 dcsr;
258
259 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
260 if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
261 return IRQ_NONE;
262
263 /* Clear reported status bits */
264 writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
265 p->base + DMA_DCSR_C);
266
267 dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
268
269 if (dcsr & DCSR_ERROR) {
270 dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
271 p->num, dcsr,
272 readl_relaxed(p->base + DMA_DDAR),
273 readl_relaxed(p->base + DMA_DBSA),
274 readl_relaxed(p->base + DMA_DBTA),
275 readl_relaxed(p->base + DMA_DBSB),
276 readl_relaxed(p->base + DMA_DBTB));
277 }
278
279 c = p->vchan;
280 if (c) {
281 unsigned long flags;
282
283 spin_lock_irqsave(&c->lock, flags);
284 /*
285 * Now that we're holding the lock, check that the vchan
286 * really is associated with this pchan before touching the
287 * hardware. This should always succeed, because we won't
288 * change p->vchan or c->phy while the channel is actively
289 * transferring.
290 */
291 if (c->phy == p) {
292 if (dcsr & DCSR_DONEA)
293 sa11x0_dma_complete(p, c);
294 if (dcsr & DCSR_DONEB)
295 sa11x0_dma_complete(p, c);
296 }
297 spin_unlock_irqrestore(&c->lock, flags);
298 }
299
300 return IRQ_HANDLED;
301}
302
303static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
304{
305 struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
306
307 /* If the issued list is empty, we have no further txds to process */
308 if (txd) {
309 struct sa11x0_dma_phy *p = c->phy;
310
311 sa11x0_dma_start_desc(p, txd);
312 p->txd_done = txd;
313 p->sg_done = 0;
314
315 /* The channel should not have any transfers started */
316 WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
317 (DCSR_STRTA | DCSR_STRTB));
318
319 /* Clear the run and start bits before changing DDAR */
320 writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
321 p->base + DMA_DCSR_C);
322 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
323
324 /* Try to start both buffers */
325 sa11x0_dma_start_sg(p, c);
326 sa11x0_dma_start_sg(p, c);
327 }
328}
329
330static void sa11x0_dma_tasklet(unsigned long arg)
331{
332 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
333 struct sa11x0_dma_phy *p;
334 struct sa11x0_dma_chan *c;
335 struct sa11x0_dma_desc *txd, *txn;
336 LIST_HEAD(head);
337 unsigned pch, pch_alloc = 0;
338
339 dev_dbg(d->slave.dev, "tasklet enter\n");
340
341 /* Get the completed tx descriptors */
342 spin_lock_irq(&d->lock);
343 list_splice_init(&d->desc_complete, &head);
344 spin_unlock_irq(&d->lock);
345
346 list_for_each_entry(txd, &head, node) {
347 c = to_sa11x0_dma_chan(txd->tx.chan);
348
349 dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
350 c, txd, txd->tx.cookie);
351
352 spin_lock_irq(&c->lock);
353 p = c->phy;
354 if (p) {
355 if (!p->txd_done)
356 sa11x0_dma_start_txd(c);
357 if (!p->txd_done) {
358 /* No current txd associated with this channel */
359 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
360
361 /* Mark this channel free */
362 c->phy = NULL;
363 p->vchan = NULL;
364 }
365 }
366 spin_unlock_irq(&c->lock);
367 }
368
369 spin_lock_irq(&d->lock);
370 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
371 p = &d->phy[pch];
372
373 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
374 c = list_first_entry(&d->chan_pending,
375 struct sa11x0_dma_chan, node);
376 list_del_init(&c->node);
377
378 pch_alloc |= 1 << pch;
379
380 /* Mark this channel allocated */
381 p->vchan = c;
382
383 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
384 }
385 }
386 spin_unlock_irq(&d->lock);
387
388 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
389 if (pch_alloc & (1 << pch)) {
390 p = &d->phy[pch];
391 c = p->vchan;
392
393 spin_lock_irq(&c->lock);
394 c->phy = p;
395
396 sa11x0_dma_start_txd(c);
397 spin_unlock_irq(&c->lock);
398 }
399 }
400
401 /* Now free the completed tx descriptor, and call their callbacks */
402 list_for_each_entry_safe(txd, txn, &head, node) {
403 dma_async_tx_callback callback = txd->tx.callback;
404 void *callback_param = txd->tx.callback_param;
405
406 dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
407 txd, txd->tx.cookie);
408
409 kfree(txd);
410
411 if (callback)
412 callback(callback_param);
413 }
414
415 dev_dbg(d->slave.dev, "tasklet exit\n");
416}
417
418
419static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
420{
421 struct sa11x0_dma_desc *txd, *txn;
422
423 list_for_each_entry_safe(txd, txn, head, node) {
424 dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
425 kfree(txd);
426 }
427}
428
429static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
430{
431 return 0;
432}
433
434static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
435{
436 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
437 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
438 unsigned long flags;
439 LIST_HEAD(head);
440
441 spin_lock_irqsave(&c->lock, flags);
442 spin_lock(&d->lock);
443 list_del_init(&c->node);
444 spin_unlock(&d->lock);
445
446 list_splice_tail_init(&c->desc_submitted, &head);
447 list_splice_tail_init(&c->desc_issued, &head);
448 spin_unlock_irqrestore(&c->lock, flags);
449
450 sa11x0_dma_desc_free(d, &head);
451}
452
453static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
454{
455 unsigned reg;
456 u32 dcsr;
457
458 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
459
460 if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
461 (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
462 reg = DMA_DBSA;
463 else
464 reg = DMA_DBSB;
465
466 return readl_relaxed(p->base + reg);
467}
468
469static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
470 dma_cookie_t cookie, struct dma_tx_state *state)
471{
472 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
473 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
474 struct sa11x0_dma_phy *p;
475 struct sa11x0_dma_desc *txd;
476 dma_cookie_t last_used, last_complete;
477 unsigned long flags;
478 enum dma_status ret;
479 size_t bytes = 0;
480
481 last_used = c->chan.cookie;
482 last_complete = c->lc;
483
484 ret = dma_async_is_complete(cookie, last_complete, last_used);
485 if (ret == DMA_SUCCESS) {
486 dma_set_tx_state(state, last_complete, last_used, 0);
487 return ret;
488 }
489
490 spin_lock_irqsave(&c->lock, flags);
491 p = c->phy;
492 ret = c->status;
493 if (p) {
494 dma_addr_t addr = sa11x0_dma_pos(p);
495
496 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
497
498 txd = p->txd_done;
499 if (txd) {
500 unsigned i;
501
502 for (i = 0; i < txd->sglen; i++) {
503 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
504 i, txd->sg[i].addr, txd->sg[i].len);
505 if (addr >= txd->sg[i].addr &&
506 addr < txd->sg[i].addr + txd->sg[i].len) {
507 unsigned len;
508
509 len = txd->sg[i].len -
510 (addr - txd->sg[i].addr);
511 dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
512 i, len);
513 bytes += len;
514 i++;
515 break;
516 }
517 }
518 for (; i < txd->sglen; i++) {
519 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
520 i, txd->sg[i].addr, txd->sg[i].len);
521 bytes += txd->sg[i].len;
522 }
523 }
524 if (txd != p->txd_load && p->txd_load)
525 bytes += p->txd_load->size;
526 }
527 list_for_each_entry(txd, &c->desc_issued, node) {
528 bytes += txd->size;
529 }
530 spin_unlock_irqrestore(&c->lock, flags);
531
532 dma_set_tx_state(state, last_complete, last_used, bytes);
533
534 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
535
536 return ret;
537}
538
539/*
540 * Move pending txds to the issued list, and re-init pending list.
541 * If not already pending, add this channel to the list of pending
542 * channels and trigger the tasklet to run.
543 */
544static void sa11x0_dma_issue_pending(struct dma_chan *chan)
545{
546 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
547 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
548 unsigned long flags;
549
550 spin_lock_irqsave(&c->lock, flags);
551 list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
552 if (!list_empty(&c->desc_issued)) {
553 spin_lock(&d->lock);
554 if (!c->phy && list_empty(&c->node)) {
555 list_add_tail(&c->node, &d->chan_pending);
556 tasklet_schedule(&d->task);
557 dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
558 }
559 spin_unlock(&d->lock);
560 } else
561 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
562 spin_unlock_irqrestore(&c->lock, flags);
563}
564
565static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
566{
567 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
568 struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
569 unsigned long flags;
570
571 spin_lock_irqsave(&c->lock, flags);
572 c->chan.cookie += 1;
573 if (c->chan.cookie < 0)
574 c->chan.cookie = 1;
575 txd->tx.cookie = c->chan.cookie;
576
577 list_add_tail(&txd->node, &c->desc_submitted);
578 spin_unlock_irqrestore(&c->lock, flags);
579
580 dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
581 c, txd, txd->tx.cookie);
582
583 return txd->tx.cookie;
584}
585
586static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
587 struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
588 enum dma_transfer_direction dir, unsigned long flags)
589{
590 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
591 struct sa11x0_dma_desc *txd;
592 struct scatterlist *sgent;
593 unsigned i, j = sglen;
594 size_t size = 0;
595
596 /* SA11x0 channels can only operate in their native direction */
597 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
598 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
599 c, c->ddar, dir);
600 return NULL;
601 }
602
603 /* Do not allow zero-sized txds */
604 if (sglen == 0)
605 return NULL;
606
607 for_each_sg(sg, sgent, sglen, i) {
608 dma_addr_t addr = sg_dma_address(sgent);
609 unsigned int len = sg_dma_len(sgent);
610
611 if (len > DMA_MAX_SIZE)
612 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
613 if (addr & DMA_ALIGN) {
614 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
615 c, addr);
616 return NULL;
617 }
618 }
619
620 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
621 if (!txd) {
622 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
623 return NULL;
624 }
625
626 j = 0;
627 for_each_sg(sg, sgent, sglen, i) {
628 dma_addr_t addr = sg_dma_address(sgent);
629 unsigned len = sg_dma_len(sgent);
630
631 size += len;
632
633 do {
634 unsigned tlen = len;
635
636 /*
637 * Check whether the transfer will fit. If not, try
638 * to split the transfer up such that we end up with
639 * equal chunks - but make sure that we preserve the
640 * alignment. This avoids small segments.
641 */
642 if (tlen > DMA_MAX_SIZE) {
643 unsigned mult = DIV_ROUND_UP(tlen,
644 DMA_MAX_SIZE & ~DMA_ALIGN);
645
646 tlen = (tlen / mult) & ~DMA_ALIGN;
647 }
648
649 txd->sg[j].addr = addr;
650 txd->sg[j].len = tlen;
651
652 addr += tlen;
653 len -= tlen;
654 j++;
655 } while (len);
656 }
657
658 dma_async_tx_descriptor_init(&txd->tx, &c->chan);
659 txd->tx.flags = flags;
660 txd->tx.tx_submit = sa11x0_dma_tx_submit;
661 txd->ddar = c->ddar;
662 txd->size = size;
663 txd->sglen = j;
664
665 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
666 c, txd, txd->size, txd->sglen);
667
668 return &txd->tx;
669}
670
671static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
672{
673 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
674 dma_addr_t addr;
675 enum dma_slave_buswidth width;
676 u32 maxburst;
677
678 if (ddar & DDAR_RW) {
679 addr = cfg->src_addr;
680 width = cfg->src_addr_width;
681 maxburst = cfg->src_maxburst;
682 } else {
683 addr = cfg->dst_addr;
684 width = cfg->dst_addr_width;
685 maxburst = cfg->dst_maxburst;
686 }
687
688 if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
689 width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
690 (maxburst != 4 && maxburst != 8))
691 return -EINVAL;
692
693 if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
694 ddar |= DDAR_DW;
695 if (maxburst == 8)
696 ddar |= DDAR_BS;
697
698 dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
699 c, addr, width, maxburst);
700
701 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
702
703 return 0;
704}
705
706static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
707 unsigned long arg)
708{
709 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
710 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
711 struct sa11x0_dma_phy *p;
712 LIST_HEAD(head);
713 unsigned long flags;
714 int ret;
715
716 switch (cmd) {
717 case DMA_SLAVE_CONFIG:
718 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
719
720 case DMA_TERMINATE_ALL:
721 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
722 /* Clear the tx descriptor lists */
723 spin_lock_irqsave(&c->lock, flags);
724 list_splice_tail_init(&c->desc_submitted, &head);
725 list_splice_tail_init(&c->desc_issued, &head);
726
727 p = c->phy;
728 if (p) {
729 struct sa11x0_dma_desc *txd, *txn;
730
731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
732 /* vchan is assigned to a pchan - stop the channel */
733 writel(DCSR_RUN | DCSR_IE |
734 DCSR_STRTA | DCSR_DONEA |
735 DCSR_STRTB | DCSR_DONEB,
736 p->base + DMA_DCSR_C);
737
738 list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
739 if (txd->tx.chan == &c->chan)
740 list_move(&txd->node, &head);
741
742 if (p->txd_load) {
743 if (p->txd_load != p->txd_done)
744 list_add_tail(&p->txd_load->node, &head);
745 p->txd_load = NULL;
746 }
747 if (p->txd_done) {
748 list_add_tail(&p->txd_done->node, &head);
749 p->txd_done = NULL;
750 }
751 c->phy = NULL;
752 spin_lock(&d->lock);
753 p->vchan = NULL;
754 spin_unlock(&d->lock);
755 tasklet_schedule(&d->task);
756 }
757 spin_unlock_irqrestore(&c->lock, flags);
758 sa11x0_dma_desc_free(d, &head);
759 ret = 0;
760 break;
761
762 case DMA_PAUSE:
763 dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
764 spin_lock_irqsave(&c->lock, flags);
765 if (c->status == DMA_IN_PROGRESS) {
766 c->status = DMA_PAUSED;
767
768 p = c->phy;
769 if (p) {
770 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
771 } else {
772 spin_lock(&d->lock);
773 list_del_init(&c->node);
774 spin_unlock(&d->lock);
775 }
776 }
777 spin_unlock_irqrestore(&c->lock, flags);
778 ret = 0;
779 break;
780
781 case DMA_RESUME:
782 dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
783 spin_lock_irqsave(&c->lock, flags);
784 if (c->status == DMA_PAUSED) {
785 c->status = DMA_IN_PROGRESS;
786
787 p = c->phy;
788 if (p) {
789 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
790 } else if (!list_empty(&c->desc_issued)) {
791 spin_lock(&d->lock);
792 list_add_tail(&c->node, &d->chan_pending);
793 spin_unlock(&d->lock);
794 }
795 }
796 spin_unlock_irqrestore(&c->lock, flags);
797 ret = 0;
798 break;
799
800 default:
801 ret = -ENXIO;
802 break;
803 }
804
805 return ret;
806}
807
808struct sa11x0_dma_channel_desc {
809 u32 ddar;
810 const char *name;
811};
812
813#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
814static const struct sa11x0_dma_channel_desc chan_desc[] = {
815 CD(Ser0UDCTr, 0),
816 CD(Ser0UDCRc, DDAR_RW),
817 CD(Ser1SDLCTr, 0),
818 CD(Ser1SDLCRc, DDAR_RW),
819 CD(Ser1UARTTr, 0),
820 CD(Ser1UARTRc, DDAR_RW),
821 CD(Ser2ICPTr, 0),
822 CD(Ser2ICPRc, DDAR_RW),
823 CD(Ser3UARTTr, 0),
824 CD(Ser3UARTRc, DDAR_RW),
825 CD(Ser4MCP0Tr, 0),
826 CD(Ser4MCP0Rc, DDAR_RW),
827 CD(Ser4MCP1Tr, 0),
828 CD(Ser4MCP1Rc, DDAR_RW),
829 CD(Ser4SSPTr, 0),
830 CD(Ser4SSPRc, DDAR_RW),
831};
832
833static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
834 struct device *dev)
835{
836 unsigned i;
837
838 dmadev->chancnt = ARRAY_SIZE(chan_desc);
839 INIT_LIST_HEAD(&dmadev->channels);
840 dmadev->dev = dev;
841 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
842 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
843 dmadev->device_control = sa11x0_dma_control;
844 dmadev->device_tx_status = sa11x0_dma_tx_status;
845 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
846
847 for (i = 0; i < dmadev->chancnt; i++) {
848 struct sa11x0_dma_chan *c;
849
850 c = kzalloc(sizeof(*c), GFP_KERNEL);
851 if (!c) {
852 dev_err(dev, "no memory for channel %u\n", i);
853 return -ENOMEM;
854 }
855
856 c->chan.device = dmadev;
857 c->status = DMA_IN_PROGRESS;
858 c->ddar = chan_desc[i].ddar;
859 c->name = chan_desc[i].name;
860 spin_lock_init(&c->lock);
861 INIT_LIST_HEAD(&c->desc_submitted);
862 INIT_LIST_HEAD(&c->desc_issued);
863 INIT_LIST_HEAD(&c->node);
864 list_add_tail(&c->chan.device_node, &dmadev->channels);
865 }
866
867 return dma_async_device_register(dmadev);
868}
869
870static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
871 void *data)
872{
873 int irq = platform_get_irq(pdev, nr);
874
875 if (irq <= 0)
876 return -ENXIO;
877
878 return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
879}
880
881static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
882 void *data)
883{
884 int irq = platform_get_irq(pdev, nr);
885 if (irq > 0)
886 free_irq(irq, data);
887}
888
889static void sa11x0_dma_free_channels(struct dma_device *dmadev)
890{
891 struct sa11x0_dma_chan *c, *cn;
892
893 list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
894 list_del(&c->chan.device_node);
895 kfree(c);
896 }
897}
898
899static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
900{
901 struct sa11x0_dma_dev *d;
902 struct resource *res;
903 unsigned i;
904 int ret;
905
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
907 if (!res)
908 return -ENXIO;
909
910 d = kzalloc(sizeof(*d), GFP_KERNEL);
911 if (!d) {
912 ret = -ENOMEM;
913 goto err_alloc;
914 }
915
916 spin_lock_init(&d->lock);
917 INIT_LIST_HEAD(&d->chan_pending);
918 INIT_LIST_HEAD(&d->desc_complete);
919
920 d->base = ioremap(res->start, resource_size(res));
921 if (!d->base) {
922 ret = -ENOMEM;
923 goto err_ioremap;
924 }
925
926 tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
927
928 for (i = 0; i < NR_PHY_CHAN; i++) {
929 struct sa11x0_dma_phy *p = &d->phy[i];
930
931 p->dev = d;
932 p->num = i;
933 p->base = d->base + i * DMA_SIZE;
934 writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
935 DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
936 p->base + DMA_DCSR_C);
937 writel_relaxed(0, p->base + DMA_DDAR);
938
939 ret = sa11x0_dma_request_irq(pdev, i, p);
940 if (ret) {
941 while (i) {
942 i--;
943 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
944 }
945 goto err_irq;
946 }
947 }
948
949 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
950 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
951 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
952 if (ret) {
953 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
954 ret);
955 goto err_slave_reg;
956 }
957
958 platform_set_drvdata(pdev, d);
959 return 0;
960
961 err_slave_reg:
962 sa11x0_dma_free_channels(&d->slave);
963 for (i = 0; i < NR_PHY_CHAN; i++)
964 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
965 err_irq:
966 tasklet_kill(&d->task);
967 iounmap(d->base);
968 err_ioremap:
969 kfree(d);
970 err_alloc:
971 return ret;
972}
973
974static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
975{
976 struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
977 unsigned pch;
978
979 dma_async_device_unregister(&d->slave);
980
981 sa11x0_dma_free_channels(&d->slave);
982 for (pch = 0; pch < NR_PHY_CHAN; pch++)
983 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
984 tasklet_kill(&d->task);
985 iounmap(d->base);
986 kfree(d);
987
988 return 0;
989}
990
991#ifdef CONFIG_PM_SLEEP
992static int sa11x0_dma_suspend(struct device *dev)
993{
994 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
995 unsigned pch;
996
997 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
998 struct sa11x0_dma_phy *p = &d->phy[pch];
999 u32 dcsr, saved_dcsr;
1000
1001 dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1002 if (dcsr & DCSR_RUN) {
1003 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1004 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1005 }
1006
1007 saved_dcsr &= DCSR_RUN | DCSR_IE;
1008 if (dcsr & DCSR_BIU) {
1009 p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
1010 p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
1011 p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
1012 p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
1013 saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
1014 (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
1015 } else {
1016 p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
1017 p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
1018 p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
1019 p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
1020 saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
1021 }
1022 p->dcsr = saved_dcsr;
1023
1024 writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1025 }
1026
1027 return 0;
1028}
1029
1030static int sa11x0_dma_resume(struct device *dev)
1031{
1032 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1033 unsigned pch;
1034
1035 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1036 struct sa11x0_dma_phy *p = &d->phy[pch];
1037 struct sa11x0_dma_desc *txd = NULL;
1038 u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1039
1040 WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
1041
1042 if (p->txd_done)
1043 txd = p->txd_done;
1044 else if (p->txd_load)
1045 txd = p->txd_load;
1046
1047 if (!txd)
1048 continue;
1049
1050 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
1051
1052 writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
1053 writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
1054 writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
1055 writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
1056 writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
1057 }
1058
1059 return 0;
1060}
1061#endif
1062
1063static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1064 .suspend_noirq = sa11x0_dma_suspend,
1065 .resume_noirq = sa11x0_dma_resume,
1066 .freeze_noirq = sa11x0_dma_suspend,
1067 .thaw_noirq = sa11x0_dma_resume,
1068 .poweroff_noirq = sa11x0_dma_suspend,
1069 .restore_noirq = sa11x0_dma_resume,
1070};
1071
1072static struct platform_driver sa11x0_dma_driver = {
1073 .driver = {
1074 .name = "sa11x0-dma",
1075 .owner = THIS_MODULE,
1076 .pm = &sa11x0_dma_pm_ops,
1077 },
1078 .probe = sa11x0_dma_probe,
1079 .remove = __devexit_p(sa11x0_dma_remove),
1080};
1081
1082bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
1083{
1084 if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
1085 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
1086 const char *p = param;
1087
1088 return !strcmp(c->name, p);
1089 }
1090 return false;
1091}
1092EXPORT_SYMBOL(sa11x0_dma_filter_fn);
1093
1094static int __init sa11x0_dma_init(void)
1095{
1096 return platform_driver_register(&sa11x0_dma_driver);
1097}
1098subsys_initcall(sa11x0_dma_init);
1099
1100static void __exit sa11x0_dma_exit(void)
1101{
1102 platform_driver_unregister(&sa11x0_dma_driver);
1103}
1104module_exit(sa11x0_dma_exit);
1105
1106MODULE_AUTHOR("Russell King");
1107MODULE_DESCRIPTION("SA-11x0 DMA driver");
1108MODULE_LICENSE("GPL v2");
1109MODULE_ALIAS("platform:sa11x0-dma");
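To round out the picture of the new driver's slave interface, here is a similarly hedged sketch of the dmaengine_slave_config() step that sa11x0_dma_slave_config() above expects from a client before transfers are prepared: the hardware only takes 1- or 2-byte bus widths and bursts of 4 or 8 (anything else returns -EINVAL), and the address must be the on-chip peripheral's FIFO. EXAMPLE_FIFO_PHYS and the function name are placeholders, not values from this patch.

/*
 * Illustrative sketch only: configuring a requested SA-11x0 slave channel.
 * sa11x0_dma_slave_config() accepts just 1- or 2-byte bus widths and a
 * maxburst of 4 or 8; EXAMPLE_FIFO_PHYS stands in for the real on-chip
 * peripheral FIFO address.
 */
#include <linux/dmaengine.h>

#define EXAMPLE_FIFO_PHYS	0x80000000	/* placeholder, not a real FIFO */

static int example_config_rx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= EXAMPLE_FIFO_PHYS,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 8,	/* selects the DDAR_BS burst-of-8 mode */
	};

	/* Dispatched through sa11x0_dma_control(..., DMA_SLAVE_CONFIG, ...) */
	return dmaengine_slave_config(chan, &cfg);
}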
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 7eecf69362ee..8ea3b33d4b40 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13 13
14#include <mach/hardware.h> 14#include <mach/hardware.h>
15#include <mach/irqs.h>
15 16
16static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset) 17static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
17{ 18{
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index eeafc30b207b..9d639fa1afbd 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -27,6 +27,7 @@
27 27
28#include <mach/jornada720.h> 28#include <mach/jornada720.h>
29#include <mach/hardware.h> 29#include <mach/hardware.h>
30#include <mach/irqs.h>
30 31
31MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>"); 32MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
32MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver"); 33MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 8b44ddc8041c..58b224498b35 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -36,7 +36,6 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39#include <asm/irq.h>
40#include <mach/hardware.h> 39#include <mach/hardware.h>
41#include <asm/hardware/iomd.h> 40#include <asm/hardware/iomd.h>
42#include <asm/system.h> 41#include <asm/system.h>
@@ -46,6 +45,11 @@ MODULE_DESCRIPTION("Acorn RiscPC PS/2 keyboard controller driver");
46MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
47MODULE_ALIAS("platform:kart"); 46MODULE_ALIAS("platform:kart");
48 47
48struct rpckbd_data {
49 int tx_irq;
50 int rx_irq;
51};
52
49static int rpckbd_write(struct serio *port, unsigned char val) 53static int rpckbd_write(struct serio *port, unsigned char val)
50{ 54{
51 while (!(iomd_readb(IOMD_KCTRL) & (1 << 7))) 55 while (!(iomd_readb(IOMD_KCTRL) & (1 << 7)))
@@ -78,19 +82,21 @@ static irqreturn_t rpckbd_tx(int irq, void *dev_id)
78 82
79static int rpckbd_open(struct serio *port) 83static int rpckbd_open(struct serio *port)
80{ 84{
85 struct rpckbd_data *rpckbd = port->port_data;
86
81 /* Reset the keyboard state machine. */ 87 /* Reset the keyboard state machine. */
82 iomd_writeb(0, IOMD_KCTRL); 88 iomd_writeb(0, IOMD_KCTRL);
83 iomd_writeb(8, IOMD_KCTRL); 89 iomd_writeb(8, IOMD_KCTRL);
84 iomd_readb(IOMD_KARTRX); 90 iomd_readb(IOMD_KARTRX);
85 91
86 if (request_irq(IRQ_KEYBOARDRX, rpckbd_rx, 0, "rpckbd", port) != 0) { 92 if (request_irq(rpckbd->rx_irq, rpckbd_rx, 0, "rpckbd", port) != 0) {
87 printk(KERN_ERR "rpckbd.c: Could not allocate keyboard receive IRQ\n"); 93 printk(KERN_ERR "rpckbd.c: Could not allocate keyboard receive IRQ\n");
88 return -EBUSY; 94 return -EBUSY;
89 } 95 }
90 96
91 if (request_irq(IRQ_KEYBOARDTX, rpckbd_tx, 0, "rpckbd", port) != 0) { 97 if (request_irq(rpckbd->tx_irq, rpckbd_tx, 0, "rpckbd", port) != 0) {
92 printk(KERN_ERR "rpckbd.c: Could not allocate keyboard transmit IRQ\n"); 98 printk(KERN_ERR "rpckbd.c: Could not allocate keyboard transmit IRQ\n");
93 free_irq(IRQ_KEYBOARDRX, port); 99 free_irq(rpckbd->rx_irq, port);
94 return -EBUSY; 100 return -EBUSY;
95 } 101 }
96 102
@@ -99,8 +105,10 @@ static int rpckbd_open(struct serio *port)
99 105
100static void rpckbd_close(struct serio *port) 106static void rpckbd_close(struct serio *port)
101{ 107{
102 free_irq(IRQ_KEYBOARDRX, port); 108 struct rpckbd_data *rpckbd = port->port_data;
103 free_irq(IRQ_KEYBOARDTX, port); 109
110 free_irq(rpckbd->rx_irq, port);
111 free_irq(rpckbd->tx_irq, port);
104} 112}
105 113
106/* 114/*
@@ -109,17 +117,35 @@ static void rpckbd_close(struct serio *port)
109 */ 117 */
110static int __devinit rpckbd_probe(struct platform_device *dev) 118static int __devinit rpckbd_probe(struct platform_device *dev)
111{ 119{
120 struct rpckbd_data *rpckbd;
112 struct serio *serio; 121 struct serio *serio;
122 int tx_irq, rx_irq;
123
124 rx_irq = platform_get_irq(dev, 0);
125 if (rx_irq <= 0)
126 return rx_irq < 0 ? rx_irq : -ENXIO;
127
128 tx_irq = platform_get_irq(dev, 1);
129 if (tx_irq <= 0)
130 return tx_irq < 0 ? tx_irq : -ENXIO;
113 131
114 serio = kzalloc(sizeof(struct serio), GFP_KERNEL); 132 serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
115 if (!serio) 133 rpckbd = kzalloc(sizeof(*rpckbd), GFP_KERNEL);
134 if (!serio || !rpckbd) {
135 kfree(rpckbd);
136 kfree(serio);
116 return -ENOMEM; 137 return -ENOMEM;
138 }
139
140 rpckbd->rx_irq = rx_irq;
141 rpckbd->tx_irq = tx_irq;
117 142
118 serio->id.type = SERIO_8042; 143 serio->id.type = SERIO_8042;
119 serio->write = rpckbd_write; 144 serio->write = rpckbd_write;
120 serio->open = rpckbd_open; 145 serio->open = rpckbd_open;
121 serio->close = rpckbd_close; 146 serio->close = rpckbd_close;
122 serio->dev.parent = &dev->dev; 147 serio->dev.parent = &dev->dev;
148 serio->port_data = rpckbd;
123 strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name)); 149 strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
124 strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys)); 150 strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
125 151
@@ -131,7 +157,11 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
131static int __devexit rpckbd_remove(struct platform_device *dev) 157static int __devexit rpckbd_remove(struct platform_device *dev)
132{ 158{
133 struct serio *serio = platform_get_drvdata(dev); 159 struct serio *serio = platform_get_drvdata(dev);
160 struct rpckbd_data *rpckbd = serio->port_data;
161
134 serio_unregister_port(serio); 162 serio_unregister_port(serio);
163 kfree(rpckbd);
164
135 return 0; 165 return 0;
136} 166}
137 167
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 44fc8b4bcd81..5ebabe3fc845 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -24,6 +24,26 @@
24 24
25#include <asm/hardware/sa1111.h> 25#include <asm/hardware/sa1111.h>
26 26
27#define PS2CR 0x0000
28#define PS2STAT 0x0004
29#define PS2DATA 0x0008
30#define PS2CLKDIV 0x000c
31#define PS2PRECNT 0x0010
32
33#define PS2CR_ENA 0x08
34#define PS2CR_FKD 0x02
35#define PS2CR_FKC 0x01
36
37#define PS2STAT_STP 0x0100
38#define PS2STAT_TXE 0x0080
39#define PS2STAT_TXB 0x0040
40#define PS2STAT_RXF 0x0020
41#define PS2STAT_RXB 0x0010
42#define PS2STAT_ENA 0x0008
43#define PS2STAT_RXP 0x0004
44#define PS2STAT_KBD 0x0002
45#define PS2STAT_KBC 0x0001
46
27struct ps2if { 47struct ps2if {
28 struct serio *io; 48 struct serio *io;
29 struct sa1111_dev *dev; 49 struct sa1111_dev *dev;
@@ -45,22 +65,22 @@ static irqreturn_t ps2_rxint(int irq, void *dev_id)
45 struct ps2if *ps2if = dev_id; 65 struct ps2if *ps2if = dev_id;
46 unsigned int scancode, flag, status; 66 unsigned int scancode, flag, status;
47 67
48 status = sa1111_readl(ps2if->base + SA1111_PS2STAT); 68 status = sa1111_readl(ps2if->base + PS2STAT);
49 while (status & PS2STAT_RXF) { 69 while (status & PS2STAT_RXF) {
50 if (status & PS2STAT_STP) 70 if (status & PS2STAT_STP)
51 sa1111_writel(PS2STAT_STP, ps2if->base + SA1111_PS2STAT); 71 sa1111_writel(PS2STAT_STP, ps2if->base + PS2STAT);
52 72
53 flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) | 73 flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
54 (status & PS2STAT_RXP ? 0 : SERIO_PARITY); 74 (status & PS2STAT_RXP ? 0 : SERIO_PARITY);
55 75
56 scancode = sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff; 76 scancode = sa1111_readl(ps2if->base + PS2DATA) & 0xff;
57 77
58 if (hweight8(scancode) & 1) 78 if (hweight8(scancode) & 1)
59 flag ^= SERIO_PARITY; 79 flag ^= SERIO_PARITY;
60 80
61 serio_interrupt(ps2if->io, scancode, flag); 81 serio_interrupt(ps2if->io, scancode, flag);
62 82
63 status = sa1111_readl(ps2if->base + SA1111_PS2STAT); 83 status = sa1111_readl(ps2if->base + PS2STAT);
64 } 84 }
65 85
66 return IRQ_HANDLED; 86 return IRQ_HANDLED;
@@ -75,12 +95,12 @@ static irqreturn_t ps2_txint(int irq, void *dev_id)
75 unsigned int status; 95 unsigned int status;
76 96
77 spin_lock(&ps2if->lock); 97 spin_lock(&ps2if->lock);
78 status = sa1111_readl(ps2if->base + SA1111_PS2STAT); 98 status = sa1111_readl(ps2if->base + PS2STAT);
79 if (ps2if->head == ps2if->tail) { 99 if (ps2if->head == ps2if->tail) {
80 disable_irq_nosync(irq); 100 disable_irq_nosync(irq);
81 /* done */ 101 /* done */
82 } else if (status & PS2STAT_TXE) { 102 } else if (status & PS2STAT_TXE) {
83 sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + SA1111_PS2DATA); 103 sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
84 ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1); 104 ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
85 } 105 }
86 spin_unlock(&ps2if->lock); 106 spin_unlock(&ps2if->lock);
@@ -103,8 +123,8 @@ static int ps2_write(struct serio *io, unsigned char val)
103 /* 123 /*
104 * If the TX register is empty, we can go straight out. 124 * If the TX register is empty, we can go straight out.
105 */ 125 */
106 if (sa1111_readl(ps2if->base + SA1111_PS2STAT) & PS2STAT_TXE) { 126 if (sa1111_readl(ps2if->base + PS2STAT) & PS2STAT_TXE) {
107 sa1111_writel(val, ps2if->base + SA1111_PS2DATA); 127 sa1111_writel(val, ps2if->base + PS2DATA);
108 } else { 128 } else {
109 if (ps2if->head == ps2if->tail) 129 if (ps2if->head == ps2if->tail)
110 enable_irq(ps2if->dev->irq[1]); 130 enable_irq(ps2if->dev->irq[1]);
@@ -124,13 +144,16 @@ static int ps2_open(struct serio *io)
124 struct ps2if *ps2if = io->port_data; 144 struct ps2if *ps2if = io->port_data;
125 int ret; 145 int ret;
126 146
127 sa1111_enable_device(ps2if->dev); 147 ret = sa1111_enable_device(ps2if->dev);
148 if (ret)
149 return ret;
128 150
129 ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0, 151 ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
130 SA1111_DRIVER_NAME(ps2if->dev), ps2if); 152 SA1111_DRIVER_NAME(ps2if->dev), ps2if);
131 if (ret) { 153 if (ret) {
132 printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n", 154 printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
133 ps2if->dev->irq[0], ret); 155 ps2if->dev->irq[0], ret);
156 sa1111_disable_device(ps2if->dev);
134 return ret; 157 return ret;
135 } 158 }
136 159
@@ -140,6 +163,7 @@ static int ps2_open(struct serio *io)
140 printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n", 163 printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
141 ps2if->dev->irq[1], ret); 164 ps2if->dev->irq[1], ret);
142 free_irq(ps2if->dev->irq[0], ps2if); 165 free_irq(ps2if->dev->irq[0], ps2if);
166 sa1111_disable_device(ps2if->dev);
143 return ret; 167 return ret;
144 } 168 }
145 169
@@ -147,7 +171,7 @@ static int ps2_open(struct serio *io)
147 171
148 enable_irq_wake(ps2if->dev->irq[0]); 172 enable_irq_wake(ps2if->dev->irq[0]);
149 173
150 sa1111_writel(PS2CR_ENA, ps2if->base + SA1111_PS2CR); 174 sa1111_writel(PS2CR_ENA, ps2if->base + PS2CR);
151 return 0; 175 return 0;
152} 176}
153 177
@@ -155,7 +179,7 @@ static void ps2_close(struct serio *io)
155{ 179{
156 struct ps2if *ps2if = io->port_data; 180 struct ps2if *ps2if = io->port_data;
157 181
158 sa1111_writel(0, ps2if->base + SA1111_PS2CR); 182 sa1111_writel(0, ps2if->base + PS2CR);
159 183
160 disable_irq_wake(ps2if->dev->irq[0]); 184 disable_irq_wake(ps2if->dev->irq[0]);
161 185
@@ -175,7 +199,7 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
175 int maxread = 100; 199 int maxread = 100;
176 200
177 while (maxread--) { 201 while (maxread--) {
178 if ((sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff) == 0xff) 202 if ((sa1111_readl(ps2if->base + PS2DATA) & 0xff) == 0xff)
179 break; 203 break;
180 } 204 }
181} 205}
@@ -185,11 +209,11 @@ static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
185{ 209{
186 unsigned int val; 210 unsigned int val;
187 211
188 sa1111_writel(PS2CR_ENA | mask, ps2if->base + SA1111_PS2CR); 212 sa1111_writel(PS2CR_ENA | mask, ps2if->base + PS2CR);
189 213
190 udelay(2); 214 udelay(2);
191 215
192 val = sa1111_readl(ps2if->base + SA1111_PS2STAT); 216 val = sa1111_readl(ps2if->base + PS2STAT);
193 return val & (PS2STAT_KBC | PS2STAT_KBD); 217 return val & (PS2STAT_KBC | PS2STAT_KBD);
194} 218}
195 219
@@ -220,7 +244,7 @@ static int __devinit ps2_test(struct ps2if *ps2if)
220 ret = -ENODEV; 244 ret = -ENODEV;
221 } 245 }
222 246
223 sa1111_writel(0, ps2if->base + SA1111_PS2CR); 247 sa1111_writel(0, ps2if->base + PS2CR);
224 248
225 return ret; 249 return ret;
226} 250}
@@ -274,8 +298,8 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
274 sa1111_enable_device(ps2if->dev); 298 sa1111_enable_device(ps2if->dev);
275 299
276 /* Incoming clock is 8MHz */ 300 /* Incoming clock is 8MHz */
277 sa1111_writel(0, ps2if->base + SA1111_PS2CLKDIV); 301 sa1111_writel(0, ps2if->base + PS2CLKDIV);
278 sa1111_writel(127, ps2if->base + SA1111_PS2PRECNT); 302 sa1111_writel(127, ps2if->base + PS2PRECNT);
279 303
280 /* 304 /*
281 * Flush any pending input. 305 * Flush any pending input.
@@ -330,6 +354,7 @@ static int __devexit ps2_remove(struct sa1111_dev *dev)
330static struct sa1111_driver ps2_driver = { 354static struct sa1111_driver ps2_driver = {
331 .drv = { 355 .drv = {
332 .name = "sa1111-ps2", 356 .name = "sa1111-ps2",
357 .owner = THIS_MODULE,
333 }, 358 },
334 .devid = SA1111_DEVID_PS2, 359 .devid = SA1111_DEVID_PS2,
335 .probe = ps2_probe, 360 .probe = ps2_probe,
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index c3848ad2325b..d9be6eac99b1 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -22,6 +22,7 @@
22 22
23#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/jornada720.h> 24#include <mach/jornada720.h>
25#include <mach/irqs.h>
25 26
26MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>"); 27MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
27MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver"); 28MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1489c3540f96..243e0c663c37 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -848,8 +848,9 @@ config MCP_SA11X0
848 848
849# Chip drivers 849# Chip drivers
850config MCP_UCB1200 850config MCP_UCB1200
851 tristate "Support for UCB1200 / UCB1300" 851 bool "Support for UCB1200 / UCB1300"
852 depends on MCP 852 depends on MCP_SA11X0
853 select MCP
853 854
854config MCP_UCB1200_TS 855config MCP_UCB1200_TS
855 tristate "Touchscreen interface support" 856 tristate "Touchscreen interface support"
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 86cc3f7841cd..6acf2e03f2ba 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -19,7 +19,6 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/mfd/mcp.h> 20#include <linux/mfd/mcp.h>
21 21
22#include <mach/dma.h>
23#include <asm/system.h> 22#include <asm/system.h>
24 23
25 24
@@ -48,39 +47,11 @@ static int mcp_bus_remove(struct device *dev)
48 return 0; 47 return 0;
49} 48}
50 49
51static int mcp_bus_suspend(struct device *dev, pm_message_t state)
52{
53 struct mcp *mcp = to_mcp(dev);
54 int ret = 0;
55
56 if (dev->driver) {
57 struct mcp_driver *drv = to_mcp_driver(dev->driver);
58
59 ret = drv->suspend(mcp, state);
60 }
61 return ret;
62}
63
64static int mcp_bus_resume(struct device *dev)
65{
66 struct mcp *mcp = to_mcp(dev);
67 int ret = 0;
68
69 if (dev->driver) {
70 struct mcp_driver *drv = to_mcp_driver(dev->driver);
71
72 ret = drv->resume(mcp);
73 }
74 return ret;
75}
76
77static struct bus_type mcp_bus_type = { 50static struct bus_type mcp_bus_type = {
78 .name = "mcp", 51 .name = "mcp",
79 .match = mcp_bus_match, 52 .match = mcp_bus_match,
80 .probe = mcp_bus_probe, 53 .probe = mcp_bus_probe,
81 .remove = mcp_bus_remove, 54 .remove = mcp_bus_remove,
82 .suspend = mcp_bus_suspend,
83 .resume = mcp_bus_resume,
84}; 55};
85 56
86/** 57/**
@@ -208,6 +179,7 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
208 mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL); 179 mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL);
209 if (mcp) { 180 if (mcp) {
210 spin_lock_init(&mcp->lock); 181 spin_lock_init(&mcp->lock);
182 device_initialize(&mcp->attached_device);
211 mcp->attached_device.parent = parent; 183 mcp->attached_device.parent = parent;
212 mcp->attached_device.bus = &mcp_bus_type; 184 mcp->attached_device.bus = &mcp_bus_type;
213 mcp->attached_device.dma_mask = parent->dma_mask; 185 mcp->attached_device.dma_mask = parent->dma_mask;
@@ -217,18 +189,25 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
217} 189}
218EXPORT_SYMBOL(mcp_host_alloc); 190EXPORT_SYMBOL(mcp_host_alloc);
219 191
220int mcp_host_register(struct mcp *mcp) 192int mcp_host_add(struct mcp *mcp, void *pdata)
221{ 193{
194 mcp->attached_device.platform_data = pdata;
222 dev_set_name(&mcp->attached_device, "mcp0"); 195 dev_set_name(&mcp->attached_device, "mcp0");
223 return device_register(&mcp->attached_device); 196 return device_add(&mcp->attached_device);
197}
198EXPORT_SYMBOL(mcp_host_add);
199
200void mcp_host_del(struct mcp *mcp)
201{
202 device_del(&mcp->attached_device);
224} 203}
225EXPORT_SYMBOL(mcp_host_register); 204EXPORT_SYMBOL(mcp_host_del);
226 205
227void mcp_host_unregister(struct mcp *mcp) 206void mcp_host_free(struct mcp *mcp)
228{ 207{
229 device_unregister(&mcp->attached_device); 208 put_device(&mcp->attached_device);
230} 209}
231EXPORT_SYMBOL(mcp_host_unregister); 210EXPORT_SYMBOL(mcp_host_free);
232 211
233int mcp_driver_register(struct mcp_driver *mcpdrv) 212int mcp_driver_register(struct mcp_driver *mcpdrv)
234{ 213{
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 02c53a0766c4..1c0ceacaa1f6 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -13,51 +13,61 @@
13 */ 13 */
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/io.h>
16#include <linux/errno.h> 17#include <linux/errno.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/delay.h> 19#include <linux/delay.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/pm.h>
21#include <linux/mfd/mcp.h> 23#include <linux/mfd/mcp.h>
22 24
23#include <mach/dma.h>
24#include <mach/hardware.h> 25#include <mach/hardware.h>
25#include <asm/mach-types.h> 26#include <asm/mach-types.h>
26#include <asm/system.h> 27#include <asm/system.h>
27#include <mach/mcp.h> 28#include <mach/mcp.h>
28 29
29#include <mach/assabet.h> 30#define DRIVER_NAME "sa11x0-mcp"
30
31 31
32struct mcp_sa11x0 { 32struct mcp_sa11x0 {
33 u32 mccr0; 33 void __iomem *base0;
34 u32 mccr1; 34 void __iomem *base1;
35 u32 mccr0;
36 u32 mccr1;
35}; 37};
36 38
39/* Register offsets */
40#define MCCR0(m) ((m)->base0 + 0x00)
41#define MCDR0(m) ((m)->base0 + 0x08)
42#define MCDR1(m) ((m)->base0 + 0x0c)
43#define MCDR2(m) ((m)->base0 + 0x10)
44#define MCSR(m) ((m)->base0 + 0x18)
45#define MCCR1(m) ((m)->base1 + 0x00)
46
37#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp)) 47#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
38 48
39static void 49static void
40mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor) 50mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
41{ 51{
42 unsigned int mccr0; 52 struct mcp_sa11x0 *m = priv(mcp);
43 53
44 divisor /= 32; 54 divisor /= 32;
45 55
46 mccr0 = Ser4MCCR0 & ~0x00007f00; 56 m->mccr0 &= ~0x00007f00;
47 mccr0 |= divisor << 8; 57 m->mccr0 |= divisor << 8;
48 Ser4MCCR0 = mccr0; 58 writel_relaxed(m->mccr0, MCCR0(m));
49} 59}
50 60
51static void 61static void
52mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor) 62mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
53{ 63{
54 unsigned int mccr0; 64 struct mcp_sa11x0 *m = priv(mcp);
55 65
56 divisor /= 32; 66 divisor /= 32;
57 67
58 mccr0 = Ser4MCCR0 & ~0x0000007f; 68 m->mccr0 &= ~0x0000007f;
59 mccr0 |= divisor; 69 m->mccr0 |= divisor;
60 Ser4MCCR0 = mccr0; 70 writel_relaxed(m->mccr0, MCCR0(m));
61} 71}
62 72
63/* 73/*
@@ -69,14 +79,15 @@ mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
69static void 79static void
70mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val) 80mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
71{ 81{
82 struct mcp_sa11x0 *m = priv(mcp);
72 int ret = -ETIME; 83 int ret = -ETIME;
73 int i; 84 int i;
74 85
75 Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff); 86 writel_relaxed(reg << 17 | MCDR2_Wr | (val & 0xffff), MCDR2(m));
76 87
77 for (i = 0; i < 2; i++) { 88 for (i = 0; i < 2; i++) {
78 udelay(mcp->rw_timeout); 89 udelay(mcp->rw_timeout);
79 if (Ser4MCSR & MCSR_CWC) { 90 if (readl_relaxed(MCSR(m)) & MCSR_CWC) {
80 ret = 0; 91 ret = 0;
81 break; 92 break;
82 } 93 }
@@ -95,15 +106,16 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
95static unsigned int 106static unsigned int
96mcp_sa11x0_read(struct mcp *mcp, unsigned int reg) 107mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
97{ 108{
109 struct mcp_sa11x0 *m = priv(mcp);
98 int ret = -ETIME; 110 int ret = -ETIME;
99 int i; 111 int i;
100 112
101 Ser4MCDR2 = reg << 17 | MCDR2_Rd; 113 writel_relaxed(reg << 17 | MCDR2_Rd, MCDR2(m));
102 114
103 for (i = 0; i < 2; i++) { 115 for (i = 0; i < 2; i++) {
104 udelay(mcp->rw_timeout); 116 udelay(mcp->rw_timeout);
105 if (Ser4MCSR & MCSR_CRC) { 117 if (readl_relaxed(MCSR(m)) & MCSR_CRC) {
106 ret = Ser4MCDR2 & 0xffff; 118 ret = readl_relaxed(MCDR2(m)) & 0xffff;
107 break; 119 break;
108 } 120 }
109 } 121 }
@@ -116,13 +128,19 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
116 128
117static void mcp_sa11x0_enable(struct mcp *mcp) 129static void mcp_sa11x0_enable(struct mcp *mcp)
118{ 130{
119 Ser4MCSR = -1; 131 struct mcp_sa11x0 *m = priv(mcp);
120 Ser4MCCR0 |= MCCR0_MCE; 132
133 writel(-1, MCSR(m));
134 m->mccr0 |= MCCR0_MCE;
135 writel_relaxed(m->mccr0, MCCR0(m));
121} 136}
122 137
123static void mcp_sa11x0_disable(struct mcp *mcp) 138static void mcp_sa11x0_disable(struct mcp *mcp)
124{ 139{
125 Ser4MCCR0 &= ~MCCR0_MCE; 140 struct mcp_sa11x0 *m = priv(mcp);
141
142 m->mccr0 &= ~MCCR0_MCE;
143 writel_relaxed(m->mccr0, MCCR0(m));
126} 144}
127 145
128/* 146/*
@@ -137,55 +155,64 @@ static struct mcp_ops mcp_sa11x0 = {
137 .disable = mcp_sa11x0_disable, 155 .disable = mcp_sa11x0_disable,
138}; 156};
139 157
140static int mcp_sa11x0_probe(struct platform_device *pdev) 158static int mcp_sa11x0_probe(struct platform_device *dev)
141{ 159{
142 struct mcp_plat_data *data = pdev->dev.platform_data; 160 struct mcp_plat_data *data = dev->dev.platform_data;
161 struct resource *mem0, *mem1;
162 struct mcp_sa11x0 *m;
143 struct mcp *mcp; 163 struct mcp *mcp;
144 int ret; 164 int ret;
145 165
146 if (!data) 166 if (!data)
147 return -ENODEV; 167 return -ENODEV;
148 168
149 if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp")) 169 mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
150 return -EBUSY; 170 mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
171 if (!mem0 || !mem1)
172 return -ENXIO;
173
174 if (!request_mem_region(mem0->start, resource_size(mem0),
175 DRIVER_NAME)) {
176 ret = -EBUSY;
177 goto err_mem0;
178 }
151 179
152 mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0)); 180 if (!request_mem_region(mem1->start, resource_size(mem1),
181 DRIVER_NAME)) {
182 ret = -EBUSY;
183 goto err_mem1;
184 }
185
186 mcp = mcp_host_alloc(&dev->dev, sizeof(struct mcp_sa11x0));
153 if (!mcp) { 187 if (!mcp) {
154 ret = -ENOMEM; 188 ret = -ENOMEM;
155 goto release; 189 goto err_alloc;
156 } 190 }
157 191
158 mcp->owner = THIS_MODULE; 192 mcp->owner = THIS_MODULE;
159 mcp->ops = &mcp_sa11x0; 193 mcp->ops = &mcp_sa11x0;
160 mcp->sclk_rate = data->sclk_rate; 194 mcp->sclk_rate = data->sclk_rate;
161 mcp->dma_audio_rd = DMA_Ser4MCP0Rd;
162 mcp->dma_audio_wr = DMA_Ser4MCP0Wr;
163 mcp->dma_telco_rd = DMA_Ser4MCP1Rd;
164 mcp->dma_telco_wr = DMA_Ser4MCP1Wr;
165 mcp->gpio_base = data->gpio_base;
166 195
167 platform_set_drvdata(pdev, mcp); 196 m = priv(mcp);
197 m->mccr0 = data->mccr0 | 0x7f7f;
198 m->mccr1 = data->mccr1;
168 199
169 if (machine_is_assabet()) { 200 m->base0 = ioremap(mem0->start, resource_size(mem0));
170 ASSABET_BCR_set(ASSABET_BCR_CODEC_RST); 201 m->base1 = ioremap(mem1->start, resource_size(mem1));
202 if (!m->base0 || !m->base1) {
203 ret = -ENOMEM;
204 goto err_ioremap;
171 } 205 }
172 206
173 /* 207 platform_set_drvdata(dev, mcp);
174 * Setup the PPC unit correctly.
175 */
176 PPDR &= ~PPC_RXD4;
177 PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
178 PSDR |= PPC_RXD4;
179 PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
180 PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
181 208
182 /* 209 /*
183 * Initialise device. Note that we initially 210 * Initialise device. Note that we initially
184 * set the sampling rate to minimum. 211 * set the sampling rate to minimum.
185 */ 212 */
186 Ser4MCSR = -1; 213 writel_relaxed(-1, MCSR(m));
187 Ser4MCCR1 = data->mccr1; 214 writel_relaxed(m->mccr1, MCCR1(m));
188 Ser4MCCR0 = data->mccr0 | 0x7f7f; 215 writel_relaxed(m->mccr0, MCCR0(m));
189 216
190 /* 217 /*
191 * Calculate the read/write timeout (us) from the bit clock 218 * Calculate the read/write timeout (us) from the bit clock
@@ -195,62 +222,90 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
195 mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) / 222 mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
196 mcp->sclk_rate; 223 mcp->sclk_rate;
197 224
198 ret = mcp_host_register(mcp); 225 ret = mcp_host_add(mcp, data->codec_pdata);
199 if (ret == 0) 226 if (ret == 0)
200 goto out; 227 return 0;
201 228
202 release: 229 platform_set_drvdata(dev, NULL);
203 release_mem_region(0x80060000, 0x60);
204 platform_set_drvdata(pdev, NULL);
205 230
206 out: 231 err_ioremap:
232 iounmap(m->base1);
233 iounmap(m->base0);
234 mcp_host_free(mcp);
235 err_alloc:
236 release_mem_region(mem1->start, resource_size(mem1));
237 err_mem1:
238 release_mem_region(mem0->start, resource_size(mem0));
239 err_mem0:
207 return ret; 240 return ret;
208} 241}
209 242
210static int mcp_sa11x0_remove(struct platform_device *dev) 243static int mcp_sa11x0_remove(struct platform_device *dev)
211{ 244{
212 struct mcp *mcp = platform_get_drvdata(dev); 245 struct mcp *mcp = platform_get_drvdata(dev);
246 struct mcp_sa11x0 *m = priv(mcp);
247 struct resource *mem0, *mem1;
248
249 if (m->mccr0 & MCCR0_MCE)
250 dev_warn(&dev->dev,
251 "device left active (missing disable call?)\n");
252
253 mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
254 mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
213 255
214 platform_set_drvdata(dev, NULL); 256 platform_set_drvdata(dev, NULL);
215 mcp_host_unregister(mcp); 257 mcp_host_del(mcp);
216 release_mem_region(0x80060000, 0x60); 258 iounmap(m->base1);
259 iounmap(m->base0);
260 mcp_host_free(mcp);
261 release_mem_region(mem1->start, resource_size(mem1));
262 release_mem_region(mem0->start, resource_size(mem0));
217 263
218 return 0; 264 return 0;
219} 265}
220 266
221static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) 267#ifdef CONFIG_PM_SLEEP
268static int mcp_sa11x0_suspend(struct device *dev)
222{ 269{
223 struct mcp *mcp = platform_get_drvdata(dev); 270 struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));
271
272 if (m->mccr0 & MCCR0_MCE)
273 dev_warn(dev, "device left active (missing disable call?)\n");
224 274
225 priv(mcp)->mccr0 = Ser4MCCR0; 275 writel(m->mccr0 & ~MCCR0_MCE, MCCR0(m));
226 priv(mcp)->mccr1 = Ser4MCCR1;
227 Ser4MCCR0 &= ~MCCR0_MCE;
228 276
229 return 0; 277 return 0;
230} 278}
231 279
232static int mcp_sa11x0_resume(struct platform_device *dev) 280static int mcp_sa11x0_resume(struct device *dev)
233{ 281{
234 struct mcp *mcp = platform_get_drvdata(dev); 282 struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));
235 283
236 Ser4MCCR1 = priv(mcp)->mccr1; 284 writel_relaxed(m->mccr1, MCCR1(m));
237 Ser4MCCR0 = priv(mcp)->mccr0; 285 writel_relaxed(m->mccr0, MCCR0(m));
238 286
239 return 0; 287 return 0;
240} 288}
241 289#endif
242/* 290
243 * The driver for the SA11x0 MCP port. 291static const struct dev_pm_ops mcp_sa11x0_pm_ops = {
244 */ 292#ifdef CONFIG_PM_SLEEP
245MODULE_ALIAS("platform:sa11x0-mcp"); 293 .suspend = mcp_sa11x0_suspend,
294 .freeze = mcp_sa11x0_suspend,
295 .poweroff = mcp_sa11x0_suspend,
296 .resume_noirq = mcp_sa11x0_resume,
297 .thaw_noirq = mcp_sa11x0_resume,
298 .restore_noirq = mcp_sa11x0_resume,
299#endif
300};
246 301
247static struct platform_driver mcp_sa11x0_driver = { 302static struct platform_driver mcp_sa11x0_driver = {
248 .probe = mcp_sa11x0_probe, 303 .probe = mcp_sa11x0_probe,
249 .remove = mcp_sa11x0_remove, 304 .remove = mcp_sa11x0_remove,
250 .suspend = mcp_sa11x0_suspend,
251 .resume = mcp_sa11x0_resume,
252 .driver = { 305 .driver = {
253 .name = "sa11x0-mcp", 306 .name = DRIVER_NAME,
307 .owner = THIS_MODULE,
308 .pm = &mcp_sa11x0_pm_ops,
254 }, 309 },
255}; 310};
256 311
@@ -259,6 +314,7 @@ static struct platform_driver mcp_sa11x0_driver = {
259 */ 314 */
260module_platform_driver(mcp_sa11x0_driver); 315module_platform_driver(mcp_sa11x0_driver);
261 316
317MODULE_ALIAS("platform:" DRIVER_NAME);
262MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 318MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
263MODULE_DESCRIPTION("SA11x0 multimedia communications port driver"); 319MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
264MODULE_LICENSE("GPL"); 320MODULE_LICENSE("GPL");
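
For context on the mcp-sa11x0 changes above: the driver stops writing the statically mapped Ser4MCCR0/Ser4MCDR2 register macros and instead ioremap()s its two memory resources, keeps cached copies of MCCR0/MCCR1 in its private data, and accesses the hardware through readl_relaxed()/writel_relaxed(). The snippet below is only a minimal sketch of that general pattern; every "demo" name and the 0x00 offset are invented for illustration and are not part of mcp-sa11x0.c.

	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct demo_priv {
		void __iomem *base;	/* ioremap()ed register window */
		u32 ctrl;		/* cached control register value */
	};

	#define DEMO_CTRL(p)	((p)->base + 0x00)	/* hypothetical offset */

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		struct demo_priv *p;

		if (!mem)
			return -ENXIO;

		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		p->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
		if (!p->base)
			return -ENOMEM;

		/* keep a shadow copy, then push it out with a relaxed accessor,
		 * the same idiom the diff above uses for MCCR0/MCCR1 */
		p->ctrl |= 0x1;
		writel_relaxed(p->ctrl, DEMO_CTRL(p));

		platform_set_drvdata(pdev, p);
		return 0;
	}
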
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c
index cea9da60850d..b63c0756a669 100644
--- a/drivers/mfd/ucb1x00-assabet.c
+++ b/drivers/mfd/ucb1x00-assabet.c
@@ -11,14 +11,15 @@
11 */ 11 */
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/device.h>
15#include <linux/err.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/gpio_keys.h>
18#include <linux/input.h>
19#include <linux/platform_device.h>
15#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
16#include <linux/device.h>
17#include <linux/mfd/ucb1x00.h> 21#include <linux/mfd/ucb1x00.h>
18 22
19#include <mach/dma.h>
20
21
22#define UCB1X00_ATTR(name,input)\ 23#define UCB1X00_ATTR(name,input)\
23static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ 24static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
24 char *buf) \ 25 char *buf) \
@@ -38,14 +39,45 @@ UCB1X00_ATTR(batt_temp, UCB_ADC_INP_AD2);
38 39
39static int ucb1x00_assabet_add(struct ucb1x00_dev *dev) 40static int ucb1x00_assabet_add(struct ucb1x00_dev *dev)
40{ 41{
41 device_create_file(&dev->ucb->dev, &dev_attr_vbatt); 42 struct ucb1x00 *ucb = dev->ucb;
42 device_create_file(&dev->ucb->dev, &dev_attr_vcharger); 43 struct platform_device *pdev;
43 device_create_file(&dev->ucb->dev, &dev_attr_batt_temp); 44 struct gpio_keys_platform_data keys;
45 static struct gpio_keys_button buttons[6];
46 unsigned i;
47
48 memset(buttons, 0, sizeof(buttons));
49 memset(&keys, 0, sizeof(keys));
50
51 for (i = 0; i < ARRAY_SIZE(buttons); i++) {
52 buttons[i].code = BTN_0 + i;
53 buttons[i].gpio = ucb->gpio.base + i;
54 buttons[i].type = EV_KEY;
55 buttons[i].can_disable = true;
56 }
57
58 keys.buttons = buttons;
59 keys.nbuttons = ARRAY_SIZE(buttons);
60 keys.poll_interval = 50;
61 keys.name = "ucb1x00";
62
63 pdev = platform_device_register_data(&ucb->dev, "gpio-keys", -1,
64 &keys, sizeof(keys));
65
66 device_create_file(&ucb->dev, &dev_attr_vbatt);
67 device_create_file(&ucb->dev, &dev_attr_vcharger);
68 device_create_file(&ucb->dev, &dev_attr_batt_temp);
69
70 dev->priv = pdev;
44 return 0; 71 return 0;
45} 72}
46 73
47static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev) 74static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev)
48{ 75{
76 struct platform_device *pdev = dev->priv;
77
78 if (!IS_ERR(pdev))
79 platform_device_unregister(pdev);
80
49 device_remove_file(&dev->ucb->dev, &dev_attr_batt_temp); 81 device_remove_file(&dev->ucb->dev, &dev_attr_batt_temp);
50 device_remove_file(&dev->ucb->dev, &dev_attr_vcharger); 82 device_remove_file(&dev->ucb->dev, &dev_attr_vcharger);
51 device_remove_file(&dev->ucb->dev, &dev_attr_vbatt); 83 device_remove_file(&dev->ucb->dev, &dev_attr_vbatt);
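
One detail worth noting in the ucb1x00-assabet hunk above: platform_device_register_data() copies the supplied platform data and, on failure, returns an ERR_PTR() value rather than NULL, which is why the removal path only calls platform_device_unregister() when !IS_ERR(pdev). A minimal sketch of the same idiom follows; the "demo" names and payload are hypothetical, not part of the driver.

	#include <linux/err.h>
	#include <linux/platform_device.h>

	struct demo_cfg {
		int id;			/* hypothetical payload, copied by the core */
	};

	static struct platform_device *demo_register_child(struct device *parent)
	{
		static const struct demo_cfg cfg = { .id = 1 };
		struct platform_device *pdev;

		pdev = platform_device_register_data(parent, "demo-child", -1,
						     &cfg, sizeof(cfg));
		if (IS_ERR(pdev))
			dev_warn(parent, "demo-child registration failed: %ld\n",
				 PTR_ERR(pdev));

		/* callers keep the pointer and test IS_ERR() again before
		 * platform_device_unregister(), as the remove path above does */
		return pdev;
	}
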
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index febc90cdef7e..70f02daeb22a 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -23,14 +23,12 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/irq.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/mutex.h> 28#include <linux/mutex.h>
28#include <linux/mfd/ucb1x00.h> 29#include <linux/mfd/ucb1x00.h>
30#include <linux/pm.h>
29#include <linux/gpio.h> 31#include <linux/gpio.h>
30#include <linux/semaphore.h>
31
32#include <mach/dma.h>
33#include <mach/hardware.h>
34 32
35static DEFINE_MUTEX(ucb1x00_mutex); 33static DEFINE_MUTEX(ucb1x00_mutex);
36static LIST_HEAD(ucb1x00_drivers); 34static LIST_HEAD(ucb1x00_drivers);
@@ -102,7 +100,7 @@ void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
102 * ucb1x00_enable must have been called to enable the comms 100 * ucb1x00_enable must have been called to enable the comms
103 * before using this function. 101 * before using this function.
104 * 102 *
105 * This function does not take any semaphores or spinlocks. 103 * This function does not take any mutexes or spinlocks.
106 */ 104 */
107unsigned int ucb1x00_io_read(struct ucb1x00 *ucb) 105unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
108{ 106{
@@ -120,14 +118,22 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
120 else 118 else
121 ucb->io_out &= ~(1 << offset); 119 ucb->io_out &= ~(1 << offset);
122 120
121 ucb1x00_enable(ucb);
123 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); 122 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
123 ucb1x00_disable(ucb);
124 spin_unlock_irqrestore(&ucb->io_lock, flags); 124 spin_unlock_irqrestore(&ucb->io_lock, flags);
125} 125}
126 126
127static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset) 127static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
128{ 128{
129 struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); 129 struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
130 return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset); 130 unsigned val;
131
132 ucb1x00_enable(ucb);
133 val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
134 ucb1x00_disable(ucb);
135
136 return val & (1 << offset);
131} 137}
132 138
133static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 139static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -137,7 +143,9 @@ static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
137 143
138 spin_lock_irqsave(&ucb->io_lock, flags); 144 spin_lock_irqsave(&ucb->io_lock, flags);
139 ucb->io_dir &= ~(1 << offset); 145 ucb->io_dir &= ~(1 << offset);
146 ucb1x00_enable(ucb);
140 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); 147 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
148 ucb1x00_disable(ucb);
141 spin_unlock_irqrestore(&ucb->io_lock, flags); 149 spin_unlock_irqrestore(&ucb->io_lock, flags);
142 150
143 return 0; 151 return 0;
@@ -157,6 +165,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
157 else 165 else
158 ucb->io_out &= ~mask; 166 ucb->io_out &= ~mask;
159 167
168 ucb1x00_enable(ucb);
160 if (old != ucb->io_out) 169 if (old != ucb->io_out)
161 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); 170 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
162 171
@@ -164,11 +173,19 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
164 ucb->io_dir |= mask; 173 ucb->io_dir |= mask;
165 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); 174 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
166 } 175 }
176 ucb1x00_disable(ucb);
167 spin_unlock_irqrestore(&ucb->io_lock, flags); 177 spin_unlock_irqrestore(&ucb->io_lock, flags);
168 178
169 return 0; 179 return 0;
170} 180}
171 181
182static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
183{
184 struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
185
186 return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
187}
188
172/* 189/*
173 * UCB1300 data sheet says we must: 190 * UCB1300 data sheet says we must:
174 * 1. enable ADC => 5us (including reference startup time) 191 * 1. enable ADC => 5us (including reference startup time)
@@ -186,7 +203,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
186 * Any code wishing to use the ADC converter must call this 203 * Any code wishing to use the ADC converter must call this
187 * function prior to using it. 204 * function prior to using it.
188 * 205 *
189 * This function takes the ADC semaphore to prevent two or more 206 * This function takes the ADC mutex to prevent two or more
190 * concurrent uses, and therefore may sleep. As a result, it 207 * concurrent uses, and therefore may sleep. As a result, it
191 * can only be called from process context, not interrupt 208 * can only be called from process context, not interrupt
192 * context. 209 * context.
@@ -196,7 +213,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
196 */ 213 */
197void ucb1x00_adc_enable(struct ucb1x00 *ucb) 214void ucb1x00_adc_enable(struct ucb1x00 *ucb)
198{ 215{
199 down(&ucb->adc_sem); 216 mutex_lock(&ucb->adc_mutex);
200 217
201 ucb->adc_cr |= UCB_ADC_ENA; 218 ucb->adc_cr |= UCB_ADC_ENA;
202 219
@@ -218,7 +235,7 @@ void ucb1x00_adc_enable(struct ucb1x00 *ucb)
218 * complete (2 frames max without sync). 235 * complete (2 frames max without sync).
219 * 236 *
220 * If called for a synchronised ADC conversion, it may sleep 237 * If called for a synchronised ADC conversion, it may sleep
221 * with the ADC semaphore held. 238 * with the ADC mutex held.
222 */ 239 */
223unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) 240unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
224{ 241{
@@ -246,7 +263,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
246 * ucb1x00_adc_disable - disable the ADC converter 263 * ucb1x00_adc_disable - disable the ADC converter
247 * @ucb: UCB1x00 structure describing chip 264 * @ucb: UCB1x00 structure describing chip
248 * 265 *
249 * Disable the ADC converter and release the ADC semaphore. 266 * Disable the ADC converter and release the ADC mutex.
250 */ 267 */
251void ucb1x00_adc_disable(struct ucb1x00 *ucb) 268void ucb1x00_adc_disable(struct ucb1x00 *ucb)
252{ 269{
@@ -254,7 +271,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
254 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); 271 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
255 ucb1x00_disable(ucb); 272 ucb1x00_disable(ucb);
256 273
257 up(&ucb->adc_sem); 274 mutex_unlock(&ucb->adc_mutex);
258} 275}
259 276
260/* 277/*
@@ -265,10 +282,9 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
265 * SIBCLK to talk to the chip. We leave the clock running until 282 * SIBCLK to talk to the chip. We leave the clock running until
266 * we have finished processing all interrupts from the chip. 283 * we have finished processing all interrupts from the chip.
267 */ 284 */
268static irqreturn_t ucb1x00_irq(int irqnr, void *devid) 285static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
269{ 286{
270 struct ucb1x00 *ucb = devid; 287 struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
271 struct ucb1x00_irq *irq;
272 unsigned int isr, i; 288 unsigned int isr, i;
273 289
274 ucb1x00_enable(ucb); 290 ucb1x00_enable(ucb);
@@ -276,157 +292,104 @@ static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
276 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr); 292 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
277 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); 293 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
278 294
279 for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++) 295 for (i = 0; i < 16 && isr; i++, isr >>= 1)
280 if (isr & 1 && irq->fn) 296 if (isr & 1)
281 irq->fn(i, irq->devid); 297 generic_handle_irq(ucb->irq_base + i);
282 ucb1x00_disable(ucb); 298 ucb1x00_disable(ucb);
283
284 return IRQ_HANDLED;
285} 299}
286 300
287/** 301static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
288 * ucb1x00_hook_irq - hook a UCB1x00 interrupt
289 * @ucb: UCB1x00 structure describing chip
290 * @idx: interrupt index
291 * @fn: function to call when interrupt is triggered
292 * @devid: device id to pass to interrupt handler
293 *
294 * Hook the specified interrupt. You can only register one handler
295 * for each interrupt source. The interrupt source is not enabled
296 * by this function; use ucb1x00_enable_irq instead.
297 *
298 * Interrupt handlers will be called with other interrupts enabled.
299 *
300 * Returns zero on success, or one of the following errors:
301 * -EINVAL if the interrupt index is invalid
302 * -EBUSY if the interrupt has already been hooked
303 */
304int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
305{ 302{
306 struct ucb1x00_irq *irq; 303 ucb1x00_enable(ucb);
307 int ret = -EINVAL; 304 if (ucb->irq_ris_enbl & mask)
308 305 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
309 if (idx < 16) { 306 ucb->irq_mask);
310 irq = ucb->irq_handler + idx; 307 if (ucb->irq_fal_enbl & mask)
311 ret = -EBUSY; 308 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
312 309 ucb->irq_mask);
313 spin_lock_irq(&ucb->lock); 310 ucb1x00_disable(ucb);
314 if (irq->fn == NULL) {
315 irq->devid = devid;
316 irq->fn = fn;
317 ret = 0;
318 }
319 spin_unlock_irq(&ucb->lock);
320 }
321 return ret;
322} 311}
323 312
324/** 313static void ucb1x00_irq_noop(struct irq_data *data)
325 * ucb1x00_enable_irq - enable an UCB1x00 interrupt source
326 * @ucb: UCB1x00 structure describing chip
327 * @idx: interrupt index
328 * @edges: interrupt edges to enable
329 *
330 * Enable the specified interrupt to trigger on %UCB_RISING,
331 * %UCB_FALLING or both edges. The interrupt should have been
332 * hooked by ucb1x00_hook_irq.
333 */
334void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
335{ 314{
336 unsigned long flags; 315}
337 316
338 if (idx < 16) { 317static void ucb1x00_irq_mask(struct irq_data *data)
339 spin_lock_irqsave(&ucb->lock, flags); 318{
319 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
320 unsigned mask = 1 << (data->irq - ucb->irq_base);
340 321
341 ucb1x00_enable(ucb); 322 raw_spin_lock(&ucb->irq_lock);
342 if (edges & UCB_RISING) { 323 ucb->irq_mask &= ~mask;
343 ucb->irq_ris_enbl |= 1 << idx; 324 ucb1x00_irq_update(ucb, mask);
344 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl); 325 raw_spin_unlock(&ucb->irq_lock);
345 }
346 if (edges & UCB_FALLING) {
347 ucb->irq_fal_enbl |= 1 << idx;
348 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
349 }
350 ucb1x00_disable(ucb);
351 spin_unlock_irqrestore(&ucb->lock, flags);
352 }
353} 326}
354 327
355/** 328static void ucb1x00_irq_unmask(struct irq_data *data)
356 * ucb1x00_disable_irq - disable an UCB1x00 interrupt source
357 * @ucb: UCB1x00 structure describing chip
358 * @edges: interrupt edges to disable
359 *
360 * Disable the specified interrupt triggering on the specified
361 * (%UCB_RISING, %UCB_FALLING or both) edges.
362 */
363void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
364{ 329{
365 unsigned long flags; 330 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
331 unsigned mask = 1 << (data->irq - ucb->irq_base);
366 332
367 if (idx < 16) { 333 raw_spin_lock(&ucb->irq_lock);
368 spin_lock_irqsave(&ucb->lock, flags); 334 ucb->irq_mask |= mask;
369 335 ucb1x00_irq_update(ucb, mask);
370 ucb1x00_enable(ucb); 336 raw_spin_unlock(&ucb->irq_lock);
371 if (edges & UCB_RISING) {
372 ucb->irq_ris_enbl &= ~(1 << idx);
373 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
374 }
375 if (edges & UCB_FALLING) {
376 ucb->irq_fal_enbl &= ~(1 << idx);
377 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
378 }
379 ucb1x00_disable(ucb);
380 spin_unlock_irqrestore(&ucb->lock, flags);
381 }
382} 337}
383 338
384/** 339static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
385 * ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt
386 * @ucb: UCB1x00 structure describing chip
387 * @idx: interrupt index
388 * @devid: device id.
389 *
390 * Disable the interrupt source and remove the handler. devid must
391 * match the devid passed when hooking the interrupt.
392 *
393 * Returns zero on success, or one of the following errors:
394 * -EINVAL if the interrupt index is invalid
395 * -ENOENT if devid does not match
396 */
397int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
398{ 340{
399 struct ucb1x00_irq *irq; 341 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
400 int ret; 342 unsigned mask = 1 << (data->irq - ucb->irq_base);
401 343
402 if (idx >= 16) 344 raw_spin_lock(&ucb->irq_lock);
403 goto bad; 345 if (type & IRQ_TYPE_EDGE_RISING)
346 ucb->irq_ris_enbl |= mask;
347 else
348 ucb->irq_ris_enbl &= ~mask;
404 349
405 irq = ucb->irq_handler + idx; 350 if (type & IRQ_TYPE_EDGE_FALLING)
406 ret = -ENOENT; 351 ucb->irq_fal_enbl |= mask;
352 else
353 ucb->irq_fal_enbl &= ~mask;
354 if (ucb->irq_mask & mask) {
355 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
356 ucb->irq_mask);
357 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
358 ucb->irq_mask);
359 }
360 raw_spin_unlock(&ucb->irq_lock);
407 361
408 spin_lock_irq(&ucb->lock); 362 return 0;
409 if (irq->devid == devid) { 363}
410 ucb->irq_ris_enbl &= ~(1 << idx);
411 ucb->irq_fal_enbl &= ~(1 << idx);
412 364
413 ucb1x00_enable(ucb); 365static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
414 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl); 366{
415 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl); 367 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
416 ucb1x00_disable(ucb); 368 struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
369 unsigned mask = 1 << (data->irq - ucb->irq_base);
417 370
418 irq->fn = NULL; 371 if (!pdata || !pdata->can_wakeup)
419 irq->devid = NULL; 372 return -EINVAL;
420 ret = 0;
421 }
422 spin_unlock_irq(&ucb->lock);
423 return ret;
424 373
425bad: 374 raw_spin_lock(&ucb->irq_lock);
426 printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx); 375 if (on)
427 return -EINVAL; 376 ucb->irq_wake |= mask;
377 else
378 ucb->irq_wake &= ~mask;
379 raw_spin_unlock(&ucb->irq_lock);
380
381 return 0;
428} 382}
429 383
384static struct irq_chip ucb1x00_irqchip = {
385 .name = "ucb1x00",
386 .irq_ack = ucb1x00_irq_noop,
387 .irq_mask = ucb1x00_irq_mask,
388 .irq_unmask = ucb1x00_irq_unmask,
389 .irq_set_type = ucb1x00_irq_set_type,
390 .irq_set_wake = ucb1x00_irq_set_wake,
391};
392
430static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv) 393static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
431{ 394{
432 struct ucb1x00_dev *dev; 395 struct ucb1x00_dev *dev;
@@ -440,8 +403,8 @@ static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
440 ret = drv->add(dev); 403 ret = drv->add(dev);
441 404
442 if (ret == 0) { 405 if (ret == 0) {
443 list_add(&dev->dev_node, &ucb->devs); 406 list_add_tail(&dev->dev_node, &ucb->devs);
444 list_add(&dev->drv_node, &drv->devs); 407 list_add_tail(&dev->drv_node, &drv->devs);
445 } else { 408 } else {
446 kfree(dev); 409 kfree(dev);
447 } 410 }
@@ -533,98 +496,126 @@ static struct class ucb1x00_class = {
533 496
534static int ucb1x00_probe(struct mcp *mcp) 497static int ucb1x00_probe(struct mcp *mcp)
535{ 498{
536 struct ucb1x00 *ucb; 499 struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
537 struct ucb1x00_driver *drv; 500 struct ucb1x00_driver *drv;
538 unsigned int id; 501 struct ucb1x00 *ucb;
502 unsigned id, i, irq_base;
539 int ret = -ENODEV; 503 int ret = -ENODEV;
540 int temp; 504
505 /* Tell the platform to deassert the UCB1x00 reset */
506 if (pdata && pdata->reset)
507 pdata->reset(UCB_RST_PROBE);
541 508
542 mcp_enable(mcp); 509 mcp_enable(mcp);
543 id = mcp_reg_read(mcp, UCB_ID); 510 id = mcp_reg_read(mcp, UCB_ID);
511 mcp_disable(mcp);
544 512
545 if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) { 513 if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
546 printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id); 514 printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
547 goto err_disable; 515 goto out;
548 } 516 }
549 517
550 ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL); 518 ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
551 ret = -ENOMEM; 519 ret = -ENOMEM;
552 if (!ucb) 520 if (!ucb)
553 goto err_disable; 521 goto out;
554
555 522
523 device_initialize(&ucb->dev);
556 ucb->dev.class = &ucb1x00_class; 524 ucb->dev.class = &ucb1x00_class;
557 ucb->dev.parent = &mcp->attached_device; 525 ucb->dev.parent = &mcp->attached_device;
558 dev_set_name(&ucb->dev, "ucb1x00"); 526 dev_set_name(&ucb->dev, "ucb1x00");
559 527
560 spin_lock_init(&ucb->lock); 528 raw_spin_lock_init(&ucb->irq_lock);
561 spin_lock_init(&ucb->io_lock); 529 spin_lock_init(&ucb->io_lock);
562 sema_init(&ucb->adc_sem, 1); 530 mutex_init(&ucb->adc_mutex);
563 531
564 ucb->id = id; 532 ucb->id = id;
565 ucb->mcp = mcp; 533 ucb->mcp = mcp;
534
535 ret = device_add(&ucb->dev);
536 if (ret)
537 goto err_dev_add;
538
539 ucb1x00_enable(ucb);
566 ucb->irq = ucb1x00_detect_irq(ucb); 540 ucb->irq = ucb1x00_detect_irq(ucb);
541 ucb1x00_disable(ucb);
567 if (ucb->irq == NO_IRQ) { 542 if (ucb->irq == NO_IRQ) {
568 printk(KERN_ERR "UCB1x00: IRQ probe failed\n"); 543 dev_err(&ucb->dev, "IRQ probe failed\n");
569 ret = -ENODEV; 544 ret = -ENODEV;
570 goto err_free; 545 goto err_no_irq;
571 } 546 }
572 547
573 ucb->gpio.base = -1; 548 ucb->gpio.base = -1;
574 if (mcp->gpio_base != 0) { 549 irq_base = pdata ? pdata->irq_base : 0;
550 ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
551 if (ucb->irq_base < 0) {
552 dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
553 ucb->irq_base);
554 goto err_irq_alloc;
555 }
556
557 for (i = 0; i < 16; i++) {
558 unsigned irq = ucb->irq_base + i;
559
560 irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
561 irq_set_chip_data(irq, ucb);
562 set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
563 }
564
565 irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
566 irq_set_handler_data(ucb->irq, ucb);
567 irq_set_chained_handler(ucb->irq, ucb1x00_irq);
568
569 if (pdata && pdata->gpio_base) {
575 ucb->gpio.label = dev_name(&ucb->dev); 570 ucb->gpio.label = dev_name(&ucb->dev);
576 ucb->gpio.base = mcp->gpio_base; 571 ucb->gpio.dev = &ucb->dev;
572 ucb->gpio.owner = THIS_MODULE;
573 ucb->gpio.base = pdata->gpio_base;
577 ucb->gpio.ngpio = 10; 574 ucb->gpio.ngpio = 10;
578 ucb->gpio.set = ucb1x00_gpio_set; 575 ucb->gpio.set = ucb1x00_gpio_set;
579 ucb->gpio.get = ucb1x00_gpio_get; 576 ucb->gpio.get = ucb1x00_gpio_get;
580 ucb->gpio.direction_input = ucb1x00_gpio_direction_input; 577 ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
581 ucb->gpio.direction_output = ucb1x00_gpio_direction_output; 578 ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
579 ucb->gpio.to_irq = ucb1x00_to_irq;
582 ret = gpiochip_add(&ucb->gpio); 580 ret = gpiochip_add(&ucb->gpio);
583 if (ret) 581 if (ret)
584 goto err_free; 582 goto err_gpio_add;
585 } else 583 } else
586 dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); 584 dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
587 585
588 ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
589 "UCB1x00", ucb);
590 if (ret) {
591 printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
592 ucb->irq, ret);
593 goto err_gpio;
594 }
595
596 mcp_set_drvdata(mcp, ucb); 586 mcp_set_drvdata(mcp, ucb);
597 587
598 ret = device_register(&ucb->dev); 588 if (pdata)
599 if (ret) 589 device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
600 goto err_irq;
601
602 590
603 INIT_LIST_HEAD(&ucb->devs); 591 INIT_LIST_HEAD(&ucb->devs);
604 mutex_lock(&ucb1x00_mutex); 592 mutex_lock(&ucb1x00_mutex);
605 list_add(&ucb->node, &ucb1x00_devices); 593 list_add_tail(&ucb->node, &ucb1x00_devices);
606 list_for_each_entry(drv, &ucb1x00_drivers, node) { 594 list_for_each_entry(drv, &ucb1x00_drivers, node) {
607 ucb1x00_add_dev(ucb, drv); 595 ucb1x00_add_dev(ucb, drv);
608 } 596 }
609 mutex_unlock(&ucb1x00_mutex); 597 mutex_unlock(&ucb1x00_mutex);
610 598
611 goto out; 599 return ret;
612 600
613 err_irq: 601 err_gpio_add:
614 free_irq(ucb->irq, ucb); 602 irq_set_chained_handler(ucb->irq, NULL);
615 err_gpio: 603 err_irq_alloc:
616 if (ucb->gpio.base != -1) 604 if (ucb->irq_base > 0)
617 temp = gpiochip_remove(&ucb->gpio); 605 irq_free_descs(ucb->irq_base, 16);
618 err_free: 606 err_no_irq:
619 kfree(ucb); 607 device_del(&ucb->dev);
620 err_disable: 608 err_dev_add:
621 mcp_disable(mcp); 609 put_device(&ucb->dev);
622 out: 610 out:
611 if (pdata && pdata->reset)
612 pdata->reset(UCB_RST_PROBE_FAIL);
623 return ret; 613 return ret;
624} 614}
625 615
626static void ucb1x00_remove(struct mcp *mcp) 616static void ucb1x00_remove(struct mcp *mcp)
627{ 617{
618 struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
628 struct ucb1x00 *ucb = mcp_get_drvdata(mcp); 619 struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
629 struct list_head *l, *n; 620 struct list_head *l, *n;
630 int ret; 621 int ret;
@@ -643,8 +634,12 @@ static void ucb1x00_remove(struct mcp *mcp)
643 dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret); 634 dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret);
644 } 635 }
645 636
646 free_irq(ucb->irq, ucb); 637 irq_set_chained_handler(ucb->irq, NULL);
638 irq_free_descs(ucb->irq_base, 16);
647 device_unregister(&ucb->dev); 639 device_unregister(&ucb->dev);
640
641 if (pdata && pdata->reset)
642 pdata->reset(UCB_RST_REMOVE);
648} 643}
649 644
650int ucb1x00_register_driver(struct ucb1x00_driver *drv) 645int ucb1x00_register_driver(struct ucb1x00_driver *drv)
@@ -653,7 +648,7 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
653 648
654 INIT_LIST_HEAD(&drv->devs); 649 INIT_LIST_HEAD(&drv->devs);
655 mutex_lock(&ucb1x00_mutex); 650 mutex_lock(&ucb1x00_mutex);
656 list_add(&drv->node, &ucb1x00_drivers); 651 list_add_tail(&drv->node, &ucb1x00_drivers);
657 list_for_each_entry(ucb, &ucb1x00_devices, node) { 652 list_for_each_entry(ucb, &ucb1x00_devices, node) {
658 ucb1x00_add_dev(ucb, drv); 653 ucb1x00_add_dev(ucb, drv);
659 } 654 }
@@ -674,44 +669,86 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
674 mutex_unlock(&ucb1x00_mutex); 669 mutex_unlock(&ucb1x00_mutex);
675} 670}
676 671
677static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state) 672static int ucb1x00_suspend(struct device *dev)
678{ 673{
679 struct ucb1x00 *ucb = mcp_get_drvdata(mcp); 674 struct ucb1x00_plat_data *pdata = dev->platform_data;
680 struct ucb1x00_dev *dev; 675 struct ucb1x00 *ucb = dev_get_drvdata(dev);
676 struct ucb1x00_dev *udev;
681 677
682 mutex_lock(&ucb1x00_mutex); 678 mutex_lock(&ucb1x00_mutex);
683 list_for_each_entry(dev, &ucb->devs, dev_node) { 679 list_for_each_entry(udev, &ucb->devs, dev_node) {
684 if (dev->drv->suspend) 680 if (udev->drv->suspend)
685 dev->drv->suspend(dev, state); 681 udev->drv->suspend(udev);
686 } 682 }
687 mutex_unlock(&ucb1x00_mutex); 683 mutex_unlock(&ucb1x00_mutex);
684
685 if (ucb->irq_wake) {
686 unsigned long flags;
687
688 raw_spin_lock_irqsave(&ucb->irq_lock, flags);
689 ucb1x00_enable(ucb);
690 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
691 ucb->irq_wake);
692 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
693 ucb->irq_wake);
694 ucb1x00_disable(ucb);
695 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
696
697 enable_irq_wake(ucb->irq);
698 } else if (pdata && pdata->reset)
699 pdata->reset(UCB_RST_SUSPEND);
700
688 return 0; 701 return 0;
689} 702}
690 703
691static int ucb1x00_resume(struct mcp *mcp) 704static int ucb1x00_resume(struct device *dev)
692{ 705{
693 struct ucb1x00 *ucb = mcp_get_drvdata(mcp); 706 struct ucb1x00_plat_data *pdata = dev->platform_data;
694 struct ucb1x00_dev *dev; 707 struct ucb1x00 *ucb = dev_get_drvdata(dev);
708 struct ucb1x00_dev *udev;
709
710 if (!ucb->irq_wake && pdata && pdata->reset)
711 pdata->reset(UCB_RST_RESUME);
695 712
713 ucb1x00_enable(ucb);
696 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); 714 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
697 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); 715 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
716
717 if (ucb->irq_wake) {
718 unsigned long flags;
719
720 raw_spin_lock_irqsave(&ucb->irq_lock, flags);
721 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
722 ucb->irq_mask);
723 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
724 ucb->irq_mask);
725 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
726
727 disable_irq_wake(ucb->irq);
728 }
729 ucb1x00_disable(ucb);
730
698 mutex_lock(&ucb1x00_mutex); 731 mutex_lock(&ucb1x00_mutex);
699 list_for_each_entry(dev, &ucb->devs, dev_node) { 732 list_for_each_entry(udev, &ucb->devs, dev_node) {
700 if (dev->drv->resume) 733 if (udev->drv->resume)
701 dev->drv->resume(dev); 734 udev->drv->resume(udev);
702 } 735 }
703 mutex_unlock(&ucb1x00_mutex); 736 mutex_unlock(&ucb1x00_mutex);
704 return 0; 737 return 0;
705} 738}
706 739
740static const struct dev_pm_ops ucb1x00_pm_ops = {
741 SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
742};
743
707static struct mcp_driver ucb1x00_driver = { 744static struct mcp_driver ucb1x00_driver = {
708 .drv = { 745 .drv = {
709 .name = "ucb1x00", 746 .name = "ucb1x00",
747 .owner = THIS_MODULE,
748 .pm = &ucb1x00_pm_ops,
710 }, 749 },
711 .probe = ucb1x00_probe, 750 .probe = ucb1x00_probe,
712 .remove = ucb1x00_remove, 751 .remove = ucb1x00_remove,
713 .suspend = ucb1x00_suspend,
714 .resume = ucb1x00_resume,
715}; 752};
716 753
717static int __init ucb1x00_init(void) 754static int __init ucb1x00_init(void)
@@ -742,14 +779,10 @@ EXPORT_SYMBOL(ucb1x00_adc_enable);
742EXPORT_SYMBOL(ucb1x00_adc_read); 779EXPORT_SYMBOL(ucb1x00_adc_read);
743EXPORT_SYMBOL(ucb1x00_adc_disable); 780EXPORT_SYMBOL(ucb1x00_adc_disable);
744 781
745EXPORT_SYMBOL(ucb1x00_hook_irq);
746EXPORT_SYMBOL(ucb1x00_free_irq);
747EXPORT_SYMBOL(ucb1x00_enable_irq);
748EXPORT_SYMBOL(ucb1x00_disable_irq);
749
750EXPORT_SYMBOL(ucb1x00_register_driver); 782EXPORT_SYMBOL(ucb1x00_register_driver);
751EXPORT_SYMBOL(ucb1x00_unregister_driver); 783EXPORT_SYMBOL(ucb1x00_unregister_driver);
752 784
785MODULE_ALIAS("mcp:ucb1x00");
753MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 786MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
754MODULE_DESCRIPTION("UCB1x00 core driver"); 787MODULE_DESCRIPTION("UCB1x00 core driver");
755MODULE_LICENSE("GPL"); 788MODULE_LICENSE("GPL");
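
The ADC comments in the ucb1x00-core hunks above now refer to a mutex rather than a semaphore, but the calling convention for consumers is unchanged: take the ADC with ucb1x00_adc_enable() (which may sleep, so process context only), read one or more channels, then release it with ucb1x00_adc_disable(). A minimal usage sketch, assuming a struct ucb1x00 pointer obtained from a ucb1x00_dev and using the AD1 input as an arbitrary example channel:

	#include <linux/mfd/ucb1x00.h>

	static unsigned int demo_read_ad1(struct ucb1x00 *ucb)
	{
		unsigned int val;

		ucb1x00_adc_enable(ucb);	/* takes the ADC mutex, may sleep */
		val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD1, UCB_NOSYNC);
		ucb1x00_adc_disable(ucb);	/* releases the ADC mutex */

		return val;
	}
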
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 63a3cbdfa3f3..1e0e20c0e082 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -20,8 +20,9 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/smp.h> 23#include <linux/interrupt.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/spinlock.h>
25#include <linux/completion.h> 26#include <linux/completion.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
27#include <linux/string.h> 28#include <linux/string.h>
@@ -32,7 +33,6 @@
32#include <linux/kthread.h> 33#include <linux/kthread.h>
33#include <linux/mfd/ucb1x00.h> 34#include <linux/mfd/ucb1x00.h>
34 35
35#include <mach/dma.h>
36#include <mach/collie.h> 36#include <mach/collie.h>
37#include <asm/mach-types.h> 37#include <asm/mach-types.h>
38 38
@@ -42,6 +42,8 @@ struct ucb1x00_ts {
42 struct input_dev *idev; 42 struct input_dev *idev;
43 struct ucb1x00 *ucb; 43 struct ucb1x00 *ucb;
44 44
45 spinlock_t irq_lock;
46 unsigned irq_disabled;
45 wait_queue_head_t irq_wait; 47 wait_queue_head_t irq_wait;
46 struct task_struct *rtask; 48 struct task_struct *rtask;
47 u16 x_res; 49 u16 x_res;
@@ -238,7 +240,12 @@ static int ucb1x00_thread(void *_ts)
238 if (ucb1x00_ts_pen_down(ts)) { 240 if (ucb1x00_ts_pen_down(ts)) {
239 set_current_state(TASK_INTERRUPTIBLE); 241 set_current_state(TASK_INTERRUPTIBLE);
240 242
241 ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING); 243 spin_lock_irq(&ts->irq_lock);
244 if (ts->irq_disabled) {
245 ts->irq_disabled = 0;
246 enable_irq(ts->ucb->irq_base + UCB_IRQ_TSPX);
247 }
248 spin_unlock_irq(&ts->irq_lock);
242 ucb1x00_disable(ts->ucb); 249 ucb1x00_disable(ts->ucb);
243 250
244 /* 251 /*
@@ -281,23 +288,37 @@ static int ucb1x00_thread(void *_ts)
281 * We only detect touch screen _touches_ with this interrupt 288 * We only detect touch screen _touches_ with this interrupt
282 * handler, and even then we just schedule our task. 289 * handler, and even then we just schedule our task.
283 */ 290 */
284static void ucb1x00_ts_irq(int idx, void *id) 291static irqreturn_t ucb1x00_ts_irq(int irq, void *id)
285{ 292{
286 struct ucb1x00_ts *ts = id; 293 struct ucb1x00_ts *ts = id;
287 294
288 ucb1x00_disable_irq(ts->ucb, UCB_IRQ_TSPX, UCB_FALLING); 295 spin_lock(&ts->irq_lock);
296 ts->irq_disabled = 1;
297 disable_irq_nosync(ts->ucb->irq_base + UCB_IRQ_TSPX);
298 spin_unlock(&ts->irq_lock);
289 wake_up(&ts->irq_wait); 299 wake_up(&ts->irq_wait);
300
301 return IRQ_HANDLED;
290} 302}
291 303
292static int ucb1x00_ts_open(struct input_dev *idev) 304static int ucb1x00_ts_open(struct input_dev *idev)
293{ 305{
294 struct ucb1x00_ts *ts = input_get_drvdata(idev); 306 struct ucb1x00_ts *ts = input_get_drvdata(idev);
307 unsigned long flags = 0;
295 int ret = 0; 308 int ret = 0;
296 309
297 BUG_ON(ts->rtask); 310 BUG_ON(ts->rtask);
298 311
312 if (machine_is_collie())
313 flags = IRQF_TRIGGER_RISING;
314 else
315 flags = IRQF_TRIGGER_FALLING;
316
317 ts->irq_disabled = 0;
318
299 init_waitqueue_head(&ts->irq_wait); 319 init_waitqueue_head(&ts->irq_wait);
300 ret = ucb1x00_hook_irq(ts->ucb, UCB_IRQ_TSPX, ucb1x00_ts_irq, ts); 320 ret = request_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ucb1x00_ts_irq,
321 flags, "ucb1x00-ts", ts);
301 if (ret < 0) 322 if (ret < 0)
302 goto out; 323 goto out;
303 324
@@ -314,7 +335,7 @@ static int ucb1x00_ts_open(struct input_dev *idev)
314 if (!IS_ERR(ts->rtask)) { 335 if (!IS_ERR(ts->rtask)) {
315 ret = 0; 336 ret = 0;
316 } else { 337 } else {
317 ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts); 338 free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts);
318 ts->rtask = NULL; 339 ts->rtask = NULL;
319 ret = -EFAULT; 340 ret = -EFAULT;
320 } 341 }
@@ -334,7 +355,7 @@ static void ucb1x00_ts_close(struct input_dev *idev)
334 kthread_stop(ts->rtask); 355 kthread_stop(ts->rtask);
335 356
336 ucb1x00_enable(ts->ucb); 357 ucb1x00_enable(ts->ucb);
337 ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts); 358 free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts);
338 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0); 359 ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0);
339 ucb1x00_disable(ts->ucb); 360 ucb1x00_disable(ts->ucb);
340} 361}
@@ -359,11 +380,13 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
359 ts->ucb = dev->ucb; 380 ts->ucb = dev->ucb;
360 ts->idev = idev; 381 ts->idev = idev;
361 ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC; 382 ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
383 spin_lock_init(&ts->irq_lock);
362 384
363 idev->name = "Touchscreen panel"; 385 idev->name = "Touchscreen panel";
364 idev->id.product = ts->ucb->id; 386 idev->id.product = ts->ucb->id;
365 idev->open = ucb1x00_ts_open; 387 idev->open = ucb1x00_ts_open;
366 idev->close = ucb1x00_ts_close; 388 idev->close = ucb1x00_ts_close;
389 idev->dev.parent = &ts->ucb->dev;
367 390
368 idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); 391 idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
369 idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); 392 idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
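
The touchscreen conversion above requests its pen-down interrupt with request_irq(ucb->irq_base + UCB_IRQ_TSPX, ...) in place of the removed ucb1x00_hook_irq() API; thanks to the .to_irq hook added to the ucb1x00 gpio_chip in ucb1x00-core, other users can obtain the same interrupt numbers through gpiolib. A hedged sketch with hypothetical "demo" names and an arbitrary trigger type:

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_handler(int irq, void *data)
	{
		/* a real handler would inspect the device state here */
		return IRQ_HANDLED;
	}

	static int demo_hook_ucb_gpio(unsigned gpio, void *data)
	{
		int irq = gpio_to_irq(gpio);	/* resolves to ucb->irq_base + offset */

		if (irq < 0)
			return irq;

		return request_irq(irq, demo_handler, IRQF_TRIGGER_FALLING,
				   "demo-ucb-gpio", data);
	}
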
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 502821997707..cbc3b7867910 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -23,106 +23,6 @@
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <asm/mach/flash.h> 24#include <asm/mach/flash.h>
25 25
26#if 0
27/*
28 * This is here for documentation purposes only - until these people
29 * submit their machine types. It will be gone January 2005.
30 */
31static struct mtd_partition consus_partitions[] = {
32 {
33 .name = "Consus boot firmware",
34 .offset = 0,
35 .size = 0x00040000,
36 .mask_flags = MTD_WRITABLE, /* force read-only */
37 }, {
38 .name = "Consus kernel",
39 .offset = 0x00040000,
40 .size = 0x00100000,
41 .mask_flags = 0,
42 }, {
43 .name = "Consus disk",
44 .offset = 0x00140000,
45 /* The rest (up to 16M) for jffs. We could put 0 and
46 make it find the size automatically, but right now
47 i have 32 megs. jffs will use all 32 megs if given
48 the chance, and this leads to horrible problems
49 when you try to re-flash the image because blob
50 won't erase the whole partition. */
51 .size = 0x01000000 - 0x00140000,
52 .mask_flags = 0,
53 }, {
54 /* this disk is a secondary disk, which can be used as
55 needed, for simplicity, make it the size of the other
56 consus partition, although realistically it could be
57 the remainder of the disk (depending on the file
58 system used) */
59 .name = "Consus disk2",
60 .offset = 0x01000000,
61 .size = 0x01000000 - 0x00140000,
62 .mask_flags = 0,
63 }
64};
65
66/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
67static struct mtd_partition frodo_partitions[] =
68{
69 {
70 .name = "bootloader",
71 .size = 0x00040000,
72 .offset = 0x00000000,
73 .mask_flags = MTD_WRITEABLE
74 }, {
75 .name = "bootloader params",
76 .size = 0x00040000,
77 .offset = MTDPART_OFS_APPEND,
78 .mask_flags = MTD_WRITEABLE
79 }, {
80 .name = "kernel",
81 .size = 0x00100000,
82 .offset = MTDPART_OFS_APPEND,
83 .mask_flags = MTD_WRITEABLE
84 }, {
85 .name = "ramdisk",
86 .size = 0x00400000,
87 .offset = MTDPART_OFS_APPEND,
88 .mask_flags = MTD_WRITEABLE
89 }, {
90 .name = "file system",
91 .size = MTDPART_SIZ_FULL,
92 .offset = MTDPART_OFS_APPEND
93 }
94};
95
96static struct mtd_partition jornada56x_partitions[] = {
97 {
98 .name = "bootldr",
99 .size = 0x00040000,
100 .offset = 0,
101 .mask_flags = MTD_WRITEABLE,
102 }, {
103 .name = "rootfs",
104 .size = MTDPART_SIZ_FULL,
105 .offset = MTDPART_OFS_APPEND,
106 }
107};
108
109static void jornada56x_set_vpp(int vpp)
110{
111 if (vpp)
112 GPSR = GPIO_GPIO26;
113 else
114 GPCR = GPIO_GPIO26;
115 GPDR |= GPIO_GPIO26;
116}
117
118/*
119 * Machine Phys Size set_vpp
120 * Consus : SA1100_CS0_PHYS SZ_32M
121 * Frodo : SA1100_CS0_PHYS SZ_32M
122 * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
123 */
124#endif
125
126struct sa_subdev_info { 26struct sa_subdev_info {
127 char name[16]; 27 char name[16];
128 struct map_info map; 28 struct map_info map;
@@ -373,21 +273,9 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
373 return 0; 273 return 0;
374} 274}
375 275
376#ifdef CONFIG_PM
377static void sa1100_mtd_shutdown(struct platform_device *dev)
378{
379 struct sa_info *info = platform_get_drvdata(dev);
380 if (info && mtd_suspend(info->mtd) == 0)
381 mtd_resume(info->mtd);
382}
383#else
384#define sa1100_mtd_shutdown NULL
385#endif
386
387static struct platform_driver sa1100_mtd_driver = { 276static struct platform_driver sa1100_mtd_driver = {
388 .probe = sa1100_mtd_probe, 277 .probe = sa1100_mtd_probe,
389 .remove = __exit_p(sa1100_mtd_remove), 278 .remove = __exit_p(sa1100_mtd_remove),
390 .shutdown = sa1100_mtd_shutdown,
391 .driver = { 279 .driver = {
392 .name = "sa1100-mtd", 280 .name = "sa1100-mtd",
393 .owner = THIS_MODULE, 281 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 1dc4fad593e7..fee449355014 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2280,7 +2280,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
2280 if (ret) 2280 if (ret)
2281 goto out_release_io; 2281 goto out_release_io;
2282#if defined(CONFIG_SA1100_ASSABET) 2282#if defined(CONFIG_SA1100_ASSABET)
2283 NCR_0 |= NCR_ENET_OSC_EN; 2283 neponset_ncr_set(NCR_ENET_OSC_EN);
2284#endif 2284#endif
2285 platform_set_drvdata(pdev, ndev); 2285 platform_set_drvdata(pdev, ndev);
2286 ret = smc_enable_device(pdev); 2286 ret = smc_enable_device(pdev);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index e535137eb2d0..468047866c8c 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -356,7 +356,7 @@ config VLSI_FIR
356 356
357config SA1100_FIR 357config SA1100_FIR
358 tristate "SA1100 Internal IR" 358 tristate "SA1100 Internal IR"
359 depends on ARCH_SA1100 && IRDA 359 depends on ARCH_SA1100 && IRDA && DMA_SA11X0
360 360
361config VIA_FIR 361config VIA_FIR
362 tristate "VIA VT8231/VT1211 SIR/MIR/FIR" 362 tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index da2705061a60..a0d1913a58d3 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -15,7 +15,7 @@
15 * This driver takes one kernel command line parameter, sa1100ir=, with 15 * This driver takes one kernel command line parameter, sa1100ir=, with
16 * the following options: 16 * the following options:
17 * max_rate:baudrate - set the maximum baud rate 17 * max_rate:baudrate - set the maximum baud rate
18 * power_leve:level - set the transmitter power level 18 * power_level:level - set the transmitter power level
19 * tx_lpm:0|1 - set transmit low power mode 19 * tx_lpm:0|1 - set transmit low power mode
20 */ 20 */
21#include <linux/module.h> 21#include <linux/module.h>
@@ -30,13 +30,13 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/dmaengine.h>
34#include <linux/sa11x0-dma.h>
33 35
34#include <net/irda/irda.h> 36#include <net/irda/irda.h>
35#include <net/irda/wrapper.h> 37#include <net/irda/wrapper.h>
36#include <net/irda/irda_device.h> 38#include <net/irda/irda_device.h>
37 39
38#include <asm/irq.h>
39#include <mach/dma.h>
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41#include <asm/mach/irda.h> 41#include <asm/mach/irda.h>
42 42
@@ -44,8 +44,15 @@ static int power_level = 3;
44static int tx_lpm; 44static int tx_lpm;
45static int max_rate = 4000000; 45static int max_rate = 4000000;
46 46
47struct sa1100_buf {
48 struct device *dev;
49 struct sk_buff *skb;
50 struct scatterlist sg;
51 struct dma_chan *chan;
52 dma_cookie_t cookie;
53};
54
47struct sa1100_irda { 55struct sa1100_irda {
48 unsigned char hscr0;
49 unsigned char utcr4; 56 unsigned char utcr4;
50 unsigned char power; 57 unsigned char power;
51 unsigned char open; 58 unsigned char open;
@@ -53,12 +60,8 @@ struct sa1100_irda {
53 int speed; 60 int speed;
54 int newspeed; 61 int newspeed;
55 62
56 struct sk_buff *txskb; 63 struct sa1100_buf dma_rx;
57 struct sk_buff *rxskb; 64 struct sa1100_buf dma_tx;
58 dma_addr_t txbuf_dma;
59 dma_addr_t rxbuf_dma;
60 dma_regs_t *txdma;
61 dma_regs_t *rxdma;
62 65
63 struct device *dev; 66 struct device *dev;
64 struct irda_platform_data *pdata; 67 struct irda_platform_data *pdata;
@@ -67,23 +70,103 @@ struct sa1100_irda {
67 70
68 iobuff_t tx_buff; 71 iobuff_t tx_buff;
69 iobuff_t rx_buff; 72 iobuff_t rx_buff;
73
74 int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
75 irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
70}; 76};
71 77
78static int sa1100_irda_set_speed(struct sa1100_irda *, int);
79
72#define IS_FIR(si) ((si)->speed >= 4000000) 80#define IS_FIR(si) ((si)->speed >= 4000000)
73 81
74#define HPSIR_MAX_RXLEN 2047 82#define HPSIR_MAX_RXLEN 2047
75 83
84static struct dma_slave_config sa1100_irda_sir_tx = {
85 .direction = DMA_TO_DEVICE,
86 .dst_addr = __PREG(Ser2UTDR),
87 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
88 .dst_maxburst = 4,
89};
90
91static struct dma_slave_config sa1100_irda_fir_rx = {
92 .direction = DMA_FROM_DEVICE,
93 .src_addr = __PREG(Ser2HSDR),
94 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
95 .src_maxburst = 8,
96};
97
98static struct dma_slave_config sa1100_irda_fir_tx = {
99 .direction = DMA_TO_DEVICE,
100 .dst_addr = __PREG(Ser2HSDR),
101 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
102 .dst_maxburst = 8,
103};
104
105static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
106{
107 struct dma_chan *chan = buf->chan;
108 struct dma_tx_state state;
109 enum dma_status status;
110
111 status = chan->device->device_tx_status(chan, buf->cookie, &state);
112 if (status != DMA_PAUSED)
113 return 0;
114
115 return sg_dma_len(&buf->sg) - state.residue;
116}
117
118static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
119 const char *name, struct dma_slave_config *cfg)
120{
121 dma_cap_mask_t m;
122 int ret;
123
124 dma_cap_zero(m);
125 dma_cap_set(DMA_SLAVE, m);
126
127 buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
128 if (!buf->chan) {
129 dev_err(dev, "unable to request DMA channel for %s\n",
130 name);
131 return -ENOENT;
132 }
133
134 ret = dmaengine_slave_config(buf->chan, cfg);
135 if (ret)
136 dev_warn(dev, "DMA slave_config for %s returned %d\n",
137 name, ret);
138
139 buf->dev = buf->chan->device->dev;
140
141 return 0;
142}
143
144static void sa1100_irda_dma_start(struct sa1100_buf *buf,
145 enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
146{
147 struct dma_async_tx_descriptor *desc;
148 struct dma_chan *chan = buf->chan;
149
150 desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir,
151 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
152 if (desc) {
153 desc->callback = cb;
154 desc->callback_param = cb_p;
155 buf->cookie = dmaengine_submit(desc);
156 dma_async_issue_pending(chan);
157 }
158}
159
76/* 160/*
77 * Allocate and map the receive buffer, unless it is already allocated. 161 * Allocate and map the receive buffer, unless it is already allocated.
78 */ 162 */
79static int sa1100_irda_rx_alloc(struct sa1100_irda *si) 163static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
80{ 164{
81 if (si->rxskb) 165 if (si->dma_rx.skb)
82 return 0; 166 return 0;
83 167
84 si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); 168 si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
85 169 if (!si->dma_rx.skb) {
86 if (!si->rxskb) {
87 printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); 170 printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
88 return -ENOMEM; 171 return -ENOMEM;
89 } 172 }
@@ -92,11 +175,14 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
92 * Align any IP headers that may be contained 175 * Align any IP headers that may be contained
93 * within the frame. 176 * within the frame.
94 */ 177 */
95 skb_reserve(si->rxskb, 1); 178 skb_reserve(si->dma_rx.skb, 1);
179
180 sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
181 if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
182 dev_kfree_skb_any(si->dma_rx.skb);
183 return -ENOMEM;
184 }
96 185
97 si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
98 HPSIR_MAX_RXLEN,
99 DMA_FROM_DEVICE);
100 return 0; 186 return 0;
101} 187}
102 188
@@ -106,7 +192,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
106 */ 192 */
107static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) 193static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
108{ 194{
109 if (!si->rxskb) { 195 if (!si->dma_rx.skb) {
110 printk(KERN_ERR "sa1100_ir: rx buffer went missing\n"); 196 printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
111 return; 197 return;
112 } 198 }
@@ -114,254 +200,87 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
114 /* 200 /*
115 * First empty receive FIFO 201 * First empty receive FIFO
116 */ 202 */
117 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; 203 Ser2HSCR0 = HSCR0_HSSP;
118 204
119 /* 205 /*
120 * Enable the DMA, receiver and receive interrupt. 206 * Enable the DMA, receiver and receive interrupt.
121 */ 207 */
122 sa1100_clear_dma(si->rxdma); 208 dmaengine_terminate_all(si->dma_rx.chan);
123 sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN); 209 sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);
124 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; 210
211 Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
125} 212}
126 213
127/* 214static void sa1100_irda_check_speed(struct sa1100_irda *si)
128 * Set the IrDA communications speed.
129 */
130static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
131{ 215{
132 unsigned long flags; 216 if (si->newspeed) {
133 int brd, ret = -EINVAL; 217 sa1100_irda_set_speed(si, si->newspeed);
134 218 si->newspeed = 0;
135 switch (speed) {
136 case 9600: case 19200: case 38400:
137 case 57600: case 115200:
138 brd = 3686400 / (16 * speed) - 1;
139
140 /*
141 * Stop the receive DMA.
142 */
143 if (IS_FIR(si))
144 sa1100_stop_dma(si->rxdma);
145
146 local_irq_save(flags);
147
148 Ser2UTCR3 = 0;
149 Ser2HSCR0 = HSCR0_UART;
150
151 Ser2UTCR1 = brd >> 8;
152 Ser2UTCR2 = brd;
153
154 /*
155 * Clear status register
156 */
157 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
158 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
159
160 if (si->pdata->set_speed)
161 si->pdata->set_speed(si->dev, speed);
162
163 si->speed = speed;
164
165 local_irq_restore(flags);
166 ret = 0;
167 break;
168
169 case 4000000:
170 local_irq_save(flags);
171
172 si->hscr0 = 0;
173
174 Ser2HSSR0 = 0xff;
175 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
176 Ser2UTCR3 = 0;
177
178 si->speed = speed;
179
180 if (si->pdata->set_speed)
181 si->pdata->set_speed(si->dev, speed);
182
183 sa1100_irda_rx_alloc(si);
184 sa1100_irda_rx_dma_start(si);
185
186 local_irq_restore(flags);
187
188 break;
189
190 default:
191 break;
192 } 219 }
193
194 return ret;
195} 220}
196 221
197/* 222/*
198 * Control the power state of the IrDA transmitter. 223 * HP-SIR format support.
199 * State:
200 * 0 - off
201 * 1 - short range, lowest power
202 * 2 - medium range, medium power
203 * 3 - maximum range, high power
204 *
205 * Currently, only assabet is known to support this.
206 */ 224 */
207static int 225static void sa1100_irda_sirtxdma_irq(void *id)
208__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
209{ 226{
210 int ret = 0; 227 struct net_device *dev = id;
211 if (si->pdata->set_power) 228 struct sa1100_irda *si = netdev_priv(dev);
212 ret = si->pdata->set_power(si->dev, state);
213 return ret;
214}
215
216static inline int
217sa1100_set_power(struct sa1100_irda *si, unsigned int state)
218{
219 int ret;
220
221 ret = __sa1100_irda_set_power(si, state);
222 if (ret == 0)
223 si->power = state;
224 229
225 return ret; 230 dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
226} 231 dev_kfree_skb(si->dma_tx.skb);
232 si->dma_tx.skb = NULL;
227 233
228static int sa1100_irda_startup(struct sa1100_irda *si) 234 dev->stats.tx_packets++;
229{ 235 dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
230 int ret;
231 236
232 /* 237 /* We need to ensure that the transmitter has finished. */
233 * Ensure that the ports for this device are setup correctly. 238 do
234 */ 239 rmb();
235 if (si->pdata->startup) { 240 while (Ser2UTSR1 & UTSR1_TBY);
236 ret = si->pdata->startup(si->dev);
237 if (ret)
238 return ret;
239 }
240
241 /*
242 * Configure PPC for IRDA - we want to drive TXD2 low.
243 * We also want to drive this pin low during sleep.
244 */
245 PPSR &= ~PPC_TXD2;
246 PSDR &= ~PPC_TXD2;
247 PPDR |= PPC_TXD2;
248
249 /*
250 * Enable HP-SIR modulation, and ensure that the port is disabled.
251 */
252 Ser2UTCR3 = 0;
253 Ser2HSCR0 = HSCR0_UART;
254 Ser2UTCR4 = si->utcr4;
255 Ser2UTCR0 = UTCR0_8BitData;
256 Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
257 241
258 /* 242 /*
259 * Clear status register 243 * Ok, we've finished transmitting. Now enable the receiver.
244 * Sometimes we get a receive IRQ immediately after a transmit...
260 */ 245 */
261 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; 246 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
247 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
262 248
263 ret = sa1100_irda_set_speed(si, si->speed = 9600); 249 sa1100_irda_check_speed(si);
264 if (ret) {
265 Ser2UTCR3 = 0;
266 Ser2HSCR0 = 0;
267
268 if (si->pdata->shutdown)
269 si->pdata->shutdown(si->dev);
270 }
271
272 return ret;
273}
274
275static void sa1100_irda_shutdown(struct sa1100_irda *si)
276{
277 /*
278 * Stop all DMA activity.
279 */
280 sa1100_stop_dma(si->rxdma);
281 sa1100_stop_dma(si->txdma);
282
283 /* Disable the port. */
284 Ser2UTCR3 = 0;
285 Ser2HSCR0 = 0;
286 250
287 if (si->pdata->shutdown) 251 /* I'm hungry! */
288 si->pdata->shutdown(si->dev); 252 netif_wake_queue(dev);
289} 253}
290 254
291#ifdef CONFIG_PM 255static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
292/* 256 struct sa1100_irda *si)
293 * Suspend the IrDA interface.
294 */
295static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
296{ 257{
297 struct net_device *dev = platform_get_drvdata(pdev); 258 si->tx_buff.data = si->tx_buff.head;
298 struct sa1100_irda *si; 259 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
299 260 si->tx_buff.truesize);
300 if (!dev) 261
301 return 0; 262 si->dma_tx.skb = skb;
302 263 sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
303 si = netdev_priv(dev); 264 if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
304 if (si->open) { 265 si->dma_tx.skb = NULL;
305 /* 266 netif_wake_queue(dev);
306 * Stop the transmit queue 267 dev->stats.tx_dropped++;
307 */ 268 return NETDEV_TX_OK;
308 netif_device_detach(dev);
309 disable_irq(dev->irq);
310 sa1100_irda_shutdown(si);
311 __sa1100_irda_set_power(si, 0);
312 } 269 }
313 270
314 return 0; 271 sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);
315}
316
317/*
318 * Resume the IrDA interface.
319 */
320static int sa1100_irda_resume(struct platform_device *pdev)
321{
322 struct net_device *dev = platform_get_drvdata(pdev);
323 struct sa1100_irda *si;
324
325 if (!dev)
326 return 0;
327 272
328 si = netdev_priv(dev); 273 /*
329 if (si->open) { 274 * The mean turn-around time is enforced by XBOF padding,
330 /* 275 * so we don't have to do anything special here.
331 * If we missed a speed change, initialise at the new speed 276 */
332 * directly. It is debatable whether this is actually 277 Ser2UTCR3 = UTCR3_TXE;
333 * required, but in the interests of continuing from where
334 * we left off it is desirable. The converse argument is
335 * that we should re-negotiate at 9600 baud again.
336 */
337 if (si->newspeed) {
338 si->speed = si->newspeed;
339 si->newspeed = 0;
340 }
341
342 sa1100_irda_startup(si);
343 __sa1100_irda_set_power(si, si->power);
344 enable_irq(dev->irq);
345
346 /*
347 * This automatically wakes up the queue
348 */
349 netif_device_attach(dev);
350 }
351 278
352 return 0; 279 return NETDEV_TX_OK;
353} 280}
354#else
355#define sa1100_irda_suspend NULL
356#define sa1100_irda_resume NULL
357#endif
358 281
359/* 282static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
360 * HP-SIR format interrupt service routines.
361 */
362static void sa1100_irda_hpsir_irq(struct net_device *dev)
363{ 283{
364 struct sa1100_irda *si = netdev_priv(dev);
365 int status; 284 int status;
366 285
367 status = Ser2UTSR0; 286 status = Ser2UTSR0;
@@ -414,51 +333,96 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
414 333
415 } 334 }
416 335
417 if (status & UTSR0_TFS && si->tx_buff.len) { 336 return IRQ_HANDLED;
418 /* 337}
419 * Transmitter FIFO is not full
420 */
421 do {
422 Ser2UTDR = *si->tx_buff.data++;
423 si->tx_buff.len -= 1;
424 } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
425 338
426 if (si->tx_buff.len == 0) { 339/*
427 dev->stats.tx_packets++; 340 * FIR format support.
428 dev->stats.tx_bytes += si->tx_buff.data - 341 */
429 si->tx_buff.head; 342static void sa1100_irda_firtxdma_irq(void *id)
343{
344 struct net_device *dev = id;
345 struct sa1100_irda *si = netdev_priv(dev);
346 struct sk_buff *skb;
430 347
431 /* 348 /*
432 * We need to ensure that the transmitter has 349 * Wait for the transmission to complete. Unfortunately,
433 * finished. 350 * the hardware doesn't give us an interrupt to indicate
434 */ 351 * "end of frame".
435 do 352 */
436 rmb(); 353 do
437 while (Ser2UTSR1 & UTSR1_TBY); 354 rmb();
355 while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
438 356
439 /* 357 /*
440 * Ok, we've finished transmitting. Now enable 358 * Clear the transmit underrun bit.
441 * the receiver. Sometimes we get a receive IRQ 359 */
442 * immediately after a transmit... 360 Ser2HSSR0 = HSSR0_TUR;
443 */
444 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
445 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
446 361
447 if (si->newspeed) { 362 /*
448 sa1100_irda_set_speed(si, si->newspeed); 363 * Do we need to change speed? Note that we're lazy
449 si->newspeed = 0; 364 * here - we don't free the old dma_rx.skb. We don't need
450 } 365 * to allocate a buffer either.
366 */
367 sa1100_irda_check_speed(si);
451 368
452 /* I'm hungry! */ 369 /*
453 netif_wake_queue(dev); 370 * Start reception. This disables the transmitter for
454 } 371 * us. This will be using the existing RX buffer.
372 */
373 sa1100_irda_rx_dma_start(si);
374
375 /* Account and free the packet. */
376 skb = si->dma_tx.skb;
377 if (skb) {
378 dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
379 DMA_TO_DEVICE);
380 dev->stats.tx_packets ++;
381 dev->stats.tx_bytes += skb->len;
382 dev_kfree_skb_irq(skb);
383 si->dma_tx.skb = NULL;
455 } 384 }
385
386 /*
387 * Make sure that the TX queue is available for sending
388 * (for retries). TX has priority over RX at all times.
389 */
390 netif_wake_queue(dev);
391}
392
393static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
394 struct sa1100_irda *si)
395{
396 int mtt = irda_get_mtt(skb);
397
398 si->dma_tx.skb = skb;
399 sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
400 if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
401 si->dma_tx.skb = NULL;
402 netif_wake_queue(dev);
403 dev->stats.tx_dropped++;
404 dev_kfree_skb(skb);
405 return NETDEV_TX_OK;
406 }
407
408 sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev);
409
410 /*
411 * If we have a mean turn-around time, impose the specified
 412 * delay. We could shorten this by timing from
413 * the point we received the packet.
414 */
415 if (mtt)
416 udelay(mtt);
417
418 Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE;
419
420 return NETDEV_TX_OK;
456} 421}
457 422
458static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) 423static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
459{ 424{
460 struct sk_buff *skb = si->rxskb; 425 struct sk_buff *skb = si->dma_rx.skb;
461 dma_addr_t dma_addr;
462 unsigned int len, stat, data; 426 unsigned int len, stat, data;
463 427
464 if (!skb) { 428 if (!skb) {
@@ -469,11 +433,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
469 /* 433 /*
470 * Get the current data position. 434 * Get the current data position.
471 */ 435 */
472 dma_addr = sa1100_get_dma_pos(si->rxdma); 436 len = sa1100_irda_dma_xferred(&si->dma_rx);
473 len = dma_addr - si->rxbuf_dma;
474 if (len > HPSIR_MAX_RXLEN) 437 if (len > HPSIR_MAX_RXLEN)
475 len = HPSIR_MAX_RXLEN; 438 len = HPSIR_MAX_RXLEN;
476 dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE); 439 dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
477 440
478 do { 441 do {
479 /* 442 /*
@@ -501,7 +464,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
501 } while (Ser2HSSR0 & HSSR0_EIF); 464 } while (Ser2HSSR0 & HSSR0_EIF);
502 465
503 if (stat & HSSR1_EOF) { 466 if (stat & HSSR1_EOF) {
504 si->rxskb = NULL; 467 si->dma_rx.skb = NULL;
505 468
506 skb_put(skb, len); 469 skb_put(skb, len);
507 skb->dev = dev; 470 skb->dev = dev;
@@ -518,28 +481,23 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
518 netif_rx(skb); 481 netif_rx(skb);
519 } else { 482 } else {
520 /* 483 /*
521 * Remap the buffer. 484 * Remap the buffer - it was previously mapped, and we
485 * hope that this succeeds.
522 */ 486 */
523 si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, 487 dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
524 HPSIR_MAX_RXLEN,
525 DMA_FROM_DEVICE);
526 } 488 }
527} 489}
528 490
529/* 491/*
530 * FIR format interrupt service routine. We only have to 492 * We only have to handle RX events here; transmit events go via the TX
 531 * handle RX events; transmit events go via the TX DMA handler. 493 * DMA handler. We disable RX, process, and then restart RX.
532 *
533 * No matter what, we disable RX, process, and the restart RX.
534 */ 494 */
535static void sa1100_irda_fir_irq(struct net_device *dev) 495static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
536{ 496{
537 struct sa1100_irda *si = netdev_priv(dev);
538
539 /* 497 /*
540 * Stop RX DMA 498 * Stop RX DMA
541 */ 499 */
542 sa1100_stop_dma(si->rxdma); 500 dmaengine_pause(si->dma_rx.chan);
543 501
544 /* 502 /*
545 * Framing error - we throw away the packet completely. 503 * Framing error - we throw away the packet completely.
@@ -555,7 +513,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
555 /* 513 /*
556 * Clear out the DMA... 514 * Clear out the DMA...
557 */ 515 */
558 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; 516 Ser2HSCR0 = HSCR0_HSSP;
559 517
560 /* 518 /*
561 * Clear selected status bits now, so we 519 * Clear selected status bits now, so we
@@ -577,74 +535,124 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
577 * No matter what happens, we must restart reception. 535 * No matter what happens, we must restart reception.
578 */ 536 */
579 sa1100_irda_rx_dma_start(si); 537 sa1100_irda_rx_dma_start(si);
580}
581 538
582static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
583{
584 struct net_device *dev = dev_id;
585 if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev))))
586 sa1100_irda_fir_irq(dev);
587 else
588 sa1100_irda_hpsir_irq(dev);
589 return IRQ_HANDLED; 539 return IRQ_HANDLED;
590} 540}
591 541
592/* 542/*
593 * TX DMA completion handler. 543 * Set the IrDA communications speed.
594 */ 544 */
595static void sa1100_irda_txdma_irq(void *id) 545static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
596{ 546{
597 struct net_device *dev = id; 547 unsigned long flags;
598 struct sa1100_irda *si = netdev_priv(dev); 548 int brd, ret = -EINVAL;
599 struct sk_buff *skb = si->txskb;
600 549
601 si->txskb = NULL; 550 switch (speed) {
551 case 9600: case 19200: case 38400:
552 case 57600: case 115200:
553 brd = 3686400 / (16 * speed) - 1;
602 554
603 /* 555 /* Stop the receive DMA, and configure transmit. */
604 * Wait for the transmission to complete. Unfortunately, 556 if (IS_FIR(si)) {
605 * the hardware doesn't give us an interrupt to indicate 557 dmaengine_terminate_all(si->dma_rx.chan);
606 * "end of frame". 558 dmaengine_slave_config(si->dma_tx.chan,
607 */ 559 &sa1100_irda_sir_tx);
608 do 560 }
609 rmb();
610 while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
611 561
612 /* 562 local_irq_save(flags);
613 * Clear the transmit underrun bit.
614 */
615 Ser2HSSR0 = HSSR0_TUR;
616 563
617 /* 564 Ser2UTCR3 = 0;
618 * Do we need to change speed? Note that we're lazy 565 Ser2HSCR0 = HSCR0_UART;
619 * here - we don't free the old rxskb. We don't need
620 * to allocate a buffer either.
621 */
622 if (si->newspeed) {
623 sa1100_irda_set_speed(si, si->newspeed);
624 si->newspeed = 0;
625 }
626 566
627 /* 567 Ser2UTCR1 = brd >> 8;
628 * Start reception. This disables the transmitter for 568 Ser2UTCR2 = brd;
629 * us. This will be using the existing RX buffer.
630 */
631 sa1100_irda_rx_dma_start(si);
632 569
633 /* 570 /*
634 * Account and free the packet. 571 * Clear status register
635 */ 572 */
636 if (skb) { 573 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
637 dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE); 574 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
638 dev->stats.tx_packets ++; 575
639 dev->stats.tx_bytes += skb->len; 576 if (si->pdata->set_speed)
640 dev_kfree_skb_irq(skb); 577 si->pdata->set_speed(si->dev, speed);
578
579 si->speed = speed;
580 si->tx_start = sa1100_irda_sir_tx_start;
581 si->irq = sa1100_irda_sir_irq;
582
583 local_irq_restore(flags);
584 ret = 0;
585 break;
586
587 case 4000000:
588 if (!IS_FIR(si))
589 dmaengine_slave_config(si->dma_tx.chan,
590 &sa1100_irda_fir_tx);
591
592 local_irq_save(flags);
593
594 Ser2HSSR0 = 0xff;
595 Ser2HSCR0 = HSCR0_HSSP;
596 Ser2UTCR3 = 0;
597
598 si->speed = speed;
599 si->tx_start = sa1100_irda_fir_tx_start;
600 si->irq = sa1100_irda_fir_irq;
601
602 if (si->pdata->set_speed)
603 si->pdata->set_speed(si->dev, speed);
604
605 sa1100_irda_rx_alloc(si);
606 sa1100_irda_rx_dma_start(si);
607
608 local_irq_restore(flags);
609
610 break;
611
612 default:
613 break;
641 } 614 }
642 615
643 /* 616 return ret;
644 * Make sure that the TX queue is available for sending 617}
645 * (for retries). TX has priority over RX at all times. 618
646 */ 619/*
647 netif_wake_queue(dev); 620 * Control the power state of the IrDA transmitter.
621 * State:
622 * 0 - off
623 * 1 - short range, lowest power
624 * 2 - medium range, medium power
625 * 3 - maximum range, high power
626 *
627 * Currently, only assabet is known to support this.
628 */
629static int
630__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
631{
632 int ret = 0;
633 if (si->pdata->set_power)
634 ret = si->pdata->set_power(si->dev, state);
635 return ret;
636}
637
638static inline int
639sa1100_set_power(struct sa1100_irda *si, unsigned int state)
640{
641 int ret;
642
643 ret = __sa1100_irda_set_power(si, state);
644 if (ret == 0)
645 si->power = state;
646
647 return ret;
648}
649
650static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
651{
652 struct net_device *dev = dev_id;
653 struct sa1100_irda *si = netdev_priv(dev);
654
655 return si->irq(dev, si);
648} 656}
649 657
650static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) 658static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -660,62 +668,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
660 if (speed != si->speed && speed != -1) 668 if (speed != si->speed && speed != -1)
661 si->newspeed = speed; 669 si->newspeed = speed;
662 670
663 /* 671 /* If this is an empty frame, we can bypass a lot. */
664 * If this is an empty frame, we can bypass a lot.
665 */
666 if (skb->len == 0) { 672 if (skb->len == 0) {
667 if (si->newspeed) { 673 sa1100_irda_check_speed(si);
668 si->newspeed = 0;
669 sa1100_irda_set_speed(si, speed);
670 }
671 dev_kfree_skb(skb); 674 dev_kfree_skb(skb);
672 return NETDEV_TX_OK; 675 return NETDEV_TX_OK;
673 } 676 }
674 677
675 if (!IS_FIR(si)) { 678 netif_stop_queue(dev);
676 netif_stop_queue(dev);
677
678 si->tx_buff.data = si->tx_buff.head;
679 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
680 si->tx_buff.truesize);
681
682 /*
683 * Set the transmit interrupt enable. This will fire
684 * off an interrupt immediately. Note that we disable
685 * the receiver so we won't get spurious characteres
686 * received.
687 */
688 Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
689
690 dev_kfree_skb(skb);
691 } else {
692 int mtt = irda_get_mtt(skb);
693
694 /*
695 * We must not be transmitting...
696 */
697 BUG_ON(si->txskb);
698
699 netif_stop_queue(dev);
700
701 si->txskb = skb;
702 si->txbuf_dma = dma_map_single(si->dev, skb->data,
703 skb->len, DMA_TO_DEVICE);
704
705 sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);
706
707 /*
708 * If we have a mean turn-around time, impose the specified
709 * specified delay. We could shorten this by timing from
710 * the point we received the packet.
711 */
712 if (mtt)
713 udelay(mtt);
714 679
715 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; 680 /* We must not already have a skb to transmit... */
716 } 681 BUG_ON(si->dma_tx.skb);
717 682
718 return NETDEV_TX_OK; 683 return si->tx_start(skb, dev, si);
719} 684}
720 685
721static int 686static int
@@ -762,6 +727,69 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
762 return ret; 727 return ret;
763} 728}
764 729
730static int sa1100_irda_startup(struct sa1100_irda *si)
731{
732 int ret;
733
734 /*
735 * Ensure that the ports for this device are setup correctly.
736 */
737 if (si->pdata->startup) {
738 ret = si->pdata->startup(si->dev);
739 if (ret)
740 return ret;
741 }
742
743 /*
744 * Configure PPC for IRDA - we want to drive TXD2 low.
745 * We also want to drive this pin low during sleep.
746 */
747 PPSR &= ~PPC_TXD2;
748 PSDR &= ~PPC_TXD2;
749 PPDR |= PPC_TXD2;
750
751 /*
752 * Enable HP-SIR modulation, and ensure that the port is disabled.
753 */
754 Ser2UTCR3 = 0;
755 Ser2HSCR0 = HSCR0_UART;
756 Ser2UTCR4 = si->utcr4;
757 Ser2UTCR0 = UTCR0_8BitData;
758 Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
759
760 /*
761 * Clear status register
762 */
763 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
764
765 ret = sa1100_irda_set_speed(si, si->speed = 9600);
766 if (ret) {
767 Ser2UTCR3 = 0;
768 Ser2HSCR0 = 0;
769
770 if (si->pdata->shutdown)
771 si->pdata->shutdown(si->dev);
772 }
773
774 return ret;
775}
776
777static void sa1100_irda_shutdown(struct sa1100_irda *si)
778{
779 /*
780 * Stop all DMA activity.
781 */
782 dmaengine_terminate_all(si->dma_rx.chan);
783 dmaengine_terminate_all(si->dma_tx.chan);
784
785 /* Disable the port. */
786 Ser2UTCR3 = 0;
787 Ser2HSCR0 = 0;
788
789 if (si->pdata->shutdown)
790 si->pdata->shutdown(si->dev);
791}
792
765static int sa1100_irda_start(struct net_device *dev) 793static int sa1100_irda_start(struct net_device *dev)
766{ 794{
767 struct sa1100_irda *si = netdev_priv(dev); 795 struct sa1100_irda *si = netdev_priv(dev);
@@ -769,26 +797,17 @@ static int sa1100_irda_start(struct net_device *dev)
769 797
770 si->speed = 9600; 798 si->speed = 9600;
771 799
772 err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); 800 err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
773 if (err) 801 &sa1100_irda_fir_rx);
774 goto err_irq;
775
776 err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
777 NULL, NULL, &si->rxdma);
778 if (err) 802 if (err)
779 goto err_rx_dma; 803 goto err_rx_dma;
780 804
781 err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", 805 err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
782 sa1100_irda_txdma_irq, dev, &si->txdma); 806 &sa1100_irda_sir_tx);
783 if (err) 807 if (err)
784 goto err_tx_dma; 808 goto err_tx_dma;
785 809
786 /* 810 /*
787 * The interrupt must remain disabled for now.
788 */
789 disable_irq(dev->irq);
790
791 /*
792 * Setup the serial port for the specified speed. 811 * Setup the serial port for the specified speed.
793 */ 812 */
794 err = sa1100_irda_startup(si); 813 err = sa1100_irda_startup(si);
@@ -803,44 +822,60 @@ static int sa1100_irda_start(struct net_device *dev)
803 if (!si->irlap) 822 if (!si->irlap)
804 goto err_irlap; 823 goto err_irlap;
805 824
825 err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
826 if (err)
827 goto err_irq;
828
806 /* 829 /*
807 * Now enable the interrupt and start the queue 830 * Now enable the interrupt and start the queue
808 */ 831 */
809 si->open = 1; 832 si->open = 1;
810 sa1100_set_power(si, power_level); /* low power mode */ 833 sa1100_set_power(si, power_level); /* low power mode */
811 enable_irq(dev->irq); 834
812 netif_start_queue(dev); 835 netif_start_queue(dev);
813 return 0; 836 return 0;
814 837
838err_irq:
839 irlap_close(si->irlap);
815err_irlap: 840err_irlap:
816 si->open = 0; 841 si->open = 0;
817 sa1100_irda_shutdown(si); 842 sa1100_irda_shutdown(si);
818err_startup: 843err_startup:
819 sa1100_free_dma(si->txdma); 844 dma_release_channel(si->dma_tx.chan);
820err_tx_dma: 845err_tx_dma:
821 sa1100_free_dma(si->rxdma); 846 dma_release_channel(si->dma_rx.chan);
822err_rx_dma: 847err_rx_dma:
823 free_irq(dev->irq, dev);
824err_irq:
825 return err; 848 return err;
826} 849}
827 850
828static int sa1100_irda_stop(struct net_device *dev) 851static int sa1100_irda_stop(struct net_device *dev)
829{ 852{
830 struct sa1100_irda *si = netdev_priv(dev); 853 struct sa1100_irda *si = netdev_priv(dev);
854 struct sk_buff *skb;
855
856 netif_stop_queue(dev);
831 857
832 disable_irq(dev->irq); 858 si->open = 0;
833 sa1100_irda_shutdown(si); 859 sa1100_irda_shutdown(si);
834 860
835 /* 861 /*
836 * If we have been doing DMA receive, make sure we 862 * If we have been doing any DMA activity, make sure we
837 * tidy that up cleanly. 863 * tidy that up cleanly.
838 */ 864 */
839 if (si->rxskb) { 865 skb = si->dma_rx.skb;
840 dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN, 866 if (skb) {
841 DMA_FROM_DEVICE); 867 dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
842 dev_kfree_skb(si->rxskb); 868 DMA_FROM_DEVICE);
843 si->rxskb = NULL; 869 dev_kfree_skb(skb);
870 si->dma_rx.skb = NULL;
871 }
872
873 skb = si->dma_tx.skb;
874 if (skb) {
875 dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
876 DMA_TO_DEVICE);
877 dev_kfree_skb(skb);
878 si->dma_tx.skb = NULL;
844 } 879 }
845 880
846 /* Stop IrLAP */ 881 /* Stop IrLAP */
@@ -849,14 +884,11 @@ static int sa1100_irda_stop(struct net_device *dev)
849 si->irlap = NULL; 884 si->irlap = NULL;
850 } 885 }
851 886
852 netif_stop_queue(dev);
853 si->open = 0;
854
855 /* 887 /*
856 * Free resources 888 * Free resources
857 */ 889 */
858 sa1100_free_dma(si->txdma); 890 dma_release_channel(si->dma_tx.chan);
859 sa1100_free_dma(si->rxdma); 891 dma_release_channel(si->dma_rx.chan);
860 free_irq(dev->irq, dev); 892 free_irq(dev->irq, dev);
861 893
862 sa1100_set_power(si, 0); 894 sa1100_set_power(si, 0);
@@ -888,11 +920,15 @@ static int sa1100_irda_probe(struct platform_device *pdev)
888 struct net_device *dev; 920 struct net_device *dev;
889 struct sa1100_irda *si; 921 struct sa1100_irda *si;
890 unsigned int baudrate_mask; 922 unsigned int baudrate_mask;
891 int err; 923 int err, irq;
892 924
893 if (!pdev->dev.platform_data) 925 if (!pdev->dev.platform_data)
894 return -EINVAL; 926 return -EINVAL;
895 927
928 irq = platform_get_irq(pdev, 0);
929 if (irq <= 0)
930 return irq < 0 ? irq : -ENXIO;
931
896 err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; 932 err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
897 if (err) 933 if (err)
898 goto err_mem_1; 934 goto err_mem_1;
@@ -907,22 +943,27 @@ static int sa1100_irda_probe(struct platform_device *pdev)
907 if (!dev) 943 if (!dev)
908 goto err_mem_4; 944 goto err_mem_4;
909 945
946 SET_NETDEV_DEV(dev, &pdev->dev);
947
910 si = netdev_priv(dev); 948 si = netdev_priv(dev);
911 si->dev = &pdev->dev; 949 si->dev = &pdev->dev;
912 si->pdata = pdev->dev.platform_data; 950 si->pdata = pdev->dev.platform_data;
913 951
952 sg_init_table(&si->dma_rx.sg, 1);
953 sg_init_table(&si->dma_tx.sg, 1);
954
914 /* 955 /*
915 * Initialise the HP-SIR buffers 956 * Initialise the HP-SIR buffers
916 */ 957 */
917 err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); 958 err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
918 if (err) 959 if (err)
919 goto err_mem_5; 960 goto err_mem_5;
920 err = sa1100_irda_init_iobuf(&si->tx_buff, 4000); 961 err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
921 if (err) 962 if (err)
922 goto err_mem_5; 963 goto err_mem_5;
923 964
924 dev->netdev_ops = &sa1100_irda_netdev_ops; 965 dev->netdev_ops = &sa1100_irda_netdev_ops;
925 dev->irq = IRQ_Ser2ICP; 966 dev->irq = irq;
926 967
927 irda_init_max_qos_capabilies(&si->qos); 968 irda_init_max_qos_capabilies(&si->qos);
928 969
@@ -996,6 +1037,74 @@ static int sa1100_irda_remove(struct platform_device *pdev)
996 return 0; 1037 return 0;
997} 1038}
998 1039
1040#ifdef CONFIG_PM
1041/*
1042 * Suspend the IrDA interface.
1043 */
1044static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
1045{
1046 struct net_device *dev = platform_get_drvdata(pdev);
1047 struct sa1100_irda *si;
1048
1049 if (!dev)
1050 return 0;
1051
1052 si = netdev_priv(dev);
1053 if (si->open) {
1054 /*
1055 * Stop the transmit queue
1056 */
1057 netif_device_detach(dev);
1058 disable_irq(dev->irq);
1059 sa1100_irda_shutdown(si);
1060 __sa1100_irda_set_power(si, 0);
1061 }
1062
1063 return 0;
1064}
1065
1066/*
1067 * Resume the IrDA interface.
1068 */
1069static int sa1100_irda_resume(struct platform_device *pdev)
1070{
1071 struct net_device *dev = platform_get_drvdata(pdev);
1072 struct sa1100_irda *si;
1073
1074 if (!dev)
1075 return 0;
1076
1077 si = netdev_priv(dev);
1078 if (si->open) {
1079 /*
1080 * If we missed a speed change, initialise at the new speed
1081 * directly. It is debatable whether this is actually
1082 * required, but in the interests of continuing from where
1083 * we left off it is desirable. The converse argument is
1084 * that we should re-negotiate at 9600 baud again.
1085 */
1086 if (si->newspeed) {
1087 si->speed = si->newspeed;
1088 si->newspeed = 0;
1089 }
1090
1091 sa1100_irda_startup(si);
1092 __sa1100_irda_set_power(si, si->power);
1093 enable_irq(dev->irq);
1094
1095 /*
1096 * This automatically wakes up the queue
1097 */
1098 netif_device_attach(dev);
1099 }
1100
1101 return 0;
1102}
1103#else
1104#define sa1100_irda_suspend NULL
1105#define sa1100_irda_resume NULL
1106#endif
1107
999static struct platform_driver sa1100ir_driver = { 1108static struct platform_driver sa1100ir_driver = {
1000 .probe = sa1100_irda_probe, 1109 .probe = sa1100_irda_probe,
1001 .remove = sa1100_irda_remove, 1110 .remove = sa1100_irda_remove,
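
The sa1100_ir.c changes above are an instance of the generic dmaengine slave-DMA pattern: configure the channel with dmaengine_slave_config(), map a scatterlist, prepare a descriptor, submit it, and issue pending work. The sketch below is illustrative only and not part of this patch; the function name, FIFO address, burst size and buffer are made-up placeholders, and it uses the dmaengine_prep_slave_sg() wrapper where the driver above open-codes chan->device->device_prep_slave_sg().

/*
 * Illustrative sketch (not part of this patch) of the generic dmaengine
 * slave pattern followed by the converted driver above.  The FIFO
 * address, burst size and buffer are placeholders.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_slave_tx(struct device *dev, struct dma_chan *chan,
			    void *buf, size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* peripheral FIFO (placeholder) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
		return -ENOMEM;

	/* Wrapper equivalent of the open-coded device_prep_slave_sg() call above. */
	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
		return -EBUSY;
	}

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the channel */
	return 0;
}
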
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index ef5848f65241..70f728ce1856 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -22,6 +22,40 @@
22 22
23#include "sa1111_generic.h" 23#include "sa1111_generic.h"
24 24
25/*
26 * These are offsets from the above base.
27 */
28#define PCCR 0x0000
29#define PCSSR 0x0004
30#define PCSR 0x0008
31
32#define PCSR_S0_READY (1<<0)
33#define PCSR_S1_READY (1<<1)
34#define PCSR_S0_DETECT (1<<2)
35#define PCSR_S1_DETECT (1<<3)
36#define PCSR_S0_VS1 (1<<4)
37#define PCSR_S0_VS2 (1<<5)
38#define PCSR_S1_VS1 (1<<6)
39#define PCSR_S1_VS2 (1<<7)
40#define PCSR_S0_WP (1<<8)
41#define PCSR_S1_WP (1<<9)
42#define PCSR_S0_BVD1 (1<<10)
43#define PCSR_S0_BVD2 (1<<11)
44#define PCSR_S1_BVD1 (1<<12)
45#define PCSR_S1_BVD2 (1<<13)
46
47#define PCCR_S0_RST (1<<0)
48#define PCCR_S1_RST (1<<1)
49#define PCCR_S0_FLT (1<<2)
50#define PCCR_S1_FLT (1<<3)
51#define PCCR_S0_PWAITEN (1<<4)
52#define PCCR_S1_PWAITEN (1<<5)
53#define PCCR_S0_PSE (1<<6)
54#define PCCR_S1_PSE (1<<7)
55
56#define PCSSR_S0_SLEEP (1<<0)
57#define PCSSR_S1_SLEEP (1<<1)
58
25#define IDX_IRQ_S0_READY_NINT (0) 59#define IDX_IRQ_S0_READY_NINT (0)
26#define IDX_IRQ_S0_CD_VALID (1) 60#define IDX_IRQ_S0_CD_VALID (1)
27#define IDX_IRQ_S0_BVD1_STSCHG (2) 61#define IDX_IRQ_S0_BVD1_STSCHG (2)
@@ -32,7 +66,7 @@
32void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) 66void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
33{ 67{
34 struct sa1111_pcmcia_socket *s = to_skt(skt); 68 struct sa1111_pcmcia_socket *s = to_skt(skt);
35 unsigned long status = sa1111_readl(s->dev->mapbase + SA1111_PCSR); 69 unsigned long status = sa1111_readl(s->dev->mapbase + PCSR);
36 70
37 switch (skt->nr) { 71 switch (skt->nr) {
38 case 0: 72 case 0:
@@ -88,10 +122,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
88 pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT; 122 pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
89 123
90 local_irq_save(flags); 124 local_irq_save(flags);
91 val = sa1111_readl(s->dev->mapbase + SA1111_PCCR); 125 val = sa1111_readl(s->dev->mapbase + PCCR);
92 val &= ~pccr_skt_mask; 126 val &= ~pccr_skt_mask;
93 val |= pccr_set_mask & pccr_skt_mask; 127 val |= pccr_set_mask & pccr_skt_mask;
94 sa1111_writel(val, s->dev->mapbase + SA1111_PCCR); 128 sa1111_writel(val, s->dev->mapbase + PCCR);
95 local_irq_restore(flags); 129 local_irq_restore(flags);
96 130
97 return 0; 131 return 0;
@@ -141,20 +175,26 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
141static int pcmcia_probe(struct sa1111_dev *dev) 175static int pcmcia_probe(struct sa1111_dev *dev)
142{ 176{
143 void __iomem *base; 177 void __iomem *base;
178 int ret;
179
180 ret = sa1111_enable_device(dev);
181 if (ret)
182 return ret;
144 183
145 dev_set_drvdata(&dev->dev, NULL); 184 dev_set_drvdata(&dev->dev, NULL);
146 185
147 if (!request_mem_region(dev->res.start, 512, 186 if (!request_mem_region(dev->res.start, 512, SA1111_DRIVER_NAME(dev))) {
148 SA1111_DRIVER_NAME(dev))) 187 sa1111_disable_device(dev);
149 return -EBUSY; 188 return -EBUSY;
189 }
150 190
151 base = dev->mapbase; 191 base = dev->mapbase;
152 192
153 /* 193 /*
154 * Initialise the suspend state. 194 * Initialise the suspend state.
155 */ 195 */
156 sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + SA1111_PCSSR); 196 sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
157 sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + SA1111_PCCR); 197 sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
158 198
159#ifdef CONFIG_SA1100_BADGE4 199#ifdef CONFIG_SA1100_BADGE4
160 pcmcia_badge4_init(&dev->dev); 200 pcmcia_badge4_init(&dev->dev);
@@ -184,6 +224,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
184 } 224 }
185 225
186 release_mem_region(dev->res.start, 512); 226 release_mem_region(dev->res.start, 512);
227 sa1111_disable_device(dev);
187 return 0; 228 return 0;
188} 229}
189 230
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 50f297d850e7..1d78739c4c07 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -94,12 +94,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
94 94
95 ret = sa1111_pcmcia_configure_socket(skt, state); 95 ret = sa1111_pcmcia_configure_socket(skt, state);
96 if (ret == 0) { 96 if (ret == 0) {
97 unsigned long flags; 97 neponset_ncr_frob(ncr_mask, ncr_set);
98
99 local_irq_save(flags);
100 NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set;
101
102 local_irq_restore(flags);
103 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set); 98 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
104 } 99 }
105 100
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index fa512ed42017..4940fa8c4e10 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -35,7 +35,7 @@
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36 36
37#include <mach/hardware.h> 37#include <mach/hardware.h>
38#include <asm/irq.h> 38#include <mach/irqs.h>
39 39
40#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 40#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
41#include <mach/regs-rtc.h> 41#include <mach/regs-rtc.h>
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index a750aa72b8ef..2a28b4ad1975 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -305,7 +305,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
305 info->base = base; 305 info->base = base;
306 306
307 info->info.scsi.io_base = base + 0x2000; 307 info->info.scsi.io_base = base + 0x2000;
308 info->info.scsi.irq = NO_IRQ; 308 info->info.scsi.irq = 0;
309 info->info.scsi.dma = NO_DMA; 309 info->info.scsi.dma = NO_DMA;
310 info->info.scsi.io_shift = 5; 310 info->info.scsi.io_shift = 5;
311 info->info.ifcfg.clockrate = 24; /* MHz */ 311 info->info.ifcfg.clockrate = 24; /* MHz */
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index e85c40b6e19b..6206a666a8ec 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2176,7 +2176,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
2176 fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; 2176 fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
2177 fn(info, SCpnt, result); 2177 fn(info, SCpnt, result);
2178 2178
2179 if (info->scsi.irq != NO_IRQ) { 2179 if (info->scsi.irq) {
2180 spin_lock_irqsave(&info->host_lock, flags); 2180 spin_lock_irqsave(&info->host_lock, flags);
2181 if (info->scsi.phase == PHASE_IDLE) 2181 if (info->scsi.phase == PHASE_IDLE)
2182 fas216_kick(info); 2182 fas216_kick(info);
@@ -2276,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
2276 * We should only be using this if we don't have an interrupt. 2276 * We should only be using this if we don't have an interrupt.
2277 * Provide some "incentive" to use the queueing code. 2277 * Provide some "incentive" to use the queueing code.
2278 */ 2278 */
2279 BUG_ON(info->scsi.irq != NO_IRQ); 2279 BUG_ON(info->scsi.irq);
2280 2280
2281 info->internal_done = 0; 2281 info->internal_done = 0;
2282 fas216_queue_command_lck(SCpnt, fas216_internal_done); 2282 fas216_queue_command_lck(SCpnt, fas216_internal_done);
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 84b7127c0121..df2e1b3ddfe2 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -12,10 +12,6 @@
12#ifndef FAS216_H 12#ifndef FAS216_H
13#define FAS216_H 13#define FAS216_H
14 14
15#ifndef NO_IRQ
16#define NO_IRQ 255
17#endif
18
19#include <scsi/scsi_eh.h> 15#include <scsi/scsi_eh.h>
20 16
21#include "queue.h" 17#include "queue.h"
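
The arxescsi/fas216 hunks above drop the driver-private NO_IRQ value (255) in favour of the convention that an IRQ number of 0 means "no interrupt", so presence checks become plain truth tests. A hypothetical helper (not from the patch; the function name and polled fallback are invented for illustration) showing the idiom:

#include <linux/device.h>
#include <linux/interrupt.h>

/*
 * Hypothetical helper, not from this patch: with the private NO_IRQ
 * definition gone, an IRQ number of 0 simply means "no interrupt".
 */
static int example_setup_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, void *cookie)
{
	if (!irq) {				/* was: irq == NO_IRQ */
		dev_info(dev, "no IRQ assigned, using polled I/O\n");
		return 0;
	}
	return request_irq(irq, handler, 0, dev_name(dev), cookie);
}
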
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ef7a21a6a01b..2ca5959ec3fa 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -38,6 +38,7 @@
38 38
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41#include <mach/irqs.h>
41#include <asm/mach/serial_sa1100.h> 42#include <asm/mach/serial_sa1100.h>
42 43
43/* We've been assigned a range on the "Low-density serial ports" major */ 44/* We've been assigned a range on the "Low-density serial ports" major */
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 4bde4f9821ba..e1004fb37bd9 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -16,29 +16,115 @@
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <mach/assabet.h> 18#include <mach/assabet.h>
19#include <mach/badge4.h>
20#include <asm/hardware/sa1111.h> 19#include <asm/hardware/sa1111.h>
21 20
22#ifndef CONFIG_SA1111 21#ifndef CONFIG_SA1111
23#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined." 22#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
24#endif 23#endif
25 24
26extern int usb_disabled(void); 25#define USB_STATUS 0x0118
26#define USB_RESET 0x011c
27#define USB_IRQTEST 0x0120
28
29#define USB_RESET_FORCEIFRESET (1 << 0)
30#define USB_RESET_FORCEHCRESET (1 << 1)
31#define USB_RESET_CLKGENRESET (1 << 2)
32#define USB_RESET_SIMSCALEDOWN (1 << 3)
33#define USB_RESET_USBINTTEST (1 << 4)
34#define USB_RESET_SLEEPSTBYEN (1 << 5)
35#define USB_RESET_PWRSENSELOW (1 << 6)
36#define USB_RESET_PWRCTRLLOW (1 << 7)
37
38#define USB_STATUS_IRQHCIRMTWKUP (1 << 7)
39#define USB_STATUS_IRQHCIBUFFACC (1 << 8)
40#define USB_STATUS_NIRQHCIM (1 << 9)
41#define USB_STATUS_NHCIMFCLR (1 << 10)
42#define USB_STATUS_USBPWRSENSE (1 << 11)
27 43
28/*-------------------------------------------------------------------------*/ 44#if 0
45static void dump_hci_status(struct usb_hcd *hcd, const char *label)
46{
47 unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
48
49 dbg("%s USB_STATUS = { %s%s%s%s%s}", label,
50 ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
51 ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
52 ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
53 ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
54 ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
55}
56#endif
29 57
30static void sa1111_start_hc(struct sa1111_dev *dev) 58static int ohci_sa1111_reset(struct usb_hcd *hcd)
31{ 59{
32 unsigned int usb_rst = 0; 60 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
61
62 ohci_hcd_init(ohci);
63 return ohci_init(ohci);
64}
33 65
34 printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n", 66static int __devinit ohci_sa1111_start(struct usb_hcd *hcd)
35 __FILE__); 67{
68 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
69 int ret;
36 70
37#ifdef CONFIG_SA1100_BADGE4 71 ret = ohci_run(ohci);
38 if (machine_is_badge4()) { 72 if (ret < 0) {
39 badge4_set_5V(BADGE4_5V_USB, 1); 73 ohci_err(ohci, "can't start\n");
74 ohci_stop(hcd);
40 } 75 }
76 return ret;
77}
78
79static const struct hc_driver ohci_sa1111_hc_driver = {
80 .description = hcd_name,
81 .product_desc = "SA-1111 OHCI",
82 .hcd_priv_size = sizeof(struct ohci_hcd),
83
84 /*
85 * generic hardware linkage
86 */
87 .irq = ohci_irq,
88 .flags = HCD_USB11 | HCD_MEMORY,
89
90 /*
91 * basic lifecycle operations
92 */
93 .reset = ohci_sa1111_reset,
94 .start = ohci_sa1111_start,
95 .stop = ohci_stop,
96 .shutdown = ohci_shutdown,
97
98 /*
99 * managing i/o requests and associated device resources
100 */
101 .urb_enqueue = ohci_urb_enqueue,
102 .urb_dequeue = ohci_urb_dequeue,
103 .endpoint_disable = ohci_endpoint_disable,
104
105 /*
106 * scheduling support
107 */
108 .get_frame_number = ohci_get_frame,
109
110 /*
111 * root hub support
112 */
113 .hub_status_data = ohci_hub_status_data,
114 .hub_control = ohci_hub_control,
115#ifdef CONFIG_PM
116 .bus_suspend = ohci_bus_suspend,
117 .bus_resume = ohci_bus_resume,
41#endif 118#endif
119 .start_port_reset = ohci_start_port_reset,
120};
121
122static int sa1111_start_hc(struct sa1111_dev *dev)
123{
124 unsigned int usb_rst = 0;
125 int ret;
126
127 dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
42 128
43 if (machine_is_xp860() || 129 if (machine_is_xp860() ||
44 machine_has_neponset() || 130 machine_has_neponset() ||
@@ -51,220 +137,121 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
51 * host controller in reset. 137 * host controller in reset.
52 */ 138 */
53 sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET, 139 sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
54 dev->mapbase + SA1111_USB_RESET); 140 dev->mapbase + USB_RESET);
55 141
56 /* 142 /*
57 * Now, carefully enable the USB clock, and take 143 * Now, carefully enable the USB clock, and take
58 * the USB host controller out of reset. 144 * the USB host controller out of reset.
59 */ 145 */
60 sa1111_enable_device(dev); 146 ret = sa1111_enable_device(dev);
61 udelay(11); 147 if (ret == 0) {
62 sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET); 148 udelay(11);
149 sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
150 }
151
152 return ret;
63} 153}
64 154
65static void sa1111_stop_hc(struct sa1111_dev *dev) 155static void sa1111_stop_hc(struct sa1111_dev *dev)
66{ 156{
67 unsigned int usb_rst; 157 unsigned int usb_rst;
68 printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n", 158
69 __FILE__); 159 dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
70 160
71 /* 161 /*
72 * Put the USB host controller into reset. 162 * Put the USB host controller into reset.
73 */ 163 */
74 usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET); 164 usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
75 sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET, 165 sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
76 dev->mapbase + SA1111_USB_RESET); 166 dev->mapbase + USB_RESET);
77 167
78 /* 168 /*
79 * Stop the USB clock. 169 * Stop the USB clock.
80 */ 170 */
81 sa1111_disable_device(dev); 171 sa1111_disable_device(dev);
82
83#ifdef CONFIG_SA1100_BADGE4
84 if (machine_is_badge4()) {
85 /* Disable power to the USB bus */
86 badge4_set_5V(BADGE4_5V_USB, 0);
87 }
88#endif
89}
90
91
92/*-------------------------------------------------------------------------*/
93
94#if 0
95static void dump_hci_status(struct usb_hcd *hcd, const char *label)
96{
97 unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);
98
99 dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
100 ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
101 ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
102 ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
103 ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
104 ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
105} 172}
106#endif
107
108/*-------------------------------------------------------------------------*/
109
110/* configure so an HC device and id are always provided */
111/* always called with process context; sleeping is OK */
112
113 173
114/** 174/**
115 * usb_hcd_sa1111_probe - initialize SA-1111-based HCDs 175 * ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
116 * Context: !in_interrupt()
117 * 176 *
118 * Allocates basic resources for this USB host controller, and 177 * Allocates basic resources for this USB host controller, and
119 * then invokes the start() method for the HCD associated with it 178 * then invokes the start() method for the HCD associated with it.
120 * through the hotplug entry's driver_data.
121 *
122 * Store this function in the HCD's struct pci_driver as probe().
123 */ 179 */
124int usb_hcd_sa1111_probe (const struct hc_driver *driver, 180static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
125 struct sa1111_dev *dev)
126{ 181{
127 struct usb_hcd *hcd; 182 struct usb_hcd *hcd;
128 int retval; 183 int ret;
129 184
130 hcd = usb_create_hcd (driver, &dev->dev, "sa1111"); 185 if (usb_disabled())
186 return -ENODEV;
187
188 hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
131 if (!hcd) 189 if (!hcd)
132 return -ENOMEM; 190 return -ENOMEM;
191
133 hcd->rsrc_start = dev->res.start; 192 hcd->rsrc_start = dev->res.start;
134 hcd->rsrc_len = resource_size(&dev->res); 193 hcd->rsrc_len = resource_size(&dev->res);
135 194
136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 195 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
137 dbg("request_mem_region failed"); 196 dbg("request_mem_region failed");
138 retval = -EBUSY; 197 ret = -EBUSY;
139 goto err1; 198 goto err1;
140 } 199 }
200
141 hcd->regs = dev->mapbase; 201 hcd->regs = dev->mapbase;
142 202
143 sa1111_start_hc(dev); 203 ret = sa1111_start_hc(dev);
144 ohci_hcd_init(hcd_to_ohci(hcd)); 204 if (ret)
205 goto err2;
145 206
146 retval = usb_add_hcd(hcd, dev->irq[1], 0); 207 ret = usb_add_hcd(hcd, dev->irq[1], 0);
147 if (retval == 0) 208 if (ret == 0)
148 return retval; 209 return ret;
149 210
150 sa1111_stop_hc(dev); 211 sa1111_stop_hc(dev);
212 err2:
151 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 213 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
152 err1: 214 err1:
153 usb_put_hcd(hcd); 215 usb_put_hcd(hcd);
154 return retval; 216 return ret;
155} 217}
156 218
157
158/* may be called without controller electrically present */
159/* may be called with controller, bus, and devices active */
160
161/** 219/**
162 * usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs 220 * ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
163 * @dev: USB Host Controller being removed 221 * @dev: USB Host Controller being removed
164 * Context: !in_interrupt()
165 *
166 * Reverses the effect of usb_hcd_sa1111_probe(), first invoking
167 * the HCD's stop() method. It is always called from a thread
168 * context, normally "rmmod", "apmd", or something similar.
169 * 222 *
223 * Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
224 * the HCD's stop() method.
170 */ 225 */
171void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev) 226static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
172{ 227{
228 struct usb_hcd *hcd = sa1111_get_drvdata(dev);
229
173 usb_remove_hcd(hcd); 230 usb_remove_hcd(hcd);
174 sa1111_stop_hc(dev); 231 sa1111_stop_hc(dev);
175 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 232 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
176 usb_put_hcd(hcd); 233 usb_put_hcd(hcd);
177}
178
179/*-------------------------------------------------------------------------*/
180 234
181static int __devinit
182ohci_sa1111_start (struct usb_hcd *hcd)
183{
184 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
185 int ret;
186
187 if ((ret = ohci_init(ohci)) < 0)
188 return ret;
189
190 if ((ret = ohci_run (ohci)) < 0) {
191 err ("can't start %s", hcd->self.bus_name);
192 ohci_stop (hcd);
193 return ret;
194 }
195 return 0; 235 return 0;
196} 236}
197 237
198/*-------------------------------------------------------------------------*/ 238static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
199
200static const struct hc_driver ohci_sa1111_hc_driver = {
201 .description = hcd_name,
202 .product_desc = "SA-1111 OHCI",
203 .hcd_priv_size = sizeof(struct ohci_hcd),
204
205 /*
206 * generic hardware linkage
207 */
208 .irq = ohci_irq,
209 .flags = HCD_USB11 | HCD_MEMORY,
210
211 /*
212 * basic lifecycle operations
213 */
214 .start = ohci_sa1111_start,
215 .stop = ohci_stop,
216
217 /*
218 * managing i/o requests and associated device resources
219 */
220 .urb_enqueue = ohci_urb_enqueue,
221 .urb_dequeue = ohci_urb_dequeue,
222 .endpoint_disable = ohci_endpoint_disable,
223
224 /*
225 * scheduling support
226 */
227 .get_frame_number = ohci_get_frame,
228
229 /*
230 * root hub support
231 */
232 .hub_status_data = ohci_hub_status_data,
233 .hub_control = ohci_hub_control,
234#ifdef CONFIG_PM
235 .bus_suspend = ohci_bus_suspend,
236 .bus_resume = ohci_bus_resume,
237#endif
238 .start_port_reset = ohci_start_port_reset,
239};
240
241/*-------------------------------------------------------------------------*/
242
243static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
244{
245 int ret;
246
247 if (usb_disabled())
248 return -ENODEV;
249
250 ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
251 return ret;
252}
253
254static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
255{ 239{
256 struct usb_hcd *hcd = sa1111_get_drvdata(dev); 240 struct usb_hcd *hcd = sa1111_get_drvdata(dev);
257 241
258 usb_hcd_sa1111_remove(hcd, dev); 242 if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
259 return 0; 243 hcd->driver->shutdown(hcd);
244 sa1111_stop_hc(dev);
245 }
260} 246}
261 247
262static struct sa1111_driver ohci_hcd_sa1111_driver = { 248static struct sa1111_driver ohci_hcd_sa1111_driver = {
263 .drv = { 249 .drv = {
264 .name = "sa1111-ohci", 250 .name = "sa1111-ohci",
251 .owner = THIS_MODULE,
265 }, 252 },
266 .devid = SA1111_DEVID_USB, 253 .devid = SA1111_DEVID_USB,
267 .probe = ohci_hcd_sa1111_drv_probe, 254 .probe = ohci_hcd_sa1111_probe,
268 .remove = ohci_hcd_sa1111_drv_remove, 255 .remove = ohci_hcd_sa1111_remove,
256 .shutdown = ohci_hcd_sa1111_shutdown,
269}; 257};
270
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index 98d55d0e2da5..b6325848ad61 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -173,282 +173,48 @@
173#include <linux/init.h> 173#include <linux/init.h>
174#include <linux/ioport.h> 174#include <linux/ioport.h>
175#include <linux/cpufreq.h> 175#include <linux/cpufreq.h>
176#include <linux/gpio.h>
176#include <linux/platform_device.h> 177#include <linux/platform_device.h>
177#include <linux/dma-mapping.h> 178#include <linux/dma-mapping.h>
178#include <linux/mutex.h> 179#include <linux/mutex.h>
179#include <linux/io.h> 180#include <linux/io.h>
180 181
182#include <video/sa1100fb.h>
183
181#include <mach/hardware.h> 184#include <mach/hardware.h>
182#include <asm/mach-types.h> 185#include <asm/mach-types.h>
183#include <mach/assabet.h>
184#include <mach/shannon.h> 186#include <mach/shannon.h>
185 187
186/* 188/*
187 * debugging?
188 */
189#define DEBUG 0
190/*
191 * Complain if VAR is out of range. 189 * Complain if VAR is out of range.
192 */ 190 */
193#define DEBUG_VAR 1 191#define DEBUG_VAR 1
194 192
195#undef ASSABET_PAL_VIDEO
196
197#include "sa1100fb.h" 193#include "sa1100fb.h"
198 194
199extern void (*sa1100fb_backlight_power)(int on); 195static const struct sa1100fb_rgb rgb_4 = {
200extern void (*sa1100fb_lcd_power)(int on);
201
202static struct sa1100fb_rgb rgb_4 = {
203 .red = { .offset = 0, .length = 4, }, 196 .red = { .offset = 0, .length = 4, },
204 .green = { .offset = 0, .length = 4, }, 197 .green = { .offset = 0, .length = 4, },
205 .blue = { .offset = 0, .length = 4, }, 198 .blue = { .offset = 0, .length = 4, },
206 .transp = { .offset = 0, .length = 0, }, 199 .transp = { .offset = 0, .length = 0, },
207}; 200};
208 201
209static struct sa1100fb_rgb rgb_8 = { 202static const struct sa1100fb_rgb rgb_8 = {
210 .red = { .offset = 0, .length = 8, }, 203 .red = { .offset = 0, .length = 8, },
211 .green = { .offset = 0, .length = 8, }, 204 .green = { .offset = 0, .length = 8, },
212 .blue = { .offset = 0, .length = 8, }, 205 .blue = { .offset = 0, .length = 8, },
213 .transp = { .offset = 0, .length = 0, }, 206 .transp = { .offset = 0, .length = 0, },
214}; 207};
215 208
216static struct sa1100fb_rgb def_rgb_16 = { 209static const struct sa1100fb_rgb def_rgb_16 = {
217 .red = { .offset = 11, .length = 5, }, 210 .red = { .offset = 11, .length = 5, },
218 .green = { .offset = 5, .length = 6, }, 211 .green = { .offset = 5, .length = 6, },
219 .blue = { .offset = 0, .length = 5, }, 212 .blue = { .offset = 0, .length = 5, },
220 .transp = { .offset = 0, .length = 0, }, 213 .transp = { .offset = 0, .length = 0, },
221}; 214};
222 215
223#ifdef CONFIG_SA1100_ASSABET
224#ifndef ASSABET_PAL_VIDEO
225/*
226 * The assabet uses a sharp LQ039Q2DS54 LCD module. It is actually
227 * takes an RGB666 signal, but we provide it with an RGB565 signal
228 * instead (def_rgb_16).
229 */
230static struct sa1100fb_mach_info lq039q2ds54_info __initdata = {
231 .pixclock = 171521, .bpp = 16,
232 .xres = 320, .yres = 240,
233
234 .hsync_len = 5, .vsync_len = 1,
235 .left_margin = 61, .upper_margin = 3,
236 .right_margin = 9, .lower_margin = 0,
237
238 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
239
240 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
241 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
242};
243#else
244static struct sa1100fb_mach_info pal_info __initdata = {
245 .pixclock = 67797, .bpp = 16,
246 .xres = 640, .yres = 512,
247
248 .hsync_len = 64, .vsync_len = 6,
249 .left_margin = 125, .upper_margin = 70,
250 .right_margin = 115, .lower_margin = 36,
251
252 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
253 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512),
254};
255#endif
256#endif
257
258#ifdef CONFIG_SA1100_H3600
259static struct sa1100fb_mach_info h3600_info __initdata = {
260 .pixclock = 174757, .bpp = 16,
261 .xres = 320, .yres = 240,
262
263 .hsync_len = 3, .vsync_len = 3,
264 .left_margin = 12, .upper_margin = 10,
265 .right_margin = 17, .lower_margin = 1,
266
267 .cmap_static = 1,
268
269 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
270 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
271};
272
273static struct sa1100fb_rgb h3600_rgb_16 = {
274 .red = { .offset = 12, .length = 4, },
275 .green = { .offset = 7, .length = 4, },
276 .blue = { .offset = 1, .length = 4, },
277 .transp = { .offset = 0, .length = 0, },
278};
279#endif
280
281#ifdef CONFIG_SA1100_H3100
282static struct sa1100fb_mach_info h3100_info __initdata = {
283 .pixclock = 406977, .bpp = 4,
284 .xres = 320, .yres = 240,
285
286 .hsync_len = 26, .vsync_len = 41,
287 .left_margin = 4, .upper_margin = 0,
288 .right_margin = 4, .lower_margin = 0,
289
290 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
291 .cmap_greyscale = 1,
292 .cmap_inverse = 1,
293
294 .lccr0 = LCCR0_Mono | LCCR0_4PixMono | LCCR0_Sngl | LCCR0_Pas,
295 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
296};
297#endif
298
299#ifdef CONFIG_SA1100_COLLIE
300static struct sa1100fb_mach_info collie_info __initdata = {
301 .pixclock = 171521, .bpp = 16,
302 .xres = 320, .yres = 240,
303
304 .hsync_len = 5, .vsync_len = 1,
305 .left_margin = 11, .upper_margin = 2,
306 .right_margin = 30, .lower_margin = 0,
307
308 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
309
310 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
311 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
312};
313#endif
314
315#ifdef LART_GREY_LCD
316static struct sa1100fb_mach_info lart_grey_info __initdata = {
317 .pixclock = 150000, .bpp = 4,
318 .xres = 320, .yres = 240,
319
320 .hsync_len = 1, .vsync_len = 1,
321 .left_margin = 4, .upper_margin = 0,
322 .right_margin = 2, .lower_margin = 0,
323
324 .cmap_greyscale = 1,
325 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
326
327 .lccr0 = LCCR0_Mono | LCCR0_Sngl | LCCR0_Pas | LCCR0_4PixMono,
328 .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512),
329};
330#endif
331#ifdef LART_COLOR_LCD
332static struct sa1100fb_mach_info lart_color_info __initdata = {
333 .pixclock = 150000, .bpp = 16,
334 .xres = 320, .yres = 240,
335
336 .hsync_len = 2, .vsync_len = 3,
337 .left_margin = 69, .upper_margin = 14,
338 .right_margin = 8, .lower_margin = 4,
339
340 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
341 .lccr3 = LCCR3_OutEnH | LCCR3_PixFlEdg | LCCR3_ACBsDiv(512),
342};
343#endif
344#ifdef LART_VIDEO_OUT
345static struct sa1100fb_mach_info lart_video_info __initdata = {
346 .pixclock = 39721, .bpp = 16,
347 .xres = 640, .yres = 480,
348
349 .hsync_len = 95, .vsync_len = 2,
350 .left_margin = 40, .upper_margin = 32,
351 .right_margin = 24, .lower_margin = 11,
352
353 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
354
355 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
356 .lccr3 = LCCR3_OutEnL | LCCR3_PixFlEdg | LCCR3_ACBsDiv(512),
357};
358#endif
359
360#ifdef LART_KIT01_LCD
361static struct sa1100fb_mach_info lart_kit01_info __initdata = {
362 .pixclock = 63291, .bpp = 16,
363 .xres = 640, .yres = 480,
364
365 .hsync_len = 64, .vsync_len = 3,
366 .left_margin = 122, .upper_margin = 45,
367 .right_margin = 10, .lower_margin = 10,
368
369 .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
370 .lccr3 = LCCR3_OutEnH | LCCR3_PixFlEdg
371};
372#endif
373
374#ifdef CONFIG_SA1100_SHANNON
375static struct sa1100fb_mach_info shannon_info __initdata = {
376 .pixclock = 152500, .bpp = 8,
377 .xres = 640, .yres = 480,
378
379 .hsync_len = 4, .vsync_len = 3,
380 .left_margin = 2, .upper_margin = 0,
381 .right_margin = 1, .lower_margin = 0,
382
383 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
384
385 .lccr0 = LCCR0_Color | LCCR0_Dual | LCCR0_Pas,
386 .lccr3 = LCCR3_ACBsDiv(512),
387};
388#endif
389
390 216
391 217
392static struct sa1100fb_mach_info * __init
393sa1100fb_get_machine_info(struct sa1100fb_info *fbi)
394{
395 struct sa1100fb_mach_info *inf = NULL;
396
397 /*
398 * R G B T
399 * default {11,5}, { 5,6}, { 0,5}, { 0,0}
400 * h3600 {12,4}, { 7,4}, { 1,4}, { 0,0}
401 * freebird { 8,4}, { 4,4}, { 0,4}, {12,4}
402 */
403#ifdef CONFIG_SA1100_ASSABET
404 if (machine_is_assabet()) {
405#ifndef ASSABET_PAL_VIDEO
406 inf = &lq039q2ds54_info;
407#else
408 inf = &pal_info;
409#endif
410 }
411#endif
412#ifdef CONFIG_SA1100_H3100
413 if (machine_is_h3100()) {
414 inf = &h3100_info;
415 }
416#endif
417#ifdef CONFIG_SA1100_H3600
418 if (machine_is_h3600()) {
419 inf = &h3600_info;
420 fbi->rgb[RGB_16] = &h3600_rgb_16;
421 }
422#endif
423#ifdef CONFIG_SA1100_COLLIE
424 if (machine_is_collie()) {
425 inf = &collie_info;
426 }
427#endif
428#ifdef CONFIG_SA1100_LART
429 if (machine_is_lart()) {
430#ifdef LART_GREY_LCD
431 inf = &lart_grey_info;
432#endif
433#ifdef LART_COLOR_LCD
434 inf = &lart_color_info;
435#endif
436#ifdef LART_VIDEO_OUT
437 inf = &lart_video_info;
438#endif
439#ifdef LART_KIT01_LCD
440 inf = &lart_kit01_info;
441#endif
442 }
443#endif
444#ifdef CONFIG_SA1100_SHANNON
445 if (machine_is_shannon()) {
446 inf = &shannon_info;
447 }
448#endif
449 return inf;
450}
451
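The per-board timing tables and sa1100fb_get_machine_info() removed above are not lost: the same data now lives with the board support code under arch/arm, and the driver picks it up as platform data (see the sa1100fb_init_fbinfo() and sa1100fb_probe() hunks further down). A rough sketch of what a board file is expected to do after this change; the board name is invented, and the sa11x0_register_lcd() helper is assumed to be the one added on the arch side of this series:

static struct sa1100fb_mach_info myboard_lcd_info __initdata = {
	.pixclock	= 171521,	.bpp		= 16,
	.xres		= 320,		.yres		= 240,
	.hsync_len	= 5,		.vsync_len	= 1,
	.left_margin	= 61,		.upper_margin	= 3,
	.right_margin	= 9,		.lower_margin	= 0,
	.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	.lccr0		= LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
	.lccr3		= LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
};

static void __init myboard_init(void)
{
	/* hand the LCD description to the "sa11x0-fb" device as platform data */
	sa11x0_register_lcd(&myboard_lcd_info);
}

If the helper name differs on a given tree, the same effect is achieved by pointing the platform device's dev.platform_data at the structure before it is registered.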
 static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *);
 static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state);
 
@@ -533,7 +299,7 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
 	 * is what you poke into the framebuffer to produce the
 	 * colour you requested.
 	 */
-	if (fbi->cmap_inverse) {
+	if (fbi->inf->cmap_inverse) {
 		red = 0xffff - red;
 		green = 0xffff - green;
 		blue = 0xffff - blue;
@@ -607,14 +373,14 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 		var->xres = MIN_XRES;
 	if (var->yres < MIN_YRES)
 		var->yres = MIN_YRES;
-	if (var->xres > fbi->max_xres)
-		var->xres = fbi->max_xres;
-	if (var->yres > fbi->max_yres)
-		var->yres = fbi->max_yres;
+	if (var->xres > fbi->inf->xres)
+		var->xres = fbi->inf->xres;
+	if (var->yres > fbi->inf->yres)
+		var->yres = fbi->inf->yres;
 	var->xres_virtual = max(var->xres_virtual, var->xres);
 	var->yres_virtual = max(var->yres_virtual, var->yres);
 
-	DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel);
+	dev_dbg(fbi->dev, "var->bits_per_pixel=%d\n", var->bits_per_pixel);
 	switch (var->bits_per_pixel) {
 	case 4:
 		rgbidx = RGB_4;
@@ -638,16 +404,16 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 	var->blue = fbi->rgb[rgbidx]->blue;
 	var->transp = fbi->rgb[rgbidx]->transp;
 
-	DPRINTK("RGBT length = %d:%d:%d:%d\n",
+	dev_dbg(fbi->dev, "RGBT length = %d:%d:%d:%d\n",
 		var->red.length, var->green.length, var->blue.length,
 		var->transp.length);
 
-	DPRINTK("RGBT offset = %d:%d:%d:%d\n",
+	dev_dbg(fbi->dev, "RGBT offset = %d:%d:%d:%d\n",
 		var->red.offset, var->green.offset, var->blue.offset,
 		var->transp.offset);
 
 #ifdef CONFIG_CPU_FREQ
-	printk(KERN_DEBUG "dma period = %d ps, clock = %d kHz\n",
+	dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n",
 		sa1100fb_display_dma_period(var),
 		cpufreq_get(smp_processor_id()));
 #endif
@@ -655,22 +421,10 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 	return 0;
 }
 
-static inline void sa1100fb_set_truecolor(u_int is_true_color)
+static void sa1100fb_set_visual(struct sa1100fb_info *fbi, u32 visual)
 {
-	if (machine_is_assabet()) {
-#if 1		// phase 4 or newer Assabet's
-		if (is_true_color)
-			ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
-		else
-			ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
-#else
-		// older Assabet's
-		if (is_true_color)
-			ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
-		else
-			ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
-#endif
-	}
+	if (fbi->inf->set_visual)
+		fbi->inf->set_visual(visual);
 }
 
 /*
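The Assabet-only sa1100fb_set_truecolor() logic deleted here turns into a board-supplied set_visual() callback. A minimal illustration of an equivalent hook, assuming the callback keeps the u32 visual argument used above (this is not code from the patch):

/* board file: "phase 4 or newer" Assabet behaviour expressed as a callback */
static void myboard_lcd_set_visual(u32 visual)
{
	if (visual == FB_VISUAL_TRUECOLOR)
		ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
	else
		ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
}

The board then points .set_visual at this function in its sa1100fb_mach_info.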
@@ -683,11 +437,11 @@ static int sa1100fb_set_par(struct fb_info *info)
 	struct fb_var_screeninfo *var = &info->var;
 	unsigned long palette_mem_size;
 
-	DPRINTK("set_par\n");
+	dev_dbg(fbi->dev, "set_par\n");
 
 	if (var->bits_per_pixel == 16)
 		fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
-	else if (!fbi->cmap_static)
+	else if (!fbi->inf->cmap_static)
 		fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
 	else {
 		/*
@@ -704,7 +458,7 @@ static int sa1100fb_set_par(struct fb_info *info)
 
 	palette_mem_size = fbi->palette_size * sizeof(u16);
 
-	DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size);
+	dev_dbg(fbi->dev, "palette_mem_size = 0x%08lx\n", palette_mem_size);
 
 	fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
 	fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
@@ -712,7 +466,7 @@ static int sa1100fb_set_par(struct fb_info *info)
 	/*
 	 * Set (any) board control register to handle new color depth
 	 */
-	sa1100fb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
+	sa1100fb_set_visual(fbi, fbi->fb.fix.visual);
 	sa1100fb_activate_var(var, fbi);
 
 	return 0;
@@ -728,7 +482,7 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
 	/*
 	 * Make sure the user isn't doing something stupid.
 	 */
-	if (!kspc && (fbi->fb.var.bits_per_pixel == 16 || fbi->cmap_static))
+	if (!kspc && (fbi->fb.var.bits_per_pixel == 16 || fbi->inf->cmap_static))
 		return -EINVAL;
 
 	return gen_set_cmap(cmap, kspc, con, info);
@@ -775,7 +529,7 @@ static int sa1100fb_blank(int blank, struct fb_info *info)
 	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
 	int i;
 
-	DPRINTK("sa1100fb_blank: blank=%d\n", blank);
+	dev_dbg(fbi->dev, "sa1100fb_blank: blank=%d\n", blank);
 
 	switch (blank) {
 	case FB_BLANK_POWERDOWN:
@@ -863,43 +617,43 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
 	u_int half_screen_size, yres, pcd;
 	u_long flags;
 
-	DPRINTK("Configuring SA1100 LCD\n");
+	dev_dbg(fbi->dev, "Configuring SA1100 LCD\n");
 
-	DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n",
+	dev_dbg(fbi->dev, "var: xres=%d hslen=%d lm=%d rm=%d\n",
 		var->xres, var->hsync_len,
 		var->left_margin, var->right_margin);
-	DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n",
+	dev_dbg(fbi->dev, "var: yres=%d vslen=%d um=%d bm=%d\n",
 		var->yres, var->vsync_len,
 		var->upper_margin, var->lower_margin);
 
 #if DEBUG_VAR
 	if (var->xres < 16 || var->xres > 1024)
-		printk(KERN_ERR "%s: invalid xres %d\n",
+		dev_err(fbi->dev, "%s: invalid xres %d\n",
 			fbi->fb.fix.id, var->xres);
 	if (var->hsync_len < 1 || var->hsync_len > 64)
-		printk(KERN_ERR "%s: invalid hsync_len %d\n",
+		dev_err(fbi->dev, "%s: invalid hsync_len %d\n",
 			fbi->fb.fix.id, var->hsync_len);
 	if (var->left_margin < 1 || var->left_margin > 255)
-		printk(KERN_ERR "%s: invalid left_margin %d\n",
+		dev_err(fbi->dev, "%s: invalid left_margin %d\n",
 			fbi->fb.fix.id, var->left_margin);
 	if (var->right_margin < 1 || var->right_margin > 255)
-		printk(KERN_ERR "%s: invalid right_margin %d\n",
+		dev_err(fbi->dev, "%s: invalid right_margin %d\n",
 			fbi->fb.fix.id, var->right_margin);
 	if (var->yres < 1 || var->yres > 1024)
-		printk(KERN_ERR "%s: invalid yres %d\n",
+		dev_err(fbi->dev, "%s: invalid yres %d\n",
 			fbi->fb.fix.id, var->yres);
 	if (var->vsync_len < 1 || var->vsync_len > 64)
-		printk(KERN_ERR "%s: invalid vsync_len %d\n",
+		dev_err(fbi->dev, "%s: invalid vsync_len %d\n",
 			fbi->fb.fix.id, var->vsync_len);
 	if (var->upper_margin < 0 || var->upper_margin > 255)
-		printk(KERN_ERR "%s: invalid upper_margin %d\n",
+		dev_err(fbi->dev, "%s: invalid upper_margin %d\n",
 			fbi->fb.fix.id, var->upper_margin);
 	if (var->lower_margin < 0 || var->lower_margin > 255)
-		printk(KERN_ERR "%s: invalid lower_margin %d\n",
+		dev_err(fbi->dev, "%s: invalid lower_margin %d\n",
 			fbi->fb.fix.id, var->lower_margin);
 #endif
 
-	new_regs.lccr0 = fbi->lccr0 |
+	new_regs.lccr0 = fbi->inf->lccr0 |
 		LCCR0_LEN | LCCR0_LDM | LCCR0_BAM |
 		LCCR0_ERM | LCCR0_LtlEnd | LCCR0_DMADel(0);
 
@@ -914,7 +668,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
 	 * the YRES parameter.
 	 */
 	yres = var->yres;
-	if (fbi->lccr0 & LCCR0_Dual)
+	if (fbi->inf->lccr0 & LCCR0_Dual)
 		yres /= 2;
 
 	new_regs.lccr2 =
@@ -924,14 +678,14 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
 		LCCR2_EndFrmDel(var->lower_margin);
 
 	pcd = get_pcd(var->pixclock, cpufreq_get(0));
-	new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->lccr3 |
+	new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
 		(var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
 		(var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
 
-	DPRINTK("nlccr0 = 0x%08lx\n", new_regs.lccr0);
-	DPRINTK("nlccr1 = 0x%08lx\n", new_regs.lccr1);
-	DPRINTK("nlccr2 = 0x%08lx\n", new_regs.lccr2);
-	DPRINTK("nlccr3 = 0x%08lx\n", new_regs.lccr3);
+	dev_dbg(fbi->dev, "nlccr0 = 0x%08lx\n", new_regs.lccr0);
+	dev_dbg(fbi->dev, "nlccr1 = 0x%08lx\n", new_regs.lccr1);
+	dev_dbg(fbi->dev, "nlccr2 = 0x%08lx\n", new_regs.lccr2);
+	dev_dbg(fbi->dev, "nlccr3 = 0x%08lx\n", new_regs.lccr3);
 
 	half_screen_size = var->bits_per_pixel;
 	half_screen_size = half_screen_size * var->xres * var->yres / 16;
@@ -951,9 +705,12 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
 	 * Only update the registers if the controller is enabled
 	 * and something has changed.
 	 */
-	if ((LCCR0 != fbi->reg_lccr0) || (LCCR1 != fbi->reg_lccr1) ||
-	    (LCCR2 != fbi->reg_lccr2) || (LCCR3 != fbi->reg_lccr3) ||
-	    (DBAR1 != fbi->dbar1) || (DBAR2 != fbi->dbar2))
+	if (readl_relaxed(fbi->base + LCCR0) != fbi->reg_lccr0 ||
+	    readl_relaxed(fbi->base + LCCR1) != fbi->reg_lccr1 ||
+	    readl_relaxed(fbi->base + LCCR2) != fbi->reg_lccr2 ||
+	    readl_relaxed(fbi->base + LCCR3) != fbi->reg_lccr3 ||
+	    readl_relaxed(fbi->base + DBAR1) != fbi->dbar1 ||
+	    readl_relaxed(fbi->base + DBAR2) != fbi->dbar2)
 		sa1100fb_schedule_work(fbi, C_REENABLE);
 
 	return 0;
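From this point on the driver no longer touches the statically mapped LCCR*/DBAR*/LCSR macros; it goes through the ioremap()ed base kept in fbi->base, using readl_relaxed()/writel_relaxed() with the register offsets defined in the reworked sa1100fb.h (last hunks of this patch). A minimal sketch of the access pattern; the helper names are invented for illustration, the patch itself open-codes the calls:

/* fbi->base is set up in sa1100fb_probe() via ioremap() */
static inline u32 lcd_readl(struct sa1100fb_info *fbi, unsigned int off)
{
	return readl_relaxed(fbi->base + off);
}

static inline void lcd_writel(struct sa1100fb_info *fbi, unsigned int off, u32 val)
{
	writel_relaxed(val, fbi->base + off);
}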
@@ -967,18 +724,18 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
  */
 static inline void __sa1100fb_backlight_power(struct sa1100fb_info *fbi, int on)
 {
-	DPRINTK("backlight o%s\n", on ? "n" : "ff");
+	dev_dbg(fbi->dev, "backlight o%s\n", on ? "n" : "ff");
 
-	if (sa1100fb_backlight_power)
-		sa1100fb_backlight_power(on);
+	if (fbi->inf->backlight_power)
+		fbi->inf->backlight_power(on);
 }
 
 static inline void __sa1100fb_lcd_power(struct sa1100fb_info *fbi, int on)
 {
-	DPRINTK("LCD power o%s\n", on ? "n" : "ff");
+	dev_dbg(fbi->dev, "LCD power o%s\n", on ? "n" : "ff");
 
-	if (sa1100fb_lcd_power)
-		sa1100fb_lcd_power(on);
+	if (fbi->inf->lcd_power)
+		fbi->inf->lcd_power(on);
 }
 
 static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
@@ -1008,14 +765,25 @@ static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
 	}
 
 	if (mask) {
+		unsigned long flags;
+
+		/*
+		 * SA-1100 requires the GPIO direction register set
+		 * appropriately for the alternate function. Hence
+		 * we set it here via bitmask rather than excessive
+		 * fiddling via the GPIO subsystem - and even then
+		 * we'll still have to deal with GAFR.
+		 */
+		local_irq_save(flags);
 		GPDR |= mask;
 		GAFR |= mask;
+		local_irq_restore(flags);
 	}
 }
 
 static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
 {
-	DPRINTK("Enabling LCD controller\n");
+	dev_dbg(fbi->dev, "Enabling LCD controller\n");
 
 	/*
 	 * Make sure the mode bits are present in the first palette entry
@@ -1024,43 +792,46 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
 	fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);
 
 	/* Sequence from 11.7.10 */
-	LCCR3 = fbi->reg_lccr3;
-	LCCR2 = fbi->reg_lccr2;
-	LCCR1 = fbi->reg_lccr1;
-	LCCR0 = fbi->reg_lccr0 & ~LCCR0_LEN;
-	DBAR1 = fbi->dbar1;
-	DBAR2 = fbi->dbar2;
-	LCCR0 |= LCCR0_LEN;
+	writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
+	writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
+	writel_relaxed(fbi->reg_lccr1, fbi->base + LCCR1);
+	writel_relaxed(fbi->reg_lccr0 & ~LCCR0_LEN, fbi->base + LCCR0);
+	writel_relaxed(fbi->dbar1, fbi->base + DBAR1);
+	writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
+	writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);
 
-	if (machine_is_shannon()) {
-		GPDR |= SHANNON_GPIO_DISP_EN;
-		GPSR |= SHANNON_GPIO_DISP_EN;
-	}
-
-	DPRINTK("DBAR1 = 0x%08x\n", DBAR1);
-	DPRINTK("DBAR2 = 0x%08x\n", DBAR2);
-	DPRINTK("LCCR0 = 0x%08x\n", LCCR0);
-	DPRINTK("LCCR1 = 0x%08x\n", LCCR1);
-	DPRINTK("LCCR2 = 0x%08x\n", LCCR2);
-	DPRINTK("LCCR3 = 0x%08x\n", LCCR3);
+	if (machine_is_shannon())
+		gpio_set_value(SHANNON_GPIO_DISP_EN, 1);
+
+	dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
+	dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
+	dev_dbg(fbi->dev, "LCCR0: 0x%08x\n", readl_relaxed(fbi->base + LCCR0));
+	dev_dbg(fbi->dev, "LCCR1: 0x%08x\n", readl_relaxed(fbi->base + LCCR1));
+	dev_dbg(fbi->dev, "LCCR2: 0x%08x\n", readl_relaxed(fbi->base + LCCR2));
+	dev_dbg(fbi->dev, "LCCR3: 0x%08x\n", readl_relaxed(fbi->base + LCCR3));
 }
 
 static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
 {
 	DECLARE_WAITQUEUE(wait, current);
+	u32 lccr0;
 
-	DPRINTK("Disabling LCD controller\n");
+	dev_dbg(fbi->dev, "Disabling LCD controller\n");
 
-	if (machine_is_shannon()) {
-		GPCR |= SHANNON_GPIO_DISP_EN;
-	}
+	if (machine_is_shannon())
+		gpio_set_value(SHANNON_GPIO_DISP_EN, 0);
 
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	add_wait_queue(&fbi->ctrlr_wait, &wait);
 
-	LCSR = 0xffffffff;	/* Clear LCD Status Register */
-	LCCR0 &= ~LCCR0_LDM;	/* Enable LCD Disable Done Interrupt */
-	LCCR0 &= ~LCCR0_LEN;	/* Disable LCD Controller */
+	/* Clear LCD Status Register */
+	writel_relaxed(~0, fbi->base + LCSR);
+
+	lccr0 = readl_relaxed(fbi->base + LCCR0);
+	lccr0 &= ~LCCR0_LDM;	/* Enable LCD Disable Done Interrupt */
+	writel_relaxed(lccr0, fbi->base + LCCR0);
+	lccr0 &= ~LCCR0_LEN;	/* Disable LCD Controller */
+	writel_relaxed(lccr0, fbi->base + LCCR0);
 
 	schedule_timeout(20 * HZ / 1000);
 	remove_wait_queue(&fbi->ctrlr_wait, &wait);
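The Shannon display-enable line is no longer driven through raw GPDR/GPSR/GPCR writes: it is claimed once with gpio_request_one() in sa1100fb_probe() (below) and toggled with gpio_set_value() in the enable/disable paths above. A small self-contained illustration of that gpiolib pattern, not literal driver code:

#include <linux/gpio.h>

static int example_display_enable(unsigned gpio)
{
	int ret;

	/* request the line and drive it low (panel off) in one call */
	ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "display enable");
	if (ret)
		return ret;

	gpio_set_value(gpio, 1);	/* panel on */
	gpio_set_value(gpio, 0);	/* panel off */
	gpio_free(gpio);
	return 0;
}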
@@ -1072,14 +843,15 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
 static irqreturn_t sa1100fb_handle_irq(int irq, void *dev_id)
 {
 	struct sa1100fb_info *fbi = dev_id;
-	unsigned int lcsr = LCSR;
+	unsigned int lcsr = readl_relaxed(fbi->base + LCSR);
 
 	if (lcsr & LCSR_LDD) {
-		LCCR0 |= LCCR0_LDM;
+		u32 lccr0 = readl_relaxed(fbi->base + LCCR0) | LCCR0_LDM;
+		writel_relaxed(lccr0, fbi->base + LCCR0);
 		wake_up(&fbi->ctrlr_wait);
 	}
 
-	LCSR = lcsr;
+	writel_relaxed(lcsr, fbi->base + LCSR);
 	return IRQ_HANDLED;
 }
 
@@ -1268,7 +1040,7 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
 	switch (val) {
 	case CPUFREQ_ADJUST:
 	case CPUFREQ_INCOMPATIBLE:
-		printk(KERN_DEBUG "min dma period: %d ps, "
+		dev_dbg(fbi->dev, "min dma period: %d ps, "
 			"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
 			policy->max);
 		/* todo: fill in min/max values */
@@ -1318,7 +1090,7 @@ static int sa1100fb_resume(struct platform_device *dev)
  * cache. Once this area is remapped, all virtual memory
  * access to the video memory should occur at the new region.
  */
-static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
+static int __devinit sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
 {
 	/*
 	 * We reserve one page for the palette, plus the size
@@ -1344,7 +1116,7 @@ static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
 }
 
 /* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs __initdata = {
+static struct fb_monspecs monspecs __devinitdata = {
 	.hfmin	= 30000,
 	.hfmax	= 70000,
 	.vfmin	= 50,
@@ -1352,10 +1124,11 @@ static struct fb_monspecs monspecs __initdata = {
 };
 
 
-static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
+static struct sa1100fb_info * __devinit sa1100fb_init_fbinfo(struct device *dev)
 {
-	struct sa1100fb_mach_info *inf;
+	struct sa1100fb_mach_info *inf = dev->platform_data;
 	struct sa1100fb_info *fbi;
+	unsigned i;
 
 	fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
 		      GFP_KERNEL);
@@ -1390,8 +1163,6 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
 	fbi->rgb[RGB_8] = &rgb_8;
 	fbi->rgb[RGB_16] = &def_rgb_16;
 
-	inf = sa1100fb_get_machine_info(fbi);
-
 	/*
 	 * People just don't seem to get this. We don't support
 	 * anything but correct entries now, so panic if someone
@@ -1402,13 +1173,10 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
 		panic("sa1100fb error: invalid LCCR3 fields set or zero "
 			"pixclock.");
 
-	fbi->max_xres = inf->xres;
 	fbi->fb.var.xres = inf->xres;
 	fbi->fb.var.xres_virtual = inf->xres;
-	fbi->max_yres = inf->yres;
 	fbi->fb.var.yres = inf->yres;
 	fbi->fb.var.yres_virtual = inf->yres;
-	fbi->max_bpp = inf->bpp;
 	fbi->fb.var.bits_per_pixel = inf->bpp;
 	fbi->fb.var.pixclock = inf->pixclock;
 	fbi->fb.var.hsync_len = inf->hsync_len;
@@ -1419,14 +1187,16 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
 	fbi->fb.var.lower_margin = inf->lower_margin;
 	fbi->fb.var.sync = inf->sync;
 	fbi->fb.var.grayscale = inf->cmap_greyscale;
-	fbi->cmap_inverse = inf->cmap_inverse;
-	fbi->cmap_static = inf->cmap_static;
-	fbi->lccr0 = inf->lccr0;
-	fbi->lccr3 = inf->lccr3;
 	fbi->state = C_STARTUP;
 	fbi->task_state = (u_char)-1;
-	fbi->fb.fix.smem_len = fbi->max_xres * fbi->max_yres *
-			       fbi->max_bpp / 8;
+	fbi->fb.fix.smem_len = inf->xres * inf->yres *
+			       inf->bpp / 8;
+	fbi->inf = inf;
+
+	/* Copy the RGB bitfield overrides */
+	for (i = 0; i < NR_RGB; i++)
+		if (inf->rgb[i])
+			fbi->rgb[i] = inf->rgb[i];
 
 	init_waitqueue_head(&fbi->ctrlr_wait);
 	INIT_WORK(&fbi->task, sa1100fb_task);
@@ -1438,13 +1208,20 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
 static int __devinit sa1100fb_probe(struct platform_device *pdev)
 {
 	struct sa1100fb_info *fbi;
+	struct resource *res;
 	int ret, irq;
 
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "no platform LCD data\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
+	if (irq < 0 || !res)
 		return -EINVAL;
 
-	if (!request_mem_region(0xb0100000, 0x10000, "LCD"))
+	if (!request_mem_region(res->start, resource_size(res), "LCD"))
 		return -EBUSY;
 
 	fbi = sa1100fb_init_fbinfo(&pdev->dev);
@@ -1452,6 +1229,10 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
 	if (!fbi)
 		goto failed;
 
+	fbi->base = ioremap(res->start, resource_size(res));
+	if (!fbi->base)
+		goto failed;
+
 	/* Initialize video memory */
 	ret = sa1100fb_map_video_memory(fbi);
 	if (ret)
@@ -1459,14 +1240,16 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
 
 	ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
 	if (ret) {
-		printk(KERN_ERR "sa1100fb: request_irq failed: %d\n", ret);
+		dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
 		goto failed;
 	}
 
-#ifdef ASSABET_PAL_VIDEO
-	if (machine_is_assabet())
-		ASSABET_BCR_clear(ASSABET_BCR_LCD_ON);
-#endif
+	if (machine_is_shannon()) {
+		ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+			GPIOF_OUT_INIT_LOW, "display enable");
+		if (ret)
+			goto err_free_irq;
+	}
 
 	/*
 	 * This makes sure that our colour bitfield
@@ -1478,7 +1261,7 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
 
 	ret = register_framebuffer(&fbi->fb);
 	if (ret < 0)
-		goto err_free_irq;
+		goto err_reg_fb;
 
 #ifdef CONFIG_CPU_FREQ
 	fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1490,12 +1273,17 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
 	/* This driver cannot be unloaded at the moment */
 	return 0;
 
+ err_reg_fb:
+	if (machine_is_shannon())
+		gpio_free(SHANNON_GPIO_DISP_EN);
  err_free_irq:
 	free_irq(irq, fbi);
  failed:
+	if (fbi)
+		iounmap(fbi->base);
 	platform_set_drvdata(pdev, NULL);
 	kfree(fbi);
-	release_mem_region(0xb0100000, 0x10000);
+	release_mem_region(res->start, resource_size(res));
 	return ret;
 }
 
@@ -1505,6 +1293,7 @@ static struct platform_driver sa1100fb_driver = {
 	.resume		= sa1100fb_resume,
 	.driver		= {
 		.name	= "sa11x0-fb",
+		.owner	= THIS_MODULE,
 	},
 };
 
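The header changes that follow drop the driver-private sa1100fb_rgb and sa1100fb_mach_info definitions; boards now provide them from the platform side. Judging only by the members the driver dereferences above (inf->backlight_power, inf->lcd_power, inf->set_visual, inf->rgb[]), a board can also override the pixel layout and hook power control. An illustrative fragment with hypothetical names, assuming the relocated structure keeps those members:

static const struct sa1100fb_rgb myboard_rgb_16 = {
	.red	= { .offset = 12, .length = 4, },
	.green	= { .offset = 7,  .length = 4, },
	.blue	= { .offset = 1,  .length = 4, },
};

static void myboard_lcd_power(int on)
{
	/* switch the panel supply here, e.g. via a board GPIO or the BCR */
}

static struct sa1100fb_mach_info myboard_lcd_info __initdata = {
	/* timing fields as in the earlier sketch */
	.lcd_power	= myboard_lcd_power,
	.rgb[RGB_16]	= &myboard_rgb_16,
};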
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index 1c3b459865d8..fc5d4292fad6 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -10,44 +10,15 @@
  * for more details.
  */
 
-/*
- * These are the bitfields for each
- * display depth that we support.
- */
-struct sa1100fb_rgb {
-	struct fb_bitfield	red;
-	struct fb_bitfield	green;
-	struct fb_bitfield	blue;
-	struct fb_bitfield	transp;
-};
-
-/*
- * This structure describes the machine which we are running on.
- */
-struct sa1100fb_mach_info {
-	u_long pixclock;
-
-	u_short xres;
-	u_short yres;
-
-	u_char bpp;
-	u_char hsync_len;
-	u_char left_margin;
-	u_char right_margin;
-
-	u_char vsync_len;
-	u_char upper_margin;
-	u_char lower_margin;
-	u_char sync;
-
-	u_int cmap_greyscale:1,
-		cmap_inverse:1,
-		cmap_static:1,
-		unused:29;
-
-	u_int lccr0;
-	u_int lccr3;
-};
+#define LCCR0	0x0000	/* LCD Control Reg. 0 */
+#define LCSR	0x0004	/* LCD Status Reg. */
+#define DBAR1	0x0010	/* LCD DMA Base Address Reg. channel 1 */
+#define DCAR1	0x0014	/* LCD DMA Current Address Reg. channel 1 */
+#define DBAR2	0x0018	/* LCD DMA Base Address Reg. channel 2 */
+#define DCAR2	0x001C	/* LCD DMA Current Address Reg. channel 2 */
+#define LCCR1	0x0020	/* LCD Control Reg. 1 */
+#define LCCR2	0x0024	/* LCD Control Reg. 2 */
+#define LCCR3	0x0028	/* LCD Control Reg. 3 */
 
 /* Shadows for LCD controller registers */
 struct sa1100fb_lcd_reg {
@@ -57,19 +28,11 @@ struct sa1100fb_lcd_reg {
 	unsigned long lccr3;
 };
 
-#define RGB_4	(0)
-#define RGB_8	(1)
-#define RGB_16	(2)
-#define NR_RGB	3
-
 struct sa1100fb_info {
 	struct fb_info		fb;
 	struct device		*dev;
-	struct sa1100fb_rgb	*rgb[NR_RGB];
-
-	u_int			max_bpp;
-	u_int			max_xres;
-	u_int			max_yres;
+	const struct sa1100fb_rgb *rgb[NR_RGB];
+	void __iomem		*base;
 
 	/*
 	 * These are the addresses we mapped
@@ -88,12 +51,6 @@ struct sa1100fb_info {
 	dma_addr_t		dbar1;
 	dma_addr_t		dbar2;
 
-	u_int			lccr0;
-	u_int			lccr3;
-	u_int			cmap_inverse:1,
-				cmap_static:1,
-				unused:30;
-
 	u_int			reg_lccr0;
 	u_int			reg_lccr1;
 	u_int			reg_lccr2;
@@ -109,6 +66,8 @@ struct sa1100fb_info {
 	struct notifier_block	freq_transition;
 	struct notifier_block	freq_policy;
 #endif
+
+	const struct sa1100fb_mach_info *inf;
 };
 
 #define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
@@ -130,15 +89,6 @@ struct sa1100fb_info {
 #define SA1100_NAME	"SA1100"
 
 /*
- * Debug macros
- */
-#if DEBUG
-#  define DPRINTK(fmt, args...)	printk("%s: " fmt, __func__ , ## args)
-#else
-#  define DPRINTK(fmt, args...)
-#endif
-
-/*
  * Minimum X and Y resolutions
  */
 #define MIN_XRES	64