Diffstat (limited to 'drivers')
-rw-r--r--  drivers/amba/bus.c                     348
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c     2
-rw-r--r--  drivers/dma/amba-pl08x.c                 2
-rw-r--r--  drivers/dma/pl330.c                      2
-rw-r--r--  drivers/gpio/pl061.c                     2
-rw-r--r--  drivers/input/serio/ambakmi.c            3
-rw-r--r--  drivers/mmc/host/mmci.c                348
-rw-r--r--  drivers/mmc/host/mmci.h                 15
-rw-r--r--  drivers/rtc/rtc-pl030.c                  2
-rw-r--r--  drivers/rtc/rtc-pl031.c                  2
-rw-r--r--  drivers/spi/amba-pl022.c                 2
-rw-r--r--  drivers/tty/serial/amba-pl010.c          2
-rw-r--r--  drivers/tty/serial/amba-pl011.c        513
-rw-r--r--  drivers/video/Kconfig                   18
-rw-r--r--  drivers/video/amba-clcd.c                2
-rw-r--r--  drivers/video/cyber2000fb.c            263
-rw-r--r--  drivers/video/cyber2000fb.h             16
-rw-r--r--  drivers/watchdog/sp805_wdt.c             2
18 files changed, 1358 insertions(+), 186 deletions(-)
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e7df019d29d4..6d2bb2524b6e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -13,16 +13,17 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/amba/bus.h>
 
 #include <asm/irq.h>
 #include <asm/sizes.h>
 
-#define to_amba_device(d)	container_of(d, struct amba_device, dev)
 #define to_amba_driver(d)	container_of(d, struct amba_driver, drv)
 
-static struct amba_id *
-amba_lookup(struct amba_id *table, struct amba_device *dev)
+static const struct amba_id *
+amba_lookup(const struct amba_id *table, struct amba_device *dev)
 {
 	int ret = 0;
 
@@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
 #define amba_uevent NULL
 #endif
 
-static int amba_suspend(struct device *dev, pm_message_t state)
-{
-	struct amba_driver *drv = to_amba_driver(dev->driver);
-	int ret = 0;
-
-	if (dev->driver && drv->suspend)
-		ret = drv->suspend(to_amba_device(dev), state);
-	return ret;
-}
-
-static int amba_resume(struct device *dev)
-{
-	struct amba_driver *drv = to_amba_driver(dev->driver);
-	int ret = 0;
-
-	if (dev->driver && drv->resume)
-		ret = drv->resume(to_amba_device(dev));
-	return ret;
-}
-
 #define amba_attr_func(name,fmt,arg...)			\
 static ssize_t name##_show(struct device *_dev,		\
 		struct device_attribute *attr, char *buf)	\
@@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = {
 	__ATTR_NULL,
 };
 
+#ifdef CONFIG_PM_SLEEP
+
+static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct amba_driver *adrv = to_amba_driver(dev->driver);
+	struct amba_device *adev = to_amba_device(dev);
+	int ret = 0;
+
+	if (dev->driver && adrv->suspend)
+		ret = adrv->suspend(adev, mesg);
+
+	return ret;
+}
+
+static int amba_legacy_resume(struct device *dev)
+{
+	struct amba_driver *adrv = to_amba_driver(dev->driver);
+	struct amba_device *adev = to_amba_device(dev);
+	int ret = 0;
+
+	if (dev->driver && adrv->resume)
+		ret = adrv->resume(adev);
+
+	return ret;
+}
+
+static int amba_pm_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+static void amba_pm_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define amba_pm_prepare		NULL
+#define amba_pm_complete	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+static int amba_pm_suspend(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend)
+			ret = drv->pm->suspend(dev);
+	} else {
+		ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
+	}
+
+	return ret;
+}
+
+static int amba_pm_suspend_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend_noirq)
+			ret = drv->pm->suspend_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_resume(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume)
+			ret = drv->pm->resume(dev);
+	} else {
+		ret = amba_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_resume_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume_noirq)
+			ret = drv->pm->resume_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define amba_pm_suspend		NULL
+#define amba_pm_resume		NULL
+#define amba_pm_suspend_noirq	NULL
+#define amba_pm_resume_noirq	NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int amba_pm_freeze(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze)
+			ret = drv->pm->freeze(dev);
+	} else {
+		ret = amba_legacy_suspend(dev, PMSG_FREEZE);
+	}
+
+	return ret;
+}
+
+static int amba_pm_freeze_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze_noirq)
+			ret = drv->pm->freeze_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_thaw(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw)
+			ret = drv->pm->thaw(dev);
+	} else {
+		ret = amba_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_thaw_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw_noirq)
+			ret = drv->pm->thaw_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_poweroff(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff)
+			ret = drv->pm->poweroff(dev);
+	} else {
+		ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
+	}
+
+	return ret;
+}
+
+static int amba_pm_poweroff_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff_noirq)
+			ret = drv->pm->poweroff_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_restore(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore)
+			ret = drv->pm->restore(dev);
+	} else {
+		ret = amba_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int amba_pm_restore_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore_noirq)
+			ret = drv->pm->restore_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define amba_pm_freeze		NULL
+#define amba_pm_thaw		NULL
+#define amba_pm_poweroff	NULL
+#define amba_pm_restore		NULL
+#define amba_pm_freeze_noirq	NULL
+#define amba_pm_thaw_noirq	NULL
+#define amba_pm_poweroff_noirq	NULL
+#define amba_pm_restore_noirq	NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops amba_pm = {
+	.prepare	= amba_pm_prepare,
+	.complete	= amba_pm_complete,
+	.suspend	= amba_pm_suspend,
+	.resume		= amba_pm_resume,
+	.freeze		= amba_pm_freeze,
+	.thaw		= amba_pm_thaw,
+	.poweroff	= amba_pm_poweroff,
+	.restore	= amba_pm_restore,
+	.suspend_noirq	= amba_pm_suspend_noirq,
+	.resume_noirq	= amba_pm_resume_noirq,
+	.freeze_noirq	= amba_pm_freeze_noirq,
+	.thaw_noirq	= amba_pm_thaw_noirq,
+	.poweroff_noirq	= amba_pm_poweroff_noirq,
+	.restore_noirq	= amba_pm_restore_noirq,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_runtime_suspend,
+		pm_generic_runtime_resume,
+		pm_generic_runtime_idle
+	)
+};
+
+#define AMBA_PM (&amba_pm)
+
+#else /* !CONFIG_PM */
+
+#define AMBA_PM	NULL
+
+#endif /* !CONFIG_PM */
+
 /*
  * Primecells are part of the Advanced Microcontroller Bus Architecture,
  * so we call the bus "amba".
  */
-static struct bus_type amba_bustype = {
+struct bus_type amba_bustype = {
 	.name		= "amba",
 	.dev_attrs	= amba_dev_attrs,
 	.match		= amba_match,
 	.uevent		= amba_uevent,
-	.suspend	= amba_suspend,
-	.resume		= amba_resume,
+	.pm		= AMBA_PM,
 };
 
 static int __init amba_init(void)
@@ -188,7 +482,7 @@ static int amba_probe(struct device *dev)
 {
 	struct amba_device *pcdev = to_amba_device(dev);
 	struct amba_driver *pcdrv = to_amba_driver(dev->driver);
-	struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
+	const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
 	int ret;
 
 	do {
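The net effect of the bus.c hunks above: system sleep for AMBA devices is now dispatched through a bus-level struct dev_pm_ops, with amba_legacy_suspend()/amba_legacy_resume() kept as a fallback for drivers still using the old amba_driver hooks, and runtime PM wired to the pm_generic_* helpers. A minimal driver-side sketch of the new-style arrangement follows; the "foo" driver, its callbacks, and the use of SIMPLE_DEV_PM_OPS() are illustrative assumptions, not part of this patch.

/*
 * Sketch only (not from this patch): a driver publishing dev_pm_ops,
 * which the AMBA bus code above dispatches to directly instead of the
 * legacy amba_driver .suspend/.resume hooks.
 */
#include <linux/amba/bus.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the device; SIMPLE_DEV_PM_OPS reuses this callback
	 * for freeze/poweroff during hibernation as well */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct amba_driver foo_driver = {
	.drv = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* preferred over legacy hooks */
	},
	/* .probe, .remove and .id_table omitted */
};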
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a348c7e9aa0b..dd1d143eb8ea 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -39,7 +39,7 @@ static struct hwrng nmk_rng = {
 	.read		= nmk_rng_read,
 };
 
-static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	void __iomem *base;
 	int ret;
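This hunk, like the matching one-liners in the drivers below, is mechanical fallout from the amba_lookup() constification in bus.c: every probe() now takes a const struct amba_id *, so match tables can live in read-only memory. A hedged sketch of the resulting pattern, with a hypothetical "foo" driver and a made-up ID value:

static const struct amba_id foo_ids[] = {
	{
		.id	= 0x00041234,	/* fictitious PrimeCell part number */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static int foo_probe(struct amba_device *dev, const struct amba_id *id)
{
	/* 'id' points into foo_ids[] at the entry matching dev->periphid;
	 * id->data commonly carries per-variant data (see pl08x below) */
	return 0;
}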
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 07bca4970e50..e6d7228b1479 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 }
 #endif
 
-static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7c50f6dfd3f4..6abe1ec1f2ce 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
 }
 
 static int __devinit
-pl330_probe(struct amba_device *adev, struct amba_id *id)
+pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct dma_pl330_platdata *pdat;
 	struct dma_pl330_dmac *pdmac;
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 2975d22daffe..838ddbdf90cc 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
-static int pl061_probe(struct amba_device *dev, struct amba_id *id)
+static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct pl061_platform_data *pdata;
 	struct pl061_gpio *chip;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 92563a681d65..12abc50508e5 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io)
 	clk_disable(kmi->clk);
 }
 
-static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev,
+	const struct amba_id *id)
 {
 	struct amba_kmi_port *kmi;
 	struct serio *io;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2d..5bbb87d10251 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
  *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *  Copyright (C) 2010 ST-Ericsson AB.
+ *  Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
-#include <linux/amba/mmci.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
 
 #include <asm/div64.h>
 #include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	host->mrq = NULL;
 	host->cmd = NULL;
 
-	if (mrq->data)
-		mrq->data->bytes_xfered = host->data_xfered;
-
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }
 
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+	const char *rxname, *txname;
+	dma_cap_mask_t mask;
+
+	if (!plat || !plat->dma_filter) {
+		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally, however if it
+	 * is specified but cannot be located, DMA will be disabled.
+	 */
+	if (plat->dma_rx_param) {
+		host->dma_rx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_rx_param);
+		/* E.g. if no DMA hardware is present */
+		if (!host->dma_rx_channel)
+			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+	}
+
+	if (plat->dma_tx_param) {
+		host->dma_tx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_tx_param);
+		if (!host->dma_tx_channel)
+			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+	} else {
+		host->dma_tx_channel = host->dma_rx_channel;
+	}
+
+	if (host->dma_rx_channel)
+		rxname = dma_chan_name(host->dma_rx_channel);
+	else
+		rxname = "none";
+
+	if (host->dma_tx_channel)
+		txname = dma_chan_name(host->dma_tx_channel);
+	else
+		txname = "none";
+
+	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+		 rxname, txname);
+
+	/*
+	 * Limit the maximum segment size in any SG entry according to
+	 * the parameters of the DMA engine device.
+	 */
+	if (host->dma_tx_channel) {
+		struct device *dev = host->dma_tx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+	if (host->dma_rx_channel) {
+		struct device *dev = host->dma_rx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+
+	if (host->dma_rx_channel)
+		dma_release_channel(host->dma_rx_channel);
+	if (host->dma_tx_channel && plat->dma_tx_param)
+		dma_release_channel(host->dma_tx_channel);
+	host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+	struct dma_chan *chan = host->dma_current;
+	enum dma_data_direction dir;
+	u32 status;
+	int i;
+
+	/* Wait up to 1ms for the DMA to complete */
+	for (i = 0; ; i++) {
+		status = readl(host->base + MMCISTATUS);
+		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Check to see whether we still have some data left in the FIFO -
+	 * this catches DMA controllers which are unable to monitor the
+	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dmaengine_terminate_all(chan);
+		if (!data->error)
+			data->error = -EIO;
+	}
+
+	if (data->flags & MMC_DATA_WRITE) {
+		dir = DMA_TO_DEVICE;
+	} else {
+		dir = DMA_FROM_DEVICE;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+	/*
+	 * Use of DMA with scatter-gather is impossible.
+	 * Give up with DMA and switch back to PIO mode.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+		mmci_dma_release(host);
+	}
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	struct variant_data *variant = host->variant;
+	struct dma_slave_config conf = {
+		.src_addr = host->phybase + MMCIFIFO,
+		.dst_addr = host->phybase + MMCIFIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+	};
+	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *desc;
+	int nr_sg;
+
+	host->dma_current = NULL;
+
+	if (data->flags & MMC_DATA_READ) {
+		conf.direction = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		conf.direction = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	/* If there's no DMA channel, fall back to PIO */
+	if (!chan)
+		return -EINVAL;
+
+	/* If less than or equal to the fifo size, don't bother with DMA */
+	if (host->size <= variant->fifosize)
+		return -EINVAL;
+
+	device = chan->device;
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	if (nr_sg == 0)
+		return -EINVAL;
+
+	dmaengine_slave_config(chan, &conf);
+	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+					    conf.direction, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_exit;
+
+	/* Okay, go for it. */
+	host->dma_current = chan;
+
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	datactrl |= MCI_DPSM_DMAENABLE;
+
+	/* Trigger the DMA transfer */
+	writel(datactrl, host->base + MMCIDATACTRL);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+
+unmap_exit:
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->size = data->blksz * data->blocks;
-	host->data_xfered = 0;
-
-	mmci_init_sg(host, data);
+	data->bytes_xfered = 0;
 
 	clks = (unsigned long long)data->timeout_ns * host->cclk;
 	do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	BUG_ON(1 << blksz_bits != data->blksz);
 
 	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-	if (data->flags & MMC_DATA_READ) {
+
+	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
+
+	/*
+	 * Attempt to use DMA operation mode, if this
+	 * should fail, fall back to PIO mode
+	 */
+	if (!mmci_dma_start_data(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
 		irqmask = MCI_RXFIFOHALFFULLMASK;
 
 		/*
-		 * If we have less than a FIFOSIZE of bytes to transfer,
-		 * trigger a PIO interrupt as soon as any data is available.
+		 * If we have less than the fifo 'half-full' threshold to
+		 * transfer, trigger a PIO interrupt as soon as any data
+		 * is available.
 		 */
-		if (host->size < variant->fifosize)
+		if (host->size < variant->fifohalfsize)
 			irqmask |= MCI_RXDATAAVLBLMASK;
 	} else {
 		/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		u32 remain, success;
 
-		/* Calculate how far we are into the transfer */
+		/* Terminate the DMA transfer */
+		if (dma_inprogress(host))
+			mmci_dma_data_error(host);
+
+		/*
+		 * Calculate how far we are into the transfer. Note that
+		 * the data counter gives the number of bytes transferred
+		 * on the MMC bus, not on the host side. On reads, this
+		 * can be as much as a FIFO-worth of data ahead. This
+		 * matters for FIFO overruns only.
+		 */
 		remain = readl(host->base + MMCIDATACNT);
 		success = data->blksz * data->blocks - remain;
 
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+			status, success);
 		if (status & MCI_DATACRCFAIL) {
 			/* Last block was not successful */
-			host->data_xfered = round_down(success - 1, data->blksz);
+			success -= 1;
 			data->error = -EILSEQ;
 		} else if (status & MCI_DATATIMEOUT) {
-			host->data_xfered = round_down(success, data->blksz);
 			data->error = -ETIMEDOUT;
-		} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-			host->data_xfered = round_down(success, data->blksz);
+		} else if (status & MCI_TXUNDERRUN) {
+			data->error = -EIO;
+		} else if (status & MCI_RXOVERRUN) {
+			if (success > host->variant->fifosize)
+				success -= host->variant->fifosize;
+			else
+				success = 0;
 			data->error = -EIO;
 		}
-
-		/*
-		 * We hit an error condition.  Ensure that any data
-		 * partially written to a page is properly coherent.
-		 */
-		if (data->flags & MMC_DATA_READ) {
-			struct sg_mapping_iter *sg_miter = &host->sg_miter;
-			unsigned long flags;
-
-			local_irq_save(flags);
-			if (sg_miter_next(sg_miter)) {
-				flush_dcache_page(sg_miter->page);
-				sg_miter_stop(sg_miter);
-			}
-			local_irq_restore(flags);
-		}
+		data->bytes_xfered = round_down(success, data->blksz);
 	}
 
 	if (status & MCI_DATABLOCKEND)
 		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
 	if (status & MCI_DATAEND || data->error) {
+		if (dma_inprogress(host))
+			mmci_dma_unmap(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)
 			/* The error clause is handled above, success! */
-			host->data_xfered += data->blksz * data->blocks;
+			data->bytes_xfered = data->blksz * data->blocks;
 
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 		if (remain)
 			break;
 
-		if (status & MCI_RXACTIVE)
-			flush_dcache_page(sg_miter->page);
-
 		status = readl(base + MMCISTATUS);
 	} while (1);
 
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	local_irq_restore(flags);
 
 	/*
-	 * If we're nearing the end of the read, switch to
-	 * "any data available" mode.
+	 * If we have less than the fifo 'half-full' threshold to transfer,
+	 * trigger a PIO interrupt as soon as any data is available.
 	 */
-	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
 		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
@@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = {
 	.get_cd		= mmci_get_cd,
 };
 
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+	const struct amba_id *id)
 {
 	struct mmci_platform_data *plat = dev->dev.platform_data;
 	struct variant_data *variant = id->data;
@@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
 			host->mclk);
 	}
+	host->phybase = dev->res.start;
 	host->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!host->base) {
 		ret = -ENOMEM;
@@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 
 	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-		mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
+
+	mmci_dma_setup(host);
 
 	mmc_add_host(mmc);
 
@@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCICOMMAND);
 		writel(0, host->base + MMCIDATACTRL);
 
+		mmci_dma_release(host);
 		free_irq(dev->irq[0], host);
 		if (!host->singleirq)
 			free_irq(dev->irq[1], host);
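The heart of the MMCI change is mmci_dma_start_data(), which follows the standard dmaengine slave sequence of the period: map the scatterlist, configure the channel, prepare a slave-sg descriptor, submit it, and issue pending work. The sketch below condenses that sequence into a standalone helper; the function name is hypothetical and the error paths are trimmed relative to the real code above.

static int start_slave_dma(struct dma_chan *chan, struct scatterlist *sg,
			   unsigned int sg_len, struct dma_slave_config *conf)
{
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* 1. map the buffers for device access */
	nr_sg = dma_map_sg(chan->device->dev, sg, sg_len, conf->direction);
	if (!nr_sg)
		return -EINVAL;

	/* 2. describe the FIFO address, register width and burst size */
	dmaengine_slave_config(chan, conf);

	/* 3. build a descriptor covering the scatterlist */
	desc = chan->device->device_prep_slave_sg(chan, sg, nr_sg,
						  conf->direction, DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, sg, sg_len, conf->direction);
		return -ENOMEM;
	}

	/* 4. queue the descriptor and kick the engine */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}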
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index c1df7b82d36c..ec9a7bc6d0df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
 
 struct clk;
 struct variant_data;
+struct dma_chan;
 
 struct mmci_host {
+	phys_addr_t		phybase;
 	void __iomem		*base;
 	struct mmc_request	*mrq;
 	struct mmc_command	*cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
 	int			gpio_cd_irq;
 	bool			singleirq;
 
-	unsigned int		data_xfered;
-
 	spinlock_t		lock;
 
 	unsigned int		mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
 	struct regulator	*vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+	/* DMA stuff */
+	struct dma_chan		*dma_current;
+	struct dma_chan		*dma_rx_channel;
+	struct dma_chan		*dma_tx_channel;
+
+#define dma_inprogress(host)	((host)->dma_current)
+#else
+#define dma_inprogress(host)	(0)
+#endif
 };
 
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index d554368c9f57..1d28d4451dae 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -97,7 +97,7 @@ static const struct rtc_class_ops pl030_ops = {
 	.set_alarm	= pl030_set_alarm,
 };
 
-static int pl030_probe(struct amba_device *dev, struct amba_id *id)
+static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct pl030_rtc *rtc;
 	int ret;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index d829ea63c4fb..ff1b84bd9bb5 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -307,7 +307,7 @@ static int pl031_remove(struct amba_device *adev)
 	return 0;
 }
 
-static int pl031_probe(struct amba_device *adev, struct amba_id *id)
+static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	int ret;
 	struct pl031_local *ldata;
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 71a1219a995d..95e58c70a2c9 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -2021,7 +2021,7 @@ static void pl022_cleanup(struct spi_device *spi)
 
 
 static int __devinit
-pl022_probe(struct amba_device *adev, struct amba_id *id)
+pl022_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct device *dev = &adev->dev;
 	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..d742dd2c525c 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
 	.cons		= AMBA_CONSOLE,
 };
 
-static int pl010_probe(struct amba_device *dev, struct amba_id *id)
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct uart_amba_port *uap;
 	void __iomem *base;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..57731e870085 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
 };
 
 /* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+	struct scatterlist sg;
+	char *buf;
+};
+
+struct pl011_dmarx_data {
+	struct dma_chan		*chan;
+	struct completion	complete;
+	bool			use_buf_b;
+	struct pl011_sgbuf	sgbuf_a;
+	struct pl011_sgbuf	sgbuf_b;
+	dma_cookie_t		cookie;
+	bool			running;
+};
+
 struct pl011_dmatx_data {
 	struct dma_chan		*chan;
 	struct scatterlist	sg;
@@ -120,12 +136,70 @@ struct uart_amba_port {
 	char			type[12];
 #ifdef CONFIG_DMA_ENGINE
 	/* DMA stuff */
-	bool			using_dma;
+	bool			using_tx_dma;
+	bool			using_rx_dma;
+	struct pl011_dmarx_data dmarx;
 	struct pl011_dmatx_data	dmatx;
 #endif
 };
 
 /*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+	u16 status, ch;
+	unsigned int flag, max_count = 256;
+	int fifotaken = 0;
+
+	while (max_count--) {
+		status = readw(uap->port.membase + UART01x_FR);
+		if (status & UART01x_FR_RXFE)
+			break;
+
+		/* Take chars from the FIFO and update status */
+		ch = readw(uap->port.membase + UART01x_DR) |
+			UART_DUMMY_DR_RX;
+		flag = TTY_NORMAL;
+		uap->port.icount.rx++;
+		fifotaken++;
+
+		if (unlikely(ch & UART_DR_ERROR)) {
+			if (ch & UART011_DR_BE) {
+				ch &= ~(UART011_DR_FE | UART011_DR_PE);
+				uap->port.icount.brk++;
+				if (uart_handle_break(&uap->port))
+					continue;
+			} else if (ch & UART011_DR_PE)
+				uap->port.icount.parity++;
+			else if (ch & UART011_DR_FE)
+				uap->port.icount.frame++;
+			if (ch & UART011_DR_OE)
+				uap->port.icount.overrun++;
+
+			ch &= uap->port.read_status_mask;
+
+			if (ch & UART011_DR_BE)
+				flag = TTY_BREAK;
+			else if (ch & UART011_DR_PE)
+				flag = TTY_PARITY;
+			else if (ch & UART011_DR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(&uap->port, ch & 255))
+			continue;
+
+		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+	}
+
+	return fifotaken;
+}
+
+
+/*
  * All the DMA operation mode stuff goes inside this ifdef.
  * This assumes that you have a generic DMA device interface,
  * no custom DMA interfaces are supported.
@@ -134,6 +208,31 @@ struct uart_amba_port {
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!sg->buf)
+		return -ENOMEM;
+
+	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+		kfree(sg->buf);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+	enum dma_data_direction dir)
+{
+	if (sg->buf) {
+		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+		kfree(sg->buf);
+	}
+}
+
 static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 {
 	/* DMA is the sole user of the platform data right now */
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 		return;
 	}
 
-	/* Try to acquire a generic DMA engine slave channel */
+	/* Try to acquire a generic DMA engine slave TX channel */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 
 	dev_info(uap->port.dev, "DMA channel TX %s\n",
 		 dma_chan_name(uap->dmatx.chan));
+
+	/* Optionally make use of an RX channel as well */
+	if (plat->dma_rx_param) {
+		struct dma_slave_config rx_conf = {
+			.src_addr = uap->port.mapbase + UART01x_DR,
+			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+			.direction = DMA_FROM_DEVICE,
+			.src_maxburst = uap->fifosize >> 1,
+		};
+
+		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+		if (!chan) {
+			dev_err(uap->port.dev, "no RX DMA channel!\n");
+			return;
+		}
+
+		dmaengine_slave_config(chan, &rx_conf);
+		uap->dmarx.chan = chan;
+
+		dev_info(uap->port.dev, "DMA channel RX %s\n",
+			 dma_chan_name(uap->dmarx.chan));
+	}
 }
 
 #ifndef MODULE
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
 	/* TODO: remove the initcall if it has not yet executed */
 	if (uap->dmatx.chan)
 		dma_release_channel(uap->dmatx.chan);
+	if (uap->dmarx.chan)
+		dma_release_channel(uap->dmarx.chan);
 }
 
-
 /* Forward declare this for the refill routine */
 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
 
@@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
  */
 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 {
 	u16 dmacr;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return false;
 
 	if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 {
 	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 
-	if (!uap->using_dma)
+	if (!uap->using_tx_dma)
 		return;
 
 	/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
 	}
 }
 
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	struct dma_chan *rxchan = uap->dmarx.chan;
+	struct dma_device *dma_dev;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_async_tx_descriptor *desc;
+	struct pl011_sgbuf *sgbuf;
+
+	if (!rxchan)
+		return -EIO;
+
+	/* Start the RX DMA job */
+	sgbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dma_dev = rxchan->device;
+	desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+					DMA_FROM_DEVICE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	/*
+	 * If the DMA engine is busy and cannot prepare a
+	 * channel, no big deal, the driver will fall back
+	 * to interrupt mode as a result of this error code.
+	 */
+	if (!desc) {
+		uap->dmarx.running = false;
+		dmaengine_terminate_all(rxchan);
+		return -EBUSY;
+	}
+
+	/* Some data to go along to the callback */
+	desc->callback = pl011_dma_rx_callback;
+	desc->callback_param = uap;
+	dmarx->cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(rxchan);
+
+	uap->dmacr |= UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = true;
+
+	uap->im &= ~UART011_RXIM;
+	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+			       u32 pending, bool use_buf_b,
+			       bool readfifo)
+{
+	struct tty_struct *tty = uap->port.state->port.tty;
+	struct pl011_sgbuf *sgbuf = use_buf_b ?
+		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct device *dev = uap->dmarx.chan->device->dev;
+	int dma_count = 0;
+	u32 fifotaken = 0; /* only used for vdbg() */
+
+	/* Pick everything from the DMA first */
+	if (pending) {
+		/* Sync in buffer */
+		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		/*
+		 * First take all chars in the DMA pipe, then look in the FIFO.
+		 * Note that tty_insert_flip_buf() tries to take as many chars
+		 * as it can.
+		 */
+		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+						   sgbuf->buf, pending);
+
+		/* Return buffer to device */
+		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+		uap->port.icount.rx += dma_count;
+		if (dma_count < pending)
+			dev_warn(uap->port.dev,
+				 "couldn't insert all characters (TTY is full?)\n");
+	}
+
+	/*
+	 * Only continue with trying to read the FIFO if all DMA chars have
+	 * been taken first.
+	 */
+	if (dma_count == pending && readfifo) {
+		/* Clear any error flags */
+		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+		       uap->port.membase + UART011_ICR);
+
+		/*
+		 * If we read all the DMA'd characters, and we had an
+		 * incomplete buffer, that could be due to an rx error, or
+		 * maybe we just timed out. Read any pending chars and check
+		 * the error status.
+		 *
+		 * Error conditions will only occur in the FIFO, these will
+		 * trigger an immediate interrupt and stop the DMA job, so we
+		 * will always find the error in the FIFO, never in the DMA
+		 * buffer.
+		 */
+		fifotaken = pl011_fifo_to_tty(uap);
+	}
+
+	spin_unlock(&uap->port.lock);
+	dev_vdbg(uap->port.dev,
+		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+		 dma_count, fifotaken);
+	tty_flip_buffer_push(tty);
+	spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	struct dma_chan *rxchan = dmarx->chan;
+	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	size_t pending;
+	struct dma_tx_state state;
+	enum dma_status dmastat;
+
+	/*
+	 * Pause the transfer so we can trust the current counter,
+	 * do this before we pause the PL011 block, else we may
+	 * overflow the FIFO.
+	 */
+	if (dmaengine_pause(rxchan))
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+	dmastat = rxchan->device->device_tx_status(rxchan,
+						   dmarx->cookie, &state);
+	if (dmastat != DMA_PAUSED)
+		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+	/* Disable RX DMA - incoming data will wait in the FIFO */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	uap->dmarx.running = false;
+
+	pending = sgbuf->sg.length - state.residue;
+	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+	/* Then we terminate the transfer - we now know our residue */
+	dmaengine_terminate_all(rxchan);
+
+	/*
+	 * This will take the chars we have so far and insert
+	 * into the framework.
+	 */
+	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+	/* Switch buffer & re-trigger DMA job */
+	dmarx->use_buf_b = !dmarx->use_buf_b;
+	if (pl011_dma_rx_trigger_dma(uap)) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+	struct uart_amba_port *uap = data;
+	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+	bool lastbuf = dmarx->use_buf_b;
+	int ret;
+
+	/*
+	 * This completion interrupt occurs typically when the
+	 * RX buffer is totally stuffed but no timeout has yet
+	 * occurred. When that happens, we just want the RX
+	 * routine to flush out the secondary DMA buffer while
+	 * we immediately trigger the next DMA job.
+	 */
+	spin_lock_irq(&uap->port.lock);
+	uap->dmarx.running = false;
+	dmarx->use_buf_b = !lastbuf;
+	ret = pl011_dma_rx_trigger_dma(uap);
+
+	pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+	spin_unlock_irq(&uap->port.lock);
+	/*
+	 * Do this check after we picked the DMA chars so we don't
+	 * get some IRQ immediately from RX.
+	 */
+	if (ret) {
+		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+			"fall back to interrupt mode\n");
+		uap->im |= UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+	/* FIXME.  Just disable the DMA enable */
+	uap->dmacr &= ~UART011_RXDMAE;
+	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
 
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
+	int ret;
+
 	if (!uap->dmatx.chan)
 		return;
 
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
-	uap->using_dma = true;
+	uap->using_tx_dma = true;
+
+	if (!uap->dmarx.chan)
+		goto skip_rx;
+
+	/* Allocate and map DMA RX buffers */
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer A", ret);
+		goto skip_rx;
+	}
+
+	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+			       DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+			"RX buffer B", ret);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+				 DMA_FROM_DEVICE);
+		goto skip_rx;
+	}
 
+	uap->using_rx_dma = true;
+
+skip_rx:
 	/* Turn on DMA error (RX/TX will be enabled on demand) */
 	uap->dmacr |= UART011_DMAONERR;
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 	if (uap->vendor->dma_threshold)
 		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
 		       uap->port.membase + ST_UART011_DMAWM);
+
+	if (uap->using_rx_dma) {
+		if (pl011_dma_rx_trigger_dma(uap))
+			dev_dbg(uap->port.dev, "could not trigger initial "
+				"RX DMA job, fall back to interrupt mode\n");
+	}
 }
 
 static void pl011_dma_shutdown(struct uart_amba_port *uap)
 {
-	if (!uap->using_dma)
+	if (!(uap->using_tx_dma || uap->using_rx_dma))
 		return;
 
 	/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 	spin_unlock_irq(&uap->port.lock);
 
-	/* In theory, this should already be done by pl011_dma_flush_buffer */
-	dmaengine_terminate_all(uap->dmatx.chan);
-	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
-		uap->dmatx.queued = false;
+	if (uap->using_tx_dma) {
+		/* In theory, this should already be done by pl011_dma_flush_buffer */
+		dmaengine_terminate_all(uap->dmatx.chan);
+		if (uap->dmatx.queued) {
+			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+				     DMA_TO_DEVICE);
+			uap->dmatx.queued = false;
+		}
+
+		kfree(uap->dmatx.buf);
+		uap->using_tx_dma = false;
 	}
 
-	kfree(uap->dmatx.buf);
+	if (uap->using_rx_dma) {
+		dmaengine_terminate_all(uap->dmarx.chan);
+		/* Clean up the RX DMA */
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		uap->using_rx_dma = false;
+	}
+}
 
-	uap->using_dma = false;
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma;
 }
 
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
 	return false;
 }
 
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+	return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+	return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+	return false;
+}
+
 #define pl011_dma_flush_buffer	NULL
 #endif
 
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
 	writew(uap->im, uap->port.membase + UART011_IMSC);
+
+	pl011_dma_rx_stop(uap);
 }
 
 static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port)
 static void pl011_rx_chars(struct uart_amba_port *uap)
 {
 	struct tty_struct *tty = uap->port.state->port.tty;
-	unsigned int status, ch, flag, max_count = 256;
-
-	status = readw(uap->port.membase + UART01x_FR);
-	while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
-		ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
-		flag = TTY_NORMAL;
-		uap->port.icount.rx++;
-
-		/*
-		 * Note that the error handling code is
-		 * out of the main execution path
-		 */
-		if (unlikely(ch & UART_DR_ERROR)) {
-			if (ch & UART011_DR_BE) {
-				ch &= ~(UART011_DR_FE | UART011_DR_PE);
-				uap->port.icount.brk++;
-				if (uart_handle_break(&uap->port))
-					goto ignore_char;
-			} else if (ch & UART011_DR_PE)
-				uap->port.icount.parity++;
-			else if (ch & UART011_DR_FE)
-				uap->port.icount.frame++;
-			if (ch & UART011_DR_OE)
-				uap->port.icount.overrun++;
-
-			ch &= uap->port.read_status_mask;
-
-			if (ch & UART011_DR_BE)
-				flag = TTY_BREAK;
-			else if (ch & UART011_DR_PE)
-				flag = TTY_PARITY;
-			else if (ch & UART011_DR_FE)
-				flag = TTY_FRAME;
-		}
 
-		if (uart_handle_sysrq_char(&uap->port, ch & 255))
-			goto ignore_char;
-
-		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+	pl011_fifo_to_tty(uap);
 
-	ignore_char:
-		status = readw(uap->port.membase + UART01x_FR);
-	}
 	spin_unlock(&uap->port.lock);
 	tty_flip_buffer_push(tty);
+	/*
+	 * If we were temporarily out of DMA mode for a while,
+	 * attempt to switch back to DMA mode again.
+	 */
+	if (pl011_dma_rx_available(uap)) {
+		if (pl011_dma_rx_trigger_dma(uap)) {
+			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+				"fall back to interrupt mode again\n");
+			uap->im |= UART011_RXIM;
+		} else
+			uap->im &= ~UART011_RXIM;
+		writew(uap->im, uap->port.membase + UART011_IMSC);
+	}
 	spin_lock(&uap->port.lock);
 }
 
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
 					  UART011_RXIS),
 			       uap->port.membase + UART011_ICR);
 
-			if (status & (UART011_RTIS|UART011_RXIS))
-				pl011_rx_chars(uap);
+			if (status & (UART011_RTIS|UART011_RXIS)) {
+				if (pl011_dma_rx_running(uap))
+					pl011_dma_rx_irq(uap);
+				else
+					pl011_rx_chars(uap);
+			}
 			if (status & (UART011_DSRMIS|UART011_DCDMIS|
 				      UART011_CTSMIS|UART011_RIMIS))
 				pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
945 pl011_dma_startup(uap); 1330 pl011_dma_startup(uap);
946 1331
947 /* 1332 /*
948 * Finally, enable interrupts 1333 * Finally, enable interrupts. When using DMA, enable only the
 1334 * RX timeout interrupt; if the initial RX DMA job failed,
 1335 * start in interrupt mode as well.
949 */ 1336 */
950 spin_lock_irq(&uap->port.lock); 1337 spin_lock_irq(&uap->port.lock);
951 uap->im = UART011_RXIM | UART011_RTIM; 1338 uap->im = UART011_RTIM;
1339 if (!pl011_dma_rx_running(uap))
1340 uap->im |= UART011_RXIM;
952 writew(uap->im, uap->port.membase + UART011_IMSC); 1341 writew(uap->im, uap->port.membase + UART011_IMSC);
953 spin_unlock_irq(&uap->port.lock); 1342 spin_unlock_irq(&uap->port.lock);
954 1343
@@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = {
1349 .cons = AMBA_CONSOLE, 1738 .cons = AMBA_CONSOLE,
1350}; 1739};
1351 1740
1352static int pl011_probe(struct amba_device *dev, struct amba_id *id) 1741static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1353{ 1742{
1354 struct uart_amba_port *uap; 1743 struct uart_amba_port *uap;
1355 struct vendor_data *vendor = id->data; 1744 struct vendor_data *vendor = id->data;
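Taken together, the pl011 hunks above implement a cooperative handover between DMA and interrupt-driven RX: UART011_RXIM stays masked while a DMA job owns the FIFO and is unmasked whenever triggering a DMA job fails. A condensed sketch of that pattern, assuming the pl011_dma_* helpers from the patch; pl011_update_rx_mode() is an invented wrapper, not part of the patch, and the caller is assumed to hold uap->port.lock:

	/* Sketch only: condenses the mask/unmask logic spread across
	 * the hunks above.  Caller must hold uap->port.lock. */
	static void pl011_update_rx_mode(struct uart_amba_port *uap)
	{
		/* pl011_dma_rx_trigger_dma() returns 0 on success */
		if (pl011_dma_rx_available(uap) &&
		    pl011_dma_rx_trigger_dma(uap) == 0)
			uap->im &= ~UART011_RXIM;	/* DMA owns RX */
		else
			uap->im |= UART011_RXIM;	/* interrupt mode */
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}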
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2a753f1e9183..bfc62d1ee2f7 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -376,6 +376,24 @@ config FB_CYBER2000
376 Say Y if you have a NetWinder or a graphics card containing this 376 Say Y if you have a NetWinder or a graphics card containing this
377 device, otherwise say N. 377 device, otherwise say N.
378 378
379config FB_CYBER2000_DDC
380 bool "DDC for CyberPro support"
381 depends on FB_CYBER2000
382 select FB_DDC
383 default y
384 help
385 Say Y here if you want DDC support for your CyberPro graphics
386 card. This adds I2C bus support only; the driver does not use EDID.
387
388config FB_CYBER2000_I2C
389 bool "CyberPro 2000/2010/5000 I2C support"
390 depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
391 select I2C_ALGOBIT
392 help
393 Enable support for the I2C video decoder interface on the
394 Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
395 on the NetWinder machines for SAA7111 video capture.
396
379config FB_APOLLO 397config FB_APOLLO
380 bool 398 bool
381 depends on (FB = y) && APOLLO 399 depends on (FB = y) && APOLLO
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 1c2c68356ea7..013c8ce57205 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -461,7 +461,7 @@ static int clcdfb_register(struct clcd_fb *fb)
461 return ret; 461 return ret;
462} 462}
463 463
464static int clcdfb_probe(struct amba_device *dev, struct amba_id *id) 464static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
465{ 465{
466 struct clcd_board *board = dev->dev.platform_data; 466 struct clcd_board *board = dev->dev.platform_data;
467 struct clcd_fb *fb; 467 struct clcd_fb *fb;
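A change threaded through the whole series, visible in the pl011, clcdfb and sp805_wdt probe functions: AMBA probe callbacks now take a const struct amba_id *, matching the constified match tables that amba_lookup() walks. A minimal skeleton under the new signature; foo_*, vendor_foo and the periphid value are invented for illustration:

	/* Hypothetical driver skeleton; only the const qualifiers
	 * reflect the actual API change in this series. */
	struct foo_vendor_data {
		unsigned int fifosize;
	};

	static struct foo_vendor_data vendor_foo = { .fifosize = 16 };

	static int foo_probe(struct amba_device *adev, const struct amba_id *id)
	{
		struct foo_vendor_data *vendor = id->data;	/* .data stays non-const */

		dev_info(&adev->dev, "probed periphid %08x\n", adev->periphid);
		return vendor ? 0 : -ENODEV;
	}

	static const struct amba_id foo_ids[] = {
		{ .id = 0x00041011, .mask = 0x000fffff, .data = &vendor_foo },
		{ 0, 0 },
	};

	static struct amba_driver foo_driver = {
		.drv		= { .name = "foo" },
		.id_table	= foo_ids,
		.probe		= foo_probe,
	};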
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 0c1afd13ddd3..850380795b05 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -47,6 +47,8 @@
47#include <linux/pci.h> 47#include <linux/pci.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/io.h> 49#include <linux/io.h>
50#include <linux/i2c.h>
51#include <linux/i2c-algo-bit.h>
50 52
51#include <asm/pgtable.h> 53#include <asm/pgtable.h>
52#include <asm/system.h> 54#include <asm/system.h>
@@ -61,10 +63,10 @@ struct cfb_info {
61 struct fb_info fb; 63 struct fb_info fb;
62 struct display_switch *dispsw; 64 struct display_switch *dispsw;
63 struct display *display; 65 struct display *display;
64 struct pci_dev *dev;
65 unsigned char __iomem *region; 66 unsigned char __iomem *region;
66 unsigned char __iomem *regs; 67 unsigned char __iomem *regs;
67 u_int id; 68 u_int id;
69 u_int irq;
68 int func_use_count; 70 int func_use_count;
69 u_long ref_ps; 71 u_long ref_ps;
70 72
@@ -88,6 +90,19 @@ struct cfb_info {
88 u_char ramdac_powerdown; 90 u_char ramdac_powerdown;
89 91
90 u32 pseudo_palette[16]; 92 u32 pseudo_palette[16];
93
94 spinlock_t reg_b0_lock;
95
96#ifdef CONFIG_FB_CYBER2000_DDC
97 bool ddc_registered;
98 struct i2c_adapter ddc_adapter;
99 struct i2c_algo_bit_data ddc_algo;
100#endif
101
102#ifdef CONFIG_FB_CYBER2000_I2C
103 struct i2c_adapter i2c_adapter;
104 struct i2c_algo_bit_data i2c_algo;
105#endif
91}; 106};
92 107
93static char *default_font = "Acorn8x8"; 108static char *default_font = "Acorn8x8";
@@ -494,6 +509,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
494 cyber2000_attrw(0x14, 0x00, cfb); 509 cyber2000_attrw(0x14, 0x00, cfb);
495 510
496 /* PLL registers */ 511 /* PLL registers */
512 spin_lock(&cfb->reg_b0_lock);
497 cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb); 513 cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb);
498 cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb); 514 cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb);
499 cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb); 515 cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb);
@@ -501,6 +517,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
501 cyber2000_grphw(0x90, 0x01, cfb); 517 cyber2000_grphw(0x90, 0x01, cfb);
502 cyber2000_grphw(0xb9, 0x80, cfb); 518 cyber2000_grphw(0xb9, 0x80, cfb);
503 cyber2000_grphw(0xb9, 0x00, cfb); 519 cyber2000_grphw(0xb9, 0x00, cfb);
520 spin_unlock(&cfb->reg_b0_lock);
504 521
505 cfb->ramdac_ctrl = hw->ramdac; 522 cfb->ramdac_ctrl = hw->ramdac;
506 cyber2000fb_write_ramdac_ctrl(cfb); 523 cyber2000fb_write_ramdac_ctrl(cfb);
@@ -681,9 +698,9 @@ cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb,
681 * pll_ps_calc = best_div1 / (ref_ps * best_mult) 698 * pll_ps_calc = best_div1 / (ref_ps * best_mult)
682 */ 699 */
683 best_diff = 0x7fffffff; 700 best_diff = 0x7fffffff;
684 best_mult = 32; 701 best_mult = 2;
685 best_div1 = 255; 702 best_div1 = 32;
686 for (t_div1 = 32; t_div1 > 1; t_div1 -= 1) { 703 for (t_div1 = 2; t_div1 < 32; t_div1 += 1) {
687 u_int rr, t_mult, t_pll_ps; 704 u_int rr, t_mult, t_pll_ps;
688 int diff; 705 int diff;
689 706
@@ -1105,24 +1122,22 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb)
1105} 1122}
1106EXPORT_SYMBOL(cyber2000fb_disable_extregs); 1123EXPORT_SYMBOL(cyber2000fb_disable_extregs);
1107 1124
1108void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var)
1109{
1110 memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo));
1111}
1112EXPORT_SYMBOL(cyber2000fb_get_fb_var);
1113
1114/* 1125/*
1115 * Attach a capture/tv driver to the core CyberX0X0 driver. 1126 * Attach a capture/tv driver to the core CyberX0X0 driver.
1116 */ 1127 */
1117int cyber2000fb_attach(struct cyberpro_info *info, int idx) 1128int cyber2000fb_attach(struct cyberpro_info *info, int idx)
1118{ 1129{
1119 if (int_cfb_info != NULL) { 1130 if (int_cfb_info != NULL) {
1120 info->dev = int_cfb_info->dev; 1131 info->dev = int_cfb_info->fb.device;
1132#ifdef CONFIG_FB_CYBER2000_I2C
1133 info->i2c = &int_cfb_info->i2c_adapter;
1134#else
1135 info->i2c = NULL;
1136#endif
1121 info->regs = int_cfb_info->regs; 1137 info->regs = int_cfb_info->regs;
1138 info->irq = int_cfb_info->irq;
1122 info->fb = int_cfb_info->fb.screen_base; 1139 info->fb = int_cfb_info->fb.screen_base;
1123 info->fb_size = int_cfb_info->fb.fix.smem_len; 1140 info->fb_size = int_cfb_info->fb.fix.smem_len;
1124 info->enable_extregs = cyber2000fb_enable_extregs;
1125 info->disable_extregs = cyber2000fb_disable_extregs;
1126 info->info = int_cfb_info; 1141 info->info = int_cfb_info;
1127 1142
1128 strlcpy(info->dev_name, int_cfb_info->fb.fix.id, 1143 strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
@@ -1141,6 +1156,183 @@ void cyber2000fb_detach(int idx)
1141} 1156}
1142EXPORT_SYMBOL(cyber2000fb_detach); 1157EXPORT_SYMBOL(cyber2000fb_detach);
1143 1158
1159#ifdef CONFIG_FB_CYBER2000_DDC
1160
1161#define DDC_REG 0xb0
1162#define DDC_SCL_OUT (1 << 0)
1163#define DDC_SDA_OUT (1 << 4)
1164#define DDC_SCL_IN (1 << 2)
1165#define DDC_SDA_IN (1 << 6)
1166
1167static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
1168{
1169 spin_lock(&cfb->reg_b0_lock);
1170 cyber2000fb_writew(0x1bf, 0x3ce, cfb);
1171}
1172
1173static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
1174{
1175 cyber2000fb_writew(0x0bf, 0x3ce, cfb);
1176 spin_unlock(&cfb->reg_b0_lock);
1177}
1178
1179
1180static void cyber2000fb_ddc_setscl(void *data, int val)
1181{
1182 struct cfb_info *cfb = data;
1183 unsigned char reg;
1184
1185 cyber2000fb_enable_ddc(cfb);
1186 reg = cyber2000_grphr(DDC_REG, cfb);
1187 if (!val) /* bit is inverted */
1188 reg |= DDC_SCL_OUT;
1189 else
1190 reg &= ~DDC_SCL_OUT;
1191 cyber2000_grphw(DDC_REG, reg, cfb);
1192 cyber2000fb_disable_ddc(cfb);
1193}
1194
1195static void cyber2000fb_ddc_setsda(void *data, int val)
1196{
1197 struct cfb_info *cfb = data;
1198 unsigned char reg;
1199
1200 cyber2000fb_enable_ddc(cfb);
1201 reg = cyber2000_grphr(DDC_REG, cfb);
1202 if (!val) /* bit is inverted */
1203 reg |= DDC_SDA_OUT;
1204 else
1205 reg &= ~DDC_SDA_OUT;
1206 cyber2000_grphw(DDC_REG, reg, cfb);
1207 cyber2000fb_disable_ddc(cfb);
1208}
1209
1210static int cyber2000fb_ddc_getscl(void *data)
1211{
1212 struct cfb_info *cfb = data;
1213 int retval;
1214
1215 cyber2000fb_enable_ddc(cfb);
1216 retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN);
1217 cyber2000fb_disable_ddc(cfb);
1218
1219 return retval;
1220}
1221
1222static int cyber2000fb_ddc_getsda(void *data)
1223{
1224 struct cfb_info *cfb = data;
1225 int retval;
1226
1227 cyber2000fb_enable_ddc(cfb);
1228 retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN);
1229 cyber2000fb_disable_ddc(cfb);
1230
1231 return retval;
1232}
1233
1234static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
1235{
1236 strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
1237 sizeof(cfb->ddc_adapter.name));
1238 cfb->ddc_adapter.owner = THIS_MODULE;
1239 cfb->ddc_adapter.class = I2C_CLASS_DDC;
1240 cfb->ddc_adapter.algo_data = &cfb->ddc_algo;
1241 cfb->ddc_adapter.dev.parent = cfb->fb.device;
1242 cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda;
1243 cfb->ddc_algo.setscl = cyber2000fb_ddc_setscl;
1244 cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda;
1245 cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl;
1246 cfb->ddc_algo.udelay = 10;
1247 cfb->ddc_algo.timeout = 20;
1248 cfb->ddc_algo.data = cfb;
1249
1250 i2c_set_adapdata(&cfb->ddc_adapter, cfb);
1251
1252 return i2c_bit_add_bus(&cfb->ddc_adapter);
1253}
1254#endif /* CONFIG_FB_CYBER2000_DDC */
1255
1256#ifdef CONFIG_FB_CYBER2000_I2C
1257static void cyber2000fb_i2c_setsda(void *data, int state)
1258{
1259 struct cfb_info *cfb = data;
1260 unsigned int latch2;
1261
1262 spin_lock(&cfb->reg_b0_lock);
1263 latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
1264 latch2 &= EXT_LATCH2_I2C_CLKEN;
1265 if (state)
1266 latch2 |= EXT_LATCH2_I2C_DATEN;
1267 cyber2000_grphw(EXT_LATCH2, latch2, cfb);
1268 spin_unlock(&cfb->reg_b0_lock);
1269}
1270
1271static void cyber2000fb_i2c_setscl(void *data, int state)
1272{
1273 struct cfb_info *cfb = data;
1274 unsigned int latch2;
1275
1276 spin_lock(&cfb->reg_b0_lock);
1277 latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
1278 latch2 &= EXT_LATCH2_I2C_DATEN;
1279 if (state)
1280 latch2 |= EXT_LATCH2_I2C_CLKEN;
1281 cyber2000_grphw(EXT_LATCH2, latch2, cfb);
1282 spin_unlock(&cfb->reg_b0_lock);
1283}
1284
1285static int cyber2000fb_i2c_getsda(void *data)
1286{
1287 struct cfb_info *cfb = data;
1288 int ret;
1289
1290 spin_lock(&cfb->reg_b0_lock);
1291 ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT);
1292 spin_unlock(&cfb->reg_b0_lock);
1293
1294 return ret;
1295}
1296
1297static int cyber2000fb_i2c_getscl(void *data)
1298{
1299 struct cfb_info *cfb = data;
1300 int ret;
1301
1302 spin_lock(&cfb->reg_b0_lock);
1303 ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK);
1304 spin_unlock(&cfb->reg_b0_lock);
1305
1306 return ret;
1307}
1308
1309static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb)
1310{
1311 strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
1312 sizeof(cfb->i2c_adapter.name));
1313 cfb->i2c_adapter.owner = THIS_MODULE;
1314 cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
1315 cfb->i2c_adapter.dev.parent = cfb->fb.device;
1316 cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda;
1317 cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl;
1318 cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda;
1319 cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl;
1320 cfb->i2c_algo.udelay = 5;
1321 cfb->i2c_algo.timeout = msecs_to_jiffies(100);
1322 cfb->i2c_algo.data = cfb;
1323
1324 return i2c_bit_add_bus(&cfb->i2c_adapter);
1325}
1326
1327static void cyber2000fb_i2c_unregister(struct cfb_info *cfb)
1328{
1329 i2c_del_adapter(&cfb->i2c_adapter);
1330}
1331#else
1332#define cyber2000fb_i2c_register(cfb) (0)
1333#define cyber2000fb_i2c_unregister(cfb) do { } while (0)
1334#endif
1335
1144/* 1336/*
1145 * These parameters give 1337 * These parameters give
1146 * 640x480, hsync 31.5kHz, vsync 60Hz 1338 * 640x480, hsync 31.5kHz, vsync 60Hz
@@ -1275,6 +1467,8 @@ static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
1275 cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 1467 cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
1276 cfb->fb.pseudo_palette = cfb->pseudo_palette; 1468 cfb->fb.pseudo_palette = cfb->pseudo_palette;
1277 1469
1470 spin_lock_init(&cfb->reg_b0_lock);
1471
1278 fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0); 1472 fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0);
1279 1473
1280 return cfb; 1474 return cfb;
@@ -1369,6 +1563,11 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
1369 cfb->fb.fix.mmio_len = MMIO_SIZE; 1563 cfb->fb.fix.mmio_len = MMIO_SIZE;
1370 cfb->fb.screen_base = cfb->region; 1564 cfb->fb.screen_base = cfb->region;
1371 1565
1566#ifdef CONFIG_FB_CYBER2000_DDC
1567 if (cyber2000fb_setup_ddc_bus(cfb) == 0)
1568 cfb->ddc_registered = true;
1569#endif
1570
1372 err = -EINVAL; 1571 err = -EINVAL;
1373 if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0, 1572 if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0,
1374 &cyber2000fb_default_mode, 8)) { 1573 &cyber2000fb_default_mode, 8)) {
@@ -1401,14 +1600,32 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
1401 cfb->fb.var.xres, cfb->fb.var.yres, 1600 cfb->fb.var.xres, cfb->fb.var.yres,
1402 h_sync / 1000, h_sync % 1000, v_sync); 1601 h_sync / 1000, h_sync % 1000, v_sync);
1403 1602
1404 if (cfb->dev) 1603 err = cyber2000fb_i2c_register(cfb);
1405 cfb->fb.device = &cfb->dev->dev; 1604 if (err)
1605 goto failed;
1606
1406 err = register_framebuffer(&cfb->fb); 1607 err = register_framebuffer(&cfb->fb);
1608 if (err)
1609 cyber2000fb_i2c_unregister(cfb);
1407 1610
1408failed: 1611failed:
1612#ifdef CONFIG_FB_CYBER2000_DDC
1613 if (err && cfb->ddc_registered)
1614 i2c_del_adapter(&cfb->ddc_adapter);
1615#endif
1409 return err; 1616 return err;
1410} 1617}
1411 1618
1619static void __devexit cyberpro_common_remove(struct cfb_info *cfb)
1620{
1621 unregister_framebuffer(&cfb->fb);
1622#ifdef CONFIG_FB_CYBER2000_DDC
1623 if (cfb->ddc_registered)
1624 i2c_del_adapter(&cfb->ddc_adapter);
1625#endif
1626 cyber2000fb_i2c_unregister(cfb);
1627}
1628
1412static void cyberpro_common_resume(struct cfb_info *cfb) 1629static void cyberpro_common_resume(struct cfb_info *cfb)
1413{ 1630{
1414 cyberpro_init_hw(cfb); 1631 cyberpro_init_hw(cfb);
@@ -1442,12 +1659,13 @@ static int __devinit cyberpro_vl_probe(void)
1442 if (!cfb) 1659 if (!cfb)
1443 goto failed_release; 1660 goto failed_release;
1444 1661
1445 cfb->dev = NULL; 1662 cfb->irq = -1;
1446 cfb->region = ioremap(FB_START, FB_SIZE); 1663 cfb->region = ioremap(FB_START, FB_SIZE);
1447 if (!cfb->region) 1664 if (!cfb->region)
1448 goto failed_ioremap; 1665 goto failed_ioremap;
1449 1666
1450 cfb->regs = cfb->region + MMIO_OFFSET; 1667 cfb->regs = cfb->region + MMIO_OFFSET;
1668 cfb->fb.device = NULL;
1451 cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET; 1669 cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
1452 cfb->fb.fix.smem_start = FB_START; 1670 cfb->fb.fix.smem_start = FB_START;
1453 1671
@@ -1585,12 +1803,13 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1585 if (err) 1803 if (err)
1586 goto failed_regions; 1804 goto failed_regions;
1587 1805
1588 cfb->dev = dev; 1806 cfb->irq = dev->irq;
1589 cfb->region = pci_ioremap_bar(dev, 0); 1807 cfb->region = pci_ioremap_bar(dev, 0);
1590 if (!cfb->region) 1808 if (!cfb->region)
1591 goto failed_ioremap; 1809 goto failed_ioremap;
1592 1810
1593 cfb->regs = cfb->region + MMIO_OFFSET; 1811 cfb->regs = cfb->region + MMIO_OFFSET;
1812 cfb->fb.device = &dev->dev;
1594 cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET; 1813 cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET;
1595 cfb->fb.fix.smem_start = pci_resource_start(dev, 0); 1814 cfb->fb.fix.smem_start = pci_resource_start(dev, 0);
1596 1815
@@ -1648,15 +1867,7 @@ static void __devexit cyberpro_pci_remove(struct pci_dev *dev)
1648 struct cfb_info *cfb = pci_get_drvdata(dev); 1867 struct cfb_info *cfb = pci_get_drvdata(dev);
1649 1868
1650 if (cfb) { 1869 if (cfb) {
1651 /* 1870 cyberpro_common_remove(cfb);
1652 * If unregister_framebuffer fails, then
1653 * we will be leaving hooks that could cause
1654 * oopsen laying around.
1655 */
1656 if (unregister_framebuffer(&cfb->fb))
1657 printk(KERN_WARNING "%s: danger Will Robinson, "
1658 "danger danger! Oopsen imminent!\n",
1659 cfb->fb.fix.id);
1660 iounmap(cfb->region); 1871 iounmap(cfb->region);
1661 cyberpro_free_fb_info(cfb); 1872 cyberpro_free_fb_info(cfb);
1662 1873
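The DDC hooks above only expose the monitor's I2C bus through i2c-algo-bit — note the inverted sense of the SCL/SDA output bits, and that cyber2000fb_enable_ddc()/cyber2000fb_disable_ddc() bracket every access with reg_b0_lock. Per the new Kconfig text the driver never parses EDID itself, but once the adapter is registered any client code can. A usage sketch, not part of the patch, reading the first EDID block at the conventional 0x50 address; cyberpro_read_edid() is an invented helper:

	/* Usage sketch only: the patch registers the adapter but does
	 * not read EDID itself. */
	static int cyberpro_read_edid(struct cfb_info *cfb, u8 *buf)
	{
		u8 offset = 0;
		struct i2c_msg msgs[] = {
			{ .addr = 0x50, .len = 1, .buf = &offset },  /* set EDID offset */
			{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
		};

		return i2c_transfer(&cfb->ddc_adapter, msgs, 2) == 2 ? 0 : -EIO;
	}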
diff --git a/drivers/video/cyber2000fb.h b/drivers/video/cyber2000fb.h
index de4fc43e51c1..bad69102e774 100644
--- a/drivers/video/cyber2000fb.h
+++ b/drivers/video/cyber2000fb.h
@@ -464,12 +464,14 @@ static void debug_printf(char *fmt, ...)
464struct cfb_info; 464struct cfb_info;
465 465
466struct cyberpro_info { 466struct cyberpro_info {
467 struct pci_dev *dev; 467 struct device *dev;
468 struct i2c_adapter *i2c;
468 unsigned char __iomem *regs; 469 unsigned char __iomem *regs;
469 char __iomem *fb; 470 char __iomem *fb;
470 char dev_name[32]; 471 char dev_name[32];
471 unsigned int fb_size; 472 unsigned int fb_size;
472 unsigned int chip_id; 473 unsigned int chip_id;
474 unsigned int irq;
473 475
474 /* 476 /*
475 * The following is a pointer to be passed into the 477 * The following is a pointer to be passed into the
@@ -478,15 +480,6 @@ struct cyberpro_info {
478 * is within this structure. 480 * is within this structure.
479 */ 481 */
480 struct cfb_info *info; 482 struct cfb_info *info;
481
482 /*
483 * Use these to enable the BM or TV registers. In an SMP
484 * environment, these two function pointers should only be
485 * called from the module_init() or module_exit()
486 * functions.
487 */
488 void (*enable_extregs)(struct cfb_info *);
489 void (*disable_extregs)(struct cfb_info *);
490}; 483};
491 484
492#define ID_IGA_1682 0 485#define ID_IGA_1682 0
@@ -494,8 +487,6 @@ struct cyberpro_info {
494#define ID_CYBERPRO_2010 2 487#define ID_CYBERPRO_2010 2
495#define ID_CYBERPRO_5000 3 488#define ID_CYBERPRO_5000 3
496 489
497struct fb_var_screeninfo;
498
499/* 490/*
500 * Note! Writing to the Cyber20x0 registers from an interrupt 491 * Note! Writing to the Cyber20x0 registers from an interrupt
501 * routine is definitely a bad idea at the moment. 492
@@ -504,4 +495,3 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx);
504void cyber2000fb_detach(int idx); 495void cyber2000fb_detach(int idx);
505void cyber2000fb_enable_extregs(struct cfb_info *cfb); 496void cyber2000fb_enable_extregs(struct cfb_info *cfb);
506void cyber2000fb_disable_extregs(struct cfb_info *cfb); 497void cyber2000fb_disable_extregs(struct cfb_info *cfb);
507void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var);
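The header diff reshapes the attach API to match the driver changes: dev becomes a generic struct device, the video-decoder bus is handed over as an i2c_adapter, the IRQ is passed directly, and the extregs function pointers disappear in favour of the exported cyber2000fb_enable_extregs()/cyber2000fb_disable_extregs() calls. A sketch of how a capture module might consume the new fields; capture_isr and cyberpro_capture_init are invented names, and cyber2000fb_attach() is assumed to return nonzero on success:

	static struct cyberpro_info capture_info;

	/* capture_isr() is hypothetical; only the field usage matters */
	static irqreturn_t capture_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int __init cyberpro_capture_init(void)
	{
		if (!cyber2000fb_attach(&capture_info, 0))
			return -ENODEV;

		if (!capture_info.i2c)	/* built without FB_CYBER2000_I2C */
			return -ENXIO;

		/* the SAA7111 decoder sits on capture_info.i2c; the IRQ
		 * now comes straight from the core driver */
		return request_irq(capture_info.irq, capture_isr, IRQF_SHARED,
				   capture_info.dev_name, &capture_info);
	}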
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 9127eda2145b..0a0efe713bc8 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = {
278}; 278};
279 279
280static int __devinit 280static int __devinit
281sp805_wdt_probe(struct amba_device *adev, struct amba_id *id) 281sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
282{ 282{
283 int ret = 0; 283 int ret = 0;
284 284