author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-12 17:27:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-12 17:27:24 -0500
commit	09cea96caa59fabab3030c53bd698b9b568d959a (patch)
tree	a991cdc0c887fdcda37f4b751ee98d3db9559f4e /drivers
parent	6eb7365db6f3a4a9d8d9922bb0b800f9cbaad641 (diff)
parent	e090aa80321b64c3b793f3b047e31ecf1af9538d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (151 commits)
  powerpc: Fix usage of 64-bit instruction in 32-bit altivec code
  MAINTAINERS: Add PowerPC patterns
  powerpc/pseries: Track previous CPPR values to correctly EOI interrupts
  powerpc/pseries: Correct pseries/dlpar.c build break without CONFIG_SMP
  powerpc: Make "intspec" pointers in irq_host->xlate() const
  powerpc/8xx: DTLB Miss cleanup
  powerpc/8xx: Remove DIRTY pte handling in DTLB Error.
  powerpc/8xx: Start using dcbX instructions in various copy routines
  powerpc/8xx: Restore _PAGE_WRITETHRU
  powerpc/8xx: Add missing Guarded setting in DTLB Error.
  powerpc/8xx: Fixup DAR from buggy dcbX instructions.
  powerpc/8xx: Tag DAR with 0x00f0 to catch buggy instructions.
  powerpc/8xx: Update TLB asm so it behaves as linux mm expects.
  powerpc/8xx: Invalidate non present TLBs
  powerpc/pseries: Serialize cpu hotplug operations during deactivate Vs deallocate
  pseries/pseries: Add code to online/offline CPUs of a DLPAR node
  powerpc: stop_this_cpu: remove the cpu from the online map.
  powerpc/pseries: Add kernel based CPU DLPAR handling
  sysfs/cpu: Add probe/release files
  powerpc/pseries: Kernel DLPAR Infrastructure
  ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ata/Kconfig10
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/pata_macio.c1427
-rw-r--r--drivers/base/cpu.c34
-rw-r--r--drivers/block/swim3.c39
-rw-r--r--drivers/char/agp/uninorth-agp.c77
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/ide/pmac.c92
-rw-r--r--drivers/macintosh/macio_asic.c52
-rw-r--r--drivers/macintosh/mediabay.c328
-rw-r--r--drivers/macintosh/nvram.c11
-rw-r--r--drivers/macintosh/therm_adt746x.c15
-rw-r--r--drivers/macintosh/via-pmu.c160
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/net/ehea/ehea_hcall.h51
-rw-r--r--drivers/net/ehea/ehea_phyp.h1
-rw-r--r--drivers/of/platform.c305
-rw-r--r--drivers/spi/Kconfig11
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c25
-rw-r--r--drivers/spi/mpc52xx_spi.c520
-rw-r--r--drivers/spi/spi_mpc8xxx.c618
-rw-r--r--drivers/spi/xilinx_spi.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.h15
-rw-r--r--drivers/video/offb.c15
-rw-r--r--drivers/watchdog/Kconfig4
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/mpc5200_wdt.c293
29 files changed, 3247 insertions, 867 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 676f08b004b3..85844d053846 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -790,5 +790,15 @@ config PATA_BF54X
790 790
791 If unsure, say N. 791 If unsure, say N.
792 792
793config PATA_MACIO
794 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
795 depends on PPC_PMAC
796 help
 797 Most IDE-capable PowerMacs have IDE busses driven by a variant
798 of this controller which is part of the Apple chipset used on
799 most PowerMac models. Some models have multiple busses using
800 different chipsets, though generally, MacIO is one of them.
801
802
793endif # ATA_SFF 803endif # ATA_SFF
794endif # ATA 804endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d909435e9d81..fc936d4471d6 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o
18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
20obj-$(CONFIG_SATA_FSL) += sata_fsl.o 20obj-$(CONFIG_SATA_FSL) += sata_fsl.o
21obj-$(CONFIG_PATA_MACIO) += pata_macio.o
21 22
22obj-$(CONFIG_PATA_ALI) += pata_ali.o 23obj-$(CONFIG_PATA_ALI) += pata_ali.o
23obj-$(CONFIG_PATA_AMD) += pata_amd.o 24obj-$(CONFIG_PATA_AMD) += pata_amd.o
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
new file mode 100644
index 000000000000..4cc7bbd10ec2
--- /dev/null
+++ b/drivers/ata/pata_macio.c
@@ -0,0 +1,1427 @@
1/*
2 * Libata based driver for Apple "macio" family of PATA controllers
3 *
4 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
5 * <benh@kernel.crashing.org>
6 *
7 * Some bits and pieces from drivers/ide/ppc/pmac.c
8 *
9 */
10
11#undef DEBUG
12#undef DEBUG_DMA
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <linux/ata.h>
19#include <linux/libata.h>
20#include <linux/adb.h>
21#include <linux/pmu.h>
22#include <linux/scatterlist.h>
23#include <linux/of.h>
24
25#include <scsi/scsi.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_device.h>
28
29#include <asm/macio.h>
30#include <asm/io.h>
31#include <asm/dbdma.h>
32#include <asm/pci-bridge.h>
33#include <asm/machdep.h>
34#include <asm/pmac_feature.h>
35#include <asm/mediabay.h>
36
37#ifdef DEBUG_DMA
38#define dev_dbgdma(dev, format, arg...) \
39 dev_printk(KERN_DEBUG , dev , format , ## arg)
40#else
41#define dev_dbgdma(dev, format, arg...) \
42 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
43#endif
44
45#define DRV_NAME "pata_macio"
46#define DRV_VERSION "0.9"
47
48/* Models of macio ATA controller */
49enum {
50 controller_ohare, /* OHare based */
51 controller_heathrow, /* Heathrow/Paddington */
52 controller_kl_ata3, /* KeyLargo ATA-3 */
53 controller_kl_ata4, /* KeyLargo ATA-4 */
54 controller_un_ata6, /* UniNorth2 ATA-6 */
55 controller_k2_ata6, /* K2 ATA-6 */
56 controller_sh_ata6, /* Shasta ATA-6 */
57};
58
59static const char* macio_ata_names[] = {
60 "OHare ATA", /* OHare based */
61 "Heathrow ATA", /* Heathrow/Paddington */
62 "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
63 "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
64 "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
65 "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
66 "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
67};
68
69/*
70 * Extra registers, both 32-bit little-endian
71 */
72#define IDE_TIMING_CONFIG 0x200
73#define IDE_INTERRUPT 0x300
74
75/* Kauai (U2) ATA has different register setup */
76#define IDE_KAUAI_PIO_CONFIG 0x200
77#define IDE_KAUAI_ULTRA_CONFIG 0x210
78#define IDE_KAUAI_POLL_CONFIG 0x220
79
80/*
81 * Timing configuration register definitions
82 */
83
84/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
85#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
86#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
87#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
88#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
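/*
 * Worked example (illustration only, not part of the original source):
 * with the 30ns clock above, SYSCLK_TICKS(120) = (120 + 30 - 1) / 30 = 4
 * ticks, and SYSCLK_TICKS_66(120) = (120 + 15 - 1) / 15 = 8 ticks, i.e.
 * the macros round a nanosecond figure up to a whole number of cell clocks.
 */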
89
90/* 133Mhz cell, found in shasta.
91 * See comments about 100 Mhz Uninorth 2...
92 * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
 93 * weird and I don't know why, at this stage.
94 */
95#define TR_133_PIOREG_PIO_MASK 0xff000fff
96#define TR_133_PIOREG_MDMA_MASK 0x00fff800
97#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
98#define TR_133_UDMAREG_UDMA_EN 0x00000001
99
100/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
 101 * (106b/0033) on the uninorth or K2 internal PCI bus and its clock is
 102 * controlled like gem or fw. It appears to be an evolution of keylargo
 103 * ATA4 with a timing register extended to 2x32-bit registers (one
 104 * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
 105 * It has its own local feature control register as well.
106 *
107 * After scratching my mind over the timing values, at least for PIO
108 * and MDMA, I think I've figured the format of the timing register,
109 * though I use pre-calculated tables for UDMA as usual...
110 */
111#define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */
112#define TR_100_PIO_ADDRSETUP_SHIFT 24
113#define TR_100_MDMA_MASK 0x00fff000
114#define TR_100_MDMA_RECOVERY_MASK 0x00fc0000
115#define TR_100_MDMA_RECOVERY_SHIFT 18
116#define TR_100_MDMA_ACCESS_MASK 0x0003f000
117#define TR_100_MDMA_ACCESS_SHIFT 12
118#define TR_100_PIO_MASK 0xff000fff
119#define TR_100_PIO_RECOVERY_MASK 0x00000fc0
120#define TR_100_PIO_RECOVERY_SHIFT 6
121#define TR_100_PIO_ACCESS_MASK 0x0000003f
122#define TR_100_PIO_ACCESS_SHIFT 0
123
124#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
125#define TR_100_UDMAREG_UDMA_EN 0x00000001
126
127
128/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
 129 * a 40-conductor cable and up to 4 on an 80-conductor one.
130 * Clock unit is 15ns (66Mhz)
131 *
132 * 3 Values can be programmed:
133 * - Write data setup, which appears to match the cycle time. They
134 * also call it DIOW setup.
135 * - Ready to pause time (from spec)
136 * - Address setup. That one is weird. I don't see where exactly
 137 * it fits in UDMA cycles, I got its name from an obscure piece
 138 * of commented-out code in Darwin. They leave it at 0, we do as
 139 * well, despite a comment suggesting it has a
 140 * min value of 45ns.
 141 * Apple also adds 60ns to the write data setup (or cycle time ?) on
142 * reads.
143 */
144#define TR_66_UDMA_MASK 0xfff00000
145#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
146#define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */
147#define TR_66_PIO_ADDRSETUP_SHIFT 29
148#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
149#define TR_66_UDMA_RDY2PAUS_SHIFT 25
150#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
151#define TR_66_UDMA_WRDATASETUP_SHIFT 21
152#define TR_66_MDMA_MASK 0x000ffc00
153#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
154#define TR_66_MDMA_RECOVERY_SHIFT 15
155#define TR_66_MDMA_ACCESS_MASK 0x00007c00
156#define TR_66_MDMA_ACCESS_SHIFT 10
157#define TR_66_PIO_MASK 0xe00003ff
158#define TR_66_PIO_RECOVERY_MASK 0x000003e0
159#define TR_66_PIO_RECOVERY_SHIFT 5
160#define TR_66_PIO_ACCESS_MASK 0x0000001f
161#define TR_66_PIO_ACCESS_SHIFT 0
162
163/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
164 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
165 *
166 * The access time and recovery time can be programmed. Some older
 167 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
 168 * the same here for safety against broken old hardware ;)
169 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 170 * time and removes one from recovery. It's not supported on the KeyLargo
 171 * implementation AFAIK. The E bit appears to be set for PIO mode 0 and
172 * is used to reach long timings used in this mode.
173 */
174#define TR_33_MDMA_MASK 0x003ff800
175#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
176#define TR_33_MDMA_RECOVERY_SHIFT 16
177#define TR_33_MDMA_ACCESS_MASK 0x0000f800
178#define TR_33_MDMA_ACCESS_SHIFT 11
179#define TR_33_MDMA_HALFTICK 0x00200000
180#define TR_33_PIO_MASK 0x000007ff
181#define TR_33_PIO_E 0x00000400
182#define TR_33_PIO_RECOVERY_MASK 0x000003e0
183#define TR_33_PIO_RECOVERY_SHIFT 5
184#define TR_33_PIO_ACCESS_MASK 0x0000001f
185#define TR_33_PIO_ACCESS_SHIFT 0
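/*
 * Illustrative decode (my reading of the masks above, not taken from any
 * documentation): the PIO mode 0 value 0x00000526 used in the tables below
 * splits into E = 1 (bit 0x400), recovery = (0x526 & 0x3e0) >> 5 = 9 ticks
 * and access = 0x526 & 0x1f = 6 ticks, i.e. 270ns recovery plus 180ns
 * access with the 30ns clock of this cell.
 */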
186
187/*
188 * Interrupt register definitions. Only present on newer cells
 189 * (KeyLargo and later, AFAIK) so we don't use it.
190 */
191#define IDE_INTR_DMA 0x80000000
192#define IDE_INTR_DEVICE 0x40000000
193
194/*
195 * FCR Register on Kauai. Not sure what bit 0x4 is ...
196 */
197#define KAUAI_FCR_UATA_MAGIC 0x00000004
198#define KAUAI_FCR_UATA_RESET_N 0x00000002
199#define KAUAI_FCR_UATA_ENABLE 0x00000001
200
201
202/* Allow up to 256 DBDMA commands per xfer */
203#define MAX_DCMDS 256
204
205/* Don't let a DMA segment go all the way to 64K */
206#define MAX_DBDMA_SEG 0xff00
207
208
209/*
210 * Wait 1s for disk to answer on IDE bus after a hard reset
211 * of the device (via GPIO/FCR).
212 *
213 * Some devices seem to "pollute" the bus even after dropping
 214 * the BSY bit (typically some combo drives as slave on the UDMA
215 * bus) after a hard reset. Since we hard reset all drives on
216 * KeyLargo ATA66, we have to keep that delay around. I may end
217 * up not hard resetting anymore on these and keep the delay only
218 * for older interfaces instead (we have to reset when coming
219 * from MacOS...) --BenH.
220 */
221#define IDE_WAKEUP_DELAY_MS 1000
222
223struct pata_macio_timing;
224
225struct pata_macio_priv {
226 int kind;
227 int aapl_bus_id;
228 int mediabay : 1;
229 struct device_node *node;
230 struct macio_dev *mdev;
231 struct pci_dev *pdev;
232 struct device *dev;
233 int irq;
234 u32 treg[2][2];
235 void __iomem *tfregs;
236 void __iomem *kauai_fcr;
237 struct dbdma_cmd * dma_table_cpu;
238 dma_addr_t dma_table_dma;
239 struct ata_host *host;
240 const struct pata_macio_timing *timings;
241};
242
 243/* Previous versions of this driver used to calculate timings
 244 * for some variants of the chip and use tables for others.
245 *
246 * Not only was this confusing, but in addition, it isn't clear
247 * whether our calculation code was correct. It didn't entirely
 248 * match the Darwin code and whatever documentation I could find
 249 * on these cells.
250 *
251 * I decided to entirely rely on a table instead for this version
252 * of the driver. Also, because I don't really care about derated
253 * modes and really old HW other than making it work, I'm not going
 254 * to calculate / snoop timing values for anything other than the
255 * standard modes.
256 */
257struct pata_macio_timing {
258 int mode;
259 u32 reg1; /* Bits to set in first timing reg */
260 u32 reg2; /* Bits to set in second timing reg */
261};
262
263static const struct pata_macio_timing pata_macio_ohare_timings[] = {
264 { XFER_PIO_0, 0x00000526, 0, },
265 { XFER_PIO_1, 0x00000085, 0, },
266 { XFER_PIO_2, 0x00000025, 0, },
267 { XFER_PIO_3, 0x00000025, 0, },
268 { XFER_PIO_4, 0x00000025, 0, },
269 { XFER_MW_DMA_0, 0x00074000, 0, },
270 { XFER_MW_DMA_1, 0x00221000, 0, },
271 { XFER_MW_DMA_2, 0x00211000, 0, },
272 { -1, 0, 0 }
273};
274
275static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
276 { XFER_PIO_0, 0x00000526, 0, },
277 { XFER_PIO_1, 0x00000085, 0, },
278 { XFER_PIO_2, 0x00000025, 0, },
279 { XFER_PIO_3, 0x00000025, 0, },
280 { XFER_PIO_4, 0x00000025, 0, },
281 { XFER_MW_DMA_0, 0x00074000, 0, },
282 { XFER_MW_DMA_1, 0x00221000, 0, },
283 { XFER_MW_DMA_2, 0x00211000, 0, },
284 { -1, 0, 0 }
285};
286
287static const struct pata_macio_timing pata_macio_kl33_timings[] = {
288 { XFER_PIO_0, 0x00000526, 0, },
289 { XFER_PIO_1, 0x00000085, 0, },
290 { XFER_PIO_2, 0x00000025, 0, },
291 { XFER_PIO_3, 0x00000025, 0, },
292 { XFER_PIO_4, 0x00000025, 0, },
293 { XFER_MW_DMA_0, 0x00084000, 0, },
294 { XFER_MW_DMA_1, 0x00021800, 0, },
295 { XFER_MW_DMA_2, 0x00011800, 0, },
296 { -1, 0, 0 }
297};
298
299static const struct pata_macio_timing pata_macio_kl66_timings[] = {
300 { XFER_PIO_0, 0x0000038c, 0, },
301 { XFER_PIO_1, 0x0000020a, 0, },
302 { XFER_PIO_2, 0x00000127, 0, },
303 { XFER_PIO_3, 0x000000c6, 0, },
304 { XFER_PIO_4, 0x00000065, 0, },
305 { XFER_MW_DMA_0, 0x00084000, 0, },
306 { XFER_MW_DMA_1, 0x00029800, 0, },
307 { XFER_MW_DMA_2, 0x00019400, 0, },
308 { XFER_UDMA_0, 0x19100000, 0, },
309 { XFER_UDMA_1, 0x14d00000, 0, },
310 { XFER_UDMA_2, 0x10900000, 0, },
311 { XFER_UDMA_3, 0x0c700000, 0, },
312 { XFER_UDMA_4, 0x0c500000, 0, },
313 { -1, 0, 0 }
314};
315
316static const struct pata_macio_timing pata_macio_kauai_timings[] = {
317 { XFER_PIO_0, 0x08000a92, 0, },
318 { XFER_PIO_1, 0x0800060f, 0, },
319 { XFER_PIO_2, 0x0800038b, 0, },
320 { XFER_PIO_3, 0x05000249, 0, },
321 { XFER_PIO_4, 0x04000148, 0, },
322 { XFER_MW_DMA_0, 0x00618000, 0, },
323 { XFER_MW_DMA_1, 0x00209000, 0, },
324 { XFER_MW_DMA_2, 0x00148000, 0, },
325 { XFER_UDMA_0, 0, 0x000070c1, },
326 { XFER_UDMA_1, 0, 0x00005d81, },
327 { XFER_UDMA_2, 0, 0x00004a61, },
328 { XFER_UDMA_3, 0, 0x00003a51, },
329 { XFER_UDMA_4, 0, 0x00002a31, },
330 { XFER_UDMA_5, 0, 0x00002921, },
331 { -1, 0, 0 }
332};
333
334static const struct pata_macio_timing pata_macio_shasta_timings[] = {
335 { XFER_PIO_0, 0x0a000c97, 0, },
336 { XFER_PIO_1, 0x07000712, 0, },
337 { XFER_PIO_2, 0x040003cd, 0, },
338 { XFER_PIO_3, 0x0500028b, 0, },
339 { XFER_PIO_4, 0x0400010a, 0, },
340 { XFER_MW_DMA_0, 0x00820800, 0, },
341 { XFER_MW_DMA_1, 0x0028b000, 0, },
342 { XFER_MW_DMA_2, 0x001ca000, 0, },
343 { XFER_UDMA_0, 0, 0x00035901, },
344 { XFER_UDMA_1, 0, 0x000348b1, },
345 { XFER_UDMA_2, 0, 0x00033881, },
346 { XFER_UDMA_3, 0, 0x00033861, },
347 { XFER_UDMA_4, 0, 0x00033841, },
348 { XFER_UDMA_5, 0, 0x00033031, },
349 { XFER_UDMA_6, 0, 0x00033021, },
350 { -1, 0, 0 }
351};
352
353static const struct pata_macio_timing *pata_macio_find_timing(
354 struct pata_macio_priv *priv,
355 int mode)
356{
357 int i;
358
359 for (i = 0; priv->timings[i].mode > 0; i++) {
360 if (priv->timings[i].mode == mode)
361 return &priv->timings[i];
362 }
363 return NULL;
364}
365
366
367static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
368{
369 struct pata_macio_priv *priv = ap->private_data;
370 void __iomem *rbase = ap->ioaddr.cmd_addr;
371
372 if (priv->kind == controller_sh_ata6 ||
373 priv->kind == controller_un_ata6 ||
374 priv->kind == controller_k2_ata6) {
375 writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
376 writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
377 } else
378 writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
379}
380
381static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
382{
383 ata_sff_dev_select(ap, device);
384
385 /* Apply timings */
386 pata_macio_apply_timings(ap, device);
387}
388
389static void pata_macio_set_timings(struct ata_port *ap,
390 struct ata_device *adev)
391{
392 struct pata_macio_priv *priv = ap->private_data;
393 const struct pata_macio_timing *t;
394
395 dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
396 adev->devno,
397 adev->pio_mode,
398 ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
399 adev->dma_mode,
400 ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
401
402 /* First clear timings */
403 priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
404
405 /* Now get the PIO timings */
406 t = pata_macio_find_timing(priv, adev->pio_mode);
407 if (t == NULL) {
408 dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
409 adev->pio_mode);
410 t = pata_macio_find_timing(priv, XFER_PIO_0);
411 }
412 BUG_ON(t == NULL);
413
414 /* PIO timings only ever use the first treg */
415 priv->treg[adev->devno][0] |= t->reg1;
416
417 /* Now get DMA timings */
418 t = pata_macio_find_timing(priv, adev->dma_mode);
419 if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
420 dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
421 t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
422 }
423 BUG_ON(t == NULL);
424
425 /* DMA timings can use both tregs */
426 priv->treg[adev->devno][0] |= t->reg1;
427 priv->treg[adev->devno][1] |= t->reg2;
428
429 dev_dbg(priv->dev, " -> %08x %08x\n",
430 priv->treg[adev->devno][0],
431 priv->treg[adev->devno][1]);
432
433 /* Apply to hardware */
434 pata_macio_apply_timings(ap, adev->devno);
435}
436
437/*
 438 * Blast some well-known "safe" values into the timing registers at init or
 439 * wakeup-from-sleep time, before we do the real calculation
440 */
441static void pata_macio_default_timings(struct pata_macio_priv *priv)
442{
443 unsigned int value, value2 = 0;
444
445 switch(priv->kind) {
446 case controller_sh_ata6:
447 value = 0x0a820c97;
448 value2 = 0x00033031;
449 break;
450 case controller_un_ata6:
451 case controller_k2_ata6:
452 value = 0x08618a92;
453 value2 = 0x00002921;
454 break;
455 case controller_kl_ata4:
456 value = 0x0008438c;
457 break;
458 case controller_kl_ata3:
459 value = 0x00084526;
460 break;
461 case controller_heathrow:
462 case controller_ohare:
463 default:
464 value = 0x00074526;
465 break;
466 }
467 priv->treg[0][0] = priv->treg[1][0] = value;
468 priv->treg[0][1] = priv->treg[1][1] = value2;
469}
470
471static int pata_macio_cable_detect(struct ata_port *ap)
472{
473 struct pata_macio_priv *priv = ap->private_data;
474
475 /* Get cable type from device-tree */
476 if (priv->kind == controller_kl_ata4 ||
477 priv->kind == controller_un_ata6 ||
478 priv->kind == controller_k2_ata6 ||
479 priv->kind == controller_sh_ata6) {
480 const char* cable = of_get_property(priv->node, "cable-type",
481 NULL);
482 struct device_node *root = of_find_node_by_path("/");
483 const char *model = of_get_property(root, "model", NULL);
484
485 if (cable && !strncmp(cable, "80-", 3)) {
 486 /* Some drives fail to detect an 80-conductor cable in PowerBooks.
 487 * These machines use a proprietary short IDE cable
 488 * anyway
489 */
490 if (!strncmp(model, "PowerBook", 9))
491 return ATA_CBL_PATA40_SHORT;
492 else
493 return ATA_CBL_PATA80;
494 }
495 }
496
 497 /* G5s seem to have an incorrect cable type in the device-tree.
 498 * Let's assume they always have an 80-conductor cable; this seems to
 499 * always be the case unless the user mucked around
500 */
501 if (of_device_is_compatible(priv->node, "K2-UATA") ||
502 of_device_is_compatible(priv->node, "shasta-ata"))
503 return ATA_CBL_PATA80;
504
 505 /* Anything else is a 40-conductor cable */
506 return ATA_CBL_PATA40;
507}
508
509static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
510{
511 unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
512 struct ata_port *ap = qc->ap;
513 struct pata_macio_priv *priv = ap->private_data;
514 struct scatterlist *sg;
515 struct dbdma_cmd *table;
516 unsigned int si, pi;
517
518 dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
519 __func__, qc, qc->flags, write, qc->dev->devno);
520
521 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
522 return;
523
524 table = (struct dbdma_cmd *) priv->dma_table_cpu;
525
526 pi = 0;
527 for_each_sg(qc->sg, sg, qc->n_elem, si) {
528 u32 addr, sg_len, len;
529
530 /* determine if physical DMA addr spans 64K boundary.
531 * Note h/w doesn't support 64-bit, so we unconditionally
532 * truncate dma_addr_t to u32.
533 */
534 addr = (u32) sg_dma_address(sg);
535 sg_len = sg_dma_len(sg);
536
537 while (sg_len) {
538 /* table overflow should never happen */
539 BUG_ON (pi++ >= MAX_DCMDS);
540
541 len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
542 st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
543 st_le16(&table->req_count, len);
544 st_le32(&table->phy_addr, addr);
545 table->cmd_dep = 0;
546 table->xfer_status = 0;
547 table->res_count = 0;
548 addr += len;
549 sg_len -= len;
550 ++table;
551 }
552 }
553
554 /* Should never happen according to Tejun */
555 BUG_ON(!pi);
556
557 /* Convert the last command to an input/output */
558 table--;
559 st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
560 table++;
561
562 /* Add the stop command to the end of the list */
563 memset(table, 0, sizeof(struct dbdma_cmd));
564 st_le16(&table->command, DBDMA_STOP);
565
566 dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
567}
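/*
 * Example of what the loop above builds (illustration only): a single
 * 0x18000-byte scatterlist entry becomes two OUTPUT_MORE (or INPUT_MORE)
 * commands of 0xff00 and 0x8100 bytes, since MAX_DBDMA_SEG caps each
 * command; the final command is then rewritten to OUTPUT_LAST/INPUT_LAST
 * and a DBDMA_STOP command is appended to terminate the list.
 */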
568
569
570static void pata_macio_freeze(struct ata_port *ap)
571{
572 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
573
574 if (dma_regs) {
575 unsigned int timeout = 1000000;
576
577 /* Make sure DMA controller is stopped */
578 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
579 while (--timeout && (readl(&dma_regs->status) & RUN))
580 udelay(1);
581 }
582
583 ata_sff_freeze(ap);
584}
585
586
587static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
588{
589 struct ata_port *ap = qc->ap;
590 struct pata_macio_priv *priv = ap->private_data;
591 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
592 int dev = qc->dev->devno;
593
594 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
595
 596 /* Make sure DMA command updates are visible */
597 writel(priv->dma_table_dma, &dma_regs->cmdptr);
598
599 /* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
600 * UDMA reads
601 */
602 if (priv->kind == controller_kl_ata4 &&
603 (priv->treg[dev][0] & TR_66_UDMA_EN)) {
604 void __iomem *rbase = ap->ioaddr.cmd_addr;
605 u32 reg = priv->treg[dev][0];
606
607 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
608 reg += 0x00800000;
609 writel(reg, rbase + IDE_TIMING_CONFIG);
610 }
611
612 /* issue r/w command */
613 ap->ops->sff_exec_command(ap, &qc->tf);
614}
615
616static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
617{
618 struct ata_port *ap = qc->ap;
619 struct pata_macio_priv *priv = ap->private_data;
620 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
621
622 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
623
624 writel((RUN << 16) | RUN, &dma_regs->control);
625 /* Make sure it gets to the controller right now */
626 (void)readl(&dma_regs->control);
627}
628
629static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
630{
631 struct ata_port *ap = qc->ap;
632 struct pata_macio_priv *priv = ap->private_data;
633 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
634 unsigned int timeout = 1000000;
635
636 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
637
 638 /* Stop the DMA engine and wait for it to fully halt */
639 writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
640 while (--timeout && (readl(&dma_regs->status) & RUN))
641 udelay(1);
642}
643
644static u8 pata_macio_bmdma_status(struct ata_port *ap)
645{
646 struct pata_macio_priv *priv = ap->private_data;
647 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
648 u32 dstat, rstat = ATA_DMA_INTR;
649 unsigned long timeout = 0;
650
651 dstat = readl(&dma_regs->status);
652
653 dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
654
655 /* We have two things to deal with here:
656 *
657 * - The dbdma won't stop if the command was started
658 * but completed with an error without transferring all
 659 * data. This happens when bad blocks are encountered during
660 * a multi-block transfer.
661 *
 662 * - The dbdma fifo hasn't yet finished flushing to
 663 * system memory when the disk interrupt occurs.
664 *
665 */
666
667 /* First check for errors */
668 if ((dstat & (RUN|DEAD)) != RUN)
669 rstat |= ATA_DMA_ERR;
670
671 /* If ACTIVE is cleared, the STOP command has been hit and
672 * the transfer is complete. If not, we have to flush the
673 * channel.
674 */
675 if ((dstat & ACTIVE) == 0)
676 return rstat;
677
678 dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
679
 680 /* If the dbdma hasn't executed the STOP command yet, the
681 * active bit is still set. We consider that we aren't
682 * sharing interrupts (which is hopefully the case with
683 * those controllers) and so we just try to flush the
684 * channel for pending data in the fifo
685 */
686 udelay(1);
687 writel((FLUSH << 16) | FLUSH, &dma_regs->control);
688 for (;;) {
689 udelay(1);
690 dstat = readl(&dma_regs->status);
691 if ((dstat & FLUSH) == 0)
692 break;
693 if (++timeout > 1000) {
694 dev_warn(priv->dev, "timeout flushing DMA\n");
695 rstat |= ATA_DMA_ERR;
696 break;
697 }
698 }
699 return rstat;
700}
701
702/* port_start is when we allocate the DMA command list */
703static int pata_macio_port_start(struct ata_port *ap)
704{
705 struct pata_macio_priv *priv = ap->private_data;
706
707 if (ap->ioaddr.bmdma_addr == NULL)
708 return 0;
709
710 /* Allocate space for the DBDMA commands.
711 *
712 * The +2 is +1 for the stop command and +1 to allow for
713 * aligning the start address to a multiple of 16 bytes.
714 */
715 priv->dma_table_cpu =
716 dmam_alloc_coherent(priv->dev,
717 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
718 &priv->dma_table_dma, GFP_KERNEL);
719 if (priv->dma_table_cpu == NULL) {
720 dev_err(priv->dev, "Unable to allocate DMA command list\n");
721 ap->ioaddr.bmdma_addr = NULL;
722 }
723 return 0;
724}
725
726static void pata_macio_irq_clear(struct ata_port *ap)
727{
728 struct pata_macio_priv *priv = ap->private_data;
729
730 /* Nothing to do here */
731
732 dev_dbgdma(priv->dev, "%s\n", __func__);
733}
734
735static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
736{
737 dev_dbg(priv->dev, "Enabling & resetting... \n");
738
739 if (priv->mediabay)
740 return;
741
742 if (priv->kind == controller_ohare && !resume) {
743 /* The code below is having trouble on some ohare machines
744 * (timing related ?). Until I can put my hand on one of these
745 * units, I keep the old way
746 */
747 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
748 } else {
749 int rc;
750
751 /* Reset and enable controller */
752 rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
753 priv->node, priv->aapl_bus_id, 1);
754 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
755 priv->node, priv->aapl_bus_id, 1);
756 msleep(10);
757 /* Only bother waiting if there's a reset control */
758 if (rc == 0) {
759 ppc_md.feature_call(PMAC_FTR_IDE_RESET,
760 priv->node, priv->aapl_bus_id, 0);
761 msleep(IDE_WAKEUP_DELAY_MS);
762 }
763 }
764
765 /* If resuming a PCI device, restore the config space here */
766 if (priv->pdev && resume) {
767 int rc;
768
769 pci_restore_state(priv->pdev);
770 rc = pcim_enable_device(priv->pdev);
771 if (rc)
772 dev_printk(KERN_ERR, &priv->pdev->dev,
773 "Failed to enable device after resume (%d)\n", rc);
774 else
775 pci_set_master(priv->pdev);
776 }
777
 778 /* On Kauai, initialize the FCR. We don't perform a reset; it doesn't really
 779 * seem necessary, and skipping it speeds up the boot process
780 */
781 if (priv->kauai_fcr)
782 writel(KAUAI_FCR_UATA_MAGIC |
783 KAUAI_FCR_UATA_RESET_N |
784 KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
785}
786
 787/* Hook the standard slave config to fix up some HW-related alignment
 788 * restrictions
789 */
790static int pata_macio_slave_config(struct scsi_device *sdev)
791{
792 struct ata_port *ap = ata_shost_to_port(sdev->host);
793 struct pata_macio_priv *priv = ap->private_data;
794 struct ata_device *dev;
795 u16 cmd;
796 int rc;
797
798 /* First call original */
799 rc = ata_scsi_slave_config(sdev);
800 if (rc)
801 return rc;
802
803 /* This is lifted from sata_nv */
804 dev = &ap->link.device[sdev->id];
805
 806 /* OHare has issues with non-cache-aligned DMA on some chipsets */
807 if (priv->kind == controller_ohare) {
808 blk_queue_update_dma_alignment(sdev->request_queue, 31);
809 blk_queue_update_dma_pad(sdev->request_queue, 31);
810
811 /* Tell the world about it */
812 ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
813 return 0;
814 }
815
816 /* We only have issues with ATAPI */
817 if (dev->class != ATA_DEV_ATAPI)
818 return 0;
819
820 /* Shasta and K2 seem to have "issues" with reads ... */
821 if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
 822 /* All right, these are bad; apply restrictions */
823 blk_queue_update_dma_alignment(sdev->request_queue, 15);
824 blk_queue_update_dma_pad(sdev->request_queue, 15);
825
 826 /* We enable MWI and hack the cache line size directly here; this
 827 * is specific to this chipset, not normal values. We happen
 828 * to somewhat know what we are doing here (which is basically
 829 * to do the same thing Apple does and pray they did not get it wrong :-)
830 */
831 BUG_ON(!priv->pdev);
832 pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
833 pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
834 pci_write_config_word(priv->pdev, PCI_COMMAND,
835 cmd | PCI_COMMAND_INVALIDATE);
836
837 /* Tell the world about it */
838 ata_dev_printk(dev, KERN_INFO,
839 "K2/Shasta alignment limits applied\n");
840 }
841
842 return 0;
843}
844
845#ifdef CONFIG_PM
846
847static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
848{
849 int rc;
850
851 /* First, core libata suspend to do most of the work */
852 rc = ata_host_suspend(priv->host, mesg);
853 if (rc)
854 return rc;
855
856 /* Restore to default timings */
857 pata_macio_default_timings(priv);
858
 859 /* Mask interrupt. Not strictly necessary but the old driver did
 860 * it and I'd rather not change that here */
861 disable_irq(priv->irq);
862
863 /* The media bay will handle itself just fine */
864 if (priv->mediabay)
865 return 0;
866
867 /* Kauai has bus control FCRs directly here */
868 if (priv->kauai_fcr) {
869 u32 fcr = readl(priv->kauai_fcr);
870 fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
871 writel(fcr, priv->kauai_fcr);
872 }
873
874 /* For PCI, save state and disable DMA. No need to call
875 * pci_set_power_state(), the HW doesn't do D states that
876 * way, the platform code will take care of suspending the
877 * ASIC properly
878 */
879 if (priv->pdev) {
880 pci_save_state(priv->pdev);
881 pci_disable_device(priv->pdev);
882 }
883
884 /* Disable the bus on older machines and the cell on kauai */
885 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
886 priv->aapl_bus_id, 0);
887
888 return 0;
889}
890
891static int pata_macio_do_resume(struct pata_macio_priv *priv)
892{
893 /* Reset and re-enable the HW */
894 pata_macio_reset_hw(priv, 1);
895
896 /* Sanitize drive timings */
897 pata_macio_apply_timings(priv->host->ports[0], 0);
898
899 /* We want our IRQ back ! */
900 enable_irq(priv->irq);
901
902 /* Let the libata core take it from there */
903 ata_host_resume(priv->host);
904
905 return 0;
906}
907
908#endif /* CONFIG_PM */
909
910static struct scsi_host_template pata_macio_sht = {
911 ATA_BASE_SHT(DRV_NAME),
912 .sg_tablesize = MAX_DCMDS,
913 /* We may not need that strict one */
914 .dma_boundary = ATA_DMA_BOUNDARY,
915 .slave_configure = pata_macio_slave_config,
916};
917
918static struct ata_port_operations pata_macio_ops = {
919 .inherits = &ata_sff_port_ops,
920
921 .freeze = pata_macio_freeze,
922 .set_piomode = pata_macio_set_timings,
923 .set_dmamode = pata_macio_set_timings,
924 .cable_detect = pata_macio_cable_detect,
925 .sff_dev_select = pata_macio_dev_select,
926 .qc_prep = pata_macio_qc_prep,
927 .mode_filter = ata_bmdma_mode_filter,
928 .bmdma_setup = pata_macio_bmdma_setup,
929 .bmdma_start = pata_macio_bmdma_start,
930 .bmdma_stop = pata_macio_bmdma_stop,
931 .bmdma_status = pata_macio_bmdma_status,
932 .port_start = pata_macio_port_start,
933 .sff_irq_clear = pata_macio_irq_clear,
934};
935
936static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
937{
938 const int *bidp;
939
940 /* Identify the type of controller */
941 if (of_device_is_compatible(priv->node, "shasta-ata")) {
942 priv->kind = controller_sh_ata6;
943 priv->timings = pata_macio_shasta_timings;
944 } else if (of_device_is_compatible(priv->node, "kauai-ata")) {
945 priv->kind = controller_un_ata6;
946 priv->timings = pata_macio_kauai_timings;
947 } else if (of_device_is_compatible(priv->node, "K2-UATA")) {
948 priv->kind = controller_k2_ata6;
949 priv->timings = pata_macio_kauai_timings;
950 } else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
951 if (strcmp(priv->node->name, "ata-4") == 0) {
952 priv->kind = controller_kl_ata4;
953 priv->timings = pata_macio_kl66_timings;
954 } else {
955 priv->kind = controller_kl_ata3;
956 priv->timings = pata_macio_kl33_timings;
957 }
958 } else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
959 priv->kind = controller_heathrow;
960 priv->timings = pata_macio_heathrow_timings;
961 } else {
962 priv->kind = controller_ohare;
963 priv->timings = pata_macio_ohare_timings;
964 }
965
966 /* XXX FIXME --- setup priv->mediabay here */
967
968 /* Get Apple bus ID (for clock and ASIC control) */
969 bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
970 priv->aapl_bus_id = bidp ? *bidp : 0;
971
 972 /* Fix up a missing Apple bus ID in the media-bay case */
973 if (priv->mediabay && bidp == 0)
974 priv->aapl_bus_id = 1;
975}
976
977static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
978 void __iomem * base,
979 void __iomem * dma)
980{
981 /* cmd_addr is the base of regs for that port */
982 ioaddr->cmd_addr = base;
983
984 /* taskfile registers */
985 ioaddr->data_addr = base + (ATA_REG_DATA << 4);
986 ioaddr->error_addr = base + (ATA_REG_ERR << 4);
987 ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4);
988 ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4);
989 ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4);
990 ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4);
991 ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4);
992 ioaddr->device_addr = base + (ATA_REG_DEVICE << 4);
993 ioaddr->status_addr = base + (ATA_REG_STATUS << 4);
994 ioaddr->command_addr = base + (ATA_REG_CMD << 4);
995 ioaddr->altstatus_addr = base + 0x160;
996 ioaddr->ctl_addr = base + 0x160;
997 ioaddr->bmdma_addr = dma;
998}
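/*
 * Illustration (assuming the usual ATA_REG_* values from <linux/ata.h>,
 * i.e. ATA_REG_DATA = 0x0 up to ATA_REG_STATUS = 0x7): the << 4 above
 * spaces the taskfile registers 16 bytes apart, so data lands at
 * base + 0x00, error/feature at base + 0x10, ... status/command at
 * base + 0x70, with altstatus/control at base + 0x160.
 */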
999
1000static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1001 struct ata_port_info *pinfo)
1002{
1003 int i = 0;
1004
1005 pinfo->pio_mask = 0;
1006 pinfo->mwdma_mask = 0;
1007 pinfo->udma_mask = 0;
1008
1009 while (priv->timings[i].mode > 0) {
1010 unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1011 switch(priv->timings[i].mode & 0xf0) {
1012 case 0x00: /* PIO */
1013 pinfo->pio_mask |= (mask >> 8);
1014 break;
1015 case 0x20: /* MWDMA */
1016 pinfo->mwdma_mask |= mask;
1017 break;
1018 case 0x40: /* UDMA */
1019 pinfo->udma_mask |= mask;
1020 break;
1021 }
1022 i++;
1023 }
1024 dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
1025 pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1026}
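/*
 * Example of the mask arithmetic above (illustrative; XFER_* values as
 * defined in <linux/ata.h>, e.g. XFER_PIO_4 = 0x0c, XFER_UDMA_5 = 0x45):
 * XFER_PIO_4 gives (1 << 0xc) >> 8 = 0x10, i.e. bit 4 of pio_mask, while
 * XFER_UDMA_5 gives 1 << 5 = 0x20, i.e. bit 5 of udma_mask.
 */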
1027
1028static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1029 resource_size_t tfregs,
1030 resource_size_t dmaregs,
1031 resource_size_t fcregs,
1032 unsigned long irq)
1033{
1034 struct ata_port_info pinfo;
1035 const struct ata_port_info *ppi[] = { &pinfo, NULL };
1036 void __iomem *dma_regs = NULL;
1037
1038 /* Fill up privates with various invariants collected from the
1039 * device-tree
1040 */
1041 pata_macio_invariants(priv);
1042
1043 /* Make sure we have sane initial timings in the cache */
1044 pata_macio_default_timings(priv);
1045
1046 /* Not sure what the real max is but we know it's less than 64K, let's
1047 * use 64K minus 256
1048 */
1049 dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1050
1051 /* Allocate libata host for 1 port */
1052 memset(&pinfo, 0, sizeof(struct ata_port_info));
1053 pmac_macio_calc_timing_masks(priv, &pinfo);
1054 pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO |
1055 ATA_FLAG_NO_LEGACY;
1056 pinfo.port_ops = &pata_macio_ops;
1057 pinfo.private_data = priv;
1058
1059 priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1060 if (priv->host == NULL) {
1061 dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1062 return -ENOMEM;
1063 }
1064
1065 /* Setup the private data in host too */
1066 priv->host->private_data = priv;
1067
1068 /* Map base registers */
1069 priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1070 if (priv->tfregs == NULL) {
1071 dev_err(priv->dev, "Failed to map ATA ports\n");
1072 return -ENOMEM;
1073 }
1074 priv->host->iomap = &priv->tfregs;
1075
1076 /* Map DMA regs */
1077 if (dmaregs != 0) {
1078 dma_regs = devm_ioremap(priv->dev, dmaregs,
1079 sizeof(struct dbdma_regs));
1080 if (dma_regs == NULL)
1081 dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1082 }
1083
1084 /* If chip has local feature control, map those regs too */
1085 if (fcregs != 0) {
1086 priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1087 if (priv->kauai_fcr == NULL) {
1088 dev_err(priv->dev, "Failed to map ATA FCR register\n");
1089 return -ENOMEM;
1090 }
1091 }
1092
1093 /* Setup port data structure */
1094 pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1095 priv->tfregs, dma_regs);
1096 priv->host->ports[0]->private_data = priv;
1097
1098 /* hard-reset the controller */
1099 pata_macio_reset_hw(priv, 0);
1100 pata_macio_apply_timings(priv->host->ports[0], 0);
1101
1102 /* Enable bus master if necessary */
1103 if (priv->pdev && dma_regs)
1104 pci_set_master(priv->pdev);
1105
1106 dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1107 macio_ata_names[priv->kind], priv->aapl_bus_id);
1108
1109 /* Start it up */
1110 priv->irq = irq;
1111 return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
1112 &pata_macio_sht);
1113}
1114
1115static int __devinit pata_macio_attach(struct macio_dev *mdev,
1116 const struct of_device_id *match)
1117{
1118 struct pata_macio_priv *priv;
1119 resource_size_t tfregs, dmaregs = 0;
1120 unsigned long irq;
1121 int rc;
1122
1123 /* Check for broken device-trees */
1124 if (macio_resource_count(mdev) == 0) {
1125 dev_err(&mdev->ofdev.dev,
1126 "No addresses for controller\n");
1127 return -ENXIO;
1128 }
1129
1130 /* Enable managed resources */
1131 macio_enable_devres(mdev);
1132
1133 /* Allocate and init private data structure */
1134 priv = devm_kzalloc(&mdev->ofdev.dev,
1135 sizeof(struct pata_macio_priv), GFP_KERNEL);
1136 if (priv == NULL) {
1137 dev_err(&mdev->ofdev.dev,
1138 "Failed to allocate private memory\n");
1139 return -ENOMEM;
1140 }
1141 priv->node = of_node_get(mdev->ofdev.node);
1142 priv->mdev = mdev;
1143 priv->dev = &mdev->ofdev.dev;
1144
1145 /* Request memory resource for taskfile registers */
1146 if (macio_request_resource(mdev, 0, "pata-macio")) {
1147 dev_err(&mdev->ofdev.dev,
1148 "Cannot obtain taskfile resource\n");
1149 return -EBUSY;
1150 }
1151 tfregs = macio_resource_start(mdev, 0);
1152
1153 /* Request resources for DMA registers if any */
1154 if (macio_resource_count(mdev) >= 2) {
1155 if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1156 dev_err(&mdev->ofdev.dev,
1157 "Cannot obtain DMA resource\n");
1158 else
1159 dmaregs = macio_resource_start(mdev, 1);
1160 }
1161
1162 /*
1163 * Fixup missing IRQ for some old implementations with broken
1164 * device-trees.
1165 *
1166 * This is a bit bogus, it should be fixed in the device-tree itself,
1167 * via the existing macio fixups, based on the type of interrupt
1168 * controller in the machine. However, I have no test HW for this case,
1169 * and this trick works well enough on those old machines...
1170 */
1171 if (macio_irq_count(mdev) == 0) {
1172 dev_warn(&mdev->ofdev.dev,
1173 "No interrupts for controller, using 13\n");
1174 irq = irq_create_mapping(NULL, 13);
1175 } else
1176 irq = macio_irq(mdev, 0);
1177
 1178 /* Prevent media bay callbacks until fully registered */
1179 lock_media_bay(priv->mdev->media_bay);
1180
1181 /* Get register addresses and call common initialization */
1182 rc = pata_macio_common_init(priv,
1183 tfregs, /* Taskfile regs */
1184 dmaregs, /* DBDMA regs */
1185 0, /* Feature control */
1186 irq);
1187 unlock_media_bay(priv->mdev->media_bay);
1188
1189 return rc;
1190}
1191
1192static int __devexit pata_macio_detach(struct macio_dev *mdev)
1193{
1194 struct ata_host *host = macio_get_drvdata(mdev);
1195 struct pata_macio_priv *priv = host->private_data;
1196
1197 lock_media_bay(priv->mdev->media_bay);
1198
1199 /* Make sure the mediabay callback doesn't try to access
1200 * dead stuff
1201 */
1202 priv->host->private_data = NULL;
1203
1204 ata_host_detach(host);
1205
1206 unlock_media_bay(priv->mdev->media_bay);
1207
1208 return 0;
1209}
1210
1211#ifdef CONFIG_PM
1212
1213static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1214{
1215 struct ata_host *host = macio_get_drvdata(mdev);
1216
1217 return pata_macio_do_suspend(host->private_data, mesg);
1218}
1219
1220static int pata_macio_resume(struct macio_dev *mdev)
1221{
1222 struct ata_host *host = macio_get_drvdata(mdev);
1223
1224 return pata_macio_do_resume(host->private_data);
1225}
1226
1227#endif /* CONFIG_PM */
1228
1229#ifdef CONFIG_PMAC_MEDIABAY
1230static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1231{
1232 struct ata_host *host = macio_get_drvdata(mdev);
1233 struct ata_port *ap;
1234 struct ata_eh_info *ehi;
1235 struct ata_device *dev;
1236 unsigned long flags;
1237
1238 if (!host || !host->private_data)
1239 return;
1240 ap = host->ports[0];
1241 spin_lock_irqsave(ap->lock, flags);
1242 ehi = &ap->link.eh_info;
1243 if (mb_state == MB_CD) {
1244 ata_ehi_push_desc(ehi, "mediabay plug");
1245 ata_ehi_hotplugged(ehi);
1246 ata_port_freeze(ap);
1247 } else {
1248 ata_ehi_push_desc(ehi, "mediabay unplug");
1249 ata_for_each_dev(dev, &ap->link, ALL)
1250 dev->flags |= ATA_DFLAG_DETACH;
1251 ata_port_abort(ap);
1252 }
1253 spin_unlock_irqrestore(ap->lock, flags);
1254
1255}
1256#endif /* CONFIG_PMAC_MEDIABAY */
1257
1258
1259static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
1260 const struct pci_device_id *id)
1261{
1262 struct pata_macio_priv *priv;
1263 struct device_node *np;
1264 resource_size_t rbase;
1265
1266 /* We cannot use a MacIO controller without its OF device node */
1267 np = pci_device_to_OF_node(pdev);
1268 if (np == NULL) {
1269 dev_err(&pdev->dev,
1270 "Cannot find OF device node for controller\n");
1271 return -ENODEV;
1272 }
1273
1274 /* Check that it can be enabled */
1275 if (pcim_enable_device(pdev)) {
1276 dev_err(&pdev->dev,
1277 "Cannot enable controller PCI device\n");
1278 return -ENXIO;
1279 }
1280
1281 /* Allocate and init private data structure */
1282 priv = devm_kzalloc(&pdev->dev,
1283 sizeof(struct pata_macio_priv), GFP_KERNEL);
1284 if (priv == NULL) {
1285 dev_err(&pdev->dev,
1286 "Failed to allocate private memory\n");
1287 return -ENOMEM;
1288 }
1289 priv->node = of_node_get(np);
1290 priv->pdev = pdev;
1291 priv->dev = &pdev->dev;
1292
1293 /* Get MMIO regions */
1294 if (pci_request_regions(pdev, "pata-macio")) {
1295 dev_err(&pdev->dev,
1296 "Cannot obtain PCI resources\n");
1297 return -EBUSY;
1298 }
1299
1300 /* Get register addresses and call common initialization */
1301 rbase = pci_resource_start(pdev, 0);
1302 if (pata_macio_common_init(priv,
1303 rbase + 0x2000, /* Taskfile regs */
1304 rbase + 0x1000, /* DBDMA regs */
1305 rbase, /* Feature control */
1306 pdev->irq))
1307 return -ENXIO;
1308
1309 return 0;
1310}
1311
1312static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
1313{
1314 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1315
1316 ata_host_detach(host);
1317}
1318
1319#ifdef CONFIG_PM
1320
1321static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1322{
1323 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1324
1325 return pata_macio_do_suspend(host->private_data, mesg);
1326}
1327
1328static int pata_macio_pci_resume(struct pci_dev *pdev)
1329{
1330 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1331
1332 return pata_macio_do_resume(host->private_data);
1333}
1334
1335#endif /* CONFIG_PM */
1336
1337static struct of_device_id pata_macio_match[] =
1338{
1339 {
1340 .name = "IDE",
1341 },
1342 {
1343 .name = "ATA",
1344 },
1345 {
1346 .type = "ide",
1347 },
1348 {
1349 .type = "ata",
1350 },
1351 {},
1352};
1353
1354static struct macio_driver pata_macio_driver =
1355{
1356 .name = "pata-macio",
1357 .match_table = pata_macio_match,
1358 .probe = pata_macio_attach,
1359 .remove = pata_macio_detach,
1360#ifdef CONFIG_PM
1361 .suspend = pata_macio_suspend,
1362 .resume = pata_macio_resume,
1363#endif
1364#ifdef CONFIG_PMAC_MEDIABAY
1365 .mediabay_event = pata_macio_mb_event,
1366#endif
1367 .driver = {
1368 .owner = THIS_MODULE,
1369 },
1370};
1371
1372static const struct pci_device_id pata_macio_pci_match[] = {
1373 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
1374 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
1375 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
1376 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
1377 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
1378 {},
1379};
1380
1381static struct pci_driver pata_macio_pci_driver = {
1382 .name = "pata-pci-macio",
1383 .id_table = pata_macio_pci_match,
1384 .probe = pata_macio_pci_attach,
1385 .remove = pata_macio_pci_detach,
1386#ifdef CONFIG_PM
1387 .suspend = pata_macio_pci_suspend,
1388 .resume = pata_macio_pci_resume,
1389#endif
1390 .driver = {
1391 .owner = THIS_MODULE,
1392 },
1393};
1394MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1395
1396
1397static int __init pata_macio_init(void)
1398{
1399 int rc;
1400
1401 if (!machine_is(powermac))
1402 return -ENODEV;
1403
1404 rc = pci_register_driver(&pata_macio_pci_driver);
1405 if (rc)
1406 return rc;
1407 rc = macio_register_driver(&pata_macio_driver);
1408 if (rc) {
1409 pci_unregister_driver(&pata_macio_pci_driver);
1410 return rc;
1411 }
1412 return 0;
1413}
1414
1415static void __exit pata_macio_exit(void)
1416{
1417 macio_unregister_driver(&pata_macio_driver);
1418 pci_unregister_driver(&pata_macio_pci_driver);
1419}
1420
1421module_init(pata_macio_init);
1422module_exit(pata_macio_exit);
1423
1424MODULE_AUTHOR("Benjamin Herrenschmidt");
1425MODULE_DESCRIPTION("Apple MacIO PATA driver");
1426MODULE_LICENSE("GPL");
1427MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..27fd775375b0 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
35 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 35 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
36 ssize_t ret; 36 ssize_t ret;
37 37
38 cpu_hotplug_driver_lock();
38 switch (buf[0]) { 39 switch (buf[0]) {
39 case '0': 40 case '0':
40 ret = cpu_down(cpu->sysdev.id); 41 ret = cpu_down(cpu->sysdev.id);
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
49 default: 50 default:
50 ret = -EINVAL; 51 ret = -EINVAL;
51 } 52 }
53 cpu_hotplug_driver_unlock();
52 54
53 if (ret >= 0) 55 if (ret >= 0)
54 ret = count; 56 ret = count;
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu)
72 per_cpu(cpu_sys_devices, logical_cpu) = NULL; 74 per_cpu(cpu_sys_devices, logical_cpu) = NULL;
73 return; 75 return;
74} 76}
77
78#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
79static ssize_t cpu_probe_store(struct class *class, const char *buf,
80 size_t count)
81{
82 return arch_cpu_probe(buf, count);
83}
84
85static ssize_t cpu_release_store(struct class *class, const char *buf,
86 size_t count)
87{
88 return arch_cpu_release(buf, count);
89}
90
91static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
92static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
93
94int __init cpu_probe_release_init(void)
95{
96 int rc;
97
98 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
99 &class_attr_probe.attr);
100 if (!rc)
101 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
102 &class_attr_release.attr);
103
104 return rc;
105}
106device_initcall(cpu_probe_release_init);
107#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
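/*
 * Usage sketch (illustration only, not part of the patch): on architectures
 * that provide CONFIG_ARCH_CPU_PROBE_RELEASE (e.g. pseries DLPAR in this
 * merge), these attributes appear as /sys/devices/system/cpu/probe and
 * /sys/devices/system/cpu/release; writing an arch-defined token to them
 * ends up in arch_cpu_probe()/arch_cpu_release().
 */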
108
75#else /* ... !CONFIG_HOTPLUG_CPU */ 109#else /* ... !CONFIG_HOTPLUG_CPU */
76static inline void register_cpu_control(struct cpu *cpu) 110static inline void register_cpu_control(struct cpu *cpu)
77{ 111{
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 6380ad8d91bd..59ca2b77b574 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -200,7 +200,7 @@ struct floppy_state {
200 int ejected; 200 int ejected;
201 wait_queue_head_t wait; 201 wait_queue_head_t wait;
202 int wanted; 202 int wanted;
203 struct device_node* media_bay; /* NULL when not in bay */ 203 struct macio_dev *mdev;
204 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; 204 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
205}; 205};
206 206
@@ -303,14 +303,13 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
303static void do_fd_request(struct request_queue * q) 303static void do_fd_request(struct request_queue * q)
304{ 304{
305 int i; 305 int i;
306 for(i=0;i<floppy_count;i++) 306
307 { 307 for(i=0; i<floppy_count; i++) {
308#ifdef CONFIG_PMAC_MEDIABAY 308 struct floppy_state *fs = &floppy_states[i];
309 if (floppy_states[i].media_bay && 309 if (fs->mdev->media_bay &&
310 check_media_bay(floppy_states[i].media_bay, MB_FD)) 310 check_media_bay(fs->mdev->media_bay) != MB_FD)
311 continue; 311 continue;
312#endif /* CONFIG_PMAC_MEDIABAY */ 312 start_request(fs);
313 start_request(&floppy_states[i]);
314 } 313 }
315} 314}
316 315
@@ -849,10 +848,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
849 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) 848 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
850 return -EPERM; 849 return -EPERM;
851 850
852#ifdef CONFIG_PMAC_MEDIABAY 851 if (fs->mdev->media_bay &&
853 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 852 check_media_bay(fs->mdev->media_bay) != MB_FD)
854 return -ENXIO; 853 return -ENXIO;
855#endif
856 854
857 switch (cmd) { 855 switch (cmd) {
858 case FDEJECT: 856 case FDEJECT:
@@ -876,10 +874,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
876 int n, err = 0; 874 int n, err = 0;
877 875
878 if (fs->ref_count == 0) { 876 if (fs->ref_count == 0) {
879#ifdef CONFIG_PMAC_MEDIABAY 877 if (fs->mdev->media_bay &&
880 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 878 check_media_bay(fs->mdev->media_bay) != MB_FD)
881 return -ENXIO; 879 return -ENXIO;
882#endif
883 out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); 880 out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
884 out_8(&sw->control_bic, 0xff); 881 out_8(&sw->control_bic, 0xff);
885 out_8(&sw->mode, 0x95); 882 out_8(&sw->mode, 0x95);
@@ -963,10 +960,9 @@ static int floppy_revalidate(struct gendisk *disk)
963 struct swim3 __iomem *sw; 960 struct swim3 __iomem *sw;
964 int ret, n; 961 int ret, n;
965 962
966#ifdef CONFIG_PMAC_MEDIABAY 963 if (fs->mdev->media_bay &&
967 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 964 check_media_bay(fs->mdev->media_bay) != MB_FD)
968 return -ENXIO; 965 return -ENXIO;
969#endif
970 966
971 sw = fs->swim3; 967 sw = fs->swim3;
972 grab_drive(fs, revalidating, 0); 968 grab_drive(fs, revalidating, 0);
@@ -1009,7 +1005,6 @@ static const struct block_device_operations floppy_fops = {
1009static int swim3_add_device(struct macio_dev *mdev, int index) 1005static int swim3_add_device(struct macio_dev *mdev, int index)
1010{ 1006{
1011 struct device_node *swim = mdev->ofdev.node; 1007 struct device_node *swim = mdev->ofdev.node;
1012 struct device_node *mediabay;
1013 struct floppy_state *fs = &floppy_states[index]; 1008 struct floppy_state *fs = &floppy_states[index];
1014 int rc = -EBUSY; 1009 int rc = -EBUSY;
1015 1010
@@ -1036,9 +1031,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1036 } 1031 }
1037 dev_set_drvdata(&mdev->ofdev.dev, fs); 1032 dev_set_drvdata(&mdev->ofdev.dev, fs);
1038 1033
1039 mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? 1034 if (mdev->media_bay == NULL)
1040 swim->parent : NULL;
1041 if (mediabay == NULL)
1042 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); 1035 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1043 1036
1044 memset(fs, 0, sizeof(*fs)); 1037 memset(fs, 0, sizeof(*fs));
@@ -1068,7 +1061,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1068 fs->secpercyl = 36; 1061 fs->secpercyl = 36;
1069 fs->secpertrack = 18; 1062 fs->secpertrack = 18;
1070 fs->total_secs = 2880; 1063 fs->total_secs = 2880;
1071 fs->media_bay = mediabay; 1064 fs->mdev = mdev;
1072 init_waitqueue_head(&fs->wait); 1065 init_waitqueue_head(&fs->wait);
1073 1066
1074 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); 1067 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
@@ -1093,7 +1086,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1093 init_timer(&fs->timeout); 1086 init_timer(&fs->timeout);
1094 1087
1095 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, 1088 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
1096 mediabay ? "in media bay" : ""); 1089 mdev->media_bay ? "in media bay" : "");
1097 1090
1098 return 0; 1091 return 0;
1099 1092
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 703959eba45a..d89da4ac061f 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -144,16 +144,13 @@ static int uninorth_configure(void)
144 return 0; 144 return 0;
145} 145}
146 146
147static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, 147static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
148 int type)
149{ 148{
150 int i, j, num_entries; 149 int i, num_entries;
151 void *temp; 150 void *temp;
151 u32 *gp;
152 int mask_type; 152 int mask_type;
153 153
154 temp = agp_bridge->current_size;
155 num_entries = A_SIZE_32(temp)->num_entries;
156
157 if (type != mem->type) 154 if (type != mem->type)
158 return -EINVAL; 155 return -EINVAL;
159 156
@@ -163,49 +160,12 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
163 return -EINVAL; 160 return -EINVAL;
164 } 161 }
165 162
166 if ((pg_start + mem->page_count) > num_entries) 163 if (mem->page_count == 0)
167 return -EINVAL; 164 return 0;
168
169 j = pg_start;
170
171 while (j < (pg_start + mem->page_count)) {
172 if (agp_bridge->gatt_table[j])
173 return -EBUSY;
174 j++;
175 }
176
177 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
178 agp_bridge->gatt_table[j] =
179 cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL);
180 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
181 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
182 }
183 (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
184 mb();
185
186 uninorth_tlbflush(mem);
187 return 0;
188}
189
190static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
191{
192 int i, num_entries;
193 void *temp;
194 u32 *gp;
195 int mask_type;
196 165
197 temp = agp_bridge->current_size; 166 temp = agp_bridge->current_size;
198 num_entries = A_SIZE_32(temp)->num_entries; 167 num_entries = A_SIZE_32(temp)->num_entries;
199 168
200 if (type != mem->type)
201 return -EINVAL;
202
203 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
204 if (mask_type != 0) {
205 /* We know nothing of memory types */
206 return -EINVAL;
207 }
208
209 if ((pg_start + mem->page_count) > num_entries) 169 if ((pg_start + mem->page_count) > num_entries)
210 return -EINVAL; 170 return -EINVAL;
211 171
@@ -213,14 +173,18 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
213 for (i = 0; i < mem->page_count; ++i) { 173 for (i = 0; i < mem->page_count; ++i) {
214 if (gp[i]) { 174 if (gp[i]) {
215 dev_info(&agp_bridge->dev->dev, 175 dev_info(&agp_bridge->dev->dev,
216 "u3_insert_memory: entry 0x%x occupied (%x)\n", 176 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
217 i, gp[i]); 177 i, gp[i]);
218 return -EBUSY; 178 return -EBUSY;
219 } 179 }
220 } 180 }
221 181
222 for (i = 0; i < mem->page_count; i++) { 182 for (i = 0; i < mem->page_count; i++) {
223 gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; 183 if (is_u3)
184 gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
185 else
186 gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
187 0x1UL);
224 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), 188 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
225 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); 189 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
226 } 190 }
@@ -230,14 +194,23 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
230 return 0; 194 return 0;
231} 195}
232 196
233int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type) 197int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
234{ 198{
235 size_t i; 199 size_t i;
236 u32 *gp; 200 u32 *gp;
201 int mask_type;
202
203 if (type != mem->type)
204 return -EINVAL;
237 205
238 if (type != 0 || mem->type != 0) 206 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
207 if (mask_type != 0) {
239 /* We know nothing of memory types */ 208 /* We know nothing of memory types */
240 return -EINVAL; 209 return -EINVAL;
210 }
211
212 if (mem->page_count == 0)
213 return 0;
241 214
242 gp = (u32 *) &agp_bridge->gatt_table[pg_start]; 215 gp = (u32 *) &agp_bridge->gatt_table[pg_start];
243 for (i = 0; i < mem->page_count; ++i) 216 for (i = 0; i < mem->page_count; ++i)
@@ -536,7 +509,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
536 .create_gatt_table = uninorth_create_gatt_table, 509 .create_gatt_table = uninorth_create_gatt_table,
537 .free_gatt_table = uninorth_free_gatt_table, 510 .free_gatt_table = uninorth_free_gatt_table,
538 .insert_memory = uninorth_insert_memory, 511 .insert_memory = uninorth_insert_memory,
539 .remove_memory = agp_generic_remove_memory, 512 .remove_memory = uninorth_remove_memory,
540 .alloc_by_type = agp_generic_alloc_by_type, 513 .alloc_by_type = agp_generic_alloc_by_type,
541 .free_by_type = agp_generic_free_by_type, 514 .free_by_type = agp_generic_free_by_type,
542 .agp_alloc_page = agp_generic_alloc_page, 515 .agp_alloc_page = agp_generic_alloc_page,
@@ -562,8 +535,8 @@ const struct agp_bridge_driver u3_agp_driver = {
562 .agp_enable = uninorth_agp_enable, 535 .agp_enable = uninorth_agp_enable,
563 .create_gatt_table = uninorth_create_gatt_table, 536 .create_gatt_table = uninorth_create_gatt_table,
564 .free_gatt_table = uninorth_free_gatt_table, 537 .free_gatt_table = uninorth_free_gatt_table,
565 .insert_memory = u3_insert_memory, 538 .insert_memory = uninorth_insert_memory,
566 .remove_memory = u3_remove_memory, 539 .remove_memory = uninorth_remove_memory,
567 .alloc_by_type = agp_generic_alloc_by_type, 540 .alloc_by_type = agp_generic_alloc_by_type,
568 .free_by_type = agp_generic_free_by_type, 541 .free_by_type = agp_generic_free_by_type,
569 .agp_alloc_page = agp_generic_alloc_page, 542 .agp_alloc_page = agp_generic_alloc_page,
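The hunks above fold u3_insert_memory()/u3_remove_memory() into the UniNorth versions, selecting the GATT entry format at run time. As a minimal sketch (not part of the patch; the helper name is made up, and it assumes the same is_u3 flag the merged loop tests), the two encodings being written are:

/* Illustrative helper only -- the patch open-codes this inside the copy
 * loop.  UniNorth wants a little-endian entry: the 4k-aligned physical
 * address with bit 0 as the "valid" bit.  U3 wants the physical page
 * frame number with bit 31 as the "valid" bit. */
static inline u32 uninorth_gatt_entry(struct page *page, int is_u3)
{
	unsigned long phys = page_to_phys(page);

	if (is_u3)
		return (phys >> PAGE_SHIFT) | 0x80000000UL;
	return cpu_to_le32((phys & 0xFFFFF000UL) | 0x1UL);
}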
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index a632f25f144a..416d3423150d 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -832,6 +832,7 @@ int hvc_remove(struct hvc_struct *hp)
832 tty_hangup(tty); 832 tty_hangup(tty);
833 return 0; 833 return 0;
834} 834}
835EXPORT_SYMBOL_GPL(hvc_remove);
835 836
836/* Driver initialization: called as soon as someone uses hvc_alloc(). */ 837/* Driver initialization: called as soon as someone uses hvc_alloc(). */
837static int hvc_init(void) 838static int hvc_init(void)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 97642a7a79c4..7a4e788cab2f 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -43,10 +43,7 @@
43#include <asm/pmac_feature.h> 43#include <asm/pmac_feature.h>
44#include <asm/sections.h> 44#include <asm/sections.h>
45#include <asm/irq.h> 45#include <asm/irq.h>
46
47#ifndef CONFIG_PPC64
48#include <asm/mediabay.h> 46#include <asm/mediabay.h>
49#endif
50 47
51#define DRV_NAME "ide-pmac" 48#define DRV_NAME "ide-pmac"
52 49
@@ -59,13 +56,14 @@ typedef struct pmac_ide_hwif {
59 int irq; 56 int irq;
60 int kind; 57 int kind;
61 int aapl_bus_id; 58 int aapl_bus_id;
62 unsigned mediabay : 1;
63 unsigned broken_dma : 1; 59 unsigned broken_dma : 1;
64 unsigned broken_dma_warn : 1; 60 unsigned broken_dma_warn : 1;
65 struct device_node* node; 61 struct device_node* node;
66 struct macio_dev *mdev; 62 struct macio_dev *mdev;
67 u32 timings[4]; 63 u32 timings[4];
 68	volatile u32 __iomem *	kauai_fcr; 64	volatile u32 __iomem *	kauai_fcr;
65 ide_hwif_t *hwif;
66
69 /* Those fields are duplicating what is in hwif. We currently 67 /* Those fields are duplicating what is in hwif. We currently
70 * can't use the hwif ones because of some assumptions that are 68 * can't use the hwif ones because of some assumptions that are
 71 * being done by the generic code about the kind of dma controller 69 * being done by the generic code about the kind of dma controller
@@ -854,6 +852,11 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
854 pmif->timings[2] = pmif->timings[3] = value2; 852 pmif->timings[2] = pmif->timings[3] = value2;
855} 853}
856 854
855static int on_media_bay(pmac_ide_hwif_t *pmif)
856{
857 return pmif->mdev && pmif->mdev->media_bay != NULL;
858}
859
857/* Suspend call back, should be called after the child devices 860/* Suspend call back, should be called after the child devices
858 * have actually been suspended 861 * have actually been suspended
859 */ 862 */
@@ -866,7 +869,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
866 disable_irq(pmif->irq); 869 disable_irq(pmif->irq);
867 870
868 /* The media bay will handle itself just fine */ 871 /* The media bay will handle itself just fine */
869 if (pmif->mediabay) 872 if (on_media_bay(pmif))
870 return 0; 873 return 0;
871 874
872 /* Kauai has bus control FCRs directly here */ 875 /* Kauai has bus control FCRs directly here */
@@ -889,7 +892,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
889static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) 892static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
890{ 893{
891 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ 894 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
892 if (!pmif->mediabay) { 895 if (!on_media_bay(pmif)) {
893 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); 896 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
894 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); 897 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
895 msleep(10); 898 msleep(10);
@@ -950,13 +953,11 @@ static void pmac_ide_init_dev(ide_drive_t *drive)
950 pmac_ide_hwif_t *pmif = 953 pmac_ide_hwif_t *pmif =
951 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); 954 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
952 955
953 if (pmif->mediabay) { 956 if (on_media_bay(pmif)) {
954#ifdef CONFIG_PMAC_MEDIABAY 957 if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
955 if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
956 drive->dev_flags &= ~IDE_DFLAG_NOPROBE; 958 drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
957 return; 959 return;
958 } 960 }
959#endif
960 drive->dev_flags |= IDE_DFLAG_NOPROBE; 961 drive->dev_flags |= IDE_DFLAG_NOPROBE;
961 } 962 }
962} 963}
@@ -1072,26 +1073,23 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1072 writel(KAUAI_FCR_UATA_MAGIC | 1073 writel(KAUAI_FCR_UATA_MAGIC |
1073 KAUAI_FCR_UATA_RESET_N | 1074 KAUAI_FCR_UATA_RESET_N |
1074 KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); 1075 KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
1075
1076 pmif->mediabay = 0;
1077 1076
1078 /* Make sure we have sane timings */ 1077 /* Make sure we have sane timings */
1079 sanitize_timings(pmif); 1078 sanitize_timings(pmif);
1080 1079
1080 /* If we are on a media bay, wait for it to settle and lock it */
1081 if (pmif->mdev)
1082 lock_media_bay(pmif->mdev->media_bay);
1083
1081 host = ide_host_alloc(&d, hws, 1); 1084 host = ide_host_alloc(&d, hws, 1);
1082 if (host == NULL) 1085 if (host == NULL) {
1083 return -ENOMEM; 1086 rc = -ENOMEM;
1084 hwif = host->ports[0]; 1087 goto bail;
1088 }
1089 hwif = pmif->hwif = host->ports[0];
1085 1090
1086#ifndef CONFIG_PPC64 1091 if (on_media_bay(pmif)) {
1087 /* XXX FIXME: Media bay stuff need re-organizing */ 1092 /* Fixup bus ID for media bay */
1088 if (np->parent && np->parent->name
1089 && strcasecmp(np->parent->name, "media-bay") == 0) {
1090#ifdef CONFIG_PMAC_MEDIABAY
1091 media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq,
1092 hwif);
1093#endif /* CONFIG_PMAC_MEDIABAY */
1094 pmif->mediabay = 1;
1095 if (!bidp) 1093 if (!bidp)
1096 pmif->aapl_bus_id = 1; 1094 pmif->aapl_bus_id = 1;
1097 } else if (pmif->kind == controller_ohare) { 1095 } else if (pmif->kind == controller_ohare) {
@@ -1100,9 +1098,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1100 * units, I keep the old way 1098 * units, I keep the old way
1101 */ 1099 */
1102 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); 1100 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1103 } else 1101 } else {
1104#endif
1105 {
1106 /* This is necessary to enable IDE when net-booting */ 1102 /* This is necessary to enable IDE when net-booting */
1107 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); 1103 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1108 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); 1104 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
@@ -1112,17 +1108,21 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1112 } 1108 }
1113 1109
1114 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " 1110 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1115 "bus ID %d%s, irq %d\n", model_name[pmif->kind], 1111 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1116 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, 1112 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1117 pmif->mediabay ? " (mediabay)" : "", hw->irq); 1113 on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
1118 1114
1119 rc = ide_host_register(host, &d, hws); 1115 rc = ide_host_register(host, &d, hws);
1120 if (rc) { 1116 if (rc)
1121 ide_host_free(host); 1117 pmif->hwif = NULL;
1122 return rc;
1123 }
1124 1118
1125 return 0; 1119 if (pmif->mdev)
1120 unlock_media_bay(pmif->mdev->media_bay);
1121
1122 bail:
1123 if (rc && host)
1124 ide_host_free(host);
1125 return rc;
1126} 1126}
1127 1127
1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) 1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
@@ -1362,6 +1362,25 @@ pmac_ide_pci_resume(struct pci_dev *pdev)
1362 return rc; 1362 return rc;
1363} 1363}
1364 1364
1365#ifdef CONFIG_PMAC_MEDIABAY
1366static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
1367{
1368 pmac_ide_hwif_t *pmif =
1369 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1370
1371 switch(mb_state) {
1372 case MB_CD:
1373 if (!pmif->hwif->present)
1374 ide_port_scan(pmif->hwif);
1375 break;
1376 default:
1377 if (pmif->hwif->present)
1378 ide_port_unregister_devices(pmif->hwif);
1379 }
1380}
1381#endif /* CONFIG_PMAC_MEDIABAY */
1382
1383
1365static struct of_device_id pmac_ide_macio_match[] = 1384static struct of_device_id pmac_ide_macio_match[] =
1366{ 1385{
1367 { 1386 {
@@ -1386,6 +1405,9 @@ static struct macio_driver pmac_ide_macio_driver =
1386 .probe = pmac_ide_macio_attach, 1405 .probe = pmac_ide_macio_attach,
1387 .suspend = pmac_ide_macio_suspend, 1406 .suspend = pmac_ide_macio_suspend,
1388 .resume = pmac_ide_macio_resume, 1407 .resume = pmac_ide_macio_resume,
1408#ifdef CONFIG_PMAC_MEDIABAY
1409 .mediabay_event = pmac_ide_macio_mb_event,
1410#endif
1389}; 1411};
1390 1412
1391static const struct pci_device_id pmac_ide_pci_match[] = { 1413static const struct pci_device_id pmac_ide_pci_match[] = {
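The mediabay_event hook wired up above is a generic macio_driver callback rather than anything IDE-specific; any driver bound to a device living in the bay can use the same pattern. A hedged sketch with hypothetical names and per-device state:

#ifdef CONFIG_PMAC_MEDIABAY
struct example_priv {			/* hypothetical per-device state */
	int present;
};

/* Hypothetical handler: mb_state is one of the MB_* content codes that
 * drivers/macintosh/mediabay.c broadcasts to children once the bay has
 * settled. */
static void example_mb_event(struct macio_dev *mdev, int mb_state)
{
	struct example_priv *priv = dev_get_drvdata(&mdev->ofdev.dev);

	if (mb_state == MB_CD && !priv->present) {
		priv->present = 1;
		/* (re)probe the hardware that just appeared in the bay */
	} else if (mb_state != MB_CD && priv->present) {
		priv->present = 0;
		/* tear the device down: the bay is empty or changed type */
	}
}
#endif /* CONFIG_PMAC_MEDIABAY */

The handler is then plugged into the .mediabay_event field of struct macio_driver, exactly as pmac_ide_macio_driver does above.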
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 588a5b0bc4b5..26a303a1d1ab 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -379,6 +379,11 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
379 dev->ofdev.dev.parent = parent; 379 dev->ofdev.dev.parent = parent;
380 dev->ofdev.dev.bus = &macio_bus_type; 380 dev->ofdev.dev.bus = &macio_bus_type;
381 dev->ofdev.dev.release = macio_release_dev; 381 dev->ofdev.dev.release = macio_release_dev;
382 dev->ofdev.dev.dma_parms = &dev->dma_parms;
383
 384 /* Standard DMA parameters */
385 dma_set_max_seg_size(&dev->ofdev.dev, 65536);
386 dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
382 387
383#ifdef CONFIG_PCI 388#ifdef CONFIG_PCI
384 /* Set the DMA ops to the ones from the PCI device, this could be 389 /* Set the DMA ops to the ones from the PCI device, this could be
@@ -538,6 +543,42 @@ void macio_unregister_driver(struct macio_driver *drv)
538 driver_unregister(&drv->driver); 543 driver_unregister(&drv->driver);
539} 544}
540 545
546/* Managed MacIO resources */
547struct macio_devres {
548 u32 res_mask;
549};
550
551static void maciom_release(struct device *gendev, void *res)
552{
553 struct macio_dev *dev = to_macio_device(gendev);
554 struct macio_devres *dr = res;
555 int i, max;
556
557 max = min(dev->n_resources, 32);
558 for (i = 0; i < max; i++) {
559 if (dr->res_mask & (1 << i))
560 macio_release_resource(dev, i);
561 }
562}
563
564int macio_enable_devres(struct macio_dev *dev)
565{
566 struct macio_devres *dr;
567
568 dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
569 if (!dr) {
570 dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL);
571 if (!dr)
572 return -ENOMEM;
573 }
574 return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL;
575}
576
577static struct macio_devres * find_macio_dr(struct macio_dev *dev)
578{
579 return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
580}
581
541/** 582/**
542 * macio_request_resource - Request an MMIO resource 583 * macio_request_resource - Request an MMIO resource
543 * @dev: pointer to the device holding the resource 584 * @dev: pointer to the device holding the resource
@@ -555,6 +596,8 @@ void macio_unregister_driver(struct macio_driver *drv)
555int macio_request_resource(struct macio_dev *dev, int resource_no, 596int macio_request_resource(struct macio_dev *dev, int resource_no,
556 const char *name) 597 const char *name)
557{ 598{
599 struct macio_devres *dr = find_macio_dr(dev);
600
558 if (macio_resource_len(dev, resource_no) == 0) 601 if (macio_resource_len(dev, resource_no) == 0)
559 return 0; 602 return 0;
560 603
@@ -562,6 +605,9 @@ int macio_request_resource(struct macio_dev *dev, int resource_no,
562 macio_resource_len(dev, resource_no), 605 macio_resource_len(dev, resource_no),
563 name)) 606 name))
564 goto err_out; 607 goto err_out;
608
609 if (dr && resource_no < 32)
610 dr->res_mask |= 1 << resource_no;
565 611
566 return 0; 612 return 0;
567 613
@@ -582,10 +628,14 @@ err_out:
582 */ 628 */
583void macio_release_resource(struct macio_dev *dev, int resource_no) 629void macio_release_resource(struct macio_dev *dev, int resource_no)
584{ 630{
631 struct macio_devres *dr = find_macio_dr(dev);
632
585 if (macio_resource_len(dev, resource_no) == 0) 633 if (macio_resource_len(dev, resource_no) == 0)
586 return; 634 return;
587 release_mem_region(macio_resource_start(dev, resource_no), 635 release_mem_region(macio_resource_start(dev, resource_no),
588 macio_resource_len(dev, resource_no)); 636 macio_resource_len(dev, resource_no));
637 if (dr && resource_no < 32)
638 dr->res_mask &= ~(1 << resource_no);
589} 639}
590 640
591/** 641/**
@@ -744,3 +794,5 @@ EXPORT_SYMBOL(macio_request_resource);
744EXPORT_SYMBOL(macio_release_resource); 794EXPORT_SYMBOL(macio_release_resource);
745EXPORT_SYMBOL(macio_request_resources); 795EXPORT_SYMBOL(macio_request_resources);
746EXPORT_SYMBOL(macio_release_resources); 796EXPORT_SYMBOL(macio_release_resources);
797EXPORT_SYMBOL(macio_enable_devres);
798
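macio_enable_devres() added above turns macio_request_resource() into a managed operation: each granted resource index is recorded in the per-device res_mask and released again by maciom_release() when the device is unbound. A probe sketch for a hypothetical client driver:

/* Hypothetical probe: once macio_enable_devres() has succeeded there is no
 * need for an explicit macio_release_resource() on the error or removal
 * paths -- maciom_release() drops everything recorded in res_mask. */
static int example_attach(struct macio_dev *mdev,
			  const struct of_device_id *match)
{
	int rc;

	if (macio_enable_devres(mdev) <= 0)
		return -ENOMEM;

	rc = macio_request_resource(mdev, 0, "example-regs");
	if (rc)
		return rc;		/* nothing to unwind by hand */

	/* map macio_resource_start(mdev, 0), register the device, ... */
	return 0;
}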
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 029ad8ce8a7e..08002b88f342 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -33,15 +33,6 @@
33#include <linux/adb.h> 33#include <linux/adb.h>
34#include <linux/pmu.h> 34#include <linux/pmu.h>
35 35
36
37#define MB_DEBUG
38
39#ifdef MB_DEBUG
40#define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg)
41#else
42#define MBDBG(fmt, arg...) do { } while (0)
43#endif
44
45#define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2)) 36#define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2))
46#define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r)) 37#define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r))
47 38
@@ -76,28 +67,14 @@ struct media_bay_info {
76 int index; 67 int index;
77 int cached_gpio; 68 int cached_gpio;
78 int sleeping; 69 int sleeping;
70 int user_lock;
79 struct mutex lock; 71 struct mutex lock;
80#ifdef CONFIG_BLK_DEV_IDE_PMAC
81 ide_hwif_t *cd_port;
82 void __iomem *cd_base;
83 int cd_irq;
84 int cd_retry;
85#endif
86#if defined(CONFIG_BLK_DEV_IDE_PMAC)
87 int cd_index;
88#endif
89}; 72};
90 73
91#define MAX_BAYS 2 74#define MAX_BAYS 2
92 75
93static struct media_bay_info media_bays[MAX_BAYS]; 76static struct media_bay_info media_bays[MAX_BAYS];
94int media_bay_count = 0; 77static int media_bay_count = 0;
95
96#ifdef CONFIG_BLK_DEV_IDE_PMAC
97/* check the busy bit in the media-bay ide interface
98 (assumes the media-bay contains an ide device) */
99#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0)
100#endif
101 78
102/* 79/*
103 * Wait that number of ms between each step in normal polling mode 80 * Wait that number of ms between each step in normal polling mode
@@ -130,21 +107,11 @@ int media_bay_count = 0;
130 107
131/* 108/*
132 * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted 109 * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted
133 * (or until the device is ready) before waiting for busy bit to disappear 110 * (or until the device is ready) before calling into the driver
134 */ 111 */
135#define MB_IDE_WAIT 1000 112#define MB_IDE_WAIT 1000
136 113
137/* 114/*
138 * Timeout waiting for busy bit of an IDE device to go down
139 */
140#define MB_IDE_TIMEOUT 5000
141
142/*
143 * Max retries of the full power up/down sequence for an IDE device
144 */
145#define MAX_CD_RETRIES 3
146
147/*
148 * States of a media bay 115 * States of a media bay
149 */ 116 */
150enum { 117enum {
@@ -153,7 +120,6 @@ enum {
153 mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ 120 mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */
154 mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ 121 mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */
 155 mb_ide_resetting, /* IDE reset bit unset, waiting MB_IDE_WAIT */ 122 mb_ide_resetting, /* IDE reset bit unset, waiting MB_IDE_WAIT */
156 mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */
157 mb_up, /* Media bay full */ 123 mb_up, /* Media bay full */
158 mb_powering_down /* Powering down (avoid too fast down/up) */ 124 mb_powering_down /* Powering down (avoid too fast down/up) */
159}; 125};
@@ -373,12 +339,12 @@ static inline void set_mb_power(struct media_bay_info* bay, int onoff)
373 if (onoff) { 339 if (onoff) {
374 bay->ops->power(bay, 1); 340 bay->ops->power(bay, 1);
375 bay->state = mb_powering_up; 341 bay->state = mb_powering_up;
376 MBDBG("mediabay%d: powering up\n", bay->index); 342 pr_debug("mediabay%d: powering up\n", bay->index);
377 } else { 343 } else {
378 /* Make sure everything is powered down & disabled */ 344 /* Make sure everything is powered down & disabled */
379 bay->ops->power(bay, 0); 345 bay->ops->power(bay, 0);
380 bay->state = mb_powering_down; 346 bay->state = mb_powering_down;
381 MBDBG("mediabay%d: powering down\n", bay->index); 347 pr_debug("mediabay%d: powering down\n", bay->index);
382 } 348 }
383 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 349 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
384} 350}
@@ -387,107 +353,118 @@ static void poll_media_bay(struct media_bay_info* bay)
387{ 353{
388 int id = bay->ops->content(bay); 354 int id = bay->ops->content(bay);
389 355
390 if (id == bay->last_value) { 356 static char *mb_content_types[] = {
391 if (id != bay->content_id) { 357 "a floppy drive",
392 bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); 358 "a floppy drive",
 393 if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { 359 "an unsupported audio device",
394 /* If the device type changes without going thru 360 "an ATA device",
395 * "MB_NO", we force a pass by "MB_NO" to make sure 361 "an unsupported PCI device",
396 * things are properly reset 362 "an unknown device",
397 */ 363 };
398 if ((id != MB_NO) && (bay->content_id != MB_NO)) { 364
399 id = MB_NO; 365 if (id != bay->last_value) {
400 MBDBG("mediabay%d: forcing MB_NO\n", bay->index);
401 }
402 MBDBG("mediabay%d: switching to %d\n", bay->index, id);
403 set_mb_power(bay, id != MB_NO);
404 bay->content_id = id;
405 if (id == MB_NO) {
406#ifdef CONFIG_BLK_DEV_IDE_PMAC
407 bay->cd_retry = 0;
408#endif
409 printk(KERN_INFO "media bay %d is empty\n", bay->index);
410 }
411 }
412 }
413 } else {
414 bay->last_value = id; 366 bay->last_value = id;
415 bay->value_count = 0; 367 bay->value_count = 0;
368 return;
369 }
370 if (id == bay->content_id)
371 return;
372
373 bay->value_count += msecs_to_jiffies(MB_POLL_DELAY);
374 if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) {
375 /* If the device type changes without going thru
376 * "MB_NO", we force a pass by "MB_NO" to make sure
377 * things are properly reset
378 */
379 if ((id != MB_NO) && (bay->content_id != MB_NO)) {
380 id = MB_NO;
381 pr_debug("mediabay%d: forcing MB_NO\n", bay->index);
382 }
383 pr_debug("mediabay%d: switching to %d\n", bay->index, id);
384 set_mb_power(bay, id != MB_NO);
385 bay->content_id = id;
386 if (id >= MB_NO || id < 0)
387 printk(KERN_INFO "mediabay%d: Bay is now empty\n", bay->index);
388 else
389 printk(KERN_INFO "mediabay%d: Bay contains %s\n",
390 bay->index, mb_content_types[id]);
416 } 391 }
417} 392}
418 393
419#ifdef CONFIG_BLK_DEV_IDE_PMAC 394int check_media_bay(struct macio_dev *baydev)
420int check_media_bay(struct device_node *which_bay, int what)
421{ 395{
422 int i; 396 struct media_bay_info* bay;
397 int id;
423 398
424 for (i=0; i<media_bay_count; i++) 399 if (baydev == NULL)
425 if (media_bays[i].mdev && which_bay == media_bays[i].mdev->ofdev.node) { 400 return MB_NO;
426 if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up) 401
 427 return 0; 402 /* This returns an instant snapshot, not locking, since
428 media_bays[i].cd_index = -1; 403 * we may be called with the bay lock held. The resulting
 429 return -EINVAL; 404 * fuzziness of the result if called at the wrong time is
430 } 405 * not actually a huge deal
431 return -ENODEV; 406 */
407 bay = macio_get_drvdata(baydev);
408 if (bay == NULL)
409 return MB_NO;
410 id = bay->content_id;
411 if (bay->state != mb_up)
412 return MB_NO;
413 if (id == MB_FD1)
414 return MB_FD;
415 return id;
432} 416}
433EXPORT_SYMBOL(check_media_bay); 417EXPORT_SYMBOL_GPL(check_media_bay);
434 418
435int check_media_bay_by_base(unsigned long base, int what) 419void lock_media_bay(struct macio_dev *baydev)
436{ 420{
437 int i; 421 struct media_bay_info* bay;
438
439 for (i=0; i<media_bay_count; i++)
440 if (media_bays[i].mdev && base == (unsigned long) media_bays[i].cd_base) {
441 if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up)
442 return 0;
443 media_bays[i].cd_index = -1;
444 return -EINVAL;
445 }
446 422
447 return -ENODEV; 423 if (baydev == NULL)
424 return;
425 bay = macio_get_drvdata(baydev);
426 if (bay == NULL)
427 return;
428 mutex_lock(&bay->lock);
429 bay->user_lock = 1;
448} 430}
449EXPORT_SYMBOL_GPL(check_media_bay_by_base); 431EXPORT_SYMBOL_GPL(lock_media_bay);
450 432
451int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, 433void unlock_media_bay(struct macio_dev *baydev)
452 int irq, ide_hwif_t *hwif)
453{ 434{
454 int i; 435 struct media_bay_info* bay;
455 436
456 for (i=0; i<media_bay_count; i++) { 437 if (baydev == NULL)
457 struct media_bay_info* bay = &media_bays[i]; 438 return;
458 439 bay = macio_get_drvdata(baydev);
459 if (bay->mdev && which_bay == bay->mdev->ofdev.node) { 440 if (bay == NULL)
460 int timeout = 5000, index = hwif->index; 441 return;
461 442 if (bay->user_lock) {
462 mutex_lock(&bay->lock); 443 bay->user_lock = 0;
463 444 mutex_unlock(&bay->lock);
464 bay->cd_port = hwif;
465 bay->cd_base = (void __iomem *) base;
466 bay->cd_irq = irq;
467
468 if ((MB_CD != bay->content_id) || bay->state != mb_up) {
469 mutex_unlock(&bay->lock);
470 return 0;
471 }
472 printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i);
473 do {
474 if (MB_IDE_READY(i)) {
475 bay->cd_index = index;
476 mutex_unlock(&bay->lock);
477 return 0;
478 }
479 mdelay(1);
480 } while(--timeout);
481 printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i);
482 mutex_unlock(&bay->lock);
483 return -ENODEV;
484 }
485 } 445 }
446}
447EXPORT_SYMBOL_GPL(unlock_media_bay);
486 448
487 return -ENODEV; 449static int mb_broadcast_hotplug(struct device *dev, void *data)
450{
451 struct media_bay_info* bay = data;
452 struct macio_dev *mdev;
453 struct macio_driver *drv;
454 int state;
455
456 if (dev->bus != &macio_bus_type)
457 return 0;
458
459 state = bay->state == mb_up ? bay->content_id : MB_NO;
460 if (state == MB_FD1)
461 state = MB_FD;
462 mdev = to_macio_device(dev);
463 drv = to_macio_driver(dev->driver);
464 if (dev->driver && drv->mediabay_event)
465 drv->mediabay_event(mdev, state);
466 return 0;
488} 467}
489EXPORT_SYMBOL_GPL(media_bay_set_ide_infos);
490#endif /* CONFIG_BLK_DEV_IDE_PMAC */
491 468
492static void media_bay_step(int i) 469static void media_bay_step(int i)
493{ 470{
@@ -497,8 +474,8 @@ static void media_bay_step(int i)
497 if (bay->state != mb_powering_down) 474 if (bay->state != mb_powering_down)
498 poll_media_bay(bay); 475 poll_media_bay(bay);
499 476
500 /* If timer expired or polling IDE busy, run state machine */ 477 /* If timer expired run state machine */
501 if ((bay->state != mb_ide_waiting) && (bay->timer != 0)) { 478 if (bay->timer != 0) {
502 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); 479 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
503 if (bay->timer > 0) 480 if (bay->timer > 0)
504 return; 481 return;
@@ -508,100 +485,50 @@ static void media_bay_step(int i)
508 switch(bay->state) { 485 switch(bay->state) {
509 case mb_powering_up: 486 case mb_powering_up:
510 if (bay->ops->setup_bus(bay, bay->last_value) < 0) { 487 if (bay->ops->setup_bus(bay, bay->last_value) < 0) {
511 MBDBG("mediabay%d: device not supported (kind:%d)\n", i, bay->content_id); 488 pr_debug("mediabay%d: device not supported (kind:%d)\n",
489 i, bay->content_id);
512 set_mb_power(bay, 0); 490 set_mb_power(bay, 0);
513 break; 491 break;
514 } 492 }
515 bay->timer = msecs_to_jiffies(MB_RESET_DELAY); 493 bay->timer = msecs_to_jiffies(MB_RESET_DELAY);
516 bay->state = mb_enabling_bay; 494 bay->state = mb_enabling_bay;
517 MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); 495 pr_debug("mediabay%d: enabling (kind:%d)\n", i, bay->content_id);
518 break; 496 break;
519 case mb_enabling_bay: 497 case mb_enabling_bay:
520 bay->ops->un_reset(bay); 498 bay->ops->un_reset(bay);
521 bay->timer = msecs_to_jiffies(MB_SETUP_DELAY); 499 bay->timer = msecs_to_jiffies(MB_SETUP_DELAY);
522 bay->state = mb_resetting; 500 bay->state = mb_resetting;
523 MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id); 501 pr_debug("mediabay%d: releasing bay reset (kind:%d)\n",
502 i, bay->content_id);
524 break; 503 break;
525 case mb_resetting: 504 case mb_resetting:
526 if (bay->content_id != MB_CD) { 505 if (bay->content_id != MB_CD) {
527 MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); 506 pr_debug("mediabay%d: bay is up (kind:%d)\n", i,
507 bay->content_id);
528 bay->state = mb_up; 508 bay->state = mb_up;
509 device_for_each_child(&bay->mdev->ofdev.dev,
510 bay, mb_broadcast_hotplug);
529 break; 511 break;
530 } 512 }
531#ifdef CONFIG_BLK_DEV_IDE_PMAC 513 pr_debug("mediabay%d: releasing ATA reset (kind:%d)\n",
532 MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id); 514 i, bay->content_id);
533 bay->ops->un_reset_ide(bay); 515 bay->ops->un_reset_ide(bay);
534 bay->timer = msecs_to_jiffies(MB_IDE_WAIT); 516 bay->timer = msecs_to_jiffies(MB_IDE_WAIT);
535 bay->state = mb_ide_resetting; 517 bay->state = mb_ide_resetting;
536#else
537 printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i);
538 set_mb_power(bay, 0);
539#endif /* CONFIG_BLK_DEV_IDE_PMAC */
540 break; 518 break;
541#ifdef CONFIG_BLK_DEV_IDE_PMAC 519
542 case mb_ide_resetting: 520 case mb_ide_resetting:
543 bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT); 521 pr_debug("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
544 bay->state = mb_ide_waiting; 522 bay->state = mb_up;
545 MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id); 523 device_for_each_child(&bay->mdev->ofdev.dev,
524 bay, mb_broadcast_hotplug);
546 break; 525 break;
547 case mb_ide_waiting: 526
548 if (bay->cd_base == NULL) {
549 bay->timer = 0;
550 bay->state = mb_up;
551 MBDBG("mediabay%d: up before IDE init\n", i);
552 break;
553 } else if (MB_IDE_READY(i)) {
554 bay->timer = 0;
555 bay->state = mb_up;
556 if (bay->cd_index < 0) {
557 printk("mediabay %d, registering IDE...\n", i);
558 pmu_suspend();
559 ide_port_scan(bay->cd_port);
560 if (bay->cd_port->present)
561 bay->cd_index = bay->cd_port->index;
562 pmu_resume();
563 }
564 if (bay->cd_index == -1) {
565 /* We eventually do a retry */
566 bay->cd_retry++;
567 printk("IDE register error\n");
568 set_mb_power(bay, 0);
569 } else {
570 printk(KERN_DEBUG "media-bay %d is ide%d\n", i, bay->cd_index);
571 MBDBG("mediabay %d IDE ready\n", i);
572 }
573 break;
574 } else if (bay->timer > 0)
575 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
576 if (bay->timer <= 0) {
577 printk("\nIDE Timeout in bay %d !, IDE state is: 0x%02x\n",
578 i, readb(bay->cd_base + 0x70));
579 MBDBG("mediabay%d: nIDE Timeout !\n", i);
580 set_mb_power(bay, 0);
581 bay->timer = 0;
582 }
583 break;
584#endif /* CONFIG_BLK_DEV_IDE_PMAC */
585 case mb_powering_down: 527 case mb_powering_down:
586 bay->state = mb_empty; 528 bay->state = mb_empty;
587#ifdef CONFIG_BLK_DEV_IDE_PMAC 529 device_for_each_child(&bay->mdev->ofdev.dev,
588 if (bay->cd_index >= 0) { 530 bay, mb_broadcast_hotplug);
589 printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, 531 pr_debug("mediabay%d: end of power down\n", i);
590 bay->cd_index);
591 ide_port_unregister_devices(bay->cd_port);
592 bay->cd_index = -1;
593 }
594 if (bay->cd_retry) {
595 if (bay->cd_retry > MAX_CD_RETRIES) {
596 /* Should add an error sound (sort of beep in dmasound) */
597 printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i);
598 } else {
599 /* Force a new power down/up sequence */
600 bay->content_id = MB_NO;
601 }
602 }
603#endif /* CONFIG_BLK_DEV_IDE_PMAC */
604 MBDBG("mediabay%d: end of power down\n", i);
605 break; 532 break;
606 } 533 }
607} 534}
@@ -676,11 +603,6 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
676 bay->last_value = bay->ops->content(bay); 603 bay->last_value = bay->ops->content(bay);
677 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); 604 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
678 bay->state = mb_empty; 605 bay->state = mb_empty;
679 do {
680 msleep(MB_POLL_DELAY);
681 media_bay_step(i);
682 } while((bay->state != mb_empty) &&
683 (bay->state != mb_up));
684 606
685 /* Mark us ready by filling our mdev data */ 607 /* Mark us ready by filling our mdev data */
686 macio_set_drvdata(mdev, bay); 608 macio_set_drvdata(mdev, bay);
@@ -725,7 +647,7 @@ static int media_bay_resume(struct macio_dev *mdev)
725 set_mb_power(bay, 0); 647 set_mb_power(bay, 0);
726 msleep(MB_POWER_DELAY); 648 msleep(MB_POWER_DELAY);
727 if (bay->ops->content(bay) != bay->content_id) { 649 if (bay->ops->content(bay) != bay->content_id) {
728 printk("mediabay%d: content changed during sleep...\n", bay->index); 650 printk("mediabay%d: Content changed during sleep...\n", bay->index);
729 mutex_unlock(&bay->lock); 651 mutex_unlock(&bay->lock);
730 return 0; 652 return 0;
731 } 653 }
@@ -733,9 +655,6 @@ static int media_bay_resume(struct macio_dev *mdev)
733 bay->last_value = bay->content_id; 655 bay->last_value = bay->content_id;
734 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); 656 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
735 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 657 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
736#ifdef CONFIG_BLK_DEV_IDE_PMAC
737 bay->cd_retry = 0;
738#endif
739 do { 658 do {
740 msleep(MB_POLL_DELAY); 659 msleep(MB_POLL_DELAY);
741 media_bay_step(bay->index); 660 media_bay_step(bay->index);
@@ -823,9 +742,6 @@ static int __init media_bay_init(void)
823 for (i=0; i<MAX_BAYS; i++) { 742 for (i=0; i<MAX_BAYS; i++) {
824 memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info)); 743 memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info));
825 media_bays[i].content_id = -1; 744 media_bays[i].content_id = -1;
826#ifdef CONFIG_BLK_DEV_IDE_PMAC
827 media_bays[i].cd_index = -1;
828#endif
829 } 745 }
830 if (!machine_is(powermac)) 746 if (!machine_is(powermac))
831 return 0; 747 return 0;
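With the IDE plumbing removed, the bay now exposes a small client API, all visible above: check_media_bay() reports the current content type as an instant snapshot, while lock_media_bay()/unlock_media_bay() hold the bay mutex across longer sequences (ide/pmac.c uses them around host registration). A caller sketch with hypothetical names:

struct example_state {			/* hypothetical client state */
	struct macio_dev *mdev;
};

/* Hypothetical open path for a floppy-style client: bail out early when
 * the bay does not currently hold the expected device type.  media_bay is
 * NULL when the device does not live in a bay at all. */
static int example_open(struct example_state *st)
{
	struct macio_dev *bay = st->mdev->media_bay;

	if (bay && check_media_bay(bay) != MB_FD)
		return -ENXIO;
	/* proceed with normal device setup */
	return 0;
}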
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index b195d753d2ed..c876349c32de 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -13,7 +13,6 @@
13#include <linux/fcntl.h> 13#include <linux/fcntl.h>
14#include <linux/nvram.h> 14#include <linux/nvram.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/smp_lock.h>
17#include <asm/uaccess.h> 16#include <asm/uaccess.h>
18#include <asm/nvram.h> 17#include <asm/nvram.h>
19 18
@@ -21,7 +20,6 @@
21 20
22static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) 21static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
23{ 22{
24 lock_kernel();
25 switch (origin) { 23 switch (origin) {
26 case 1: 24 case 1:
27 offset += file->f_pos; 25 offset += file->f_pos;
@@ -30,12 +28,10 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
30 offset += NVRAM_SIZE; 28 offset += NVRAM_SIZE;
31 break; 29 break;
32 } 30 }
33 if (offset < 0) { 31 if (offset < 0)
34 unlock_kernel();
35 return -EINVAL; 32 return -EINVAL;
36 } 33
37 file->f_pos = offset; 34 file->f_pos = offset;
38 unlock_kernel();
39 return file->f_pos; 35 return file->f_pos;
40} 36}
41 37
@@ -76,8 +72,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
76 return p - buf; 72 return p - buf;
77} 73}
78 74
79static int nvram_ioctl(struct inode *inode, struct file *file, 75static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
80 unsigned int cmd, unsigned long arg)
81{ 76{
82 switch(cmd) { 77 switch(cmd) {
83 case PMAC_NVRAM_GET_OFFSET: 78 case PMAC_NVRAM_GET_OFFSET:
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 556f0feaa4df..5ff47ba7f2d0 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -79,6 +79,7 @@ struct thermostat {
79 u8 limits[3]; 79 u8 limits[3];
80 int last_speed[2]; 80 int last_speed[2];
81 int last_var[2]; 81 int last_var[2];
82 int pwm_inv[2];
82}; 83};
83 84
84static enum {ADT7460, ADT7467} therm_type; 85static enum {ADT7460, ADT7467} therm_type;
@@ -229,19 +230,23 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
229 230
230 if (speed >= 0) { 231 if (speed >= 0) {
231 manual = read_reg(th, MANUAL_MODE[fan]); 232 manual = read_reg(th, MANUAL_MODE[fan]);
233 manual &= ~INVERT_MASK;
232 write_reg(th, MANUAL_MODE[fan], 234 write_reg(th, MANUAL_MODE[fan],
233 (manual|MANUAL_MASK) & (~INVERT_MASK)); 235 manual | MANUAL_MASK | th->pwm_inv[fan]);
234 write_reg(th, FAN_SPD_SET[fan], speed); 236 write_reg(th, FAN_SPD_SET[fan], speed);
235 } else { 237 } else {
236 /* back to automatic */ 238 /* back to automatic */
237 if(therm_type == ADT7460) { 239 if(therm_type == ADT7460) {
238 manual = read_reg(th, 240 manual = read_reg(th,
239 MANUAL_MODE[fan]) & (~MANUAL_MASK); 241 MANUAL_MODE[fan]) & (~MANUAL_MASK);
240 242 manual &= ~INVERT_MASK;
243 manual |= th->pwm_inv[fan];
241 write_reg(th, 244 write_reg(th,
242 MANUAL_MODE[fan], manual|REM_CONTROL[fan]); 245 MANUAL_MODE[fan], manual|REM_CONTROL[fan]);
243 } else { 246 } else {
244 manual = read_reg(th, MANUAL_MODE[fan]); 247 manual = read_reg(th, MANUAL_MODE[fan]);
248 manual &= ~INVERT_MASK;
249 manual |= th->pwm_inv[fan];
245 write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); 250 write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK));
246 } 251 }
247 } 252 }
@@ -387,7 +392,7 @@ static int probe_thermostat(struct i2c_client *client,
387 i2c_set_clientdata(client, th); 392 i2c_set_clientdata(client, th);
388 th->clt = client; 393 th->clt = client;
389 394
390 rc = read_reg(th, 0); 395 rc = read_reg(th, CONFIG_REG);
391 if (rc < 0) { 396 if (rc < 0) {
392 dev_err(&client->dev, "Thermostat failed to read config!\n"); 397 dev_err(&client->dev, "Thermostat failed to read config!\n");
393 kfree(th); 398 kfree(th);
@@ -418,6 +423,10 @@ static int probe_thermostat(struct i2c_client *client,
418 423
419 thermostat = th; 424 thermostat = th;
420 425
426 /* record invert bit status because fw can corrupt it after suspend */
427 th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
428 th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
429
421 /* be sure to really write fan speed the first time */ 430 /* be sure to really write fan speed the first time */
422 th->last_speed[0] = -2; 431 th->last_speed[0] = -2;
423 th->last_speed[1] = -2; 432 th->last_speed[1] = -2;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 6f308a4757ee..db379c381432 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -36,6 +36,7 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/pm.h> 37#include <linux/pm.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/seq_file.h>
39#include <linux/init.h> 40#include <linux/init.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/device.h> 42#include <linux/device.h>
@@ -186,17 +187,11 @@ static int init_pmu(void);
186static void pmu_start(void); 187static void pmu_start(void);
187static irqreturn_t via_pmu_interrupt(int irq, void *arg); 188static irqreturn_t via_pmu_interrupt(int irq, void *arg);
188static irqreturn_t gpio1_interrupt(int irq, void *arg); 189static irqreturn_t gpio1_interrupt(int irq, void *arg);
189static int proc_get_info(char *page, char **start, off_t off, 190static const struct file_operations pmu_info_proc_fops;
190 int count, int *eof, void *data); 191static const struct file_operations pmu_irqstats_proc_fops;
191static int proc_get_irqstats(char *page, char **start, off_t off,
192 int count, int *eof, void *data);
193static void pmu_pass_intr(unsigned char *data, int len); 192static void pmu_pass_intr(unsigned char *data, int len);
194static int proc_get_batt(char *page, char **start, off_t off, 193static const struct file_operations pmu_battery_proc_fops;
195 int count, int *eof, void *data); 194static const struct file_operations pmu_options_proc_fops;
196static int proc_read_options(char *page, char **start, off_t off,
197 int count, int *eof, void *data);
198static int proc_write_options(struct file *file, const char __user *buffer,
199 unsigned long count, void *data);
200 195
201#ifdef CONFIG_ADB 196#ifdef CONFIG_ADB
202struct adb_driver via_pmu_driver = { 197struct adb_driver via_pmu_driver = {
@@ -507,19 +502,15 @@ static int __init via_pmu_dev_init(void)
507 for (i=0; i<pmu_battery_count; i++) { 502 for (i=0; i<pmu_battery_count; i++) {
508 char title[16]; 503 char title[16];
509 sprintf(title, "battery_%ld", i); 504 sprintf(title, "battery_%ld", i);
510 proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root, 505 proc_pmu_batt[i] = proc_create_data(title, 0, proc_pmu_root,
511 proc_get_batt, (void *)i); 506 &pmu_battery_proc_fops, (void *)i);
512 } 507 }
513 508
514 proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root, 509 proc_pmu_info = proc_create("info", 0, proc_pmu_root, &pmu_info_proc_fops);
515 proc_get_info, NULL); 510 proc_pmu_irqstats = proc_create("interrupts", 0, proc_pmu_root,
516 proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root, 511 &pmu_irqstats_proc_fops);
517 proc_get_irqstats, NULL); 512 proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
518 proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root); 513 &pmu_options_proc_fops);
519 if (proc_pmu_options) {
520 proc_pmu_options->read_proc = proc_read_options;
521 proc_pmu_options->write_proc = proc_write_options;
522 }
523 } 514 }
524 return 0; 515 return 0;
525} 516}
@@ -799,27 +790,33 @@ query_battery_state(void)
799 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 790 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
800} 791}
801 792
802static int 793static int pmu_info_proc_show(struct seq_file *m, void *v)
803proc_get_info(char *page, char **start, off_t off,
804 int count, int *eof, void *data)
805{ 794{
806 char* p = page; 795 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
807 796 seq_printf(m, "PMU firmware version : %02x\n", pmu_version);
808 p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); 797 seq_printf(m, "AC Power : %d\n",
809 p += sprintf(p, "PMU firmware version : %02x\n", pmu_version);
810 p += sprintf(p, "AC Power : %d\n",
811 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); 798 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
812 p += sprintf(p, "Battery count : %d\n", pmu_battery_count); 799 seq_printf(m, "Battery count : %d\n", pmu_battery_count);
800
801 return 0;
802}
813 803
814 return p - page; 804static int pmu_info_proc_open(struct inode *inode, struct file *file)
805{
806 return single_open(file, pmu_info_proc_show, NULL);
815} 807}
816 808
817static int 809static const struct file_operations pmu_info_proc_fops = {
818proc_get_irqstats(char *page, char **start, off_t off, 810 .owner = THIS_MODULE,
819 int count, int *eof, void *data) 811 .open = pmu_info_proc_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815};
816
817static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
820{ 818{
821 int i; 819 int i;
822 char* p = page;
823 static const char *irq_names[] = { 820 static const char *irq_names[] = {
824 "Total CB1 triggered events", 821 "Total CB1 triggered events",
825 "Total GPIO1 triggered events", 822 "Total GPIO1 triggered events",
@@ -835,60 +832,76 @@ proc_get_irqstats(char *page, char **start, off_t off,
835 }; 832 };
836 833
837 for (i=0; i<11; i++) { 834 for (i=0; i<11; i++) {
838 p += sprintf(p, " %2u: %10u (%s)\n", 835 seq_printf(m, " %2u: %10u (%s)\n",
839 i, pmu_irq_stats[i], irq_names[i]); 836 i, pmu_irq_stats[i], irq_names[i]);
840 } 837 }
841 return p - page; 838 return 0;
842} 839}
843 840
844static int 841static int pmu_irqstats_proc_open(struct inode *inode, struct file *file)
845proc_get_batt(char *page, char **start, off_t off,
846 int count, int *eof, void *data)
847{ 842{
848 long batnum = (long)data; 843 return single_open(file, pmu_irqstats_proc_show, NULL);
849 char *p = page; 844}
845
846static const struct file_operations pmu_irqstats_proc_fops = {
847 .owner = THIS_MODULE,
848 .open = pmu_irqstats_proc_open,
849 .read = seq_read,
850 .llseek = seq_lseek,
851 .release = single_release,
852};
853
854static int pmu_battery_proc_show(struct seq_file *m, void *v)
855{
856 long batnum = (long)m->private;
850 857
851 p += sprintf(p, "\n"); 858 seq_putc(m, '\n');
852 p += sprintf(p, "flags : %08x\n", 859 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags);
853 pmu_batteries[batnum].flags); 860 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge);
854 p += sprintf(p, "charge : %d\n", 861 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge);
855 pmu_batteries[batnum].charge); 862 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage);
856 p += sprintf(p, "max_charge : %d\n", 863 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage);
857 pmu_batteries[batnum].max_charge); 864 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining);
858 p += sprintf(p, "current : %d\n", 865 return 0;
859 pmu_batteries[batnum].amperage);
860 p += sprintf(p, "voltage : %d\n",
861 pmu_batteries[batnum].voltage);
862 p += sprintf(p, "time rem. : %d\n",
863 pmu_batteries[batnum].time_remaining);
864
865 return p - page;
866} 866}
867 867
868static int 868static int pmu_battery_proc_open(struct inode *inode, struct file *file)
869proc_read_options(char *page, char **start, off_t off,
870 int count, int *eof, void *data)
871{ 869{
872 char *p = page; 870 return single_open(file, pmu_battery_proc_show, PDE(inode)->data);
871}
873 872
873static const struct file_operations pmu_battery_proc_fops = {
874 .owner = THIS_MODULE,
875 .open = pmu_battery_proc_open,
876 .read = seq_read,
877 .llseek = seq_lseek,
878 .release = single_release,
879};
880
881static int pmu_options_proc_show(struct seq_file *m, void *v)
882{
874#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 883#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
875 if (pmu_kind == PMU_KEYLARGO_BASED && 884 if (pmu_kind == PMU_KEYLARGO_BASED &&
876 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 885 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
877 p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup); 886 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup);
878#endif 887#endif
879 if (pmu_kind == PMU_KEYLARGO_BASED) 888 if (pmu_kind == PMU_KEYLARGO_BASED)
880 p += sprintf(p, "server_mode=%d\n", option_server_mode); 889 seq_printf(m, "server_mode=%d\n", option_server_mode);
881 890
882 return p - page; 891 return 0;
883} 892}
884 893
885static int 894static int pmu_options_proc_open(struct inode *inode, struct file *file)
886proc_write_options(struct file *file, const char __user *buffer, 895{
887 unsigned long count, void *data) 896 return single_open(file, pmu_options_proc_show, NULL);
897}
898
899static ssize_t pmu_options_proc_write(struct file *file,
900 const char __user *buffer, size_t count, loff_t *pos)
888{ 901{
889 char tmp[33]; 902 char tmp[33];
890 char *label, *val; 903 char *label, *val;
891 unsigned long fcount = count; 904 size_t fcount = count;
892 905
893 if (!count) 906 if (!count)
894 return -EINVAL; 907 return -EINVAL;
@@ -927,6 +940,15 @@ proc_write_options(struct file *file, const char __user *buffer,
927 return fcount; 940 return fcount;
928} 941}
929 942
943static const struct file_operations pmu_options_proc_fops = {
944 .owner = THIS_MODULE,
945 .open = pmu_options_proc_open,
946 .read = seq_read,
947 .llseek = seq_lseek,
948 .release = single_release,
949 .write = pmu_options_proc_write,
950};
951
930#ifdef CONFIG_ADB 952#ifdef CONFIG_ADB
931/* Send an ADB command */ 953/* Send an ADB command */
932static int pmu_send_request(struct adb_request *req, int sync) 954static int pmu_send_request(struct adb_request *req, int sync)
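The procfs conversion above is the standard single_open()/seq_file idiom that replaces the old read_proc/write_proc callbacks. A stand-alone sketch of the same pattern, using a hypothetical "example" entry:

static int example_proc_show(struct seq_file *m, void *v)
{
	/* m->private carries per-entry data, as the per-battery files above
	 * do by passing the battery index through proc_create_data(). */
	seq_printf(m, "example value : %ld\n", (long)m->private);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration: proc_create_data("example", 0, parent, &example_proc_fops,
 *                                (void *)some_index); */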
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 961fa0e7c2cf..6c68b9e5f5c4 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -202,6 +202,8 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
202 fct->ctrl.name = "cpu-front-fan-1"; 202 fct->ctrl.name = "cpu-front-fan-1";
203 else if (!strcmp(l, "CPU A PUMP")) 203 else if (!strcmp(l, "CPU A PUMP"))
204 fct->ctrl.name = "cpu-pump-0"; 204 fct->ctrl.name = "cpu-pump-0";
205 else if (!strcmp(l, "CPU B PUMP"))
206 fct->ctrl.name = "cpu-pump-1";
205 else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || 207 else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
206 !strcmp(l, "EXPANSION SLOTS INTAKE")) 208 !strcmp(l, "EXPANSION SLOTS INTAKE"))
207 fct->ctrl.name = "slots-fan"; 209 fct->ctrl.name = "slots-fan";
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 0c44d560bf1a..0c7a63c1f12f 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,6 +22,8 @@
22#include <linux/mmc/core.h> 22#include <linux/mmc/core.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24 24
25MODULE_LICENSE("GPL");
26
25enum { 27enum {
26 CD_GPIO = 0, 28 CD_GPIO = 0,
27 WP_GPIO, 29 WP_GPIO,
diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h
deleted file mode 100644
index 8e7d1c3edc60..000000000000
--- a/drivers/net/ehea/ehea_hcall.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * linux/drivers/net/ehea/ehea_hcall.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_HCALL_H__
30#define __EHEA_HCALL_H__
31
32/**
33 * This file contains HCALL defines that are to be included in the appropriate
34 * kernel files later
35 */
36
37#define H_ALLOC_HEA_RESOURCE 0x278
38#define H_MODIFY_HEA_QP 0x250
39#define H_QUERY_HEA_QP 0x254
40#define H_QUERY_HEA 0x258
41#define H_QUERY_HEA_PORT 0x25C
42#define H_MODIFY_HEA_PORT 0x260
43#define H_REG_BCMC 0x264
44#define H_DEREG_BCMC 0x268
45#define H_REGISTER_HEA_RPAGES 0x26C
46#define H_DISABLE_AND_GET_HEA 0x270
47#define H_GET_HEA_INFO 0x274
48#define H_ADD_CONN 0x284
49#define H_DEL_CONN 0x288
50
51#endif /* __EHEA_HCALL_H__ */
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index f3628c803567..2f8174c248bc 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -33,7 +33,6 @@
33#include <asm/hvcall.h> 33#include <asm/hvcall.h>
34#include "ehea.h" 34#include "ehea.h"
35#include "ehea_hw.h" 35#include "ehea_hw.h"
36#include "ehea_hcall.h"
37 36
38/* Some abbreviations used here: 37/* Some abbreviations used here:
39 * 38 *
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 298de0f95d70..d58ade170c4b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -65,47 +65,322 @@ static int of_platform_device_remove(struct device *dev)
65 return 0; 65 return 0;
66} 66}
67 67
68static int of_platform_device_suspend(struct device *dev, pm_message_t state) 68static void of_platform_device_shutdown(struct device *dev)
69{ 69{
70 struct of_device *of_dev = to_of_device(dev); 70 struct of_device *of_dev = to_of_device(dev);
71 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 71 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
72 int error = 0;
73 72
74 if (dev->driver && drv->suspend) 73 if (dev->driver && drv->shutdown)
75 error = drv->suspend(of_dev, state); 74 drv->shutdown(of_dev);
76 return error;
77} 75}
78 76
79static int of_platform_device_resume(struct device * dev) 77#ifdef CONFIG_PM_SLEEP
78
79static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
80{ 80{
81 struct of_device *of_dev = to_of_device(dev); 81 struct of_device *of_dev = to_of_device(dev);
82 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 82 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
83 int error = 0; 83 int ret = 0;
84 84
85 if (dev->driver && drv->resume) 85 if (dev->driver && drv->suspend)
86 error = drv->resume(of_dev); 86 ret = drv->suspend(of_dev, mesg);
87 return error; 87 return ret;
88} 88}
89 89
90static void of_platform_device_shutdown(struct device *dev) 90static int of_platform_legacy_resume(struct device *dev)
91{ 91{
92 struct of_device *of_dev = to_of_device(dev); 92 struct of_device *of_dev = to_of_device(dev);
93 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 93 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
94 int ret = 0;
94 95
95 if (dev->driver && drv->shutdown) 96 if (dev->driver && drv->resume)
96 drv->shutdown(of_dev); 97 ret = drv->resume(of_dev);
98 return ret;
99}
100
101static int of_platform_pm_prepare(struct device *dev)
102{
103 struct device_driver *drv = dev->driver;
104 int ret = 0;
105
106 if (drv && drv->pm && drv->pm->prepare)
107 ret = drv->pm->prepare(dev);
108
109 return ret;
110}
111
112static void of_platform_pm_complete(struct device *dev)
113{
114 struct device_driver *drv = dev->driver;
115
116 if (drv && drv->pm && drv->pm->complete)
117 drv->pm->complete(dev);
118}
119
120#ifdef CONFIG_SUSPEND
121
122static int of_platform_pm_suspend(struct device *dev)
123{
124 struct device_driver *drv = dev->driver;
125 int ret = 0;
126
127 if (!drv)
128 return 0;
129
130 if (drv->pm) {
131 if (drv->pm->suspend)
132 ret = drv->pm->suspend(dev);
133 } else {
134 ret = of_platform_legacy_suspend(dev, PMSG_SUSPEND);
135 }
136
137 return ret;
97} 138}
98 139
140static int of_platform_pm_suspend_noirq(struct device *dev)
141{
142 struct device_driver *drv = dev->driver;
143 int ret = 0;
144
145 if (!drv)
146 return 0;
147
148 if (drv->pm) {
149 if (drv->pm->suspend_noirq)
150 ret = drv->pm->suspend_noirq(dev);
151 }
152
153 return ret;
154}
155
156static int of_platform_pm_resume(struct device *dev)
157{
158 struct device_driver *drv = dev->driver;
159 int ret = 0;
160
161 if (!drv)
162 return 0;
163
164 if (drv->pm) {
165 if (drv->pm->resume)
166 ret = drv->pm->resume(dev);
167 } else {
168 ret = of_platform_legacy_resume(dev);
169 }
170
171 return ret;
172}
173
174static int of_platform_pm_resume_noirq(struct device *dev)
175{
176 struct device_driver *drv = dev->driver;
177 int ret = 0;
178
179 if (!drv)
180 return 0;
181
182 if (drv->pm) {
183 if (drv->pm->resume_noirq)
184 ret = drv->pm->resume_noirq(dev);
185 }
186
187 return ret;
188}
189
190#else /* !CONFIG_SUSPEND */
191
192#define of_platform_pm_suspend NULL
193#define of_platform_pm_resume NULL
194#define of_platform_pm_suspend_noirq NULL
195#define of_platform_pm_resume_noirq NULL
196
197#endif /* !CONFIG_SUSPEND */
198
199#ifdef CONFIG_HIBERNATION
200
201static int of_platform_pm_freeze(struct device *dev)
202{
203 struct device_driver *drv = dev->driver;
204 int ret = 0;
205
206 if (!drv)
207 return 0;
208
209 if (drv->pm) {
210 if (drv->pm->freeze)
211 ret = drv->pm->freeze(dev);
212 } else {
213 ret = of_platform_legacy_suspend(dev, PMSG_FREEZE);
214 }
215
216 return ret;
217}
218
219static int of_platform_pm_freeze_noirq(struct device *dev)
220{
221 struct device_driver *drv = dev->driver;
222 int ret = 0;
223
224 if (!drv)
225 return 0;
226
227 if (drv->pm) {
228 if (drv->pm->freeze_noirq)
229 ret = drv->pm->freeze_noirq(dev);
230 }
231
232 return ret;
233}
234
235static int of_platform_pm_thaw(struct device *dev)
236{
237 struct device_driver *drv = dev->driver;
238 int ret = 0;
239
240 if (!drv)
241 return 0;
242
243 if (drv->pm) {
244 if (drv->pm->thaw)
245 ret = drv->pm->thaw(dev);
246 } else {
247 ret = of_platform_legacy_resume(dev);
248 }
249
250 return ret;
251}
252
253static int of_platform_pm_thaw_noirq(struct device *dev)
254{
255 struct device_driver *drv = dev->driver;
256 int ret = 0;
257
258 if (!drv)
259 return 0;
260
261 if (drv->pm) {
262 if (drv->pm->thaw_noirq)
263 ret = drv->pm->thaw_noirq(dev);
264 }
265
266 return ret;
267}
268
269static int of_platform_pm_poweroff(struct device *dev)
270{
271 struct device_driver *drv = dev->driver;
272 int ret = 0;
273
274 if (!drv)
275 return 0;
276
277 if (drv->pm) {
278 if (drv->pm->poweroff)
279 ret = drv->pm->poweroff(dev);
280 } else {
281 ret = of_platform_legacy_suspend(dev, PMSG_HIBERNATE);
282 }
283
284 return ret;
285}
286
287static int of_platform_pm_poweroff_noirq(struct device *dev)
288{
289 struct device_driver *drv = dev->driver;
290 int ret = 0;
291
292 if (!drv)
293 return 0;
294
295 if (drv->pm) {
296 if (drv->pm->poweroff_noirq)
297 ret = drv->pm->poweroff_noirq(dev);
298 }
299
300 return ret;
301}
302
303static int of_platform_pm_restore(struct device *dev)
304{
305 struct device_driver *drv = dev->driver;
306 int ret = 0;
307
308 if (!drv)
309 return 0;
310
311 if (drv->pm) {
312 if (drv->pm->restore)
313 ret = drv->pm->restore(dev);
314 } else {
315 ret = of_platform_legacy_resume(dev);
316 }
317
318 return ret;
319}
320
321static int of_platform_pm_restore_noirq(struct device *dev)
322{
323 struct device_driver *drv = dev->driver;
324 int ret = 0;
325
326 if (!drv)
327 return 0;
328
329 if (drv->pm) {
330 if (drv->pm->restore_noirq)
331 ret = drv->pm->restore_noirq(dev);
332 }
333
334 return ret;
335}
336
337#else /* !CONFIG_HIBERNATION */
338
339#define of_platform_pm_freeze NULL
340#define of_platform_pm_thaw NULL
341#define of_platform_pm_poweroff NULL
342#define of_platform_pm_restore NULL
343#define of_platform_pm_freeze_noirq NULL
344#define of_platform_pm_thaw_noirq NULL
345#define of_platform_pm_poweroff_noirq NULL
346#define of_platform_pm_restore_noirq NULL
347
348#endif /* !CONFIG_HIBERNATION */
349
350static struct dev_pm_ops of_platform_dev_pm_ops = {
351 .prepare = of_platform_pm_prepare,
352 .complete = of_platform_pm_complete,
353 .suspend = of_platform_pm_suspend,
354 .resume = of_platform_pm_resume,
355 .freeze = of_platform_pm_freeze,
356 .thaw = of_platform_pm_thaw,
357 .poweroff = of_platform_pm_poweroff,
358 .restore = of_platform_pm_restore,
359 .suspend_noirq = of_platform_pm_suspend_noirq,
360 .resume_noirq = of_platform_pm_resume_noirq,
361 .freeze_noirq = of_platform_pm_freeze_noirq,
362 .thaw_noirq = of_platform_pm_thaw_noirq,
363 .poweroff_noirq = of_platform_pm_poweroff_noirq,
364 .restore_noirq = of_platform_pm_restore_noirq,
365};
366
367#define OF_PLATFORM_PM_OPS_PTR (&of_platform_dev_pm_ops)
368
369#else /* !CONFIG_PM_SLEEP */
370
371#define OF_PLATFORM_PM_OPS_PTR NULL
372
373#endif /* !CONFIG_PM_SLEEP */
374
99int of_bus_type_init(struct bus_type *bus, const char *name) 375int of_bus_type_init(struct bus_type *bus, const char *name)
100{ 376{
101 bus->name = name; 377 bus->name = name;
102 bus->match = of_platform_bus_match; 378 bus->match = of_platform_bus_match;
103 bus->probe = of_platform_device_probe; 379 bus->probe = of_platform_device_probe;
104 bus->remove = of_platform_device_remove; 380 bus->remove = of_platform_device_remove;
105 bus->suspend = of_platform_device_suspend;
106 bus->resume = of_platform_device_resume;
107 bus->shutdown = of_platform_device_shutdown; 381 bus->shutdown = of_platform_device_shutdown;
108 bus->dev_attrs = of_platform_device_attrs; 382 bus->dev_attrs = of_platform_device_attrs;
383 bus->pm = OF_PLATFORM_PM_OPS_PTR;
109 return bus_register(bus); 384 return bus_register(bus);
110} 385}
111 386
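
The dev_pm_ops wiring above means the of_platform bus now dispatches system sleep through one set of bus-level pm callbacks, and a driver's legacy suspend()/resume() hooks are used only as a fallback when it supplies no struct dev_pm_ops of its own. Below is a minimal sketch of an of_platform driver opting into the new interface through the pm field of its embedded device_driver; the driver name, compatible string and callbacks are hypothetical and not part of this patch.

/* Hypothetical of_platform driver using dev_pm_ops rather than the
 * legacy of_platform_driver suspend/resume hooks. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm.h>

static int __devinit foo_probe(struct of_device *op,
			       const struct of_device_id *match)
{
	return 0;	/* map registers, request IRQs, etc. */
}

static int __devexit foo_remove(struct of_device *op)
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware; reached via of_platform_pm_suspend() */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-program the hardware; reached via of_platform_pm_resume() */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo", },	/* hypothetical */
	{}
};

static struct of_platform_driver foo_driver = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.match_table	= foo_match,
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
	.driver		= {
		.pm	= &foo_pm_ops,	/* found by the drv->pm checks above */
	},
};

static int __init foo_init(void)
{
	return of_register_platform_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	of_unregister_platform_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");

A driver without a dev_pm_ops keeps working unchanged: of_platform_pm_suspend()/resume() fall back to of_platform_legacy_suspend()/resume(), which call the old of_platform_driver hooks.
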
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b6f7cba3b3d..28fce65b8594 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -133,6 +133,14 @@ config SPI_LM70_LLP
133 which interfaces to an LM70 temperature sensor using 133 which interfaces to an LM70 temperature sensor using
134 a parallel port. 134 a parallel port.
135 135
136config SPI_MPC52xx
137 tristate "Freescale MPC52xx SPI (non-PSC) controller support"
138 depends on PPC_MPC52xx && SPI
139 select SPI_MASTER_OF
140 help
141	  This driver supports the MPC52xx SPI controller in master SPI
142 mode.
143
136config SPI_MPC52xx_PSC 144config SPI_MPC52xx_PSC
137 tristate "Freescale MPC52xx PSC SPI controller" 145 tristate "Freescale MPC52xx PSC SPI controller"
138 depends on PPC_MPC52xx && EXPERIMENTAL 146 depends on PPC_MPC52xx && EXPERIMENTAL
@@ -147,9 +155,6 @@ config SPI_MPC8xxx
147 This enables using the Freescale MPC8xxx SPI controllers in master 155 This enables using the Freescale MPC8xxx SPI controllers in master
148 mode. 156 mode.
149 157
150 This driver uses a simple set of shift registers for data (opposed
151 to the CPM based descriptor model).
152
153config SPI_OMAP_UWIRE 158config SPI_OMAP_UWIRE
154 tristate "OMAP1 MicroWire" 159 tristate "OMAP1 MicroWire"
155 depends on ARCH_OMAP1 160 depends on ARCH_OMAP1
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 21a118269cac..e3f092a9afa5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
25obj-$(CONFIG_SPI_ORION) += orion_spi.o 25obj-$(CONFIG_SPI_ORION) += orion_spi.o
26obj-$(CONFIG_SPI_PL022) += amba-pl022.o 26obj-$(CONFIG_SPI_PL022) += amba-pl022.o
27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
28obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
28obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 29obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
29obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 30obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
30obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 31obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 1b74d5ca03f3..f50c81df336a 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/of_spi.h>
20#include <linux/workqueue.h> 21#include <linux/workqueue.h>
21#include <linux/completion.h> 22#include <linux/completion.h>
22#include <linux/io.h> 23#include <linux/io.h>
@@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
313 struct mpc52xx_psc __iomem *psc = mps->psc; 314 struct mpc52xx_psc __iomem *psc = mps->psc;
314 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; 315 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
315 u32 mclken_div; 316 u32 mclken_div;
316 int ret = 0; 317 int ret;
317 318
318 /* default sysclk is 512MHz */ 319 /* default sysclk is 512MHz */
319 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; 320 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
320 mpc52xx_set_psc_clkdiv(psc_id, mclken_div); 321 ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
322 if (ret)
323 return ret;
321 324
322 /* Reset the PSC into a known state */ 325 /* Reset the PSC into a known state */
323 out_8(&psc->command, MPC52xx_PSC_RST_RX); 326 out_8(&psc->command, MPC52xx_PSC_RST_RX);
@@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
341 344
342 mps->bits_per_word = 8; 345 mps->bits_per_word = 8;
343 346
344 return ret; 347 return 0;
345} 348}
346 349
347static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) 350static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
@@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
410 goto free_master; 413 goto free_master;
411 414
412 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); 415 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
413 if (ret < 0) 416 if (ret < 0) {
417 dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
414 goto free_irq; 418 goto free_irq;
419 }
415 420
416 spin_lock_init(&mps->lock); 421 spin_lock_init(&mps->lock);
417 init_completion(&mps->done); 422 init_completion(&mps->done);
@@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
464 const u32 *regaddr_p; 469 const u32 *regaddr_p;
465 u64 regaddr64, size64; 470 u64 regaddr64, size64;
466 s16 id = -1; 471 s16 id = -1;
472 int rc;
467 473
468 regaddr_p = of_get_address(op->node, 0, &size64, NULL); 474 regaddr_p = of_get_address(op->node, 0, &size64, NULL);
469 if (!regaddr_p) { 475 if (!regaddr_p) {
470 printk(KERN_ERR "Invalid PSC address\n"); 476 dev_err(&op->dev, "Invalid PSC address\n");
471 return -EINVAL; 477 return -EINVAL;
472 } 478 }
473 regaddr64 = of_translate_address(op->node, regaddr_p); 479 regaddr64 = of_translate_address(op->node, regaddr_p);
@@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
478 484
479 psc_nump = of_get_property(op->node, "cell-index", NULL); 485 psc_nump = of_get_property(op->node, "cell-index", NULL);
480 if (!psc_nump || *psc_nump > 5) { 486 if (!psc_nump || *psc_nump > 5) {
481 printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " 487 dev_err(&op->dev, "Invalid cell-index property\n");
482 "cell-index property\n", op->node->full_name);
483 return -EINVAL; 488 return -EINVAL;
484 } 489 }
485 id = *psc_nump + 1; 490 id = *psc_nump + 1;
486 } 491 }
487 492
488 return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, 493 rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
489 irq_of_parse_and_map(op->node, 0), id); 494 irq_of_parse_and_map(op->node, 0), id);
495 if (rc == 0)
496 of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
497
498 return rc;
490} 499}
491 500
492static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) 501static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
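
With of_register_spi_devices() now called at the end of a successful probe, any SPI slave nodes under the PSC node in the device tree get spi_device instances created for them automatically, and a protocol driver binds to them by name. The sketch below shows such a protocol driver; the device name, the "read ID" command and the assumption that the name matches the modalias derived from the child node's compatible string are all hypothetical.

/* Hypothetical SPI protocol driver binding to a slave created by
 * of_register_spi_devices(); the name must match the modalias that
 * the child node's compatible string produces. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int __devinit acme_sensor_probe(struct spi_device *spi)
{
	u8 cmd = 0x80;	/* made-up "read ID" command */
	u8 id = 0;
	int ret;

	/* one write followed by one read over the newly registered bus */
	ret = spi_write_then_read(spi, &cmd, 1, &id, 1);
	if (ret)
		return ret;

	dev_info(&spi->dev, "sensor id 0x%02x\n", id);
	return 0;
}

static int __devexit acme_sensor_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver acme_sensor_driver = {
	.driver = {
		.name	= "acme-sensor",	/* hypothetical modalias */
		.owner	= THIS_MODULE,
	},
	.probe	= acme_sensor_probe,
	.remove	= __devexit_p(acme_sensor_remove),
};

static int __init acme_sensor_init(void)
{
	return spi_register_driver(&acme_sensor_driver);
}
module_init(acme_sensor_init);

static void __exit acme_sensor_exit(void)
{
	spi_unregister_driver(&acme_sensor_driver);
}
module_exit(acme_sensor_exit);

MODULE_LICENSE("GPL");
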
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
new file mode 100644
index 000000000000..ef8379b2c172
--- /dev/null
+++ b/drivers/spi/mpc52xx_spi.c
@@ -0,0 +1,520 @@
1/*
2 * MPC52xx SPI bus driver.
3 *
4 * Copyright (C) 2008 Secret Lab Technologies Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * This is the driver for the MPC5200's dedicated SPI controller.
9 *
10 * Note: this driver does not support the MPC5200 PSC in SPI mode. For
11 * that driver see drivers/spi/mpc52xx_psc_spi.c
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/of_platform.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/spi/spi.h>
21#include <linux/spi/mpc52xx_spi.h>
22#include <linux/of_spi.h>
23#include <linux/io.h>
24#include <asm/time.h>
25#include <asm/mpc52xx.h>
26
27MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
28MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
29MODULE_LICENSE("GPL");
30
31/* Register offsets */
32#define SPI_CTRL1 0x00
33#define SPI_CTRL1_SPIE (1 << 7)
34#define SPI_CTRL1_SPE (1 << 6)
35#define SPI_CTRL1_MSTR (1 << 4)
36#define SPI_CTRL1_CPOL (1 << 3)
37#define SPI_CTRL1_CPHA (1 << 2)
38#define SPI_CTRL1_SSOE (1 << 1)
39#define SPI_CTRL1_LSBFE (1 << 0)
40
41#define SPI_CTRL2 0x01
42#define SPI_BRR 0x04
43
44#define SPI_STATUS 0x05
45#define SPI_STATUS_SPIF (1 << 7)
46#define SPI_STATUS_WCOL (1 << 6)
47#define SPI_STATUS_MODF (1 << 4)
48
49#define SPI_DATA 0x09
50#define SPI_PORTDATA 0x0d
51#define SPI_DATADIR 0x10
52
53/* FSM state return values */
54#define FSM_STOP 0 /* Nothing more for the state machine to */
55 /* do. If something interesting happens */
 56 /* then an IRQ will be received */
57#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
58 /* not expected */
59#define FSM_CONTINUE 2 /* Keep iterating the state machine */
60
61/* Driver internal data */
62struct mpc52xx_spi {
63 struct spi_master *master;
64 u32 sysclk;
65 void __iomem *regs;
66 int irq0; /* MODF irq */
67 int irq1; /* SPIF irq */
68 int ipb_freq;
69
70 /* Statistics */
71 int msg_count;
72 int wcol_count;
73 int wcol_ticks;
74 u32 wcol_tx_timestamp;
75 int modf_count;
76 int byte_count;
77
78 struct list_head queue; /* queue of pending messages */
79 spinlock_t lock;
80 struct work_struct work;
81
82
83 /* Details of current transfer (length, and buffer pointers) */
84 struct spi_message *message; /* current message */
85 struct spi_transfer *transfer; /* current transfer */
86 int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
87 int len;
88 int timestamp;
89 u8 *rx_buf;
90 const u8 *tx_buf;
91 int cs_change;
92};
93
94/*
95 * CS control function
96 */
97static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
98{
99 out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
100}
101
102/*
103 * Start a new transfer. This is called both by the idle state
104 * for the first transfer in a message, and by the wait state when the
105 * previous transfer in a message is complete.
106 */
107static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
108{
109 ms->rx_buf = ms->transfer->rx_buf;
110 ms->tx_buf = ms->transfer->tx_buf;
111 ms->len = ms->transfer->len;
112
113 /* Activate the chip select */
114 if (ms->cs_change)
115 mpc52xx_spi_chipsel(ms, 1);
116 ms->cs_change = ms->transfer->cs_change;
117
118 /* Write out the first byte */
119 ms->wcol_tx_timestamp = get_tbl();
120 if (ms->tx_buf)
121 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
122 else
123 out_8(ms->regs + SPI_DATA, 0);
124}
125
126/* Forward declaration of state handlers */
127static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
128 u8 status, u8 data);
129static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
130 u8 status, u8 data);
131
132/*
133 * IDLE state
134 *
135 * No transfers are in progress; if another transfer is pending then retrieve
136 * it and kick it off. Otherwise, stop processing the state machine
137 */
138static int
139mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
140{
141 struct spi_device *spi;
142 int spr, sppr;
143 u8 ctrl1;
144
145 if (status && (irq != NO_IRQ))
146 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
147 status);
148
149 /* Check if there is another transfer waiting. */
150 if (list_empty(&ms->queue))
151 return FSM_STOP;
152
153 /* get the head of the queue */
154 ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
155 list_del_init(&ms->message->queue);
156
157 /* Setup the controller parameters */
158 ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
159 spi = ms->message->spi;
160 if (spi->mode & SPI_CPHA)
161 ctrl1 |= SPI_CTRL1_CPHA;
162 if (spi->mode & SPI_CPOL)
163 ctrl1 |= SPI_CTRL1_CPOL;
164 if (spi->mode & SPI_LSB_FIRST)
165 ctrl1 |= SPI_CTRL1_LSBFE;
166 out_8(ms->regs + SPI_CTRL1, ctrl1);
167
168 /* Setup the controller speed */
169 /* minimum divider is '2'. Also, add '1' to force rounding the
170 * divider up. */
171 sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
172 spr = 0;
173 if (sppr < 1)
174 sppr = 1;
175 while (((sppr - 1) & ~0x7) != 0) {
176 sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
177 spr++;
178 }
179 sppr--; /* sppr quantity in register is offset by 1 */
180 if (spr > 7) {
181 /* Don't overrun limits of SPI baudrate register */
182 spr = 7;
183 sppr = 7;
184 }
185 out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
186
187 ms->cs_change = 1;
188 ms->transfer = container_of(ms->message->transfers.next,
189 struct spi_transfer, transfer_list);
190
191 mpc52xx_spi_start_transfer(ms);
192 ms->state = mpc52xx_spi_fsmstate_transfer;
193
194 return FSM_CONTINUE;
195}
196
197/*
198 * TRANSFER state
199 *
200 * In the middle of a transfer. If the SPI core has completed processing
201 * a byte, then read out the received data and write out the next byte
202 * (unless this transfer is finished; in which case go on to the wait
203 * state)
204 */
205static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
206 u8 status, u8 data)
207{
208 if (!status)
209 return ms->irq0 ? FSM_STOP : FSM_POLL;
210
211 if (status & SPI_STATUS_WCOL) {
212 /* The SPI controller is stoopid. At slower speeds, it may
213 * raise the SPIF flag before the state machine is actually
214 * finished, which causes a collision (internal to the state
215 * machine only). The manual recommends inserting a delay
216 * between receiving the interrupt and sending the next byte,
217 * but it can also be worked around simply by retrying the
218 * transfer which is what we do here. */
219 ms->wcol_count++;
220 ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
221 ms->wcol_tx_timestamp = get_tbl();
222 data = 0;
223 if (ms->tx_buf)
224 data = *(ms->tx_buf-1);
225 out_8(ms->regs + SPI_DATA, data); /* try again */
226 return FSM_CONTINUE;
227 } else if (status & SPI_STATUS_MODF) {
228 ms->modf_count++;
229 dev_err(&ms->master->dev, "mode fault\n");
230 mpc52xx_spi_chipsel(ms, 0);
231 ms->message->status = -EIO;
232 ms->message->complete(ms->message->context);
233 ms->state = mpc52xx_spi_fsmstate_idle;
234 return FSM_CONTINUE;
235 }
236
237 /* Read data out of the spi device */
238 ms->byte_count++;
239 if (ms->rx_buf)
240 *ms->rx_buf++ = data;
241
242 /* Is the transfer complete? */
243 ms->len--;
244 if (ms->len == 0) {
245 ms->timestamp = get_tbl();
246 ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
247 ms->state = mpc52xx_spi_fsmstate_wait;
248 return FSM_CONTINUE;
249 }
250
251 /* Write out the next byte */
252 ms->wcol_tx_timestamp = get_tbl();
253 if (ms->tx_buf)
254 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
255 else
256 out_8(ms->regs + SPI_DATA, 0);
257
258 return FSM_CONTINUE;
259}
260
261/*
262 * WAIT state
263 *
264 * A transfer has completed; need to wait for the delay period to complete
265 * before starting the next transfer
266 */
267static int
268mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
269{
270 if (status && irq)
271 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
272 status);
273
274 if (((int)get_tbl()) - ms->timestamp < 0)
275 return FSM_POLL;
276
277 ms->message->actual_length += ms->transfer->len;
278
279 /* Check if there is another transfer in this message. If there
 280 * isn't, deactivate CS, notify sender, and drop back to idle
281 * to start the next message. */
282 if (ms->transfer->transfer_list.next == &ms->message->transfers) {
283 ms->msg_count++;
284 mpc52xx_spi_chipsel(ms, 0);
285 ms->message->status = 0;
286 ms->message->complete(ms->message->context);
287 ms->state = mpc52xx_spi_fsmstate_idle;
288 return FSM_CONTINUE;
289 }
290
291 /* There is another transfer; kick it off */
292
293 if (ms->cs_change)
294 mpc52xx_spi_chipsel(ms, 0);
295
296 ms->transfer = container_of(ms->transfer->transfer_list.next,
297 struct spi_transfer, transfer_list);
298 mpc52xx_spi_start_transfer(ms);
299 ms->state = mpc52xx_spi_fsmstate_transfer;
300 return FSM_CONTINUE;
301}
302
303/**
304 * mpc52xx_spi_fsm_process - Finite State Machine iteration function
305 * @irq: irq number that triggered the FSM or 0 for polling
306 * @ms: pointer to mpc52xx_spi driver data
307 */
308static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
309{
310 int rc = FSM_CONTINUE;
311 u8 status, data;
312
313 while (rc == FSM_CONTINUE) {
314 /* Interrupt cleared by read of STATUS followed by
315 * read of DATA registers */
316 status = in_8(ms->regs + SPI_STATUS);
317 data = in_8(ms->regs + SPI_DATA);
318 rc = ms->state(irq, ms, status, data);
319 }
320
321 if (rc == FSM_POLL)
322 schedule_work(&ms->work);
323}
324
325/**
326 * mpc52xx_spi_irq - IRQ handler
327 */
328static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
329{
330 struct mpc52xx_spi *ms = _ms;
331 spin_lock(&ms->lock);
332 mpc52xx_spi_fsm_process(irq, ms);
333 spin_unlock(&ms->lock);
334 return IRQ_HANDLED;
335}
336
337/**
338 * mpc52xx_spi_wq - Workqueue function for polling the state machine
339 */
340static void mpc52xx_spi_wq(struct work_struct *work)
341{
342 struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
343 unsigned long flags;
344
345 spin_lock_irqsave(&ms->lock, flags);
346 mpc52xx_spi_fsm_process(0, ms);
347 spin_unlock_irqrestore(&ms->lock, flags);
348}
349
350/*
351 * spi_master ops
352 */
353
354static int mpc52xx_spi_setup(struct spi_device *spi)
355{
356 if (spi->bits_per_word % 8)
357 return -EINVAL;
358
359 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
360 return -EINVAL;
361
362 if (spi->chip_select >= spi->master->num_chipselect)
363 return -EINVAL;
364
365 return 0;
366}
367
368static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
369{
370 struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
371 unsigned long flags;
372
373 m->actual_length = 0;
374 m->status = -EINPROGRESS;
375
376 spin_lock_irqsave(&ms->lock, flags);
377 list_add_tail(&m->queue, &ms->queue);
378 spin_unlock_irqrestore(&ms->lock, flags);
379 schedule_work(&ms->work);
380
381 return 0;
382}
383
384/*
385 * OF Platform Bus Binding
386 */
387static int __devinit mpc52xx_spi_probe(struct of_device *op,
388 const struct of_device_id *match)
389{
390 struct spi_master *master;
391 struct mpc52xx_spi *ms;
392 void __iomem *regs;
393 int rc;
394
395 /* MMIO registers */
396 dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
397 regs = of_iomap(op->node, 0);
398 if (!regs)
399 return -ENODEV;
400
401 /* initialize the device */
402 out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR);
403 out_8(regs + SPI_CTRL2, 0x0);
404 out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
405 out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
406
407 /* Clear the status register and re-read it to check for a MODF
408 * failure. This driver cannot currently handle multiple masters
409 * on the SPI bus. This fault will also occur if the SPI signals
410 * are not connected to any pins (port_config setting) */
411 in_8(regs + SPI_STATUS);
412 in_8(regs + SPI_DATA);
413 if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
414 dev_err(&op->dev, "mode fault; is port_config correct?\n");
415 rc = -EIO;
416 goto err_init;
417 }
418
419 dev_dbg(&op->dev, "allocating spi_master struct\n");
420 master = spi_alloc_master(&op->dev, sizeof *ms);
421 if (!master) {
422 rc = -ENOMEM;
423 goto err_alloc;
424 }
425 master->bus_num = -1;
426 master->num_chipselect = 1;
427 master->setup = mpc52xx_spi_setup;
428 master->transfer = mpc52xx_spi_transfer;
429 dev_set_drvdata(&op->dev, master);
430
431 ms = spi_master_get_devdata(master);
432 ms->master = master;
433 ms->regs = regs;
434 ms->irq0 = irq_of_parse_and_map(op->node, 0);
435 ms->irq1 = irq_of_parse_and_map(op->node, 1);
436 ms->state = mpc52xx_spi_fsmstate_idle;
437 ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
438 spin_lock_init(&ms->lock);
439 INIT_LIST_HEAD(&ms->queue);
440 INIT_WORK(&ms->work, mpc52xx_spi_wq);
441
442 /* Decide if interrupts can be used */
443 if (ms->irq0 && ms->irq1) {
444 rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
445 "mpc5200-spi-modf", ms);
446 rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
447 "mpc5200-spi-spiF", ms);
448 if (rc) {
449 free_irq(ms->irq0, ms);
450 free_irq(ms->irq1, ms);
451 ms->irq0 = ms->irq1 = 0;
452 }
453 } else {
454 /* operate in polled mode */
455 ms->irq0 = ms->irq1 = 0;
456 }
457
458 if (!ms->irq0)
459 dev_info(&op->dev, "using polled mode\n");
460
461 dev_dbg(&op->dev, "registering spi_master struct\n");
462 rc = spi_register_master(master);
463 if (rc)
464 goto err_register;
465
466 of_register_spi_devices(master, op->node);
467 dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
468
469 return rc;
470
471 err_register:
472 dev_err(&ms->master->dev, "initialization failed\n");
473 spi_master_put(master);
474 err_alloc:
475 err_init:
476 iounmap(regs);
477 return rc;
478}
479
480static int __devexit mpc52xx_spi_remove(struct of_device *op)
481{
482 struct spi_master *master = dev_get_drvdata(&op->dev);
483 struct mpc52xx_spi *ms = spi_master_get_devdata(master);
484
485 free_irq(ms->irq0, ms);
486 free_irq(ms->irq1, ms);
487
488 spi_unregister_master(master);
489 spi_master_put(master);
490 iounmap(ms->regs);
491
492 return 0;
493}
494
495static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
496 { .compatible = "fsl,mpc5200-spi", },
497 {}
498};
499MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
500
501static struct of_platform_driver mpc52xx_spi_of_driver = {
502 .owner = THIS_MODULE,
503 .name = "mpc52xx-spi",
504 .match_table = mpc52xx_spi_match,
505 .probe = mpc52xx_spi_probe,
506 .remove = __exit_p(mpc52xx_spi_remove),
507};
508
509static int __init mpc52xx_spi_init(void)
510{
511 return of_register_platform_driver(&mpc52xx_spi_of_driver);
512}
513module_init(mpc52xx_spi_init);
514
515static void __exit mpc52xx_spi_exit(void)
516{
517 of_unregister_platform_driver(&mpc52xx_spi_of_driver);
518}
519module_exit(mpc52xx_spi_exit);
520
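
The divider setup in mpc52xx_spi_fsmstate_idle() above is worth a worked example. The MPC5200 SPI clock is ipb_freq / ((SPPR + 1) * 2^(SPR + 1)), so the loop keeps halving the requested divider (rounding up) until the prescaler fits in the 3-bit SPPR field, bumping the power-of-two exponent SPR each pass. The standalone sketch below reproduces that arithmetic outside the kernel; the 132 MHz IPB clock and the 1 MHz requested rate are assumed example values, not taken from this patch.

/* Standalone sketch of the SPI_BRR calculation done in
 * mpc52xx_spi_fsmstate_idle(). Divisor = (SPPR + 1) * 2^(SPR + 1). */
#include <stdio.h>

static unsigned char mpc52xx_spi_brr(unsigned int ipb_freq,
				     unsigned int max_speed_hz)
{
	int sppr, spr = 0;

	/* minimum divider is 2; the "+ 1" rounds the divider up */
	sppr = ((ipb_freq / max_speed_hz) + 1) >> 1;
	if (sppr < 1)
		sppr = 1;
	while (((sppr - 1) & ~0x7) != 0) {	/* fold until SPPR fits in 3 bits */
		sppr = (sppr + 1) >> 1;		/* round up again */
		spr++;
	}
	sppr--;					/* register value is offset by one */
	if (spr > 7) {				/* clamp to the slowest possible rate */
		spr = 7;
		sppr = 7;
	}
	return (sppr << 4) | spr;
}

int main(void)
{
	unsigned int ipb = 132000000;	/* assumed IPB bus clock */
	unsigned int hz = 1000000;	/* requested SCK rate */
	unsigned char brr = mpc52xx_spi_brr(ipb, hz);
	unsigned int sppr = brr >> 4, spr = brr & 0x7;
	unsigned int div = (sppr + 1) << (spr + 1);

	/* 132 MHz / 1 MHz yields SPPR=4, SPR=4, i.e. /160 -> 825 kHz */
	printf("BRR=0x%02x, divisor=%u, SCK=%u Hz\n", brr, div, ipb / div);
	return 0;
}

Rounding the divider up means the resulting SCK never exceeds the spi_device's max_speed_hz; in the example it lands at 825 kHz rather than 1 MHz.
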
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 0fd0ec4d3a7d..930135dc73ba 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -5,6 +5,10 @@
5 * 5 *
6 * Copyright (C) 2006 Polycom, Inc. 6 * Copyright (C) 2006 Polycom, Inc.
7 * 7 *
8 * CPM SPI and QE buffer descriptors mode support:
9 * Copyright (c) 2009 MontaVista Software, Inc.
10 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -27,6 +31,9 @@
27#include <linux/spi/spi_bitbang.h> 31#include <linux/spi/spi_bitbang.h>
28#include <linux/platform_device.h> 32#include <linux/platform_device.h>
29#include <linux/fsl_devices.h> 33#include <linux/fsl_devices.h>
34#include <linux/dma-mapping.h>
35#include <linux/mm.h>
36#include <linux/mutex.h>
30#include <linux/of.h> 37#include <linux/of.h>
31#include <linux/of_platform.h> 38#include <linux/of_platform.h>
32#include <linux/gpio.h> 39#include <linux/gpio.h>
@@ -34,8 +41,19 @@
34#include <linux/of_spi.h> 41#include <linux/of_spi.h>
35 42
36#include <sysdev/fsl_soc.h> 43#include <sysdev/fsl_soc.h>
44#include <asm/cpm.h>
45#include <asm/qe.h>
37#include <asm/irq.h> 46#include <asm/irq.h>
38 47
48/* CPM1 and CPM2 are mutually exclusive. */
49#ifdef CONFIG_CPM1
50#include <asm/cpm1.h>
51#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
52#else
53#include <asm/cpm2.h>
54#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
55#endif
56
39/* SPI Controller registers */ 57/* SPI Controller registers */
40struct mpc8xxx_spi_reg { 58struct mpc8xxx_spi_reg {
41 u8 res1[0x20]; 59 u8 res1[0x20];
@@ -47,6 +65,28 @@ struct mpc8xxx_spi_reg {
47 __be32 receive; 65 __be32 receive;
48}; 66};
49 67
68/* SPI Parameter RAM */
69struct spi_pram {
70 __be16 rbase; /* Rx Buffer descriptor base address */
71 __be16 tbase; /* Tx Buffer descriptor base address */
72 u8 rfcr; /* Rx function code */
73 u8 tfcr; /* Tx function code */
74 __be16 mrblr; /* Max receive buffer length */
75 __be32 rstate; /* Internal */
76 __be32 rdp; /* Internal */
77 __be16 rbptr; /* Internal */
78 __be16 rbc; /* Internal */
79 __be32 rxtmp; /* Internal */
80 __be32 tstate; /* Internal */
81 __be32 tdp; /* Internal */
82 __be16 tbptr; /* Internal */
83 __be16 tbc; /* Internal */
84 __be32 txtmp; /* Internal */
85 __be32 res; /* Tx temp. */
86 __be16 rpbase; /* Relocation pointer (CPM1 only) */
87 __be16 res1; /* Reserved */
88};
89
50/* SPI Controller mode register definitions */ 90/* SPI Controller mode register definitions */
51#define SPMODE_LOOP (1 << 30) 91#define SPMODE_LOOP (1 << 30)
52#define SPMODE_CI_INACTIVEHIGH (1 << 29) 92#define SPMODE_CI_INACTIVEHIGH (1 << 29)
@@ -75,14 +115,40 @@ struct mpc8xxx_spi_reg {
75#define SPIM_NE 0x00000200 /* Not empty */ 115#define SPIM_NE 0x00000200 /* Not empty */
76#define SPIM_NF 0x00000100 /* Not full */ 116#define SPIM_NF 0x00000100 /* Not full */
77 117
118#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
119#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
120
121/* SPCOM register values */
122#define SPCOM_STR (1 << 23) /* Start transmit */
123
124#define SPI_PRAM_SIZE 0x100
125#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
126
78/* SPI Controller driver's private data. */ 127/* SPI Controller driver's private data. */
79struct mpc8xxx_spi { 128struct mpc8xxx_spi {
129 struct device *dev;
80 struct mpc8xxx_spi_reg __iomem *base; 130 struct mpc8xxx_spi_reg __iomem *base;
81 131
82 /* rx & tx bufs from the spi_transfer */ 132 /* rx & tx bufs from the spi_transfer */
83 const void *tx; 133 const void *tx;
84 void *rx; 134 void *rx;
85 135
136 int subblock;
137 struct spi_pram __iomem *pram;
138 struct cpm_buf_desc __iomem *tx_bd;
139 struct cpm_buf_desc __iomem *rx_bd;
140
141 struct spi_transfer *xfer_in_progress;
142
143 /* dma addresses for CPM transfers */
144 dma_addr_t tx_dma;
145 dma_addr_t rx_dma;
146 bool map_tx_dma;
147 bool map_rx_dma;
148
149 dma_addr_t dma_dummy_tx;
150 dma_addr_t dma_dummy_rx;
151
86 /* functions to deal with different sized buffers */ 152 /* functions to deal with different sized buffers */
87 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 153 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
88 u32(*get_tx) (struct mpc8xxx_spi *); 154 u32(*get_tx) (struct mpc8xxx_spi *);
@@ -96,7 +162,7 @@ struct mpc8xxx_spi {
96 u32 rx_shift; /* RX data reg shift when in qe mode */ 162 u32 rx_shift; /* RX data reg shift when in qe mode */
97 u32 tx_shift; /* TX data reg shift when in qe mode */ 163 u32 tx_shift; /* TX data reg shift when in qe mode */
98 164
99 bool qe_mode; 165 unsigned int flags;
100 166
101 struct workqueue_struct *workqueue; 167 struct workqueue_struct *workqueue;
102 struct work_struct work; 168 struct work_struct work;
@@ -107,6 +173,10 @@ struct mpc8xxx_spi {
107 struct completion done; 173 struct completion done;
108}; 174};
109 175
176static void *mpc8xxx_dummy_rx;
177static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
178static int mpc8xxx_dummy_rx_refcnt;
179
110struct spi_mpc8xxx_cs { 180struct spi_mpc8xxx_cs {
111 /* functions to deal with different sized buffers */ 181 /* functions to deal with different sized buffers */
112 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 182 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
@@ -155,6 +225,42 @@ MPC83XX_SPI_TX_BUF(u8)
155MPC83XX_SPI_TX_BUF(u16) 225MPC83XX_SPI_TX_BUF(u16)
156MPC83XX_SPI_TX_BUF(u32) 226MPC83XX_SPI_TX_BUF(u32)
157 227
228static void mpc8xxx_spi_change_mode(struct spi_device *spi)
229{
230 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
231 struct spi_mpc8xxx_cs *cs = spi->controller_state;
232 __be32 __iomem *mode = &mspi->base->mode;
233 unsigned long flags;
234
235 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
236 return;
237
238 /* Turn off IRQs locally to minimize time that SPI is disabled. */
239 local_irq_save(flags);
240
241 /* Turn off SPI unit prior changing mode */
242 mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
243 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
244
245 /* When in CPM mode, we need to reinit tx and rx. */
246 if (mspi->flags & SPI_CPM_MODE) {
247 if (mspi->flags & SPI_QE) {
248 qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
249 QE_CR_PROTOCOL_UNSPECIFIED, 0);
250 } else {
251 cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
252 if (mspi->flags & SPI_CPM1) {
253 out_be16(&mspi->pram->rbptr,
254 in_be16(&mspi->pram->rbase));
255 out_be16(&mspi->pram->tbptr,
256 in_be16(&mspi->pram->tbase));
257 }
258 }
259 }
260
261 local_irq_restore(flags);
262}
263
158static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) 264static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
159{ 265{
160 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 266 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -168,27 +274,13 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
168 } 274 }
169 275
170 if (value == BITBANG_CS_ACTIVE) { 276 if (value == BITBANG_CS_ACTIVE) {
171 u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
172
173 mpc8xxx_spi->rx_shift = cs->rx_shift; 277 mpc8xxx_spi->rx_shift = cs->rx_shift;
174 mpc8xxx_spi->tx_shift = cs->tx_shift; 278 mpc8xxx_spi->tx_shift = cs->tx_shift;
175 mpc8xxx_spi->get_rx = cs->get_rx; 279 mpc8xxx_spi->get_rx = cs->get_rx;
176 mpc8xxx_spi->get_tx = cs->get_tx; 280 mpc8xxx_spi->get_tx = cs->get_tx;
177 281
178 if (cs->hw_mode != regval) { 282 mpc8xxx_spi_change_mode(spi);
179 unsigned long flags; 283
180 __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
181
182 regval = cs->hw_mode;
183 /* Turn off IRQs locally to minimize time that
184 * SPI is disabled
185 */
186 local_irq_save(flags);
187 /* Turn off SPI unit prior changing mode */
188 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
189 mpc8xxx_spi_write_reg(mode, regval);
190 local_irq_restore(flags);
191 }
192 if (pdata->cs_control) 284 if (pdata->cs_control)
193 pdata->cs_control(spi, pol); 285 pdata->cs_control(spi, pol);
194 } 286 }
@@ -198,7 +290,6 @@ static
198int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 290int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
199{ 291{
200 struct mpc8xxx_spi *mpc8xxx_spi; 292 struct mpc8xxx_spi *mpc8xxx_spi;
201 u32 regval;
202 u8 bits_per_word, pm; 293 u8 bits_per_word, pm;
203 u32 hz; 294 u32 hz;
204 struct spi_mpc8xxx_cs *cs = spi->controller_state; 295 struct spi_mpc8xxx_cs *cs = spi->controller_state;
@@ -230,14 +321,14 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
230 if (bits_per_word <= 8) { 321 if (bits_per_word <= 8) {
231 cs->get_rx = mpc8xxx_spi_rx_buf_u8; 322 cs->get_rx = mpc8xxx_spi_rx_buf_u8;
232 cs->get_tx = mpc8xxx_spi_tx_buf_u8; 323 cs->get_tx = mpc8xxx_spi_tx_buf_u8;
233 if (mpc8xxx_spi->qe_mode) { 324 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
234 cs->rx_shift = 16; 325 cs->rx_shift = 16;
235 cs->tx_shift = 24; 326 cs->tx_shift = 24;
236 } 327 }
237 } else if (bits_per_word <= 16) { 328 } else if (bits_per_word <= 16) {
238 cs->get_rx = mpc8xxx_spi_rx_buf_u16; 329 cs->get_rx = mpc8xxx_spi_rx_buf_u16;
239 cs->get_tx = mpc8xxx_spi_tx_buf_u16; 330 cs->get_tx = mpc8xxx_spi_tx_buf_u16;
240 if (mpc8xxx_spi->qe_mode) { 331 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
241 cs->rx_shift = 16; 332 cs->rx_shift = 16;
242 cs->tx_shift = 16; 333 cs->tx_shift = 16;
243 } 334 }
@@ -247,7 +338,8 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
247 } else 338 } else
248 return -EINVAL; 339 return -EINVAL;
249 340
250 if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { 341 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
342 spi->mode & SPI_LSB_FIRST) {
251 cs->tx_shift = 0; 343 cs->tx_shift = 0;
252 if (bits_per_word <= 8) 344 if (bits_per_word <= 8)
253 cs->rx_shift = 8; 345 cs->rx_shift = 8;
@@ -286,37 +378,138 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
286 pm--; 378 pm--;
287 379
288 cs->hw_mode |= SPMODE_PM(pm); 380 cs->hw_mode |= SPMODE_PM(pm);
289 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); 381
290 if (cs->hw_mode != regval) { 382 mpc8xxx_spi_change_mode(spi);
291 unsigned long flags; 383 return 0;
292 __be32 __iomem *mode = &mpc8xxx_spi->base->mode; 384}
293 385
294 regval = cs->hw_mode; 386static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
295 /* Turn off IRQs locally to minimize time 387{
296 * that SPI is disabled 388 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
297 */ 389 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
298 local_irq_save(flags); 390 unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
299 /* Turn off SPI unit prior changing mode */ 391 unsigned int xfer_ofs;
300 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); 392
301 mpc8xxx_spi_write_reg(mode, regval); 393 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
302 local_irq_restore(flags); 394
395 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
396 out_be16(&rx_bd->cbd_datlen, 0);
397 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
398
399 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
400 out_be16(&tx_bd->cbd_datlen, xfer_len);
401 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
402 BD_SC_LAST);
403
404 /* start transfer */
405 mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
406}
407
408static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
409 struct spi_transfer *t, bool is_dma_mapped)
410{
411 struct device *dev = mspi->dev;
412
413 if (is_dma_mapped) {
414 mspi->map_tx_dma = 0;
415 mspi->map_rx_dma = 0;
416 } else {
417 mspi->map_tx_dma = 1;
418 mspi->map_rx_dma = 1;
419 }
420
421 if (!t->tx_buf) {
422 mspi->tx_dma = mspi->dma_dummy_tx;
423 mspi->map_tx_dma = 0;
424 }
425
426 if (!t->rx_buf) {
427 mspi->rx_dma = mspi->dma_dummy_rx;
428 mspi->map_rx_dma = 0;
303 } 429 }
430
431 if (mspi->map_tx_dma) {
432 void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
433
434 mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
435 DMA_TO_DEVICE);
436 if (dma_mapping_error(dev, mspi->tx_dma)) {
437 dev_err(dev, "unable to map tx dma\n");
438 return -ENOMEM;
439 }
440 } else {
441 mspi->tx_dma = t->tx_dma;
442 }
443
444 if (mspi->map_rx_dma) {
445 mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
446 DMA_FROM_DEVICE);
447 if (dma_mapping_error(dev, mspi->rx_dma)) {
448 dev_err(dev, "unable to map rx dma\n");
449 goto err_rx_dma;
450 }
451 } else {
452 mspi->rx_dma = t->rx_dma;
453 }
454
455 /* enable rx ints */
456 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);
457
458 mspi->xfer_in_progress = t;
459 mspi->count = t->len;
460
461 /* start CPM transfers */
462 mpc8xxx_spi_cpm_bufs_start(mspi);
463
304 return 0; 464 return 0;
465
466err_rx_dma:
467 if (mspi->map_tx_dma)
468 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
469 return -ENOMEM;
305} 470}
306 471
307static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 472static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
308{ 473{
309 struct mpc8xxx_spi *mpc8xxx_spi; 474 struct device *dev = mspi->dev;
310 u32 word, len, bits_per_word; 475 struct spi_transfer *t = mspi->xfer_in_progress;
476
477 if (mspi->map_tx_dma)
478 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
 479 if (mspi->map_rx_dma)
480 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
481 mspi->xfer_in_progress = NULL;
482}
311 483
312 mpc8xxx_spi = spi_master_get_devdata(spi->master); 484static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
485 struct spi_transfer *t, unsigned int len)
486{
487 u32 word;
488
489 mspi->count = len;
490
491 /* enable rx ints */
492 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
493
494 /* transmit word */
495 word = mspi->get_tx(mspi);
496 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
497
498 return 0;
499}
500
501static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
502 bool is_dma_mapped)
503{
504 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
505 unsigned int len = t->len;
506 u8 bits_per_word;
507 int ret;
313 508
314 mpc8xxx_spi->tx = t->tx_buf;
315 mpc8xxx_spi->rx = t->rx_buf;
316 bits_per_word = spi->bits_per_word; 509 bits_per_word = spi->bits_per_word;
317 if (t->bits_per_word) 510 if (t->bits_per_word)
318 bits_per_word = t->bits_per_word; 511 bits_per_word = t->bits_per_word;
319 len = t->len; 512
320 if (bits_per_word > 8) { 513 if (bits_per_word > 8) {
321 /* invalid length? */ 514 /* invalid length? */
322 if (len & 1) 515 if (len & 1)
@@ -329,22 +522,27 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
329 return -EINVAL; 522 return -EINVAL;
330 len /= 2; 523 len /= 2;
331 } 524 }
332 mpc8xxx_spi->count = len;
333 525
334 INIT_COMPLETION(mpc8xxx_spi->done); 526 mpc8xxx_spi->tx = t->tx_buf;
527 mpc8xxx_spi->rx = t->rx_buf;
335 528
336 /* enable rx ints */ 529 INIT_COMPLETION(mpc8xxx_spi->done);
337 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE);
338 530
339 /* transmit word */ 531 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
340 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 532 ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
341 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 533 else
534 ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
535 if (ret)
536 return ret;
342 537
343 wait_for_completion(&mpc8xxx_spi->done); 538 wait_for_completion(&mpc8xxx_spi->done);
344 539
345 /* disable rx ints */ 540 /* disable rx ints */
346 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 541 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
347 542
543 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
544 mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);
545
348 return mpc8xxx_spi->count; 546 return mpc8xxx_spi->count;
349} 547}
350 548
@@ -375,7 +573,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
375 } 573 }
376 cs_change = t->cs_change; 574 cs_change = t->cs_change;
377 if (t->len) 575 if (t->len)
378 status = mpc8xxx_spi_bufs(spi, t); 576 status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
379 if (status) { 577 if (status) {
380 status = -EMSGSIZE; 578 status = -EMSGSIZE;
381 break; 579 break;
@@ -464,45 +662,80 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
464 return 0; 662 return 0;
465} 663}
466 664
467static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) 665static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
468{ 666{
469 struct mpc8xxx_spi *mpc8xxx_spi = context_data; 667 u16 len;
470 u32 event;
471 irqreturn_t ret = IRQ_NONE;
472 668
473 /* Get interrupt events(tx/rx) */ 669 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
474 event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event); 670 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
475 671
476 /* We need handle RX first */ 672 len = in_be16(&mspi->rx_bd->cbd_datlen);
477 if (event & SPIE_NE) { 673 if (len > mspi->count) {
478 u32 rx_data = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive); 674 WARN_ON(1);
675 len = mspi->count;
676 }
479 677
480 if (mpc8xxx_spi->rx) 678 /* Clear the events */
481 mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi); 679 mpc8xxx_spi_write_reg(&mspi->base->event, events);
482 680
483 ret = IRQ_HANDLED; 681 mspi->count -= len;
682 if (mspi->count)
683 mpc8xxx_spi_cpm_bufs_start(mspi);
684 else
685 complete(&mspi->done);
686}
687
688static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
689{
690 /* We need handle RX first */
691 if (events & SPIE_NE) {
692 u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);
693
694 if (mspi->rx)
695 mspi->get_rx(rx_data, mspi);
484 } 696 }
485 697
486 if ((event & SPIE_NF) == 0) 698 if ((events & SPIE_NF) == 0)
487 /* spin until TX is done */ 699 /* spin until TX is done */
488 while (((event = 700 while (((events =
489 mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) & 701 mpc8xxx_spi_read_reg(&mspi->base->event)) &
490 SPIE_NF) == 0) 702 SPIE_NF) == 0)
491 cpu_relax(); 703 cpu_relax();
492 704
493 mpc8xxx_spi->count -= 1; 705 /* Clear the events */
494 if (mpc8xxx_spi->count) { 706 mpc8xxx_spi_write_reg(&mspi->base->event, events);
495 u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 707
496 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 708 mspi->count -= 1;
709 if (mspi->count) {
710 u32 word = mspi->get_tx(mspi);
711
712 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
497 } else { 713 } else {
498 complete(&mpc8xxx_spi->done); 714 complete(&mspi->done);
499 } 715 }
716}
500 717
501 /* Clear the events */ 718static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
502 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event); 719{
720 struct mpc8xxx_spi *mspi = context_data;
721 irqreturn_t ret = IRQ_NONE;
722 u32 events;
723
724 /* Get interrupt events(tx/rx) */
725 events = mpc8xxx_spi_read_reg(&mspi->base->event);
726 if (events)
727 ret = IRQ_HANDLED;
728
729 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
730
731 if (mspi->flags & SPI_CPM_MODE)
732 mpc8xxx_spi_cpm_irq(mspi, events);
733 else
734 mpc8xxx_spi_cpu_irq(mspi, events);
503 735
504 return ret; 736 return ret;
505} 737}
738
506static int mpc8xxx_spi_transfer(struct spi_device *spi, 739static int mpc8xxx_spi_transfer(struct spi_device *spi,
507 struct spi_message *m) 740 struct spi_message *m)
508{ 741{
@@ -526,6 +759,215 @@ static void mpc8xxx_spi_cleanup(struct spi_device *spi)
526 kfree(spi->controller_state); 759 kfree(spi->controller_state);
527} 760}
528 761
762static void *mpc8xxx_spi_alloc_dummy_rx(void)
763{
764 mutex_lock(&mpc8xxx_dummy_rx_lock);
765
766 if (!mpc8xxx_dummy_rx)
767 mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
768 if (mpc8xxx_dummy_rx)
769 mpc8xxx_dummy_rx_refcnt++;
770
771 mutex_unlock(&mpc8xxx_dummy_rx_lock);
772
773 return mpc8xxx_dummy_rx;
774}
775
776static void mpc8xxx_spi_free_dummy_rx(void)
777{
778 mutex_lock(&mpc8xxx_dummy_rx_lock);
779
780 switch (mpc8xxx_dummy_rx_refcnt) {
781 case 0:
782 WARN_ON(1);
783 break;
784 case 1:
785 kfree(mpc8xxx_dummy_rx);
786 mpc8xxx_dummy_rx = NULL;
787 /* fall through */
788 default:
789 mpc8xxx_dummy_rx_refcnt--;
790 break;
791 }
792
793 mutex_unlock(&mpc8xxx_dummy_rx_lock);
794}
795
796static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
797{
798 struct device *dev = mspi->dev;
799 struct device_node *np = dev_archdata_get_node(&dev->archdata);
800 const u32 *iprop;
801 int size;
802 unsigned long spi_base_ofs;
803 unsigned long pram_ofs = -ENOMEM;
804
805 /* Can't use of_address_to_resource(), QE muram isn't at 0. */
806 iprop = of_get_property(np, "reg", &size);
807
808 /* QE with a fixed pram location? */
809 if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
810 return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
811
812 /* QE but with a dynamic pram location? */
813 if (mspi->flags & SPI_QE) {
814 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
815 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
816 QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
817 return pram_ofs;
818 }
819
820 /* CPM1 and CPM2 pram must be at a fixed addr. */
821 if (!iprop || size != sizeof(*iprop) * 4)
822 return -ENOMEM;
823
824 spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
825 if (IS_ERR_VALUE(spi_base_ofs))
826 return -ENOMEM;
827
828 if (mspi->flags & SPI_CPM2) {
829 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
830 if (!IS_ERR_VALUE(pram_ofs)) {
831 u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
832
833 out_be16(spi_base, pram_ofs);
834 }
835 } else {
836 struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
837 u16 rpbase = in_be16(&pram->rpbase);
838
839 /* Microcode relocation patch applied? */
840 if (rpbase)
841 pram_ofs = rpbase;
842 else
843 return spi_base_ofs;
844 }
845
846 cpm_muram_free(spi_base_ofs);
847 return pram_ofs;
848}
849
850static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
851{
852 struct device *dev = mspi->dev;
853 struct device_node *np = dev_archdata_get_node(&dev->archdata);
854 const u32 *iprop;
855 int size;
856 unsigned long pram_ofs;
857 unsigned long bds_ofs;
858
859 if (!(mspi->flags & SPI_CPM_MODE))
860 return 0;
861
862 if (!mpc8xxx_spi_alloc_dummy_rx())
863 return -ENOMEM;
864
865 if (mspi->flags & SPI_QE) {
866 iprop = of_get_property(np, "cell-index", &size);
867 if (iprop && size == sizeof(*iprop))
868 mspi->subblock = *iprop;
869
870 switch (mspi->subblock) {
871 default:
872 dev_warn(dev, "cell-index unspecified, assuming SPI1");
873 /* fall through */
874 case 0:
875 mspi->subblock = QE_CR_SUBBLOCK_SPI1;
876 break;
877 case 1:
878 mspi->subblock = QE_CR_SUBBLOCK_SPI2;
879 break;
880 }
881 }
882
883 pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
884 if (IS_ERR_VALUE(pram_ofs)) {
885 dev_err(dev, "can't allocate spi parameter ram\n");
886 goto err_pram;
887 }
888
889 bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
890 sizeof(*mspi->rx_bd), 8);
891 if (IS_ERR_VALUE(bds_ofs)) {
892 dev_err(dev, "can't allocate bds\n");
893 goto err_bds;
894 }
895
896 mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
897 DMA_TO_DEVICE);
898 if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
899 dev_err(dev, "unable to map dummy tx buffer\n");
900 goto err_dummy_tx;
901 }
902
903 mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
904 DMA_FROM_DEVICE);
905 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
906 dev_err(dev, "unable to map dummy rx buffer\n");
907 goto err_dummy_rx;
908 }
909
910 mspi->pram = cpm_muram_addr(pram_ofs);
911
912 mspi->tx_bd = cpm_muram_addr(bds_ofs);
913 mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
914
915 /* Initialize parameter ram. */
916 out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
917 out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
918 out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
919 out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
920 out_be16(&mspi->pram->mrblr, SPI_MRBLR);
921 out_be32(&mspi->pram->rstate, 0);
922 out_be32(&mspi->pram->rdp, 0);
923 out_be16(&mspi->pram->rbptr, 0);
924 out_be16(&mspi->pram->rbc, 0);
925 out_be32(&mspi->pram->rxtmp, 0);
926 out_be32(&mspi->pram->tstate, 0);
927 out_be32(&mspi->pram->tdp, 0);
928 out_be16(&mspi->pram->tbptr, 0);
929 out_be16(&mspi->pram->tbc, 0);
930 out_be32(&mspi->pram->txtmp, 0);
931
932 return 0;
933
934err_dummy_rx:
935 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
936err_dummy_tx:
937 cpm_muram_free(bds_ofs);
938err_bds:
939 cpm_muram_free(pram_ofs);
940err_pram:
941 mpc8xxx_spi_free_dummy_rx();
942 return -ENOMEM;
943}
944
945static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
946{
947 struct device *dev = mspi->dev;
948
949 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
950 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
951 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
952 cpm_muram_free(cpm_muram_offset(mspi->pram));
953 mpc8xxx_spi_free_dummy_rx();
954}
955
956static const char *mpc8xxx_spi_strmode(unsigned int flags)
957{
958 if (flags & SPI_QE_CPU_MODE) {
959 return "QE CPU";
960 } else if (flags & SPI_CPM_MODE) {
961 if (flags & SPI_QE)
962 return "QE";
963 else if (flags & SPI_CPM2)
964 return "CPM2";
965 else
966 return "CPM1";
967 }
968 return "CPU";
969}
970
529static struct spi_master * __devinit 971static struct spi_master * __devinit
530mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 972mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
531{ 973{
@@ -552,14 +994,19 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
552 master->cleanup = mpc8xxx_spi_cleanup; 994 master->cleanup = mpc8xxx_spi_cleanup;
553 995
554 mpc8xxx_spi = spi_master_get_devdata(master); 996 mpc8xxx_spi = spi_master_get_devdata(master);
555 mpc8xxx_spi->qe_mode = pdata->qe_mode; 997 mpc8xxx_spi->dev = dev;
556 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 998 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
557 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 999 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
1000 mpc8xxx_spi->flags = pdata->flags;
558 mpc8xxx_spi->spibrg = pdata->sysclk; 1001 mpc8xxx_spi->spibrg = pdata->sysclk;
559 1002
1003 ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
1004 if (ret)
1005 goto err_cpm_init;
1006
560 mpc8xxx_spi->rx_shift = 0; 1007 mpc8xxx_spi->rx_shift = 0;
561 mpc8xxx_spi->tx_shift = 0; 1008 mpc8xxx_spi->tx_shift = 0;
562 if (mpc8xxx_spi->qe_mode) { 1009 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
563 mpc8xxx_spi->rx_shift = 16; 1010 mpc8xxx_spi->rx_shift = 16;
564 mpc8xxx_spi->tx_shift = 24; 1011 mpc8xxx_spi->tx_shift = 24;
565 } 1012 }
@@ -569,7 +1016,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
569 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 1016 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
570 if (mpc8xxx_spi->base == NULL) { 1017 if (mpc8xxx_spi->base == NULL) {
571 ret = -ENOMEM; 1018 ret = -ENOMEM;
572 goto put_master; 1019 goto err_ioremap;
573 } 1020 }
574 1021
575 mpc8xxx_spi->irq = irq; 1022 mpc8xxx_spi->irq = irq;
@@ -592,7 +1039,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
592 1039
593 /* Enable SPI interface */ 1040 /* Enable SPI interface */
594 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 1041 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
595 if (pdata->qe_mode) 1042 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
596 regval |= SPMODE_OP; 1043 regval |= SPMODE_OP;
597 1044
598 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); 1045 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
@@ -612,9 +1059,8 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
612 if (ret < 0) 1059 if (ret < 0)
613 goto unreg_master; 1060 goto unreg_master;
614 1061
615 printk(KERN_INFO 1062 dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
616 "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n", 1063 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
617 dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq);
618 1064
619 return master; 1065 return master;
620 1066
@@ -624,7 +1070,9 @@ free_irq:
624 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1070 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
625unmap_io: 1071unmap_io:
626 iounmap(mpc8xxx_spi->base); 1072 iounmap(mpc8xxx_spi->base);
627put_master: 1073err_ioremap:
1074 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
1075err_cpm_init:
628 spi_master_put(master); 1076 spi_master_put(master);
629err: 1077err:
630 return ERR_PTR(ret); 1078 return ERR_PTR(ret);
@@ -644,6 +1092,7 @@ static int __devexit mpc8xxx_spi_remove(struct device *dev)
644 1092
645 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1093 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
646 iounmap(mpc8xxx_spi->base); 1094 iounmap(mpc8xxx_spi->base);
1095 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
647 1096
648 return 0; 1097 return 0;
649} 1098}
@@ -709,6 +1158,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
709 gpio = of_get_gpio_flags(np, i, &flags); 1158 gpio = of_get_gpio_flags(np, i, &flags);
710 if (!gpio_is_valid(gpio)) { 1159 if (!gpio_is_valid(gpio)) {
711 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); 1160 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
1161 ret = gpio;
712 goto err_loop; 1162 goto err_loop;
713 } 1163 }
714 1164
@@ -804,7 +1254,13 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
804 1254
805 prop = of_get_property(np, "mode", NULL); 1255 prop = of_get_property(np, "mode", NULL);
806 if (prop && !strcmp(prop, "cpu-qe")) 1256 if (prop && !strcmp(prop, "cpu-qe"))
807 pdata->qe_mode = 1; 1257 pdata->flags = SPI_QE_CPU_MODE;
1258 else if (prop && !strcmp(prop, "qe"))
1259 pdata->flags = SPI_CPM_MODE | SPI_QE;
1260 else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
1261 pdata->flags = SPI_CPM_MODE | SPI_CPM2;
1262 else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
1263 pdata->flags = SPI_CPM_MODE | SPI_CPM1;
808 1264
809 ret = of_mpc8xxx_spi_get_chipselects(dev); 1265 ret = of_mpc8xxx_spi_get_chipselects(dev);
810 if (ret) 1266 if (ret)
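
For context, the new dev_info() line above prints a human-readable mode string via mpc8xxx_spi_strmode(), whose body is elsewhere in this patch and not shown in this excerpt. Below is a minimal standalone sketch of what such a flags-to-string mapping could look like, based only on the flag names visible in this hunk; it is hypothetical, and the macro bit values are illustrative, not the real ones from the Freescale SPI platform-data header.

#include <stdio.h>

/* Illustrative bit values; the real SPI_* flags are defined by the patch
 * in the Freescale SPI platform-data header and may differ. */
#define SPI_QE_CPU_MODE  (1 << 0)
#define SPI_CPM_MODE     (1 << 1)
#define SPI_CPM1         (1 << 2)
#define SPI_CPM2         (1 << 3)
#define SPI_QE           (1 << 4)

/* Hypothetical stand-in for mpc8xxx_spi_strmode(): map a flags word to a
 * short mode description for the probe message. */
static const char *strmode_sketch(unsigned int flags)
{
	if (flags & SPI_CPM_MODE) {
		if (flags & SPI_QE)
			return "QE";          /* "qe" mode property */
		if (flags & SPI_CPM2)
			return "CPM2";        /* fsl,cpm2-spi compatible */
		return "CPM1";                /* fsl,cpm1-spi compatible */
	}
	if (flags & SPI_QE_CPU_MODE)
		return "QE CPU";              /* "cpu-qe" mode property */
	return "CPU";                         /* plain CPU-driven mode */
}

int main(void)
{
	printf("%s\n", strmode_sketch(SPI_CPM_MODE | SPI_QE)); /* prints "QE" */
	return 0;
}
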
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 46b8c5c2f45e..5a143b9f6361 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -148,7 +148,8 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
148{ 148{
149 u8 bits_per_word; 149 u8 bits_per_word;
150 150
151 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 151 bits_per_word = (t && t->bits_per_word)
152 ? t->bits_per_word : spi->bits_per_word;
152 if (bits_per_word != 8) { 153 if (bits_per_word != 8) {
153 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", 154 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
154 __func__, bits_per_word); 155 __func__, bits_per_word);
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 31b2710882e4..bea5b827bebe 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -419,19 +419,4 @@ struct qe_udc {
419#define CPM_USB_RESTART_TX_OPCODE 0x0b 419#define CPM_USB_RESTART_TX_OPCODE 0x0b
420#define CPM_USB_EP_SHIFT 5 420#define CPM_USB_EP_SHIFT 5
421 421
422#ifndef CONFIG_CPM
423inline int cpm_command(u32 command, u8 opcode)
424{
425 return -EOPNOTSUPP;
426}
427#endif
428
429#ifndef CONFIG_QUICC_ENGINE
430inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
431 u32 cmd_input)
432{
433 return -EOPNOTSUPP;
434}
435#endif
436
437#endif /* __FSL_QE_UDC_H */ 422#endif /* __FSL_QE_UDC_H */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 4d8c54c23dd7..b043ac83c412 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *info)
282 return 0; 282 return 0;
283} 283}
284 284
285static void offb_destroy(struct fb_info *info)
286{
287 if (info->screen_base)
288 iounmap(info->screen_base);
289 release_mem_region(info->aperture_base, info->aperture_size);
290 framebuffer_release(info);
291}
292
285static struct fb_ops offb_ops = { 293static struct fb_ops offb_ops = {
286 .owner = THIS_MODULE, 294 .owner = THIS_MODULE,
295 .fb_destroy = offb_destroy,
287 .fb_setcolreg = offb_setcolreg, 296 .fb_setcolreg = offb_setcolreg,
288 .fb_set_par = offb_set_par, 297 .fb_set_par = offb_set_par,
289 .fb_blank = offb_blank, 298 .fb_blank = offb_blank,
@@ -482,10 +491,14 @@ static void __init offb_init_fb(const char *name, const char *full_name,
482 var->sync = 0; 491 var->sync = 0;
483 var->vmode = FB_VMODE_NONINTERLACED; 492 var->vmode = FB_VMODE_NONINTERLACED;
484 493
494 /* set offb aperture size for generic probing */
495 info->aperture_base = address;
496 info->aperture_size = fix->smem_len;
497
485 info->fbops = &offb_ops; 498 info->fbops = &offb_ops;
486 info->screen_base = ioremap(address, fix->smem_len); 499 info->screen_base = ioremap(address, fix->smem_len);
487 info->pseudo_palette = (void *) (info + 1); 500 info->pseudo_palette = (void *) (info + 1);
488 info->flags = FBINFO_DEFAULT | foreign_endian; 501 info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
489 502
490 fb_alloc_cmap(&info->cmap, 256, 0); 503 fb_alloc_cmap(&info->cmap, 256, 0);
491 504
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3711b888d482..d958b76430a2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -861,8 +861,10 @@ config GEF_WDT
861 Watchdog timer found in a number of GE Fanuc single board computers. 861 Watchdog timer found in a number of GE Fanuc single board computers.
862 862
863config MPC5200_WDT 863config MPC5200_WDT
864 tristate "MPC5200 Watchdog Timer" 864 bool "MPC52xx Watchdog Timer"
865 depends on PPC_MPC52xx 865 depends on PPC_MPC52xx
866 help
867 Use General Purpose Timer (GPT) 0 on the MPC5200 as Watchdog.
866 868
867config 8xxx_WDT 869config 8xxx_WDT
868 tristate "MPC8xxx Platform Watchdog Timer" 870 tristate "MPC8xxx Platform Watchdog Timer"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 699199b1baa6..89c045dc468e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -118,7 +118,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
118 118
119# POWERPC Architecture 119# POWERPC Architecture
120obj-$(CONFIG_GEF_WDT) += gef_wdt.o 120obj-$(CONFIG_GEF_WDT) += gef_wdt.o
121obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
122obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o 121obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
123obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o 122obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
124obj-$(CONFIG_PIKA_WDT) += pika_wdt.o 123obj-$(CONFIG_PIKA_WDT) += pika_wdt.o
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
deleted file mode 100644
index fa9c47ce0ae7..000000000000
--- a/drivers/watchdog/mpc5200_wdt.c
+++ /dev/null
@@ -1,293 +0,0 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/miscdevice.h>
4#include <linux/watchdog.h>
5#include <linux/io.h>
6#include <linux/spinlock.h>
7#include <linux/of_platform.h>
8#include <linux/uaccess.h>
9#include <asm/mpc52xx.h>
10
11
12#define GPT_MODE_WDT (1 << 15)
13#define GPT_MODE_CE (1 << 12)
14#define GPT_MODE_MS_TIMER (0x4)
15
16
17struct mpc5200_wdt {
18 unsigned count; /* timer ticks before watchdog kicks in */
19 long ipb_freq;
20 struct miscdevice miscdev;
21 struct resource mem;
22 struct mpc52xx_gpt __iomem *regs;
23 spinlock_t io_lock;
24};
25
 26/* is_active stores whether or not the /dev/watchdog device is opened */
27static unsigned long is_active;
28
 29/* misc devices don't provide a way to get back to 'dev' or 'miscdev' from
 30 * file operations, which sucks. But there can be max 1 watchdog anyway, so...
31 */
32static struct mpc5200_wdt *wdt_global;
33
34
35/* helper to calculate timeout in timer counts */
36static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout)
37{
38 /* use biggest prescaler of 64k */
39 wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout;
40
41 if (wdt->count > 0xffff)
42 wdt->count = 0xffff;
43}
44/* return timeout in seconds (calculated from timer count) */
45static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt)
46{
47 return wdt->count * 0x10000 / wdt->ipb_freq;
48}
49
50
51/* watchdog operations */
52static int mpc5200_wdt_start(struct mpc5200_wdt *wdt)
53{
54 spin_lock(&wdt->io_lock);
55 /* disable */
56 out_be32(&wdt->regs->mode, 0);
57 /* set timeout, with maximum prescaler */
58 out_be32(&wdt->regs->count, 0x0 | wdt->count);
59 /* enable watchdog */
60 out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT |
61 GPT_MODE_MS_TIMER);
62 spin_unlock(&wdt->io_lock);
63
64 return 0;
65}
66static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt)
67{
68 spin_lock(&wdt->io_lock);
69 /* writing A5 to OCPW resets the watchdog */
70 out_be32(&wdt->regs->mode, 0xA5000000 |
71 (0xffffff & in_be32(&wdt->regs->mode)));
72 spin_unlock(&wdt->io_lock);
73 return 0;
74}
75static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt)
76{
77 spin_lock(&wdt->io_lock);
78 /* disable */
79 out_be32(&wdt->regs->mode, 0);
80 spin_unlock(&wdt->io_lock);
81 return 0;
82}
83
84
85/* file operations */
86static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data,
87 size_t len, loff_t *ppos)
88{
89 struct mpc5200_wdt *wdt = file->private_data;
90 mpc5200_wdt_ping(wdt);
91 return 0;
92}
93static struct watchdog_info mpc5200_wdt_info = {
94 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
95 .identity = "mpc5200 watchdog on GPT0",
96};
97static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd,
98 unsigned long arg)
99{
100 struct mpc5200_wdt *wdt = file->private_data;
101 int __user *data = (int __user *)arg;
102 int timeout;
103 int ret = 0;
104
105 switch (cmd) {
106 case WDIOC_GETSUPPORT:
107 ret = copy_to_user(data, &mpc5200_wdt_info,
108 sizeof(mpc5200_wdt_info));
109 if (ret)
110 ret = -EFAULT;
111 break;
112
113 case WDIOC_GETSTATUS:
114 case WDIOC_GETBOOTSTATUS:
115 ret = put_user(0, data);
116 break;
117
118 case WDIOC_KEEPALIVE:
119 mpc5200_wdt_ping(wdt);
120 break;
121
122 case WDIOC_SETTIMEOUT:
123 ret = get_user(timeout, data);
124 if (ret)
125 break;
126 mpc5200_wdt_set_timeout(wdt, timeout);
127 mpc5200_wdt_start(wdt);
128 /* fall through and return the timeout */
129
130 case WDIOC_GETTIMEOUT:
131 timeout = mpc5200_wdt_get_timeout(wdt);
132 ret = put_user(timeout, data);
133 break;
134
135 default:
136 ret = -ENOTTY;
137 }
138 return ret;
139}
140
141static int mpc5200_wdt_open(struct inode *inode, struct file *file)
142{
143 /* /dev/watchdog can only be opened once */
144 if (test_and_set_bit(0, &is_active))
145 return -EBUSY;
146
147 /* Set and activate the watchdog */
148 mpc5200_wdt_set_timeout(wdt_global, 30);
149 mpc5200_wdt_start(wdt_global);
150 file->private_data = wdt_global;
151 return nonseekable_open(inode, file);
152}
153static int mpc5200_wdt_release(struct inode *inode, struct file *file)
154{
155#if WATCHDOG_NOWAYOUT == 0
156 struct mpc5200_wdt *wdt = file->private_data;
157 mpc5200_wdt_stop(wdt);
158 wdt->count = 0; /* == disabled */
159#endif
160 clear_bit(0, &is_active);
161 return 0;
162}
163
164static const struct file_operations mpc5200_wdt_fops = {
165 .owner = THIS_MODULE,
166 .write = mpc5200_wdt_write,
167 .unlocked_ioctl = mpc5200_wdt_ioctl,
168 .open = mpc5200_wdt_open,
169 .release = mpc5200_wdt_release,
170};
171
172/* module operations */
173static int mpc5200_wdt_probe(struct of_device *op,
174 const struct of_device_id *match)
175{
176 struct mpc5200_wdt *wdt;
177 int err;
178 const void *has_wdt;
179 int size;
180
181 has_wdt = of_get_property(op->node, "has-wdt", NULL);
182 if (!has_wdt)
183 has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL);
184 if (!has_wdt)
185 return -ENODEV;
186
187 wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
188 if (!wdt)
189 return -ENOMEM;
190
191 wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
192
193 err = of_address_to_resource(op->node, 0, &wdt->mem);
194 if (err)
195 goto out_free;
196 size = wdt->mem.end - wdt->mem.start + 1;
197 if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) {
198 err = -ENODEV;
199 goto out_free;
200 }
201 wdt->regs = ioremap(wdt->mem.start, size);
202 if (!wdt->regs) {
203 err = -ENODEV;
204 goto out_release;
205 }
206
207 dev_set_drvdata(&op->dev, wdt);
208 spin_lock_init(&wdt->io_lock);
209
210 wdt->miscdev = (struct miscdevice) {
211 .minor = WATCHDOG_MINOR,
212 .name = "watchdog",
213 .fops = &mpc5200_wdt_fops,
214 .parent = &op->dev,
215 };
216 wdt_global = wdt;
217 err = misc_register(&wdt->miscdev);
218 if (!err)
219 return 0;
220
221 iounmap(wdt->regs);
222out_release:
223 release_mem_region(wdt->mem.start, size);
224out_free:
225 kfree(wdt);
226 return err;
227}
228
229static int mpc5200_wdt_remove(struct of_device *op)
230{
231 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
232
233 mpc5200_wdt_stop(wdt);
234 misc_deregister(&wdt->miscdev);
235 iounmap(wdt->regs);
236 release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1);
237 kfree(wdt);
238
239 return 0;
240}
241static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state)
242{
243 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
244 mpc5200_wdt_stop(wdt);
245 return 0;
246}
247static int mpc5200_wdt_resume(struct of_device *op)
248{
249 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
250 if (wdt->count)
251 mpc5200_wdt_start(wdt);
252 return 0;
253}
254static int mpc5200_wdt_shutdown(struct of_device *op)
255{
256 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
257 mpc5200_wdt_stop(wdt);
258 return 0;
259}
260
261static struct of_device_id mpc5200_wdt_match[] = {
262 { .compatible = "mpc5200-gpt", },
263 { .compatible = "fsl,mpc5200-gpt", },
264 {},
265};
266static struct of_platform_driver mpc5200_wdt_driver = {
267 .owner = THIS_MODULE,
268 .name = "mpc5200-gpt-wdt",
269 .match_table = mpc5200_wdt_match,
270 .probe = mpc5200_wdt_probe,
271 .remove = mpc5200_wdt_remove,
272 .suspend = mpc5200_wdt_suspend,
273 .resume = mpc5200_wdt_resume,
274 .shutdown = mpc5200_wdt_shutdown,
275};
276
277
278static int __init mpc5200_wdt_init(void)
279{
280 return of_register_platform_driver(&mpc5200_wdt_driver);
281}
282
283static void __exit mpc5200_wdt_exit(void)
284{
285 of_unregister_platform_driver(&mpc5200_wdt_driver);
286}
287
288module_init(mpc5200_wdt_init);
289module_exit(mpc5200_wdt_exit);
290
291MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>");
292MODULE_LICENSE("Dual BSD/GPL");
293MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
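
For reference, the driver removed above exposed the standard /dev/watchdog character-device interface: any write() acts as a ping, and WDIOC_SETTIMEOUT / WDIOC_KEEPALIVE ioctls adjust and kick the timer. Per the updated Kconfig help text, the function apparently moves into the MPC52xx GPT support, which is expected to keep the same userspace interface. A minimal userspace sketch of driving such a device follows, assuming the usual /dev/watchdog node and the <linux/watchdog.h> ioctls; the 30-second timeout mirrors the default the removed open() path set.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* the removed driver defaulted to 30 s on open() */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	/* Negotiate the timeout; the old driver clamped it to its 16-bit counter
	 * and returned the effective value back through the same pointer. */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("watchdog timeout is %d s\n", timeout);

	/* Keep the board alive for a while: the KEEPALIVE ioctl (or any write)
	 * resets the countdown. */
	for (int i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(timeout > 1 ? timeout / 2 : 1);
	}

	close(fd);	/* with NOWAYOUT disabled, release() stops the watchdog */
	return 0;
}
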