Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/Makefile           |  12
-rw-r--r--  arch/powerpc/sysdev/axonram.c          |   8
-rw-r--r--  arch/powerpc/sysdev/commproc.c         | 299
-rw-r--r--  arch/powerpc/sysdev/commproc.h         |  12
-rw-r--r--  arch/powerpc/sysdev/cpm2_common.c      | 178
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c         |  13
-rw-r--r--  arch/powerpc/sysdev/cpm_common.c       | 205
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c       |   4
-rw-r--r--  arch/powerpc/sysdev/dcr.c              |   6
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c          |   7
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c          | 187
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h          |   8
-rw-r--r--  arch/powerpc/sysdev/i8259.c            |   8
-rw-r--r--  arch/powerpc/sysdev/indirect_pci.c     |  10
-rw-r--r--  arch/powerpc/sysdev/ipic.c             |   7
-rw-r--r--  arch/powerpc/sysdev/ipic.h             |   3
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c       |  30
-rw-r--r--  arch/powerpc/sysdev/mpic.c             |  53
-rw-r--r--  arch/powerpc/sysdev/mpic.h             |   1
-rw-r--r--  arch/powerpc/sysdev/mpic_msi.c         |  13
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c       |  36
-rw-r--r--  arch/powerpc/sysdev/mv64x60.h          |   1
-rw-r--r--  arch/powerpc/sysdev/mv64x60_pic.c      |  12
-rw-r--r--  arch/powerpc/sysdev/mv64x60_udbg.c     | 152
-rw-r--r--  arch/powerpc/sysdev/pmi.c              |   6
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c        |  36
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c     |  38
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.h     |   3
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c     |  38
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c       | 270
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c  | 127
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c  |  48
-rw-r--r--  arch/powerpc/sysdev/timer.c            |  81
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c       |  16
-rw-r--r--  arch/powerpc/sysdev/uic.c              |  75
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c      | 151
36 files changed, 1527 insertions(+), 627 deletions(-)
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 08ce31e612c2..1a6f5641ebc8 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -6,7 +6,6 @@ mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o
 obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
 
 obj-$(CONFIG_PPC_MPC106) += grackle.o
-obj-$(CONFIG_PPC_DCR) += dcr.o
 obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
 obj-$(CONFIG_PPC_PMI) += pmi.o
 obj-$(CONFIG_U3_DART) += dart_iommu.o
@@ -16,25 +15,24 @@ obj-$(CONFIG_FSL_PCI) += fsl_pci.o
 obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
 mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
-obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o
+obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
+                         mv64x60_udbg.o
 obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
 obj-$(CONFIG_AXON_RAM) += axonram.o
 
-# contains only the suspend handler for time
-ifeq ($(CONFIG_RTC_CLASS),)
-obj-$(CONFIG_PM) += timer.o
-endif
-
 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
 obj-$(CONFIG_PPC_I8259) += i8259.o
 obj-$(CONFIG_PPC_83xx) += ipic.o
 obj-$(CONFIG_4xx) += uic.o
+obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
 endif
 
 # Temporary hack until we have migrated to asm-powerpc
 ifeq ($(ARCH),powerpc)
+obj-$(CONFIG_CPM) += cpm_common.o
 obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
+obj-$(CONFIG_PPC_DCR) += dcr.o
 obj-$(CONFIG_8xx) += mpc8xx_pic.o commproc.o
 obj-$(CONFIG_UCODE_PATCH) += micropatch.o
 endif
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ab037a3a40db..4d3ba63bba79 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -324,11 +324,13 @@ static struct of_device_id axon_ram_device_id[] = {
 };
 
 static struct of_platform_driver axon_ram_driver = {
-        .owner          = THIS_MODULE,
-        .name           = AXON_RAM_MODULE_NAME,
         .match_table    = axon_ram_device_id,
         .probe          = axon_ram_probe,
-        .remove         = axon_ram_remove
+        .remove         = axon_ram_remove,
+        .driver         = {
+                .owner = THIS_MODULE,
+                .name = AXON_RAM_MODULE_NAME,
+        },
 };
 
 /**
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
index dd5417aec1b4..f6a63780bbde 100644
--- a/arch/powerpc/sysdev/commproc.c
+++ b/arch/powerpc/sysdev/commproc.c
@@ -39,18 +39,21 @@
 #include <asm/tlbflush.h>
 #include <asm/rheap.h>
 #include <asm/prom.h>
+#include <asm/cpm.h>
 
 #include <asm/fs_pd.h>
 
 #define CPM_MAP_SIZE    (0x4000)
 
+#ifndef CONFIG_PPC_CPM_NEW_BINDING
 static void m8xx_cpm_dpinit(void);
+#endif
 static uint host_buffer;        /* One page of host buffer */
 static uint host_end;           /* end + 1 */
-cpm8xx_t *cpmp;                 /* Pointer to comm processor space */
-cpic8xx_t *cpic_reg;
+cpm8xx_t __iomem *cpmp;         /* Pointer to comm processor space */
+immap_t __iomem *mpc8xx_immr;
+static cpic8xx_t __iomem *cpic_reg;
 
-static struct device_node *cpm_pic_node;
 static struct irq_host *cpm_pic_host;
 
 static void cpm_mask_irq(unsigned int irq)
@@ -95,11 +98,6 @@ int cpm_get_irq(void)
         return irq_linear_revmap(cpm_pic_host, cpm_vec);
 }
 
-static int cpm_pic_host_match(struct irq_host *h, struct device_node *node)
-{
-        return cpm_pic_node == node;
-}
-
 static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
                             irq_hw_number_t hw)
 {
@@ -115,7 +113,7 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
  * and return.  This is a no-op function so we don't need any special
  * tests in the interrupt handler.
  */
-static  irqreturn_t cpm_error_interrupt(int irq, void *dev)
+static irqreturn_t cpm_error_interrupt(int irq, void *dev)
 {
         return IRQ_HANDLED;
 }
@@ -127,7 +125,6 @@ static struct irqaction cpm_error_irqaction = {
 };
 
 static struct irq_host_ops cpm_pic_host_ops = {
-        .match = cpm_pic_host_match,
         .map = cpm_pic_host_map,
 };
 
@@ -140,16 +137,19 @@ unsigned int cpm_pic_init(void)
 
         pr_debug("cpm_pic_init\n");
 
-        np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
+        np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
+        if (np == NULL)
+                np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
         if (np == NULL) {
                 printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
                 return sirq;
         }
+
         ret = of_address_to_resource(np, 0, &res);
         if (ret)
                 goto end;
 
-        cpic_reg = (void *)ioremap(res.start, res.end - res.start + 1);
+        cpic_reg = ioremap(res.start, res.end - res.start + 1);
         if (cpic_reg == NULL)
                 goto end;
 
@@ -165,23 +165,24 @@ unsigned int cpm_pic_init(void)
 
         out_be32(&cpic_reg->cpic_cimr, 0);
 
-        cpm_pic_node = of_node_get(np);
-
-        cpm_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64, &cpm_pic_host_ops, 64);
+        cpm_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
+                                      64, &cpm_pic_host_ops, 64);
         if (cpm_pic_host == NULL) {
                 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
                 sirq = NO_IRQ;
                 goto end;
         }
-        of_node_put(np);
 
         /* Install our own error handler. */
-        np = of_find_node_by_type(NULL, "cpm");
+        np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+        if (np == NULL)
+                np = of_find_node_by_type(NULL, "cpm");
         if (np == NULL) {
                 printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
                 goto end;
         }
-        eirq= irq_of_parse_and_map(np, 0);
+
+        eirq = irq_of_parse_and_map(np, 0);
         if (eirq == NO_IRQ)
                 goto end;
 
@@ -195,23 +196,30 @@ end:
         return sirq;
 }
 
-void cpm_reset(void)
+void __init cpm_reset(void)
 {
-        cpm8xx_t *commproc;
-        sysconf8xx_t *siu_conf;
+        sysconf8xx_t __iomem *siu_conf;
 
-        commproc = (cpm8xx_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
+        mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
+        if (!mpc8xx_immr) {
+                printk(KERN_CRIT "Could not map IMMR\n");
+                return;
+        }
 
-#ifdef CONFIG_UCODE_PATCH
+        cpmp = &mpc8xx_immr->im_cpm;
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
         /* Perform a reset.
          */
-        out_be16(&commproc->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
+        out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
 
         /* Wait for it.
          */
-        while (in_be16(&commproc->cp_cpcr) & CPM_CR_FLG);
+        while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
+#endif
 
-        cpm_load_patch(commproc);
+#ifdef CONFIG_UCODE_PATCH
+        cpm_load_patch(cpmp);
 #endif
 
         /* Set SDMA Bus Request priority 5.
@@ -220,16 +228,16 @@ void cpm_reset(void)
          * manual recommends it.
          * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
          */
-        siu_conf = (sysconf8xx_t*)immr_map(im_siu_conf);
+        siu_conf = immr_map(im_siu_conf);
         out_be32(&siu_conf->sc_sdcr, 1);
         immr_unmap(siu_conf);
 
+#ifdef CONFIG_PPC_CPM_NEW_BINDING
+        cpm_muram_init();
+#else
         /* Reclaim the DP memory for our use. */
         m8xx_cpm_dpinit();
-
-        /* Tell everyone where the comm processor resides.
-         */
-        cpmp = commproc;
+#endif
 }
 
 /* We used to do this earlier, but have to postpone as long as possible
@@ -279,22 +287,23 @@ m8xx_cpm_hostalloc(uint size)
 void
 cpm_setbrg(uint brg, uint rate)
 {
-        volatile uint *bp;
+        u32 __iomem *bp;
 
         /* This is good enough to get SMCs running.....
         */
-        bp = (uint *)&cpmp->cp_brgc1;
+        bp = &cpmp->cp_brgc1;
         bp += brg;
         /* The BRG has a 12-bit counter.  For really slow baud rates (or
          * really fast processors), we may have to further divide by 16.
         */
        if (((BRG_UART_CLK / rate) - 1) < 4096)
-                *bp = (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN;
+                out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
        else
-                *bp = (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
-                      CPM_BRG_EN | CPM_BRG_DIV16;
+                out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
+                              CPM_BRG_EN | CPM_BRG_DIV16);
 }
 
+#ifndef CONFIG_PPC_CPM_NEW_BINDING
 /*
  * dpalloc / dpfree bits.
  */
@@ -307,15 +316,15 @@ static rh_block_t cpm_boot_dpmem_rh_block[16];
 static rh_info_t cpm_dpmem_info;
 
 #define CPM_DPMEM_ALIGNMENT     8
-static u8* dpram_vbase;
-static uint dpram_pbase;
+static u8 __iomem *dpram_vbase;
+static phys_addr_t dpram_pbase;
 
-void m8xx_cpm_dpinit(void)
+static void m8xx_cpm_dpinit(void)
 {
         spin_lock_init(&cpm_dpmem_lock);
 
-        dpram_vbase = immr_map_size(im_cpm.cp_dpmem, CPM_DATAONLY_BASE + CPM_DATAONLY_SIZE);
-        dpram_pbase = (uint)&((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem;
+        dpram_vbase = cpmp->cp_dpmem;
+        dpram_pbase = get_immrbase() + offsetof(immap_t, im_cpm.cp_dpmem);
 
         /* Initialize the info header */
         rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
@@ -391,8 +400,210 @@ void *cpm_dpram_addr(unsigned long offset)
 }
 EXPORT_SYMBOL(cpm_dpram_addr);
 
-uint cpm_dpram_phys(u8* addr)
+uint cpm_dpram_phys(u8 *addr)
 {
         return (dpram_pbase + (uint)(addr - dpram_vbase));
 }
 EXPORT_SYMBOL(cpm_dpram_phys);
+#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
+
+struct cpm_ioport16 {
+        __be16 dir, par, sor, dat, intr;
+        __be16 res[3];
+};
+
+struct cpm_ioport32 {
+        __be32 dir, par, sor;
+};
+
+static void cpm1_set_pin32(int port, int pin, int flags)
+{
+        struct cpm_ioport32 __iomem *iop;
+        pin = 1 << (31 - pin);
+
+        if (port == CPM_PORTB)
+                iop = (struct cpm_ioport32 __iomem *)
+                      &mpc8xx_immr->im_cpm.cp_pbdir;
+        else
+                iop = (struct cpm_ioport32 __iomem *)
+                      &mpc8xx_immr->im_cpm.cp_pedir;
+
+        if (flags & CPM_PIN_OUTPUT)
+                setbits32(&iop->dir, pin);
+        else
+                clrbits32(&iop->dir, pin);
+
+        if (!(flags & CPM_PIN_GPIO))
+                setbits32(&iop->par, pin);
+        else
+                clrbits32(&iop->par, pin);
+
+        if (port == CPM_PORTE) {
+                if (flags & CPM_PIN_SECONDARY)
+                        setbits32(&iop->sor, pin);
+                else
+                        clrbits32(&iop->sor, pin);
+
+                if (flags & CPM_PIN_OPENDRAIN)
+                        setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
+                else
+                        clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
+        }
+}
+
+static void cpm1_set_pin16(int port, int pin, int flags)
+{
+        struct cpm_ioport16 __iomem *iop =
+                (struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;
+
+        pin = 1 << (15 - pin);
+
+        if (port != 0)
+                iop += port - 1;
+
+        if (flags & CPM_PIN_OUTPUT)
+                setbits16(&iop->dir, pin);
+        else
+                clrbits16(&iop->dir, pin);
+
+        if (!(flags & CPM_PIN_GPIO))
+                setbits16(&iop->par, pin);
+        else
+                clrbits16(&iop->par, pin);
+
+        if (port == CPM_PORTC) {
+                if (flags & CPM_PIN_SECONDARY)
+                        setbits16(&iop->sor, pin);
+                else
+                        clrbits16(&iop->sor, pin);
+        }
+}
+
+void cpm1_set_pin(enum cpm_port port, int pin, int flags)
+{
+        if (port == CPM_PORTB || port == CPM_PORTE)
+                cpm1_set_pin32(port, pin, flags);
+        else
+                cpm1_set_pin16(port, pin, flags);
+}
+
+int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
+{
+        int shift;
+        int i, bits = 0;
+        u32 __iomem *reg;
+        u32 mask = 7;
+
+        u8 clk_map[][3] = {
+                {CPM_CLK_SCC1, CPM_BRG1, 0},
+                {CPM_CLK_SCC1, CPM_BRG2, 1},
+                {CPM_CLK_SCC1, CPM_BRG3, 2},
+                {CPM_CLK_SCC1, CPM_BRG4, 3},
+                {CPM_CLK_SCC1, CPM_CLK1, 4},
+                {CPM_CLK_SCC1, CPM_CLK2, 5},
+                {CPM_CLK_SCC1, CPM_CLK3, 6},
+                {CPM_CLK_SCC1, CPM_CLK4, 7},
+
+                {CPM_CLK_SCC2, CPM_BRG1, 0},
+                {CPM_CLK_SCC2, CPM_BRG2, 1},
+                {CPM_CLK_SCC2, CPM_BRG3, 2},
+                {CPM_CLK_SCC2, CPM_BRG4, 3},
+                {CPM_CLK_SCC2, CPM_CLK1, 4},
+                {CPM_CLK_SCC2, CPM_CLK2, 5},
+                {CPM_CLK_SCC2, CPM_CLK3, 6},
+                {CPM_CLK_SCC2, CPM_CLK4, 7},
+
+                {CPM_CLK_SCC3, CPM_BRG1, 0},
+                {CPM_CLK_SCC3, CPM_BRG2, 1},
+                {CPM_CLK_SCC3, CPM_BRG3, 2},
+                {CPM_CLK_SCC3, CPM_BRG4, 3},
+                {CPM_CLK_SCC3, CPM_CLK5, 4},
+                {CPM_CLK_SCC3, CPM_CLK6, 5},
+                {CPM_CLK_SCC3, CPM_CLK7, 6},
+                {CPM_CLK_SCC3, CPM_CLK8, 7},
+
+                {CPM_CLK_SCC4, CPM_BRG1, 0},
+                {CPM_CLK_SCC4, CPM_BRG2, 1},
+                {CPM_CLK_SCC4, CPM_BRG3, 2},
+                {CPM_CLK_SCC4, CPM_BRG4, 3},
+                {CPM_CLK_SCC4, CPM_CLK5, 4},
+                {CPM_CLK_SCC4, CPM_CLK6, 5},
+                {CPM_CLK_SCC4, CPM_CLK7, 6},
+                {CPM_CLK_SCC4, CPM_CLK8, 7},
+
+                {CPM_CLK_SMC1, CPM_BRG1, 0},
+                {CPM_CLK_SMC1, CPM_BRG2, 1},
+                {CPM_CLK_SMC1, CPM_BRG3, 2},
+                {CPM_CLK_SMC1, CPM_BRG4, 3},
+                {CPM_CLK_SMC1, CPM_CLK1, 4},
+                {CPM_CLK_SMC1, CPM_CLK2, 5},
+                {CPM_CLK_SMC1, CPM_CLK3, 6},
+                {CPM_CLK_SMC1, CPM_CLK4, 7},
+
+                {CPM_CLK_SMC2, CPM_BRG1, 0},
+                {CPM_CLK_SMC2, CPM_BRG2, 1},
+                {CPM_CLK_SMC2, CPM_BRG3, 2},
+                {CPM_CLK_SMC2, CPM_BRG4, 3},
+                {CPM_CLK_SMC2, CPM_CLK5, 4},
+                {CPM_CLK_SMC2, CPM_CLK6, 5},
+                {CPM_CLK_SMC2, CPM_CLK7, 6},
+                {CPM_CLK_SMC2, CPM_CLK8, 7},
+        };
+
+        switch (target) {
+        case CPM_CLK_SCC1:
+                reg = &mpc8xx_immr->im_cpm.cp_sicr;
+                shift = 0;
+                break;
+
+        case CPM_CLK_SCC2:
+                reg = &mpc8xx_immr->im_cpm.cp_sicr;
+                shift = 8;
+                break;
+
+        case CPM_CLK_SCC3:
+                reg = &mpc8xx_immr->im_cpm.cp_sicr;
+                shift = 16;
+                break;
+
+        case CPM_CLK_SCC4:
+                reg = &mpc8xx_immr->im_cpm.cp_sicr;
+                shift = 24;
+                break;
+
+        case CPM_CLK_SMC1:
+                reg = &mpc8xx_immr->im_cpm.cp_simode;
+                shift = 12;
+                break;
+
+        case CPM_CLK_SMC2:
+                reg = &mpc8xx_immr->im_cpm.cp_simode;
+                shift = 28;
+                break;
+
+        default:
+                printk(KERN_ERR "cpm1_clock_setup: invalid clock target\n");
+                return -EINVAL;
+        }
+
+        if (reg == &mpc8xx_immr->im_cpm.cp_sicr && mode == CPM_CLK_RX)
+                shift += 3;
+
+        for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+                if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+                        bits = clk_map[i][2];
+                        break;
+                }
+        }
+
+        if (i == ARRAY_SIZE(clk_map)) {
+                printk(KERN_ERR "cpm1_clock_setup: invalid clock combination\n");
+                return -EINVAL;
+        }
+
+        bits <<= shift;
+        mask <<= shift;
+        out_be32(reg, (in_be32(reg) & ~mask) | bits);
+
+        return 0;
+}
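
For orientation, here is a sketch of how 8xx board-setup code might drive the cpm1_set_pin() and cpm1_clk_setup() helpers added above. The port, pin and baud-rate choices are illustrative assumptions, and CPM_CLK_TX is assumed to come from the same clock-direction enum as CPM_CLK_RX; none of this is part of the patch itself.

        static void __init example_smc1_ioconfig(void)
        {
                /* Two port B pins as dedicated peripheral I/O: no flags set
                 * means input direction, primary function, not GPIO. */
                cpm1_set_pin(CPM_PORTB, 24, 0);
                cpm1_set_pin(CPM_PORTB, 25, 0);

                /* SMCs have a single clock mux field, so one call covers RX and TX. */
                cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_TX);

                /* BRG1 (index 0) at 115200 baud via the existing cpm_setbrg(). */
                cpm_setbrg(0, 115200);
        }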
diff --git a/arch/powerpc/sysdev/commproc.h b/arch/powerpc/sysdev/commproc.h
new file mode 100644
index 000000000000..9155ba467274
--- /dev/null
+++ b/arch/powerpc/sysdev/commproc.h
@@ -0,0 +1,12 @@
+#ifndef _POWERPC_SYSDEV_COMMPROC_H
+#define _POWERPC_SYSDEV_COMMPROC_H
+
+extern void cpm_reset(void);
+extern void mpc8xx_restart(char *cmd);
+extern void mpc8xx_calibrate_decr(void);
+extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
+extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
+extern void m8xx_pic_init(void);
+extern unsigned int mpc8xx_get_irq(void);
+
+#endif
diff --git a/arch/powerpc/sysdev/cpm2_common.c b/arch/powerpc/sysdev/cpm2_common.c
index c827715a5090..859362fecb7c 100644
--- a/arch/powerpc/sysdev/cpm2_common.c
+++ b/arch/powerpc/sysdev/cpm2_common.c
@@ -33,6 +33,8 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
+
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/mpc8260.h>
@@ -44,14 +46,16 @@
 
 #include <sysdev/fsl_soc.h>
 
+#ifndef CONFIG_PPC_CPM_NEW_BINDING
 static void cpm2_dpinit(void);
-cpm_cpm2_t *cpmp; /* Pointer to comm processor space */
+#endif
+
+cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */
 
 /* We allocate this here because it is used almost exclusively for
  * the communication processor devices.
  */
-cpm2_map_t *cpm2_immr;
-intctl_cpm2_t *cpm2_intctl;
+cpm2_map_t __iomem *cpm2_immr;
 
 #define CPM_MAP_SIZE    (0x40000)       /* 256k - the PQ3 reserve this amount
                                            of space for CPM as it is larger
@@ -60,12 +64,19 @@ intctl_cpm2_t *cpm2_intctl;
 void
 cpm2_reset(void)
 {
-        cpm2_immr = (cpm2_map_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
-        cpm2_intctl = cpm2_map(im_intctl);
+#ifdef CONFIG_PPC_85xx
+        cpm2_immr = ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
+#else
+        cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
+#endif
 
         /* Reclaim the DP memory for our use.
          */
+#ifdef CONFIG_PPC_CPM_NEW_BINDING
+        cpm_muram_init();
+#else
         cpm2_dpinit();
+#endif
 
         /* Tell everyone where the comm processor resides.
          */
@@ -91,7 +102,7 @@ cpm2_reset(void)
 void
 cpm_setbrg(uint brg, uint rate)
 {
-        volatile uint *bp;
+        u32 __iomem *bp;
 
         /* This is good enough to get SMCs running.....
          */
@@ -113,7 +124,8 @@ cpm_setbrg(uint brg, uint rate)
 void
 cpm2_fastbrg(uint brg, uint rate, int div16)
 {
-        volatile uint *bp;
+        u32 __iomem *bp;
+        u32 val;
 
         if (brg < 4) {
                 bp = cpm2_map_size(im_brgc1, 16);
@@ -123,10 +135,11 @@ cpm2_fastbrg(uint brg, uint rate, int div16)
                 brg -= 4;
         }
         bp += brg;
-        *bp = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
+        val = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN;
         if (div16)
-                *bp |= CPM_BRG_DIV16;
+                val |= CPM_BRG_DIV16;
 
+        out_be32(bp, val);
         cpm2_unmap(bp);
 }
 
@@ -135,10 +148,11 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
         int ret = 0;
         int shift;
         int i, bits = 0;
-        cpmux_t *im_cpmux;
-        u32 *reg;
+        cpmux_t __iomem *im_cpmux;
+        u32 __iomem *reg;
         u32 mask = 7;
-        u8 clk_map [24][3] = {
+
+        u8 clk_map[][3] = {
                 {CPM_CLK_FCC1, CPM_BRG5, 0},
                 {CPM_CLK_FCC1, CPM_BRG6, 1},
                 {CPM_CLK_FCC1, CPM_BRG7, 2},
@@ -162,8 +176,40 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
                 {CPM_CLK_FCC3, CPM_CLK13, 4},
                 {CPM_CLK_FCC3, CPM_CLK14, 5},
                 {CPM_CLK_FCC3, CPM_CLK15, 6},
-                {CPM_CLK_FCC3, CPM_CLK16, 7}
-        };
+                {CPM_CLK_FCC3, CPM_CLK16, 7},
+                {CPM_CLK_SCC1, CPM_BRG1, 0},
+                {CPM_CLK_SCC1, CPM_BRG2, 1},
+                {CPM_CLK_SCC1, CPM_BRG3, 2},
+                {CPM_CLK_SCC1, CPM_BRG4, 3},
+                {CPM_CLK_SCC1, CPM_CLK11, 4},
+                {CPM_CLK_SCC1, CPM_CLK12, 5},
+                {CPM_CLK_SCC1, CPM_CLK3, 6},
+                {CPM_CLK_SCC1, CPM_CLK4, 7},
+                {CPM_CLK_SCC2, CPM_BRG1, 0},
+                {CPM_CLK_SCC2, CPM_BRG2, 1},
+                {CPM_CLK_SCC2, CPM_BRG3, 2},
+                {CPM_CLK_SCC2, CPM_BRG4, 3},
+                {CPM_CLK_SCC2, CPM_CLK11, 4},
+                {CPM_CLK_SCC2, CPM_CLK12, 5},
+                {CPM_CLK_SCC2, CPM_CLK3, 6},
+                {CPM_CLK_SCC2, CPM_CLK4, 7},
+                {CPM_CLK_SCC3, CPM_BRG1, 0},
+                {CPM_CLK_SCC3, CPM_BRG2, 1},
+                {CPM_CLK_SCC3, CPM_BRG3, 2},
+                {CPM_CLK_SCC3, CPM_BRG4, 3},
+                {CPM_CLK_SCC3, CPM_CLK5, 4},
+                {CPM_CLK_SCC3, CPM_CLK6, 5},
+                {CPM_CLK_SCC3, CPM_CLK7, 6},
+                {CPM_CLK_SCC3, CPM_CLK8, 7},
+                {CPM_CLK_SCC4, CPM_BRG1, 0},
+                {CPM_CLK_SCC4, CPM_BRG2, 1},
+                {CPM_CLK_SCC4, CPM_BRG3, 2},
+                {CPM_CLK_SCC4, CPM_BRG4, 3},
+                {CPM_CLK_SCC4, CPM_CLK5, 4},
+                {CPM_CLK_SCC4, CPM_CLK6, 5},
+                {CPM_CLK_SCC4, CPM_CLK7, 6},
+                {CPM_CLK_SCC4, CPM_CLK8, 7},
+        };
 
         im_cpmux = cpm2_map(im_cpmux);
 
@@ -201,25 +247,83 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
         }
 
         if (mode == CPM_CLK_RX)
-                shift +=3;
+                shift += 3;
 
-        for (i=0; i<24; i++) {
+        for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
                 if (clk_map[i][0] == target && clk_map[i][1] == clock) {
                         bits = clk_map[i][2];
                         break;
                 }
         }
-        if (i == sizeof(clk_map)/3)
+        if (i == ARRAY_SIZE(clk_map))
                 ret = -EINVAL;
 
         bits <<= shift;
         mask <<= shift;
+
         out_be32(reg, (in_be32(reg) & ~mask) | bits);
 
         cpm2_unmap(im_cpmux);
         return ret;
 }
 
+int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
+{
+        int ret = 0;
+        int shift;
+        int i, bits = 0;
+        cpmux_t __iomem *im_cpmux;
+        u8 __iomem *reg;
+        u8 mask = 3;
+
+        u8 clk_map[][3] = {
+                {CPM_CLK_SMC1, CPM_BRG1, 0},
+                {CPM_CLK_SMC1, CPM_BRG7, 1},
+                {CPM_CLK_SMC1, CPM_CLK7, 2},
+                {CPM_CLK_SMC1, CPM_CLK9, 3},
+                {CPM_CLK_SMC2, CPM_BRG2, 0},
+                {CPM_CLK_SMC2, CPM_BRG8, 1},
+                {CPM_CLK_SMC2, CPM_CLK4, 2},
+                {CPM_CLK_SMC2, CPM_CLK15, 3},
+        };
+
+        im_cpmux = cpm2_map(im_cpmux);
+
+        switch (target) {
+        case CPM_CLK_SMC1:
+                reg = &im_cpmux->cmx_smr;
+                mask = 3;
+                shift = 4;
+                break;
+        case CPM_CLK_SMC2:
+                reg = &im_cpmux->cmx_smr;
+                mask = 3;
+                shift = 0;
+                break;
+        default:
+                printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n");
+                return -EINVAL;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+                if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+                        bits = clk_map[i][2];
+                        break;
+                }
+        }
+        if (i == ARRAY_SIZE(clk_map))
+                ret = -EINVAL;
+
+        bits <<= shift;
+        mask <<= shift;
+
+        out_8(reg, (in_8(reg) & ~mask) | bits);
+
+        cpm2_unmap(im_cpmux);
+        return ret;
+}
+
+#ifndef CONFIG_PPC_CPM_NEW_BINDING
 /*
  * dpalloc / dpfree bits.
  */
@@ -228,20 +332,20 @@ static spinlock_t cpm_dpmem_lock;
  * until the memory subsystem goes up... */
 static rh_block_t cpm_boot_dpmem_rh_block[16];
 static rh_info_t cpm_dpmem_info;
-static u8* im_dprambase;
+static u8 __iomem *im_dprambase;
 
 static void cpm2_dpinit(void)
 {
         spin_lock_init(&cpm_dpmem_lock);
 
-        im_dprambase = ioremap(CPM_MAP_ADDR, CPM_DATAONLY_BASE + CPM_DATAONLY_SIZE);
-
         /* initialize the info header */
         rh_init(&cpm_dpmem_info, 1,
                         sizeof(cpm_boot_dpmem_rh_block) /
                         sizeof(cpm_boot_dpmem_rh_block[0]),
                         cpm_boot_dpmem_rh_block);
 
+        im_dprambase = cpm2_immr;
+
         /* Attach the usable dpmem area */
         /* XXX: This is actually crap.  CPM_DATAONLY_BASE and
          * CPM_DATAONLY_SIZE is only a subset of the available dpram. It
@@ -306,3 +410,37 @@ void *cpm_dpram_addr(unsigned long offset)
         return (void *)(im_dprambase + offset);
 }
 EXPORT_SYMBOL(cpm_dpram_addr);
+#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
+
+struct cpm2_ioports {
+        u32 dir, par, sor, odr, dat;
+        u32 res[3];
+};
+
+void cpm2_set_pin(int port, int pin, int flags)
+{
+        struct cpm2_ioports __iomem *iop =
+                (struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
+
+        pin = 1 << (31 - pin);
+
+        if (flags & CPM_PIN_OUTPUT)
+                setbits32(&iop[port].dir, pin);
+        else
+                clrbits32(&iop[port].dir, pin);
+
+        if (!(flags & CPM_PIN_GPIO))
+                setbits32(&iop[port].par, pin);
+        else
+                clrbits32(&iop[port].par, pin);
+
+        if (flags & CPM_PIN_SECONDARY)
+                setbits32(&iop[port].sor, pin);
+        else
+                clrbits32(&iop[port].sor, pin);
+
+        if (flags & CPM_PIN_OPENDRAIN)
+                setbits32(&iop[port].odr, pin);
+        else
+                clrbits32(&iop[port].odr, pin);
+}
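
Similarly, a hedged sketch of a CPM2 board hook using the helpers added above; the port index, pin and clock routing are made-up examples (only combinations present in the clk_map tables are used), and CPM_CLK_TX is assumed from the same enum as CPM_CLK_RX.

        static void __init example_cpm2_ioconfig(void)
        {
                /* Port D (index 3), pin 9: dedicated output on its secondary function. */
                cpm2_set_pin(3, 9, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY);

                /* SMC1 clocked from BRG1 through the CMXSMR mux. */
                cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG1);

                /* FCC3: transmit from CLK15, receive from CLK13. */
                cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_TX);
                cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK13, CPM_CLK_RX);
        }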
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index eabfe06fe05c..5fe65b2f8f3a 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -48,9 +48,8 @@
 #define CPM2_IRQ_PORTC15        48
 #define CPM2_IRQ_PORTC0         63
 
-static intctl_cpm2_t *cpm2_intctl;
+static intctl_cpm2_t __iomem *cpm2_intctl;
 
-static struct device_node *cpm2_pic_node;
 static struct irq_host *cpm2_pic_host;
 #define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
 static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
@@ -206,11 +205,6 @@ unsigned int cpm2_get_irq(void)
         return irq_linear_revmap(cpm2_pic_host, irq);
 }
 
-static int cpm2_pic_host_match(struct irq_host *h, struct device_node *node)
-{
-        return cpm2_pic_node == node;
-}
-
 static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
                              irq_hw_number_t hw)
 {
@@ -234,7 +228,6 @@ static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct,
 }
 
 static struct irq_host_ops cpm2_pic_host_ops = {
-        .match = cpm2_pic_host_match,
         .map = cpm2_pic_host_map,
         .xlate = cpm2_pic_host_xlate,
 };
@@ -273,8 +266,8 @@ void cpm2_pic_init(struct device_node *node)
         out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
 
         /* create a legacy host */
-        cpm2_pic_node = of_node_get(node);
-        cpm2_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64, &cpm2_pic_host_ops, 64);
+        cpm2_pic_host = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
+                                       64, &cpm2_pic_host_ops, 64);
         if (cpm2_pic_host == NULL) {
                 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
                 return;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
new file mode 100644
index 000000000000..66c8ad4cfce6
--- /dev/null
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -0,0 +1,205 @@
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/of_device.h>
+
+#include <asm/udbg.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/rheap.h>
+#include <asm/cpm.h>
+
+#include <mm/mmu_decl.h>
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
+static u32 __iomem *cpm_udbg_txdesc =
+        (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+
+static void udbg_putc_cpm(char c)
+{
+        u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
+
+        if (c == '\n')
+                udbg_putc('\r');
+
+        while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
+                ;
+
+        out_8(txbuf, c);
+        out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
+}
+
+void __init udbg_init_cpm(void)
+{
+        if (cpm_udbg_txdesc) {
+#ifdef CONFIG_CPM2
+                setbat(1, 0xf0000000, 0xf0000000, 1024*1024, _PAGE_IO);
+#endif
+                udbg_putc = udbg_putc_cpm;
+                udbg_putc('X');
+        }
+}
+#endif
+
+#ifdef CONFIG_PPC_CPM_NEW_BINDING
+static spinlock_t cpm_muram_lock;
+static rh_block_t cpm_boot_muram_rh_block[16];
+static rh_info_t cpm_muram_info;
+static u8 __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS       4
+
+int __init cpm_muram_init(void)
+{
+        struct device_node *np;
+        struct resource r;
+        u32 zero[OF_MAX_ADDR_CELLS] = {};
+        resource_size_t max = 0;
+        int i = 0;
+        int ret = 0;
+
+        printk("cpm_muram_init\n");
+
+        spin_lock_init(&cpm_muram_lock);
+        /* initialize the info header */
+        rh_init(&cpm_muram_info, 1,
+                sizeof(cpm_boot_muram_rh_block) /
+                sizeof(cpm_boot_muram_rh_block[0]),
+                cpm_boot_muram_rh_block);
+
+        np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+        if (!np) {
+                printk(KERN_ERR "Cannot find CPM muram data node");
+                ret = -ENODEV;
+                goto out;
+        }
+
+        muram_pbase = of_translate_address(np, zero);
+        if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+                printk(KERN_ERR "Cannot translate zero through CPM muram node");
+                ret = -ENODEV;
+                goto out;
+        }
+
+        while (of_address_to_resource(np, i++, &r) == 0) {
+                if (r.end > max)
+                        max = r.end;
+
+                rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
+                                 r.end - r.start + 1);
+        }
+
+        muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+        if (!muram_vbase) {
+                printk(KERN_ERR "Cannot map CPM muram");
+                ret = -ENOMEM;
+        }
+
+out:
+        of_node_put(np);
+        return ret;
+}
+
+/**
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+        unsigned long start;
+        unsigned long flags;
+
+        spin_lock_irqsave(&cpm_muram_lock, flags);
+        cpm_muram_info.alignment = align;
+        start = rh_alloc(&cpm_muram_info, size, "commproc");
+        spin_unlock_irqrestore(&cpm_muram_lock, flags);
+
+        return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+int cpm_muram_free(unsigned long offset)
+{
+        int ret;
+        unsigned long flags;
+
+        spin_lock_irqsave(&cpm_muram_lock, flags);
+        ret = rh_free(&cpm_muram_info, offset);
+        spin_unlock_irqrestore(&cpm_muram_lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/**
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: the offset into the muram area to reserve
+ * @size: the number of bytes to reserve
+ *
+ * This function returns "start" on success, -ENOMEM on failure.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+        unsigned long start;
+        unsigned long flags;
+
+        spin_lock_irqsave(&cpm_muram_lock, flags);
+        cpm_muram_info.alignment = 1;
+        start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
+        spin_unlock_irqrestore(&cpm_muram_lock, flags);
+
+        return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+        return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+/**
+ * cpm_muram_phys - turn a muram virtual address into a DMA address
+ * @offset: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+        return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
+
+#endif /* CONFIG_PPC_CPM_NEW_BINDING */
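
The muram allocator above is meant to be consumed roughly like this (a minimal sketch; the 64-byte size, the 8-byte alignment and the IS_ERR_VALUE() check of rh_alloc()'s failure convention are assumptions, not requirements of the API):

        unsigned long offset = cpm_muram_alloc(64, 8);  /* parameter RAM chunk */
        void __iomem *pram;

        if (IS_ERR_VALUE(offset))
                return -ENOMEM;

        pram = cpm_muram_addr(offset);          /* CPU-side mapping */
        /* hand cpm_muram_dma(pram) to the CPM; access the area through pram */

        cpm_muram_free(offset);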
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index a1d2042bb304..e0e24b01e3a6 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -204,7 +204,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
 }
 
 
-static int dart_init(struct device_node *dart_node)
+static int __init dart_init(struct device_node *dart_node)
 {
         unsigned int i;
         unsigned long tmp, base, size;
@@ -313,7 +313,7 @@ static void pci_dma_bus_setup_dart(struct pci_bus *bus)
         PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
-void iommu_init_early_dart(void)
+void __init iommu_init_early_dart(void)
 {
         struct device_node *dn;
 
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 574b6ef44e0b..ab11c0b29024 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -33,6 +33,7 @@ unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
 
         return dr[index * 2];
 }
+EXPORT_SYMBOL_GPL(dcr_resource_start);
 
 unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
 {
@@ -44,6 +45,7 @@ unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
 
         return dr[index * 2 + 1];
 }
+EXPORT_SYMBOL_GPL(dcr_resource_len);
 
 #ifndef CONFIG_PPC_DCR_NATIVE
 
@@ -102,7 +104,7 @@ u64 of_translate_dcr_address(struct device_node *dev,
 dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
                    unsigned int dcr_c)
 {
-        dcr_host_t ret = { .token = NULL, .stride = 0 };
+        dcr_host_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
         u64 addr;
 
         pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
@@ -122,6 +124,7 @@ dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
         ret.token -= dcr_n * ret.stride;
         return ret;
 }
+EXPORT_SYMBOL_GPL(dcr_map);
 
 void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c)
 {
@@ -133,5 +136,6 @@ void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c)
         iounmap(h.token);
         h.token = NULL;
 }
+EXPORT_SYMBOL_GPL(dcr_unmap);
 
 #endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
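
With dcr_map()/dcr_unmap() now exported, a 4xx driver module can do something like the following; the device node, resource index and the DCR_MAP_OK() check (assumed to come from <asm/dcr.h>) are illustrative, not guarantees made by this patch:

        unsigned int base = dcr_resource_start(np, 0);
        unsigned int len = dcr_resource_len(np, 0);
        dcr_host_t host = dcr_map(np, base, len);

        if (!DCR_MAP_OK(host))
                return -ENODEV;
        /* ... dcr_read()/dcr_write() against the mapped range ... */
        dcr_unmap(host, base, len);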
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 114c90f8f560..af090c93be10 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -160,8 +160,8 @@ static void __init quirk_fsl_pcie_transparent(struct pci_dev *dev)
 
 int __init fsl_pcie_check_link(struct pci_controller *hose)
 {
-        u16 val;
-        early_read_config_word(hose, 0, 0, PCIE_LTSSM, &val);
+        u32 val;
+        early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
         if (val < PCIE_LTSSM_L0)
                 return 1;
         return 0;
@@ -255,5 +255,8 @@ DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_transpare
 DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_transparent);
 DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_transparent);
 DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_transparent)
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_transparent);
 DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_transparent);
 DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_transparent);
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 1cf29c9d4408..3ace7474809e 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -24,6 +24,7 @@
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/phy.h>
+#include <linux/spi/spi.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
@@ -52,13 +53,13 @@ phys_addr_t get_immrbase(void)
 
         soc = of_find_node_by_type(NULL, "soc");
         if (soc) {
-                unsigned int size;
+                int size;
                 const void *prop = of_get_property(soc, "reg", &size);
 
                 if (prop)
                         immrbase = of_translate_address(soc, prop);
                 of_node_put(soc);
-        };
+        }
 
         return immrbase;
 }
@@ -72,20 +73,31 @@ static u32 brgfreq = -1;
 u32 get_brgfreq(void)
 {
         struct device_node *node;
+        const unsigned int *prop;
+        int size;
 
         if (brgfreq != -1)
                 return brgfreq;
 
-        node = of_find_node_by_type(NULL, "cpm");
+        node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
         if (node) {
-                unsigned int size;
-                const unsigned int *prop = of_get_property(node,
-                                "brg-frequency", &size);
+                prop = of_get_property(node, "clock-frequency", &size);
+                if (prop && size == 4)
+                        brgfreq = *prop;
 
-                if (prop)
+                of_node_put(node);
+                return brgfreq;
+        }
+
+        /* Legacy device binding -- will go away when no users are left. */
+        node = of_find_node_by_type(NULL, "cpm");
+        if (node) {
+                prop = of_get_property(node, "brg-frequency", &size);
+                if (prop && size == 4)
                         brgfreq = *prop;
+
                 of_node_put(node);
-        };
+        }
 
         return brgfreq;
 }
@@ -103,14 +115,14 @@ u32 get_baudrate(void)
 
         node = of_find_node_by_type(NULL, "serial");
         if (node) {
-                unsigned int size;
+                int size;
                 const unsigned int *prop = of_get_property(node,
                                 "current-speed", &size);
 
                 if (prop)
                         fs_baudrate = *prop;
                 of_node_put(node);
-        };
+        }
 
         return fs_baudrate;
 }
@@ -319,34 +331,46 @@ static struct i2c_driver_device i2c_devices[] __initdata = {
         {"ricoh,rs5c372b", "rtc-rs5c372", "rs5c372b",},
         {"ricoh,rv5c386", "rtc-rs5c372", "rv5c386",},
         {"ricoh,rv5c387a", "rtc-rs5c372", "rv5c387a",},
+        {"dallas,ds1307", "rtc-ds1307", "ds1307",},
+        {"dallas,ds1337", "rtc-ds1307", "ds1337",},
+        {"dallas,ds1338", "rtc-ds1307", "ds1338",},
+        {"dallas,ds1339", "rtc-ds1307", "ds1339",},
+        {"dallas,ds1340", "rtc-ds1307", "ds1340",},
+        {"stm,m41t00", "rtc-ds1307", "m41t00"},
+        {"dallas,ds1374", "rtc-ds1374", "rtc-ds1374",},
 };
 
-static int __init of_find_i2c_driver(struct device_node *node, struct i2c_board_info *info)
+static int __init of_find_i2c_driver(struct device_node *node,
+                                     struct i2c_board_info *info)
 {
         int i;
 
         for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
                 if (!of_device_is_compatible(node, i2c_devices[i].of_device))
                         continue;
-                strncpy(info->driver_name, i2c_devices[i].i2c_driver, KOBJ_NAME_LEN);
-                strncpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE);
+                if (strlcpy(info->driver_name, i2c_devices[i].i2c_driver,
+                            KOBJ_NAME_LEN) >= KOBJ_NAME_LEN ||
+                    strlcpy(info->type, i2c_devices[i].i2c_type,
+                            I2C_NAME_SIZE) >= I2C_NAME_SIZE)
+                        return -ENOMEM;
                 return 0;
         }
         return -ENODEV;
 }
 
-static void __init of_register_i2c_devices(struct device_node *adap_node, int bus_num)
+static void __init of_register_i2c_devices(struct device_node *adap_node,
+                                           int bus_num)
 {
         struct device_node *node = NULL;
 
         while ((node = of_get_next_child(adap_node, node))) {
-                struct i2c_board_info info;
+                struct i2c_board_info info = {};
                 const u32 *addr;
                 int len;
 
                 addr = of_get_property(node, "reg", &len);
                 if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) {
-                        printk(KERN_WARNING "fsl_ioc.c: invalid i2c device entry\n");
+                        printk(KERN_WARNING "fsl_soc.c: invalid i2c device entry\n");
                         continue;
                 }
 
@@ -357,7 +381,6 @@ static void __init of_register_i2c_devices(struct device_node *adap_node, int bu
                 if (of_find_i2c_driver(node, &info) < 0)
                         continue;
 
-                info.platform_data = NULL;
                 info.addr = *addr;
 
                 i2c_register_board_info(bus_num, &info, 1);
@@ -648,6 +671,7 @@ err:
 
 arch_initcall(fsl_usb_of_init);
 
+#ifndef CONFIG_PPC_CPM_NEW_BINDING
 #ifdef CONFIG_CPM2
 
 extern void init_scc_ioports(struct fs_uart_platform_info*);
@@ -1187,3 +1211,132 @@ err:
 arch_initcall(cpm_smc_uart_of_init);
 
 #endif /* CONFIG_8xx */
+#endif /* CONFIG_PPC_CPM_NEW_BINDING */
+
+int __init fsl_spi_init(struct spi_board_info *board_infos,
+                        unsigned int num_board_infos,
+                        void (*activate_cs)(u8 cs, u8 polarity),
+                        void (*deactivate_cs)(u8 cs, u8 polarity))
+{
+        struct device_node *np;
+        unsigned int i;
+        const u32 *sysclk;
+
+        /* SPI controller is either clocked from QE or SoC clock */
+        np = of_find_node_by_type(NULL, "qe");
+        if (!np)
+                np = of_find_node_by_type(NULL, "soc");
+
+        if (!np)
+                return -ENODEV;
+
+        sysclk = of_get_property(np, "bus-frequency", NULL);
+        if (!sysclk)
+                return -ENODEV;
+
+        for (np = NULL, i = 1;
+             (np = of_find_compatible_node(np, "spi", "fsl_spi")) != NULL;
+             i++) {
+                int ret = 0;
+                unsigned int j;
+                const void *prop;
+                struct resource res[2];
+                struct platform_device *pdev;
+                struct fsl_spi_platform_data pdata = {
+                        .activate_cs = activate_cs,
+                        .deactivate_cs = deactivate_cs,
+                };
+
+                memset(res, 0, sizeof(res));
+
+                pdata.sysclk = *sysclk;
+
+                prop = of_get_property(np, "reg", NULL);
+                if (!prop)
+                        goto err;
+                pdata.bus_num = *(u32 *)prop;
+
+                prop = of_get_property(np, "mode", NULL);
+                if (prop && !strcmp(prop, "cpu-qe"))
+                        pdata.qe_mode = 1;
+
+                for (j = 0; j < num_board_infos; j++) {
+                        if (board_infos[j].bus_num == pdata.bus_num)
+                                pdata.max_chipselect++;
+                }
+
+                if (!pdata.max_chipselect)
+                        goto err;
+
+                ret = of_address_to_resource(np, 0, &res[0]);
+                if (ret)
+                        goto err;
+
+                ret = of_irq_to_resource(np, 0, &res[1]);
+                if (ret == NO_IRQ)
+                        goto err;
+
+                pdev = platform_device_alloc("mpc83xx_spi", i);
+                if (!pdev)
+                        goto err;
+
+                ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+                if (ret)
+                        goto unreg;
+
+                ret = platform_device_add_resources(pdev, res,
+                                                    ARRAY_SIZE(res));
+                if (ret)
+                        goto unreg;
+
+                ret = platform_device_register(pdev);
+                if (ret)
+                        goto unreg;
+
+                continue;
+unreg:
+                platform_device_del(pdev);
+err:
+                continue;
+        }
+
+        return spi_register_board_info(board_infos, num_board_infos);
+}
+
+#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
+static __be32 __iomem *rstcr;
+
+static int __init setup_rstcr(void)
+{
+        struct device_node *np;
+        np = of_find_node_by_name(NULL, "global-utilities");
+        if ((np && of_get_property(np, "fsl,has-rstcr", NULL))) {
+                const u32 *prop = of_get_property(np, "reg", NULL);
+                if (prop) {
+                        /* map reset control register
+                         * 0xE00B0 is offset of reset control register
+                         */
+                        rstcr = ioremap(get_immrbase() + *prop + 0xB0, 0xff);
+                        if (!rstcr)
+                                printk (KERN_EMERG "Error: reset control "
+                                                "register not mapped!\n");
+                }
+        } else
+                printk (KERN_INFO "rstcr compatible register does not exist!\n");
+        if (np)
+                of_node_put(np);
+        return 0;
+}
+
+arch_initcall(setup_rstcr);
+
+void fsl_rstcr_restart(char *cmd)
+{
+        local_irq_disable();
+        if (rstcr)
+                /* set reset control register */
+                out_be32(rstcr, 0x2);   /* HRESET_REQ */
+
+        while (1) ;
+}
+#endif
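
A hedged example of how platform code is expected to feed fsl_spi_init(); the chip-select callbacks, modalias, bus number (matching the SPI controller's "reg" value, here 0x7000) and the machine name are all invented for illustration:

        static void example_activate_cs(u8 cs, u8 polarity)
        {
                /* drive the board-specific chip-select line */
        }

        static void example_deactivate_cs(u8 cs, u8 polarity)
        {
                /* release the chip-select line */
        }

        static struct spi_board_info example_spi_boardinfo[] = {
                {
                        .bus_num = 0x7000,
                        .chip_select = 0,
                        .max_speed_hz = 5000000,
                        .modalias = "mmc_spi",
                },
        };

        static int __init example_spi_init(void)
        {
                return fsl_spi_init(example_spi_boardinfo,
                                    ARRAY_SIZE(example_spi_boardinfo),
                                    example_activate_cs,
                                    example_deactivate_cs);
        }
        machine_device_initcall(example_board, example_spi_init);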
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 04e145b5fc32..63e7db30a4cd 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -8,5 +8,13 @@ extern phys_addr_t get_immrbase(void);
 extern u32 get_brgfreq(void);
 extern u32 get_baudrate(void);
 
+struct spi_board_info;
+
+extern int fsl_spi_init(struct spi_board_info *board_infos,
+                        unsigned int num_board_infos,
+                        void (*activate_cs)(u8 cs, u8 polarity),
+                        void (*deactivate_cs)(u8 cs, u8 polarity));
+
+extern void fsl_rstcr_restart(char *cmd);
 #endif
 #endif
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index ad87adc975bc..7c1b27ac7d3c 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -25,7 +25,6 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
 
 static DEFINE_SPINLOCK(i8259_lock);
 
-static struct device_node *i8259_node;
 static struct irq_host *i8259_host;
 
 /*
@@ -165,7 +164,7 @@ static struct resource pic_edgectrl_iores = {
 
 static int i8259_host_match(struct irq_host *h, struct device_node *node)
 {
-        return i8259_node == NULL || i8259_node == node;
+        return h->of_node == NULL || h->of_node == node;
 }
 
 static int i8259_host_map(struct irq_host *h, unsigned int virq,
@@ -276,9 +275,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
         spin_unlock_irqrestore(&i8259_lock, flags);
 
         /* create a legacy host */
-        if (node)
-                i8259_node = of_node_get(node);
-        i8259_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &i8259_host_ops, 0);
+        i8259_host = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LEGACY,
+                                    0, &i8259_host_ops, 0);
         if (i8259_host == NULL) {
                 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
                 return;
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 5294560c7b00..cfbd2aae93e8 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -144,14 +144,16 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 
 static struct pci_ops indirect_pci_ops =
 {
-        indirect_read_config,
-        indirect_write_config
+        .read = indirect_read_config,
+        .write = indirect_write_config,
 };
 
 void __init
-setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data, u32 flags)
+setup_indirect_pci(struct pci_controller* hose,
+                   resource_size_t cfg_addr,
+                   resource_size_t cfg_data, u32 flags)
 {
-        unsigned long base = cfg_addr & PAGE_MASK;
+        resource_size_t base = cfg_addr & PAGE_MASK;
         void __iomem *mbase;
 
         mbase = ioremap(base, PAGE_SIZE);
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 473c415e9e25..05a56e55804c 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -511,10 +511,8 @@ static struct irq_chip ipic_irq_chip = {
 
 static int ipic_host_match(struct irq_host *h, struct device_node *node)
 {
-        struct ipic *ipic = h->host_data;
-
         /* Exact match, unless ipic node is NULL */
-        return ipic->of_node == NULL || ipic->of_node == node;
+        return h->of_node == NULL || h->of_node == node;
 }
 
 static int ipic_host_map(struct irq_host *h, unsigned int virq,
@@ -568,9 +566,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
                 return NULL;
 
         memset(ipic, 0, sizeof(struct ipic));
-        ipic->of_node = of_node_get(node);
 
-        ipic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+        ipic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
                                        NR_IPIC_INTS,
                                        &ipic_host_ops, 0);
         if (ipic->irqhost == NULL) {
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
index c28e589877eb..bb309a501b2d 100644
--- a/arch/powerpc/sysdev/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
@@ -48,9 +48,6 @@ struct ipic {
48 48
49 /* The "linux" controller struct */ 49 /* The "linux" controller struct */
50 struct irq_chip hc_irq; 50 struct irq_chip hc_irq;
51
52 /* The device node of the interrupt controller */
53 struct device_node *of_node;
54}; 51};
55 52
56struct ipic_info { 53struct ipic_info {
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 2fc2bcd79b5e..7aa4ff5f5ec8 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -19,11 +19,10 @@
19 19
20extern int cpm_get_irq(struct pt_regs *regs); 20extern int cpm_get_irq(struct pt_regs *regs);
21 21
22static struct device_node *mpc8xx_pic_node;
23static struct irq_host *mpc8xx_pic_host; 22static struct irq_host *mpc8xx_pic_host;
24#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 23#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
25static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 24static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
26static sysconf8xx_t *siu_reg; 25static sysconf8xx_t __iomem *siu_reg;
27 26
28int cpm_get_irq(struct pt_regs *regs); 27int cpm_get_irq(struct pt_regs *regs);
29 28
@@ -120,11 +119,6 @@ unsigned int mpc8xx_get_irq(void)
120 119
121} 120}
122 121
123static int mpc8xx_pic_host_match(struct irq_host *h, struct device_node *node)
124{
125 return mpc8xx_pic_node == node;
126}
127
128static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, 122static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
129 irq_hw_number_t hw) 123 irq_hw_number_t hw)
130{ 124{
@@ -158,7 +152,6 @@ static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
158 152
159 153
160static struct irq_host_ops mpc8xx_pic_host_ops = { 154static struct irq_host_ops mpc8xx_pic_host_ops = {
161 .match = mpc8xx_pic_host_match,
162 .map = mpc8xx_pic_host_map, 155 .map = mpc8xx_pic_host_map,
163 .xlate = mpc8xx_pic_host_xlate, 156 .xlate = mpc8xx_pic_host_xlate,
164}; 157};
@@ -166,32 +159,33 @@ static struct irq_host_ops mpc8xx_pic_host_ops = {
166int mpc8xx_pic_init(void) 159int mpc8xx_pic_init(void)
167{ 160{
168 struct resource res; 161 struct resource res;
169 struct device_node *np = NULL; 162 struct device_node *np;
170 int ret; 163 int ret;
171 164
172 np = of_find_node_by_type(np, "mpc8xx-pic"); 165 np = of_find_compatible_node(NULL, NULL, "fsl,pq1-pic");
173 166 if (np == NULL)
167 np = of_find_node_by_type(NULL, "mpc8xx-pic");
174 if (np == NULL) { 168 if (np == NULL) {
175 printk(KERN_ERR "Could not find open-pic node\n"); 169 printk(KERN_ERR "Could not find fsl,pq1-pic node\n");
176 return -ENOMEM; 170 return -ENOMEM;
177 } 171 }
178 172
179 mpc8xx_pic_node = of_node_get(np);
180
181 ret = of_address_to_resource(np, 0, &res); 173 ret = of_address_to_resource(np, 0, &res);
182 of_node_put(np);
183 if (ret) 174 if (ret)
184 return ret; 175 goto out;
185 176
186 siu_reg = (void *)ioremap(res.start, res.end - res.start + 1); 177 siu_reg = ioremap(res.start, res.end - res.start + 1);
187 if (siu_reg == NULL) 178 if (siu_reg == NULL)
188 return -EINVAL; 179 return -EINVAL;
189 180
190 mpc8xx_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64, &mpc8xx_pic_host_ops, 64); 181 mpc8xx_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
182 64, &mpc8xx_pic_host_ops, 64);
191 if (mpc8xx_pic_host == NULL) { 183 if (mpc8xx_pic_host == NULL) {
192 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); 184 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
193 ret = -ENOMEM; 185 ret = -ENOMEM;
194 } 186 }
195 187
188out:
189 of_node_put(np);
196 return ret; 190 return ret;
197} 191}
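
Note: the init path now looks for the "fsl,pq1-pic" compatible first and only falls back to the legacy device_type lookup, so old device trees keep working. A compressed sketch of that lookup order (the function name is illustrative):

#include <asm/prom.h>

/* Prefer the new compatible, fall back to the legacy device_type. */
static struct device_node * __init example_find_pq1_pic(void)
{
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "fsl,pq1-pic");
        if (np == NULL)
                np = of_find_node_by_type(NULL, "mpc8xx-pic");

        return np;      /* caller does of_node_put() when finished */
}
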
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 74c64c0d3b71..893e65439e85 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -156,8 +156,7 @@ static inline u32 _mpic_read(enum mpic_reg_type type,
156 switch(type) { 156 switch(type) {
157#ifdef CONFIG_PPC_DCR 157#ifdef CONFIG_PPC_DCR
158 case mpic_access_dcr: 158 case mpic_access_dcr:
159 return dcr_read(rb->dhost, 159 return dcr_read(rb->dhost, rb->dhost.base + reg);
160 rb->dbase + reg + rb->doff);
161#endif 160#endif
162 case mpic_access_mmio_be: 161 case mpic_access_mmio_be:
163 return in_be32(rb->base + (reg >> 2)); 162 return in_be32(rb->base + (reg >> 2));
@@ -174,8 +173,7 @@ static inline void _mpic_write(enum mpic_reg_type type,
174 switch(type) { 173 switch(type) {
175#ifdef CONFIG_PPC_DCR 174#ifdef CONFIG_PPC_DCR
176 case mpic_access_dcr: 175 case mpic_access_dcr:
177 return dcr_write(rb->dhost, 176 return dcr_write(rb->dhost, rb->dhost.base + reg, value);
178 rb->dbase + reg + rb->doff, value);
179#endif 177#endif
180 case mpic_access_mmio_be: 178 case mpic_access_mmio_be:
181 return out_be32(rb->base + (reg >> 2), value); 179 return out_be32(rb->base + (reg >> 2), value);
@@ -228,8 +226,13 @@ static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigne
228 unsigned int isu = src_no >> mpic->isu_shift; 226 unsigned int isu = src_no >> mpic->isu_shift;
229 unsigned int idx = src_no & mpic->isu_mask; 227 unsigned int idx = src_no & mpic->isu_mask;
230 228
231 return _mpic_read(mpic->reg_type, &mpic->isus[isu], 229#ifdef CONFIG_MPIC_BROKEN_REGREAD
232 reg + (idx * MPIC_INFO(IRQ_STRIDE))); 230 if (reg == 0)
231 return mpic->isu_reg0_shadow[idx];
232 else
233#endif
234 return _mpic_read(mpic->reg_type, &mpic->isus[isu],
235 reg + (idx * MPIC_INFO(IRQ_STRIDE)));
233} 236}
234 237
235static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, 238static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
@@ -240,6 +243,11 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
240 243
241 _mpic_write(mpic->reg_type, &mpic->isus[isu], 244 _mpic_write(mpic->reg_type, &mpic->isus[isu],
242 reg + (idx * MPIC_INFO(IRQ_STRIDE)), value); 245 reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
246
247#ifdef CONFIG_MPIC_BROKEN_REGREAD
248 if (reg == 0)
249 mpic->isu_reg0_shadow[idx] = value;
250#endif
243} 251}
244 252
245#define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r)) 253#define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r))
@@ -269,9 +277,11 @@ static void _mpic_map_mmio(struct mpic *mpic, unsigned long phys_addr,
269static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, 277static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
270 unsigned int offset, unsigned int size) 278 unsigned int offset, unsigned int size)
271{ 279{
272 rb->dbase = mpic->dcr_base; 280 const u32 *dbasep;
273 rb->doff = offset; 281
274 rb->dhost = dcr_map(mpic->of_node, rb->dbase + rb->doff, size); 282 dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
283
284 rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
275 BUG_ON(!DCR_MAP_OK(rb->dhost)); 285 BUG_ON(!DCR_MAP_OK(rb->dhost));
276} 286}
277 287
@@ -758,7 +768,7 @@ static void mpic_end_ipi(unsigned int irq)
758 768
759#endif /* CONFIG_SMP */ 769#endif /* CONFIG_SMP */
760 770
761static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 771void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
762{ 772{
763 struct mpic *mpic = mpic_from_irq(irq); 773 struct mpic *mpic = mpic_from_irq(irq);
764 unsigned int src = mpic_irq_to_hw(irq); 774 unsigned int src = mpic_irq_to_hw(irq);
@@ -861,10 +871,8 @@ static struct irq_chip mpic_irq_ht_chip = {
861 871
862static int mpic_host_match(struct irq_host *h, struct device_node *node) 872static int mpic_host_match(struct irq_host *h, struct device_node *node)
863{ 873{
864 struct mpic *mpic = h->host_data;
865
866 /* Exact match, unless mpic node is NULL */ 874 /* Exact match, unless mpic node is NULL */
867 return mpic->of_node == NULL || mpic->of_node == node; 875 return h->of_node == NULL || h->of_node == node;
868} 876}
869 877
870static int mpic_host_map(struct irq_host *h, unsigned int virq, 878static int mpic_host_map(struct irq_host *h, unsigned int virq,
@@ -985,10 +993,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
985 993
986 memset(mpic, 0, sizeof(struct mpic)); 994 memset(mpic, 0, sizeof(struct mpic));
987 mpic->name = name; 995 mpic->name = name;
988 mpic->of_node = of_node_get(node);
989 996
990 mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, isu_size, 997 mpic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
991 &mpic_host_ops, 998 isu_size, &mpic_host_ops,
992 flags & MPIC_LARGE_VECTORS ? 2048 : 256); 999 flags & MPIC_LARGE_VECTORS ? 2048 : 256);
993 if (mpic->irqhost == NULL) { 1000 if (mpic->irqhost == NULL) {
994 of_node_put(node); 1001 of_node_put(node);
@@ -1068,20 +1075,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1068 BUG_ON(paddr == 0 && node == NULL); 1075 BUG_ON(paddr == 0 && node == NULL);
1069 1076
1070 /* If no physical address passed in, check if it's dcr based */ 1077 /* If no physical address passed in, check if it's dcr based */
1071 if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) 1078 if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
1072 mpic->flags |= MPIC_USES_DCR;
1073
1074#ifdef CONFIG_PPC_DCR 1079#ifdef CONFIG_PPC_DCR
1075 if (mpic->flags & MPIC_USES_DCR) { 1080 mpic->flags |= MPIC_USES_DCR;
1076 const u32 *dbasep;
1077 dbasep = of_get_property(node, "dcr-reg", NULL);
1078 BUG_ON(dbasep == NULL);
1079 mpic->dcr_base = *dbasep;
1080 mpic->reg_type = mpic_access_dcr; 1081 mpic->reg_type = mpic_access_dcr;
1081 }
1082#else 1082#else
1083 BUG_ON (mpic->flags & MPIC_USES_DCR); 1083 BUG();
1084#endif /* CONFIG_PPC_DCR */ 1084#endif /* CONFIG_PPC_DCR */
1085 }
1085 1086
1086 /* If the MPIC is not DCR based, and no physical address was passed 1087 /* If the MPIC is not DCR based, and no physical address was passed
1087 * in, try to obtain one 1088 * in, try to obtain one
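
Note: the CONFIG_MPIC_BROKEN_REGREAD hunks keep a software shadow of each source's register offset 0, writing through to the hardware but satisfying reads of that offset from the shadow. In isolation the pattern looks roughly like this; the accessor names, array size and everything prefixed "example_" are placeholders, not MPIC code:

#include <linux/types.h>

#define EXAMPLE_NR_SOURCES 256  /* made-up size for the sketch */

/* Placeholders for the real register accessors. */
extern u32 example_hw_read(unsigned int idx, unsigned int reg);
extern void example_hw_write(unsigned int idx, unsigned int reg, u32 val);

static u32 example_reg0_shadow[EXAMPLE_NR_SOURCES];

/* Offset 0 cannot be read back reliably, so serve it from the shadow;
 * every other offset is read from the device as usual. */
static u32 example_irq_read(unsigned int idx, unsigned int reg)
{
        if (reg == 0)
                return example_reg0_shadow[idx];
        return example_hw_read(idx, reg);
}

/* Writes always hit the hardware, and offset 0 also updates the
 * shadow so later reads stay coherent. */
static void example_irq_write(unsigned int idx, unsigned int reg, u32 val)
{
        example_hw_write(idx, reg, val);
        if (reg == 0)
                example_reg0_shadow[idx] = val;
}
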
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 3a1c3d2c594d..1cb6bd841027 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -34,5 +34,6 @@ extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
34extern void mpic_end_irq(unsigned int irq); 34extern void mpic_end_irq(unsigned int irq);
35extern void mpic_mask_irq(unsigned int irq); 35extern void mpic_mask_irq(unsigned int irq);
36extern void mpic_unmask_irq(unsigned int irq); 36extern void mpic_unmask_irq(unsigned int irq);
37extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask);
37 38
38#endif /* _POWERPC_SYSDEV_MPIC_H */ 39#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index b076793033c2..d272a52ecd24 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/bootmem.h>
13#include <linux/bitmap.h> 12#include <linux/bitmap.h>
14#include <linux/msi.h> 13#include <linux/msi.h>
15#include <asm/mpic.h> 14#include <asm/mpic.h>
@@ -117,16 +116,17 @@ static int mpic_msi_reserve_dt_hwirqs(struct mpic *mpic)
117 int i, len; 116 int i, len;
118 const u32 *p; 117 const u32 *p;
119 118
120 p = of_get_property(mpic->of_node, "msi-available-ranges", &len); 119 p = of_get_property(mpic->irqhost->of_node,
120 "msi-available-ranges", &len);
121 if (!p) { 121 if (!p) {
122 pr_debug("mpic: no msi-available-ranges property found on %s\n", 122 pr_debug("mpic: no msi-available-ranges property found on %s\n",
123 mpic->of_node->full_name); 123 mpic->irqhost->of_node->full_name);
124 return -ENODEV; 124 return -ENODEV;
125 } 125 }
126 126
127 if (len % 8 != 0) { 127 if (len % 8 != 0) {
128 printk(KERN_WARNING "mpic: Malformed msi-available-ranges " 128 printk(KERN_WARNING "mpic: Malformed msi-available-ranges "
129 "property on %s\n", mpic->of_node->full_name); 129 "property on %s\n", mpic->irqhost->of_node->full_name);
130 return -EINVAL; 130 return -EINVAL;
131 } 131 }
132 132
@@ -151,10 +151,7 @@ int mpic_msi_init_allocator(struct mpic *mpic)
151 size = BITS_TO_LONGS(mpic->irq_count) * sizeof(long); 151 size = BITS_TO_LONGS(mpic->irq_count) * sizeof(long);
152 pr_debug("mpic: allocator bitmap size is 0x%x bytes\n", size); 152 pr_debug("mpic: allocator bitmap size is 0x%x bytes\n", size);
153 153
154 if (mem_init_done) 154 mpic->hwirq_bitmap = alloc_maybe_bootmem(size, GFP_KERNEL);
155 mpic->hwirq_bitmap = kmalloc(size, GFP_KERNEL);
156 else
157 mpic->hwirq_bitmap = alloc_bootmem(size);
158 155
159 if (!mpic->hwirq_bitmap) { 156 if (!mpic->hwirq_bitmap) {
160 pr_debug("mpic: ENOMEM allocating allocator bitmap!\n"); 157 pr_debug("mpic: ENOMEM allocating allocator bitmap!\n");
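
Note: alloc_maybe_bootmem() folds the "kmalloc after mem_init, bootmem before" decision into one helper, so the caller no longer tests mem_init_done itself. A sketch of the idea only, not the arch/powerpc implementation; the function name here is illustrative:

#include <linux/slab.h>
#include <linux/bootmem.h>

extern int mem_init_done;       /* powerpc flag set once mem_init() has run */

/* Pick the allocator based on how far boot has progressed. */
static void *example_alloc_maybe_bootmem(size_t size, gfp_t mask)
{
        if (mem_init_done)
                return kmalloc(size, mask);

        return alloc_bootmem(size);
}
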
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 305b864c25d9..1d5a40899b74 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -40,6 +40,7 @@ static struct irq_chip mpic_u3msi_chip = {
40 .unmask = mpic_u3msi_unmask_irq, 40 .unmask = mpic_u3msi_unmask_irq,
41 .eoi = mpic_end_irq, 41 .eoi = mpic_end_irq,
42 .set_type = mpic_set_irq_type, 42 .set_type = mpic_set_irq_type,
43 .set_affinity = mpic_set_affinity,
43 .typename = "MPIC-U3MSI", 44 .typename = "MPIC-U3MSI",
44}; 45};
45 46
@@ -107,59 +108,46 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
107 return; 108 return;
108} 109}
109 110
110static void u3msi_compose_msi_msg(struct pci_dev *pdev, int virq,
111 struct msi_msg *msg)
112{
113 u64 addr;
114
115 addr = find_ht_magic_addr(pdev);
116 msg->address_lo = addr & 0xFFFFFFFF;
117 msg->address_hi = addr >> 32;
118 msg->data = virq_to_hw(virq);
119
120 pr_debug("u3msi: allocated virq 0x%x (hw 0x%lx) at address 0x%lx\n",
121 virq, virq_to_hw(virq), addr);
122}
123
124static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 111static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
125{ 112{
126 irq_hw_number_t hwirq; 113 irq_hw_number_t hwirq;
127 int rc;
128 unsigned int virq; 114 unsigned int virq;
129 struct msi_desc *entry; 115 struct msi_desc *entry;
130 struct msi_msg msg; 116 struct msi_msg msg;
117 u64 addr;
118
119 addr = find_ht_magic_addr(pdev);
120 msg.address_lo = addr & 0xFFFFFFFF;
121 msg.address_hi = addr >> 32;
131 122
132 list_for_each_entry(entry, &pdev->msi_list, list) { 123 list_for_each_entry(entry, &pdev->msi_list, list) {
133 hwirq = mpic_msi_alloc_hwirqs(msi_mpic, 1); 124 hwirq = mpic_msi_alloc_hwirqs(msi_mpic, 1);
134 if (hwirq < 0) { 125 if (hwirq < 0) {
135 rc = hwirq;
136 pr_debug("u3msi: failed allocating hwirq\n"); 126 pr_debug("u3msi: failed allocating hwirq\n");
137 goto out_free; 127 return hwirq;
138 } 128 }
139 129
140 virq = irq_create_mapping(msi_mpic->irqhost, hwirq); 130 virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
141 if (virq == NO_IRQ) { 131 if (virq == NO_IRQ) {
142 pr_debug("u3msi: failed mapping hwirq 0x%lx\n", hwirq); 132 pr_debug("u3msi: failed mapping hwirq 0x%lx\n", hwirq);
143 mpic_msi_free_hwirqs(msi_mpic, hwirq, 1); 133 mpic_msi_free_hwirqs(msi_mpic, hwirq, 1);
144 rc = -ENOSPC; 134 return -ENOSPC;
145 goto out_free;
146 } 135 }
147 136
148 set_irq_msi(virq, entry); 137 set_irq_msi(virq, entry);
149 set_irq_chip(virq, &mpic_u3msi_chip); 138 set_irq_chip(virq, &mpic_u3msi_chip);
150 set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 139 set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
151 140
152 u3msi_compose_msi_msg(pdev, virq, &msg); 141 pr_debug("u3msi: allocated virq 0x%x (hw 0x%lx) addr 0x%lx\n",
142 virq, hwirq, addr);
143
144 msg.data = hwirq;
153 write_msi_msg(virq, &msg); 145 write_msi_msg(virq, &msg);
154 146
155 hwirq++; 147 hwirq++;
156 } 148 }
157 149
158 return 0; 150 return 0;
159
160 out_free:
161 u3msi_teardown_msi_irqs(pdev);
162 return rc;
163} 151}
164 152
165int mpic_u3msi_init(struct mpic *mpic) 153int mpic_u3msi_init(struct mpic *mpic)
diff --git a/arch/powerpc/sysdev/mv64x60.h b/arch/powerpc/sysdev/mv64x60.h
index 2ff0b4ef2681..4f618fa465c0 100644
--- a/arch/powerpc/sysdev/mv64x60.h
+++ b/arch/powerpc/sysdev/mv64x60.h
@@ -7,5 +7,6 @@ extern void __init mv64x60_init_irq(void);
7extern unsigned int mv64x60_get_irq(void); 7extern unsigned int mv64x60_get_irq(void);
8 8
9extern void __init mv64x60_pci_init(void); 9extern void __init mv64x60_pci_init(void);
10extern void __init mv64x60_init_early(void);
10 11
11#endif /* __MV64X60_H__ */ 12#endif /* __MV64X60_H__ */
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 01d316287772..19e6ef263797 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -202,11 +202,6 @@ static struct irq_chip mv64x60_chip_gpp = {
202 * mv64x60_host_ops functions 202 * mv64x60_host_ops functions
203 */ 203 */
204 204
205static int mv64x60_host_match(struct irq_host *h, struct device_node *np)
206{
207 return mv64x60_irq_host->host_data == np;
208}
209
210static struct irq_chip *mv64x60_chips[] = { 205static struct irq_chip *mv64x60_chips[] = {
211 [MV64x60_LEVEL1_LOW] = &mv64x60_chip_low, 206 [MV64x60_LEVEL1_LOW] = &mv64x60_chip_low,
212 [MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high, 207 [MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
@@ -228,7 +223,6 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
228} 223}
229 224
230static struct irq_host_ops mv64x60_host_ops = { 225static struct irq_host_ops mv64x60_host_ops = {
231 .match = mv64x60_host_match,
232 .map = mv64x60_host_map, 226 .map = mv64x60_host_map,
233}; 227};
234 228
@@ -253,14 +247,12 @@ void __init mv64x60_init_irq(void)
253 np = of_find_compatible_node(NULL, NULL, "marvell,mv64x60-pic"); 247 np = of_find_compatible_node(NULL, NULL, "marvell,mv64x60-pic");
254 reg = of_get_property(np, "reg", &size); 248 reg = of_get_property(np, "reg", &size);
255 paddr = of_translate_address(np, reg); 249 paddr = of_translate_address(np, reg);
256 of_node_put(np);
257 mv64x60_irq_reg_base = ioremap(paddr, reg[1]); 250 mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
258 251
259 mv64x60_irq_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, MV64x60_NUM_IRQS, 252 mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
253 MV64x60_NUM_IRQS,
260 &mv64x60_host_ops, MV64x60_NUM_IRQS); 254 &mv64x60_host_ops, MV64x60_NUM_IRQS);
261 255
262 mv64x60_irq_host->host_data = np;
263
264 spin_lock_irqsave(&mv64x60_lock, flags); 256 spin_lock_irqsave(&mv64x60_lock, flags);
265 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, 257 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
266 mv64x60_cached_gpp_mask); 258 mv64x60_cached_gpp_mask);
diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
new file mode 100644
index 000000000000..367e7b13ec00
--- /dev/null
+++ b/arch/powerpc/sysdev/mv64x60_udbg.c
@@ -0,0 +1,152 @@
1/*
2 * udbg serial input/output routines for the Marvell MV64x60 (Discovery).
3 *
4 * Author: Dale Farnsworth <dale@farnsworth.org>
5 *
6 * 2007 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#include <asm/io.h>
13#include <asm/prom.h>
14#include <asm/udbg.h>
15
16#include <sysdev/mv64x60.h>
17
18#define MPSC_0_CR1_OFFSET 0x000c
19
20#define MPSC_0_CR2_OFFSET 0x0010
21#define MPSC_CHR_2_TCS (1 << 9)
22
23#define MPSC_0_CHR_10_OFFSET 0x0030
24
25#define MPSC_INTR_CAUSE_OFF_0 0x0004
26#define MPSC_INTR_CAUSE_OFF_1 0x000c
27#define MPSC_INTR_CAUSE_RCC (1<<6)
28
29static void __iomem *mpsc_base;
30static void __iomem *mpsc_intr_cause;
31
32static void mv64x60_udbg_putc(char c)
33{
34 if (c == '\n')
35 mv64x60_udbg_putc('\r');
36
37 while(in_le32(mpsc_base + MPSC_0_CR2_OFFSET) & MPSC_CHR_2_TCS)
38 ;
39 out_le32(mpsc_base + MPSC_0_CR1_OFFSET, c);
40 out_le32(mpsc_base + MPSC_0_CR2_OFFSET, MPSC_CHR_2_TCS);
41}
42
43static int mv64x60_udbg_testc(void)
44{
45 return (in_le32(mpsc_intr_cause) & MPSC_INTR_CAUSE_RCC) != 0;
46}
47
48static int mv64x60_udbg_getc(void)
49{
50 int cause = 0;
51 int c;
52
53 while (!mv64x60_udbg_testc())
54 ;
55
56 c = in_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2);
57 out_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2, c);
58 out_le32(mpsc_intr_cause, cause & ~MPSC_INTR_CAUSE_RCC);
59 return c;
60}
61
62static int mv64x60_udbg_getc_poll(void)
63{
64 if (!mv64x60_udbg_testc())
65 return -1;
66
67 return mv64x60_udbg_getc();
68}
69
70static void mv64x60_udbg_init(void)
71{
72 struct device_node *np, *mpscintr, *stdout = NULL;
73 const char *path;
74 const phandle *ph;
75 struct resource r[2];
76 const int *block_index;
77 int intr_cause_offset;
78 int err;
79
80 path = of_get_property(of_chosen, "linux,stdout-path", NULL);
81 if (!path)
82 return;
83
84 stdout = of_find_node_by_path(path);
85 if (!stdout)
86 return;
87
88 for (np = NULL;
89 (np = of_find_compatible_node(np, "serial", "marvell,mpsc")); )
90 if (np == stdout)
91 break;
92
93 of_node_put(stdout);
94 if (!np)
95 return;
96
97 block_index = of_get_property(np, "block-index", NULL);
98 if (!block_index)
99 goto error;
100
101 switch (*block_index) {
102 case 0:
103 intr_cause_offset = MPSC_INTR_CAUSE_OFF_0;
104 break;
105 case 1:
106 intr_cause_offset = MPSC_INTR_CAUSE_OFF_1;
107 break;
108 default:
109 goto error;
110 }
111
112 err = of_address_to_resource(np, 0, &r[0]);
113 if (err)
114 goto error;
115
116 ph = of_get_property(np, "mpscintr", NULL);
117 mpscintr = of_find_node_by_phandle(*ph);
118 if (!mpscintr)
119 goto error;
120
121 err = of_address_to_resource(mpscintr, 0, &r[1]);
122 of_node_put(mpscintr);
123 if (err)
124 goto error;
125
126 of_node_put(np);
127
128 mpsc_base = ioremap(r[0].start, r[0].end - r[0].start + 1);
129 if (!mpsc_base)
130 return;
131
132 mpsc_intr_cause = ioremap(r[1].start, r[1].end - r[1].start + 1);
133 if (!mpsc_intr_cause) {
134 iounmap(mpsc_base);
135 return;
136 }
137 mpsc_intr_cause += intr_cause_offset;
138
139 udbg_putc = mv64x60_udbg_putc;
140 udbg_getc = mv64x60_udbg_getc;
141 udbg_getc_poll = mv64x60_udbg_getc_poll;
142
143 return;
144
145error:
146 of_node_put(np);
147}
148
149void mv64x60_init_early(void)
150{
151 mv64x60_udbg_init();
152}
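
Note: the new udbg backend only does something if a board hooks mv64x60_init_early() into its early setup. A hedged example of that wiring; "example_board" is a made-up machine and real boards also fill in .probe, .setup_arch and friends, omitted from this sketch:

#include <asm/machdep.h>
#include <sysdev/mv64x60.h>

/* Pointing init_early at mv64x60_init_early() is what turns the
 * routines above into an early console on MPSC-based boards. */
define_machine(example_board) {
        .name           = "Example MV64x60 board",
        .init_early     = mv64x60_init_early,
};
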
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 2f91b55b7754..20edd1e94eff 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -205,10 +205,12 @@ static int pmi_of_remove(struct of_device *dev)
205} 205}
206 206
207static struct of_platform_driver pmi_of_platform_driver = { 207static struct of_platform_driver pmi_of_platform_driver = {
208 .name = "pmi",
209 .match_table = pmi_match, 208 .match_table = pmi_match,
210 .probe = pmi_of_probe, 209 .probe = pmi_of_probe,
211 .remove = pmi_of_remove 210 .remove = pmi_of_remove,
211 .driver = {
212 .name = "pmi",
213 },
212}; 214};
213 215
214static int __init pmi_module_init(void) 216static int __init pmi_module_init(void)
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 90f87408b5d5..3d57d3835b04 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
141 * 16 BRGs, which can be connected to the QE channels or output 141 * 16 BRGs, which can be connected to the QE channels or output
142 * as clocks. The BRGs are in two different block of internal 142 * as clocks. The BRGs are in two different block of internal
143 * memory mapped space. 143 * memory mapped space.
144 * The baud rate clock is the system clock divided by something. 144 * The BRG clock is the QE clock divided by 2.
145 * It was set up long ago during the initial boot phase and is 145 * It was set up long ago during the initial boot phase and is
146 * is given to us. 146 * is given to us.
147 * Baud rate clocks are zero-based in the driver code (as that maps 147 * Baud rate clocks are zero-based in the driver code (as that maps
@@ -165,28 +165,38 @@ unsigned int get_brg_clk(void)
165 return brg_clk; 165 return brg_clk;
166} 166}
167 167
168/* This function is used by UARTS, or anything else that uses a 16x 168/* Program the BRG to the given sampling rate and multiplier
169 * oversampled clock. 169 *
170 * @brg: the BRG, 1-16
171 * @rate: the desired sampling rate
172 * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
173 * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
174 * then 'multiplier' should be 8.
175 *
176 * Also note that the value programmed into the BRGC register must be even.
170 */ 177 */
171void qe_setbrg(u32 brg, u32 rate) 178void qe_setbrg(unsigned int brg, unsigned int rate, unsigned int multiplier)
172{ 179{
173 volatile u32 *bp;
174 u32 divisor, tempval; 180 u32 divisor, tempval;
175 int div16 = 0; 181 u32 div16 = 0;
176 182
177 bp = &qe_immr->brg.brgc[brg]; 183 divisor = get_brg_clk() / (rate * multiplier);
178 184
179 divisor = (get_brg_clk() / rate);
180 if (divisor > QE_BRGC_DIVISOR_MAX + 1) { 185 if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
181 div16 = 1; 186 div16 = QE_BRGC_DIV16;
182 divisor /= 16; 187 divisor /= 16;
183 } 188 }
184 189
185 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE; 190 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
186 if (div16) 191 that the BRG divisor must be even if you're not using divide-by-16
187 tempval |= QE_BRGC_DIV16; 192 mode. */
193 if (!div16 && (divisor & 1))
194 divisor++;
195
196 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
197 QE_BRGC_ENABLE | div16;
188 198
189 out_be32(bp, tempval); 199 out_be32(&qe_immr->brg.brgc[brg - 1], tempval);
190} 200}
191 201
192/* Initialize SNUMs (thread serial numbers) according to 202/* Initialize SNUMs (thread serial numbers) according to
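
Note: with the new prototype the caller passes the oversampling factor explicitly instead of qe_setbrg() assuming 16x, and BRGs are numbered 1-16 rather than zero-based. A small usage sketch with illustrative values:

#include <asm/qe.h>

/* Program BRG1 for a 115200-baud UART whose GUMR_L[RDCR/TDCR] select
 * 16x sampling.  The old call was qe_setbrg(0, 115200) with the 16x
 * factor implied. */
static void example_uart_set_baud(void)
{
        qe_setbrg(1, 115200, 16);
}
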
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 4d1dcb45963d..e1c0fd6dbc1a 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -245,10 +245,8 @@ static struct irq_chip qe_ic_irq_chip = {
245 245
246static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 246static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
247{ 247{
248 struct qe_ic *qe_ic = h->host_data;
249
250 /* Exact match, unless qe_ic node is NULL */ 248 /* Exact match, unless qe_ic node is NULL */
251 return qe_ic->of_node == NULL || qe_ic->of_node == node; 249 return h->of_node == NULL || h->of_node == node;
252} 250}
253 251
254static int qe_ic_host_map(struct irq_host *h, unsigned int virq, 252static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
@@ -323,25 +321,9 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
323 return irq_linear_revmap(qe_ic->irqhost, irq); 321 return irq_linear_revmap(qe_ic->irqhost, irq);
324} 322}
325 323
326void qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc) 324void __init qe_ic_init(struct device_node *node, unsigned int flags,
327{ 325 void (*low_handler)(unsigned int irq, struct irq_desc *desc),
328 struct qe_ic *qe_ic = desc->handler_data; 326 void (*high_handler)(unsigned int irq, struct irq_desc *desc))
329 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
330
331 if (cascade_irq != NO_IRQ)
332 generic_handle_irq(cascade_irq);
333}
334
335void qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
336{
337 struct qe_ic *qe_ic = desc->handler_data;
338 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
339
340 if (cascade_irq != NO_IRQ)
341 generic_handle_irq(cascade_irq);
342}
343
344void __init qe_ic_init(struct device_node *node, unsigned int flags)
345{ 327{
346 struct qe_ic *qe_ic; 328 struct qe_ic *qe_ic;
347 struct resource res; 329 struct resource res;
@@ -352,9 +334,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags)
352 return; 334 return;
353 335
354 memset(qe_ic, 0, sizeof(struct qe_ic)); 336 memset(qe_ic, 0, sizeof(struct qe_ic));
355 qe_ic->of_node = of_node_get(node);
356 337
357 qe_ic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 338 qe_ic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
358 NR_QE_IC_INTS, &qe_ic_host_ops, 0); 339 NR_QE_IC_INTS, &qe_ic_host_ops, 0);
359 if (qe_ic->irqhost == NULL) { 340 if (qe_ic->irqhost == NULL) {
360 of_node_put(node); 341 of_node_put(node);
@@ -402,14 +383,13 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags)
402 qe_ic_write(qe_ic->regs, QEIC_CICR, temp); 383 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
403 384
404 set_irq_data(qe_ic->virq_low, qe_ic); 385 set_irq_data(qe_ic->virq_low, qe_ic);
405 set_irq_chained_handler(qe_ic->virq_low, qe_ic_cascade_low); 386 set_irq_chained_handler(qe_ic->virq_low, low_handler);
406 387
407 if (qe_ic->virq_high != NO_IRQ) { 388 if (qe_ic->virq_high != NO_IRQ &&
389 qe_ic->virq_high != qe_ic->virq_low) {
408 set_irq_data(qe_ic->virq_high, qe_ic); 390 set_irq_data(qe_ic->virq_high, qe_ic);
409 set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high); 391 set_irq_chained_handler(qe_ic->virq_high, high_handler);
410 } 392 }
411
412 printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS, qe_ic->regs);
413} 393}
414 394
415void qe_ic_set_highest_priority(unsigned int virq, int high) 395void qe_ic_set_highest_priority(unsigned int virq, int high)
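
Note: the cascade handlers move out to the caller, so a platform's PIC setup now supplies them to qe_ic_init(). A sketch assuming the qe_ic.h prototype was updated to match; the handler below is the same shape as the qe_ic_cascade_low() removed above, and a real board would normally provide a separate high-priority handler as well:

#include <linux/irq.h>
#include <asm/prom.h>
#include <asm/qe_ic.h>

static void example_qe_cascade_low(unsigned int irq, struct irq_desc *desc)
{
        struct qe_ic *qe_ic = desc->handler_data;
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);
}

static void __init example_board_pic_init(struct device_node *np)
{
        /* Same handler for both priorities, purely for brevity. */
        qe_ic_init(np, 0, example_qe_cascade_low, example_qe_cascade_low);
}
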
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index 9a631adb189d..c1361d005a8a 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -84,9 +84,6 @@ struct qe_ic {
84 /* The "linux" controller struct */ 84 /* The "linux" controller struct */
85 struct irq_chip hc_irq; 85 struct irq_chip hc_irq;
86 86
87 /* The device node of the interrupt controller */
88 struct device_node *of_node;
89
90 /* VIRQ numbers of QE high/low irqs */ 87 /* VIRQ numbers of QE high/low irqs */
91 unsigned int virq_high; 88 unsigned int virq_high;
92 unsigned int virq_low; 89 unsigned int virq_low;
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index e32b45bf9ff5..e53ea4d374a0 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -36,6 +36,9 @@ struct port_regs {
36 __be32 cpdir2; /* Direction register */ 36 __be32 cpdir2; /* Direction register */
37 __be32 cppar1; /* Pin assignment register */ 37 __be32 cppar1; /* Pin assignment register */
38 __be32 cppar2; /* Pin assignment register */ 38 __be32 cppar2; /* Pin assignment register */
39#ifdef CONFIG_PPC_85xx
40 u8 pad[8];
41#endif
39}; 42};
40 43
41static struct port_regs *par_io = NULL; 44static struct port_regs *par_io = NULL;
@@ -195,29 +198,22 @@ EXPORT_SYMBOL(par_io_of_config);
195#ifdef DEBUG 198#ifdef DEBUG
196static void dump_par_io(void) 199static void dump_par_io(void)
197{ 200{
198 int i; 201 unsigned int i;
199 202
200 printk(KERN_INFO "PAR IO registars:\n"); 203 printk(KERN_INFO "%s: par_io=%p\n", __FUNCTION__, par_io);
201 printk(KERN_INFO "Base address: 0x%08x\n", (u32) par_io);
202 for (i = 0; i < num_par_io_ports; i++) { 204 for (i = 0; i < num_par_io_ports; i++) {
203 printk(KERN_INFO "cpodr[%d] : addr - 0x%08x, val - 0x%08x\n", 205 printk(KERN_INFO " cpodr[%u]=%08x\n", i,
204 i, (u32) & par_io[i].cpodr, 206 in_be32(&par_io[i].cpodr));
205 in_be32(&par_io[i].cpodr)); 207 printk(KERN_INFO " cpdata[%u]=%08x\n", i,
206 printk(KERN_INFO "cpdata[%d]: addr - 0x%08x, val - 0x%08x\n", 208 in_be32(&par_io[i].cpdata));
207 i, (u32) & par_io[i].cpdata, 209 printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
208 in_be32(&par_io[i].cpdata)); 210 in_be32(&par_io[i].cpdir1));
209 printk(KERN_INFO "cpdir1[%d]: addr - 0x%08x, val - 0x%08x\n", 211 printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
210 i, (u32) & par_io[i].cpdir1, 212 in_be32(&par_io[i].cpdir2));
211 in_be32(&par_io[i].cpdir1)); 213 printk(KERN_INFO " cppar1[%u]=%08x\n", i,
212 printk(KERN_INFO "cpdir2[%d]: addr - 0x%08x, val - 0x%08x\n", 214 in_be32(&par_io[i].cppar1));
213 i, (u32) & par_io[i].cpdir2, 215 printk(KERN_INFO " cppar2[%u]=%08x\n", i,
214 in_be32(&par_io[i].cpdir2)); 216 in_be32(&par_io[i].cppar2));
215 printk(KERN_INFO "cppar1[%d]: addr - 0x%08x, val - 0x%08x\n",
216 i, (u32) & par_io[i].cppar1,
217 in_be32(&par_io[i].cppar1));
218 printk(KERN_INFO "cppar2[%d]: addr - 0x%08x, val - 0x%08x\n",
219 i, (u32) & par_io[i].cppar2,
220 in_be32(&par_io[i].cppar2));
221 } 217 }
222 218
223} 219}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index f970e5415ac0..0e348d9af8a6 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -28,228 +28,188 @@
28 28
29static DEFINE_SPINLOCK(ucc_lock); 29static DEFINE_SPINLOCK(ucc_lock);
30 30
31int ucc_set_qe_mux_mii_mng(int ucc_num) 31int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
32{ 32{
33 unsigned long flags; 33 unsigned long flags;
34 34
35 if (ucc_num > UCC_MAX_NUM - 1)
36 return -EINVAL;
37
35 spin_lock_irqsave(&ucc_lock, flags); 38 spin_lock_irqsave(&ucc_lock, flags);
36 out_be32(&qe_immr->qmx.cmxgcr, 39 clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
37 ((in_be32(&qe_immr->qmx.cmxgcr) & 40 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
38 ~QE_CMXGCR_MII_ENET_MNG) |
39 (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT)));
40 spin_unlock_irqrestore(&ucc_lock, flags); 41 spin_unlock_irqrestore(&ucc_lock, flags);
41 42
42 return 0; 43 return 0;
43} 44}
44EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); 45EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
45 46
46int ucc_set_type(int ucc_num, struct ucc_common *regs, 47/* Configure the UCC to either Slow or Fast.
47 enum ucc_speed_type speed) 48 *
48{ 49 * A given UCC can be figured to support either "slow" devices (e.g. UART)
49 u8 guemr = 0; 50 * or "fast" devices (e.g. Ethernet).
50 51 *
51 /* check if the UCC number is in range. */ 52 * 'ucc_num' is the UCC number, from 0 - 7.
52 if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) 53 *
53 return -EINVAL; 54 * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
54 55 * must always be set to 1.
55 guemr = regs->guemr; 56 */
56 guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX); 57int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
57 switch (speed) {
58 case UCC_SPEED_TYPE_SLOW:
59 guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
60 break;
61 case UCC_SPEED_TYPE_FAST:
62 guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX);
63 break;
64 default:
65 return -EINVAL;
66 }
67 regs->guemr = guemr;
68
69 return 0;
70}
71
72int ucc_init_guemr(struct ucc_common *regs)
73{ 58{
74 u8 guemr = 0; 59 u8 __iomem *guemr;
75
76 if (!regs)
77 return -EINVAL;
78
79 /* Set bit 3 (which is reserved in the GUEMR register) to 1 */
80 guemr = UCC_GUEMR_SET_RESERVED3;
81
82 regs->guemr = guemr;
83
84 return 0;
85}
86 60
87static void get_cmxucr_reg(int ucc_num, volatile u32 ** p_cmxucr, u8 * reg_num, 61 /* The GUEMR register is at the same location for both slow and fast
88 u8 * shift) 62 devices, so we just use uccX.slow.guemr. */
89{
90 switch (ucc_num) { 63 switch (ucc_num) {
91 case 0: *p_cmxucr = &(qe_immr->qmx.cmxucr1); 64 case 0: guemr = &qe_immr->ucc1.slow.guemr;
92 *reg_num = 1;
93 *shift = 16;
94 break; 65 break;
95 case 2: *p_cmxucr = &(qe_immr->qmx.cmxucr1); 66 case 1: guemr = &qe_immr->ucc2.slow.guemr;
96 *reg_num = 1;
97 *shift = 0;
98 break; 67 break;
99 case 4: *p_cmxucr = &(qe_immr->qmx.cmxucr2); 68 case 2: guemr = &qe_immr->ucc3.slow.guemr;
100 *reg_num = 2;
101 *shift = 16;
102 break; 69 break;
103 case 6: *p_cmxucr = &(qe_immr->qmx.cmxucr2); 70 case 3: guemr = &qe_immr->ucc4.slow.guemr;
104 *reg_num = 2;
105 *shift = 0;
106 break; 71 break;
107 case 1: *p_cmxucr = &(qe_immr->qmx.cmxucr3); 72 case 4: guemr = &qe_immr->ucc5.slow.guemr;
108 *reg_num = 3;
109 *shift = 16;
110 break; 73 break;
111 case 3: *p_cmxucr = &(qe_immr->qmx.cmxucr3); 74 case 5: guemr = &qe_immr->ucc6.slow.guemr;
112 *reg_num = 3;
113 *shift = 0;
114 break; 75 break;
115 case 5: *p_cmxucr = &(qe_immr->qmx.cmxucr4); 76 case 6: guemr = &qe_immr->ucc7.slow.guemr;
116 *reg_num = 4;
117 *shift = 16;
118 break; 77 break;
119 case 7: *p_cmxucr = &(qe_immr->qmx.cmxucr4); 78 case 7: guemr = &qe_immr->ucc8.slow.guemr;
120 *reg_num = 4;
121 *shift = 0;
122 break; 79 break;
123 default: 80 default:
124 break; 81 return -EINVAL;
125 } 82 }
83
84 clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
85 UCC_GUEMR_SET_RESERVED3 | speed);
86
87 return 0;
88}
89
90static void get_cmxucr_reg(unsigned int ucc_num, __be32 **cmxucr,
91 unsigned int *reg_num, unsigned int *shift)
92{
93 unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
94
95 *reg_num = cmx + 1;
96 *cmxucr = &qe_immr->qmx.cmxucr[cmx];
97 *shift = 16 - 8 * (ucc_num & 2);
126} 98}
127 99
128int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask) 100int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
129{ 101{
130 volatile u32 *p_cmxucr; 102 __be32 *cmxucr;
131 u8 reg_num; 103 unsigned int reg_num;
132 u8 shift; 104 unsigned int shift;
133 105
134 /* check if the UCC number is in range. */ 106 /* check if the UCC number is in range. */
135 if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) 107 if (ucc_num > UCC_MAX_NUM - 1)
136 return -EINVAL; 108 return -EINVAL;
137 109
138 get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift); 110 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
139 111
140 if (set) 112 if (set)
141 out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift)); 113 setbits32(cmxucr, mask << shift);
142 else 114 else
143 out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift)); 115 clrbits32(cmxucr, mask << shift);
144 116
145 return 0; 117 return 0;
146} 118}
147 119
148int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode) 120int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
121 enum comm_dir mode)
149{ 122{
150 volatile u32 *p_cmxucr; 123 __be32 *cmxucr;
151 u8 reg_num; 124 unsigned int reg_num;
152 u8 shift; 125 unsigned int shift;
153 u32 clock_bits; 126 u32 clock_bits = 0;
154 u32 clock_mask;
155 int source = -1;
156 127
157 /* check if the UCC number is in range. */ 128 /* check if the UCC number is in range. */
158 if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) 129 if (ucc_num > UCC_MAX_NUM - 1)
159 return -EINVAL; 130 return -EINVAL;
160 131
161 if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) { 132 /* The communications direction must be RX or TX */
162 printk(KERN_ERR 133 if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
163 "ucc_set_qe_mux_rxtx: bad comm mode type passed.");
164 return -EINVAL; 134 return -EINVAL;
165 }
166 135
167 get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift); 136 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
168 137
169 switch (reg_num) { 138 switch (reg_num) {
170 case 1: 139 case 1:
171 switch (clock) { 140 switch (clock) {
172 case QE_BRG1: source = 1; break; 141 case QE_BRG1: clock_bits = 1; break;
173 case QE_BRG2: source = 2; break; 142 case QE_BRG2: clock_bits = 2; break;
174 case QE_BRG7: source = 3; break; 143 case QE_BRG7: clock_bits = 3; break;
175 case QE_BRG8: source = 4; break; 144 case QE_BRG8: clock_bits = 4; break;
176 case QE_CLK9: source = 5; break; 145 case QE_CLK9: clock_bits = 5; break;
177 case QE_CLK10: source = 6; break; 146 case QE_CLK10: clock_bits = 6; break;
178 case QE_CLK11: source = 7; break; 147 case QE_CLK11: clock_bits = 7; break;
179 case QE_CLK12: source = 8; break; 148 case QE_CLK12: clock_bits = 8; break;
180 case QE_CLK15: source = 9; break; 149 case QE_CLK15: clock_bits = 9; break;
181 case QE_CLK16: source = 10; break; 150 case QE_CLK16: clock_bits = 10; break;
182 default: source = -1; break; 151 default: break;
183 } 152 }
184 break; 153 break;
185 case 2: 154 case 2:
186 switch (clock) { 155 switch (clock) {
187 case QE_BRG5: source = 1; break; 156 case QE_BRG5: clock_bits = 1; break;
188 case QE_BRG6: source = 2; break; 157 case QE_BRG6: clock_bits = 2; break;
189 case QE_BRG7: source = 3; break; 158 case QE_BRG7: clock_bits = 3; break;
190 case QE_BRG8: source = 4; break; 159 case QE_BRG8: clock_bits = 4; break;
191 case QE_CLK13: source = 5; break; 160 case QE_CLK13: clock_bits = 5; break;
192 case QE_CLK14: source = 6; break; 161 case QE_CLK14: clock_bits = 6; break;
193 case QE_CLK19: source = 7; break; 162 case QE_CLK19: clock_bits = 7; break;
194 case QE_CLK20: source = 8; break; 163 case QE_CLK20: clock_bits = 8; break;
195 case QE_CLK15: source = 9; break; 164 case QE_CLK15: clock_bits = 9; break;
196 case QE_CLK16: source = 10; break; 165 case QE_CLK16: clock_bits = 10; break;
197 default: source = -1; break; 166 default: break;
198 } 167 }
199 break; 168 break;
200 case 3: 169 case 3:
201 switch (clock) { 170 switch (clock) {
202 case QE_BRG9: source = 1; break; 171 case QE_BRG9: clock_bits = 1; break;
203 case QE_BRG10: source = 2; break; 172 case QE_BRG10: clock_bits = 2; break;
204 case QE_BRG15: source = 3; break; 173 case QE_BRG15: clock_bits = 3; break;
205 case QE_BRG16: source = 4; break; 174 case QE_BRG16: clock_bits = 4; break;
206 case QE_CLK3: source = 5; break; 175 case QE_CLK3: clock_bits = 5; break;
207 case QE_CLK4: source = 6; break; 176 case QE_CLK4: clock_bits = 6; break;
208 case QE_CLK17: source = 7; break; 177 case QE_CLK17: clock_bits = 7; break;
209 case QE_CLK18: source = 8; break; 178 case QE_CLK18: clock_bits = 8; break;
210 case QE_CLK7: source = 9; break; 179 case QE_CLK7: clock_bits = 9; break;
211 case QE_CLK8: source = 10; break; 180 case QE_CLK8: clock_bits = 10; break;
212 case QE_CLK16: source = 11; break; 181 case QE_CLK16: clock_bits = 11; break;
213 default: source = -1; break; 182 default: break;
214 } 183 }
215 break; 184 break;
216 case 4: 185 case 4:
217 switch (clock) { 186 switch (clock) {
218 case QE_BRG13: source = 1; break; 187 case QE_BRG13: clock_bits = 1; break;
219 case QE_BRG14: source = 2; break; 188 case QE_BRG14: clock_bits = 2; break;
220 case QE_BRG15: source = 3; break; 189 case QE_BRG15: clock_bits = 3; break;
221 case QE_BRG16: source = 4; break; 190 case QE_BRG16: clock_bits = 4; break;
222 case QE_CLK5: source = 5; break; 191 case QE_CLK5: clock_bits = 5; break;
223 case QE_CLK6: source = 6; break; 192 case QE_CLK6: clock_bits = 6; break;
224 case QE_CLK21: source = 7; break; 193 case QE_CLK21: clock_bits = 7; break;
225 case QE_CLK22: source = 8; break; 194 case QE_CLK22: clock_bits = 8; break;
226 case QE_CLK7: source = 9; break; 195 case QE_CLK7: clock_bits = 9; break;
227 case QE_CLK8: source = 10; break; 196 case QE_CLK8: clock_bits = 10; break;
228 case QE_CLK16: source = 11; break; 197 case QE_CLK16: clock_bits = 11; break;
229 default: source = -1; break; 198 default: break;
230 } 199 }
231 break; 200 break;
232 default: 201 default: break;
233 source = -1;
234 break;
235 } 202 }
236 203
237 if (source == -1) { 204 /* Check for invalid combination of clock and UCC number */
238 printk(KERN_ERR 205 if (!clock_bits)
239 "ucc_set_qe_mux_rxtx: Bad combination of clock and UCC.");
240 return -ENOENT; 206 return -ENOENT;
241 }
242 207
243 clock_bits = (u32) source; 208 if (mode == COMM_DIR_RX)
244 clock_mask = QE_CMXUCR_TX_CLK_SRC_MASK; 209 shift += 4;
245 if (mode == COMM_DIR_RX) {
246 clock_bits <<= 4; /* Rx field is 4 bits to left of Tx field */
247 clock_mask <<= 4; /* Rx field is 4 bits to left of Tx field */
248 }
249 clock_bits <<= shift;
250 clock_mask <<= shift;
251 210
252 out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clock_mask) | clock_bits); 211 clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
212 clock_bits << shift);
253 213
254 return 0; 214 return 0;
255} 215}
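
Note: a quick hand check of the arithmetic in the new get_cmxucr_reg() against the switch it replaces. For UCC0, cmx = ((0 & 1) << 1) + (0 > 3) = 0, giving CMXUCR1 with a TX-field shift of 16 - 8*(0 & 2) = 16; for UCC2 the shift term becomes 16 - 8*(2 & 2) = 0; for UCC1, cmx = ((1 & 1) << 1) + (1 > 3) = 2, giving CMXUCR3 with shift 16; and for UCC7, cmx = 2 + 1 = 3, giving CMXUCR4 with shift 0. These are the same register/shift pairs the old table produced. The RX clock field sits four bits above the TX field, which is why ucc_set_qe_mux_rxtx() simply adds 4 to the shift for COMM_DIR_RX.
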
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 3df202e8d332..3223acbc39e5 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -30,46 +30,45 @@
30 30
31void ucc_fast_dump_regs(struct ucc_fast_private * uccf) 31void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
32{ 32{
33 printk(KERN_INFO "UCC%d Fast registers:", uccf->uf_info->ucc_num); 33 printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
34 printk(KERN_INFO "Base address: 0x%08x", (u32) uccf->uf_regs); 34 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
35 35
36 printk(KERN_INFO "gumr : addr - 0x%08x, val - 0x%08x", 36 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
37 (u32) & uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); 37 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
38 printk(KERN_INFO "upsmr : addr - 0x%08x, val - 0x%08x", 38 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
39 (u32) & uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); 39 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
40 printk(KERN_INFO "utodr : addr - 0x%08x, val - 0x%04x", 40 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
41 (u32) & uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); 41 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
42 printk(KERN_INFO "udsr : addr - 0x%08x, val - 0x%04x", 42 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
43 (u32) & uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); 43 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
44 printk(KERN_INFO "ucce : addr - 0x%08x, val - 0x%08x", 44 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
45 (u32) & uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); 45 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
46 printk(KERN_INFO "uccm : addr - 0x%08x, val - 0x%08x", 46 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
47 (u32) & uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); 47 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
48 printk(KERN_INFO "uccs : addr - 0x%08x, val - 0x%02x", 48 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
49 (u32) & uccf->uf_regs->uccs, uccf->uf_regs->uccs); 49 &uccf->uf_regs->uccs, uccf->uf_regs->uccs);
50 printk(KERN_INFO "urfb : addr - 0x%08x, val - 0x%08x", 50 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
51 (u32) & uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); 51 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
52 printk(KERN_INFO "urfs : addr - 0x%08x, val - 0x%04x", 52 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
53 (u32) & uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); 53 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
54 printk(KERN_INFO "urfet : addr - 0x%08x, val - 0x%04x", 54 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
55 (u32) & uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); 55 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
56 printk(KERN_INFO "urfset: addr - 0x%08x, val - 0x%04x", 56 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
57 (u32) & uccf->uf_regs->urfset, 57 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
58 in_be16(&uccf->uf_regs->urfset)); 58 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
59 printk(KERN_INFO "utfb : addr - 0x%08x, val - 0x%08x", 59 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
60 (u32) & uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); 60 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
61 printk(KERN_INFO "utfs : addr - 0x%08x, val - 0x%04x", 61 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
62 (u32) & uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); 62 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
63 printk(KERN_INFO "utfet : addr - 0x%08x, val - 0x%04x", 63 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
64 (u32) & uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); 64 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
65 printk(KERN_INFO "utftt : addr - 0x%08x, val - 0x%04x", 65 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
66 (u32) & uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); 66 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
67 printk(KERN_INFO "utpt : addr - 0x%08x, val - 0x%04x", 67 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
68 (u32) & uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); 68 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
69 printk(KERN_INFO "urtry : addr - 0x%08x, val - 0x%08x", 69 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
70 (u32) & uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); 70 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
71 printk(KERN_INFO "guemr : addr - 0x%08x, val - 0x%02x", 71 &uccf->uf_regs->guemr, uccf->uf_regs->guemr);
72 (u32) & uccf->uf_regs->guemr, uccf->uf_regs->guemr);
73} 72}
74EXPORT_SYMBOL(ucc_fast_dump_regs); 73EXPORT_SYMBOL(ucc_fast_dump_regs);
75 74
@@ -149,55 +148,57 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
149 148
150 /* check if the UCC port number is in range. */ 149 /* check if the UCC port number is in range. */
151 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) { 150 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
152 printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__); 151 printk(KERN_ERR "%s: illegal UCC number\n", __FUNCTION__);
153 return -EINVAL; 152 return -EINVAL;
154 } 153 }
155 154
156 /* Check that 'max_rx_buf_length' is properly aligned (4). */ 155 /* Check that 'max_rx_buf_length' is properly aligned (4). */
157 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) { 156 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
158 printk(KERN_ERR "%s: max_rx_buf_length not aligned", __FUNCTION__); 157 printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
158 __FUNCTION__);
159 return -EINVAL; 159 return -EINVAL;
160 } 160 }
161 161
162 /* Validate Virtual Fifo register values */ 162 /* Validate Virtual Fifo register values */
163 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) { 163 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
164 printk(KERN_ERR "%s: urfs is too small", __FUNCTION__); 164 printk(KERN_ERR "%s: urfs is too small\n", __FUNCTION__);
165 return -EINVAL; 165 return -EINVAL;
166 } 166 }
167 167
168 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 168 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
169 printk(KERN_ERR "%s: urfs is not aligned", __FUNCTION__); 169 printk(KERN_ERR "%s: urfs is not aligned\n", __FUNCTION__);
170 return -EINVAL; 170 return -EINVAL;
171 } 171 }
172 172
173 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 173 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
174 printk(KERN_ERR "%s: urfet is not aligned.", __FUNCTION__); 174 printk(KERN_ERR "%s: urfet is not aligned.\n", __FUNCTION__);
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 177
178 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 178 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
179 printk(KERN_ERR "%s: urfset is not aligned", __FUNCTION__); 179 printk(KERN_ERR "%s: urfset is not aligned\n", __FUNCTION__);
180 return -EINVAL; 180 return -EINVAL;
181 } 181 }
182 182
183 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 183 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
184 printk(KERN_ERR "%s: utfs is not aligned", __FUNCTION__); 184 printk(KERN_ERR "%s: utfs is not aligned\n", __FUNCTION__);
185 return -EINVAL; 185 return -EINVAL;
186 } 186 }
187 187
188 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 188 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
189 printk(KERN_ERR "%s: utfet is not aligned", __FUNCTION__); 189 printk(KERN_ERR "%s: utfet is not aligned\n", __FUNCTION__);
190 return -EINVAL; 190 return -EINVAL;
191 } 191 }
192 192
193 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { 193 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
194 printk(KERN_ERR "%s: utftt is not aligned", __FUNCTION__); 194 printk(KERN_ERR "%s: utftt is not aligned\n", __FUNCTION__);
195 return -EINVAL; 195 return -EINVAL;
196 } 196 }
197 197
198 uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL); 198 uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
199 if (!uccf) { 199 if (!uccf) {
200 printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__); 200 printk(KERN_ERR "%s: Cannot allocate private data\n",
201 __FUNCTION__);
201 return -ENOMEM; 202 return -ENOMEM;
202 } 203 }
203 204
@@ -206,7 +207,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
206 /* Set the PHY base address */ 207 /* Set the PHY base address */
207 uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast)); 208 uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
208 if (uccf->uf_regs == NULL) { 209 if (uccf->uf_regs == NULL) {
209 printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__); 210 printk(KERN_ERR "%s: Cannot map UCC registers\n", __FUNCTION__);
210 return -ENOMEM; 211 return -ENOMEM;
211 } 212 }
212 213
@@ -226,18 +227,10 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
226 uccf->rx_discarded = 0; 227 uccf->rx_discarded = 0;
227#endif /* STATISTICS */ 228#endif /* STATISTICS */
228 229
229 /* Init Guemr register */
230 if ((ret = ucc_init_guemr((struct ucc_common *) (uf_regs)))) {
231 printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__);
232 ucc_fast_free(uccf);
233 return ret;
234 }
235
236 /* Set UCC to fast type */ 230 /* Set UCC to fast type */
237 if ((ret = ucc_set_type(uf_info->ucc_num, 231 ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
238 (struct ucc_common *) (uf_regs), 232 if (ret) {
239 UCC_SPEED_TYPE_FAST))) { 233 printk(KERN_ERR "%s: cannot set UCC type\n", __FUNCTION__);
240 printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__);
241 ucc_fast_free(uccf); 234 ucc_fast_free(uccf);
242 return ret; 235 return ret;
243 } 236 }
@@ -276,7 +269,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
276 uccf->ucc_fast_tx_virtual_fifo_base_offset = 269 uccf->ucc_fast_tx_virtual_fifo_base_offset =
277 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 270 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
278 if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { 271 if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
279 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__); 272 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
273 __FUNCTION__);
280 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; 274 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
281 ucc_fast_free(uccf); 275 ucc_fast_free(uccf);
282 return -ENOMEM; 276 return -ENOMEM;
@@ -288,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
288 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, 282 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
289 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 283 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
290 if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { 284 if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
291 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__); 285 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
286 __FUNCTION__);
292 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; 287 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
293 ucc_fast_free(uccf); 288 ucc_fast_free(uccf);
294 return -ENOMEM; 289 return -ENOMEM;
@@ -318,7 +313,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
318 if ((uf_info->rx_clock != QE_CLK_NONE) && 313 if ((uf_info->rx_clock != QE_CLK_NONE) &&
319 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock, 314 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
320 COMM_DIR_RX)) { 315 COMM_DIR_RX)) {
321 printk(KERN_ERR "%s: illegal value for RX clock", 316 printk(KERN_ERR "%s: illegal value for RX clock\n",
322 __FUNCTION__); 317 __FUNCTION__);
323 ucc_fast_free(uccf); 318 ucc_fast_free(uccf);
324 return -EINVAL; 319 return -EINVAL;
@@ -327,7 +322,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
327 if ((uf_info->tx_clock != QE_CLK_NONE) && 322 if ((uf_info->tx_clock != QE_CLK_NONE) &&
328 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock, 323 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
329 COMM_DIR_TX)) { 324 COMM_DIR_TX)) {
330 printk(KERN_ERR "%s: illegal value for TX clock", 325 printk(KERN_ERR "%s: illegal value for TX clock\n",
331 __FUNCTION__); 326 __FUNCTION__);
332 ucc_fast_free(uccf); 327 ucc_fast_free(uccf);
333 return -EINVAL; 328 return -EINVAL;
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 1f65c26ce63f..0174b3aeef8f 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -115,11 +115,15 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
115 out_be32(&us_regs->gumr_l, gumr_l); 115 out_be32(&us_regs->gumr_l, gumr_l);
116} 116}
117 117
118/* Initialize the UCC for Slow operations
119 *
 120 * The caller should initialize the us_info structure before calling.
121 */
118int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) 122int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
119{ 123{
120 struct ucc_slow_private *uccs; 124 struct ucc_slow_private *uccs;
121 u32 i; 125 u32 i;
122 struct ucc_slow *us_regs; 126 struct ucc_slow __iomem *us_regs;
123 u32 gumr; 127 u32 gumr;
124 struct qe_bd *bd; 128 struct qe_bd *bd;
125 u32 id; 129 u32 id;
@@ -131,7 +135,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
131 135
132 /* check if the UCC port number is in range. */ 136 /* check if the UCC port number is in range. */
133 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { 137 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
134 printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__); 138 printk(KERN_ERR "%s: illegal UCC number\n", __FUNCTION__);
135 return -EINVAL; 139 return -EINVAL;
136 } 140 }
137 141
@@ -143,13 +147,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
143 */ 147 */
144 if ((!us_info->rfw) && 148 if ((!us_info->rfw) &&
145 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { 149 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
146 printk(KERN_ERR "max_rx_buf_length not aligned."); 150 printk(KERN_ERR "max_rx_buf_length not aligned.\n");
147 return -EINVAL; 151 return -EINVAL;
148 } 152 }
149 153
150 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); 154 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
151 if (!uccs) { 155 if (!uccs) {
152 printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__); 156 printk(KERN_ERR "%s: Cannot allocate private data\n",
157 __FUNCTION__);
153 return -ENOMEM; 158 return -ENOMEM;
154 } 159 }
155 160
@@ -158,7 +163,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
158 /* Set the PHY base address */ 163 /* Set the PHY base address */
159 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); 164 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
160 if (uccs->us_regs == NULL) { 165 if (uccs->us_regs == NULL) {
161 printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__); 166 printk(KERN_ERR "%s: Cannot map UCC registers\n", __FUNCTION__);
162 return -ENOMEM; 167 return -ENOMEM;
163 } 168 }
164 169
@@ -182,22 +187,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
182 return -ENOMEM; 187 return -ENOMEM;
183 } 188 }
184 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 189 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
185 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED, 190 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
186 uccs->us_pram_offset); 191 uccs->us_pram_offset);
187 192
188 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); 193 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
189 194
190 /* Init Guemr register */
191 if ((ret = ucc_init_guemr((struct ucc_common *) us_regs))) {
192 printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__);
193 ucc_slow_free(uccs);
194 return ret;
195 }
196
197 /* Set UCC to slow type */ 195 /* Set UCC to slow type */
198 if ((ret = ucc_set_type(us_info->ucc_num, 196 ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
199 (struct ucc_common *) us_regs, 197 if (ret) {
200 UCC_SPEED_TYPE_SLOW))) {
 201 printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__); 198 printk(KERN_ERR "%s: cannot set UCC type\n", __FUNCTION__);
202 ucc_slow_free(uccs); 199 ucc_slow_free(uccs);
203 return ret; 200 return ret;
@@ -212,7 +209,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
212 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), 209 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
213 QE_ALIGNMENT_OF_BD); 210 QE_ALIGNMENT_OF_BD);
214 if (IS_ERR_VALUE(uccs->rx_base_offset)) { 211 if (IS_ERR_VALUE(uccs->rx_base_offset)) {
215 printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__); 212 printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __FUNCTION__,
213 us_info->rx_bd_ring_len);
216 uccs->rx_base_offset = 0; 214 uccs->rx_base_offset = 0;
217 ucc_slow_free(uccs); 215 ucc_slow_free(uccs);
218 return -ENOMEM; 216 return -ENOMEM;
@@ -292,12 +290,12 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
292 290
293 /* if the data is in cachable memory, the 'global' */ 291 /* if the data is in cachable memory, the 'global' */
294 /* in the function code should be set. */ 292 /* in the function code should be set. */
295 uccs->us_pram->tfcr = uccs->us_pram->rfcr = 293 uccs->us_pram->tbmr = UCC_BMR_BO_BE;
296 us_info->data_mem_part | QE_BMR_BYTE_ORDER_BO_MOT; 294 uccs->us_pram->rbmr = UCC_BMR_BO_BE;
297 295
298 /* rbase, tbase are offsets from MURAM base */ 296 /* rbase, tbase are offsets from MURAM base */
299 out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset); 297 out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
300 out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset); 298 out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
301 299
302 /* Mux clocking */ 300 /* Mux clocking */
303 /* Grant Support */ 301 /* Grant Support */
@@ -311,7 +309,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
311 /* Rx clock routing */ 309 /* Rx clock routing */
312 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock, 310 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
313 COMM_DIR_RX)) { 311 COMM_DIR_RX)) {
314 printk(KERN_ERR "%s: illegal value for RX clock", 312 printk(KERN_ERR "%s: illegal value for RX clock\n",
315 __FUNCTION__); 313 __FUNCTION__);
316 ucc_slow_free(uccs); 314 ucc_slow_free(uccs);
317 return -EINVAL; 315 return -EINVAL;
@@ -319,7 +317,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
319 /* Tx clock routing */ 317 /* Tx clock routing */
320 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock, 318 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
321 COMM_DIR_TX)) { 319 COMM_DIR_TX)) {
322 printk(KERN_ERR "%s: illegal value for TX clock", 320 printk(KERN_ERR "%s: illegal value for TX clock\n",
323 __FUNCTION__); 321 __FUNCTION__);
324 ucc_slow_free(uccs); 322 ucc_slow_free(uccs);
325 return -EINVAL; 323 return -EINVAL;
@@ -343,8 +341,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
343 command = QE_INIT_TX; 341 command = QE_INIT_TX;
344 else 342 else
345 command = QE_INIT_RX; /* We know at least one is TRUE */ 343 command = QE_INIT_RX; /* We know at least one is TRUE */
346 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); 344
347 qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); 345 qe_issue_cmd(command, id, us_info->protocol, 0);
348 346
349 *uccs_ret = uccs; 347 *uccs_ret = uccs;
350 return 0; 348 return 0;
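Two behavioural changes in ucc_slow_init() are worth noting: QE_ASSIGN_PAGE_TO_DEVICE and the final QE_INIT_* command are now issued with us_info->protocol instead of the hard-coded QE_CR_PROTOCOL_UNSPECIFIED, and rbase/tbase are now loaded with the RX/TX BD ring offsets rather than the parameter RAM offset. A hedged fragment showing how a caller might populate the fields this diff touches (field names are taken from the hunks above; everything else, including the helper name and header path, is illustrative and far from a complete configuration):

#include <linux/kernel.h>
#include <asm/ucc_slow.h>	/* struct ucc_slow_info, ucc_slow_init(); path assumed */

static int example_bring_up_slow_ucc(struct ucc_slow_info *us_info,
				     struct ucc_slow_private **uccs)
{
	us_info->ucc_num = 0;				/* first UCC */
	us_info->protocol = QE_CR_PROTOCOL_UNSPECIFIED;	/* now forwarded to qe_issue_cmd() */
	us_info->rx_clock = QE_CLK_NONE;
	us_info->tx_clock = QE_CLK_NONE;

	return ucc_slow_init(us_info, uccs);
}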
diff --git a/arch/powerpc/sysdev/timer.c b/arch/powerpc/sysdev/timer.c
deleted file mode 100644
index e81e7ec2e799..000000000000
--- a/arch/powerpc/sysdev/timer.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Common code to keep time when machine suspends.
3 *
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * GPLv2
7 */
8
9#include <linux/time.h>
10#include <linux/sysdev.h>
11#include <asm/rtc.h>
12
13static unsigned long suspend_rtc_time;
14
15/*
16 * Reset the time after a sleep.
17 */
18static int timer_resume(struct sys_device *dev)
19{
20 struct timeval tv;
21 struct timespec ts;
22 struct rtc_time cur_rtc_tm;
23 unsigned long cur_rtc_time, diff;
24
25 /* get current RTC time and convert to seconds */
26 get_rtc_time(&cur_rtc_tm);
27 cur_rtc_time = mktime(cur_rtc_tm.tm_year + 1900,
28 cur_rtc_tm.tm_mon + 1,
29 cur_rtc_tm.tm_mday,
30 cur_rtc_tm.tm_hour,
31 cur_rtc_tm.tm_min,
32 cur_rtc_tm.tm_sec);
33
34 diff = cur_rtc_time - suspend_rtc_time;
35
36 /* adjust time of day by seconds that elapsed while
37 * we were suspended */
38 do_gettimeofday(&tv);
39 ts.tv_sec = tv.tv_sec + diff;
40 ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC;
41 do_settimeofday(&ts);
42
43 return 0;
44}
45
46static int timer_suspend(struct sys_device *dev, pm_message_t state)
47{
48 struct rtc_time suspend_rtc_tm;
49 WARN_ON(!ppc_md.get_rtc_time);
50
51 get_rtc_time(&suspend_rtc_tm);
52 suspend_rtc_time = mktime(suspend_rtc_tm.tm_year + 1900,
53 suspend_rtc_tm.tm_mon + 1,
54 suspend_rtc_tm.tm_mday,
55 suspend_rtc_tm.tm_hour,
56 suspend_rtc_tm.tm_min,
57 suspend_rtc_tm.tm_sec);
58
59 return 0;
60}
61
62static struct sysdev_class timer_sysclass = {
63 .resume = timer_resume,
64 .suspend = timer_suspend,
65 set_kset_name("timer"),
66};
67
68static struct sys_device device_timer = {
69 .id = 0,
70 .cls = &timer_sysclass,
71};
72
73static int time_init_device(void)
74{
75 int error = sysdev_class_register(&timer_sysclass);
76 if (!error)
77 error = sysdev_register(&device_timer);
78 return error;
79}
80
81device_initcall(time_init_device);
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 90db8a720fed..31d3d33d91fc 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -52,7 +52,6 @@
52u32 tsi108_pci_cfg_base; 52u32 tsi108_pci_cfg_base;
53static u32 tsi108_pci_cfg_phys; 53static u32 tsi108_pci_cfg_phys;
54u32 tsi108_csr_vir_base; 54u32 tsi108_csr_vir_base;
55static struct device_node *pci_irq_node;
56static struct irq_host *pci_irq_host; 55static struct irq_host *pci_irq_host;
57 56
58extern u32 get_vir_csrbase(void); 57extern u32 get_vir_csrbase(void);
@@ -193,8 +192,8 @@ void tsi108_clear_pci_cfg_error(void)
193} 192}
194 193
195static struct pci_ops tsi108_direct_pci_ops = { 194static struct pci_ops tsi108_direct_pci_ops = {
196 tsi108_direct_read_config, 195 .read = tsi108_direct_read_config,
197 tsi108_direct_write_config 196 .write = tsi108_direct_write_config,
198}; 197};
199 198
200int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary) 199int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
@@ -405,13 +404,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
405 return 0; 404 return 0;
406} 405}
407 406
408static int pci_irq_host_match(struct irq_host *h, struct device_node *node)
409{
410 return pci_irq_node == node;
411}
412
413static struct irq_host_ops pci_irq_host_ops = { 407static struct irq_host_ops pci_irq_host_ops = {
414 .match = pci_irq_host_match,
415 .map = pci_irq_host_map, 408 .map = pci_irq_host_map,
416 .xlate = pci_irq_host_xlate, 409 .xlate = pci_irq_host_xlate,
417}; 410};
@@ -433,10 +426,11 @@ void __init tsi108_pci_int_init(struct device_node *node)
433{ 426{
434 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 427 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
435 428
436 pci_irq_node = of_node_get(node); 429 pci_irq_host = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LEGACY,
437 pci_irq_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &pci_irq_host_ops, 0); 430 0, &pci_irq_host_ops, 0);
438 if (pci_irq_host == NULL) { 431 if (pci_irq_host == NULL) {
439 printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); 432 printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
433 of_node_put(node);
440 return; 434 return;
441 } 435 }
442 436
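The hunk above is one instance of a tree-wide interface change also visible in the uic.c and xilinx_intc.c diffs below: irq_alloc_host() now takes the controller's device node as its first argument, so per-driver .match callbacks (and the node pointers kept around only for them) go away. A minimal sketch of the new calling convention, using only the signature visible in these hunks; every example_* identifier is invented, and dummy_irq_chip/handle_simple_irq merely stand in for a real driver's chip and flow handler:

#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/prom.h>

/* Illustrative map callback: attach a placeholder chip and flow handler. */
static int example_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	set_irq_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static struct irq_host_ops example_host_ops = {
	/* No .match method: matching against the node is now generic. */
	.map = example_host_map,
};

static struct irq_host * __init example_host_init(struct device_node *np)
{
	/* The node reference is handed to the host at allocation time. */
	return irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR, 32,
			      &example_host_ops, -1);
}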
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 89059895a20d..5149716c734d 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -24,6 +24,7 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/kernel_stat.h>
27#include <asm/irq.h> 28#include <asm/irq.h>
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/prom.h> 30#include <asm/prom.h>
@@ -55,9 +56,6 @@ struct uic {
55 56
56 /* For secondary UICs, the cascade interrupt's irqaction */ 57 /* For secondary UICs, the cascade interrupt's irqaction */
57 struct irqaction cascade; 58 struct irqaction cascade;
58
59 /* The device node of the interrupt controller */
60 struct device_node *of_node;
61}; 59};
62 60
63static void uic_unmask_irq(unsigned int virq) 61static void uic_unmask_irq(unsigned int virq)
@@ -142,7 +140,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
142 140
143 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 141 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
144 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 142 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
145 if (trigger) 143 if (!trigger)
146 desc->status |= IRQ_LEVEL; 144 desc->status |= IRQ_LEVEL;
147 145
148 spin_unlock_irqrestore(&uic->lock, flags); 146 spin_unlock_irqrestore(&uic->lock, flags);
@@ -159,10 +157,62 @@ static struct irq_chip uic_irq_chip = {
159 .set_type = uic_set_irq_type, 157 .set_type = uic_set_irq_type,
160}; 158};
161 159
162static int uic_host_match(struct irq_host *h, struct device_node *node) 160/**
161 * handle_uic_irq - irq flow handler for UIC
162 * @irq: the interrupt number
163 * @desc: the interrupt description structure for this irq
164 *
 165 * This is a modified version of the generic handle_level_irq() suitable
166 * for the UIC. On the UIC, acking (i.e. clearing the SR bit) a level
167 * irq will have no effect if the interrupt is still asserted by the
168 * device, even if the interrupt is already masked. Therefore, unlike
169 * the standard handle_level_irq(), we must ack the interrupt *after*
170 * invoking the ISR (which should have de-asserted the interrupt in
171 * the external source). For edge interrupts we ack at the beginning
172 * instead of the end, to keep the window in which we can miss an
173 * interrupt as small as possible.
174 */
175void fastcall handle_uic_irq(unsigned int irq, struct irq_desc *desc)
163{ 176{
164 struct uic *uic = h->host_data; 177 unsigned int cpu = smp_processor_id();
165 return uic->of_node == node; 178 struct irqaction *action;
179 irqreturn_t action_ret;
180
181 spin_lock(&desc->lock);
182 if (desc->status & IRQ_LEVEL)
183 desc->chip->mask(irq);
184 else
185 desc->chip->mask_ack(irq);
186
187 if (unlikely(desc->status & IRQ_INPROGRESS))
188 goto out_unlock;
189 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
190 kstat_cpu(cpu).irqs[irq]++;
191
192 /*
193 * If its disabled or no action available
194 * keep it masked and get out of here
195 */
196 action = desc->action;
197 if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
198 desc->status |= IRQ_PENDING;
199 goto out_unlock;
200 }
201
202 desc->status |= IRQ_INPROGRESS;
203 desc->status &= ~IRQ_PENDING;
204 spin_unlock(&desc->lock);
205
206 action_ret = handle_IRQ_event(irq, action);
207
208 spin_lock(&desc->lock);
209 desc->status &= ~IRQ_INPROGRESS;
210 if (desc->status & IRQ_LEVEL)
211 desc->chip->ack(irq);
212 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
213 desc->chip->unmask(irq);
214out_unlock:
215 spin_unlock(&desc->lock);
166} 216}
167 217
168static int uic_host_map(struct irq_host *h, unsigned int virq, 218static int uic_host_map(struct irq_host *h, unsigned int virq,
@@ -173,7 +223,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
173 set_irq_chip_data(virq, uic); 223 set_irq_chip_data(virq, uic);
174 /* Despite the name, handle_level_irq() works for both level 224 /* Despite the name, handle_level_irq() works for both level
175 * and edge irqs on UIC. FIXME: check this is correct */ 225 * and edge irqs on UIC. FIXME: check this is correct */
176 set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); 226 set_irq_chip_and_handler(virq, &uic_irq_chip, handle_uic_irq);
177 227
178 /* Set default irq type */ 228 /* Set default irq type */
179 set_irq_type(virq, IRQ_TYPE_NONE); 229 set_irq_type(virq, IRQ_TYPE_NONE);
@@ -194,7 +244,6 @@ static int uic_host_xlate(struct irq_host *h, struct device_node *ct,
194} 244}
195 245
196static struct irq_host_ops uic_host_ops = { 246static struct irq_host_ops uic_host_ops = {
197 .match = uic_host_match,
198 .map = uic_host_map, 247 .map = uic_host_map,
199 .xlate = uic_host_xlate, 248 .xlate = uic_host_xlate,
200}; 249};
@@ -207,6 +256,9 @@ irqreturn_t uic_cascade(int virq, void *data)
207 int subvirq; 256 int subvirq;
208 257
209 msr = mfdcr(uic->dcrbase + UIC_MSR); 258 msr = mfdcr(uic->dcrbase + UIC_MSR);
259 if (!msr) /* spurious interrupt */
260 return IRQ_HANDLED;
261
210 src = 32 - ffs(msr); 262 src = 32 - ffs(msr);
211 263
212 subvirq = irq_linear_revmap(uic->irqhost, src); 264 subvirq = irq_linear_revmap(uic->irqhost, src);
@@ -229,7 +281,6 @@ static struct uic * __init uic_init_one(struct device_node *node)
229 281
230 memset(uic, 0, sizeof(*uic)); 282 memset(uic, 0, sizeof(*uic));
231 spin_lock_init(&uic->lock); 283 spin_lock_init(&uic->lock);
232 uic->of_node = of_node_get(node);
233 indexp = of_get_property(node, "cell-index", &len); 284 indexp = of_get_property(node, "cell-index", &len);
234 if (!indexp || (len != sizeof(u32))) { 285 if (!indexp || (len != sizeof(u32))) {
235 printk(KERN_ERR "uic: Device node %s has missing or invalid " 286 printk(KERN_ERR "uic: Device node %s has missing or invalid "
@@ -246,8 +297,8 @@ static struct uic * __init uic_init_one(struct device_node *node)
246 } 297 }
247 uic->dcrbase = *dcrreg; 298 uic->dcrbase = *dcrreg;
248 299
249 uic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, NR_UIC_INTS, 300 uic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
250 &uic_host_ops, -1); 301 NR_UIC_INTS, &uic_host_ops, -1);
251 if (! uic->irqhost) { 302 if (! uic->irqhost) {
252 of_node_put(node); 303 of_node_put(node);
253 return NULL; /* FIXME: panic? */ 304 return NULL; /* FIXME: panic? */
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
new file mode 100644
index 000000000000..c2f17cc43dfa
--- /dev/null
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -0,0 +1,151 @@
1/*
2 * Interrupt controller driver for Xilinx Virtex FPGAs
3 *
4 * Copyright (C) 2007 Secret Lab Technologies Ltd.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 *
10 */
11
12/*
13 * This is a driver for the interrupt controller typically found in
14 * Xilinx Virtex FPGA designs.
15 *
 16 * The interrupt sense levels are hard-coded into the FPGA design,
 17 * typically with a 1:1 relationship between irq lines and devices (no shared
18 * irq lines). Therefore, this driver does not attempt to handle edge
19 * and level interrupts differently.
20 */
21#undef DEBUG
22
23#include <linux/kernel.h>
24#include <linux/irq.h>
25#include <linux/of.h>
26#include <asm/io.h>
27#include <asm/processor.h>
28#include <asm/irq.h>
29
30/*
31 * INTC Registers
32 */
33#define XINTC_ISR 0 /* Interrupt Status */
34#define XINTC_IPR 4 /* Interrupt Pending */
35#define XINTC_IER 8 /* Interrupt Enable */
36#define XINTC_IAR 12 /* Interrupt Acknowledge */
37#define XINTC_SIE 16 /* Set Interrupt Enable bits */
38#define XINTC_CIE 20 /* Clear Interrupt Enable bits */
39#define XINTC_IVR 24 /* Interrupt Vector */
40#define XINTC_MER 28 /* Master Enable */
41
42static struct irq_host *master_irqhost;
43
44/*
45 * IRQ Chip operations
46 */
47static void xilinx_intc_mask(unsigned int virq)
48{
49 int irq = virq_to_hw(virq);
50 void * regs = get_irq_chip_data(virq);
51 pr_debug("mask: %d\n", irq);
52 out_be32(regs + XINTC_CIE, 1 << irq);
53}
54
55static void xilinx_intc_unmask(unsigned int virq)
56{
57 int irq = virq_to_hw(virq);
58 void * regs = get_irq_chip_data(virq);
59 pr_debug("unmask: %d\n", irq);
60 out_be32(regs + XINTC_SIE, 1 << irq);
61}
62
63static void xilinx_intc_ack(unsigned int virq)
64{
65 int irq = virq_to_hw(virq);
66 void * regs = get_irq_chip_data(virq);
67 pr_debug("ack: %d\n", irq);
68 out_be32(regs + XINTC_IAR, 1 << irq);
69}
70
71static struct irq_chip xilinx_intc_irqchip = {
72 .typename = "Xilinx INTC",
73 .mask = xilinx_intc_mask,
74 .unmask = xilinx_intc_unmask,
75 .ack = xilinx_intc_ack,
76};
77
78/*
79 * IRQ Host operations
80 */
81static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
82 irq_hw_number_t irq)
83{
84 set_irq_chip_data(virq, h->host_data);
85 set_irq_chip_and_handler(virq, &xilinx_intc_irqchip, handle_level_irq);
86 set_irq_type(virq, IRQ_TYPE_NONE);
87 return 0;
88}
89
90static struct irq_host_ops xilinx_intc_ops = {
91 .map = xilinx_intc_map,
92};
93
94struct irq_host * __init
95xilinx_intc_init(struct device_node *np)
96{
97 struct irq_host * irq;
98 struct resource res;
99 void * regs;
100 int rc;
101
102 /* Find and map the intc registers */
103 rc = of_address_to_resource(np, 0, &res);
104 if (rc) {
105 printk(KERN_ERR __FILE__ ": of_address_to_resource() failed\n");
106 return NULL;
107 }
108 regs = ioremap(res.start, 32);
109
110 printk(KERN_INFO "Xilinx intc at 0x%08X mapped to 0x%p\n",
111 res.start, regs);
112
113 /* Setup interrupt controller */
114 out_be32(regs + XINTC_IER, 0); /* disable all irqs */
115 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
116 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
117
118 /* Allocate and initialize an irq_host structure. */
119 irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 32, &xilinx_intc_ops, -1);
120 if (!irq)
121 panic(__FILE__ ": Cannot allocate IRQ host\n");
122 irq->host_data = regs;
123 return irq;
124}
125
126int xilinx_intc_get_irq(void)
127{
128 void * regs = master_irqhost->host_data;
129 pr_debug("get_irq:\n");
130 return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
131}
132
133void __init xilinx_intc_init_tree(void)
134{
135 struct device_node *np;
136
137 /* find top level interrupt controller */
138 for_each_compatible_node(np, NULL, "xilinx,intc") {
139 if (!of_get_property(np, "interrupts", NULL))
140 break;
141 }
142
143 /* xilinx interrupt controller needs to be top level */
144 BUG_ON(!np);
145
146 master_irqhost = xilinx_intc_init(np);
147 BUG_ON(!master_irqhost);
148
149 irq_set_default_host(master_irqhost);
150 of_node_put(np);
151}
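As a usage note for the new driver: a Virtex board port would typically point its machine description at the two entry points defined above. A hedged sketch of that wiring, assuming the usual ppc_md hooks; the machine name, probe logic and any shared header for the prototypes are not part of this commit:

#include <asm/machdep.h>

/* Prototypes as defined in xilinx_intc.c above (a shared header is assumed
 * to exist in the real tree). */
extern void xilinx_intc_init_tree(void);
extern int xilinx_intc_get_irq(void);

/* Thin wrapper so the return type matches the ppc_md.get_irq prototype. */
static unsigned int example_virtex_get_irq(void)
{
	return xilinx_intc_get_irq();
}

define_machine(example_virtex) {
	.name		= "Example Xilinx Virtex board",
	/* .probe and the remaining platform callbacks omitted for brevity. */
	.init_IRQ	= xilinx_intc_init_tree,
	.get_irq	= example_virtex_get_irq,
};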