aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/sysdev
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/powerpc/sysdev
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--arch/powerpc/sysdev/Kconfig17
-rw-r--r--arch/powerpc/sysdev/Makefile15
-rw-r--r--arch/powerpc/sysdev/axonram.c15
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.c9
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.h2
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm_priv.h2
-rw-r--r--arch/powerpc/sysdev/cpm1.c26
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c77
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c79
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h101
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c159
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c230
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c249
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c169
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c78
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h18
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c9
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c380
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c181
-rw-r--r--arch/powerpc/sysdev/i8259.c59
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c2
-rw-r--r--arch/powerpc/sysdev/ipic.c112
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c41
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c122
-rw-r--r--arch/powerpc/sysdev/mpic.c505
-rw-r--r--arch/powerpc/sysdev/mpic.h5
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c38
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c34
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c3
-rw-r--r--arch/powerpc/sysdev/mv64x60_pic.c51
-rw-r--r--arch/powerpc/sysdev/pmi.c11
-rw-r--r--arch/powerpc/sysdev/ppc4xx_cpm.c346
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c276
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.h2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c7
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c39
-rw-r--r--arch/powerpc/sysdev/scom.c192
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c9
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c45
-rw-r--r--arch/powerpc/sysdev/uic.c84
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig13
-rw-r--r--arch/powerpc/sysdev/xics/Makefile6
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c164
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c293
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c240
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c443
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c64
48 files changed, 3944 insertions, 1080 deletions
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 396582835cb5..7b4df37ac381 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -7,8 +7,25 @@ config PPC4xx_PCI_EXPRESS
7 depends on PCI && 4xx 7 depends on PCI && 4xx
8 default n 8 default n
9 9
10config PPC4xx_MSI
11 bool
12 depends on PCI_MSI
13 depends on PCI && 4xx
14 default n
15
10config PPC_MSI_BITMAP 16config PPC_MSI_BITMAP
11 bool 17 bool
12 depends on PCI_MSI 18 depends on PCI_MSI
13 default y if MPIC 19 default y if MPIC
14 default y if FSL_PCI 20 default y if FSL_PCI
21 default y if PPC4xx_MSI
22
23source "arch/powerpc/sysdev/xics/Kconfig"
24
25config PPC_SCOM
26 bool
27
28config SCOM_DEBUGFS
29 bool "Expose SCOM controllers via debugfs"
30 depends on PPC_SCOM
31 default n
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5642924fb9fb..0efa990e3344 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -1,8 +1,6 @@
1subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 1subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
2 2
3ifeq ($(CONFIG_PPC64),y) 3ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
4EXTRA_CFLAGS += -mno-minimal-toc
5endif
6 4
7mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o 5mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
8obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) 6obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
@@ -20,8 +18,9 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
20obj-$(CONFIG_FSL_LBC) += fsl_lbc.o 18obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
21obj-$(CONFIG_FSL_GTM) += fsl_gtm.o 19obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
22obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o 20obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
21obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
23obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o 22obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
24obj-$(CONFIG_RAPIDIO) += fsl_rio.o 23obj-$(CONFIG_FSL_RIO) += fsl_rio.o
25obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 24obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
26obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ 25obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
27obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 26obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
@@ -42,6 +41,8 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
42ifeq ($(CONFIG_PCI),y) 41ifeq ($(CONFIG_PCI),y)
43obj-$(CONFIG_4xx) += ppc4xx_pci.o 42obj-$(CONFIG_4xx) += ppc4xx_pci.o
44endif 43endif
44obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o
45obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
45obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o 46obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
46 47
47obj-$(CONFIG_CPM) += cpm_common.o 48obj-$(CONFIG_CPM) += cpm_common.o
@@ -57,3 +58,9 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
57ifeq ($(CONFIG_SUSPEND),y) 58ifeq ($(CONFIG_SUSPEND),y)
58obj-$(CONFIG_6xx) += 6xx-suspend.o 59obj-$(CONFIG_6xx) += 6xx-suspend.o
59endif 60endif
61
62obj-$(CONFIG_PPC_SCOM) += scom.o
63
64subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
65
66obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 2659a60bd7b8..bd0d54060b94 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -95,7 +95,7 @@ axon_ram_irq_handler(int irq, void *dev)
95 95
96 BUG_ON(!bank); 96 BUG_ON(!bank);
97 97
98 dev_err(&device->dev, "Correctable memory error occured\n"); 98 dev_err(&device->dev, "Correctable memory error occurred\n");
99 bank->ecc_counter++; 99 bank->ecc_counter++;
100 return IRQ_HANDLED; 100 return IRQ_HANDLED;
101} 101}
@@ -172,10 +172,9 @@ static const struct block_device_operations axon_ram_devops = {
172 172
173/** 173/**
174 * axon_ram_probe - probe() method for platform driver 174 * axon_ram_probe - probe() method for platform driver
175 * @device, @device_id: see of_platform_driver method 175 * @device: see platform_driver method
176 */ 176 */
177static int axon_ram_probe(struct platform_device *device, 177static int axon_ram_probe(struct platform_device *device)
178 const struct of_device_id *device_id)
179{ 178{
180 static int axon_ram_bank_id = -1; 179 static int axon_ram_bank_id = -1;
181 struct axon_ram_bank *bank; 180 struct axon_ram_bank *bank;
@@ -217,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device,
217 AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); 216 AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
218 217
219 bank->ph_addr = resource.start; 218 bank->ph_addr = resource.start;
220 bank->io_addr = (unsigned long) ioremap_flags( 219 bank->io_addr = (unsigned long) ioremap_prot(
221 bank->ph_addr, bank->size, _PAGE_NO_CACHE); 220 bank->ph_addr, bank->size, _PAGE_NO_CACHE);
222 if (bank->io_addr == 0) { 221 if (bank->io_addr == 0) {
223 dev_err(&device->dev, "ioremap() failed\n"); 222 dev_err(&device->dev, "ioremap() failed\n");
@@ -326,7 +325,7 @@ static struct of_device_id axon_ram_device_id[] = {
326 {} 325 {}
327}; 326};
328 327
329static struct of_platform_driver axon_ram_driver = { 328static struct platform_driver axon_ram_driver = {
330 .probe = axon_ram_probe, 329 .probe = axon_ram_probe,
331 .remove = axon_ram_remove, 330 .remove = axon_ram_remove,
332 .driver = { 331 .driver = {
@@ -350,7 +349,7 @@ axon_ram_init(void)
350 } 349 }
351 azfs_minor = 0; 350 azfs_minor = 0;
352 351
353 return of_register_platform_driver(&axon_ram_driver); 352 return platform_driver_register(&axon_ram_driver);
354} 353}
355 354
356/** 355/**
@@ -359,7 +358,7 @@ axon_ram_init(void)
359static void __exit 358static void __exit
360axon_ram_exit(void) 359axon_ram_exit(void)
361{ 360{
362 of_unregister_platform_driver(&axon_ram_driver); 361 platform_driver_unregister(&axon_ram_driver);
363 unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); 362 unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME);
364} 363}
365 364
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 650256115064..b3fbb271be87 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -365,8 +365,7 @@ bcom_engine_cleanup(void)
365/* OF platform driver */ 365/* OF platform driver */
366/* ======================================================================== */ 366/* ======================================================================== */
367 367
368static int __devinit mpc52xx_bcom_probe(struct platform_device *op, 368static int __devinit mpc52xx_bcom_probe(struct platform_device *op)
369 const struct of_device_id *match)
370{ 369{
371 struct device_node *ofn_sram; 370 struct device_node *ofn_sram;
372 struct resource res_bcom; 371 struct resource res_bcom;
@@ -492,7 +491,7 @@ static struct of_device_id mpc52xx_bcom_of_match[] = {
492MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); 491MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
493 492
494 493
495static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { 494static struct platform_driver mpc52xx_bcom_of_platform_driver = {
496 .probe = mpc52xx_bcom_probe, 495 .probe = mpc52xx_bcom_probe,
497 .remove = mpc52xx_bcom_remove, 496 .remove = mpc52xx_bcom_remove,
498 .driver = { 497 .driver = {
@@ -510,13 +509,13 @@ static struct of_platform_driver mpc52xx_bcom_of_platform_driver = {
510static int __init 509static int __init
511mpc52xx_bcom_init(void) 510mpc52xx_bcom_init(void)
512{ 511{
513 return of_register_platform_driver(&mpc52xx_bcom_of_platform_driver); 512 return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
514} 513}
515 514
516static void __exit 515static void __exit
517mpc52xx_bcom_exit(void) 516mpc52xx_bcom_exit(void)
518{ 517{
519 of_unregister_platform_driver(&mpc52xx_bcom_of_platform_driver); 518 platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
520} 519}
521 520
522/* If we're not a module, we must make sure everything is setup before */ 521/* If we're not a module, we must make sure everything is setup before */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
index 23a95f80dfdb..a0e2e6b19b57 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
@@ -20,7 +20,7 @@
20 * struct bcom_bd - Structure describing a generic BestComm buffer descriptor 20 * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
21 * @status: The current status of this buffer. Exact meaning depends on the 21 * @status: The current status of this buffer. Exact meaning depends on the
22 * task type 22 * task type
23 * @data: An array of u32 extra data. Size of array is task dependant. 23 * @data: An array of u32 extra data. Size of array is task dependent.
24 * 24 *
25 * Note: Don't dereference a bcom_bd pointer as an array. The size of the 25 * Note: Don't dereference a bcom_bd pointer as an array. The size of the
26 * bcom_bd is variable. Use bcom_get_bd() instead. 26 * bcom_bd is variable. Use bcom_get_bd() instead.
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
index eb0d1c883c31..3b52f3ffbdf8 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
@@ -97,7 +97,7 @@ struct bcom_task_header {
97 u8 reserved[8]; 97 u8 reserved[8];
98}; 98};
99 99
100/* Descriptors stucture & co */ 100/* Descriptors structure & co */
101#define BCOM_DESC_NOP 0x000001f8 101#define BCOM_DESC_NOP 0x000001f8
102#define BCOM_LCD_MASK 0x80000000 102#define BCOM_LCD_MASK 0x80000000
103#define BCOM_DRD_EXTENDED 0x40000000 103#define BCOM_DRD_EXTENDED 0x40000000
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 00852124ff4a..350787c83e22 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -56,32 +56,32 @@ static cpic8xx_t __iomem *cpic_reg;
56 56
57static struct irq_host *cpm_pic_host; 57static struct irq_host *cpm_pic_host;
58 58
59static void cpm_mask_irq(unsigned int irq) 59static void cpm_mask_irq(struct irq_data *d)
60{ 60{
61 unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; 61 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
62 62
63 clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); 63 clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
64} 64}
65 65
66static void cpm_unmask_irq(unsigned int irq) 66static void cpm_unmask_irq(struct irq_data *d)
67{ 67{
68 unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; 68 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
69 69
70 setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); 70 setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
71} 71}
72 72
73static void cpm_end_irq(unsigned int irq) 73static void cpm_end_irq(struct irq_data *d)
74{ 74{
75 unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; 75 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
76 76
77 out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); 77 out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
78} 78}
79 79
80static struct irq_chip cpm_pic = { 80static struct irq_chip cpm_pic = {
81 .name = "CPM PIC", 81 .name = "CPM PIC",
82 .mask = cpm_mask_irq, 82 .irq_mask = cpm_mask_irq,
83 .unmask = cpm_unmask_irq, 83 .irq_unmask = cpm_unmask_irq,
84 .eoi = cpm_end_irq, 84 .irq_eoi = cpm_end_irq,
85}; 85};
86 86
87int cpm_get_irq(void) 87int cpm_get_irq(void)
@@ -103,8 +103,8 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
103{ 103{
104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); 104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
105 105
106 irq_to_desc(virq)->status |= IRQ_LEVEL; 106 irq_set_status_flags(virq, IRQ_LEVEL);
107 set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); 107 irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
108 return 0; 108 return 0;
109} 109}
110 110
@@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void)
157 goto end; 157 goto end;
158 158
159 /* Initialize the CPM interrupt controller. */ 159 /* Initialize the CPM interrupt controller. */
160 hwirq = (unsigned int)irq_map[sirq].hwirq; 160 hwirq = (unsigned int)virq_to_hw(sirq);
161 out_be32(&cpic_reg->cpic_cicr, 161 out_be32(&cpic_reg->cpic_cicr,
162 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | 162 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
163 ((hwirq/2) << 13) | CICR_HP_MASK); 163 ((hwirq/2) << 13) | CICR_HP_MASK);
@@ -223,7 +223,7 @@ void __init cpm_reset(void)
223 223
224 /* Set SDMA Bus Request priority 5. 224 /* Set SDMA Bus Request priority 5.
225 * On 860T, this also enables FEC priority 6. I am not sure 225 * On 860T, this also enables FEC priority 6. I am not sure
226 * this is what we realy want for some applications, but the 226 * this is what we really want for some applications, but the
227 * manual recommends it. 227 * manual recommends it.
228 * Bit 25, FAM can also be set to use FEC aggressive mode (860T). 228 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
229 */ 229 */
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index fcea4ff825dd..bcab50e2a9eb 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -78,10 +78,10 @@ static const u_char irq_to_siubit[] = {
78 24, 25, 26, 27, 28, 29, 30, 31, 78 24, 25, 26, 27, 28, 29, 30, 31,
79}; 79};
80 80
81static void cpm2_mask_irq(unsigned int virq) 81static void cpm2_mask_irq(struct irq_data *d)
82{ 82{
83 int bit, word; 83 int bit, word;
84 unsigned int irq_nr = virq_to_hw(virq); 84 unsigned int irq_nr = irqd_to_hwirq(d);
85 85
86 bit = irq_to_siubit[irq_nr]; 86 bit = irq_to_siubit[irq_nr];
87 word = irq_to_siureg[irq_nr]; 87 word = irq_to_siureg[irq_nr];
@@ -90,10 +90,10 @@ static void cpm2_mask_irq(unsigned int virq)
90 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); 90 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
91} 91}
92 92
93static void cpm2_unmask_irq(unsigned int virq) 93static void cpm2_unmask_irq(struct irq_data *d)
94{ 94{
95 int bit, word; 95 int bit, word;
96 unsigned int irq_nr = virq_to_hw(virq); 96 unsigned int irq_nr = irqd_to_hwirq(d);
97 97
98 bit = irq_to_siubit[irq_nr]; 98 bit = irq_to_siubit[irq_nr];
99 word = irq_to_siureg[irq_nr]; 99 word = irq_to_siureg[irq_nr];
@@ -102,10 +102,10 @@ static void cpm2_unmask_irq(unsigned int virq)
102 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); 102 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
103} 103}
104 104
105static void cpm2_ack(unsigned int virq) 105static void cpm2_ack(struct irq_data *d)
106{ 106{
107 int bit, word; 107 int bit, word;
108 unsigned int irq_nr = virq_to_hw(virq); 108 unsigned int irq_nr = irqd_to_hwirq(d);
109 109
110 bit = irq_to_siubit[irq_nr]; 110 bit = irq_to_siubit[irq_nr];
111 word = irq_to_siureg[irq_nr]; 111 word = irq_to_siureg[irq_nr];
@@ -113,34 +113,27 @@ static void cpm2_ack(unsigned int virq)
113 out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit); 113 out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit);
114} 114}
115 115
116static void cpm2_end_irq(unsigned int virq) 116static void cpm2_end_irq(struct irq_data *d)
117{ 117{
118 struct irq_desc *desc;
119 int bit, word; 118 int bit, word;
120 unsigned int irq_nr = virq_to_hw(virq); 119 unsigned int irq_nr = irqd_to_hwirq(d);
121 120
122 desc = irq_to_desc(irq_nr); 121 bit = irq_to_siubit[irq_nr];
123 if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)) 122 word = irq_to_siureg[irq_nr];
124 && desc->action) {
125
126 bit = irq_to_siubit[irq_nr];
127 word = irq_to_siureg[irq_nr];
128 123
129 ppc_cached_irq_mask[word] |= 1 << bit; 124 ppc_cached_irq_mask[word] |= 1 << bit;
130 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); 125 out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
131 126
132 /* 127 /*
133 * Work around large numbers of spurious IRQs on PowerPC 82xx 128 * Work around large numbers of spurious IRQs on PowerPC 82xx
134 * systems. 129 * systems.
135 */ 130 */
136 mb(); 131 mb();
137 }
138} 132}
139 133
140static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) 134static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
141{ 135{
142 unsigned int src = virq_to_hw(virq); 136 unsigned int src = irqd_to_hwirq(d);
143 struct irq_desc *desc = irq_to_desc(virq);
144 unsigned int vold, vnew, edibit; 137 unsigned int vold, vnew, edibit;
145 138
146 /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or 139 /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
@@ -162,13 +155,11 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
162 goto err_sense; 155 goto err_sense;
163 } 156 }
164 157
165 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 158 irqd_set_trigger_type(d, flow_type);
166 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 159 if (flow_type & IRQ_TYPE_LEVEL_LOW)
167 if (flow_type & IRQ_TYPE_LEVEL_LOW) { 160 __irq_set_handler_locked(d->irq, handle_level_irq);
168 desc->status |= IRQ_LEVEL; 161 else
169 desc->handle_irq = handle_level_irq; 162 __irq_set_handler_locked(d->irq, handle_edge_irq);
170 } else
171 desc->handle_irq = handle_edge_irq;
172 163
173 /* internal IRQ senses are LEVEL_LOW 164 /* internal IRQ senses are LEVEL_LOW
174 * EXT IRQ and Port C IRQ senses are programmable 165 * EXT IRQ and Port C IRQ senses are programmable
@@ -179,7 +170,8 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
179 if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) 170 if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
180 edibit = (31 - (CPM2_IRQ_PORTC0 - src)); 171 edibit = (31 - (CPM2_IRQ_PORTC0 - src));
181 else 172 else
182 return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL; 173 return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
174 IRQ_SET_MASK_OK_NOCOPY : -EINVAL;
183 175
184 vold = in_be32(&cpm2_intctl->ic_siexr); 176 vold = in_be32(&cpm2_intctl->ic_siexr);
185 177
@@ -190,7 +182,7 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
190 182
191 if (vold != vnew) 183 if (vold != vnew)
192 out_be32(&cpm2_intctl->ic_siexr, vnew); 184 out_be32(&cpm2_intctl->ic_siexr, vnew);
193 return 0; 185 return IRQ_SET_MASK_OK_NOCOPY;
194 186
195err_sense: 187err_sense:
196 pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); 188 pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
@@ -199,11 +191,12 @@ err_sense:
199 191
200static struct irq_chip cpm2_pic = { 192static struct irq_chip cpm2_pic = {
201 .name = "CPM2 SIU", 193 .name = "CPM2 SIU",
202 .mask = cpm2_mask_irq, 194 .irq_mask = cpm2_mask_irq,
203 .unmask = cpm2_unmask_irq, 195 .irq_unmask = cpm2_unmask_irq,
204 .ack = cpm2_ack, 196 .irq_ack = cpm2_ack,
205 .eoi = cpm2_end_irq, 197 .irq_eoi = cpm2_end_irq,
206 .set_type = cpm2_set_irq_type, 198 .irq_set_type = cpm2_set_irq_type,
199 .flags = IRQCHIP_EOI_IF_HANDLED,
207}; 200};
208 201
209unsigned int cpm2_get_irq(void) 202unsigned int cpm2_get_irq(void)
@@ -226,8 +219,8 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
226{ 219{
227 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); 220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
228 221
229 irq_to_desc(virq)->status |= IRQ_LEVEL; 222 irq_set_status_flags(virq, IRQ_LEVEL);
230 set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq); 223 irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
231 return 0; 224 return 0;
232} 225}
233 226
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 559db2b846a9..8e9e06a7ca59 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -70,6 +70,8 @@ static int iommu_table_dart_inited;
70static int dart_dirty; 70static int dart_dirty;
71static int dart_is_u4; 71static int dart_is_u4;
72 72
73#define DART_U4_BYPASS_BASE 0x8000000000ull
74
73#define DBG(...) 75#define DBG(...)
74 76
75static inline void dart_tlb_invalidate_all(void) 77static inline void dart_tlb_invalidate_all(void)
@@ -292,27 +294,67 @@ static void iommu_table_dart_setup(void)
292 set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); 294 set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
293} 295}
294 296
295static void pci_dma_dev_setup_dart(struct pci_dev *dev) 297static void dma_dev_setup_dart(struct device *dev)
296{ 298{
297 /* We only have one iommu table on the mac for now, which makes 299 /* We only have one iommu table on the mac for now, which makes
298 * things simple. Setup all PCI devices to point to this table 300 * things simple. Setup all PCI devices to point to this table
299 */ 301 */
300 set_iommu_table_base(&dev->dev, &iommu_table_dart); 302 if (get_dma_ops(dev) == &dma_direct_ops)
303 set_dma_offset(dev, DART_U4_BYPASS_BASE);
304 else
305 set_iommu_table_base(dev, &iommu_table_dart);
301} 306}
302 307
303static void pci_dma_bus_setup_dart(struct pci_bus *bus) 308static void pci_dma_dev_setup_dart(struct pci_dev *dev)
304{ 309{
305 struct device_node *dn; 310 dma_dev_setup_dart(&dev->dev);
311}
306 312
313static void pci_dma_bus_setup_dart(struct pci_bus *bus)
314{
307 if (!iommu_table_dart_inited) { 315 if (!iommu_table_dart_inited) {
308 iommu_table_dart_inited = 1; 316 iommu_table_dart_inited = 1;
309 iommu_table_dart_setup(); 317 iommu_table_dart_setup();
310 } 318 }
319}
311 320
312 dn = pci_bus_to_OF_node(bus); 321static bool dart_device_on_pcie(struct device *dev)
322{
323 struct device_node *np = of_node_get(dev->of_node);
324
325 while(np) {
326 if (of_device_is_compatible(np, "U4-pcie") ||
327 of_device_is_compatible(np, "u4-pcie")) {
328 of_node_put(np);
329 return true;
330 }
331 np = of_get_next_parent(np);
332 }
333 return false;
334}
313 335
314 if (dn) 336static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
315 PCI_DN(dn)->iommu_table = &iommu_table_dart; 337{
338 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
339 return -EIO;
340
341 /* U4 supports a DART bypass, we use it for 64-bit capable
342 * devices to improve performances. However, that only works
343 * for devices connected to U4 own PCIe interface, not bridged
344 * through hypertransport. We need the device to support at
345 * least 40 bits of addresses.
346 */
347 if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
348 dev_info(dev, "Using 64-bit DMA iommu bypass\n");
349 set_dma_ops(dev, &dma_direct_ops);
350 } else {
351 dev_info(dev, "Using 32-bit DMA via iommu\n");
352 set_dma_ops(dev, &dma_iommu_ops);
353 }
354 dma_dev_setup_dart(dev);
355
356 *dev->dma_mask = dma_mask;
357 return 0;
316} 358}
317 359
318void __init iommu_init_early_dart(void) 360void __init iommu_init_early_dart(void)
@@ -324,24 +366,29 @@ void __init iommu_init_early_dart(void)
324 if (dn == NULL) { 366 if (dn == NULL) {
325 dn = of_find_compatible_node(NULL, "dart", "u4-dart"); 367 dn = of_find_compatible_node(NULL, "dart", "u4-dart");
326 if (dn == NULL) 368 if (dn == NULL)
327 goto bail; 369 return; /* use default direct_dma_ops */
328 dart_is_u4 = 1; 370 dart_is_u4 = 1;
329 } 371 }
330 372
373 /* Initialize the DART HW */
374 if (dart_init(dn) != 0)
375 goto bail;
376
331 /* Setup low level TCE operations for the core IOMMU code */ 377 /* Setup low level TCE operations for the core IOMMU code */
332 ppc_md.tce_build = dart_build; 378 ppc_md.tce_build = dart_build;
333 ppc_md.tce_free = dart_free; 379 ppc_md.tce_free = dart_free;
334 ppc_md.tce_flush = dart_flush; 380 ppc_md.tce_flush = dart_flush;
335 381
336 /* Initialize the DART HW */ 382 /* Setup bypass if supported */
337 if (dart_init(dn) == 0) { 383 if (dart_is_u4)
338 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; 384 ppc_md.dma_set_mask = dart_dma_set_mask;
339 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
340 385
341 /* Setup pci_dma ops */ 386 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
342 set_pci_dma_ops(&dma_iommu_ops); 387 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
343 return; 388
344 } 389 /* Setup pci_dma ops */
390 set_pci_dma_ops(&dma_iommu_ops);
391 return;
345 392
346 bail: 393 bail:
347 /* If init failed, use direct iommu and null setup functions */ 394 /* If init failed, use direct iommu and null setup functions */
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
new file mode 100644
index 000000000000..60c9c0bd5ba2
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright 2009-2010 Freescale Semiconductor, Inc
3 *
4 * QorIQ based Cache Controller Memory Mapped Registers
5 *
6 * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#ifndef __FSL_85XX_CACHE_CTLR_H__
24#define __FSL_85XX_CACHE_CTLR_H__
25
26#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */
27#define L2CR_L2IO 0x00200000 /* L2 instruction only */
28#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */
29#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */
30#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */
31#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */
32#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */
33#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */
34#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */
35#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */
36
37#define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */
38
39#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */
40#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */
41
42enum cache_sram_lock_ways {
43 LOCK_WAYS_ZERO,
44 LOCK_WAYS_EIGHTH,
45 LOCK_WAYS_TWO_EIGHTH,
46 LOCK_WAYS_HALF = 4,
47 LOCK_WAYS_FULL = 8,
48};
49
50struct mpc85xx_l2ctlr {
51 u32 ctl; /* 0x000 - L2 control */
52 u8 res1[0xC];
53 u32 ewar0; /* 0x010 - External write address 0 */
54 u32 ewarea0; /* 0x014 - External write address extended 0 */
55 u32 ewcr0; /* 0x018 - External write ctrl */
56 u8 res2[4];
57 u32 ewar1; /* 0x020 - External write address 1 */
58 u32 ewarea1; /* 0x024 - External write address extended 1 */
59 u32 ewcr1; /* 0x028 - External write ctrl 1 */
60 u8 res3[4];
61 u32 ewar2; /* 0x030 - External write address 2 */
62 u32 ewarea2; /* 0x034 - External write address extended 2 */
63 u32 ewcr2; /* 0x038 - External write ctrl 2 */
64 u8 res4[4];
65 u32 ewar3; /* 0x040 - External write address 3 */
66 u32 ewarea3; /* 0x044 - External write address extended 3 */
67 u32 ewcr3; /* 0x048 - External write ctrl 3 */
68 u8 res5[0xB4];
69 u32 srbar0; /* 0x100 - SRAM base address 0 */
70 u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */
71 u32 srbar1; /* 0x108 - SRAM base address 1 */
72 u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */
73 u8 res6[0xCF0];
74 u32 errinjhi; /* 0xE00 - Error injection mask high */
75 u32 errinjlo; /* 0xE04 - Error injection mask low */
76 u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */
77 u8 res7[0x14];
78 u32 captdatahi; /* 0xE20 - Error data high capture */
79 u32 captdatalo; /* 0xE24 - Error data low capture */
80 u32 captecc; /* 0xE28 - Error syndrome */
81 u8 res8[0x14];
82 u32 errdet; /* 0xE40 - Error detect */
83 u32 errdis; /* 0xE44 - Error disable */
84 u32 errinten; /* 0xE48 - Error interrupt enable */
85 u32 errattr; /* 0xE4c - Error attribute capture */
86 u32 erradrrl; /* 0xE50 - Error address capture low */
87 u32 erradrrh; /* 0xE54 - Error address capture high */
88 u32 errctl; /* 0xE58 - Error control */
89 u8 res9[0x1A4];
90};
91
92struct sram_parameters {
93 unsigned int sram_size;
94 uint64_t sram_offset;
95};
96
97extern int instantiate_cache_sram(struct platform_device *dev,
98 struct sram_parameters sram_params);
99extern void remove_cache_sram(struct platform_device *dev);
100
101#endif /* __FSL_85XX_CACHE_CTLR_H__ */
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
new file mode 100644
index 000000000000..116415899176
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright 2009-2010 Freescale Semiconductor, Inc.
3 *
4 * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM
5 *
6 * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
7 *
8 * This file is derived from the original work done
9 * by Sylvain Munaut for the Bestcomm SRAM allocator.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/err.h>
29#include <linux/of_platform.h>
30#include <asm/pgtable.h>
31#include <asm/fsl_85xx_cache_sram.h>
32
33#include "fsl_85xx_cache_ctlr.h"
34
/* Singleton Cache-SRAM state; NULL until instantiate_cache_sram() succeeds. */
struct mpc85xx_cache_sram *cache_sram;
36
37void *mpc85xx_cache_sram_alloc(unsigned int size,
38 phys_addr_t *phys, unsigned int align)
39{
40 unsigned long offset;
41 unsigned long flags;
42
43 if (unlikely(cache_sram == NULL))
44 return NULL;
45
46 if (!size || (size > cache_sram->size) || (align > cache_sram->size)) {
47 pr_err("%s(): size(=%x) or align(=%x) zero or too big\n",
48 __func__, size, align);
49 return NULL;
50 }
51
52 if ((align & (align - 1)) || align <= 1) {
53 pr_err("%s(): align(=%x) must be power of two and >1\n",
54 __func__, align);
55 return NULL;
56 }
57
58 spin_lock_irqsave(&cache_sram->lock, flags);
59 offset = rh_alloc_align(cache_sram->rh, size, align, NULL);
60 spin_unlock_irqrestore(&cache_sram->lock, flags);
61
62 if (IS_ERR_VALUE(offset))
63 return NULL;
64
65 *phys = cache_sram->base_phys + offset;
66
67 return (unsigned char *)cache_sram->base_virt + offset;
68}
69EXPORT_SYMBOL(mpc85xx_cache_sram_alloc);
70
71void mpc85xx_cache_sram_free(void *ptr)
72{
73 unsigned long flags;
74 BUG_ON(!ptr);
75
76 spin_lock_irqsave(&cache_sram->lock, flags);
77 rh_free(cache_sram->rh, ptr - cache_sram->base_virt);
78 spin_unlock_irqrestore(&cache_sram->lock, flags);
79}
80EXPORT_SYMBOL(mpc85xx_cache_sram_free);
81
82int __init instantiate_cache_sram(struct platform_device *dev,
83 struct sram_parameters sram_params)
84{
85 int ret = 0;
86
87 if (cache_sram) {
88 dev_err(&dev->dev, "Already initialized cache-sram\n");
89 return -EBUSY;
90 }
91
92 cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
93 if (!cache_sram) {
94 dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
95 return -ENOMEM;
96 }
97
98 cache_sram->base_phys = sram_params.sram_offset;
99 cache_sram->size = sram_params.sram_size;
100
101 if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
102 "fsl_85xx_cache_sram")) {
103 dev_err(&dev->dev, "%s: request memory failed\n",
104 dev->dev.of_node->full_name);
105 ret = -ENXIO;
106 goto out_free;
107 }
108
109 cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
110 cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
111 if (!cache_sram->base_virt) {
112 dev_err(&dev->dev, "%s: ioremap_prot failed\n",
113 dev->dev.of_node->full_name);
114 ret = -ENOMEM;
115 goto out_release;
116 }
117
118 cache_sram->rh = rh_create(sizeof(unsigned int));
119 if (IS_ERR(cache_sram->rh)) {
120 dev_err(&dev->dev, "%s: Unable to create remote heap\n",
121 dev->dev.of_node->full_name);
122 ret = PTR_ERR(cache_sram->rh);
123 goto out_unmap;
124 }
125
126 rh_attach_region(cache_sram->rh, 0, cache_sram->size);
127 spin_lock_init(&cache_sram->lock);
128
129 dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
130 (unsigned long long)cache_sram->base_phys, cache_sram->size);
131
132 return 0;
133
134out_unmap:
135 iounmap(cache_sram->base_virt);
136
137out_release:
138 release_mem_region(cache_sram->base_phys, cache_sram->size);
139
140out_free:
141 kfree(cache_sram);
142 return ret;
143}
144
145void remove_cache_sram(struct platform_device *dev)
146{
147 BUG_ON(!cache_sram);
148
149 rh_detach_region(cache_sram->rh, 0, cache_sram->size);
150 rh_destroy(cache_sram->rh);
151
152 iounmap(cache_sram->base_virt);
153 release_mem_region(cache_sram->base_phys, cache_sram->size);
154
155 kfree(cache_sram);
156 cache_sram = NULL;
157
158 dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n");
159}
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
new file mode 100644
index 000000000000..5f88797dce73
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -0,0 +1,230 @@
1/*
2 * Copyright 2009-2010 Freescale Semiconductor, Inc.
3 *
4 * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation
5 *
6 * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/of_platform.h>
25#include <asm/io.h>
26
27#include "fsl_85xx_cache_ctlr.h"
28
/* Raw option strings captured from the kernel command line by the
 * __setup() handlers below; parsed on demand during probe. */
static char *sram_size;
static char *sram_offset;
/* Mapped L2 controller registers; valid after a successful probe. */
struct mpc85xx_l2ctlr __iomem *l2ctlr;
32
33static long get_cache_sram_size(void)
34{
35 unsigned long val;
36
37 if (!sram_size || (strict_strtoul(sram_size, 0, &val) < 0))
38 return -EINVAL;
39
40 return val;
41}
42
43static long get_cache_sram_offset(void)
44{
45 unsigned long val;
46
47 if (!sram_offset || (strict_strtoul(sram_offset, 0, &val) < 0))
48 return -EINVAL;
49
50 return val;
51}
52
53static int __init get_size_from_cmdline(char *str)
54{
55 if (!str)
56 return 0;
57
58 sram_size = str;
59 return 1;
60}
61
62static int __init get_offset_from_cmdline(char *str)
63{
64 if (!str)
65 return 0;
66
67 sram_offset = str;
68 return 1;
69}
70
71__setup("cache-sram-size=", get_size_from_cmdline);
72__setup("cache-sram-offset=", get_offset_from_cmdline);
73
74static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
75{
76 long rval;
77 unsigned int rem;
78 unsigned char ways;
79 const unsigned int *prop;
80 unsigned int l2cache_size;
81 struct sram_parameters sram_params;
82
83 if (!dev->dev.of_node) {
84 dev_err(&dev->dev, "Device's OF-node is NULL\n");
85 return -EINVAL;
86 }
87
88 prop = of_get_property(dev->dev.of_node, "cache-size", NULL);
89 if (!prop) {
90 dev_err(&dev->dev, "Missing L2 cache-size\n");
91 return -EINVAL;
92 }
93 l2cache_size = *prop;
94
95 sram_params.sram_size = get_cache_sram_size();
96 if ((int)sram_params.sram_size <= 0) {
97 dev_err(&dev->dev,
98 "Entire L2 as cache, Aborting Cache-SRAM stuff\n");
99 return -EINVAL;
100 }
101
102 sram_params.sram_offset = get_cache_sram_offset();
103 if ((int64_t)sram_params.sram_offset <= 0) {
104 dev_err(&dev->dev,
105 "Entire L2 as cache, provide a valid sram offset\n");
106 return -EINVAL;
107 }
108
109
110 rem = l2cache_size % sram_params.sram_size;
111 ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size;
112 if (rem || (ways & (ways - 1))) {
113 dev_err(&dev->dev, "Illegal cache-sram-size in command line\n");
114 return -EINVAL;
115 }
116
117 l2ctlr = of_iomap(dev->dev.of_node, 0);
118 if (!l2ctlr) {
119 dev_err(&dev->dev, "Can't map L2 controller\n");
120 return -EINVAL;
121 }
122
123 /*
124 * Write bits[0-17] to srbar0
125 */
126 out_be32(&l2ctlr->srbar0,
127 sram_params.sram_offset & L2SRAM_BAR_MSK_LO18);
128
129 /*
130 * Write bits[18-21] to srbare0
131 */
132#ifdef CONFIG_PHYS_64BIT
133 out_be32(&l2ctlr->srbarea0,
134 (sram_params.sram_offset >> 32) & L2SRAM_BARE_MSK_HI4);
135#endif
136
137 clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI);
138
139 switch (ways) {
140 case LOCK_WAYS_EIGHTH:
141 setbits32(&l2ctlr->ctl,
142 L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH);
143 break;
144
145 case LOCK_WAYS_TWO_EIGHTH:
146 setbits32(&l2ctlr->ctl,
147 L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART);
148 break;
149
150 case LOCK_WAYS_HALF:
151 setbits32(&l2ctlr->ctl,
152 L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF);
153 break;
154
155 case LOCK_WAYS_FULL:
156 default:
157 setbits32(&l2ctlr->ctl,
158 L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL);
159 break;
160 }
161 eieio();
162
163 rval = instantiate_cache_sram(dev, sram_params);
164 if (rval < 0) {
165 dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n");
166 iounmap(l2ctlr);
167 return -EINVAL;
168 }
169
170 return 0;
171}
172
173static int __devexit mpc85xx_l2ctlr_of_remove(struct platform_device *dev)
174{
175 BUG_ON(!l2ctlr);
176
177 iounmap(l2ctlr);
178 remove_cache_sram(dev);
179 dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n");
180
181 return 0;
182}
183
/* QorIQ P1/P2 SoCs whose L2 controller this driver can bind to. */
static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
	{
		.compatible = "fsl,p2020-l2-cache-controller",
	},
	{
		.compatible = "fsl,p2010-l2-cache-controller",
	},
	{
		.compatible = "fsl,p1020-l2-cache-controller",
	},
	{
		.compatible = "fsl,p1011-l2-cache-controller",
	},
	{
		.compatible = "fsl,p1013-l2-cache-controller",
	},
	{
		.compatible = "fsl,p1022-l2-cache-controller",
	},
	{},	/* sentinel */
};
205
/* Platform driver glue binding the probe/remove callbacks above. */
static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = {
	.driver	= {
		.name		= "fsl-l2ctlr",
		.owner		= THIS_MODULE,
		.of_match_table	= mpc85xx_l2ctlr_of_match,
	},
	.probe		= mpc85xx_l2ctlr_of_probe,
	.remove		= __devexit_p(mpc85xx_l2ctlr_of_remove),
};
215
/* Register the driver; invoked via subsys_initcall() below. */
static __init int mpc85xx_l2ctlr_of_init(void)
{
	return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver);
}
220
/* Unregister the driver on module unload. */
static void __exit mpc85xx_l2ctlr_of_exit(void)
{
	platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver);
}

/* Registered at subsys_initcall time, i.e. before regular device initcalls. */
subsys_initcall(mpc85xx_l2ctlr_of_init);
module_exit(mpc85xx_l2ctlr_of_exit);
228
229MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init");
230MODULE_LICENSE("GPL v2");
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index dceb8d1a843d..d917573cf1a8 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -1,9 +1,12 @@
1/* 1/*
2 * Freescale LBC and UPM routines. 2 * Freescale LBC and UPM routines.
3 * 3 *
4 * Copyright (c) 2007-2008 MontaVista Software, Inc. 4 * Copyright © 2007-2008 MontaVista Software, Inc.
5 * Copyright © 2010 Freescale Semiconductor
5 * 6 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 7 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
8 * Author: Jack Lan <Jack.Lan@freescale.com>
9 * Author: Roy Zang <tie-fei.zang@freescale.com>
7 * 10 *
8 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
@@ -19,39 +22,37 @@
19#include <linux/types.h> 22#include <linux/types.h>
20#include <linux/io.h> 23#include <linux/io.h>
21#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/slab.h>
26#include <linux/platform_device.h>
27#include <linux/interrupt.h>
28#include <linux/mod_devicetable.h>
22#include <asm/prom.h> 29#include <asm/prom.h>
23#include <asm/fsl_lbc.h> 30#include <asm/fsl_lbc.h>
24 31
25static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); 32static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
26static struct fsl_lbc_regs __iomem *fsl_lbc_regs; 33struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
34EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
27 35
28static char __initdata *compat_lbc[] = { 36/**
29 "fsl,pq2-localbus", 37 * fsl_lbc_addr - convert the base address
30 "fsl,pq2pro-localbus", 38 * @addr_base: base address of the memory bank
31 "fsl,pq3-localbus", 39 *
32 "fsl,elbc", 40 * This function converts a base address of lbc into the right format for the
33}; 41 * BR register. If the SOC has eLBC then it returns 32bit physical address
34 42 * else it convers a 34bit local bus physical address to correct format of
35static int __init fsl_lbc_init(void) 43 * 32bit address for BR register (Example: MPC8641).
44 */
45u32 fsl_lbc_addr(phys_addr_t addr_base)
36{ 46{
37 struct device_node *lbus; 47 struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
38 int i; 48 u32 addr = addr_base & 0xffff8000;
39 49
40 for (i = 0; i < ARRAY_SIZE(compat_lbc); i++) { 50 if (of_device_is_compatible(np, "fsl,elbc"))
41 lbus = of_find_compatible_node(NULL, NULL, compat_lbc[i]); 51 return addr;
42 if (lbus)
43 goto found;
44 }
45 return -ENODEV;
46 52
47found: 53 return addr | ((addr_base & 0x300000000ull) >> 19);
48 fsl_lbc_regs = of_iomap(lbus, 0);
49 of_node_put(lbus);
50 if (!fsl_lbc_regs)
51 return -ENOMEM;
52 return 0;
53} 54}
54arch_initcall(fsl_lbc_init); 55EXPORT_SYMBOL(fsl_lbc_addr);
55 56
56/** 57/**
57 * fsl_lbc_find - find Localbus bank 58 * fsl_lbc_find - find Localbus bank
@@ -65,15 +66,17 @@ arch_initcall(fsl_lbc_init);
65int fsl_lbc_find(phys_addr_t addr_base) 66int fsl_lbc_find(phys_addr_t addr_base)
66{ 67{
67 int i; 68 int i;
69 struct fsl_lbc_regs __iomem *lbc;
68 70
69 if (!fsl_lbc_regs) 71 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
70 return -ENODEV; 72 return -ENODEV;
71 73
72 for (i = 0; i < ARRAY_SIZE(fsl_lbc_regs->bank); i++) { 74 lbc = fsl_lbc_ctrl_dev->regs;
73 __be32 br = in_be32(&fsl_lbc_regs->bank[i].br); 75 for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) {
74 __be32 or = in_be32(&fsl_lbc_regs->bank[i].or); 76 __be32 br = in_be32(&lbc->bank[i].br);
77 __be32 or = in_be32(&lbc->bank[i].or);
75 78
76 if (br & BR_V && (br & or & BR_BA) == addr_base) 79 if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))
77 return i; 80 return i;
78 } 81 }
79 82
@@ -94,22 +97,27 @@ int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
94{ 97{
95 int bank; 98 int bank;
96 __be32 br; 99 __be32 br;
100 struct fsl_lbc_regs __iomem *lbc;
97 101
98 bank = fsl_lbc_find(addr_base); 102 bank = fsl_lbc_find(addr_base);
99 if (bank < 0) 103 if (bank < 0)
100 return bank; 104 return bank;
101 105
102 br = in_be32(&fsl_lbc_regs->bank[bank].br); 106 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
107 return -ENODEV;
108
109 lbc = fsl_lbc_ctrl_dev->regs;
110 br = in_be32(&lbc->bank[bank].br);
103 111
104 switch (br & BR_MSEL) { 112 switch (br & BR_MSEL) {
105 case BR_MS_UPMA: 113 case BR_MS_UPMA:
106 upm->mxmr = &fsl_lbc_regs->mamr; 114 upm->mxmr = &lbc->mamr;
107 break; 115 break;
108 case BR_MS_UPMB: 116 case BR_MS_UPMB:
109 upm->mxmr = &fsl_lbc_regs->mbmr; 117 upm->mxmr = &lbc->mbmr;
110 break; 118 break;
111 case BR_MS_UPMC: 119 case BR_MS_UPMC:
112 upm->mxmr = &fsl_lbc_regs->mcmr; 120 upm->mxmr = &lbc->mcmr;
113 break; 121 break;
114 default: 122 default:
115 return -EINVAL; 123 return -EINVAL;
@@ -148,9 +156,12 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
148 int ret = 0; 156 int ret = 0;
149 unsigned long flags; 157 unsigned long flags;
150 158
159 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
160 return -ENODEV;
161
151 spin_lock_irqsave(&fsl_lbc_lock, flags); 162 spin_lock_irqsave(&fsl_lbc_lock, flags);
152 163
153 out_be32(&fsl_lbc_regs->mar, mar); 164 out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar);
154 165
155 switch (upm->width) { 166 switch (upm->width) {
156 case 8: 167 case 8:
@@ -172,3 +183,171 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
172 return ret; 183 return ret;
173} 184}
174EXPORT_SYMBOL(fsl_upm_run_pattern); 185EXPORT_SYMBOL(fsl_upm_run_pattern);
186
187static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
188 struct device_node *node)
189{
190 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
191
192 /* clear event registers */
193 setbits32(&lbc->ltesr, LTESR_CLEAR);
194 out_be32(&lbc->lteatr, 0);
195 out_be32(&lbc->ltear, 0);
196 out_be32(&lbc->lteccr, LTECCR_CLEAR);
197 out_be32(&lbc->ltedr, LTEDR_ENABLE);
198
199 /* Set the monitor timeout value to the maximum for erratum A001 */
200 if (of_device_is_compatible(node, "fsl,elbc"))
201 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
202
203 return 0;
204}
205
206/*
207 * NOTE: This interrupt is used to report localbus events of various kinds,
208 * such as transaction errors on the chipselects.
209 */
210
211static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
212{
213 struct fsl_lbc_ctrl *ctrl = data;
214 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
215 u32 status;
216
217 status = in_be32(&lbc->ltesr);
218 if (!status)
219 return IRQ_NONE;
220
221 out_be32(&lbc->ltesr, LTESR_CLEAR);
222 out_be32(&lbc->lteatr, 0);
223 out_be32(&lbc->ltear, 0);
224 ctrl->irq_status = status;
225
226 if (status & LTESR_BM)
227 dev_err(ctrl->dev, "Local bus monitor time-out: "
228 "LTESR 0x%08X\n", status);
229 if (status & LTESR_WP)
230 dev_err(ctrl->dev, "Write protect error: "
231 "LTESR 0x%08X\n", status);
232 if (status & LTESR_ATMW)
233 dev_err(ctrl->dev, "Atomic write error: "
234 "LTESR 0x%08X\n", status);
235 if (status & LTESR_ATMR)
236 dev_err(ctrl->dev, "Atomic read error: "
237 "LTESR 0x%08X\n", status);
238 if (status & LTESR_CS)
239 dev_err(ctrl->dev, "Chip select error: "
240 "LTESR 0x%08X\n", status);
241 if (status & LTESR_UPM)
242 ;
243 if (status & LTESR_FCT) {
244 dev_err(ctrl->dev, "FCM command time-out: "
245 "LTESR 0x%08X\n", status);
246 smp_wmb();
247 wake_up(&ctrl->irq_wait);
248 }
249 if (status & LTESR_PAR) {
250 dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: "
251 "LTESR 0x%08X\n", status);
252 smp_wmb();
253 wake_up(&ctrl->irq_wait);
254 }
255 if (status & LTESR_CC) {
256 smp_wmb();
257 wake_up(&ctrl->irq_wait);
258 }
259 if (status & ~LTESR_MASK)
260 dev_err(ctrl->dev, "Unknown error: "
261 "LTESR 0x%08X\n", status);
262 return IRQ_HANDLED;
263}
264
265/*
266 * fsl_lbc_ctrl_probe
267 *
268 * called by device layer when it finds a device matching
269 * one our driver can handled. This code allocates all of
270 * the resources needed for the controller only. The
271 * resources for the NAND banks themselves are allocated
272 * in the chip probe function.
273*/
274
275static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
276{
277 int ret;
278
279 if (!dev->dev.of_node) {
280 dev_err(&dev->dev, "Device OF-Node is NULL");
281 return -EFAULT;
282 }
283
284 fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL);
285 if (!fsl_lbc_ctrl_dev)
286 return -ENOMEM;
287
288 dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev);
289
290 spin_lock_init(&fsl_lbc_ctrl_dev->lock);
291 init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait);
292
293 fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
294 if (!fsl_lbc_ctrl_dev->regs) {
295 dev_err(&dev->dev, "failed to get memory region\n");
296 ret = -ENODEV;
297 goto err;
298 }
299
300 fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
301 if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
302 dev_err(&dev->dev, "failed to get irq resource\n");
303 ret = -ENODEV;
304 goto err;
305 }
306
307 fsl_lbc_ctrl_dev->dev = &dev->dev;
308
309 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
310 if (ret < 0)
311 goto err;
312
313 ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
314 "fsl-lbc", fsl_lbc_ctrl_dev);
315 if (ret != 0) {
316 dev_err(&dev->dev, "failed to install irq (%d)\n",
317 fsl_lbc_ctrl_dev->irq);
318 ret = fsl_lbc_ctrl_dev->irq;
319 goto err;
320 }
321
322 /* Enable interrupts for any detected events */
323 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
324
325 return 0;
326
327err:
328 iounmap(fsl_lbc_ctrl_dev->regs);
329 kfree(fsl_lbc_ctrl_dev);
330 return ret;
331}
332
333static const struct of_device_id fsl_lbc_match[] = {
334 { .compatible = "fsl,elbc", },
335 { .compatible = "fsl,pq3-localbus", },
336 { .compatible = "fsl,pq2-localbus", },
337 { .compatible = "fsl,pq2pro-localbus", },
338 {},
339};
340
341static struct platform_driver fsl_lbc_ctrl_driver = {
342 .driver = {
343 .name = "fsl-lbc",
344 .of_match_table = fsl_lbc_match,
345 },
346 .probe = fsl_lbc_ctrl_probe,
347};
348
349static int __init fsl_lbc_init(void)
350{
351 return platform_driver_register(&fsl_lbc_ctrl_driver);
352}
353module_init(fsl_lbc_init);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 87991d3abbab..92e78333c47c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. 2 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
3 * 3 *
4 * Author: Tony Li <tony.li@freescale.com> 4 * Author: Tony Li <tony.li@freescale.com>
5 * Jason Jin <Jason.jin@freescale.com> 5 * Jason Jin <Jason.jin@freescale.com>
@@ -24,6 +24,7 @@
24#include <asm/ppc-pci.h> 24#include <asm/ppc-pci.h>
25#include <asm/mpic.h> 25#include <asm/mpic.h>
26#include "fsl_msi.h" 26#include "fsl_msi.h"
27#include "fsl_pci.h"
27 28
28LIST_HEAD(msi_head); 29LIST_HEAD(msi_head);
29 30
@@ -46,14 +47,14 @@ static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
46 * We do not need this actually. The MSIR register has been read once 47 * We do not need this actually. The MSIR register has been read once
47 * in the cascade interrupt. So, this MSI interrupt has been acked 48 * in the cascade interrupt. So, this MSI interrupt has been acked
48*/ 49*/
49static void fsl_msi_end_irq(unsigned int virq) 50static void fsl_msi_end_irq(struct irq_data *d)
50{ 51{
51} 52}
52 53
53static struct irq_chip fsl_msi_chip = { 54static struct irq_chip fsl_msi_chip = {
54 .mask = mask_msi_irq, 55 .irq_mask = mask_msi_irq,
55 .unmask = unmask_msi_irq, 56 .irq_unmask = unmask_msi_irq,
56 .ack = fsl_msi_end_irq, 57 .irq_ack = fsl_msi_end_irq,
57 .name = "FSL-MSI", 58 .name = "FSL-MSI",
58}; 59};
59 60
@@ -63,10 +64,10 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
63 struct fsl_msi *msi_data = h->host_data; 64 struct fsl_msi *msi_data = h->host_data;
64 struct irq_chip *chip = &fsl_msi_chip; 65 struct irq_chip *chip = &fsl_msi_chip;
65 66
66 irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING; 67 irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
67 68
68 set_irq_chip_data(virq, msi_data); 69 irq_set_chip_data(virq, msi_data);
69 set_irq_chip_and_handler(virq, chip, handle_edge_irq); 70 irq_set_chip_and_handler(virq, chip, handle_edge_irq);
70 71
71 return 0; 72 return 0;
72} 73}
@@ -109,8 +110,8 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
109 list_for_each_entry(entry, &pdev->msi_list, list) { 110 list_for_each_entry(entry, &pdev->msi_list, list) {
110 if (entry->irq == NO_IRQ) 111 if (entry->irq == NO_IRQ)
111 continue; 112 continue;
112 msi_data = get_irq_data(entry->irq); 113 msi_data = irq_get_chip_data(entry->irq);
113 set_irq_msi(entry->irq, NULL); 114 irq_set_msi_desc(entry->irq, NULL);
114 msi_bitmap_free_hwirqs(&msi_data->bitmap, 115 msi_bitmap_free_hwirqs(&msi_data->bitmap,
115 virq_to_hw(entry->irq), 1); 116 virq_to_hw(entry->irq), 1);
116 irq_dispose_mapping(entry->irq); 117 irq_dispose_mapping(entry->irq);
@@ -125,13 +126,11 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
125{ 126{
126 struct fsl_msi *msi_data = fsl_msi_data; 127 struct fsl_msi *msi_data = fsl_msi_data;
127 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 128 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
128 u32 base = 0; 129 u64 base = fsl_pci_immrbar_base(hose);
129 130
130 pci_bus_read_config_dword(hose->bus, 131 msg->address_lo = msi_data->msi_addr_lo + lower_32_bits(base);
131 PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); 132 msg->address_hi = msi_data->msi_addr_hi + upper_32_bits(base);
132 133
133 msg->address_lo = msi_data->msi_addr_lo + base;
134 msg->address_hi = msi_data->msi_addr_hi;
135 msg->data = hwirq; 134 msg->data = hwirq;
136 135
137 pr_debug("%s: allocated srs: %d, ibs: %d\n", 136 pr_debug("%s: allocated srs: %d, ibs: %d\n",
@@ -169,8 +168,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
169 rc = -ENOSPC; 168 rc = -ENOSPC;
170 goto out_free; 169 goto out_free;
171 } 170 }
172 set_irq_data(virq, msi_data); 171 /* chip_data is msi_data via host->hostdata in host->map() */
173 set_irq_msi(virq, entry); 172 irq_set_msi_desc(virq, entry);
174 173
175 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); 174 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
176 write_msi_msg(virq, &msg); 175 write_msi_msg(virq, &msg);
@@ -184,6 +183,8 @@ out_free:
184 183
185static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) 184static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
186{ 185{
186 struct irq_chip *chip = irq_desc_get_chip(desc);
187 struct irq_data *idata = irq_desc_get_irq_data(desc);
187 unsigned int cascade_irq; 188 unsigned int cascade_irq;
188 struct fsl_msi *msi_data; 189 struct fsl_msi *msi_data;
189 int msir_index = -1; 190 int msir_index = -1;
@@ -192,20 +193,20 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
192 u32 have_shift = 0; 193 u32 have_shift = 0;
193 struct fsl_msi_cascade_data *cascade_data; 194 struct fsl_msi_cascade_data *cascade_data;
194 195
195 cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq); 196 cascade_data = irq_get_handler_data(irq);
196 msi_data = cascade_data->msi_data; 197 msi_data = cascade_data->msi_data;
197 198
198 raw_spin_lock(&desc->lock); 199 raw_spin_lock(&desc->lock);
199 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { 200 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
200 if (desc->chip->mask_ack) 201 if (chip->irq_mask_ack)
201 desc->chip->mask_ack(irq); 202 chip->irq_mask_ack(idata);
202 else { 203 else {
203 desc->chip->mask(irq); 204 chip->irq_mask(idata);
204 desc->chip->ack(irq); 205 chip->irq_ack(idata);
205 } 206 }
206 } 207 }
207 208
208 if (unlikely(desc->status & IRQ_INPROGRESS)) 209 if (unlikely(irqd_irq_inprogress(idata)))
209 goto unlock; 210 goto unlock;
210 211
211 msir_index = cascade_data->index; 212 msir_index = cascade_data->index;
@@ -213,7 +214,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
213 if (msir_index >= NR_MSI_REG) 214 if (msir_index >= NR_MSI_REG)
214 cascade_irq = NO_IRQ; 215 cascade_irq = NO_IRQ;
215 216
216 desc->status |= IRQ_INPROGRESS; 217 irqd_set_chained_irq_inprogress(idata);
217 switch (msi_data->feature & FSL_PIC_IP_MASK) { 218 switch (msi_data->feature & FSL_PIC_IP_MASK) {
218 case FSL_PIC_IP_MPIC: 219 case FSL_PIC_IP_MPIC:
219 msir_value = fsl_msi_read(msi_data->msi_regs, 220 msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -235,15 +236,15 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
235 have_shift += intr_index + 1; 236 have_shift += intr_index + 1;
236 msir_value = msir_value >> (intr_index + 1); 237 msir_value = msir_value >> (intr_index + 1);
237 } 238 }
238 desc->status &= ~IRQ_INPROGRESS; 239 irqd_clr_chained_irq_inprogress(idata);
239 240
240 switch (msi_data->feature & FSL_PIC_IP_MASK) { 241 switch (msi_data->feature & FSL_PIC_IP_MASK) {
241 case FSL_PIC_IP_MPIC: 242 case FSL_PIC_IP_MPIC:
242 desc->chip->eoi(irq); 243 chip->irq_eoi(idata);
243 break; 244 break;
244 case FSL_PIC_IP_IPIC: 245 case FSL_PIC_IP_IPIC:
245 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 246 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
246 desc->chip->unmask(irq); 247 chip->irq_unmask(idata);
247 break; 248 break;
248 } 249 }
249unlock: 250unlock:
@@ -252,7 +253,7 @@ unlock:
252 253
253static int fsl_of_msi_remove(struct platform_device *ofdev) 254static int fsl_of_msi_remove(struct platform_device *ofdev)
254{ 255{
255 struct fsl_msi *msi = ofdev->dev.platform_data; 256 struct fsl_msi *msi = platform_get_drvdata(ofdev);
256 int virq, i; 257 int virq, i;
257 struct fsl_msi_cascade_data *cascade_data; 258 struct fsl_msi_cascade_data *cascade_data;
258 259
@@ -261,7 +262,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
261 for (i = 0; i < NR_MSI_REG; i++) { 262 for (i = 0; i < NR_MSI_REG; i++) {
262 virq = msi->msi_virqs[i]; 263 virq = msi->msi_virqs[i];
263 if (virq != NO_IRQ) { 264 if (virq != NO_IRQ) {
264 cascade_data = get_irq_data(virq); 265 cascade_data = irq_get_handler_data(virq);
265 kfree(cascade_data); 266 kfree(cascade_data);
266 irq_dispose_mapping(virq); 267 irq_dispose_mapping(virq);
267 } 268 }
@@ -274,19 +275,53 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
274 return 0; 275 return 0;
275} 276}
276 277
277static int __devinit fsl_of_msi_probe(struct platform_device *dev, 278static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
278 const struct of_device_id *match) 279 struct platform_device *dev,
280 int offset, int irq_index)
279{ 281{
282 struct fsl_msi_cascade_data *cascade_data = NULL;
283 int virt_msir;
284
285 virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
286 if (virt_msir == NO_IRQ) {
287 dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
288 __func__, irq_index);
289 return 0;
290 }
291
292 cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
293 if (!cascade_data) {
294 dev_err(&dev->dev, "No memory for MSI cascade data\n");
295 return -ENOMEM;
296 }
297
298 msi->msi_virqs[irq_index] = virt_msir;
299 cascade_data->index = offset + irq_index;
300 cascade_data->msi_data = msi;
301 irq_set_handler_data(virt_msir, cascade_data);
302 irq_set_chained_handler(virt_msir, fsl_msi_cascade);
303
304 return 0;
305}
306
307static const struct of_device_id fsl_of_msi_ids[];
308static int __devinit fsl_of_msi_probe(struct platform_device *dev)
309{
310 const struct of_device_id *match;
280 struct fsl_msi *msi; 311 struct fsl_msi *msi;
281 struct resource res; 312 struct resource res;
282 int err, i, count; 313 int err, i, j, irq_index, count;
283 int rc; 314 int rc;
284 int virt_msir;
285 const u32 *p; 315 const u32 *p;
286 struct fsl_msi_feature *features = match->data; 316 struct fsl_msi_feature *features;
287 struct fsl_msi_cascade_data *cascade_data = NULL;
288 int len; 317 int len;
289 u32 offset; 318 u32 offset;
319 static const u32 all_avail[] = { 0, NR_MSI_IRQS };
320
321 match = of_match_device(fsl_of_msi_ids, &dev->dev);
322 if (!match)
323 return -EINVAL;
324 features = match->data;
290 325
291 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 326 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
292 327
@@ -295,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev,
295 dev_err(&dev->dev, "No memory for MSI structure\n"); 330 dev_err(&dev->dev, "No memory for MSI structure\n");
296 return -ENOMEM; 331 return -ENOMEM;
297 } 332 }
298 dev->dev.platform_data = msi; 333 platform_set_drvdata(dev, msi);
299 334
300 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, 335 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
301 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 336 NR_MSI_IRQS, &fsl_msi_host_ops, 0);
@@ -333,42 +368,34 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev,
333 goto error_out; 368 goto error_out;
334 } 369 }
335 370
336 p = of_get_property(dev->dev.of_node, "interrupts", &count); 371 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
337 if (!p) { 372 if (p && len % (2 * sizeof(u32)) != 0) {
338 dev_err(&dev->dev, "no interrupts property found on %s\n", 373 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
339 dev->dev.of_node->full_name); 374 __func__);
340 err = -ENODEV;
341 goto error_out;
342 }
343 if (count % 8 != 0) {
344 dev_err(&dev->dev, "Malformed interrupts property on %s\n",
345 dev->dev.of_node->full_name);
346 err = -EINVAL; 375 err = -EINVAL;
347 goto error_out; 376 goto error_out;
348 } 377 }
349 offset = 0; 378
350 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); 379 if (!p)
351 if (p) 380 p = all_avail;
352 offset = *p / IRQS_PER_MSI_REG; 381
353 382 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
354 count /= sizeof(u32); 383 if (p[i * 2] % IRQS_PER_MSI_REG ||
355 for (i = 0; i < min(count / 2, NR_MSI_REG); i++) { 384 p[i * 2 + 1] % IRQS_PER_MSI_REG) {
356 virt_msir = irq_of_parse_and_map(dev->dev.of_node, i); 385 printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
357 if (virt_msir != NO_IRQ) { 386 __func__, dev->dev.of_node->full_name,
358 cascade_data = kzalloc( 387 p[i * 2 + 1], p[i * 2]);
359 sizeof(struct fsl_msi_cascade_data), 388 err = -EINVAL;
360 GFP_KERNEL); 389 goto error_out;
361 if (!cascade_data) { 390 }
362 dev_err(&dev->dev, 391
363 "No memory for MSI cascade data\n"); 392 offset = p[i * 2] / IRQS_PER_MSI_REG;
364 err = -ENOMEM; 393 count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
394
395 for (j = 0; j < count; j++, irq_index++) {
396 err = fsl_msi_setup_hwirq(msi, dev, offset, irq_index);
397 if (err)
365 goto error_out; 398 goto error_out;
366 }
367 msi->msi_virqs[i] = virt_msir;
368 cascade_data->index = i + offset;
369 cascade_data->msi_data = msi;
370 set_irq_data(virt_msir, (void *)cascade_data);
371 set_irq_chained_handler(virt_msir, fsl_msi_cascade);
372 } 399 }
373 } 400 }
374 401
@@ -412,7 +439,7 @@ static const struct of_device_id fsl_of_msi_ids[] = {
412 {} 439 {}
413}; 440};
414 441
415static struct of_platform_driver fsl_of_msi_driver = { 442static struct platform_driver fsl_of_msi_driver = {
416 .driver = { 443 .driver = {
417 .name = "fsl-msi", 444 .name = "fsl-msi",
418 .owner = THIS_MODULE, 445 .owner = THIS_MODULE,
@@ -424,7 +451,7 @@ static struct of_platform_driver fsl_of_msi_driver = {
424 451
425static __init int fsl_of_msi_init(void) 452static __init int fsl_of_msi_init(void)
426{ 453{
427 return of_register_platform_driver(&fsl_of_msi_driver); 454 return platform_driver_register(&fsl_of_msi_driver);
428} 455}
429 456
430subsys_initcall(fsl_of_msi_init); 457subsys_initcall(fsl_of_msi_init);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4ae933225251..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC83xx/85xx/86xx PCI/PCIE support routing. 2 * MPC83xx/85xx/86xx PCI/PCIE support routing.
3 * 3 *
4 * Copyright 2007-2009 Freescale Semiconductor, Inc. 4 * Copyright 2007-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008-2009 MontaVista Software, Inc. 5 * Copyright 2008-2009 MontaVista Software, Inc.
6 * 6 *
7 * Initial author: Xianghua Xiao <x.xiao@freescale.com> 7 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
@@ -34,7 +34,7 @@
34#include <sysdev/fsl_soc.h> 34#include <sysdev/fsl_soc.h>
35#include <sysdev/fsl_pci.h> 35#include <sysdev/fsl_pci.h>
36 36
37static int fsl_pcie_bus_fixup; 37static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
38 38
39static void __init quirk_fsl_pcie_header(struct pci_dev *dev) 39static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
40{ 40{
@@ -99,7 +99,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
99 struct resource *rsrc) 99 struct resource *rsrc)
100{ 100{
101 struct ccsr_pci __iomem *pci; 101 struct ccsr_pci __iomem *pci;
102 int i, j, n, mem_log, win_idx = 2; 102 int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
103 u64 mem, sz, paddr_hi = 0; 103 u64 mem, sz, paddr_hi = 0;
104 u64 paddr_lo = ULLONG_MAX; 104 u64 paddr_lo = ULLONG_MAX;
105 u32 pcicsrbar = 0, pcicsrbar_sz; 105 u32 pcicsrbar = 0, pcicsrbar_sz;
@@ -109,6 +109,13 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
109 109
110 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", 110 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
111 (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); 111 (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
112
113 if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) {
114 win_idx = 2;
115 start_idx = 0;
116 end_idx = 3;
117 }
118
112 pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); 119 pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
113 if (!pci) { 120 if (!pci) {
114 dev_err(hose->parent, "Unable to map ATMU registers\n"); 121 dev_err(hose->parent, "Unable to map ATMU registers\n");
@@ -118,7 +125,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
118 /* Disable all windows (except powar0 since it's ignored) */ 125 /* Disable all windows (except powar0 since it's ignored) */
119 for(i = 1; i < 5; i++) 126 for(i = 1; i < 5; i++)
120 out_be32(&pci->pow[i].powar, 0); 127 out_be32(&pci->pow[i].powar, 0);
121 for(i = 0; i < 3; i++) 128 for (i = start_idx; i < end_idx; i++)
122 out_be32(&pci->piw[i].piwar, 0); 129 out_be32(&pci->piw[i].piwar, 0);
123 130
124 /* Setup outbound MEM window */ 131 /* Setup outbound MEM window */
@@ -204,7 +211,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
204 mem_log++; 211 mem_log++;
205 } 212 }
206 213
207 piwar |= (mem_log - 1); 214 piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
208 215
209 /* Setup inbound memory window */ 216 /* Setup inbound memory window */
210 out_be32(&pci->piw[win_idx].pitar, 0x00000000); 217 out_be32(&pci->piw[win_idx].pitar, 0x00000000);
@@ -317,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
317 struct resource rsrc; 324 struct resource rsrc;
318 const int *bus_range; 325 const int *bus_range;
319 326
327 if (!of_device_is_available(dev)) {
328 pr_warning("%s: disabled\n", dev->full_name);
329 return -ENODEV;
330 }
331
320 pr_debug("Adding PCI host bridge %s\n", dev->full_name); 332 pr_debug("Adding PCI host bridge %s\n", dev->full_name);
321 333
322 /* Fetch host bridge registers address */ 334 /* Fetch host bridge registers address */
@@ -407,10 +419,18 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
407DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); 419DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
408DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); 420DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
409DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); 421DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
422DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header);
423DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header);
424DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header);
425DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header);
410DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); 426DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
411DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); 427DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
412DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); 428DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
413DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); 429DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
430DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header);
431DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header);
432DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header);
433DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header);
414#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ 434#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
415 435
416#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) 436#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
@@ -430,6 +450,13 @@ struct mpc83xx_pcie_priv {
430 u32 dev_base; 450 u32 dev_base;
431}; 451};
432 452
453struct pex_inbound_window {
454 u32 ar;
455 u32 tar;
456 u32 barl;
457 u32 barh;
458};
459
433/* 460/*
434 * With the convention of u-boot, the PCIE outbound window 0 serves 461 * With the convention of u-boot, the PCIE outbound window 0 serves
435 * as configuration transactions outbound. 462 * as configuration transactions outbound.
@@ -437,6 +464,8 @@ struct mpc83xx_pcie_priv {
437#define PEX_OUTWIN0_BAR 0xCA4 464#define PEX_OUTWIN0_BAR 0xCA4
438#define PEX_OUTWIN0_TAL 0xCA8 465#define PEX_OUTWIN0_TAL 0xCA8
439#define PEX_OUTWIN0_TAH 0xCAC 466#define PEX_OUTWIN0_TAH 0xCAC
467#define PEX_RC_INWIN_BASE 0xE60
468#define PEX_RCIWARn_EN 0x1
440 469
441static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) 470static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
442{ 471{
@@ -604,6 +633,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
604 const int *bus_range; 633 const int *bus_range;
605 int primary; 634 int primary;
606 635
636 is_mpc83xx_pci = 1;
637
607 if (!of_device_is_available(dev)) { 638 if (!of_device_is_available(dev)) {
608 pr_warning("%s: disabled by the firmware.\n", 639 pr_warning("%s: disabled by the firmware.\n",
609 dev->full_name); 640 dev->full_name);
@@ -683,3 +714,40 @@ err0:
683 return ret; 714 return ret;
684} 715}
685#endif /* CONFIG_PPC_83xx */ 716#endif /* CONFIG_PPC_83xx */
717
718u64 fsl_pci_immrbar_base(struct pci_controller *hose)
719{
720#ifdef CONFIG_PPC_83xx
721 if (is_mpc83xx_pci) {
722 struct mpc83xx_pcie_priv *pcie = hose->dn->data;
723 struct pex_inbound_window *in;
724 int i;
725
726 /* Walk the Root Complex Inbound windows to match IMMR base */
727 in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
728 for (i = 0; i < 4; i++) {
729 /* not enabled, skip */
730 if (!in_le32(&in[i].ar) & PEX_RCIWARn_EN)
731 continue;
732
733 if (get_immrbase() == in_le32(&in[i].tar))
734 return (u64)in_le32(&in[i].barh) << 32 |
735 in_le32(&in[i].barl);
736 }
737
738 printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
739 }
740#endif
741
742#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
743 if (!is_mpc83xx_pci) {
744 u32 base;
745
746 pci_bus_read_config_dword(hose->bus,
747 PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
748 return base;
749 }
750#endif
751
752 return 0;
753}
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index a9d8bbebed80..a39ed5cc2c5a 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC85xx/86xx PCI Express structure define 2 * MPC85xx/86xx PCI Express structure define
3 * 3 *
4 * Copyright 2007 Freescale Semiconductor, Inc 4 * Copyright 2007,2011 Freescale Semiconductor, Inc
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -21,6 +21,7 @@
21#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ 21#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */
22#define PIWAR_READ_SNOOP 0x00050000 22#define PIWAR_READ_SNOOP 0x00050000
23#define PIWAR_WRITE_SNOOP 0x00005000 23#define PIWAR_WRITE_SNOOP 0x00005000
24#define PIWAR_SZ_MASK 0x0000003f
24 25
25/* PCI/PCI Express outbound window reg */ 26/* PCI/PCI Express outbound window reg */
26struct pci_outbound_window_regs { 27struct pci_outbound_window_regs {
@@ -49,7 +50,9 @@ struct ccsr_pci {
49 __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */ 50 __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */
50 __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */ 51 __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */
51 __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */ 52 __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */
52 u8 res2[12]; 53 __be32 pex_config; /* 0x.014 - PCIE CONFIG Register */
54 __be32 pex_int_status; /* 0x.018 - PCIE interrupt status */
55 u8 res2[4];
53 __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */ 56 __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */
54 __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */ 57 __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */
55 __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */ 58 __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */
@@ -62,14 +65,14 @@ struct ccsr_pci {
62 * in all of the other outbound windows. 65 * in all of the other outbound windows.
63 */ 66 */
64 struct pci_outbound_window_regs pow[5]; 67 struct pci_outbound_window_regs pow[5];
65 68 u8 res14[96];
66 u8 res14[256]; 69 struct pci_inbound_window_regs pmit; /* 0xd00 - 0xd9c Inbound MSI */
67 70 u8 res6[96];
68/* PCI/PCI Express inbound window 3-1 71/* PCI/PCI Express inbound window 3-0
69 * inbound window 1 supports only a 32-bit base address and does not 72 * inbound window 1 supports only a 32-bit base address and does not
70 * define an inbound window base extended address register. 73 * define an inbound window base extended address register.
71 */ 74 */
72 struct pci_inbound_window_regs piw[3]; 75 struct pci_inbound_window_regs piw[4];
73 76
74 __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */ 77 __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */
75 u8 res21[4]; 78 u8 res21[4];
@@ -88,6 +91,7 @@ struct ccsr_pci {
88extern int fsl_add_bridge(struct device_node *dev, int is_primary); 91extern int fsl_add_bridge(struct device_node *dev, int is_primary);
89extern void fsl_pcibios_fixup_bus(struct pci_bus *bus); 92extern void fsl_pcibios_fixup_bus(struct pci_bus *bus);
90extern int mpc83xx_add_bridge(struct device_node *dev); 93extern int mpc83xx_add_bridge(struct device_node *dev);
94u64 fsl_pci_immrbar_base(struct pci_controller *hose);
91 95
92#endif /* __POWERPC_FSL_PCI_H */ 96#endif /* __POWERPC_FSL_PCI_H */
93#endif /* __KERNEL__ */ 97#endif /* __KERNEL__ */
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 44de8559c975..f122e8961d32 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -53,13 +53,12 @@ static int pmc_suspend_valid(suspend_state_t state)
53 return 1; 53 return 1;
54} 54}
55 55
56static struct platform_suspend_ops pmc_suspend_ops = { 56static const struct platform_suspend_ops pmc_suspend_ops = {
57 .valid = pmc_suspend_valid, 57 .valid = pmc_suspend_valid,
58 .enter = pmc_suspend_enter, 58 .enter = pmc_suspend_enter,
59}; 59};
60 60
61static int pmc_probe(struct platform_device *ofdev, 61static int pmc_probe(struct platform_device *ofdev)
62 const struct of_device_id *id)
63{ 62{
64 pmc_regs = of_iomap(ofdev->dev.of_node, 0); 63 pmc_regs = of_iomap(ofdev->dev.of_node, 0);
65 if (!pmc_regs) 64 if (!pmc_regs)
@@ -76,7 +75,7 @@ static const struct of_device_id pmc_ids[] = {
76 { }, 75 { },
77}; 76};
78 77
79static struct of_platform_driver pmc_driver = { 78static struct platform_driver pmc_driver = {
80 .driver = { 79 .driver = {
81 .name = "fsl-pmc", 80 .name = "fsl-pmc",
82 .owner = THIS_MODULE, 81 .owner = THIS_MODULE,
@@ -87,6 +86,6 @@ static struct of_platform_driver pmc_driver = {
87 86
88static int __init pmc_init(void) 87static int __init pmc_init(void)
89{ 88{
90 return of_register_platform_driver(&pmc_driver); 89 return platform_driver_register(&pmc_driver);
91} 90}
92device_initcall(pmc_init); 91device_initcall(pmc_init);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 3017532319c8..b3fd081d56f5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
10 * - Added Port-Write message handling 10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling 11 * - Added Machine Check exception handling
12 * 12 *
13 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
15 * 15 *
16 * Copyright 2005 MontaVista Software, Inc. 16 * Copyright 2005 MontaVista Software, Inc.
@@ -47,14 +47,33 @@
47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) 48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
49 49
50#define IPWSR_CLEAR 0x98
51#define OMSR_CLEAR 0x1cb3
52#define IMSR_CLEAR 0x491
53#define IDSR_CLEAR 0x91
54#define ODSR_CLEAR 0x1c00
55#define LTLEECSR_ENABLE_ALL 0xFFC000FC
56#define ESCSR_CLEAR 0x07120204
57
58#define RIO_PORT1_EDCSR 0x0640
59#define RIO_PORT2_EDCSR 0x0680
60#define RIO_PORT1_IECSR 0x10130
61#define RIO_PORT2_IECSR 0x101B0
62#define RIO_IM0SR 0x13064
63#define RIO_IM1SR 0x13164
64#define RIO_OM0SR 0x13004
65#define RIO_OM1SR 0x13104
66
50#define RIO_ATMU_REGS_OFFSET 0x10c00 67#define RIO_ATMU_REGS_OFFSET 0x10c00
51#define RIO_P_MSG_REGS_OFFSET 0x11000 68#define RIO_P_MSG_REGS_OFFSET 0x11000
52#define RIO_S_MSG_REGS_OFFSET 0x13000 69#define RIO_S_MSG_REGS_OFFSET 0x13000
70#define RIO_GCCSR 0x13c
53#define RIO_ESCSR 0x158 71#define RIO_ESCSR 0x158
72#define RIO_PORT2_ESCSR 0x178
54#define RIO_CCSR 0x15c 73#define RIO_CCSR 0x15c
55#define RIO_LTLEDCSR 0x0608 74#define RIO_LTLEDCSR 0x0608
56#define RIO_LTLEDCSR_IER 0x80000000 75#define RIO_LTLEDCSR_IER 0x80000000
57#define RIO_LTLEDCSR_PRT 0x01000000 76#define RIO_LTLEDCSR_PRT 0x01000000
58#define RIO_LTLEECSR 0x060c 77#define RIO_LTLEECSR 0x060c
59#define RIO_EPWISR 0x10010 78#define RIO_EPWISR 0x10010
60#define RIO_ISR_AACR 0x10120 79#define RIO_ISR_AACR 0x10120
@@ -87,6 +106,12 @@
87#define RIO_IPWSR_PWD 0x00000008 106#define RIO_IPWSR_PWD 0x00000008
88#define RIO_IPWSR_PWB 0x00000004 107#define RIO_IPWSR_PWB 0x00000004
89 108
109/* EPWISR Error match value */
110#define RIO_EPWISR_PINT1 0x80000000
111#define RIO_EPWISR_PINT2 0x40000000
112#define RIO_EPWISR_MU 0x00000002
113#define RIO_EPWISR_PW 0x00000001
114
90#define RIO_MSG_DESC_SIZE 32 115#define RIO_MSG_DESC_SIZE 32
91#define RIO_MSG_BUFFER_SIZE 4096 116#define RIO_MSG_BUFFER_SIZE 4096
92#define RIO_MIN_TX_RING_SIZE 2 117#define RIO_MIN_TX_RING_SIZE 2
@@ -117,44 +142,59 @@ struct rio_atmu_regs {
117}; 142};
118 143
119struct rio_msg_regs { 144struct rio_msg_regs {
120 u32 omr; 145 u32 omr; /* 0xD_3000 - Outbound message 0 mode register */
121 u32 osr; 146 u32 osr; /* 0xD_3004 - Outbound message 0 status register */
122 u32 pad1; 147 u32 pad1;
123 u32 odqdpar; 148 u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue
149 dequeue pointer address register */
124 u32 pad2; 150 u32 pad2;
125 u32 osar; 151 u32 osar; /* 0xD_3014 - Outbound message 0 source address
126 u32 odpr; 152 register */
127 u32 odatr; 153 u32 odpr; /* 0xD_3018 - Outbound message 0 destination port
128 u32 odcr; 154 register */
155 u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes
156 Register*/
157 u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count
158 register */
129 u32 pad3; 159 u32 pad3;
130 u32 odqepar; 160 u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue
161 enqueue pointer address register */
131 u32 pad4[13]; 162 u32 pad4[13];
132 u32 imr; 163 u32 imr; /* 0xD_3060 - Inbound message 0 mode register */
133 u32 isr; 164 u32 isr; /* 0xD_3064 - Inbound message 0 status register */
134 u32 pad5; 165 u32 pad5;
135 u32 ifqdpar; 166 u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue
167 pointer address register*/
136 u32 pad6; 168 u32 pad6;
137 u32 ifqepar; 169 u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue
170 pointer address register */
138 u32 pad7[226]; 171 u32 pad7[226];
139 u32 odmr; 172 u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */
140 u32 odsr; 173 u32 odsr; /* 0xD_3404 - Outbound doorbell status register */
141 u32 res0[4]; 174 u32 res0[4];
142 u32 oddpr; 175 u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port
143 u32 oddatr; 176 register */
177 u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes
178 register */
144 u32 res1[3]; 179 u32 res1[3];
145 u32 odretcr; 180 u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold
181 configuration register */
146 u32 res2[12]; 182 u32 res2[12];
147 u32 dmr; 183 u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */
148 u32 dsr; 184 u32 dsr; /* 0xD_3464 - Inbound doorbell status register */
149 u32 pad8; 185 u32 pad8;
150 u32 dqdpar; 186 u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer
187 address register */
151 u32 pad9; 188 u32 pad9;
152 u32 dqepar; 189 u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer
190 address register */
153 u32 pad10[26]; 191 u32 pad10[26];
154 u32 pwmr; 192 u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */
155 u32 pwsr; 193 u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */
156 u32 epwqbar; 194 u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address
157 u32 pwqbar; 195 register */
196 u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address
197 register */
158}; 198};
159 199
160struct rio_tx_desc { 200struct rio_tx_desc {
@@ -241,35 +281,32 @@ struct rio_priv {
241static void __iomem *rio_regs_win; 281static void __iomem *rio_regs_win;
242 282
243#ifdef CONFIG_E500 283#ifdef CONFIG_E500
244static int (*saved_mcheck_exception)(struct pt_regs *regs); 284int fsl_rio_mcheck_exception(struct pt_regs *regs)
245
246static int fsl_rio_mcheck_exception(struct pt_regs *regs)
247{ 285{
248 const struct exception_table_entry *entry = NULL; 286 const struct exception_table_entry *entry;
249 unsigned long reason = mfspr(SPRN_MCSR); 287 unsigned long reason;
250 288
251 if (reason & MCSR_BUS_RBERR) { 289 if (!rio_regs_win)
252 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); 290 return 0;
253 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { 291
254 /* Check if we are prepared to handle this fault */ 292 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
255 entry = search_exception_tables(regs->nip); 293 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
256 if (entry) { 294 /* Check if we are prepared to handle this fault */
257 pr_debug("RIO: %s - MC Exception handled\n", 295 entry = search_exception_tables(regs->nip);
258 __func__); 296 if (entry) {
259 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 297 pr_debug("RIO: %s - MC Exception handled\n",
260 0); 298 __func__);
261 regs->msr |= MSR_RI; 299 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
262 regs->nip = entry->fixup; 300 0);
263 return 1; 301 regs->msr |= MSR_RI;
264 } 302 regs->nip = entry->fixup;
303 return 1;
265 } 304 }
266 } 305 }
267 306
268 if (saved_mcheck_exception) 307 return 0;
269 return saved_mcheck_exception(regs);
270 else
271 return cur_cpu_spec->machine_check(regs);
272} 308}
309EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
273#endif 310#endif
274 311
275/** 312/**
@@ -463,7 +500,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
463} 500}
464 501
465/** 502/**
466 * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue 503 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
467 * @mport: Master port with outbound message queue 504 * @mport: Master port with outbound message queue
468 * @rdev: Target of outbound message 505 * @rdev: Target of outbound message
469 * @mbox: Outbound mailbox 506 * @mbox: Outbound mailbox
@@ -473,8 +510,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
473 * Adds the @buffer message to the MPC85xx outbound message queue. Returns 510 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
474 * %0 on success or %-EINVAL on failure. 511 * %0 on success or %-EINVAL on failure.
475 */ 512 */
476int 513static int
477rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, 514fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
478 void *buffer, size_t len) 515 void *buffer, size_t len)
479{ 516{
480 struct rio_priv *priv = mport->priv; 517 struct rio_priv *priv = mport->priv;
@@ -483,9 +520,8 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
483 + priv->msg_tx_ring.tx_slot; 520 + priv->msg_tx_ring.tx_slot;
484 int ret = 0; 521 int ret = 0;
485 522
486 pr_debug 523 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
487 ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n", 524 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
488 rdev->destid, mbox, (int)buffer, len);
489 525
490 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { 526 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
491 ret = -EINVAL; 527 ret = -EINVAL;
@@ -535,8 +571,6 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
535 return ret; 571 return ret;
536} 572}
537 573
538EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
539
540/** 574/**
541 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler 575 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
542 * @irq: Linux interrupt number 576 * @irq: Linux interrupt number
@@ -581,7 +615,7 @@ fsl_rio_tx_handler(int irq, void *dev_instance)
581} 615}
582 616
583/** 617/**
584 * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox 618 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
585 * @mport: Master port implementing the outbound message unit 619 * @mport: Master port implementing the outbound message unit
586 * @dev_id: Device specific pointer to pass on event 620 * @dev_id: Device specific pointer to pass on event
587 * @mbox: Mailbox to open 621 * @mbox: Mailbox to open
@@ -591,7 +625,8 @@ fsl_rio_tx_handler(int irq, void *dev_instance)
591 * and enables the outbound message unit. Returns %0 on success and 625 * and enables the outbound message unit. Returns %0 on success and
592 * %-EINVAL or %-ENOMEM on failure. 626 * %-EINVAL or %-ENOMEM on failure.
593 */ 627 */
594int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) 628static int
629fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
595{ 630{
596 int i, j, rc = 0; 631 int i, j, rc = 0;
597 struct rio_priv *priv = mport->priv; 632 struct rio_priv *priv = mport->priv;
@@ -687,14 +722,14 @@ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entr
687} 722}
688 723
689/** 724/**
690 * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox 725 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
691 * @mport: Master port implementing the outbound message unit 726 * @mport: Master port implementing the outbound message unit
692 * @mbox: Mailbox to close 727 * @mbox: Mailbox to close
693 * 728 *
694 * Disables the outbound message unit, free all buffers, and 729 * Disables the outbound message unit, free all buffers, and
695 * frees the outbound message interrupt. 730 * frees the outbound message interrupt.
696 */ 731 */
697void rio_close_outb_mbox(struct rio_mport *mport, int mbox) 732static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
698{ 733{
699 struct rio_priv *priv = mport->priv; 734 struct rio_priv *priv = mport->priv;
700 /* Disable inbound message unit */ 735 /* Disable inbound message unit */
@@ -751,7 +786,7 @@ fsl_rio_rx_handler(int irq, void *dev_instance)
751} 786}
752 787
753/** 788/**
754 * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox 789 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
755 * @mport: Master port implementing the inbound message unit 790 * @mport: Master port implementing the inbound message unit
756 * @dev_id: Device specific pointer to pass on event 791 * @dev_id: Device specific pointer to pass on event
757 * @mbox: Mailbox to open 792 * @mbox: Mailbox to open
@@ -761,7 +796,8 @@ fsl_rio_rx_handler(int irq, void *dev_instance)
761 * and enables the inbound message unit. Returns %0 on success 796 * and enables the inbound message unit. Returns %0 on success
762 * and %-EINVAL or %-ENOMEM on failure. 797 * and %-EINVAL or %-ENOMEM on failure.
763 */ 798 */
764int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) 799static int
800fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
765{ 801{
766 int i, rc = 0; 802 int i, rc = 0;
767 struct rio_priv *priv = mport->priv; 803 struct rio_priv *priv = mport->priv;
@@ -825,14 +861,14 @@ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entri
825} 861}
826 862
827/** 863/**
828 * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox 864 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
829 * @mport: Master port implementing the inbound message unit 865 * @mport: Master port implementing the inbound message unit
830 * @mbox: Mailbox to close 866 * @mbox: Mailbox to close
831 * 867 *
832 * Disables the inbound message unit, free all buffers, and 868 * Disables the inbound message unit, free all buffers, and
833 * frees the inbound message interrupt. 869 * frees the inbound message interrupt.
834 */ 870 */
835void rio_close_inb_mbox(struct rio_mport *mport, int mbox) 871static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
836{ 872{
837 struct rio_priv *priv = mport->priv; 873 struct rio_priv *priv = mport->priv;
838 /* Disable inbound message unit */ 874 /* Disable inbound message unit */
@@ -847,7 +883,7 @@ void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
847} 883}
848 884
849/** 885/**
850 * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue 886 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
851 * @mport: Master port implementing the inbound message unit 887 * @mport: Master port implementing the inbound message unit
852 * @mbox: Inbound mailbox number 888 * @mbox: Inbound mailbox number
853 * @buf: Buffer to add to inbound queue 889 * @buf: Buffer to add to inbound queue
@@ -855,12 +891,12 @@ void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
855 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns 891 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
856 * %0 on success or %-EINVAL on failure. 892 * %0 on success or %-EINVAL on failure.
857 */ 893 */
858int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) 894static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
859{ 895{
860 int rc = 0; 896 int rc = 0;
861 struct rio_priv *priv = mport->priv; 897 struct rio_priv *priv = mport->priv;
862 898
863 pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", 899 pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
864 priv->msg_rx_ring.rx_slot); 900 priv->msg_rx_ring.rx_slot);
865 901
866 if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { 902 if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
@@ -879,17 +915,15 @@ int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
879 return rc; 915 return rc;
880} 916}
881 917
882EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);
883
884/** 918/**
885 * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit 919 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
886 * @mport: Master port implementing the inbound message unit 920 * @mport: Master port implementing the inbound message unit
887 * @mbox: Inbound mailbox number 921 * @mbox: Inbound mailbox number
888 * 922 *
889 * Gets the next available inbound message from the inbound message queue. 923 * Gets the next available inbound message from the inbound message queue.
890 * A pointer to the message is returned on success or NULL on failure. 924 * A pointer to the message is returned on success or NULL on failure.
891 */ 925 */
892void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox) 926static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
893{ 927{
894 struct rio_priv *priv = mport->priv; 928 struct rio_priv *priv = mport->priv;
895 u32 phys_buf, virt_buf; 929 u32 phys_buf, virt_buf;
@@ -926,8 +960,6 @@ void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
926 return buf; 960 return buf;
927} 961}
928 962
929EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
930
931/** 963/**
932 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler 964 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
933 * @irq: Linux interrupt number 965 * @irq: Linux interrupt number
@@ -954,7 +986,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
954 if (dsr & DOORBELL_DSR_QFI) { 986 if (dsr & DOORBELL_DSR_QFI) {
955 pr_info("RIO: doorbell queue full\n"); 987 pr_info("RIO: doorbell queue full\n");
956 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); 988 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
957 goto out;
958 } 989 }
959 990
960 /* XXX Need to check/dispatch until queue empty */ 991 /* XXX Need to check/dispatch until queue empty */
@@ -1051,6 +1082,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
1051 return rc; 1082 return rc;
1052} 1083}
1053 1084
1085static void port_error_handler(struct rio_mport *port, int offset)
1086{
1087 /*XXX: Error recovery is not implemented, we just clear errors */
1088 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1089
1090 if (offset == 0) {
1091 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
1092 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
1093 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
1094 } else {
1095 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
1097 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
1098 }
1099}
1100
1101static void msg_unit_error_handler(struct rio_mport *port)
1102{
1103 struct rio_priv *priv = port->priv;
1104
1105 /*XXX: Error recovery is not implemented, we just clear errors */
1106 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1107
1108 out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
1109 out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
1110 out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
1111 out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
1112
1113 out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
1114 out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
1115
1116 out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
1117}
1118
1054/** 1119/**
1055 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler 1120 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1056 * @irq: Linux interrupt number 1121 * @irq: Linux interrupt number
@@ -1067,18 +1132,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
1067 struct rio_priv *priv = port->priv; 1132 struct rio_priv *priv = port->priv;
1068 u32 epwisr, tmp; 1133 u32 epwisr, tmp;
1069 1134
1070 ipwmr = in_be32(&priv->msg_regs->pwmr);
1071 ipwsr = in_be32(&priv->msg_regs->pwsr);
1072
1073 epwisr = in_be32(priv->regs_win + RIO_EPWISR); 1135 epwisr = in_be32(priv->regs_win + RIO_EPWISR);
1074 if (epwisr & 0x80000000) { 1136 if (!(epwisr & RIO_EPWISR_PW))
1075 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1137 goto pw_done;
1076 pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
1077 out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
1078 }
1079 1138
1080 if (!(epwisr & 0x00000001)) 1139 ipwmr = in_be32(&priv->msg_regs->pwmr);
1081 return IRQ_HANDLED; 1140 ipwsr = in_be32(&priv->msg_regs->pwsr);
1082 1141
1083#ifdef DEBUG_PW 1142#ifdef DEBUG_PW
1084 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); 1143 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1094,20 +1153,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
1094 pr_debug(" PWB"); 1153 pr_debug(" PWB");
1095 pr_debug(" )\n"); 1154 pr_debug(" )\n");
1096#endif 1155#endif
1097 out_be32(&priv->msg_regs->pwsr,
1098 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1099
1100 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1101 priv->port_write_msg.err_count++;
1102 pr_info("RIO: Port-Write Transaction Err (%d)\n",
1103 priv->port_write_msg.err_count);
1104 }
1105 if (ipwsr & RIO_IPWSR_PWD) {
1106 priv->port_write_msg.discard_count++;
1107 pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1108 priv->port_write_msg.discard_count);
1109 }
1110
1111 /* Schedule deferred processing if PW was received */ 1156 /* Schedule deferred processing if PW was received */
1112 if (ipwsr & RIO_IPWSR_QFI) { 1157 if (ipwsr & RIO_IPWSR_QFI) {
1113 /* Save PW message (if there is room in FIFO), 1158 /* Save PW message (if there is room in FIFO),
@@ -1119,16 +1164,55 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
1119 RIO_PW_MSG_SIZE); 1164 RIO_PW_MSG_SIZE);
1120 } else { 1165 } else {
1121 priv->port_write_msg.discard_count++; 1166 priv->port_write_msg.discard_count++;
1122 pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", 1167 pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
1123 priv->port_write_msg.discard_count); 1168 priv->port_write_msg.discard_count);
1124 } 1169 }
1170 /* Clear interrupt and issue Clear Queue command. This allows
1171 * another port-write to be received.
1172 */
1173 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
1174 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
1175
1125 schedule_work(&priv->pw_work); 1176 schedule_work(&priv->pw_work);
1126 } 1177 }
1127 1178
1128 /* Issue Clear Queue command. This allows another 1179 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1129 * port-write to be received. 1180 priv->port_write_msg.err_count++;
1130 */ 1181 pr_debug("RIO: Port-Write Transaction Err (%d)\n",
1131 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); 1182 priv->port_write_msg.err_count);
1183 /* Clear Transaction Error: port-write controller should be
1184 * disabled when clearing this error
1185 */
1186 out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
1187 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
1188 out_be32(&priv->msg_regs->pwmr, ipwmr);
1189 }
1190
1191 if (ipwsr & RIO_IPWSR_PWD) {
1192 priv->port_write_msg.discard_count++;
1193 pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1194 priv->port_write_msg.discard_count);
1195 out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
1196 }
1197
1198pw_done:
1199 if (epwisr & RIO_EPWISR_PINT1) {
1200 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1201 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1202 port_error_handler(port, 0);
1203 }
1204
1205 if (epwisr & RIO_EPWISR_PINT2) {
1206 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1207 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1208 port_error_handler(port, 1);
1209 }
1210
1211 if (epwisr & RIO_EPWISR_MU) {
1212 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1213 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1214 msg_unit_error_handler(port);
1215 }
1132 1216
1133 return IRQ_HANDLED; 1217 return IRQ_HANDLED;
1134} 1218}
@@ -1238,12 +1322,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport)
1238 1322
1239 1323
1240 /* Hook up port-write handler */ 1324 /* Hook up port-write handler */
1241 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, 1325 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
1242 "port-write", (void *)mport); 1326 IRQF_SHARED, "port-write", (void *)mport);
1243 if (rc < 0) { 1327 if (rc < 0) {
1244 pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); 1328 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
1245 goto err_out; 1329 goto err_out;
1246 } 1330 }
1331 /* Enable Error Interrupt */
1332 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
1247 1333
1248 INIT_WORK(&priv->pw_work, fsl_pw_dpc); 1334 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1249 spin_lock_init(&priv->pw_fifo_lock); 1335 spin_lock_init(&priv->pw_fifo_lock);
@@ -1268,28 +1354,6 @@ err_out:
1268 return rc; 1354 return rc;
1269} 1355}
1270 1356
1271static char *cmdline = NULL;
1272
1273static int fsl_rio_get_hdid(int index)
1274{
1275 /* XXX Need to parse multiple entries in some format */
1276 if (!cmdline)
1277 return -1;
1278
1279 return simple_strtol(cmdline, NULL, 0);
1280}
1281
1282static int fsl_rio_get_cmdline(char *s)
1283{
1284 if (!s)
1285 return 0;
1286
1287 cmdline = s;
1288 return 1;
1289}
1290
1291__setup("riohdid=", fsl_rio_get_cmdline);
1292
1293static inline void fsl_rio_info(struct device *dev, u32 ccsr) 1357static inline void fsl_rio_info(struct device *dev, u32 ccsr)
1294{ 1358{
1295 const char *str; 1359 const char *str;
@@ -1406,13 +1470,19 @@ int fsl_rio_setup(struct platform_device *dev)
1406 ops->cwrite = fsl_rio_config_write; 1470 ops->cwrite = fsl_rio_config_write;
1407 ops->dsend = fsl_rio_doorbell_send; 1471 ops->dsend = fsl_rio_doorbell_send;
1408 ops->pwenable = fsl_rio_pw_enable; 1472 ops->pwenable = fsl_rio_pw_enable;
1473 ops->open_outb_mbox = fsl_open_outb_mbox;
1474 ops->open_inb_mbox = fsl_open_inb_mbox;
1475 ops->close_outb_mbox = fsl_close_outb_mbox;
1476 ops->close_inb_mbox = fsl_close_inb_mbox;
1477 ops->add_outb_message = fsl_add_outb_message;
1478 ops->add_inb_buffer = fsl_add_inb_buffer;
1479 ops->get_inb_message = fsl_get_inb_message;
1409 1480
1410 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); 1481 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
1411 if (!port) { 1482 if (!port) {
1412 rc = -ENOMEM; 1483 rc = -ENOMEM;
1413 goto err_port; 1484 goto err_port;
1414 } 1485 }
1415 port->id = 0;
1416 port->index = 0; 1486 port->index = 0;
1417 1487
1418 priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); 1488 priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
@@ -1428,6 +1498,14 @@ int fsl_rio_setup(struct platform_device *dev)
1428 port->iores.flags = IORESOURCE_MEM; 1498 port->iores.flags = IORESOURCE_MEM;
1429 port->iores.name = "rio_io_win"; 1499 port->iores.name = "rio_io_win";
1430 1500
1501 if (request_resource(&iomem_resource, &port->iores) < 0) {
1502 dev_err(&dev->dev, "RIO: Error requesting master port region"
1503 " 0x%016llx-0x%016llx\n",
1504 (u64)port->iores.start, (u64)port->iores.end);
1505 rc = -ENOMEM;
1506 goto err_res;
1507 }
1508
1431 priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); 1509 priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
1432 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); 1510 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
1433 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); 1511 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
@@ -1443,10 +1521,8 @@ int fsl_rio_setup(struct platform_device *dev)
1443 priv->dev = &dev->dev; 1521 priv->dev = &dev->dev;
1444 1522
1445 port->ops = ops; 1523 port->ops = ops;
1446 port->host_deviceid = fsl_rio_get_hdid(port->id);
1447
1448 port->priv = priv; 1524 port->priv = priv;
1449 rio_register_mport(port); 1525 port->phys_efptr = 0x100;
1450 1526
1451 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); 1527 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
1452 rio_regs_win = priv->regs_win; 1528 rio_regs_win = priv->regs_win;
@@ -1493,6 +1569,15 @@ int fsl_rio_setup(struct platform_device *dev)
1493 dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", 1569 dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
1494 port->sys_size ? 65536 : 256); 1570 port->sys_size ? 65536 : 256);
1495 1571
1572 if (rio_register_mport(port))
1573 goto err;
1574
1575 if (port->host_deviceid >= 0)
1576 out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
1577 RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
1578 else
1579 out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
1580
1496 priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win 1581 priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
1497 + RIO_ATMU_REGS_OFFSET); 1582 + RIO_ATMU_REGS_OFFSET);
1498 priv->maint_atmu_regs = priv->atmu_regs + 1; 1583 priv->maint_atmu_regs = priv->atmu_regs + 1;
@@ -1519,16 +1604,10 @@ int fsl_rio_setup(struct platform_device *dev)
1519 fsl_rio_doorbell_init(port); 1604 fsl_rio_doorbell_init(port);
1520 fsl_rio_port_write_init(port); 1605 fsl_rio_port_write_init(port);
1521 1606
1522#ifdef CONFIG_E500
1523 saved_mcheck_exception = ppc_md.machine_check_exception;
1524 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1525#endif
1526 /* Ensure that RFXE is set */
1527 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
1528
1529 return 0; 1607 return 0;
1530err: 1608err:
1531 iounmap(priv->regs_win); 1609 iounmap(priv->regs_win);
1610err_res:
1532 kfree(priv); 1611 kfree(priv);
1533err_priv: 1612err_priv:
1534 kfree(port); 1613 kfree(port);
@@ -1540,21 +1619,12 @@ err_ops:
1540 1619
1541/* The probe function for RapidIO peer-to-peer network. 1620/* The probe function for RapidIO peer-to-peer network.
1542 */ 1621 */
1543static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev, 1622static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev)
1544 const struct of_device_id *match)
1545{ 1623{
1546 int rc;
1547 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", 1624 printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
1548 dev->dev.of_node->full_name); 1625 dev->dev.of_node->full_name);
1549 1626
1550 rc = fsl_rio_setup(dev); 1627 return fsl_rio_setup(dev);
1551 if (rc)
1552 goto out;
1553
1554 /* Enumerate all registered ports */
1555 rc = rio_init_mports();
1556out:
1557 return rc;
1558}; 1628};
1559 1629
1560static const struct of_device_id fsl_of_rio_rpn_ids[] = { 1630static const struct of_device_id fsl_of_rio_rpn_ids[] = {
@@ -1564,7 +1634,7 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = {
1564 {}, 1634 {},
1565}; 1635};
1566 1636
1567static struct of_platform_driver fsl_of_rio_rpn_driver = { 1637static struct platform_driver fsl_of_rio_rpn_driver = {
1568 .driver = { 1638 .driver = {
1569 .name = "fsl-of-rio", 1639 .name = "fsl-of-rio",
1570 .owner = THIS_MODULE, 1640 .owner = THIS_MODULE,
@@ -1575,7 +1645,7 @@ static struct of_platform_driver fsl_of_rio_rpn_driver = {
1575 1645
1576static __init int fsl_of_rio_rpn_init(void) 1646static __init int fsl_of_rio_rpn_init(void)
1577{ 1647{
1578 return of_register_platform_driver(&fsl_of_rio_rpn_driver); 1648 return platform_driver_register(&fsl_of_rio_rpn_driver);
1579} 1649}
1580 1650
1581subsys_initcall(fsl_of_rio_rpn_init); 1651subsys_initcall(fsl_of_rio_rpn_init);
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index b91f7acdda6f..19e5015e039b 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -209,186 +209,29 @@ static int __init of_add_fixed_phys(void)
209arch_initcall(of_add_fixed_phys); 209arch_initcall(of_add_fixed_phys);
210#endif /* CONFIG_FIXED_PHY */ 210#endif /* CONFIG_FIXED_PHY */
211 211
212static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type) 212#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
213{ 213static __be32 __iomem *rstcr;
214 if (!phy_type)
215 return FSL_USB2_PHY_NONE;
216 if (!strcasecmp(phy_type, "ulpi"))
217 return FSL_USB2_PHY_ULPI;
218 if (!strcasecmp(phy_type, "utmi"))
219 return FSL_USB2_PHY_UTMI;
220 if (!strcasecmp(phy_type, "utmi_wide"))
221 return FSL_USB2_PHY_UTMI_WIDE;
222 if (!strcasecmp(phy_type, "serial"))
223 return FSL_USB2_PHY_SERIAL;
224
225 return FSL_USB2_PHY_NONE;
226}
227 214
228static int __init fsl_usb_of_init(void) 215static int __init setup_rstcr(void)
229{ 216{
230 struct device_node *np; 217 struct device_node *np;
231 unsigned int i = 0;
232 struct platform_device *usb_dev_mph = NULL, *usb_dev_dr_host = NULL,
233 *usb_dev_dr_client = NULL;
234 int ret;
235
236 for_each_compatible_node(np, NULL, "fsl-usb2-mph") {
237 struct resource r[2];
238 struct fsl_usb2_platform_data usb_data;
239 const unsigned char *prop = NULL;
240
241 memset(&r, 0, sizeof(r));
242 memset(&usb_data, 0, sizeof(usb_data));
243
244 ret = of_address_to_resource(np, 0, &r[0]);
245 if (ret)
246 goto err;
247
248 of_irq_to_resource(np, 0, &r[1]);
249
250 usb_dev_mph =
251 platform_device_register_simple("fsl-ehci", i, r, 2);
252 if (IS_ERR(usb_dev_mph)) {
253 ret = PTR_ERR(usb_dev_mph);
254 goto err;
255 }
256
257 usb_dev_mph->dev.coherent_dma_mask = 0xffffffffUL;
258 usb_dev_mph->dev.dma_mask = &usb_dev_mph->dev.coherent_dma_mask;
259
260 usb_data.operating_mode = FSL_USB2_MPH_HOST;
261
262 prop = of_get_property(np, "port0", NULL);
263 if (prop)
264 usb_data.port_enables |= FSL_USB2_PORT0_ENABLED;
265
266 prop = of_get_property(np, "port1", NULL);
267 if (prop)
268 usb_data.port_enables |= FSL_USB2_PORT1_ENABLED;
269
270 prop = of_get_property(np, "phy_type", NULL);
271 usb_data.phy_mode = determine_usb_phy(prop);
272
273 ret =
274 platform_device_add_data(usb_dev_mph, &usb_data,
275 sizeof(struct
276 fsl_usb2_platform_data));
277 if (ret)
278 goto unreg_mph;
279 i++;
280 }
281
282 for_each_compatible_node(np, NULL, "fsl-usb2-dr") {
283 struct resource r[2];
284 struct fsl_usb2_platform_data usb_data;
285 const unsigned char *prop = NULL;
286 218
287 if (!of_device_is_available(np)) 219 for_each_node_by_name(np, "global-utilities") {
288 continue; 220 if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
289 221 rstcr = of_iomap(np, 0) + 0xb0;
290 memset(&r, 0, sizeof(r)); 222 if (!rstcr)
291 memset(&usb_data, 0, sizeof(usb_data)); 223 printk (KERN_ERR "Error: reset control "
292 224 "register not mapped!\n");
293 ret = of_address_to_resource(np, 0, &r[0]); 225 break;
294 if (ret)
295 goto unreg_mph;
296
297 of_irq_to_resource(np, 0, &r[1]);
298
299 prop = of_get_property(np, "dr_mode", NULL);
300
301 if (!prop || !strcmp(prop, "host")) {
302 usb_data.operating_mode = FSL_USB2_DR_HOST;
303 usb_dev_dr_host = platform_device_register_simple(
304 "fsl-ehci", i, r, 2);
305 if (IS_ERR(usb_dev_dr_host)) {
306 ret = PTR_ERR(usb_dev_dr_host);
307 goto err;
308 }
309 } else if (prop && !strcmp(prop, "peripheral")) {
310 usb_data.operating_mode = FSL_USB2_DR_DEVICE;
311 usb_dev_dr_client = platform_device_register_simple(
312 "fsl-usb2-udc", i, r, 2);
313 if (IS_ERR(usb_dev_dr_client)) {
314 ret = PTR_ERR(usb_dev_dr_client);
315 goto err;
316 }
317 } else if (prop && !strcmp(prop, "otg")) {
318 usb_data.operating_mode = FSL_USB2_DR_OTG;
319 usb_dev_dr_host = platform_device_register_simple(
320 "fsl-ehci", i, r, 2);
321 if (IS_ERR(usb_dev_dr_host)) {
322 ret = PTR_ERR(usb_dev_dr_host);
323 goto err;
324 }
325 usb_dev_dr_client = platform_device_register_simple(
326 "fsl-usb2-udc", i, r, 2);
327 if (IS_ERR(usb_dev_dr_client)) {
328 ret = PTR_ERR(usb_dev_dr_client);
329 goto err;
330 }
331 } else {
332 ret = -EINVAL;
333 goto err;
334 } 226 }
335
336 prop = of_get_property(np, "phy_type", NULL);
337 usb_data.phy_mode = determine_usb_phy(prop);
338
339 if (usb_dev_dr_host) {
340 usb_dev_dr_host->dev.coherent_dma_mask = 0xffffffffUL;
341 usb_dev_dr_host->dev.dma_mask = &usb_dev_dr_host->
342 dev.coherent_dma_mask;
343 if ((ret = platform_device_add_data(usb_dev_dr_host,
344 &usb_data, sizeof(struct
345 fsl_usb2_platform_data))))
346 goto unreg_dr;
347 }
348 if (usb_dev_dr_client) {
349 usb_dev_dr_client->dev.coherent_dma_mask = 0xffffffffUL;
350 usb_dev_dr_client->dev.dma_mask = &usb_dev_dr_client->
351 dev.coherent_dma_mask;
352 if ((ret = platform_device_add_data(usb_dev_dr_client,
353 &usb_data, sizeof(struct
354 fsl_usb2_platform_data))))
355 goto unreg_dr;
356 }
357 i++;
358 } 227 }
359 return 0;
360
361unreg_dr:
362 if (usb_dev_dr_host)
363 platform_device_unregister(usb_dev_dr_host);
364 if (usb_dev_dr_client)
365 platform_device_unregister(usb_dev_dr_client);
366unreg_mph:
367 if (usb_dev_mph)
368 platform_device_unregister(usb_dev_mph);
369err:
370 return ret;
371}
372
373arch_initcall(fsl_usb_of_init);
374
375#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
376static __be32 __iomem *rstcr;
377 228
378static int __init setup_rstcr(void) 229 if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
379{
380 struct device_node *np;
381 np = of_find_node_by_name(NULL, "global-utilities");
382 if ((np && of_get_property(np, "fsl,has-rstcr", NULL))) {
383 rstcr = of_iomap(np, 0) + 0xb0;
384 if (!rstcr)
385 printk (KERN_EMERG "Error: reset control register "
386 "not mapped!\n");
387 } else if (ppc_md.restart == fsl_rstcr_restart)
388 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); 230 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
389 231
390 if (np) 232 if (np)
391 of_node_put(np); 233 of_node_put(np);
234
392 return 0; 235 return 0;
393} 236}
394 237
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 6323e70e6bf4..d18bb27e4df9 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -78,19 +78,19 @@ unsigned int i8259_irq(void)
78 return irq; 78 return irq;
79} 79}
80 80
81static void i8259_mask_and_ack_irq(unsigned int irq_nr) 81static void i8259_mask_and_ack_irq(struct irq_data *d)
82{ 82{
83 unsigned long flags; 83 unsigned long flags;
84 84
85 raw_spin_lock_irqsave(&i8259_lock, flags); 85 raw_spin_lock_irqsave(&i8259_lock, flags);
86 if (irq_nr > 7) { 86 if (d->irq > 7) {
87 cached_A1 |= 1 << (irq_nr-8); 87 cached_A1 |= 1 << (d->irq-8);
88 inb(0xA1); /* DUMMY */ 88 inb(0xA1); /* DUMMY */
89 outb(cached_A1, 0xA1); 89 outb(cached_A1, 0xA1);
90 outb(0x20, 0xA0); /* Non-specific EOI */ 90 outb(0x20, 0xA0); /* Non-specific EOI */
91 outb(0x20, 0x20); /* Non-specific EOI to cascade */ 91 outb(0x20, 0x20); /* Non-specific EOI to cascade */
92 } else { 92 } else {
93 cached_21 |= 1 << irq_nr; 93 cached_21 |= 1 << d->irq;
94 inb(0x21); /* DUMMY */ 94 inb(0x21); /* DUMMY */
95 outb(cached_21, 0x21); 95 outb(cached_21, 0x21);
96 outb(0x20, 0x20); /* Non-specific EOI */ 96 outb(0x20, 0x20); /* Non-specific EOI */
@@ -104,42 +104,42 @@ static void i8259_set_irq_mask(int irq_nr)
104 outb(cached_21,0x21); 104 outb(cached_21,0x21);
105} 105}
106 106
107static void i8259_mask_irq(unsigned int irq_nr) 107static void i8259_mask_irq(struct irq_data *d)
108{ 108{
109 unsigned long flags; 109 unsigned long flags;
110 110
111 pr_debug("i8259_mask_irq(%d)\n", irq_nr); 111 pr_debug("i8259_mask_irq(%d)\n", d->irq);
112 112
113 raw_spin_lock_irqsave(&i8259_lock, flags); 113 raw_spin_lock_irqsave(&i8259_lock, flags);
114 if (irq_nr < 8) 114 if (d->irq < 8)
115 cached_21 |= 1 << irq_nr; 115 cached_21 |= 1 << d->irq;
116 else 116 else
117 cached_A1 |= 1 << (irq_nr-8); 117 cached_A1 |= 1 << (d->irq-8);
118 i8259_set_irq_mask(irq_nr); 118 i8259_set_irq_mask(d->irq);
119 raw_spin_unlock_irqrestore(&i8259_lock, flags); 119 raw_spin_unlock_irqrestore(&i8259_lock, flags);
120} 120}
121 121
122static void i8259_unmask_irq(unsigned int irq_nr) 122static void i8259_unmask_irq(struct irq_data *d)
123{ 123{
124 unsigned long flags; 124 unsigned long flags;
125 125
126 pr_debug("i8259_unmask_irq(%d)\n", irq_nr); 126 pr_debug("i8259_unmask_irq(%d)\n", d->irq);
127 127
128 raw_spin_lock_irqsave(&i8259_lock, flags); 128 raw_spin_lock_irqsave(&i8259_lock, flags);
129 if (irq_nr < 8) 129 if (d->irq < 8)
130 cached_21 &= ~(1 << irq_nr); 130 cached_21 &= ~(1 << d->irq);
131 else 131 else
132 cached_A1 &= ~(1 << (irq_nr-8)); 132 cached_A1 &= ~(1 << (d->irq-8));
133 i8259_set_irq_mask(irq_nr); 133 i8259_set_irq_mask(d->irq);
134 raw_spin_unlock_irqrestore(&i8259_lock, flags); 134 raw_spin_unlock_irqrestore(&i8259_lock, flags);
135} 135}
136 136
137static struct irq_chip i8259_pic = { 137static struct irq_chip i8259_pic = {
138 .name = "i8259", 138 .name = "i8259",
139 .mask = i8259_mask_irq, 139 .irq_mask = i8259_mask_irq,
140 .disable = i8259_mask_irq, 140 .irq_disable = i8259_mask_irq,
141 .unmask = i8259_unmask_irq, 141 .irq_unmask = i8259_unmask_irq,
142 .mask_ack = i8259_mask_and_ack_irq, 142 .irq_mask_ack = i8259_mask_and_ack_irq,
143}; 143};
144 144
145static struct resource pic1_iores = { 145static struct resource pic1_iores = {
@@ -175,28 +175,16 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
175 175
176 /* We block the internal cascade */ 176 /* We block the internal cascade */
177 if (hw == 2) 177 if (hw == 2)
178 irq_to_desc(virq)->status |= IRQ_NOREQUEST; 178 irq_set_status_flags(virq, IRQ_NOREQUEST);
179 179
180 /* We use the level handler only for now, we might want to 180 /* We use the level handler only for now, we might want to
181 * be more cautious here but that works for now 181 * be more cautious here but that works for now
182 */ 182 */
183 irq_to_desc(virq)->status |= IRQ_LEVEL; 183 irq_set_status_flags(virq, IRQ_LEVEL);
184 set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); 184 irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
185 return 0; 185 return 0;
186} 186}
187 187
188static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
189{
190 /* Make sure irq is masked in hardware */
191 i8259_mask_irq(virq);
192
193 /* remove chip and handler */
194 set_irq_chip_and_handler(virq, NULL, NULL);
195
196 /* Make sure it's completed */
197 synchronize_irq(virq);
198}
199
200static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 188static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
201 const u32 *intspec, unsigned int intsize, 189 const u32 *intspec, unsigned int intsize,
202 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 190 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
220static struct irq_host_ops i8259_host_ops = { 208static struct irq_host_ops i8259_host_ops = {
221 .match = i8259_host_match, 209 .match = i8259_host_match,
222 .map = i8259_host_map, 210 .map = i8259_host_map,
223 .unmap = i8259_host_unmap,
224 .xlate = i8259_host_xlate, 211 .xlate = i8259_host_xlate,
225}; 212};
226 213
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 7ed809676642..82fdad885d20 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -117,7 +117,7 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
117 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 117 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
118 (devfn << 8) | reg | cfg_type)); 118 (devfn << 8) | reg | cfg_type));
119 119
120 /* surpress setting of PCI_PRIMARY_BUS */ 120 /* suppress setting of PCI_PRIMARY_BUS */
121 if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) 121 if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
122 if ((offset == PCI_PRIMARY_BUS) && 122 if ((offset == PCI_PRIMARY_BUS) &&
123 (bus->number == hose->first_busno)) 123 (bus->number == hose->first_busno))
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index d7b9b9c69287..7367d17364cb 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -18,7 +18,7 @@
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/sysdev.h> 21#include <linux/syscore_ops.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
@@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq)
521 return primary_ipic; 521 return primary_ipic;
522} 522}
523 523
524#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 524static void ipic_unmask_irq(struct irq_data *d)
525
526static void ipic_unmask_irq(unsigned int virq)
527{ 525{
528 struct ipic *ipic = ipic_from_irq(virq); 526 struct ipic *ipic = ipic_from_irq(d->irq);
529 unsigned int src = ipic_irq_to_hw(virq); 527 unsigned int src = irqd_to_hwirq(d);
530 unsigned long flags; 528 unsigned long flags;
531 u32 temp; 529 u32 temp;
532 530
@@ -539,10 +537,10 @@ static void ipic_unmask_irq(unsigned int virq)
539 raw_spin_unlock_irqrestore(&ipic_lock, flags); 537 raw_spin_unlock_irqrestore(&ipic_lock, flags);
540} 538}
541 539
542static void ipic_mask_irq(unsigned int virq) 540static void ipic_mask_irq(struct irq_data *d)
543{ 541{
544 struct ipic *ipic = ipic_from_irq(virq); 542 struct ipic *ipic = ipic_from_irq(d->irq);
545 unsigned int src = ipic_irq_to_hw(virq); 543 unsigned int src = irqd_to_hwirq(d);
546 unsigned long flags; 544 unsigned long flags;
547 u32 temp; 545 u32 temp;
548 546
@@ -559,10 +557,10 @@ static void ipic_mask_irq(unsigned int virq)
559 raw_spin_unlock_irqrestore(&ipic_lock, flags); 557 raw_spin_unlock_irqrestore(&ipic_lock, flags);
560} 558}
561 559
562static void ipic_ack_irq(unsigned int virq) 560static void ipic_ack_irq(struct irq_data *d)
563{ 561{
564 struct ipic *ipic = ipic_from_irq(virq); 562 struct ipic *ipic = ipic_from_irq(d->irq);
565 unsigned int src = ipic_irq_to_hw(virq); 563 unsigned int src = irqd_to_hwirq(d);
566 unsigned long flags; 564 unsigned long flags;
567 u32 temp; 565 u32 temp;
568 566
@@ -578,10 +576,10 @@ static void ipic_ack_irq(unsigned int virq)
578 raw_spin_unlock_irqrestore(&ipic_lock, flags); 576 raw_spin_unlock_irqrestore(&ipic_lock, flags);
579} 577}
580 578
581static void ipic_mask_irq_and_ack(unsigned int virq) 579static void ipic_mask_irq_and_ack(struct irq_data *d)
582{ 580{
583 struct ipic *ipic = ipic_from_irq(virq); 581 struct ipic *ipic = ipic_from_irq(d->irq);
584 unsigned int src = ipic_irq_to_hw(virq); 582 unsigned int src = irqd_to_hwirq(d);
585 unsigned long flags; 583 unsigned long flags;
586 u32 temp; 584 u32 temp;
587 585
@@ -601,11 +599,10 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
601 raw_spin_unlock_irqrestore(&ipic_lock, flags); 599 raw_spin_unlock_irqrestore(&ipic_lock, flags);
602} 600}
603 601
604static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) 602static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
605{ 603{
606 struct ipic *ipic = ipic_from_irq(virq); 604 struct ipic *ipic = ipic_from_irq(d->irq);
607 unsigned int src = ipic_irq_to_hw(virq); 605 unsigned int src = irqd_to_hwirq(d);
608 struct irq_desc *desc = irq_to_desc(virq);
609 unsigned int vold, vnew, edibit; 606 unsigned int vold, vnew, edibit;
610 607
611 if (flow_type == IRQ_TYPE_NONE) 608 if (flow_type == IRQ_TYPE_NONE)
@@ -623,17 +620,16 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
623 printk(KERN_ERR "ipic: edge sense not supported on internal " 620 printk(KERN_ERR "ipic: edge sense not supported on internal "
624 "interrupts\n"); 621 "interrupts\n");
625 return -EINVAL; 622 return -EINVAL;
623
626 } 624 }
627 625
628 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 626 irqd_set_trigger_type(d, flow_type);
629 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
630 if (flow_type & IRQ_TYPE_LEVEL_LOW) { 627 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
631 desc->status |= IRQ_LEVEL; 628 __irq_set_handler_locked(d->irq, handle_level_irq);
632 desc->handle_irq = handle_level_irq; 629 d->chip = &ipic_level_irq_chip;
633 desc->chip = &ipic_level_irq_chip;
634 } else { 630 } else {
635 desc->handle_irq = handle_edge_irq; 631 __irq_set_handler_locked(d->irq, handle_edge_irq);
636 desc->chip = &ipic_edge_irq_chip; 632 d->chip = &ipic_edge_irq_chip;
637 } 633 }
638 634
639 /* only EXT IRQ senses are programmable on ipic 635 /* only EXT IRQ senses are programmable on ipic
@@ -655,25 +651,25 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
655 } 651 }
656 if (vold != vnew) 652 if (vold != vnew)
657 ipic_write(ipic->regs, IPIC_SECNR, vnew); 653 ipic_write(ipic->regs, IPIC_SECNR, vnew);
658 return 0; 654 return IRQ_SET_MASK_OK_NOCOPY;
659} 655}
660 656
661/* level interrupts and edge interrupts have different ack operations */ 657/* level interrupts and edge interrupts have different ack operations */
662static struct irq_chip ipic_level_irq_chip = { 658static struct irq_chip ipic_level_irq_chip = {
663 .name = "IPIC", 659 .name = "IPIC",
664 .unmask = ipic_unmask_irq, 660 .irq_unmask = ipic_unmask_irq,
665 .mask = ipic_mask_irq, 661 .irq_mask = ipic_mask_irq,
666 .mask_ack = ipic_mask_irq, 662 .irq_mask_ack = ipic_mask_irq,
667 .set_type = ipic_set_irq_type, 663 .irq_set_type = ipic_set_irq_type,
668}; 664};
669 665
670static struct irq_chip ipic_edge_irq_chip = { 666static struct irq_chip ipic_edge_irq_chip = {
671 .name = "IPIC", 667 .name = "IPIC",
672 .unmask = ipic_unmask_irq, 668 .irq_unmask = ipic_unmask_irq,
673 .mask = ipic_mask_irq, 669 .irq_mask = ipic_mask_irq,
674 .mask_ack = ipic_mask_irq_and_ack, 670 .irq_mask_ack = ipic_mask_irq_and_ack,
675 .ack = ipic_ack_irq, 671 .irq_ack = ipic_ack_irq,
676 .set_type = ipic_set_irq_type, 672 .irq_set_type = ipic_set_irq_type,
677}; 673};
678 674
679static int ipic_host_match(struct irq_host *h, struct device_node *node) 675static int ipic_host_match(struct irq_host *h, struct device_node *node)
@@ -687,11 +683,11 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
687{ 683{
688 struct ipic *ipic = h->host_data; 684 struct ipic *ipic = h->host_data;
689 685
690 set_irq_chip_data(virq, ipic); 686 irq_set_chip_data(virq, ipic);
691 set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); 687 irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
692 688
693 /* Set default irq type */ 689 /* Set default irq type */
694 set_irq_type(virq, IRQ_TYPE_NONE); 690 irq_set_irq_type(virq, IRQ_TYPE_NONE);
695 691
696 return 0; 692 return 0;
697} 693}
@@ -795,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
795int ipic_set_priority(unsigned int virq, unsigned int priority) 791int ipic_set_priority(unsigned int virq, unsigned int priority)
796{ 792{
797 struct ipic *ipic = ipic_from_irq(virq); 793 struct ipic *ipic = ipic_from_irq(virq);
798 unsigned int src = ipic_irq_to_hw(virq); 794 unsigned int src = virq_to_hw(virq);
799 u32 temp; 795 u32 temp;
800 796
801 if (priority > 7) 797 if (priority > 7)
@@ -823,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority)
823void ipic_set_highest_priority(unsigned int virq) 819void ipic_set_highest_priority(unsigned int virq)
824{ 820{
825 struct ipic *ipic = ipic_from_irq(virq); 821 struct ipic *ipic = ipic_from_irq(virq);
826 unsigned int src = ipic_irq_to_hw(virq); 822 unsigned int src = virq_to_hw(virq);
827 u32 temp; 823 u32 temp;
828 824
829 temp = ipic_read(ipic->regs, IPIC_SICFR); 825 temp = ipic_read(ipic->regs, IPIC_SICFR);
@@ -904,7 +900,7 @@ static struct {
904 u32 sercr; 900 u32 sercr;
905} ipic_saved_state; 901} ipic_saved_state;
906 902
907static int ipic_suspend(struct sys_device *sdev, pm_message_t state) 903static int ipic_suspend(void)
908{ 904{
909 struct ipic *ipic = primary_ipic; 905 struct ipic *ipic = primary_ipic;
910 906
@@ -935,7 +931,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state)
935 return 0; 931 return 0;
936} 932}
937 933
938static int ipic_resume(struct sys_device *sdev) 934static void ipic_resume(void)
939{ 935{
940 struct ipic *ipic = primary_ipic; 936 struct ipic *ipic = primary_ipic;
941 937
@@ -951,44 +947,26 @@ static int ipic_resume(struct sys_device *sdev)
951 ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); 947 ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
952 ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); 948 ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
953 ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); 949 ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
954
955 return 0;
956} 950}
957#else 951#else
958#define ipic_suspend NULL 952#define ipic_suspend NULL
959#define ipic_resume NULL 953#define ipic_resume NULL
960#endif 954#endif
961 955
962static struct sysdev_class ipic_sysclass = { 956static struct syscore_ops ipic_syscore_ops = {
963 .name = "ipic",
964 .suspend = ipic_suspend, 957 .suspend = ipic_suspend,
965 .resume = ipic_resume, 958 .resume = ipic_resume,
966}; 959};
967 960
968static struct sys_device device_ipic = { 961static int __init init_ipic_syscore(void)
969 .id = 0,
970 .cls = &ipic_sysclass,
971};
972
973static int __init init_ipic_sysfs(void)
974{ 962{
975 int rc;
976
977 if (!primary_ipic || !primary_ipic->regs) 963 if (!primary_ipic || !primary_ipic->regs)
978 return -ENODEV; 964 return -ENODEV;
979 printk(KERN_DEBUG "Registering ipic with sysfs...\n");
980 965
981 rc = sysdev_class_register(&ipic_sysclass); 966 printk(KERN_DEBUG "Registering ipic system core operations\n");
982 if (rc) { 967 register_syscore_ops(&ipic_syscore_ops);
983 printk(KERN_ERR "Failed registering ipic sys class\n"); 968
984 return -ENODEV;
985 }
986 rc = sysdev_register(&device_ipic);
987 if (rc) {
988 printk(KERN_ERR "Failed registering ipic sys device\n");
989 return -ENODEV;
990 }
991 return 0; 969 return 0;
992} 970}
993 971
994subsys_initcall(init_ipic_sysfs); 972subsys_initcall(init_ipic_syscore);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 207324209065..ddc877a3a23a 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -115,6 +115,8 @@ int __init mmio_nvram_init(void)
115 int ret; 115 int ret;
116 116
117 nvram_node = of_find_node_by_type(NULL, "nvram"); 117 nvram_node = of_find_node_by_type(NULL, "nvram");
118 if (!nvram_node)
119 nvram_node = of_find_compatible_node(NULL, NULL, "nvram");
118 if (!nvram_node) { 120 if (!nvram_node) {
119 printk(KERN_WARNING "nvram: no node found in device-tree\n"); 121 printk(KERN_WARNING "nvram: no node found in device-tree\n");
120 return -ENODEV; 122 return -ENODEV;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 8c27d261aba8..20924f2246f0 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -25,10 +25,10 @@ static sysconf8xx_t __iomem *siu_reg;
25 25
26int cpm_get_irq(struct pt_regs *regs); 26int cpm_get_irq(struct pt_regs *regs);
27 27
28static void mpc8xx_unmask_irq(unsigned int virq) 28static void mpc8xx_unmask_irq(struct irq_data *d)
29{ 29{
30 int bit, word; 30 int bit, word;
31 unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; 31 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
32 32
33 bit = irq_nr & 0x1f; 33 bit = irq_nr & 0x1f;
34 word = irq_nr >> 5; 34 word = irq_nr >> 5;
@@ -37,10 +37,10 @@ static void mpc8xx_unmask_irq(unsigned int virq)
37 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); 37 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
38} 38}
39 39
40static void mpc8xx_mask_irq(unsigned int virq) 40static void mpc8xx_mask_irq(struct irq_data *d)
41{ 41{
42 int bit, word; 42 int bit, word;
43 unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; 43 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
44 44
45 bit = irq_nr & 0x1f; 45 bit = irq_nr & 0x1f;
46 word = irq_nr >> 5; 46 word = irq_nr >> 5;
@@ -49,19 +49,19 @@ static void mpc8xx_mask_irq(unsigned int virq)
49 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); 49 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
50} 50}
51 51
52static void mpc8xx_ack(unsigned int virq) 52static void mpc8xx_ack(struct irq_data *d)
53{ 53{
54 int bit; 54 int bit;
55 unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; 55 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
56 56
57 bit = irq_nr & 0x1f; 57 bit = irq_nr & 0x1f;
58 out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); 58 out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
59} 59}
60 60
61static void mpc8xx_end_irq(unsigned int virq) 61static void mpc8xx_end_irq(struct irq_data *d)
62{ 62{
63 int bit, word; 63 int bit, word;
64 unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; 64 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
65 65
66 bit = irq_nr & 0x1f; 66 bit = irq_nr & 0x1f;
67 word = irq_nr >> 5; 67 word = irq_nr >> 5;
@@ -70,24 +70,17 @@ static void mpc8xx_end_irq(unsigned int virq)
70 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); 70 out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
71} 71}
72 72
73static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type) 73static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
74{ 74{
75 struct irq_desc *desc = irq_to_desc(virq);
76
77 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
78 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
79 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
80 desc->status |= IRQ_LEVEL;
81
82 if (flow_type & IRQ_TYPE_EDGE_FALLING) { 75 if (flow_type & IRQ_TYPE_EDGE_FALLING) {
83 irq_hw_number_t hw = (unsigned int)irq_map[virq].hwirq; 76 irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
84 unsigned int siel = in_be32(&siu_reg->sc_siel); 77 unsigned int siel = in_be32(&siu_reg->sc_siel);
85 78
86 /* only external IRQ senses are programmable */ 79 /* only external IRQ senses are programmable */
87 if ((hw & 1) == 0) { 80 if ((hw & 1) == 0) {
88 siel |= (0x80000000 >> hw); 81 siel |= (0x80000000 >> hw);
89 out_be32(&siu_reg->sc_siel, siel); 82 out_be32(&siu_reg->sc_siel, siel);
90 desc->handle_irq = handle_edge_irq; 83 __irq_set_handler_locked(d->irq, handle_edge_irq);
91 } 84 }
92 } 85 }
93 return 0; 86 return 0;
@@ -95,11 +88,11 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
95 88
96static struct irq_chip mpc8xx_pic = { 89static struct irq_chip mpc8xx_pic = {
97 .name = "MPC8XX SIU", 90 .name = "MPC8XX SIU",
98 .unmask = mpc8xx_unmask_irq, 91 .irq_unmask = mpc8xx_unmask_irq,
99 .mask = mpc8xx_mask_irq, 92 .irq_mask = mpc8xx_mask_irq,
100 .ack = mpc8xx_ack, 93 .irq_ack = mpc8xx_ack,
101 .eoi = mpc8xx_end_irq, 94 .irq_eoi = mpc8xx_end_irq,
102 .set_type = mpc8xx_set_irq_type, 95 .irq_set_type = mpc8xx_set_irq_type,
103}; 96};
104 97
105unsigned int mpc8xx_get_irq(void) 98unsigned int mpc8xx_get_irq(void)
@@ -124,7 +117,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
124 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); 117 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
125 118
126 /* Set default irq handle */ 119 /* Set default irq handle */
127 set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); 120 irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
128 return 0; 121 return 0;
129} 122}
130 123
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 2b69084d0f0c..fb4963abdf55 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * GPIOs on MPC8349/8572/8610 and compatible 2 * GPIOs on MPC512x/8349/8572/8610 and compatible
3 * 3 *
4 * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> 4 * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
5 * 5 *
@@ -26,6 +26,7 @@
26#define GPIO_IER 0x0c 26#define GPIO_IER 0x0c
27#define GPIO_IMR 0x10 27#define GPIO_IMR 0x10
28#define GPIO_ICR 0x14 28#define GPIO_ICR 0x14
29#define GPIO_ICR2 0x18
29 30
30struct mpc8xxx_gpio_chip { 31struct mpc8xxx_gpio_chip {
31 struct of_mm_gpio_chip mm_gc; 32 struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@ struct mpc8xxx_gpio_chip {
37 */ 38 */
38 u32 data; 39 u32 data;
39 struct irq_host *irq; 40 struct irq_host *irq;
41 void *of_dev_id_data;
40}; 42};
41 43
42static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) 44static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -143,7 +145,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
143 145
144static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) 146static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
145{ 147{
146 struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc); 148 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
147 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 149 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
148 unsigned int mask; 150 unsigned int mask;
149 151
@@ -153,43 +155,43 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
153 32 - ffs(mask))); 155 32 - ffs(mask)));
154} 156}
155 157
156static void mpc8xxx_irq_unmask(unsigned int virq) 158static void mpc8xxx_irq_unmask(struct irq_data *d)
157{ 159{
158 struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); 160 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
159 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 161 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
160 unsigned long flags; 162 unsigned long flags;
161 163
162 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 164 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
163 165
164 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); 166 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
165 167
166 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 168 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
167} 169}
168 170
169static void mpc8xxx_irq_mask(unsigned int virq) 171static void mpc8xxx_irq_mask(struct irq_data *d)
170{ 172{
171 struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); 173 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
172 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 174 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
173 unsigned long flags; 175 unsigned long flags;
174 176
175 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 177 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
176 178
177 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); 179 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
178 180
179 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 181 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
180} 182}
181 183
182static void mpc8xxx_irq_ack(unsigned int virq) 184static void mpc8xxx_irq_ack(struct irq_data *d)
183{ 185{
184 struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); 186 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
185 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 187 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
186 188
187 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq))); 189 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
188} 190}
189 191
190static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type) 192static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
191{ 193{
192 struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); 194 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
193 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 195 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
194 unsigned long flags; 196 unsigned long flags;
195 197
@@ -197,14 +199,59 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
197 case IRQ_TYPE_EDGE_FALLING: 199 case IRQ_TYPE_EDGE_FALLING:
198 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 200 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
199 setbits32(mm->regs + GPIO_ICR, 201 setbits32(mm->regs + GPIO_ICR,
200 mpc8xxx_gpio2mask(virq_to_hw(virq))); 202 mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
201 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 203 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
202 break; 204 break;
203 205
204 case IRQ_TYPE_EDGE_BOTH: 206 case IRQ_TYPE_EDGE_BOTH:
205 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 207 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
206 clrbits32(mm->regs + GPIO_ICR, 208 clrbits32(mm->regs + GPIO_ICR,
207 mpc8xxx_gpio2mask(virq_to_hw(virq))); 209 mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
210 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
211 break;
212
213 default:
214 return -EINVAL;
215 }
216
217 return 0;
218}
219
220static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
221{
222 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
223 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
224 unsigned long gpio = irqd_to_hwirq(d);
225 void __iomem *reg;
226 unsigned int shift;
227 unsigned long flags;
228
229 if (gpio < 16) {
230 reg = mm->regs + GPIO_ICR;
231 shift = (15 - gpio) * 2;
232 } else {
233 reg = mm->regs + GPIO_ICR2;
234 shift = (15 - (gpio % 16)) * 2;
235 }
236
237 switch (flow_type) {
238 case IRQ_TYPE_EDGE_FALLING:
239 case IRQ_TYPE_LEVEL_LOW:
240 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
241 clrsetbits_be32(reg, 3 << shift, 2 << shift);
242 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
243 break;
244
245 case IRQ_TYPE_EDGE_RISING:
246 case IRQ_TYPE_LEVEL_HIGH:
247 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
248 clrsetbits_be32(reg, 3 << shift, 1 << shift);
249 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
250 break;
251
252 case IRQ_TYPE_EDGE_BOTH:
253 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
254 clrbits32(reg, 3 << shift);
208 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 255 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
209 break; 256 break;
210 257
@@ -217,18 +264,23 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
217 264
218static struct irq_chip mpc8xxx_irq_chip = { 265static struct irq_chip mpc8xxx_irq_chip = {
219 .name = "mpc8xxx-gpio", 266 .name = "mpc8xxx-gpio",
220 .unmask = mpc8xxx_irq_unmask, 267 .irq_unmask = mpc8xxx_irq_unmask,
221 .mask = mpc8xxx_irq_mask, 268 .irq_mask = mpc8xxx_irq_mask,
222 .ack = mpc8xxx_irq_ack, 269 .irq_ack = mpc8xxx_irq_ack,
223 .set_type = mpc8xxx_irq_set_type, 270 .irq_set_type = mpc8xxx_irq_set_type,
224}; 271};
225 272
226static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, 273static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
227 irq_hw_number_t hw) 274 irq_hw_number_t hw)
228{ 275{
229 set_irq_chip_data(virq, h->host_data); 276 struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
230 set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); 277
231 set_irq_type(virq, IRQ_TYPE_NONE); 278 if (mpc8xxx_gc->of_dev_id_data)
279 mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
280
281 irq_set_chip_data(virq, h->host_data);
282 irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
283 irq_set_irq_type(virq, IRQ_TYPE_NONE);
232 284
233 return 0; 285 return 0;
234} 286}
@@ -253,11 +305,21 @@ static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
253 .xlate = mpc8xxx_gpio_irq_xlate, 305 .xlate = mpc8xxx_gpio_irq_xlate,
254}; 306};
255 307
308static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
309 { .compatible = "fsl,mpc8349-gpio", },
310 { .compatible = "fsl,mpc8572-gpio", },
311 { .compatible = "fsl,mpc8610-gpio", },
312 { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
313 { .compatible = "fsl,qoriq-gpio", },
314 {}
315};
316
256static void __init mpc8xxx_add_controller(struct device_node *np) 317static void __init mpc8xxx_add_controller(struct device_node *np)
257{ 318{
258 struct mpc8xxx_gpio_chip *mpc8xxx_gc; 319 struct mpc8xxx_gpio_chip *mpc8xxx_gc;
259 struct of_mm_gpio_chip *mm_gc; 320 struct of_mm_gpio_chip *mm_gc;
260 struct gpio_chip *gc; 321 struct gpio_chip *gc;
322 const struct of_device_id *id;
261 unsigned hwirq; 323 unsigned hwirq;
262 int ret; 324 int ret;
263 325
@@ -297,14 +359,18 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
297 if (!mpc8xxx_gc->irq) 359 if (!mpc8xxx_gc->irq)
298 goto skip_irq; 360 goto skip_irq;
299 361
362 id = of_match_node(mpc8xxx_gpio_ids, np);
363 if (id)
364 mpc8xxx_gc->of_dev_id_data = id->data;
365
300 mpc8xxx_gc->irq->host_data = mpc8xxx_gc; 366 mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
301 367
302 /* ack and mask all irqs */ 368 /* ack and mask all irqs */
303 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); 369 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
304 out_be32(mm_gc->regs + GPIO_IMR, 0); 370 out_be32(mm_gc->regs + GPIO_IMR, 0);
305 371
306 set_irq_data(hwirq, mpc8xxx_gc); 372 irq_set_handler_data(hwirq, mpc8xxx_gc);
307 set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); 373 irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
308 374
309skip_irq: 375skip_irq:
310 return; 376 return;
@@ -321,13 +387,7 @@ static int __init mpc8xxx_add_gpiochips(void)
321{ 387{
322 struct device_node *np; 388 struct device_node *np;
323 389
324 for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio") 390 for_each_matching_node(np, mpc8xxx_gpio_ids)
325 mpc8xxx_add_controller(np);
326
327 for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
328 mpc8xxx_add_controller(np);
329
330 for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
331 mpc8xxx_add_controller(np); 391 mpc8xxx_add_controller(np);
332 392
333 return 0; 393 return 0;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7c1342618a30..58d7a534f877 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -6,6 +6,7 @@
6 * with various broken implementations of this HW. 6 * with various broken implementations of this HW.
7 * 7 *
8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 * Copyright 2010-2011 Freescale Semiconductor, Inc.
9 * 10 *
10 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive 12 * License. See the file COPYING in the main directory of this archive
@@ -27,6 +28,8 @@
27#include <linux/spinlock.h> 28#include <linux/spinlock.h>
28#include <linux/pci.h> 29#include <linux/pci.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/syscore_ops.h>
32#include <linux/ratelimit.h>
30 33
31#include <asm/ptrace.h> 34#include <asm/ptrace.h>
32#include <asm/signal.h> 35#include <asm/signal.h>
@@ -147,6 +150,16 @@ static u32 mpic_infos[][MPIC_IDX_END] = {
147 150
148#endif /* CONFIG_MPIC_WEIRD */ 151#endif /* CONFIG_MPIC_WEIRD */
149 152
153static inline unsigned int mpic_processor_id(struct mpic *mpic)
154{
155 unsigned int cpu = 0;
156
157 if (mpic->flags & MPIC_PRIMARY)
158 cpu = hard_smp_processor_id();
159
160 return cpu;
161}
162
150/* 163/*
151 * Register accessor functions 164 * Register accessor functions
152 */ 165 */
@@ -208,21 +221,38 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
208 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); 221 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
209} 222}
210 223
224static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
225{
226 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
227 ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
228
229 if (tm >= 4)
230 offset += 0x1000 / 4;
231
232 return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
233}
234
235static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
236{
237 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
238 ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
239
240 if (tm >= 4)
241 offset += 0x1000 / 4;
242
243 _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
244}
245
211static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) 246static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
212{ 247{
213 unsigned int cpu = 0; 248 unsigned int cpu = mpic_processor_id(mpic);
214 249
215 if (mpic->flags & MPIC_PRIMARY)
216 cpu = hard_smp_processor_id();
217 return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); 250 return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
218} 251}
219 252
220static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) 253static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
221{ 254{
222 unsigned int cpu = 0; 255 unsigned int cpu = mpic_processor_id(mpic);
223
224 if (mpic->flags & MPIC_PRIMARY)
225 cpu = hard_smp_processor_id();
226 256
227 _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); 257 _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
228} 258}
@@ -263,6 +293,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
263#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) 293#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v))
264#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) 294#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
265#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) 295#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
296#define mpic_tm_read(i) _mpic_tm_read(mpic,(i))
297#define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v))
266#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) 298#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
267#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) 299#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
268#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) 300#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
@@ -356,7 +388,7 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
356} 388}
357 389
358static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, 390static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
359 unsigned int irqflags) 391 bool level)
360{ 392{
361 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 393 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
362 unsigned long flags; 394 unsigned long flags;
@@ -365,14 +397,14 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
365 if (fixup->base == NULL) 397 if (fixup->base == NULL)
366 return; 398 return;
367 399
368 DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n", 400 DBG("startup_ht_interrupt(0x%x) index: %d\n",
369 source, irqflags, fixup->index); 401 source, fixup->index);
370 raw_spin_lock_irqsave(&mpic->fixup_lock, flags); 402 raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
371 /* Enable and configure */ 403 /* Enable and configure */
372 writeb(0x10 + 2 * fixup->index, fixup->base + 2); 404 writeb(0x10 + 2 * fixup->index, fixup->base + 2);
373 tmp = readl(fixup->base + 4); 405 tmp = readl(fixup->base + 4);
374 tmp &= ~(0x23U); 406 tmp &= ~(0x23U);
375 if (irqflags & IRQ_LEVEL) 407 if (level)
376 tmp |= 0x22; 408 tmp |= 0x22;
377 writel(tmp, fixup->base + 4); 409 writel(tmp, fixup->base + 4);
378 raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); 410 raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
@@ -384,8 +416,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
384#endif 416#endif
385} 417}
386 418
387static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, 419static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
388 unsigned int irqflags)
389{ 420{
390 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 421 struct mpic_irq_fixup *fixup = &mpic->fixups[source];
391 unsigned long flags; 422 unsigned long flags;
@@ -394,7 +425,7 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
394 if (fixup->base == NULL) 425 if (fixup->base == NULL)
395 return; 426 return;
396 427
397 DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags); 428 DBG("shutdown_ht_interrupt(0x%x)\n", source);
398 429
399 /* Disable */ 430 /* Disable */
400 raw_spin_lock_irqsave(&mpic->fixup_lock, flags); 431 raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
@@ -603,25 +634,30 @@ static int irq_choose_cpu(const struct cpumask *mask)
603} 634}
604#endif 635#endif
605 636
606#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
607
608/* Find an mpic associated with a given linux interrupt */ 637/* Find an mpic associated with a given linux interrupt */
609static struct mpic *mpic_find(unsigned int irq) 638static struct mpic *mpic_find(unsigned int irq)
610{ 639{
611 if (irq < NUM_ISA_INTERRUPTS) 640 if (irq < NUM_ISA_INTERRUPTS)
612 return NULL; 641 return NULL;
613 642
614 return irq_to_desc(irq)->chip_data; 643 return irq_get_chip_data(irq);
615} 644}
616 645
617/* Determine if the linux irq is an IPI */ 646/* Determine if the linux irq is an IPI */
618static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) 647static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
619{ 648{
620 unsigned int src = mpic_irq_to_hw(irq); 649 unsigned int src = virq_to_hw(irq);
621 650
622 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); 651 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
623} 652}
624 653
654/* Determine if the linux irq is a timer */
655static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
656{
657 unsigned int src = virq_to_hw(irq);
658
659 return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
660}
625 661
626/* Convert a cpu mask from logical to physical cpu numbers. */ 662/* Convert a cpu mask from logical to physical cpu numbers. */
627static inline u32 mpic_physmask(u32 cpumask) 663static inline u32 mpic_physmask(u32 cpumask)
@@ -629,23 +665,29 @@ static inline u32 mpic_physmask(u32 cpumask)
629 int i; 665 int i;
630 u32 mask = 0; 666 u32 mask = 0;
631 667
632 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) 668 for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
633 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 669 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
634 return mask; 670 return mask;
635} 671}
636 672
637#ifdef CONFIG_SMP 673#ifdef CONFIG_SMP
638/* Get the mpic structure from the IPI number */ 674/* Get the mpic structure from the IPI number */
639static inline struct mpic * mpic_from_ipi(unsigned int ipi) 675static inline struct mpic * mpic_from_ipi(struct irq_data *d)
640{ 676{
641 return irq_to_desc(ipi)->chip_data; 677 return irq_data_get_irq_chip_data(d);
642} 678}
643#endif 679#endif
644 680
645/* Get the mpic structure from the irq number */ 681/* Get the mpic structure from the irq number */
646static inline struct mpic * mpic_from_irq(unsigned int irq) 682static inline struct mpic * mpic_from_irq(unsigned int irq)
647{ 683{
648 return irq_to_desc(irq)->chip_data; 684 return irq_get_chip_data(irq);
685}
686
687/* Get the mpic structure from the irq data */
688static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
689{
690 return irq_data_get_irq_chip_data(d);
649} 691}
650 692
651/* Send an EOI */ 693/* Send an EOI */
@@ -660,13 +702,13 @@ static inline void mpic_eoi(struct mpic *mpic)
660 */ 702 */
661 703
662 704
663void mpic_unmask_irq(unsigned int irq) 705void mpic_unmask_irq(struct irq_data *d)
664{ 706{
665 unsigned int loops = 100000; 707 unsigned int loops = 100000;
666 struct mpic *mpic = mpic_from_irq(irq); 708 struct mpic *mpic = mpic_from_irq_data(d);
667 unsigned int src = mpic_irq_to_hw(irq); 709 unsigned int src = irqd_to_hwirq(d);
668 710
669 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 711 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
670 712
671 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), 713 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
672 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & 714 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
@@ -674,19 +716,20 @@ void mpic_unmask_irq(unsigned int irq)
674 /* make sure mask gets to controller before we return to user */ 716 /* make sure mask gets to controller before we return to user */
675 do { 717 do {
676 if (!loops--) { 718 if (!loops--) {
677 printk(KERN_ERR "mpic_enable_irq timeout\n"); 719 printk(KERN_ERR "%s: timeout on hwirq %u\n",
720 __func__, src);
678 break; 721 break;
679 } 722 }
680 } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); 723 } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
681} 724}
682 725
683void mpic_mask_irq(unsigned int irq) 726void mpic_mask_irq(struct irq_data *d)
684{ 727{
685 unsigned int loops = 100000; 728 unsigned int loops = 100000;
686 struct mpic *mpic = mpic_from_irq(irq); 729 struct mpic *mpic = mpic_from_irq_data(d);
687 unsigned int src = mpic_irq_to_hw(irq); 730 unsigned int src = irqd_to_hwirq(d);
688 731
689 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 732 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
690 733
691 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), 734 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
692 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | 735 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
@@ -695,18 +738,19 @@ void mpic_mask_irq(unsigned int irq)
695 /* make sure mask gets to controller before we return to user */ 738 /* make sure mask gets to controller before we return to user */
696 do { 739 do {
697 if (!loops--) { 740 if (!loops--) {
698 printk(KERN_ERR "mpic_enable_irq timeout\n"); 741 printk(KERN_ERR "%s: timeout on hwirq %u\n",
742 __func__, src);
699 break; 743 break;
700 } 744 }
701 } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); 745 } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
702} 746}
703 747
704void mpic_end_irq(unsigned int irq) 748void mpic_end_irq(struct irq_data *d)
705{ 749{
706 struct mpic *mpic = mpic_from_irq(irq); 750 struct mpic *mpic = mpic_from_irq_data(d);
707 751
708#ifdef DEBUG_IRQ 752#ifdef DEBUG_IRQ
709 DBG("%s: end_irq: %d\n", mpic->name, irq); 753 DBG("%s: end_irq: %d\n", mpic->name, d->irq);
710#endif 754#endif
711 /* We always EOI on end_irq() even for edge interrupts since that 755 /* We always EOI on end_irq() even for edge interrupts since that
712 * should only lower the priority, the MPIC should have properly 756 * should only lower the priority, the MPIC should have properly
@@ -718,51 +762,51 @@ void mpic_end_irq(unsigned int irq)
718 762
719#ifdef CONFIG_MPIC_U3_HT_IRQS 763#ifdef CONFIG_MPIC_U3_HT_IRQS
720 764
721static void mpic_unmask_ht_irq(unsigned int irq) 765static void mpic_unmask_ht_irq(struct irq_data *d)
722{ 766{
723 struct mpic *mpic = mpic_from_irq(irq); 767 struct mpic *mpic = mpic_from_irq_data(d);
724 unsigned int src = mpic_irq_to_hw(irq); 768 unsigned int src = irqd_to_hwirq(d);
725 769
726 mpic_unmask_irq(irq); 770 mpic_unmask_irq(d);
727 771
728 if (irq_to_desc(irq)->status & IRQ_LEVEL) 772 if (irqd_is_level_type(d))
729 mpic_ht_end_irq(mpic, src); 773 mpic_ht_end_irq(mpic, src);
730} 774}
731 775
732static unsigned int mpic_startup_ht_irq(unsigned int irq) 776static unsigned int mpic_startup_ht_irq(struct irq_data *d)
733{ 777{
734 struct mpic *mpic = mpic_from_irq(irq); 778 struct mpic *mpic = mpic_from_irq_data(d);
735 unsigned int src = mpic_irq_to_hw(irq); 779 unsigned int src = irqd_to_hwirq(d);
736 780
737 mpic_unmask_irq(irq); 781 mpic_unmask_irq(d);
738 mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status); 782 mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
739 783
740 return 0; 784 return 0;
741} 785}
742 786
743static void mpic_shutdown_ht_irq(unsigned int irq) 787static void mpic_shutdown_ht_irq(struct irq_data *d)
744{ 788{
745 struct mpic *mpic = mpic_from_irq(irq); 789 struct mpic *mpic = mpic_from_irq_data(d);
746 unsigned int src = mpic_irq_to_hw(irq); 790 unsigned int src = irqd_to_hwirq(d);
747 791
748 mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status); 792 mpic_shutdown_ht_interrupt(mpic, src);
749 mpic_mask_irq(irq); 793 mpic_mask_irq(d);
750} 794}
751 795
752static void mpic_end_ht_irq(unsigned int irq) 796static void mpic_end_ht_irq(struct irq_data *d)
753{ 797{
754 struct mpic *mpic = mpic_from_irq(irq); 798 struct mpic *mpic = mpic_from_irq_data(d);
755 unsigned int src = mpic_irq_to_hw(irq); 799 unsigned int src = irqd_to_hwirq(d);
756 800
757#ifdef DEBUG_IRQ 801#ifdef DEBUG_IRQ
758 DBG("%s: end_irq: %d\n", mpic->name, irq); 802 DBG("%s: end_irq: %d\n", mpic->name, d->irq);
759#endif 803#endif
760 /* We always EOI on end_irq() even for edge interrupts since that 804 /* We always EOI on end_irq() even for edge interrupts since that
761 * should only lower the priority, the MPIC should have properly 805 * should only lower the priority, the MPIC should have properly
762 * latched another edge interrupt coming in anyway 806 * latched another edge interrupt coming in anyway
763 */ 807 */
764 808
765 if (irq_to_desc(irq)->status & IRQ_LEVEL) 809 if (irqd_is_level_type(d))
766 mpic_ht_end_irq(mpic, src); 810 mpic_ht_end_irq(mpic, src);
767 mpic_eoi(mpic); 811 mpic_eoi(mpic);
768} 812}
@@ -770,23 +814,23 @@ static void mpic_end_ht_irq(unsigned int irq)
770 814
771#ifdef CONFIG_SMP 815#ifdef CONFIG_SMP
772 816
773static void mpic_unmask_ipi(unsigned int irq) 817static void mpic_unmask_ipi(struct irq_data *d)
774{ 818{
775 struct mpic *mpic = mpic_from_ipi(irq); 819 struct mpic *mpic = mpic_from_ipi(d);
776 unsigned int src = mpic_irq_to_hw(irq) - mpic->ipi_vecs[0]; 820 unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
777 821
778 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); 822 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
779 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 823 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
780} 824}
781 825
782static void mpic_mask_ipi(unsigned int irq) 826static void mpic_mask_ipi(struct irq_data *d)
783{ 827{
784 /* NEVER disable an IPI... that's just plain wrong! */ 828 /* NEVER disable an IPI... that's just plain wrong! */
785} 829}
786 830
787static void mpic_end_ipi(unsigned int irq) 831static void mpic_end_ipi(struct irq_data *d)
788{ 832{
789 struct mpic *mpic = mpic_from_ipi(irq); 833 struct mpic *mpic = mpic_from_ipi(d);
790 834
791 /* 835 /*
792 * IPIs are marked IRQ_PER_CPU. This has the side effect of 836 * IPIs are marked IRQ_PER_CPU. This has the side effect of
@@ -800,26 +844,42 @@ static void mpic_end_ipi(unsigned int irq)
800 844
801#endif /* CONFIG_SMP */ 845#endif /* CONFIG_SMP */
802 846
803int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) 847static void mpic_unmask_tm(struct irq_data *d)
804{ 848{
805 struct mpic *mpic = mpic_from_irq(irq); 849 struct mpic *mpic = mpic_from_irq_data(d);
806 unsigned int src = mpic_irq_to_hw(irq); 850 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
851
852 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src);
853 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
854 mpic_tm_read(src);
855}
856
857static void mpic_mask_tm(struct irq_data *d)
858{
859 struct mpic *mpic = mpic_from_irq_data(d);
860 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
861
862 mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
863 mpic_tm_read(src);
864}
865
866int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
867 bool force)
868{
869 struct mpic *mpic = mpic_from_irq_data(d);
870 unsigned int src = irqd_to_hwirq(d);
807 871
808 if (mpic->flags & MPIC_SINGLE_DEST_CPU) { 872 if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
809 int cpuid = irq_choose_cpu(cpumask); 873 int cpuid = irq_choose_cpu(cpumask);
810 874
811 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 875 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
812 } else { 876 } else {
813 cpumask_var_t tmp; 877 u32 mask = cpumask_bits(cpumask)[0];
814
815 alloc_cpumask_var(&tmp, GFP_KERNEL);
816 878
817 cpumask_and(tmp, cpumask, cpu_online_mask); 879 mask &= cpumask_bits(cpu_online_mask)[0];
818 880
819 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 881 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
820 mpic_physmask(cpumask_bits(tmp)[0])); 882 mpic_physmask(mask));
821
822 free_cpumask_var(tmp);
823 } 883 }
824 884
825 return 0; 885 return 0;
@@ -846,15 +906,14 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
846 } 906 }
847} 907}
848 908
849int mpic_set_irq_type(unsigned int virq, unsigned int flow_type) 909int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
850{ 910{
851 struct mpic *mpic = mpic_from_irq(virq); 911 struct mpic *mpic = mpic_from_irq_data(d);
852 unsigned int src = mpic_irq_to_hw(virq); 912 unsigned int src = irqd_to_hwirq(d);
853 struct irq_desc *desc = irq_to_desc(virq);
854 unsigned int vecpri, vold, vnew; 913 unsigned int vecpri, vold, vnew;
855 914
856 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 915 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
857 mpic, virq, src, flow_type); 916 mpic, d->irq, src, flow_type);
858 917
859 if (src >= mpic->irq_count) 918 if (src >= mpic->irq_count)
860 return -EINVAL; 919 return -EINVAL;
@@ -865,10 +924,7 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
865 if (flow_type == IRQ_TYPE_NONE) 924 if (flow_type == IRQ_TYPE_NONE)
866 flow_type = IRQ_TYPE_LEVEL_LOW; 925 flow_type = IRQ_TYPE_LEVEL_LOW;
867 926
868 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 927 irqd_set_trigger_type(d, flow_type);
869 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
870 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
871 desc->status |= IRQ_LEVEL;
872 928
873 if (mpic_is_ht_interrupt(mpic, src)) 929 if (mpic_is_ht_interrupt(mpic, src))
874 vecpri = MPIC_VECPRI_POLARITY_POSITIVE | 930 vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
@@ -883,13 +939,13 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
883 if (vold != vnew) 939 if (vold != vnew)
884 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); 940 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
885 941
886 return 0; 942 return IRQ_SET_MASK_OK_NOCOPY;;
887} 943}
888 944
889void mpic_set_vector(unsigned int virq, unsigned int vector) 945void mpic_set_vector(unsigned int virq, unsigned int vector)
890{ 946{
891 struct mpic *mpic = mpic_from_irq(virq); 947 struct mpic *mpic = mpic_from_irq(virq);
892 unsigned int src = mpic_irq_to_hw(virq); 948 unsigned int src = virq_to_hw(virq);
893 unsigned int vecpri; 949 unsigned int vecpri;
894 950
895 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", 951 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
@@ -904,29 +960,49 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
904 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); 960 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
905} 961}
906 962
963void mpic_set_destination(unsigned int virq, unsigned int cpuid)
964{
965 struct mpic *mpic = mpic_from_irq(virq);
966 unsigned int src = virq_to_hw(virq);
967
968 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
969 mpic, virq, src, cpuid);
970
971 if (src >= mpic->irq_count)
972 return;
973
974 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
975}
976
907static struct irq_chip mpic_irq_chip = { 977static struct irq_chip mpic_irq_chip = {
908 .mask = mpic_mask_irq, 978 .irq_mask = mpic_mask_irq,
909 .unmask = mpic_unmask_irq, 979 .irq_unmask = mpic_unmask_irq,
910 .eoi = mpic_end_irq, 980 .irq_eoi = mpic_end_irq,
911 .set_type = mpic_set_irq_type, 981 .irq_set_type = mpic_set_irq_type,
912}; 982};
913 983
914#ifdef CONFIG_SMP 984#ifdef CONFIG_SMP
915static struct irq_chip mpic_ipi_chip = { 985static struct irq_chip mpic_ipi_chip = {
916 .mask = mpic_mask_ipi, 986 .irq_mask = mpic_mask_ipi,
917 .unmask = mpic_unmask_ipi, 987 .irq_unmask = mpic_unmask_ipi,
918 .eoi = mpic_end_ipi, 988 .irq_eoi = mpic_end_ipi,
919}; 989};
920#endif /* CONFIG_SMP */ 990#endif /* CONFIG_SMP */
921 991
992static struct irq_chip mpic_tm_chip = {
993 .irq_mask = mpic_mask_tm,
994 .irq_unmask = mpic_unmask_tm,
995 .irq_eoi = mpic_end_irq,
996};
997
922#ifdef CONFIG_MPIC_U3_HT_IRQS 998#ifdef CONFIG_MPIC_U3_HT_IRQS
923static struct irq_chip mpic_irq_ht_chip = { 999static struct irq_chip mpic_irq_ht_chip = {
924 .startup = mpic_startup_ht_irq, 1000 .irq_startup = mpic_startup_ht_irq,
925 .shutdown = mpic_shutdown_ht_irq, 1001 .irq_shutdown = mpic_shutdown_ht_irq,
926 .mask = mpic_mask_irq, 1002 .irq_mask = mpic_mask_irq,
927 .unmask = mpic_unmask_ht_irq, 1003 .irq_unmask = mpic_unmask_ht_irq,
928 .eoi = mpic_end_ht_irq, 1004 .irq_eoi = mpic_end_ht_irq,
929 .set_type = mpic_set_irq_type, 1005 .irq_set_type = mpic_set_irq_type,
930}; 1006};
931#endif /* CONFIG_MPIC_U3_HT_IRQS */ 1007#endif /* CONFIG_MPIC_U3_HT_IRQS */
932 1008
@@ -955,13 +1031,23 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
955 WARN_ON(!(mpic->flags & MPIC_PRIMARY)); 1031 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
956 1032
957 DBG("mpic: mapping as IPI\n"); 1033 DBG("mpic: mapping as IPI\n");
958 set_irq_chip_data(virq, mpic); 1034 irq_set_chip_data(virq, mpic);
959 set_irq_chip_and_handler(virq, &mpic->hc_ipi, 1035 irq_set_chip_and_handler(virq, &mpic->hc_ipi,
960 handle_percpu_irq); 1036 handle_percpu_irq);
961 return 0; 1037 return 0;
962 } 1038 }
963#endif /* CONFIG_SMP */ 1039#endif /* CONFIG_SMP */
964 1040
1041 if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
1042 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
1043
1044 DBG("mpic: mapping as timer\n");
1045 irq_set_chip_data(virq, mpic);
1046 irq_set_chip_and_handler(virq, &mpic->hc_tm,
1047 handle_fasteoi_irq);
1048 return 0;
1049 }
1050
965 if (hw >= mpic->irq_count) 1051 if (hw >= mpic->irq_count)
966 return -EINVAL; 1052 return -EINVAL;
967 1053
@@ -978,11 +1064,21 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
978 1064
979 DBG("mpic: mapping to irq chip @%p\n", chip); 1065 DBG("mpic: mapping to irq chip @%p\n", chip);
980 1066
981 set_irq_chip_data(virq, mpic); 1067 irq_set_chip_data(virq, mpic);
982 set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); 1068 irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
983 1069
984 /* Set default irq type */ 1070 /* Set default irq type */
985 set_irq_type(virq, IRQ_TYPE_NONE); 1071 irq_set_irq_type(virq, IRQ_TYPE_NONE);
1072
1073 /* If the MPIC was reset, then all vectors have already been
1074 * initialized. Otherwise, a per source lazy initialization
1075 * is done here.
1076 */
1077 if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
1078 mpic_set_vector(virq, hw);
1079 mpic_set_destination(virq, mpic_processor_id(mpic));
1080 mpic_irq_set_priority(virq, 8);
1081 }
986 1082
987 return 0; 1083 return 0;
988} 1084}
@@ -992,6 +1088,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
992 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1088 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
993 1089
994{ 1090{
1091 struct mpic *mpic = h->host_data;
995 static unsigned char map_mpic_senses[4] = { 1092 static unsigned char map_mpic_senses[4] = {
996 IRQ_TYPE_EDGE_RISING, 1093 IRQ_TYPE_EDGE_RISING,
997 IRQ_TYPE_LEVEL_LOW, 1094 IRQ_TYPE_LEVEL_LOW,
@@ -1000,7 +1097,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
1000 }; 1097 };
1001 1098
1002 *out_hwirq = intspec[0]; 1099 *out_hwirq = intspec[0];
1003 if (intsize > 1) { 1100 if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
1101 /*
1102 * Freescale MPIC with extended intspec:
1103 * First two cells are as usual. Third specifies
1104 * an "interrupt type". Fourth is type-specific data.
1105 *
1106 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
1107 */
1108 switch (intspec[2]) {
1109 case 0:
1110 case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
1111 break;
1112 case 2:
1113 if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
1114 return -EINVAL;
1115
1116 *out_hwirq = mpic->ipi_vecs[intspec[0]];
1117 break;
1118 case 3:
1119 if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
1120 return -EINVAL;
1121
1122 *out_hwirq = mpic->timer_vecs[intspec[0]];
1123 break;
1124 default:
1125 pr_debug("%s: unknown irq type %u\n",
1126 __func__, intspec[2]);
1127 return -EINVAL;
1128 }
1129
1130 *out_flags = map_mpic_senses[intspec[1] & 3];
1131 } else if (intsize > 1) {
1004 u32 mask = 0x3; 1132 u32 mask = 0x3;
1005 1133
1006 /* Apple invented a new race of encoding on machines with 1134 /* Apple invented a new race of encoding on machines with
@@ -1031,6 +1159,11 @@ static struct irq_host_ops mpic_host_ops = {
1031 .xlate = mpic_host_xlate, 1159 .xlate = mpic_host_xlate,
1032}; 1160};
1033 1161
1162static int mpic_reset_prohibited(struct device_node *node)
1163{
1164 return node && of_get_property(node, "pic-no-reset", NULL);
1165}
1166
1034/* 1167/*
1035 * Exported functions 1168 * Exported functions
1036 */ 1169 */
@@ -1058,12 +1191,12 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1058 mpic->hc_irq = mpic_irq_chip; 1191 mpic->hc_irq = mpic_irq_chip;
1059 mpic->hc_irq.name = name; 1192 mpic->hc_irq.name = name;
1060 if (flags & MPIC_PRIMARY) 1193 if (flags & MPIC_PRIMARY)
1061 mpic->hc_irq.set_affinity = mpic_set_affinity; 1194 mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
1062#ifdef CONFIG_MPIC_U3_HT_IRQS 1195#ifdef CONFIG_MPIC_U3_HT_IRQS
1063 mpic->hc_ht_irq = mpic_irq_ht_chip; 1196 mpic->hc_ht_irq = mpic_irq_ht_chip;
1064 mpic->hc_ht_irq.name = name; 1197 mpic->hc_ht_irq.name = name;
1065 if (flags & MPIC_PRIMARY) 1198 if (flags & MPIC_PRIMARY)
1066 mpic->hc_ht_irq.set_affinity = mpic_set_affinity; 1199 mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
1067#endif /* CONFIG_MPIC_U3_HT_IRQS */ 1200#endif /* CONFIG_MPIC_U3_HT_IRQS */
1068 1201
1069#ifdef CONFIG_SMP 1202#ifdef CONFIG_SMP
@@ -1071,6 +1204,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1071 mpic->hc_ipi.name = name; 1204 mpic->hc_ipi.name = name;
1072#endif /* CONFIG_SMP */ 1205#endif /* CONFIG_SMP */
1073 1206
1207 mpic->hc_tm = mpic_tm_chip;
1208 mpic->hc_tm.name = name;
1209
1074 mpic->flags = flags; 1210 mpic->flags = flags;
1075 mpic->isu_size = isu_size; 1211 mpic->isu_size = isu_size;
1076 mpic->irq_count = irq_count; 1212 mpic->irq_count = irq_count;
@@ -1081,10 +1217,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1081 else 1217 else
1082 intvec_top = 255; 1218 intvec_top = 255;
1083 1219
1084 mpic->timer_vecs[0] = intvec_top - 8; 1220 mpic->timer_vecs[0] = intvec_top - 12;
1085 mpic->timer_vecs[1] = intvec_top - 7; 1221 mpic->timer_vecs[1] = intvec_top - 11;
1086 mpic->timer_vecs[2] = intvec_top - 6; 1222 mpic->timer_vecs[2] = intvec_top - 10;
1087 mpic->timer_vecs[3] = intvec_top - 5; 1223 mpic->timer_vecs[3] = intvec_top - 9;
1224 mpic->timer_vecs[4] = intvec_top - 8;
1225 mpic->timer_vecs[5] = intvec_top - 7;
1226 mpic->timer_vecs[6] = intvec_top - 6;
1227 mpic->timer_vecs[7] = intvec_top - 5;
1088 mpic->ipi_vecs[0] = intvec_top - 4; 1228 mpic->ipi_vecs[0] = intvec_top - 4;
1089 mpic->ipi_vecs[1] = intvec_top - 3; 1229 mpic->ipi_vecs[1] = intvec_top - 3;
1090 mpic->ipi_vecs[2] = intvec_top - 2; 1230 mpic->ipi_vecs[2] = intvec_top - 2;
@@ -1094,6 +1234,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1094 /* Check for "big-endian" in device-tree */ 1234 /* Check for "big-endian" in device-tree */
1095 if (node && of_get_property(node, "big-endian", NULL) != NULL) 1235 if (node && of_get_property(node, "big-endian", NULL) != NULL)
1096 mpic->flags |= MPIC_BIG_ENDIAN; 1236 mpic->flags |= MPIC_BIG_ENDIAN;
1237 if (node && of_device_is_compatible(node, "fsl,mpic"))
1238 mpic->flags |= MPIC_FSL;
1097 1239
1098 /* Look for protected sources */ 1240 /* Look for protected sources */
1099 if (node) { 1241 if (node) {
@@ -1151,7 +1293,15 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1151 mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); 1293 mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
1152 1294
1153 /* Reset */ 1295 /* Reset */
1154 if (flags & MPIC_WANTS_RESET) { 1296
1297 /* When using a device-node, reset requests are only honored if the MPIC
1298 * is allowed to reset.
1299 */
1300 if (mpic_reset_prohibited(node))
1301 mpic->flags |= MPIC_NO_RESET;
1302
1303 if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
1304 printk(KERN_DEBUG "mpic: Resetting\n");
1155 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), 1305 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1156 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) 1306 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1157 | MPIC_GREG_GCONF_RESET); 1307 | MPIC_GREG_GCONF_RESET);
@@ -1277,15 +1427,17 @@ void __init mpic_init(struct mpic *mpic)
1277 /* Set current processor priority to max */ 1427 /* Set current processor priority to max */
1278 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); 1428 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1279 1429
1280 /* Initialize timers: just disable them all */ 1430 /* Initialize timers to our reserved vectors and mask them for now */
1281 for (i = 0; i < 4; i++) { 1431 for (i = 0; i < 4; i++) {
1282 mpic_write(mpic->tmregs, 1432 mpic_write(mpic->tmregs,
1283 i * MPIC_INFO(TIMER_STRIDE) + 1433 i * MPIC_INFO(TIMER_STRIDE) +
1284 MPIC_INFO(TIMER_DESTINATION), 0); 1434 MPIC_INFO(TIMER_DESTINATION),
1435 1 << hard_smp_processor_id());
1285 mpic_write(mpic->tmregs, 1436 mpic_write(mpic->tmregs,
1286 i * MPIC_INFO(TIMER_STRIDE) + 1437 i * MPIC_INFO(TIMER_STRIDE) +
1287 MPIC_INFO(TIMER_VECTOR_PRI), 1438 MPIC_INFO(TIMER_VECTOR_PRI),
1288 MPIC_VECPRI_MASK | 1439 MPIC_VECPRI_MASK |
1440 (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1289 (mpic->timer_vecs[0] + i)); 1441 (mpic->timer_vecs[0] + i));
1290 } 1442 }
1291 1443
@@ -1311,22 +1463,21 @@ void __init mpic_init(struct mpic *mpic)
1311 1463
1312 mpic_pasemi_msi_init(mpic); 1464 mpic_pasemi_msi_init(mpic);
1313 1465
1314 if (mpic->flags & MPIC_PRIMARY) 1466 cpu = mpic_processor_id(mpic);
1315 cpu = hard_smp_processor_id();
1316 else
1317 cpu = 0;
1318 1467
1319 for (i = 0; i < mpic->num_sources; i++) { 1468 if (!(mpic->flags & MPIC_NO_RESET)) {
1320 /* start with vector = source number, and masked */ 1469 for (i = 0; i < mpic->num_sources; i++) {
1321 u32 vecpri = MPIC_VECPRI_MASK | i | 1470 /* start with vector = source number, and masked */
1322 (8 << MPIC_VECPRI_PRIORITY_SHIFT); 1471 u32 vecpri = MPIC_VECPRI_MASK | i |
1472 (8 << MPIC_VECPRI_PRIORITY_SHIFT);
1323 1473
1324 /* check if protected */ 1474 /* check if protected */
1325 if (mpic->protected && test_bit(i, mpic->protected)) 1475 if (mpic->protected && test_bit(i, mpic->protected))
1326 continue; 1476 continue;
1327 /* init hw */ 1477 /* init hw */
1328 mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); 1478 mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
1329 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); 1479 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
1480 }
1330 } 1481 }
1331 1482
1332 /* Init spurious vector */ 1483 /* Init spurious vector */
@@ -1382,7 +1533,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)
1382void mpic_irq_set_priority(unsigned int irq, unsigned int pri) 1533void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1383{ 1534{
1384 struct mpic *mpic = mpic_find(irq); 1535 struct mpic *mpic = mpic_find(irq);
1385 unsigned int src = mpic_irq_to_hw(irq); 1536 unsigned int src = virq_to_hw(irq);
1386 unsigned long flags; 1537 unsigned long flags;
1387 u32 reg; 1538 u32 reg;
1388 1539
@@ -1395,6 +1546,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1395 ~MPIC_VECPRI_PRIORITY_MASK; 1546 ~MPIC_VECPRI_PRIORITY_MASK;
1396 mpic_ipi_write(src - mpic->ipi_vecs[0], 1547 mpic_ipi_write(src - mpic->ipi_vecs[0],
1397 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 1548 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1549 } else if (mpic_is_tm(mpic, irq)) {
1550 reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
1551 ~MPIC_VECPRI_PRIORITY_MASK;
1552 mpic_tm_write(src - mpic->timer_vecs[0],
1553 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1398 } else { 1554 } else {
1399 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) 1555 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1400 & ~MPIC_VECPRI_PRIORITY_MASK; 1556 & ~MPIC_VECPRI_PRIORITY_MASK;
@@ -1493,9 +1649,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
1493 return NO_IRQ; 1649 return NO_IRQ;
1494 } 1650 }
1495 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { 1651 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1496 if (printk_ratelimit()) 1652 printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1497 printk(KERN_WARNING "%s: Got protected source %d !\n", 1653 mpic->name, (int)src);
1498 mpic->name, (int)src);
1499 mpic_eoi(mpic); 1654 mpic_eoi(mpic);
1500 return NO_IRQ; 1655 return NO_IRQ;
1501 } 1656 }
@@ -1533,9 +1688,8 @@ unsigned int mpic_get_coreint_irq(void)
1533 return NO_IRQ; 1688 return NO_IRQ;
1534 } 1689 }
1535 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { 1690 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1536 if (printk_ratelimit()) 1691 printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1537 printk(KERN_WARNING "%s: Got protected source %d !\n", 1692 mpic->name, (int)src);
1538 mpic->name, (int)src);
1539 return NO_IRQ; 1693 return NO_IRQ;
1540 } 1694 }
1541 1695
@@ -1574,46 +1728,28 @@ void mpic_request_ipis(void)
1574 } 1728 }
1575} 1729}
1576 1730
1577static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask) 1731void smp_mpic_message_pass(int cpu, int msg)
1578{ 1732{
1579 struct mpic *mpic = mpic_primary; 1733 struct mpic *mpic = mpic_primary;
1734 u32 physmask;
1580 1735
1581 BUG_ON(mpic == NULL); 1736 BUG_ON(mpic == NULL);
1582 1737
1583#ifdef DEBUG_IPI
1584 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
1585#endif
1586
1587 mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1588 ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
1589 mpic_physmask(cpumask_bits(cpu_mask)[0]));
1590}
1591
1592void smp_mpic_message_pass(int target, int msg)
1593{
1594 cpumask_var_t tmp;
1595
1596 /* make sure we're sending something that translates to an IPI */ 1738 /* make sure we're sending something that translates to an IPI */
1597 if ((unsigned int)msg > 3) { 1739 if ((unsigned int)msg > 3) {
1598 printk("SMP %d: smp_message_pass: unknown msg %d\n", 1740 printk("SMP %d: smp_message_pass: unknown msg %d\n",
1599 smp_processor_id(), msg); 1741 smp_processor_id(), msg);
1600 return; 1742 return;
1601 } 1743 }
1602 switch (target) { 1744
1603 case MSG_ALL: 1745#ifdef DEBUG_IPI
1604 mpic_send_ipi(msg, cpu_online_mask); 1746 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
1605 break; 1747#endif
1606 case MSG_ALL_BUT_SELF: 1748
1607 alloc_cpumask_var(&tmp, GFP_NOWAIT); 1749 physmask = 1 << get_hard_smp_processor_id(cpu);
1608 cpumask_andnot(tmp, cpu_online_mask, 1750
1609 cpumask_of(smp_processor_id())); 1751 mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1610 mpic_send_ipi(msg, tmp); 1752 msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1611 free_cpumask_var(tmp);
1612 break;
1613 default:
1614 mpic_send_ipi(msg, cpumask_of(target));
1615 break;
1616 }
1617} 1753}
1618 1754
1619int __init smp_mpic_probe(void) 1755int __init smp_mpic_probe(void)
@@ -1657,9 +1793,8 @@ void mpic_reset_core(int cpu)
1657#endif /* CONFIG_SMP */ 1793#endif /* CONFIG_SMP */
1658 1794
1659#ifdef CONFIG_PM 1795#ifdef CONFIG_PM
1660static int mpic_suspend(struct sys_device *dev, pm_message_t state) 1796static void mpic_suspend_one(struct mpic *mpic)
1661{ 1797{
1662 struct mpic *mpic = container_of(dev, struct mpic, sysdev);
1663 int i; 1798 int i;
1664 1799
1665 for (i = 0; i < mpic->num_sources; i++) { 1800 for (i = 0; i < mpic->num_sources; i++) {
@@ -1668,13 +1803,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state)
1668 mpic->save_data[i].dest = 1803 mpic->save_data[i].dest =
1669 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); 1804 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
1670 } 1805 }
1806}
1807
1808static int mpic_suspend(void)
1809{
1810 struct mpic *mpic = mpics;
1811
1812 while (mpic) {
1813 mpic_suspend_one(mpic);
1814 mpic = mpic->next;
1815 }
1671 1816
1672 return 0; 1817 return 0;
1673} 1818}
1674 1819
1675static int mpic_resume(struct sys_device *dev) 1820static void mpic_resume_one(struct mpic *mpic)
1676{ 1821{
1677 struct mpic *mpic = container_of(dev, struct mpic, sysdev);
1678 int i; 1822 int i;
1679 1823
1680 for (i = 0; i < mpic->num_sources; i++) { 1824 for (i = 0; i < mpic->num_sources; i++) {
@@ -1701,33 +1845,28 @@ static int mpic_resume(struct sys_device *dev)
1701 } 1845 }
1702#endif 1846#endif
1703 } /* end for loop */ 1847 } /* end for loop */
1848}
1704 1849
1705 return 0; 1850static void mpic_resume(void)
1851{
1852 struct mpic *mpic = mpics;
1853
1854 while (mpic) {
1855 mpic_resume_one(mpic);
1856 mpic = mpic->next;
1857 }
1706} 1858}
1707#endif
1708 1859
1709static struct sysdev_class mpic_sysclass = { 1860static struct syscore_ops mpic_syscore_ops = {
1710#ifdef CONFIG_PM
1711 .resume = mpic_resume, 1861 .resume = mpic_resume,
1712 .suspend = mpic_suspend, 1862 .suspend = mpic_suspend,
1713#endif
1714 .name = "mpic",
1715}; 1863};
1716 1864
1717static int mpic_init_sys(void) 1865static int mpic_init_sys(void)
1718{ 1866{
1719 struct mpic *mpic = mpics; 1867 register_syscore_ops(&mpic_syscore_ops);
1720 int error, id = 0; 1868 return 0;
1721
1722 error = sysdev_class_register(&mpic_sysclass);
1723
1724 while (mpic && !error) {
1725 mpic->sysdev.cls = &mpic_sysclass;
1726 mpic->sysdev.id = id++;
1727 error = sysdev_register(&mpic->sysdev);
1728 mpic = mpic->next;
1729 }
1730 return error;
1731} 1869}
1732 1870
1733device_initcall(mpic_init_sys); 1871device_initcall(mpic_init_sys);
1872#endif
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index e4a6df77b8d7..13f3e8913a93 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -34,9 +34,10 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
34} 34}
35#endif 35#endif
36 36
37extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); 37extern int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type);
38extern void mpic_set_vector(unsigned int virq, unsigned int vector); 38extern void mpic_set_vector(unsigned int virq, unsigned int vector);
39extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); 39extern int mpic_set_affinity(struct irq_data *d,
40 const struct cpumask *cpumask, bool force);
40extern void mpic_reset_core(int cpu); 41extern void mpic_reset_core(int cpu);
41 42
42#endif /* _POWERPC_SYSDEV_MPIC_H */ 43#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 3b6a9a43718f..38e62382070c 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -39,28 +39,28 @@
39static struct mpic *msi_mpic; 39static struct mpic *msi_mpic;
40 40
41 41
42static void mpic_pasemi_msi_mask_irq(unsigned int irq) 42static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
43{ 43{
44 pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); 44 pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
45 mask_msi_irq(irq); 45 mask_msi_irq(data);
46 mpic_mask_irq(irq); 46 mpic_mask_irq(data);
47} 47}
48 48
49static void mpic_pasemi_msi_unmask_irq(unsigned int irq) 49static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
50{ 50{
51 pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); 51 pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
52 mpic_unmask_irq(irq); 52 mpic_unmask_irq(data);
53 unmask_msi_irq(irq); 53 unmask_msi_irq(data);
54} 54}
55 55
56static struct irq_chip mpic_pasemi_msi_chip = { 56static struct irq_chip mpic_pasemi_msi_chip = {
57 .shutdown = mpic_pasemi_msi_mask_irq, 57 .irq_shutdown = mpic_pasemi_msi_mask_irq,
58 .mask = mpic_pasemi_msi_mask_irq, 58 .irq_mask = mpic_pasemi_msi_mask_irq,
59 .unmask = mpic_pasemi_msi_unmask_irq, 59 .irq_unmask = mpic_pasemi_msi_unmask_irq,
60 .eoi = mpic_end_irq, 60 .irq_eoi = mpic_end_irq,
61 .set_type = mpic_set_irq_type, 61 .irq_set_type = mpic_set_irq_type,
62 .set_affinity = mpic_set_affinity, 62 .irq_set_affinity = mpic_set_affinity,
63 .name = "PASEMI-MSI", 63 .name = "PASEMI-MSI",
64}; 64};
65 65
66static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) 66static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
@@ -81,7 +81,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
81 if (entry->irq == NO_IRQ) 81 if (entry->irq == NO_IRQ)
82 continue; 82 continue;
83 83
84 set_irq_msi(entry->irq, NULL); 84 irq_set_msi_desc(entry->irq, NULL);
85 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, 85 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
86 virq_to_hw(entry->irq), ALLOC_CHUNK); 86 virq_to_hw(entry->irq), ALLOC_CHUNK);
87 irq_dispose_mapping(entry->irq); 87 irq_dispose_mapping(entry->irq);
@@ -131,9 +131,9 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
131 */ 131 */
132 mpic_set_vector(virq, 0); 132 mpic_set_vector(virq, 0);
133 133
134 set_irq_msi(virq, entry); 134 irq_set_msi_desc(virq, entry);
135 set_irq_chip(virq, &mpic_pasemi_msi_chip); 135 irq_set_chip(virq, &mpic_pasemi_msi_chip);
136 set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 136 irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
137 137
138 pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ 138 pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
139 "addr 0x%x\n", virq, hwirq, msg.address_lo); 139 "addr 0x%x\n", virq, hwirq, msg.address_lo);
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index bcbfe79c704b..9a7aa0ed9c1c 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -23,26 +23,26 @@
23/* A bit ugly, can we get this from the pci_dev somehow? */ 23/* A bit ugly, can we get this from the pci_dev somehow? */
24static struct mpic *msi_mpic; 24static struct mpic *msi_mpic;
25 25
26static void mpic_u3msi_mask_irq(unsigned int irq) 26static void mpic_u3msi_mask_irq(struct irq_data *data)
27{ 27{
28 mask_msi_irq(irq); 28 mask_msi_irq(data);
29 mpic_mask_irq(irq); 29 mpic_mask_irq(data);
30} 30}
31 31
32static void mpic_u3msi_unmask_irq(unsigned int irq) 32static void mpic_u3msi_unmask_irq(struct irq_data *data)
33{ 33{
34 mpic_unmask_irq(irq); 34 mpic_unmask_irq(data);
35 unmask_msi_irq(irq); 35 unmask_msi_irq(data);
36} 36}
37 37
38static struct irq_chip mpic_u3msi_chip = { 38static struct irq_chip mpic_u3msi_chip = {
39 .shutdown = mpic_u3msi_mask_irq, 39 .irq_shutdown = mpic_u3msi_mask_irq,
40 .mask = mpic_u3msi_mask_irq, 40 .irq_mask = mpic_u3msi_mask_irq,
41 .unmask = mpic_u3msi_unmask_irq, 41 .irq_unmask = mpic_u3msi_unmask_irq,
42 .eoi = mpic_end_irq, 42 .irq_eoi = mpic_end_irq,
43 .set_type = mpic_set_irq_type, 43 .irq_set_type = mpic_set_irq_type,
44 .set_affinity = mpic_set_affinity, 44 .irq_set_affinity = mpic_set_affinity,
45 .name = "MPIC-U3MSI", 45 .name = "MPIC-U3MSI",
46}; 46};
47 47
48static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) 48static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
@@ -129,7 +129,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
129 if (entry->irq == NO_IRQ) 129 if (entry->irq == NO_IRQ)
130 continue; 130 continue;
131 131
132 set_irq_msi(entry->irq, NULL); 132 irq_set_msi_desc(entry->irq, NULL);
133 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, 133 msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
134 virq_to_hw(entry->irq), 1); 134 virq_to_hw(entry->irq), 1);
135 irq_dispose_mapping(entry->irq); 135 irq_dispose_mapping(entry->irq);
@@ -166,9 +166,9 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
166 return -ENOSPC; 166 return -ENOSPC;
167 } 167 }
168 168
169 set_irq_msi(virq, entry); 169 irq_set_msi_desc(virq, entry);
170 set_irq_chip(virq, &mpic_u3msi_chip); 170 irq_set_chip(virq, &mpic_u3msi_chip);
171 set_irq_type(virq, IRQ_TYPE_EDGE_RISING); 171 irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
172 172
173 pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", 173 pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
174 virq, hwirq, (unsigned long)addr); 174 virq, hwirq, (unsigned long)addr);
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 1398bc454999..0f6af41ebb44 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -16,6 +16,7 @@
16#include <linux/mv643xx.h> 16#include <linux/mv643xx.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/of_net.h>
19#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
20 21
21#include <asm/prom.h> 22#include <asm/prom.h>
@@ -345,7 +346,7 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id)
345 if (prop) 346 if (prop)
346 pdata.freq_m = *prop; 347 pdata.freq_m = *prop;
347 348
348 pdata.freq_m = 3; /* default */ 349 pdata.freq_n = 3; /* default */
349 prop = of_get_property(np, "freq_n", NULL); 350 prop = of_get_property(np, "freq_n", NULL);
350 if (prop) 351 if (prop)
351 pdata.freq_n = *prop; 352 pdata.freq_n = *prop;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 485b92477d7c..14d130268e7a 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -76,9 +76,9 @@ static struct irq_host *mv64x60_irq_host;
76 * mv64x60_chip_low functions 76 * mv64x60_chip_low functions
77 */ 77 */
78 78
79static void mv64x60_mask_low(unsigned int virq) 79static void mv64x60_mask_low(struct irq_data *d)
80{ 80{
81 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 81 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
82 unsigned long flags; 82 unsigned long flags;
83 83
84 spin_lock_irqsave(&mv64x60_lock, flags); 84 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -89,9 +89,9 @@ static void mv64x60_mask_low(unsigned int virq)
89 (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); 89 (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
90} 90}
91 91
92static void mv64x60_unmask_low(unsigned int virq) 92static void mv64x60_unmask_low(struct irq_data *d)
93{ 93{
94 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 94 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
95 unsigned long flags; 95 unsigned long flags;
96 96
97 spin_lock_irqsave(&mv64x60_lock, flags); 97 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -104,18 +104,18 @@ static void mv64x60_unmask_low(unsigned int virq)
104 104
105static struct irq_chip mv64x60_chip_low = { 105static struct irq_chip mv64x60_chip_low = {
106 .name = "mv64x60_low", 106 .name = "mv64x60_low",
107 .mask = mv64x60_mask_low, 107 .irq_mask = mv64x60_mask_low,
108 .mask_ack = mv64x60_mask_low, 108 .irq_mask_ack = mv64x60_mask_low,
109 .unmask = mv64x60_unmask_low, 109 .irq_unmask = mv64x60_unmask_low,
110}; 110};
111 111
112/* 112/*
113 * mv64x60_chip_high functions 113 * mv64x60_chip_high functions
114 */ 114 */
115 115
116static void mv64x60_mask_high(unsigned int virq) 116static void mv64x60_mask_high(struct irq_data *d)
117{ 117{
118 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 118 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
119 unsigned long flags; 119 unsigned long flags;
120 120
121 spin_lock_irqsave(&mv64x60_lock, flags); 121 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -126,9 +126,9 @@ static void mv64x60_mask_high(unsigned int virq)
126 (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); 126 (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
127} 127}
128 128
129static void mv64x60_unmask_high(unsigned int virq) 129static void mv64x60_unmask_high(struct irq_data *d)
130{ 130{
131 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 131 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
132 unsigned long flags; 132 unsigned long flags;
133 133
134 spin_lock_irqsave(&mv64x60_lock, flags); 134 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -141,18 +141,18 @@ static void mv64x60_unmask_high(unsigned int virq)
141 141
142static struct irq_chip mv64x60_chip_high = { 142static struct irq_chip mv64x60_chip_high = {
143 .name = "mv64x60_high", 143 .name = "mv64x60_high",
144 .mask = mv64x60_mask_high, 144 .irq_mask = mv64x60_mask_high,
145 .mask_ack = mv64x60_mask_high, 145 .irq_mask_ack = mv64x60_mask_high,
146 .unmask = mv64x60_unmask_high, 146 .irq_unmask = mv64x60_unmask_high,
147}; 147};
148 148
149/* 149/*
150 * mv64x60_chip_gpp functions 150 * mv64x60_chip_gpp functions
151 */ 151 */
152 152
153static void mv64x60_mask_gpp(unsigned int virq) 153static void mv64x60_mask_gpp(struct irq_data *d)
154{ 154{
155 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 155 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
156 unsigned long flags; 156 unsigned long flags;
157 157
158 spin_lock_irqsave(&mv64x60_lock, flags); 158 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -163,9 +163,9 @@ static void mv64x60_mask_gpp(unsigned int virq)
163 (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); 163 (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
164} 164}
165 165
166static void mv64x60_mask_ack_gpp(unsigned int virq) 166static void mv64x60_mask_ack_gpp(struct irq_data *d)
167{ 167{
168 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 168 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
169 unsigned long flags; 169 unsigned long flags;
170 170
171 spin_lock_irqsave(&mv64x60_lock, flags); 171 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -178,9 +178,9 @@ static void mv64x60_mask_ack_gpp(unsigned int virq)
178 (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE); 178 (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
179} 179}
180 180
181static void mv64x60_unmask_gpp(unsigned int virq) 181static void mv64x60_unmask_gpp(struct irq_data *d)
182{ 182{
183 int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; 183 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
184 unsigned long flags; 184 unsigned long flags;
185 185
186 spin_lock_irqsave(&mv64x60_lock, flags); 186 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -193,9 +193,9 @@ static void mv64x60_unmask_gpp(unsigned int virq)
193 193
194static struct irq_chip mv64x60_chip_gpp = { 194static struct irq_chip mv64x60_chip_gpp = {
195 .name = "mv64x60_gpp", 195 .name = "mv64x60_gpp",
196 .mask = mv64x60_mask_gpp, 196 .irq_mask = mv64x60_mask_gpp,
197 .mask_ack = mv64x60_mask_ack_gpp, 197 .irq_mask_ack = mv64x60_mask_ack_gpp,
198 .unmask = mv64x60_unmask_gpp, 198 .irq_unmask = mv64x60_unmask_gpp,
199}; 199};
200 200
201/* 201/*
@@ -213,11 +213,12 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
213{ 213{
214 int level1; 214 int level1;
215 215
216 irq_to_desc(virq)->status |= IRQ_LEVEL; 216 irq_set_status_flags(virq, IRQ_LEVEL);
217 217
218 level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; 218 level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
219 BUG_ON(level1 > MV64x60_LEVEL1_GPP); 219 BUG_ON(level1 > MV64x60_LEVEL1_GPP);
220 set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq); 220 irq_set_chip_and_handler(virq, mv64x60_chips[level1],
221 handle_level_irq);
221 222
222 return 0; 223 return 0;
223} 224}
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 24a0bb955b18..8ce4fc3d9828 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -114,15 +114,14 @@ static void pmi_notify_handlers(struct work_struct *work)
114 114
115 spin_lock(&data->handler_spinlock); 115 spin_lock(&data->handler_spinlock);
116 list_for_each_entry(handler, &data->handler, node) { 116 list_for_each_entry(handler, &data->handler, node) {
117 pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler); 117 pr_debug("pmi: notifying handler %p\n", handler);
118 if (handler->type == data->msg.type) 118 if (handler->type == data->msg.type)
119 handler->handle_pmi_message(data->msg); 119 handler->handle_pmi_message(data->msg);
120 } 120 }
121 spin_unlock(&data->handler_spinlock); 121 spin_unlock(&data->handler_spinlock);
122} 122}
123 123
124static int pmi_of_probe(struct platform_device *dev, 124static int pmi_of_probe(struct platform_device *dev)
125 const struct of_device_id *match)
126{ 125{
127 struct device_node *np = dev->dev.of_node; 126 struct device_node *np = dev->dev.of_node;
128 int rc; 127 int rc;
@@ -205,7 +204,7 @@ static int pmi_of_remove(struct platform_device *dev)
205 return 0; 204 return 0;
206} 205}
207 206
208static struct of_platform_driver pmi_of_platform_driver = { 207static struct platform_driver pmi_of_platform_driver = {
209 .probe = pmi_of_probe, 208 .probe = pmi_of_probe,
210 .remove = pmi_of_remove, 209 .remove = pmi_of_remove,
211 .driver = { 210 .driver = {
@@ -217,13 +216,13 @@ static struct of_platform_driver pmi_of_platform_driver = {
217 216
218static int __init pmi_module_init(void) 217static int __init pmi_module_init(void)
219{ 218{
220 return of_register_platform_driver(&pmi_of_platform_driver); 219 return platform_driver_register(&pmi_of_platform_driver);
221} 220}
222module_init(pmi_module_init); 221module_init(pmi_module_init);
223 222
224static void __exit pmi_module_exit(void) 223static void __exit pmi_module_exit(void)
225{ 224{
226 of_unregister_platform_driver(&pmi_of_platform_driver); 225 platform_driver_unregister(&pmi_of_platform_driver);
227} 226}
228module_exit(pmi_module_exit); 227module_exit(pmi_module_exit);
229 228
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 000000000000..73b86cc5ea74
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
1/*
2 * PowerPC 4xx Clock and Power Management
3 *
4 * Copyright (C) 2010, Applied Micro Circuits Corporation
5 * Victor Gallardo (vgallardo@apm.com)
6 *
7 * Based on arch/powerpc/platforms/44x/idle.c:
8 * Jerone Young <jyoung5@us.ibm.com>
9 * Copyright 2008 IBM Corp.
10 *
11 * Based on arch/powerpc/sysdev/fsl_pmc.c:
12 * Anton Vorontsov <avorontsov@ru.mvista.com>
13 * Copyright 2009 MontaVista Software, Inc.
14 *
15 * See file CREDITS for list of people who contributed to this
16 * project.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
31 * MA 02111-1307 USA
32 */
33
34#include <linux/kernel.h>
35#include <linux/of_platform.h>
36#include <linux/sysfs.h>
37#include <linux/cpu.h>
38#include <linux/suspend.h>
39#include <asm/dcr.h>
40#include <asm/dcr-native.h>
41#include <asm/machdep.h>
42
43#define CPM_ER 0
44#define CPM_FR 1
45#define CPM_SR 2
46
47#define CPM_IDLE_WAIT 0
48#define CPM_IDLE_DOZE 1
49
50struct cpm {
51 dcr_host_t dcr_host;
52 unsigned int dcr_offset[3];
53 unsigned int powersave_off;
54 unsigned int unused;
55 unsigned int idle_doze;
56 unsigned int standby;
57 unsigned int suspend;
58};
59
60static struct cpm cpm;
61
62struct cpm_idle_mode {
63 unsigned int enabled;
64 const char *name;
65};
66
67static struct cpm_idle_mode idle_mode[] = {
68 [CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
69 [CPM_IDLE_DOZE] = { 0, "doze" },
70};
71
72static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
73{
74 unsigned int value;
75
76 /* CPM controller supports 3 different types of sleep interface
77 * known as class 1, 2 and 3. For class 1 units, they are
78 * unconditionally put to sleep when the corresponding CPM bit is
79 * set. For class 2 and 3 units this is not case; if they can be
80 * put to to sleep, they will. Here we do not verify, we just
81 * set them and expect them to eventually go off when they can.
82 */
83 value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
84 dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
85
86 /* return old state, to restore later if needed */
87 return value;
88}
89
90static void cpm_idle_wait(void)
91{
92 unsigned long msr_save;
93
94 /* save off initial state */
95 msr_save = mfmsr();
96 /* sync required when CPM0_ER[CPU] is set */
97 mb();
98 /* set wait state MSR */
99 mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
100 isync();
101 /* return to initial state */
102 mtmsr(msr_save);
103 isync();
104}
105
106static void cpm_idle_sleep(unsigned int mask)
107{
108 unsigned int er_save;
109
110 /* update CPM_ER state */
111 er_save = cpm_set(CPM_ER, mask);
112
113 /* go to wait state so that CPM0_ER[CPU] can take effect */
114 cpm_idle_wait();
115
116 /* restore CPM_ER state */
117 dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
118}
119
120static void cpm_idle_doze(void)
121{
122 cpm_idle_sleep(cpm.idle_doze);
123}
124
125static void cpm_idle_config(int mode)
126{
127 int i;
128
129 if (idle_mode[mode].enabled)
130 return;
131
132 for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
133 idle_mode[i].enabled = 0;
134
135 idle_mode[mode].enabled = 1;
136}
137
138static ssize_t cpm_idle_show(struct kobject *kobj,
139 struct kobj_attribute *attr, char *buf)
140{
141 char *s = buf;
142 int i;
143
144 for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
145 if (idle_mode[i].enabled)
146 s += sprintf(s, "[%s] ", idle_mode[i].name);
147 else
148 s += sprintf(s, "%s ", idle_mode[i].name);
149 }
150
151 *(s-1) = '\n'; /* convert the last space to a newline */
152
153 return s - buf;
154}
155
156static ssize_t cpm_idle_store(struct kobject *kobj,
157 struct kobj_attribute *attr,
158 const char *buf, size_t n)
159{
160 int i;
161 char *p;
162 int len;
163
164 p = memchr(buf, '\n', n);
165 len = p ? p - buf : n;
166
167 for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
168 if (strncmp(buf, idle_mode[i].name, len) == 0) {
169 cpm_idle_config(i);
170 return n;
171 }
172 }
173
174 return -EINVAL;
175}
176
177static struct kobj_attribute cpm_idle_attr =
178 __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
179
180static void cpm_idle_config_sysfs(void)
181{
182 struct sys_device *sys_dev;
183 unsigned long ret;
184
185 sys_dev = get_cpu_sysdev(0);
186
187 ret = sysfs_create_file(&sys_dev->kobj,
188 &cpm_idle_attr.attr);
189 if (ret)
190 printk(KERN_WARNING
191 "cpm: failed to create idle sysfs entry\n");
192}
193
194static void cpm_idle(void)
195{
196 if (idle_mode[CPM_IDLE_DOZE].enabled)
197 cpm_idle_doze();
198 else
199 cpm_idle_wait();
200}
201
202static int cpm_suspend_valid(suspend_state_t state)
203{
204 switch (state) {
205 case PM_SUSPEND_STANDBY:
206 return !!cpm.standby;
207 case PM_SUSPEND_MEM:
208 return !!cpm.suspend;
209 default:
210 return 0;
211 }
212}
213
214static void cpm_suspend_standby(unsigned int mask)
215{
216 unsigned long tcr_save;
217
218 /* disable decrement interrupt */
219 tcr_save = mfspr(SPRN_TCR);
220 mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
221
222 /* go to sleep state */
223 cpm_idle_sleep(mask);
224
225 /* restore decrement interrupt */
226 mtspr(SPRN_TCR, tcr_save);
227}
228
229static int cpm_suspend_enter(suspend_state_t state)
230{
231 switch (state) {
232 case PM_SUSPEND_STANDBY:
233 cpm_suspend_standby(cpm.standby);
234 break;
235 case PM_SUSPEND_MEM:
236 cpm_suspend_standby(cpm.suspend);
237 break;
238 }
239
240 return 0;
241}
242
243static struct platform_suspend_ops cpm_suspend_ops = {
244 .valid = cpm_suspend_valid,
245 .enter = cpm_suspend_enter,
246};
247
248static int cpm_get_uint_property(struct device_node *np,
249 const char *name)
250{
251 int len;
252 const unsigned int *prop = of_get_property(np, name, &len);
253
254 if (prop == NULL || len < sizeof(u32))
255 return 0;
256
257 return *prop;
258}
259
260static int __init cpm_init(void)
261{
262 struct device_node *np;
263 int dcr_base, dcr_len;
264 int ret = 0;
265
266 if (!cpm.powersave_off) {
267 cpm_idle_config(CPM_IDLE_WAIT);
268 ppc_md.power_save = &cpm_idle;
269 }
270
271 np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
272 if (!np) {
273 ret = -EINVAL;
274 goto out;
275 }
276
277 dcr_base = dcr_resource_start(np, 0);
278 dcr_len = dcr_resource_len(np, 0);
279
280 if (dcr_base == 0 || dcr_len == 0) {
281 printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
282 np->full_name);
283 ret = -EINVAL;
284 goto out;
285 }
286
287 cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
288
289 if (!DCR_MAP_OK(cpm.dcr_host)) {
290 printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
291 np->full_name);
292 ret = -EINVAL;
293 goto out;
294 }
295
296 /* All 4xx SoCs with a CPM controller have one of two
297 * different order for the CPM registers. Some have the
298 * CPM registers in the following order (ER,FR,SR). The
299 * others have them in the following order (SR,ER,FR).
300 */
301
302 if (cpm_get_uint_property(np, "er-offset") == 0) {
303 cpm.dcr_offset[CPM_ER] = 0;
304 cpm.dcr_offset[CPM_FR] = 1;
305 cpm.dcr_offset[CPM_SR] = 2;
306 } else {
307 cpm.dcr_offset[CPM_ER] = 1;
308 cpm.dcr_offset[CPM_FR] = 2;
309 cpm.dcr_offset[CPM_SR] = 0;
310 }
311
312 /* Now let's see what IPs to turn off for the following modes */
313
314 cpm.unused = cpm_get_uint_property(np, "unused-units");
315 cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
316 cpm.standby = cpm_get_uint_property(np, "standby");
317 cpm.suspend = cpm_get_uint_property(np, "suspend");
318
319 /* If some IPs are unused let's turn them off now */
320
321 if (cpm.unused) {
322 cpm_set(CPM_ER, cpm.unused);
323 cpm_set(CPM_FR, cpm.unused);
324 }
325
326 /* Now let's export interfaces */
327
328 if (!cpm.powersave_off && cpm.idle_doze)
329 cpm_idle_config_sysfs();
330
331 if (cpm.standby || cpm.suspend)
332 suspend_set_ops(&cpm_suspend_ops);
333out:
334 if (np)
335 of_node_put(np);
336 return ret;
337}
338
339late_initcall(cpm_init);
340
341static int __init cpm_powersave_off(char *arg)
342{
343 cpm.powersave_off = 1;
344 return 0;
345}
346__setup("powersave=off", cpm_powersave_off);
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
new file mode 100644
index 000000000000..367af0241851
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -0,0 +1,276 @@
1/*
2 * Adding PCI-E MSI support for PPC4XX SoCs.
3 *
4 * Copyright (c) 2010, Applied Micro Circuits Corporation
5 * Authors: Tirumala R Marri <tmarri@apm.com>
6 * Feng Kan <fkan@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24#include <linux/irq.h>
25#include <linux/bootmem.h>
26#include <linux/pci.h>
27#include <linux/msi.h>
28#include <linux/of_platform.h>
29#include <linux/interrupt.h>
30#include <asm/prom.h>
31#include <asm/hw_irq.h>
32#include <asm/ppc-pci.h>
33#include <boot/dcr.h>
34#include <asm/dcr-regs.h>
35#include <asm/msi_bitmap.h>
36
37#define PEIH_TERMADH 0x00
38#define PEIH_TERMADL 0x08
39#define PEIH_MSIED 0x10
40#define PEIH_MSIMK 0x18
41#define PEIH_MSIASS 0x20
42#define PEIH_FLUSH0 0x30
43#define PEIH_FLUSH1 0x38
44#define PEIH_CNTRST 0x48
45#define NR_MSI_IRQS 4
46
47struct ppc4xx_msi {
48 u32 msi_addr_lo;
49 u32 msi_addr_hi;
50 void __iomem *msi_regs;
51 int msi_virqs[NR_MSI_IRQS];
52 struct msi_bitmap bitmap;
53 struct device_node *msi_dev;
54};
55
56static struct ppc4xx_msi ppc4xx_msi;
57
58static int ppc4xx_msi_init_allocator(struct platform_device *dev,
59 struct ppc4xx_msi *msi_data)
60{
61 int err;
62
63 err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
64 dev->dev.of_node);
65 if (err)
66 return err;
67
68 err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
69 if (err < 0) {
70 msi_bitmap_free(&msi_data->bitmap);
71 return err;
72 }
73
74 return 0;
75}
76
77static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
78{
79 int int_no = -ENOMEM;
80 unsigned int virq;
81 struct msi_msg msg;
82 struct msi_desc *entry;
83 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
84
85 list_for_each_entry(entry, &dev->msi_list, list) {
86 int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
87 if (int_no >= 0)
88 break;
89 if (int_no < 0) {
90 pr_debug("%s: fail allocating msi interrupt\n",
91 __func__);
92 }
93 virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
94 if (virq == NO_IRQ) {
95 dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
96 msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
97 return -ENOSPC;
98 }
99 dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
100
101 /* Setup msi address space */
102 msg.address_hi = msi_data->msi_addr_hi;
103 msg.address_lo = msi_data->msi_addr_lo;
104
105 irq_set_msi_desc(virq, entry);
106 msg.data = int_no;
107 write_msi_msg(virq, &msg);
108 }
109 return 0;
110}
111
112void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
113{
114 struct msi_desc *entry;
115 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
116
117 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
118
119 list_for_each_entry(entry, &dev->msi_list, list) {
120 if (entry->irq == NO_IRQ)
121 continue;
122 irq_set_msi_desc(entry->irq, NULL);
123 msi_bitmap_free_hwirqs(&msi_data->bitmap,
124 virq_to_hw(entry->irq), 1);
125 irq_dispose_mapping(entry->irq);
126 }
127}
128
129static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
130{
131 dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
132 __func__, nvec, type);
133 if (type == PCI_CAP_ID_MSIX)
134 pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
135
136 return 0;
137}
138
139static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
140 struct resource res, struct ppc4xx_msi *msi)
141{
142 const u32 *msi_data;
143 const u32 *msi_mask;
144 const u32 *sdr_addr;
145 dma_addr_t msi_phys;
146 void *msi_virt;
147
148 sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
149 if (!sdr_addr)
150 return -1;
151
152 SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */
153 SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
154
155
156 msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
157 if (msi->msi_dev)
158 return -ENODEV;
159
160 msi->msi_regs = of_iomap(msi->msi_dev, 0);
161 if (!msi->msi_regs) {
162 dev_err(&dev->dev, "of_iomap problem failed\n");
163 return -ENOMEM;
164 }
165 dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
166 (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
167
168 msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
169 msi->msi_addr_hi = 0x0;
170 msi->msi_addr_lo = (u32) msi_phys;
171 dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
172
173 /* Progam the Interrupt handler Termination addr registers */
174 out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
175 out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
176
177 msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
178 if (!msi_data)
179 return -1;
180 msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
181 if (!msi_mask)
182 return -1;
183 /* Program MSI Expected data and Mask bits */
184 out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
185 out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
186
187 return 0;
188}
189
190static int ppc4xx_of_msi_remove(struct platform_device *dev)
191{
192 struct ppc4xx_msi *msi = dev->dev.platform_data;
193 int i;
194 int virq;
195
196 for (i = 0; i < NR_MSI_IRQS; i++) {
197 virq = msi->msi_virqs[i];
198 if (virq != NO_IRQ)
199 irq_dispose_mapping(virq);
200 }
201
202 if (msi->bitmap.bitmap)
203 msi_bitmap_free(&msi->bitmap);
204 iounmap(msi->msi_regs);
205 of_node_put(msi->msi_dev);
206 kfree(msi);
207
208 return 0;
209}
210
211static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
212{
213 struct ppc4xx_msi *msi;
214 struct resource res;
215 int err = 0;
216
217 msi = &ppc4xx_msi;/*keep the msi data for further use*/
218
219 dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
220
221 msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
222 if (!msi) {
223 dev_err(&dev->dev, "No memory for MSI structure\n");
224 return -ENOMEM;
225 }
226 dev->dev.platform_data = msi;
227
228 /* Get MSI ranges */
229 err = of_address_to_resource(dev->dev.of_node, 0, &res);
230 if (err) {
231 dev_err(&dev->dev, "%s resource error!\n",
232 dev->dev.of_node->full_name);
233 goto error_out;
234 }
235
236 if (ppc4xx_setup_pcieh_hw(dev, res, msi))
237 goto error_out;
238
239 err = ppc4xx_msi_init_allocator(dev, msi);
240 if (err) {
241 dev_err(&dev->dev, "Error allocating MSI bitmap\n");
242 goto error_out;
243 }
244
245 ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
246 ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
247 ppc_md.msi_check_device = ppc4xx_msi_check_device;
248 return err;
249
250error_out:
251 ppc4xx_of_msi_remove(dev);
252 return err;
253}
254static const struct of_device_id ppc4xx_msi_ids[] = {
255 {
256 .compatible = "amcc,ppc4xx-msi",
257 },
258 {}
259};
260static struct platform_driver ppc4xx_msi_driver = {
261 .probe = ppc4xx_msi_probe,
262 .remove = ppc4xx_of_msi_remove,
263 .driver = {
264 .name = "ppc4xx-msi",
265 .owner = THIS_MODULE,
266 .of_match_table = ppc4xx_msi_ids,
267 },
268
269};
270
271static __init int ppc4xx_msi_init(void)
272{
273 return platform_driver_register(&ppc4xx_msi_driver);
274}
275
276subsys_initcall(ppc4xx_msi_init);
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index 56d9e5deccbf..c39a134e8684 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -324,7 +324,7 @@
324#define PESDR0_460EX_IHS2 0x036D 324#define PESDR0_460EX_IHS2 0x036D
325 325
326/* 326/*
327 * 460SX addtional DCRs 327 * 460SX additional DCRs
328 */ 328 */
329#define PESDRn_460SX_RCEI 0x02 329#define PESDRn_460SX_RCEI 0x02
330 330
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 90020de4dcf2..904c6cbaf45b 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -659,8 +659,7 @@ static int qe_resume(struct platform_device *ofdev)
659 return 0; 659 return 0;
660} 660}
661 661
662static int qe_probe(struct platform_device *ofdev, 662static int qe_probe(struct platform_device *ofdev)
663 const struct of_device_id *id)
664{ 663{
665 return 0; 664 return 0;
666} 665}
@@ -670,7 +669,7 @@ static const struct of_device_id qe_ids[] = {
670 { }, 669 { },
671}; 670};
672 671
673static struct of_platform_driver qe_driver = { 672static struct platform_driver qe_driver = {
674 .driver = { 673 .driver = {
675 .name = "fsl-qe", 674 .name = "fsl-qe",
676 .owner = THIS_MODULE, 675 .owner = THIS_MODULE,
@@ -682,7 +681,7 @@ static struct of_platform_driver qe_driver = {
682 681
683static int __init qe_drv_init(void) 682static int __init qe_drv_init(void)
684{ 683{
685 return of_register_platform_driver(&qe_driver); 684 return platform_driver_register(&qe_driver);
686} 685}
687device_initcall(qe_drv_init); 686device_initcall(qe_drv_init);
688#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ 687#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 541ba9863647..b2acda07220d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -189,15 +189,18 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
189 189
190static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) 190static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
191{ 191{
192 return irq_to_desc(virq)->chip_data; 192 return irq_get_chip_data(virq);
193} 193}
194 194
195#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 195static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
196{
197 return irq_data_get_irq_chip_data(d);
198}
196 199
197static void qe_ic_unmask_irq(unsigned int virq) 200static void qe_ic_unmask_irq(struct irq_data *d)
198{ 201{
199 struct qe_ic *qe_ic = qe_ic_from_irq(virq); 202 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
200 unsigned int src = virq_to_hw(virq); 203 unsigned int src = irqd_to_hwirq(d);
201 unsigned long flags; 204 unsigned long flags;
202 u32 temp; 205 u32 temp;
203 206
@@ -210,10 +213,10 @@ static void qe_ic_unmask_irq(unsigned int virq)
210 raw_spin_unlock_irqrestore(&qe_ic_lock, flags); 213 raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
211} 214}
212 215
213static void qe_ic_mask_irq(unsigned int virq) 216static void qe_ic_mask_irq(struct irq_data *d)
214{ 217{
215 struct qe_ic *qe_ic = qe_ic_from_irq(virq); 218 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
216 unsigned int src = virq_to_hw(virq); 219 unsigned int src = irqd_to_hwirq(d);
217 unsigned long flags; 220 unsigned long flags;
218 u32 temp; 221 u32 temp;
219 222
@@ -238,9 +241,9 @@ static void qe_ic_mask_irq(unsigned int virq)
238 241
239static struct irq_chip qe_ic_irq_chip = { 242static struct irq_chip qe_ic_irq_chip = {
240 .name = "QEIC", 243 .name = "QEIC",
241 .unmask = qe_ic_unmask_irq, 244 .irq_unmask = qe_ic_unmask_irq,
242 .mask = qe_ic_mask_irq, 245 .irq_mask = qe_ic_mask_irq,
243 .mask_ack = qe_ic_mask_irq, 246 .irq_mask_ack = qe_ic_mask_irq,
244}; 247};
245 248
246static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 249static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
@@ -262,10 +265,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
262 /* Default chip */ 265 /* Default chip */
263 chip = &qe_ic->hc_irq; 266 chip = &qe_ic->hc_irq;
264 267
265 set_irq_chip_data(virq, qe_ic); 268 irq_set_chip_data(virq, qe_ic);
266 irq_to_desc(virq)->status |= IRQ_LEVEL; 269 irq_set_status_flags(virq, IRQ_LEVEL);
267 270
268 set_irq_chip_and_handler(virq, chip, handle_level_irq); 271 irq_set_chip_and_handler(virq, chip, handle_level_irq);
269 272
270 return 0; 273 return 0;
271} 274}
@@ -381,13 +384,13 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
381 384
382 qe_ic_write(qe_ic->regs, QEIC_CICR, temp); 385 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
383 386
384 set_irq_data(qe_ic->virq_low, qe_ic); 387 irq_set_handler_data(qe_ic->virq_low, qe_ic);
385 set_irq_chained_handler(qe_ic->virq_low, low_handler); 388 irq_set_chained_handler(qe_ic->virq_low, low_handler);
386 389
387 if (qe_ic->virq_high != NO_IRQ && 390 if (qe_ic->virq_high != NO_IRQ &&
388 qe_ic->virq_high != qe_ic->virq_low) { 391 qe_ic->virq_high != qe_ic->virq_low) {
389 set_irq_data(qe_ic->virq_high, qe_ic); 392 irq_set_handler_data(qe_ic->virq_high, qe_ic);
390 set_irq_chained_handler(qe_ic->virq_high, high_handler); 393 irq_set_chained_handler(qe_ic->virq_high, high_handler);
391 } 394 }
392} 395}
393 396
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
new file mode 100644
index 000000000000..b2593ce30c9b
--- /dev/null
+++ b/arch/powerpc/sysdev/scom.c
@@ -0,0 +1,192 @@
1/*
2 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
3 * <benh@kernel.crashing.org>
4 * and David Gibson, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
14 * the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/debugfs.h>
23#include <linux/slab.h>
24#include <asm/prom.h>
25#include <asm/scom.h>
26
27const struct scom_controller *scom_controller;
28EXPORT_SYMBOL_GPL(scom_controller);
29
30struct device_node *scom_find_parent(struct device_node *node)
31{
32 struct device_node *par, *tmp;
33 const u32 *p;
34
35 for (par = of_node_get(node); par;) {
36 if (of_get_property(par, "scom-controller", NULL))
37 break;
38 p = of_get_property(par, "scom-parent", NULL);
39 tmp = par;
40 if (p == NULL)
41 par = of_get_parent(par);
42 else
43 par = of_find_node_by_phandle(*p);
44 of_node_put(tmp);
45 }
46 return par;
47}
48EXPORT_SYMBOL_GPL(scom_find_parent);
49
50scom_map_t scom_map_device(struct device_node *dev, int index)
51{
52 struct device_node *parent;
53 unsigned int cells, size;
54 const u32 *prop;
55 u64 reg, cnt;
56 scom_map_t ret;
57
58 parent = scom_find_parent(dev);
59
60 if (parent == NULL)
61 return 0;
62
63 prop = of_get_property(parent, "#scom-cells", NULL);
64 cells = prop ? *prop : 1;
65
66 prop = of_get_property(dev, "scom-reg", &size);
67 if (!prop)
68 return 0;
69 size >>= 2;
70
71 if (index >= (size / (2*cells)))
72 return 0;
73
74 reg = of_read_number(&prop[index * cells * 2], cells);
75 cnt = of_read_number(&prop[index * cells * 2 + cells], cells);
76
77 ret = scom_map(parent, reg, cnt);
78 of_node_put(parent);
79
80 return ret;
81}
82EXPORT_SYMBOL_GPL(scom_map_device);
83
84#ifdef CONFIG_SCOM_DEBUGFS
85struct scom_debug_entry {
86 struct device_node *dn;
87 unsigned long addr;
88 scom_map_t map;
89 spinlock_t lock;
90 char name[8];
91 struct debugfs_blob_wrapper blob;
92};
93
94static int scom_addr_set(void *data, u64 val)
95{
96 struct scom_debug_entry *ent = data;
97
98 ent->addr = 0;
99 scom_unmap(ent->map);
100
101 ent->map = scom_map(ent->dn, val, 1);
102 if (scom_map_ok(ent->map))
103 ent->addr = val;
104 else
105 return -EFAULT;
106
107 return 0;
108}
109
110static int scom_addr_get(void *data, u64 *val)
111{
112 struct scom_debug_entry *ent = data;
113 *val = ent->addr;
114 return 0;
115}
116DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set,
117 "0x%llx\n");
118
119static int scom_val_set(void *data, u64 val)
120{
121 struct scom_debug_entry *ent = data;
122
123 if (!scom_map_ok(ent->map))
124 return -EFAULT;
125
126 scom_write(ent->map, 0, val);
127
128 return 0;
129}
130
131static int scom_val_get(void *data, u64 *val)
132{
133 struct scom_debug_entry *ent = data;
134
135 if (!scom_map_ok(ent->map))
136 return -EFAULT;
137
138 *val = scom_read(ent->map, 0);
139 return 0;
140}
141DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
142 "0x%llx\n");
143
144static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
145 int i)
146{
147 struct scom_debug_entry *ent;
148 struct dentry *dir;
149
150 ent = kzalloc(sizeof(*ent), GFP_KERNEL);
151 if (!ent)
152 return -ENOMEM;
153
154 ent->dn = of_node_get(dn);
155 ent->map = SCOM_MAP_INVALID;
156 spin_lock_init(&ent->lock);
157 snprintf(ent->name, 8, "scom%d", i);
158 ent->blob.data = dn->full_name;
159 ent->blob.size = strlen(dn->full_name);
160
161 dir = debugfs_create_dir(ent->name, root);
162 if (!dir) {
163 of_node_put(dn);
164 kfree(ent);
165 return -1;
166 }
167
168 debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
169 debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
170 debugfs_create_blob("path", 0400, dir, &ent->blob);
171
172 return 0;
173}
174
175static int scom_debug_init(void)
176{
177 struct device_node *dn;
178 struct dentry *root;
179 int i, rc;
180
181 root = debugfs_create_dir("scom", powerpc_debugfs_root);
182 if (!root)
183 return -1;
184
185 i = rc = 0;
186 for_each_node_with_property(dn, "scom-controller")
187 rc |= scom_debug_init_one(root, dn, i++);
188
189 return rc;
190}
191device_initcall(scom_debug_init);
192#endif /* CONFIG_SCOM_DEBUGFS */
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index d4d15aaf18fa..ee056807b52c 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/of_net.h>
22#include <asm/tsi108.h> 23#include <asm/tsi108.h>
23 24
24#include <asm/system.h> 25#include <asm/system.h>
@@ -83,8 +84,8 @@ static int __init tsi108_eth_of_init(void)
83 memset(&tsi_eth_data, 0, sizeof(tsi_eth_data)); 84 memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
84 85
85 ret = of_address_to_resource(np, 0, &r[0]); 86 ret = of_address_to_resource(np, 0, &r[0]);
86 DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n", 87 DBG("%s: name:start->end = %s:%pR\n",
87 __func__,r[0].name, r[0].start, r[0].end); 88 __func__, r[0].name, &r[0]);
88 if (ret) 89 if (ret)
89 goto err; 90 goto err;
90 91
@@ -92,8 +93,8 @@ static int __init tsi108_eth_of_init(void)
92 r[1].start = irq_of_parse_and_map(np, 0); 93 r[1].start = irq_of_parse_and_map(np, 0);
93 r[1].end = irq_of_parse_and_map(np, 0); 94 r[1].end = irq_of_parse_and_map(np, 0);
94 r[1].flags = IORESOURCE_IRQ; 95 r[1].flags = IORESOURCE_IRQ;
95 DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n", 96 DBG("%s: name:start->end = %s:%pR\n",
96 __func__,r[1].name, r[1].start, r[1].end); 97 __func__, r[1].name, &r[1]);
97 98
98 tsi_eth_dev = 99 tsi_eth_dev =
99 platform_device_register_simple("tsi-ethernet", i++, &r[0], 100 platform_device_register_simple("tsi-ethernet", i++, &r[0],
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 0ab9281e49ae..4d18658116e5 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -343,24 +343,9 @@ static inline unsigned int get_pci_source(void)
343 * Linux descriptor level callbacks 343 * Linux descriptor level callbacks
344 */ 344 */
345 345
346static void tsi108_pci_irq_enable(u_int irq) 346static void tsi108_pci_irq_unmask(struct irq_data *d)
347{ 347{
348 tsi108_pci_int_unmask(irq); 348 tsi108_pci_int_unmask(d->irq);
349}
350
351static void tsi108_pci_irq_disable(u_int irq)
352{
353 tsi108_pci_int_mask(irq);
354}
355
356static void tsi108_pci_irq_ack(u_int irq)
357{
358 tsi108_pci_int_mask(irq);
359}
360
361static void tsi108_pci_irq_end(u_int irq)
362{
363 tsi108_pci_int_unmask(irq);
364 349
365 /* Enable interrupts from PCI block */ 350 /* Enable interrupts from PCI block */
366 tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, 351 tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
@@ -370,16 +355,25 @@ static void tsi108_pci_irq_end(u_int irq)
370 mb(); 355 mb();
371} 356}
372 357
358static void tsi108_pci_irq_mask(struct irq_data *d)
359{
360 tsi108_pci_int_mask(d->irq);
361}
362
363static void tsi108_pci_irq_ack(struct irq_data *d)
364{
365 tsi108_pci_int_mask(d->irq);
366}
367
373/* 368/*
374 * Interrupt controller descriptor for cascaded PCI interrupt controller. 369 * Interrupt controller descriptor for cascaded PCI interrupt controller.
375 */ 370 */
376 371
377static struct irq_chip tsi108_pci_irq = { 372static struct irq_chip tsi108_pci_irq = {
378 .name = "tsi108_PCI_int", 373 .name = "tsi108_PCI_int",
379 .mask = tsi108_pci_irq_disable, 374 .irq_mask = tsi108_pci_irq_mask,
380 .ack = tsi108_pci_irq_ack, 375 .irq_ack = tsi108_pci_irq_ack,
381 .end = tsi108_pci_irq_end, 376 .irq_unmask = tsi108_pci_irq_unmask,
382 .unmask = tsi108_pci_irq_enable,
383}; 377};
384 378
385static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, 379static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
@@ -397,8 +391,8 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
397 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); 391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
398 if ((virq >= 1) && (virq <= 4)){ 392 if ((virq >= 1) && (virq <= 4)){
399 irq = virq + IRQ_PCI_INTAD_BASE - 1; 393 irq = virq + IRQ_PCI_INTAD_BASE - 1;
400 irq_to_desc(irq)->status |= IRQ_LEVEL; 394 irq_set_status_flags(irq, IRQ_LEVEL);
401 set_irq_chip(irq, &tsi108_pci_irq); 395 irq_set_chip(irq, &tsi108_pci_irq);
402 } 396 }
403 return 0; 397 return 0;
404} 398}
@@ -437,8 +431,11 @@ void __init tsi108_pci_int_init(struct device_node *node)
437 431
438void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) 432void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
439{ 433{
434 struct irq_chip *chip = irq_desc_get_chip(desc);
440 unsigned int cascade_irq = get_pci_source(); 435 unsigned int cascade_irq = get_pci_source();
436
441 if (cascade_irq != NO_IRQ) 437 if (cascade_irq != NO_IRQ)
442 generic_handle_irq(cascade_irq); 438 generic_handle_irq(cascade_irq);
443 desc->chip->eoi(irq); 439
440 chip->irq_eoi(&desc->irq_data);
444} 441}
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 0038fb78f094..984cd2029158 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -41,8 +41,6 @@
41#define UIC_VR 0x7 41#define UIC_VR 0x7
42#define UIC_VCR 0x8 42#define UIC_VCR 0x8
43 43
44#define uic_irq_to_hw(virq) (irq_map[virq].hwirq)
45
46struct uic *primary_uic; 44struct uic *primary_uic;
47 45
48struct uic { 46struct uic {
@@ -55,18 +53,17 @@ struct uic {
55 struct irq_host *irqhost; 53 struct irq_host *irqhost;
56}; 54};
57 55
58static void uic_unmask_irq(unsigned int virq) 56static void uic_unmask_irq(struct irq_data *d)
59{ 57{
60 struct irq_desc *desc = irq_to_desc(virq); 58 struct uic *uic = irq_data_get_irq_chip_data(d);
61 struct uic *uic = get_irq_chip_data(virq); 59 unsigned int src = irqd_to_hwirq(d);
62 unsigned int src = uic_irq_to_hw(virq);
63 unsigned long flags; 60 unsigned long flags;
64 u32 er, sr; 61 u32 er, sr;
65 62
66 sr = 1 << (31-src); 63 sr = 1 << (31-src);
67 spin_lock_irqsave(&uic->lock, flags); 64 spin_lock_irqsave(&uic->lock, flags);
68 /* ack level-triggered interrupts here */ 65 /* ack level-triggered interrupts here */
69 if (desc->status & IRQ_LEVEL) 66 if (irqd_is_level_type(d))
70 mtdcr(uic->dcrbase + UIC_SR, sr); 67 mtdcr(uic->dcrbase + UIC_SR, sr);
71 er = mfdcr(uic->dcrbase + UIC_ER); 68 er = mfdcr(uic->dcrbase + UIC_ER);
72 er |= sr; 69 er |= sr;
@@ -74,10 +71,10 @@ static void uic_unmask_irq(unsigned int virq)
74 spin_unlock_irqrestore(&uic->lock, flags); 71 spin_unlock_irqrestore(&uic->lock, flags);
75} 72}
76 73
77static void uic_mask_irq(unsigned int virq) 74static void uic_mask_irq(struct irq_data *d)
78{ 75{
79 struct uic *uic = get_irq_chip_data(virq); 76 struct uic *uic = irq_data_get_irq_chip_data(d);
80 unsigned int src = uic_irq_to_hw(virq); 77 unsigned int src = irqd_to_hwirq(d);
81 unsigned long flags; 78 unsigned long flags;
82 u32 er; 79 u32 er;
83 80
@@ -88,10 +85,10 @@ static void uic_mask_irq(unsigned int virq)
88 spin_unlock_irqrestore(&uic->lock, flags); 85 spin_unlock_irqrestore(&uic->lock, flags);
89} 86}
90 87
91static void uic_ack_irq(unsigned int virq) 88static void uic_ack_irq(struct irq_data *d)
92{ 89{
93 struct uic *uic = get_irq_chip_data(virq); 90 struct uic *uic = irq_data_get_irq_chip_data(d);
94 unsigned int src = uic_irq_to_hw(virq); 91 unsigned int src = irqd_to_hwirq(d);
95 unsigned long flags; 92 unsigned long flags;
96 93
97 spin_lock_irqsave(&uic->lock, flags); 94 spin_lock_irqsave(&uic->lock, flags);
@@ -99,11 +96,10 @@ static void uic_ack_irq(unsigned int virq)
99 spin_unlock_irqrestore(&uic->lock, flags); 96 spin_unlock_irqrestore(&uic->lock, flags);
100} 97}
101 98
102static void uic_mask_ack_irq(unsigned int virq) 99static void uic_mask_ack_irq(struct irq_data *d)
103{ 100{
104 struct irq_desc *desc = irq_to_desc(virq); 101 struct uic *uic = irq_data_get_irq_chip_data(d);
105 struct uic *uic = get_irq_chip_data(virq); 102 unsigned int src = irqd_to_hwirq(d);
106 unsigned int src = uic_irq_to_hw(virq);
107 unsigned long flags; 103 unsigned long flags;
108 u32 er, sr; 104 u32 er, sr;
109 105
@@ -120,23 +116,22 @@ static void uic_mask_ack_irq(unsigned int virq)
120 * level interrupts are ack'ed after the actual 116 * level interrupts are ack'ed after the actual
121 * isr call in the uic_unmask_irq() 117 * isr call in the uic_unmask_irq()
122 */ 118 */
123 if (!(desc->status & IRQ_LEVEL)) 119 if (!irqd_is_level_type(d))
124 mtdcr(uic->dcrbase + UIC_SR, sr); 120 mtdcr(uic->dcrbase + UIC_SR, sr);
125 spin_unlock_irqrestore(&uic->lock, flags); 121 spin_unlock_irqrestore(&uic->lock, flags);
126} 122}
127 123
128static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) 124static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
129{ 125{
130 struct uic *uic = get_irq_chip_data(virq); 126 struct uic *uic = irq_data_get_irq_chip_data(d);
131 unsigned int src = uic_irq_to_hw(virq); 127 unsigned int src = irqd_to_hwirq(d);
132 struct irq_desc *desc = irq_to_desc(virq);
133 unsigned long flags; 128 unsigned long flags;
134 int trigger, polarity; 129 int trigger, polarity;
135 u32 tr, pr, mask; 130 u32 tr, pr, mask;
136 131
137 switch (flow_type & IRQ_TYPE_SENSE_MASK) { 132 switch (flow_type & IRQ_TYPE_SENSE_MASK) {
138 case IRQ_TYPE_NONE: 133 case IRQ_TYPE_NONE:
139 uic_mask_irq(virq); 134 uic_mask_irq(d);
140 return 0; 135 return 0;
141 136
142 case IRQ_TYPE_EDGE_RISING: 137 case IRQ_TYPE_EDGE_RISING:
@@ -166,11 +161,6 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
166 mtdcr(uic->dcrbase + UIC_PR, pr); 161 mtdcr(uic->dcrbase + UIC_PR, pr);
167 mtdcr(uic->dcrbase + UIC_TR, tr); 162 mtdcr(uic->dcrbase + UIC_TR, tr);
168 163
169 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
170 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
171 if (!trigger)
172 desc->status |= IRQ_LEVEL;
173
174 spin_unlock_irqrestore(&uic->lock, flags); 164 spin_unlock_irqrestore(&uic->lock, flags);
175 165
176 return 0; 166 return 0;
@@ -178,11 +168,11 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
178 168
179static struct irq_chip uic_irq_chip = { 169static struct irq_chip uic_irq_chip = {
180 .name = "UIC", 170 .name = "UIC",
181 .unmask = uic_unmask_irq, 171 .irq_unmask = uic_unmask_irq,
182 .mask = uic_mask_irq, 172 .irq_mask = uic_mask_irq,
183 .mask_ack = uic_mask_ack_irq, 173 .irq_mask_ack = uic_mask_ack_irq,
184 .ack = uic_ack_irq, 174 .irq_ack = uic_ack_irq,
185 .set_type = uic_set_irq_type, 175 .irq_set_type = uic_set_irq_type,
186}; 176};
187 177
188static int uic_host_map(struct irq_host *h, unsigned int virq, 178static int uic_host_map(struct irq_host *h, unsigned int virq,
@@ -190,13 +180,13 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
190{ 180{
191 struct uic *uic = h->host_data; 181 struct uic *uic = h->host_data;
192 182
193 set_irq_chip_data(virq, uic); 183 irq_set_chip_data(virq, uic);
194 /* Despite the name, handle_level_irq() works for both level 184 /* Despite the name, handle_level_irq() works for both level
195 * and edge irqs on UIC. FIXME: check this is correct */ 185 * and edge irqs on UIC. FIXME: check this is correct */
196 set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); 186 irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
197 187
198 /* Set default irq type */ 188 /* Set default irq type */
199 set_irq_type(virq, IRQ_TYPE_NONE); 189 irq_set_irq_type(virq, IRQ_TYPE_NONE);
200 190
201 return 0; 191 return 0;
202} 192}
@@ -220,16 +210,18 @@ static struct irq_host_ops uic_host_ops = {
220 210
221void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) 211void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
222{ 212{
223 struct uic *uic = get_irq_data(virq); 213 struct irq_chip *chip = irq_desc_get_chip(desc);
214 struct irq_data *idata = irq_desc_get_irq_data(desc);
215 struct uic *uic = irq_get_handler_data(virq);
224 u32 msr; 216 u32 msr;
225 int src; 217 int src;
226 int subvirq; 218 int subvirq;
227 219
228 raw_spin_lock(&desc->lock); 220 raw_spin_lock(&desc->lock);
229 if (desc->status & IRQ_LEVEL) 221 if (irqd_is_level_type(idata))
230 desc->chip->mask(virq); 222 chip->irq_mask(idata);
231 else 223 else
232 desc->chip->mask_ack(virq); 224 chip->irq_mask_ack(idata);
233 raw_spin_unlock(&desc->lock); 225 raw_spin_unlock(&desc->lock);
234 226
235 msr = mfdcr(uic->dcrbase + UIC_MSR); 227 msr = mfdcr(uic->dcrbase + UIC_MSR);
@@ -243,10 +235,10 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
243 235
244uic_irq_ret: 236uic_irq_ret:
245 raw_spin_lock(&desc->lock); 237 raw_spin_lock(&desc->lock);
246 if (desc->status & IRQ_LEVEL) 238 if (irqd_is_level_type(idata))
247 desc->chip->ack(virq); 239 chip->irq_ack(idata);
248 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 240 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
249 desc->chip->unmask(virq); 241 chip->irq_unmask(idata);
250 raw_spin_unlock(&desc->lock); 242 raw_spin_unlock(&desc->lock);
251} 243}
252 244
@@ -335,8 +327,8 @@ void __init uic_init_tree(void)
335 327
336 cascade_virq = irq_of_parse_and_map(np, 0); 328 cascade_virq = irq_of_parse_and_map(np, 0);
337 329
338 set_irq_data(cascade_virq, uic); 330 irq_set_handler_data(cascade_virq, uic);
339 set_irq_chained_handler(cascade_virq, uic_irq_cascade); 331 irq_set_chained_handler(cascade_virq, uic_irq_cascade);
340 332
341 /* FIXME: setup critical cascade?? */ 333 /* FIXME: setup critical cascade?? */
342 } 334 }
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
new file mode 100644
index 000000000000..0031eda320c3
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -0,0 +1,13 @@
1config PPC_XICS
2 def_bool n
3 select PPC_SMP_MUXED_IPI
4
5config PPC_ICP_NATIVE
6 def_bool n
7
8config PPC_ICP_HV
9 def_bool n
10
11config PPC_ICS_RTAS
12 def_bool n
13
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
new file mode 100644
index 000000000000..b75a6059337f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -0,0 +1,6 @@
1subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
2
3obj-y += xics-common.o
4obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
5obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
6obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
new file mode 100644
index 000000000000..9518d367a64f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/irq.h>
13#include <linux/smp.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/of.h>
18
19#include <asm/smp.h>
20#include <asm/irq.h>
21#include <asm/errno.h>
22#include <asm/xics.h>
23#include <asm/io.h>
24#include <asm/hvcall.h>
25
26static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
27{
28 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
29 long rc;
30
31 rc = plpar_hcall(H_XIRR, retbuf, cppr);
32 if (rc != H_SUCCESS)
33 panic(" bad return code xirr - rc = %lx\n", rc);
34 return (unsigned int)retbuf[0];
35}
36
37static inline void icp_hv_set_xirr(unsigned int value)
38{
39 long rc = plpar_hcall_norets(H_EOI, value);
40 if (rc != H_SUCCESS)
41 panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
42}
43
44static inline void icp_hv_set_cppr(u8 value)
45{
46 long rc = plpar_hcall_norets(H_CPPR, value);
47 if (rc != H_SUCCESS)
48 panic("bad return code cppr - rc = %lx\n", rc);
49}
50
51static inline void icp_hv_set_qirr(int n_cpu , u8 value)
52{
53 long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
54 value);
55 if (rc != H_SUCCESS)
56 panic("bad return code qirr - rc = %lx\n", rc);
57}
58
59static void icp_hv_eoi(struct irq_data *d)
60{
61 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
62
63 iosync();
64 icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
65}
66
67static void icp_hv_teardown_cpu(void)
68{
69 int cpu = smp_processor_id();
70
71 /* Clear any pending IPI */
72 icp_hv_set_qirr(cpu, 0xff);
73}
74
75static void icp_hv_flush_ipi(void)
76{
77 /* We take the ipi irq but never return, so we
78 * need to EOI the IPI, but want to leave our priority 0
79 *
80 * should we check all the other interrupts too?
81 * should we be flagging idle loop instead?
82 * or creating some task to be scheduled?
83 */
84
85 icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
86}
87
88static unsigned int icp_hv_get_irq(void)
89{
90 unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
91 unsigned int vec = xirr & 0x00ffffff;
92 unsigned int irq;
93
94 if (vec == XICS_IRQ_SPURIOUS)
95 return NO_IRQ;
96
97 irq = irq_radix_revmap_lookup(xics_host, vec);
98 if (likely(irq != NO_IRQ)) {
99 xics_push_cppr(vec);
100 return irq;
101 }
102
103 /* We don't have a linux mapping, so have rtas mask it. */
104 xics_mask_unknown_vec(vec);
105
106 /* We might learn about it later, so EOI it */
107 icp_hv_set_xirr(xirr);
108
109 return NO_IRQ;
110}
111
112static void icp_hv_set_cpu_priority(unsigned char cppr)
113{
114 xics_set_base_cppr(cppr);
115 icp_hv_set_cppr(cppr);
116 iosync();
117}
118
119#ifdef CONFIG_SMP
120
121static void icp_hv_cause_ipi(int cpu, unsigned long data)
122{
123 icp_hv_set_qirr(cpu, IPI_PRIORITY);
124}
125
126static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
127{
128 int cpu = smp_processor_id();
129
130 icp_hv_set_qirr(cpu, 0xff);
131
132 return smp_ipi_demux();
133}
134
135#endif /* CONFIG_SMP */
136
137static const struct icp_ops icp_hv_ops = {
138 .get_irq = icp_hv_get_irq,
139 .eoi = icp_hv_eoi,
140 .set_priority = icp_hv_set_cpu_priority,
141 .teardown_cpu = icp_hv_teardown_cpu,
142 .flush_ipi = icp_hv_flush_ipi,
143#ifdef CONFIG_SMP
144 .ipi_action = icp_hv_ipi_action,
145 .cause_ipi = icp_hv_cause_ipi,
146#endif
147};
148
149int icp_hv_init(void)
150{
151 struct device_node *np;
152
153 np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
154 if (!np)
155 np = of_find_node_by_type(NULL,
156 "PowerPC-External-Interrupt-Presentation");
157 if (!np)
158 return -ENODEV;
159
160 icp_ops = &icp_hv_ops;
161
162 return 0;
163}
164
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
new file mode 100644
index 000000000000..1f15ad436140
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -0,0 +1,293 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/irq.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/cpu.h>
18#include <linux/of.h>
19#include <linux/spinlock.h>
20
21#include <asm/prom.h>
22#include <asm/io.h>
23#include <asm/smp.h>
24#include <asm/irq.h>
25#include <asm/errno.h>
26#include <asm/xics.h>
27
28struct icp_ipl {
29 union {
30 u32 word;
31 u8 bytes[4];
32 } xirr_poll;
33 union {
34 u32 word;
35 u8 bytes[4];
36 } xirr;
37 u32 dummy;
38 union {
39 u32 word;
40 u8 bytes[4];
41 } qirr;
42 u32 link_a;
43 u32 link_b;
44 u32 link_c;
45};
46
47static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
48
49static inline unsigned int icp_native_get_xirr(void)
50{
51 int cpu = smp_processor_id();
52
53 return in_be32(&icp_native_regs[cpu]->xirr.word);
54}
55
56static inline void icp_native_set_xirr(unsigned int value)
57{
58 int cpu = smp_processor_id();
59
60 out_be32(&icp_native_regs[cpu]->xirr.word, value);
61}
62
63static inline void icp_native_set_cppr(u8 value)
64{
65 int cpu = smp_processor_id();
66
67 out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
68}
69
70static inline void icp_native_set_qirr(int n_cpu, u8 value)
71{
72 out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
73}
74
75static void icp_native_set_cpu_priority(unsigned char cppr)
76{
77 xics_set_base_cppr(cppr);
78 icp_native_set_cppr(cppr);
79 iosync();
80}
81
82static void icp_native_eoi(struct irq_data *d)
83{
84 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
85
86 iosync();
87 icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
88}
89
90static void icp_native_teardown_cpu(void)
91{
92 int cpu = smp_processor_id();
93
94 /* Clear any pending IPI */
95 icp_native_set_qirr(cpu, 0xff);
96}
97
98static void icp_native_flush_ipi(void)
99{
100 /* We take the ipi irq but never return, so we
101 * need to EOI the IPI, but want to leave our priority 0
102 *
103 * should we check all the other interrupts too?
104 * should we be flagging idle loop instead?
105 * or creating some task to be scheduled?
106 */
107
108 icp_native_set_xirr((0x00 << 24) | XICS_IPI);
109}
110
111static unsigned int icp_native_get_irq(void)
112{
113 unsigned int xirr = icp_native_get_xirr();
114 unsigned int vec = xirr & 0x00ffffff;
115 unsigned int irq;
116
117 if (vec == XICS_IRQ_SPURIOUS)
118 return NO_IRQ;
119
120 irq = irq_radix_revmap_lookup(xics_host, vec);
121 if (likely(irq != NO_IRQ)) {
122 xics_push_cppr(vec);
123 return irq;
124 }
125
126 /* We don't have a linux mapping, so have rtas mask it. */
127 xics_mask_unknown_vec(vec);
128
129 /* We might learn about it later, so EOI it */
130 icp_native_set_xirr(xirr);
131
132 return NO_IRQ;
133}
134
135#ifdef CONFIG_SMP
136
137static void icp_native_cause_ipi(int cpu, unsigned long data)
138{
139 icp_native_set_qirr(cpu, IPI_PRIORITY);
140}
141
142static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
143{
144 int cpu = smp_processor_id();
145
146 icp_native_set_qirr(cpu, 0xff);
147
148 return smp_ipi_demux();
149}
150
151#endif /* CONFIG_SMP */
152
153static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
154 unsigned long size)
155{
156 char *rname;
157 int i, cpu = -1;
158
159 /* This may look gross but it's good enough for now, we don't quite
160 * have a hard -> linux processor id matching.
161 */
162 for_each_possible_cpu(i) {
163 if (!cpu_present(i))
164 continue;
165 if (hw_id == get_hard_smp_processor_id(i)) {
166 cpu = i;
167 break;
168 }
169 }
170
171 /* Fail, skip that CPU. Don't print, it's normal, some XICS come up
172 * with way more entries in there than you have CPUs
173 */
174 if (cpu == -1)
175 return 0;
176
177 rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
178 cpu, hw_id);
179
180 if (!request_mem_region(addr, size, rname)) {
181 pr_warning("icp_native: Could not reserve ICP MMIO"
182 " for CPU %d, interrupt server #0x%x\n",
183 cpu, hw_id);
184 return -EBUSY;
185 }
186
187 icp_native_regs[cpu] = ioremap(addr, size);
188 if (!icp_native_regs[cpu]) {
189 pr_warning("icp_native: Failed ioremap for CPU %d, "
190 "interrupt server #0x%x, addr %#lx\n",
191 cpu, hw_id, addr);
192 release_mem_region(addr, size);
193 return -ENOMEM;
194 }
195 return 0;
196}
197
198static int __init icp_native_init_one_node(struct device_node *np,
199 unsigned int *indx)
200{
201 unsigned int ilen;
202 const u32 *ireg;
203 int i;
204 int reg_tuple_size;
205 int num_servers = 0;
206
207 /* This code makes the theoretically broken assumption that the interrupt
208 * server numbers are the same as the hard CPU numbers.
209 * This happens to be the case so far but we are playing with fire...
210 * should be fixed one of these days. -BenH.
211 */
212 ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
213
214 /* Does that ever happen? We'll know soon enough... but even good old
215 * f80 does have that property.
216 */
217 WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));
218
219 if (ireg) {
220 *indx = of_read_number(ireg, 1);
221 if (ilen >= 2*sizeof(u32))
222 num_servers = of_read_number(ireg + 1, 1);
223 }
224
225 ireg = of_get_property(np, "reg", &ilen);
226 if (!ireg) {
227 pr_err("icp_native: Can't find interrupt reg property");
228 return -1;
229 }
230
231 reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
232 if (((ilen % reg_tuple_size) != 0)
233 || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
234 pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
235 ilen / reg_tuple_size, num_servers);
236 return -1;
237 }
238
239 for (i = 0; i < (ilen / reg_tuple_size); i++) {
240 struct resource r;
241 int err;
242
243 err = of_address_to_resource(np, i, &r);
244 if (err) {
245 pr_err("icp_native: Could not translate ICP MMIO"
246 " for interrupt server 0x%x (%d)\n", *indx, err);
247 return -1;
248 }
249
250 if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start))
251 return -1;
252
253 (*indx)++;
254 }
255 return 0;
256}
257
258static const struct icp_ops icp_native_ops = {
259 .get_irq = icp_native_get_irq,
260 .eoi = icp_native_eoi,
261 .set_priority = icp_native_set_cpu_priority,
262 .teardown_cpu = icp_native_teardown_cpu,
263 .flush_ipi = icp_native_flush_ipi,
264#ifdef CONFIG_SMP
265 .ipi_action = icp_native_ipi_action,
266 .cause_ipi = icp_native_cause_ipi,
267#endif
268};
269
270int icp_native_init(void)
271{
272 struct device_node *np;
273 u32 indx = 0;
274 int found = 0;
275
276 for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
277 if (icp_native_init_one_node(np, &indx) == 0)
278 found = 1;
279 if (!found) {
280 for_each_node_by_type(np,
281 "PowerPC-External-Interrupt-Presentation") {
282 if (icp_native_init_one_node(np, &indx) == 0)
283 found = 1;
284 }
285 }
286
287 if (found == 0)
288 return -ENODEV;
289
290 icp_ops = &icp_native_ops;
291
292 return 0;
293}
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
new file mode 100644
index 000000000000..c782f85cf7e4
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -0,0 +1,240 @@
1#include <linux/types.h>
2#include <linux/kernel.h>
3#include <linux/irq.h>
4#include <linux/smp.h>
5#include <linux/interrupt.h>
6#include <linux/init.h>
7#include <linux/cpu.h>
8#include <linux/of.h>
9#include <linux/spinlock.h>
10#include <linux/msi.h>
11
12#include <asm/prom.h>
13#include <asm/smp.h>
14#include <asm/machdep.h>
15#include <asm/irq.h>
16#include <asm/errno.h>
17#include <asm/xics.h>
18#include <asm/rtas.h>
19
/* RTAS service tokens, resolved once in ics_rtas_init() */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;

/* struct ics callbacks, defined further down in this file */
static int ics_rtas_map(struct ics *ics, unsigned int virq);
static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
static int ics_rtas_host_match(struct ics *ics, struct device_node *node);

/* Only one global & state struct ics */
static struct ics ics_rtas = {
	.map		= ics_rtas_map,
	.mask_unknown	= ics_rtas_mask_unknown,
	.get_server	= ics_rtas_get_server,
	.host_match	= ics_rtas_host_match,
};
38
39static void ics_rtas_unmask_irq(struct irq_data *d)
40{
41 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
42 int call_status;
43 int server;
44
45 pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
46
47 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
48 return;
49
50 server = xics_get_irq_server(d->irq, d->affinity, 0);
51
52 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
53 DEFAULT_PRIORITY);
54 if (call_status != 0) {
55 printk(KERN_ERR
56 "%s: ibm_set_xive irq %u server %x returned %d\n",
57 __func__, hw_irq, server, call_status);
58 return;
59 }
60
61 /* Now unmask the interrupt (often a no-op) */
62 call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
63 if (call_status != 0) {
64 printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
65 __func__, hw_irq, call_status);
66 return;
67 }
68}
69
/*
 * irq_startup hook: unmask at the MSI level if needed, then unmask
 * the source itself via RTAS.  Always reports success (returns 0).
 */
static unsigned int ics_rtas_startup(struct irq_data *d)
{
#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (d->msi_desc)
		unmask_msi_irq(d);
#endif
	/* unmask it */
	ics_rtas_unmask_irq(d);
	return 0;
}
85
86static void ics_rtas_mask_real_irq(unsigned int hw_irq)
87{
88 int call_status;
89
90 if (hw_irq == XICS_IPI)
91 return;
92
93 call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
94 if (call_status != 0) {
95 printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
96 __func__, hw_irq, call_status);
97 return;
98 }
99
100 /* Have to set XIVE to 0xff to be able to remove a slot */
101 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
102 xics_default_server, 0xff);
103 if (call_status != 0) {
104 printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
105 __func__, hw_irq, call_status);
106 return;
107 }
108}
109
110static void ics_rtas_mask_irq(struct irq_data *d)
111{
112 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
113
114 pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
115
116 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
117 return;
118 ics_rtas_mask_real_irq(hw_irq);
119}
120
/*
 * irq_set_affinity hook: re-route hw_irq to a server derived from
 * @cpumask while preserving the source's current priority.
 *
 * Returns IRQ_SET_MASK_OK on success, -1 on RTAS failure, for the
 * IPI/spurious pseudo-vectors, or when no online cpu is in the mask.
 */
static int ics_rtas_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int status;
	int xics_status[2];	/* [0] = server, [1] = priority */
	int irq_server;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	/* Read the current (server, priority) pair for this source */
	status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);

	if (status) {
		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
			__func__, hw_irq, status);
		return -1;
	}

	/* Pick a server; strict_check=1 so an empty online set fails */
	irq_server = xics_get_irq_server(d->irq, cpumask, 1);
	if (irq_server == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		printk(KERN_WARNING
			"%s: No online cpus in the mask %s for irq %d\n",
			__func__, cpulist, d->irq);
		return -1;
	}

	/* Re-route, keeping the previously read priority */
	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   hw_irq, irq_server, xics_status[1]);

	if (status) {
		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
			__func__, hw_irq, status);
		return -1;
	}

	return IRQ_SET_MASK_OK;
}
162
/*
 * irq_chip for sources owned by the RTAS ICS.  irq_eoi is left NULL
 * here and filled in with the active ICP's eoi by ics_rtas_init().
 */
static struct irq_chip ics_rtas_irq_chip = {
	.name = "XICS",
	.irq_startup = ics_rtas_startup,
	.irq_mask = ics_rtas_mask_irq,
	.irq_unmask = ics_rtas_unmask_irq,
	.irq_eoi = NULL, /* Patched at init time */
	.irq_set_affinity = ics_rtas_set_affinity
};
171
172static int ics_rtas_map(struct ics *ics, unsigned int virq)
173{
174 unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
175 int status[2];
176 int rc;
177
178 if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
179 return -EINVAL;
180
181 /* Check if RTAS knows about this interrupt */
182 rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
183 if (rc)
184 return -ENXIO;
185
186 irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
187 irq_set_chip_data(virq, &ics_rtas);
188
189 return 0;
190}
191
/*
 * struct ics callback: mask a vector nobody claims (invoked from
 * xics_mask_unknown_vec()).
 */
static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
{
	ics_rtas_mask_real_irq(vec);
}
196
197static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
198{
199 int rc, status[2];
200
201 rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
202 if (rc)
203 return -1;
204 return status[0];
205}
206
static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc... The trick we use here is to match
	 * everything here except the legacy 8259 which is compatible "chrp,iic"
	 */
	if (of_device_is_compatible(node, "chrp,iic"))
		return 0;

	return 1;
}
215
/*
 * Probe for the RTAS flavour of ICS: resolve the RTAS tokens, bail
 * out with -ENODEV when the mandatory get/set-xive services are
 * absent, then register the single ics_rtas instance.
 *
 * Must run after an ICP backend was selected (icp_ops valid), since
 * the chip's EOI is patched from it.
 */
int ics_rtas_init(void)
{
	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	/* We enable the RTAS "ICS" if RTAS is present with the
	 * appropriate tokens
	 */
	if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_xive == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	/* We need to patch our irq chip's EOI to point to the
	 * right ICP
	 */
	ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;

	/* Register ourselves */
	xics_register_ics(&ics_rtas);

	return 0;
}
240
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
new file mode 100644
index 000000000000..445c5a01b766
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10#include <linux/types.h>
11#include <linux/threads.h>
12#include <linux/kernel.h>
13#include <linux/irq.h>
14#include <linux/debugfs.h>
15#include <linux/smp.h>
16#include <linux/interrupt.h>
17#include <linux/seq_file.h>
18#include <linux/init.h>
19#include <linux/cpu.h>
20#include <linux/of.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24#include <asm/prom.h>
25#include <asm/io.h>
26#include <asm/smp.h>
27#include <asm/machdep.h>
28#include <asm/irq.h>
29#include <asm/errno.h>
30#include <asm/rtas.h>
31#include <asm/xics.h>
32#include <asm/firmware.h>
33
/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;		/* active presentation controller */

unsigned int xics_default_server = 0xff;	/* boot cpu hw server id */
unsigned int xics_default_distrib_server = 0;	/* global distribution server */
unsigned int xics_interrupt_server_size = 8;	/* server# width in bits */

/* Per-cpu CPPR (processor priority) save area */
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);

/* The single irq_host covering all XICS interrupts */
struct irq_host *xics_host;

/* All registered source controllers (see xics_register_ics()) */
static LIST_HEAD(ics_list);
46
/*
 * (Re)compute xics_default_server and xics_default_distrib_server
 * from the boot cpu's device-tree node.  Called at init and again
 * from xics_migrate_irqs_away() when the default server goes away.
 */
void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	u32 ilen;
	const u32 *ireg;
	u32 hcpuid;

	/* Find the server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	hcpuid = get_hard_smp_processor_id(boot_cpuid);
	xics_default_server = xics_default_distrib_server = hcpuid;

	pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		/* No gserver property: hcpuid stays the distrib server too */
		of_node_put(np);
		return;
	}

	i = ilen / sizeof(int);	/* number of u32 cells in the property */

	/* Global interrupt distribution server is specified in the last
	 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
	 * entry fom this property for current boot cpu id and use it as
	 * default distribution server
	 */
	for (j = 0; j < i; j += 2) {
		/* cells come in (server, gserver) pairs */
		if (ireg[j] == hcpuid) {
			xics_default_distrib_server = ireg[j+1];
			break;
		}
	}
	pr_devel("xics: xics_default_distrib_server = 0x%x\n",
		 xics_default_distrib_server);
	of_node_put(np);
}
87
/* GIQ stuff, currently only supported on RTAS setups, will have
 * to be sorted properly for bare metal
 */
/*
 * Join (join=1) or leave (join=0) the Global Interrupt Queue for a
 * given server.  Silently does nothing when the indicator isn't
 * present or CONFIG_PPC_RTAS is off.
 */
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
	int index, status;

	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
		return;

	/* GIQ indices count down from the top of the server number space */
	index = (1UL << xics_interrupt_server_size) - 1 - gserver;

	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);

	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif
}
108
/* Per-cpu XICS bringup: accept all priorities and join the GIQ. */
void xics_setup_cpu(void)
{
	icp_ops->set_priority(LOWEST_PRIORITY);

	xics_set_cpu_giq(xics_default_distrib_server, 1);
}
115
/*
 * An interrupt arrived on a vector no one claims: report it and ask
 * every registered ICS to mask it at the source.
 */
void xics_mask_unknown_vec(unsigned int vec)
{
	struct ics *ics;

	pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);

	list_for_each_entry(ics, &ics_list, link)
		ics->mask_unknown(ics, vec);
}
125
126
127#ifdef CONFIG_SMP
128
/*
 * Map and request the XICS IPI.  Failure is fatal (BUG_ON) since SMP
 * cannot operate without a working IPI.
 */
static void xics_request_ipi(void)
{
	unsigned int ipi;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled, and PERCPU. The handler was set in map.
	 */
	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
			   IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
}
143
/*
 * SMP probe hook: wire up the ICP's IPI trigger and register the IPI
 * handler.  Returns the number of possible cpus.
 */
int __init xics_smp_probe(void)
{
	/* Setup cause_ipi callback based on which ICP is used */
	smp_ops->cause_ipi = icp_ops->cause_ipi;

	/* Register all the IPIs */
	xics_request_ipi();

	return cpumask_weight(cpu_possible_mask);
}
154
155#endif /* CONFIG_SMP */
156
/*
 * Tear down XICS state on this cpu (offline/kexec path): reset the
 * per-cpu CPPR stack, reject all interrupts (priority 0) and let the
 * ICP backend do any implementation-specific cleanup.
 */
void xics_teardown_cpu(void)
{
	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);

	/*
	 * we have to reset the cppr index to 0 because we're
	 * not going to return from the IPI
	 */
	os_cppr->index = 0;
	icp_ops->set_priority(0);
	icp_ops->teardown_cpu();
}
169
/*
 * Kexec variant of cpu teardown: also flush any pending IPI and, for
 * secondary cpus, leave the global interrupt queue.
 */
void xics_kexec_teardown_cpu(int secondary)
{
	xics_teardown_cpu();

	icp_ops->flush_ipi();

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		xics_set_cpu_giq(xics_default_distrib_server, 0);
}
183
184
185#ifdef CONFIG_HOTPLUG_CPU
186
/* Interrupts are disabled. */
/*
 * CPU-offline helper: if this cpu was the default server, recompute
 * the defaults; leave the GIQ; then push every interrupt whose ICS
 * server is this cpu back to the all-cpus affinity.
 */
void xics_migrate_irqs_away(void)
{
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == xics_default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us... */
	icp_ops->set_priority(0);

	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	/* Allow IPIs again... */
	icp_ops->set_priority(DEFAULT_PRIORITY);

	for_each_irq(virq) {
		struct irq_desc *desc;
		struct irq_chip *chip;
		long server;
		unsigned long flags;
		struct ics *ics;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (!virq_is_host(virq, xics_host))
			continue;
		irq = (unsigned int)virq_to_hw(virq);
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = irq_to_desc(virq);
		/* We only need to migrate enabled IRQS */
		if (!desc || !desc->action)
			continue;
		chip = irq_desc_get_chip(desc);
		if (!chip || !chip->irq_set_affinity)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Locate interrupt server */
		server = -1;
		ics = irq_get_chip_data(virq);
		if (ics)
			server = ics->get_server(ics, irq);
		if (server < 0) {
			printk(KERN_ERR "%s: Can't find server for irq %d\n",
			       __func__, irq);
			goto unlock;
		}

		/* We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (server != hw_cpu)
			goto unlock;

		/* This is expected during cpu offline. */
		if (cpu_online(cpu))
			pr_warning("IRQ %u affinity broken off cpu %u\n",
				   virq, cpu);

		/* Reset affinity to all cpus */
		/* NOTE(review): the desc lock is dropped before calling
		 * irq_set_affinity() — presumably because it takes the
		 * descriptor lock itself; confirm against kernel/irq.
		 */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		irq_set_affinity(virq, cpu_all_mask);
		continue;
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
263#endif /* CONFIG_HOTPLUG_CPU */
264
265#ifdef CONFIG_SMP
266/*
267 * For the moment we only implement delivery to all cpus or one cpu.
268 *
269 * If the requested affinity is cpu_all_mask, we set global affinity.
270 * If not we set it to the first cpu in the mask, even if multiple cpus
271 * are set. This is so things like irqbalance (which set core and package
272 * wide affinities) do the right thing.
273 *
274 * We need to fix this to implement support for the links
275 */
276int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
277 unsigned int strict_check)
278{
279
280 if (!distribute_irqs)
281 return xics_default_server;
282
283 if (!cpumask_subset(cpu_possible_mask, cpumask)) {
284 int server = cpumask_first_and(cpu_online_mask, cpumask);
285
286 if (server < nr_cpu_ids)
287 return get_hard_smp_processor_id(server);
288
289 if (strict_check)
290 return -1;
291 }
292
293 /*
294 * Workaround issue with some versions of JS20 firmware that
295 * deliver interrupts to cpus which haven't been started. This
296 * happens when using the maxcpus= boot option.
297 */
298 if (cpumask_equal(cpu_online_mask, cpu_present_mask))
299 return xics_default_distrib_server;
300
301 return xics_default_server;
302}
303#endif /* CONFIG_SMP */
304
305static int xics_host_match(struct irq_host *h, struct device_node *node)
306{
307 struct ics *ics;
308
309 list_for_each_entry(ics, &ics_list, link)
310 if (ics->host_match(ics, node))
311 return 1;
312
313 return 0;
314}
315
/* Dummies: the IPI has no source controller, so mask/unmask do nothing */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }

/* Chip used for the XICS_IPI vector; irq_eoi patched in xics_init() */
static struct irq_chip xics_ipi_chip = {
	.name = "XICS",
	.irq_eoi = NULL, /* Patched at init time */
	.irq_mask = xics_ipi_mask,
	.irq_unmask = xics_ipi_unmask,
};
326
327static int xics_host_map(struct irq_host *h, unsigned int virq,
328 irq_hw_number_t hw)
329{
330 struct ics *ics;
331
332 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
333
334 /* Insert the interrupt mapping into the radix tree for fast lookup */
335 irq_radix_revmap_insert(xics_host, virq, hw);
336
337 /* They aren't all level sensitive but we just don't really know */
338 irq_set_status_flags(virq, IRQ_LEVEL);
339
340 /* Don't call into ICS for IPIs */
341 if (hw == XICS_IPI) {
342 irq_set_chip_and_handler(virq, &xics_ipi_chip,
343 handle_percpu_irq);
344 return 0;
345 }
346
347 /* Let the ICS setup the chip data */
348 list_for_each_entry(ics, &ics_list, link)
349 if (ics->map(ics, virq) == 0)
350 return 0;
351
352 return -EINVAL;
353}
354
/*
 * irq_host xlate callback: first specifier cell is the hw number,
 * sense is forced to level-low.
 */
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	/* Current xics implementation translates everything
	 * to level. It is not technically right for MSIs but this
	 * is irrelevant at this point. We might get smarter in the future
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
369
/* irq_host callbacks for the XICS interrupt domain */
static struct irq_host_ops xics_host_ops = {
	.match = xics_host_match,
	.map = xics_host_map,
	.xlate = xics_host_xlate,
};
375
376static void __init xics_init_host(void)
377{
378 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
379 XICS_IRQ_SPURIOUS);
380 BUG_ON(xics_host == NULL);
381 irq_set_default_host(xics_host);
382}
383
/* Register an interrupt source controller with the XICS core. */
void __init xics_register_ics(struct ics *ics)
{
	list_add(&ics->link, &ics_list);
}
388
389static void __init xics_get_server_size(void)
390{
391 struct device_node *np;
392 const u32 *isize;
393
394 /* We fetch the interrupt server size from the first ICS node
395 * we find if any
396 */
397 np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
398 if (!np)
399 return;
400 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
401 if (!isize)
402 return;
403 xics_interrupt_server_size = *isize;
404 of_node_put(np);
405}
406
/*
 * Main XICS bringup: pick an ICP backend (HV when running under an
 * LPAR, otherwise native), then an ICS backend (RTAS), then set up
 * the irq host, the defaults and this cpu.
 */
void __init xics_init(void)
{
	int rc = -1;

	/* First locate ICP */
#ifdef CONFIG_PPC_ICP_HV
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = icp_hv_init();
#endif
#ifdef CONFIG_PPC_ICP_NATIVE
	if (rc < 0)
		rc = icp_native_init();
#endif
	if (rc < 0) {
		pr_warning("XICS: Cannot find a Presentation Controller !\n");
		return;
	}

	/* Copy get_irq callback over to ppc_md */
	ppc_md.get_irq = icp_ops->get_irq;

	/* Patch up IPI chip EOI */
	xics_ipi_chip.irq_eoi = icp_ops->eoi;

	/* Now locate ICS */
#ifdef CONFIG_PPC_ICS_RTAS
	rc = ics_rtas_init();
#endif
	if (rc < 0)
		pr_warning("XICS: Cannot find a Source Controller !\n");

	/* Initialize common bits */
	xics_get_server_size();
	xics_update_irq_servers();
	xics_init_host();
	xics_setup_cpu();
}
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 1e0ccfaf403e..6183799754af 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -69,32 +69,26 @@ static unsigned char xilinx_intc_map_senses[] = {
69 * 69 *
70 * IRQ Chip common (across level and edge) operations 70 * IRQ Chip common (across level and edge) operations
71 */ 71 */
72static void xilinx_intc_mask(unsigned int virq) 72static void xilinx_intc_mask(struct irq_data *d)
73{ 73{
74 int irq = virq_to_hw(virq); 74 int irq = irqd_to_hwirq(d);
75 void * regs = get_irq_chip_data(virq); 75 void * regs = irq_data_get_irq_chip_data(d);
76 pr_debug("mask: %d\n", irq); 76 pr_debug("mask: %d\n", irq);
77 out_be32(regs + XINTC_CIE, 1 << irq); 77 out_be32(regs + XINTC_CIE, 1 << irq);
78} 78}
79 79
80static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type) 80static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
81{ 81{
82 struct irq_desc *desc = irq_to_desc(virq);
83
84 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
85 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
86 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
87 desc->status |= IRQ_LEVEL;
88 return 0; 82 return 0;
89} 83}
90 84
91/* 85/*
92 * IRQ Chip level operations 86 * IRQ Chip level operations
93 */ 87 */
94static void xilinx_intc_level_unmask(unsigned int virq) 88static void xilinx_intc_level_unmask(struct irq_data *d)
95{ 89{
96 int irq = virq_to_hw(virq); 90 int irq = irqd_to_hwirq(d);
97 void * regs = get_irq_chip_data(virq); 91 void * regs = irq_data_get_irq_chip_data(d);
98 pr_debug("unmask: %d\n", irq); 92 pr_debug("unmask: %d\n", irq);
99 out_be32(regs + XINTC_SIE, 1 << irq); 93 out_be32(regs + XINTC_SIE, 1 << irq);
100 94
@@ -107,37 +101,37 @@ static void xilinx_intc_level_unmask(unsigned int virq)
107 101
108static struct irq_chip xilinx_intc_level_irqchip = { 102static struct irq_chip xilinx_intc_level_irqchip = {
109 .name = "Xilinx Level INTC", 103 .name = "Xilinx Level INTC",
110 .mask = xilinx_intc_mask, 104 .irq_mask = xilinx_intc_mask,
111 .mask_ack = xilinx_intc_mask, 105 .irq_mask_ack = xilinx_intc_mask,
112 .unmask = xilinx_intc_level_unmask, 106 .irq_unmask = xilinx_intc_level_unmask,
113 .set_type = xilinx_intc_set_type, 107 .irq_set_type = xilinx_intc_set_type,
114}; 108};
115 109
116/* 110/*
117 * IRQ Chip edge operations 111 * IRQ Chip edge operations
118 */ 112 */
119static void xilinx_intc_edge_unmask(unsigned int virq) 113static void xilinx_intc_edge_unmask(struct irq_data *d)
120{ 114{
121 int irq = virq_to_hw(virq); 115 int irq = irqd_to_hwirq(d);
122 void *regs = get_irq_chip_data(virq); 116 void *regs = irq_data_get_irq_chip_data(d);
123 pr_debug("unmask: %d\n", irq); 117 pr_debug("unmask: %d\n", irq);
124 out_be32(regs + XINTC_SIE, 1 << irq); 118 out_be32(regs + XINTC_SIE, 1 << irq);
125} 119}
126 120
127static void xilinx_intc_edge_ack(unsigned int virq) 121static void xilinx_intc_edge_ack(struct irq_data *d)
128{ 122{
129 int irq = virq_to_hw(virq); 123 int irq = irqd_to_hwirq(d);
130 void * regs = get_irq_chip_data(virq); 124 void * regs = irq_data_get_irq_chip_data(d);
131 pr_debug("ack: %d\n", irq); 125 pr_debug("ack: %d\n", irq);
132 out_be32(regs + XINTC_IAR, 1 << irq); 126 out_be32(regs + XINTC_IAR, 1 << irq);
133} 127}
134 128
135static struct irq_chip xilinx_intc_edge_irqchip = { 129static struct irq_chip xilinx_intc_edge_irqchip = {
136 .name = "Xilinx Edge INTC", 130 .name = "Xilinx Edge INTC",
137 .mask = xilinx_intc_mask, 131 .irq_mask = xilinx_intc_mask,
138 .unmask = xilinx_intc_edge_unmask, 132 .irq_unmask = xilinx_intc_edge_unmask,
139 .ack = xilinx_intc_edge_ack, 133 .irq_ack = xilinx_intc_edge_ack,
140 .set_type = xilinx_intc_set_type, 134 .irq_set_type = xilinx_intc_set_type,
141}; 135};
142 136
143/* 137/*
@@ -170,15 +164,15 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
170static int xilinx_intc_map(struct irq_host *h, unsigned int virq, 164static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
171 irq_hw_number_t irq) 165 irq_hw_number_t irq)
172{ 166{
173 set_irq_chip_data(virq, h->host_data); 167 irq_set_chip_data(virq, h->host_data);
174 168
175 if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || 169 if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
176 xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { 170 xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
177 set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip, 171 irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
178 handle_level_irq); 172 handle_level_irq);
179 } else { 173 } else {
180 set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip, 174 irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
181 handle_edge_irq); 175 handle_edge_irq);
182 } 176 }
183 return 0; 177 return 0;
184} 178}
@@ -229,12 +223,14 @@ int xilinx_intc_get_irq(void)
229 */ 223 */
230static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) 224static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
231{ 225{
226 struct irq_chip *chip = irq_desc_get_chip(desc);
232 unsigned int cascade_irq = i8259_irq(); 227 unsigned int cascade_irq = i8259_irq();
228
233 if (cascade_irq) 229 if (cascade_irq)
234 generic_handle_irq(cascade_irq); 230 generic_handle_irq(cascade_irq);
235 231
236 /* Let xilinx_intc end the interrupt */ 232 /* Let xilinx_intc end the interrupt */
237 desc->chip->unmask(irq); 233 chip->irq_unmask(&desc->irq_data);
238} 234}
239 235
240static void __init xilinx_i8259_setup_cascade(void) 236static void __init xilinx_i8259_setup_cascade(void)
@@ -254,7 +250,7 @@ static void __init xilinx_i8259_setup_cascade(void)
254 } 250 }
255 251
256 i8259_init(cascade_node, 0); 252 i8259_init(cascade_node, 0);
257 set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade); 253 irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
258 254
259 /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ 255 /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
260 /* This looks like a dirty hack to me --gcl */ 256 /* This looks like a dirty hack to me --gcl */