aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--arch/sparc/kernel/Makefile12
-rw-r--r--arch/sparc/kernel/apc.c12
-rw-r--r--arch/sparc/kernel/auxio_32.c8
-rw-r--r--arch/sparc/kernel/auxio_64.c9
-rw-r--r--arch/sparc/kernel/btext.c4
-rw-r--r--arch/sparc/kernel/central.c18
-rw-r--r--arch/sparc/kernel/chmc.c21
-rw-r--r--arch/sparc/kernel/cpu.c143
-rw-r--r--arch/sparc/kernel/cpumap.c4
-rw-r--r--arch/sparc/kernel/devices.c27
-rw-r--r--arch/sparc/kernel/ds.c16
-rw-r--r--arch/sparc/kernel/entry.S53
-rw-r--r--arch/sparc/kernel/entry.h4
-rw-r--r--arch/sparc/kernel/head_32.S54
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/init_task.c2
-rw-r--r--arch/sparc/kernel/iommu.c8
-rw-r--r--arch/sparc/kernel/ioport.c116
-rw-r--r--arch/sparc/kernel/irq.h93
-rw-r--r--arch/sparc/kernel/irq_32.c610
-rw-r--r--arch/sparc/kernel/irq_64.c389
-rw-r--r--arch/sparc/kernel/jump_label.c47
-rw-r--r--arch/sparc/kernel/kernel.h54
-rw-r--r--arch/sparc/kernel/ldc.c28
-rw-r--r--arch/sparc/kernel/leon_kernel.c429
-rw-r--r--arch/sparc/kernel/leon_pci.c253
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c897
-rw-r--r--arch/sparc/kernel/leon_pmc.c82
-rw-r--r--arch/sparc/kernel/leon_smp.c189
-rw-r--r--arch/sparc/kernel/mdesc.c5
-rw-r--r--arch/sparc/kernel/module.c22
-rw-r--r--arch/sparc/kernel/nmi.c2
-rw-r--r--arch/sparc/kernel/of_device_32.c59
-rw-r--r--arch/sparc/kernel/of_device_64.c5
-rw-r--r--arch/sparc/kernel/of_device_common.c27
-rw-r--r--arch/sparc/kernel/pci.c11
-rw-r--r--arch/sparc/kernel/pci_common.c15
-rw-r--r--arch/sparc/kernel/pci_fire.c19
-rw-r--r--arch/sparc/kernel/pci_impl.h4
-rw-r--r--arch/sparc/kernel/pci_msi.c58
-rw-r--r--arch/sparc/kernel/pci_psycho.c9
-rw-r--r--arch/sparc/kernel/pci_sabre.c14
-rw-r--r--arch/sparc/kernel/pci_schizo.c27
-rw-r--r--arch/sparc/kernel/pci_sun4v.c18
-rw-r--r--arch/sparc/kernel/pcic.c91
-rw-r--r--arch/sparc/kernel/pcr.c12
-rw-r--r--arch/sparc/kernel/perf_event.c248
-rw-r--r--arch/sparc/kernel/pmc.c9
-rw-r--r--arch/sparc/kernel/power.c8
-rw-r--r--arch/sparc/kernel/process_32.c12
-rw-r--r--arch/sparc/kernel/prom.h6
-rw-r--r--arch/sparc/kernel/prom_32.c28
-rw-r--r--arch/sparc/kernel/prom_common.c202
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c18
-rw-r--r--arch/sparc/kernel/psycho_common.c2
-rw-r--r--arch/sparc/kernel/ptrace_32.c57
-rw-r--r--arch/sparc/kernel/ptrace_64.c18
-rw-r--r--arch/sparc/kernel/rtrap_32.S6
-rw-r--r--arch/sparc/kernel/rtrap_64.S36
-rw-r--r--arch/sparc/kernel/sbus.c4
-rw-r--r--arch/sparc/kernel/setup_32.c111
-rw-r--r--arch/sparc/kernel/setup_64.c82
-rw-r--r--arch/sparc/kernel/smp_32.c121
-rw-r--r--arch/sparc/kernel/smp_64.c72
-rw-r--r--arch/sparc/kernel/starfire.c4
-rw-r--r--arch/sparc/kernel/sun4c_irq.c225
-rw-r--r--arch/sparc/kernel/sun4d_irq.c664
-rw-r--r--arch/sparc/kernel/sun4d_smp.c267
-rw-r--r--arch/sparc/kernel/sun4m_irq.c359
-rw-r--r--arch/sparc/kernel/sun4m_smp.c140
-rw-r--r--arch/sparc/kernel/sys_sparc32.c3
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c27
-rw-r--r--arch/sparc/kernel/sysfs.c3
-rw-r--r--arch/sparc/kernel/systbls_32.S4
-rw-r--r--arch/sparc/kernel/systbls_64.S6
-rw-r--r--arch/sparc/kernel/tadpole.c2
-rw-r--r--arch/sparc/kernel/tick14.c39
-rw-r--r--arch/sparc/kernel/time_32.c33
-rw-r--r--arch/sparc/kernel/time_64.c30
-rw-r--r--arch/sparc/kernel/traps_64.c7
-rw-r--r--arch/sparc/kernel/una_asm_32.S4
-rw-r--r--arch/sparc/kernel/una_asm_64.S2
-rw-r--r--arch/sparc/kernel/unaligned_32.c1
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c10
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c8
-rw-r--r--arch/sparc/kernel/viohs.c2
-rw-r--r--arch/sparc/kernel/visemul.c14
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S2
-rw-r--r--arch/sparc/kernel/windows.c1
91 files changed, 4018 insertions, 2868 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0c2dc1f24a9a..b90b4a1d070a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SPARC32) += windows.o
42obj-y += cpu.o 42obj-y += cpu.o
43obj-$(CONFIG_SPARC32) += devices.o 43obj-$(CONFIG_SPARC32) += devices.o
44obj-$(CONFIG_SPARC32) += tadpole.o 44obj-$(CONFIG_SPARC32) += tadpole.o
45obj-$(CONFIG_SPARC32) += tick14.o
46obj-y += ptrace_$(BITS).o 45obj-y += ptrace_$(BITS).o
47obj-y += unaligned_$(BITS).o 46obj-y += unaligned_$(BITS).o
48obj-y += una_asm_$(BITS).o 47obj-y += una_asm_$(BITS).o
@@ -54,6 +53,7 @@ obj-y += of_device_$(BITS).o
54obj-$(CONFIG_SPARC64) += prom_irqtrans.o 53obj-$(CONFIG_SPARC64) += prom_irqtrans.o
55 54
56obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o 55obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
56obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
57 57
58obj-$(CONFIG_SPARC64) += reboot.o 58obj-$(CONFIG_SPARC64) += reboot.o
59obj-$(CONFIG_SPARC64) += sysfs.o 59obj-$(CONFIG_SPARC64) += sysfs.o
@@ -71,13 +71,11 @@ obj-$(CONFIG_SPARC64) += pcr.o
71obj-$(CONFIG_SPARC64) += nmi.o 71obj-$(CONFIG_SPARC64) += nmi.o
72obj-$(CONFIG_SPARC64_SMP) += cpumap.o 72obj-$(CONFIG_SPARC64_SMP) += cpumap.o
73 73
74# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
75obj-$(CONFIG_SPARC32) += devres.o
76devres-y := ../../../kernel/irq/devres.o
77
78obj-y += dma.o 74obj-y += dma.o
79 75
80obj-$(CONFIG_SPARC32_PCI) += pcic.o 76obj-$(CONFIG_PCIC_PCI) += pcic.o
77obj-$(CONFIG_LEON_PCI) += leon_pci.o
78obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o
81 79
82obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o 80obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
83obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o 81obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
@@ -119,3 +117,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
119 117
120pc--$(CONFIG_PERF_EVENTS) := perf_event.o 118pc--$(CONFIG_PERF_EVENTS) := perf_event.o
121obj-$(CONFIG_SPARC64) += $(pc--y) 119obj-$(CONFIG_SPARC64) += $(pc--y)
120
121obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 2c0046ecc715..caef9deb5866 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
123 123
124 default: 124 default:
125 return -EINVAL; 125 return -EINVAL;
126 }; 126 }
127 127
128 return 0; 128 return 0;
129} 129}
@@ -132,12 +132,12 @@ static const struct file_operations apc_fops = {
132 .unlocked_ioctl = apc_ioctl, 132 .unlocked_ioctl = apc_ioctl,
133 .open = apc_open, 133 .open = apc_open,
134 .release = apc_release, 134 .release = apc_release,
135 .llseek = noop_llseek,
135}; 136};
136 137
137static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; 138static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
138 139
139static int __devinit apc_probe(struct platform_device *op, 140static int __devinit apc_probe(struct platform_device *op)
140 const struct of_device_id *match)
141{ 141{
142 int err; 142 int err;
143 143
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op,
165 return 0; 165 return 0;
166} 166}
167 167
168static struct of_device_id __initdata apc_match[] = { 168static struct of_device_id apc_match[] = {
169 { 169 {
170 .name = APC_OBPNAME, 170 .name = APC_OBPNAME,
171 }, 171 },
@@ -173,7 +173,7 @@ static struct of_device_id __initdata apc_match[] = {
173}; 173};
174MODULE_DEVICE_TABLE(of, apc_match); 174MODULE_DEVICE_TABLE(of, apc_match);
175 175
176static struct of_platform_driver apc_driver = { 176static struct platform_driver apc_driver = {
177 .driver = { 177 .driver = {
178 .name = "apc", 178 .name = "apc",
179 .owner = THIS_MODULE, 179 .owner = THIS_MODULE,
@@ -184,7 +184,7 @@ static struct of_platform_driver apc_driver = {
184 184
185static int __init apc_init(void) 185static int __init apc_init(void)
186{ 186{
187 return of_register_platform_driver(&apc_driver); 187 return platform_driver_register(&apc_driver);
188} 188}
189 189
190/* This driver is not critical to the boot process 190/* This driver is not critical to the boot process
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index ee8d214cae1e..acf5151f3c1d 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -23,7 +23,7 @@ static DEFINE_SPINLOCK(auxio_lock);
23 23
24void __init auxio_probe(void) 24void __init auxio_probe(void)
25{ 25{
26 int node, auxio_nd; 26 phandle node, auxio_nd;
27 struct linux_prom_registers auxregs[1]; 27 struct linux_prom_registers auxregs[1];
28 struct resource r; 28 struct resource r;
29 29
@@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
101 break; 101 break;
102 default: 102 default:
103 panic("Can't set AUXIO register on this machine."); 103 panic("Can't set AUXIO register on this machine.");
104 }; 104 }
105 spin_unlock_irqrestore(&auxio_lock, flags); 105 spin_unlock_irqrestore(&auxio_lock, flags);
106} 106}
107EXPORT_SYMBOL(set_auxio); 107EXPORT_SYMBOL(set_auxio);
@@ -113,7 +113,7 @@ volatile unsigned char * auxio_power_register = NULL;
113void __init auxio_power_probe(void) 113void __init auxio_power_probe(void)
114{ 114{
115 struct linux_prom_registers regs; 115 struct linux_prom_registers regs;
116 int node; 116 phandle node;
117 struct resource r; 117 struct resource r;
118 118
119 /* Attempt to find the sun4m power control node. */ 119 /* Attempt to find the sun4m power control node. */
@@ -121,7 +121,7 @@ void __init auxio_power_probe(void)
121 node = prom_searchsiblings(node, "obio"); 121 node = prom_searchsiblings(node, "obio");
122 node = prom_getchild(node); 122 node = prom_getchild(node);
123 node = prom_searchsiblings(node, "power"); 123 node = prom_searchsiblings(node, "power");
124 if (node == 0 || node == -1) 124 if (node == 0 || (s32)node == -1)
125 return; 125 return;
126 126
127 /* Map the power control register. */ 127 /* Map the power control register. */
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 3efd3c5af6a9..773091ac71a3 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -93,7 +93,7 @@ void auxio_set_lte(int on)
93} 93}
94EXPORT_SYMBOL(auxio_set_lte); 94EXPORT_SYMBOL(auxio_set_lte);
95 95
96static struct of_device_id __initdata auxio_match[] = { 96static const struct of_device_id auxio_match[] = {
97 { 97 {
98 .name = "auxio", 98 .name = "auxio",
99 }, 99 },
@@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = {
102 102
103MODULE_DEVICE_TABLE(of, auxio_match); 103MODULE_DEVICE_TABLE(of, auxio_match);
104 104
105static int __devinit auxio_probe(struct platform_device *dev, 105static int __devinit auxio_probe(struct platform_device *dev)
106 const struct of_device_id *match)
107{ 106{
108 struct device_node *dp = dev->dev.of_node; 107 struct device_node *dp = dev->dev.of_node;
109 unsigned long size; 108 unsigned long size;
@@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev,
132 return 0; 131 return 0;
133} 132}
134 133
135static struct of_platform_driver auxio_driver = { 134static struct platform_driver auxio_driver = {
136 .probe = auxio_probe, 135 .probe = auxio_probe,
137 .driver = { 136 .driver = {
138 .name = "auxio", 137 .name = "auxio",
@@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = {
143 142
144static int __init auxio_init(void) 143static int __init auxio_init(void)
145{ 144{
146 return of_register_platform_driver(&auxio_driver); 145 return platform_driver_register(&auxio_driver);
147} 146}
148 147
149/* Must be after subsys_initcall() so that busses are probed. Must 148/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c
index 8cc2d56ffe9a..89aa4eb20cf5 100644
--- a/arch/sparc/kernel/btext.c
+++ b/arch/sparc/kernel/btext.c
@@ -40,7 +40,7 @@ static unsigned char *dispDeviceBase __force_data;
40 40
41static unsigned char vga_font[cmapsz]; 41static unsigned char vga_font[cmapsz];
42 42
43static int __init btext_initialize(unsigned int node) 43static int __init btext_initialize(phandle node)
44{ 44{
45 unsigned int width, height, depth, pitch; 45 unsigned int width, height, depth, pitch;
46 unsigned long address = 0; 46 unsigned long address = 0;
@@ -309,7 +309,7 @@ static struct console btext_console = {
309 309
310int __init btext_find_display(void) 310int __init btext_find_display(void)
311{ 311{
312 unsigned int node; 312 phandle node;
313 char type[32]; 313 char type[32];
314 int ret; 314 int ret;
315 315
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index cfa2624c5332..7eef3f741963 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -59,8 +59,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p)
59 } 59 }
60} 60}
61 61
62static int __devinit clock_board_probe(struct platform_device *op, 62static int __devinit clock_board_probe(struct platform_device *op)
63 const struct of_device_id *match)
64{ 63{
65 struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); 64 struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
66 int err = -ENOMEM; 65 int err = -ENOMEM;
@@ -141,14 +140,14 @@ out_free:
141 goto out; 140 goto out;
142} 141}
143 142
144static struct of_device_id __initdata clock_board_match[] = { 143static const struct of_device_id clock_board_match[] = {
145 { 144 {
146 .name = "clock-board", 145 .name = "clock-board",
147 }, 146 },
148 {}, 147 {},
149}; 148};
150 149
151static struct of_platform_driver clock_board_driver = { 150static struct platform_driver clock_board_driver = {
152 .probe = clock_board_probe, 151 .probe = clock_board_probe,
153 .driver = { 152 .driver = {
154 .name = "clock_board", 153 .name = "clock_board",
@@ -157,8 +156,7 @@ static struct of_platform_driver clock_board_driver = {
157 }, 156 },
158}; 157};
159 158
160static int __devinit fhc_probe(struct platform_device *op, 159static int __devinit fhc_probe(struct platform_device *op)
161 const struct of_device_id *match)
162{ 160{
163 struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); 161 struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
164 int err = -ENOMEM; 162 int err = -ENOMEM;
@@ -247,14 +245,14 @@ out_free:
247 goto out; 245 goto out;
248} 246}
249 247
250static struct of_device_id __initdata fhc_match[] = { 248static const struct of_device_id fhc_match[] = {
251 { 249 {
252 .name = "fhc", 250 .name = "fhc",
253 }, 251 },
254 {}, 252 {},
255}; 253};
256 254
257static struct of_platform_driver fhc_driver = { 255static struct platform_driver fhc_driver = {
258 .probe = fhc_probe, 256 .probe = fhc_probe,
259 .driver = { 257 .driver = {
260 .name = "fhc", 258 .name = "fhc",
@@ -265,8 +263,8 @@ static struct of_platform_driver fhc_driver = {
265 263
266static int __init sunfire_init(void) 264static int __init sunfire_init(void)
267{ 265{
268 (void) of_register_platform_driver(&fhc_driver); 266 (void) platform_driver_register(&fhc_driver);
269 (void) of_register_platform_driver(&clock_board_driver); 267 (void) platform_driver_register(&clock_board_driver);
270 return 0; 268 return 0;
271} 269}
272 270
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 08c466ebb32b..5f450260981d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
392 } 392 }
393} 393}
394 394
395static int __devinit jbusmc_probe(struct platform_device *op, 395static int __devinit jbusmc_probe(struct platform_device *op)
396 const struct of_device_id *match)
397{ 396{
398 const struct linux_prom64_registers *mem_regs; 397 const struct linux_prom64_registers *mem_regs;
399 struct device_node *mem_node; 398 struct device_node *mem_node;
@@ -665,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va
665 case 0x0: 664 case 0x0:
666 bp->interleave = 16; 665 bp->interleave = 16;
667 break; 666 break;
668 }; 667 }
669 668
670 /* UK[10] is reserved, and UK[11] is not set for the SDRAM 669 /* UK[10] is reserved, and UK[11] is not set for the SDRAM
671 * bank size definition. 670 * bank size definition.
@@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p)
690 chmc_read_mcreg(p, CHMCTRL_DECODE4)); 689 chmc_read_mcreg(p, CHMCTRL_DECODE4));
691} 690}
692 691
693static int __devinit chmc_probe(struct platform_device *op, 692static int __devinit chmc_probe(struct platform_device *op)
694 const struct of_device_id *match)
695{ 693{
696 struct device_node *dp = op->dev.of_node; 694 struct device_node *dp = op->dev.of_node;
697 unsigned long ver; 695 unsigned long ver;
@@ -765,13 +763,12 @@ out_free:
765 goto out; 763 goto out;
766} 764}
767 765
768static int __devinit us3mc_probe(struct platform_device *op, 766static int __devinit us3mc_probe(struct platform_device *op)
769 const struct of_device_id *match)
770{ 767{
771 if (mc_type == MC_TYPE_SAFARI) 768 if (mc_type == MC_TYPE_SAFARI)
772 return chmc_probe(op, match); 769 return chmc_probe(op);
773 else if (mc_type == MC_TYPE_JBUS) 770 else if (mc_type == MC_TYPE_JBUS)
774 return jbusmc_probe(op, match); 771 return jbusmc_probe(op);
775 return -ENODEV; 772 return -ENODEV;
776} 773}
777 774
@@ -810,7 +807,7 @@ static const struct of_device_id us3mc_match[] = {
810}; 807};
811MODULE_DEVICE_TABLE(of, us3mc_match); 808MODULE_DEVICE_TABLE(of, us3mc_match);
812 809
813static struct of_platform_driver us3mc_driver = { 810static struct platform_driver us3mc_driver = {
814 .driver = { 811 .driver = {
815 .name = "us3mc", 812 .name = "us3mc",
816 .owner = THIS_MODULE, 813 .owner = THIS_MODULE,
@@ -848,7 +845,7 @@ static int __init us3mc_init(void)
848 ret = register_dimm_printer(us3mc_dimm_printer); 845 ret = register_dimm_printer(us3mc_dimm_printer);
849 846
850 if (!ret) { 847 if (!ret) {
851 ret = of_register_platform_driver(&us3mc_driver); 848 ret = platform_driver_register(&us3mc_driver);
852 if (ret) 849 if (ret)
853 unregister_dimm_printer(us3mc_dimm_printer); 850 unregister_dimm_printer(us3mc_dimm_printer);
854 } 851 }
@@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void)
859{ 856{
860 if (us3mc_platform()) { 857 if (us3mc_platform()) {
861 unregister_dimm_printer(us3mc_dimm_printer); 858 unregister_dimm_printer(us3mc_dimm_printer);
862 of_unregister_platform_driver(&us3mc_driver); 859 platform_driver_unregister(&us3mc_driver);
863 } 860 }
864} 861}
865 862
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938d39cf..138dbbc8dc84 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */ 5 */
6 6
7#include <linux/seq_file.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/init.h> 10#include <linux/init.h>
@@ -11,7 +12,9 @@
11#include <linux/threads.h> 12#include <linux/threads.h>
12 13
13#include <asm/spitfire.h> 14#include <asm/spitfire.h>
15#include <asm/pgtable.h>
14#include <asm/oplib.h> 16#include <asm/oplib.h>
17#include <asm/setup.h>
15#include <asm/page.h> 18#include <asm/page.h>
16#include <asm/head.h> 19#include <asm/head.h>
17#include <asm/psr.h> 20#include <asm/psr.h>
@@ -23,6 +26,9 @@
23DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; 26DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
24EXPORT_PER_CPU_SYMBOL(__cpu_data); 27EXPORT_PER_CPU_SYMBOL(__cpu_data);
25 28
29int ncpus_probed;
30unsigned int fsr_storage;
31
26struct cpu_info { 32struct cpu_info {
27 int psr_vers; 33 int psr_vers;
28 const char *name; 34 const char *name;
@@ -247,13 +253,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
247 * machine type value into consideration too. I will fix this. 253 * machine type value into consideration too. I will fix this.
248 */ 254 */
249 255
250const char *sparc_cpu_type; 256static const char *sparc_cpu_type;
251const char *sparc_fpu_type; 257static const char *sparc_fpu_type;
252const char *sparc_pmu_type; 258const char *sparc_pmu_type;
253 259
254unsigned int fsr_storage;
255 260
256static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers) 261static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
257{ 262{
258 const struct manufacturer_info *manuf; 263 const struct manufacturer_info *manuf;
259 int i; 264 int i;
@@ -313,7 +318,123 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
313} 318}
314 319
315#ifdef CONFIG_SPARC32 320#ifdef CONFIG_SPARC32
316void __cpuinit cpu_probe(void) 321static int show_cpuinfo(struct seq_file *m, void *__unused)
322{
323 seq_printf(m,
324 "cpu\t\t: %s\n"
325 "fpu\t\t: %s\n"
326 "promlib\t\t: Version %d Revision %d\n"
327 "prom\t\t: %d.%d\n"
328 "type\t\t: %s\n"
329 "ncpus probed\t: %d\n"
330 "ncpus active\t: %d\n"
331#ifndef CONFIG_SMP
332 "CPU0Bogo\t: %lu.%02lu\n"
333 "CPU0ClkTck\t: %ld\n"
334#endif
335 ,
336 sparc_cpu_type,
337 sparc_fpu_type ,
338 romvec->pv_romvers,
339 prom_rev,
340 romvec->pv_printrev >> 16,
341 romvec->pv_printrev & 0xffff,
342 &cputypval[0],
343 ncpus_probed,
344 num_online_cpus()
345#ifndef CONFIG_SMP
346 , cpu_data(0).udelay_val/(500000/HZ),
347 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
348 cpu_data(0).clock_tick
349#endif
350 );
351
352#ifdef CONFIG_SMP
353 smp_bogo(m);
354#endif
355 mmu_info(m);
356#ifdef CONFIG_SMP
357 smp_info(m);
358#endif
359 return 0;
360}
361#endif /* CONFIG_SPARC32 */
362
363#ifdef CONFIG_SPARC64
364unsigned int dcache_parity_tl1_occurred;
365unsigned int icache_parity_tl1_occurred;
366
367
368static int show_cpuinfo(struct seq_file *m, void *__unused)
369{
370 seq_printf(m,
371 "cpu\t\t: %s\n"
372 "fpu\t\t: %s\n"
373 "pmu\t\t: %s\n"
374 "prom\t\t: %s\n"
375 "type\t\t: %s\n"
376 "ncpus probed\t: %d\n"
377 "ncpus active\t: %d\n"
378 "D$ parity tl1\t: %u\n"
379 "I$ parity tl1\t: %u\n"
380#ifndef CONFIG_SMP
381 "Cpu0ClkTck\t: %016lx\n"
382#endif
383 ,
384 sparc_cpu_type,
385 sparc_fpu_type,
386 sparc_pmu_type,
387 prom_version,
388 ((tlb_type == hypervisor) ?
389 "sun4v" :
390 "sun4u"),
391 ncpus_probed,
392 num_online_cpus(),
393 dcache_parity_tl1_occurred,
394 icache_parity_tl1_occurred
395#ifndef CONFIG_SMP
396 , cpu_data(0).clock_tick
397#endif
398 );
399#ifdef CONFIG_SMP
400 smp_bogo(m);
401#endif
402 mmu_info(m);
403#ifdef CONFIG_SMP
404 smp_info(m);
405#endif
406 return 0;
407}
408#endif /* CONFIG_SPARC64 */
409
410static void *c_start(struct seq_file *m, loff_t *pos)
411{
412 /* The pointer we are returning is arbitrary,
413 * it just has to be non-NULL and not IS_ERR
414 * in the success case.
415 */
416 return *pos == 0 ? &c_start : NULL;
417}
418
419static void *c_next(struct seq_file *m, void *v, loff_t *pos)
420{
421 ++*pos;
422 return c_start(m, pos);
423}
424
425static void c_stop(struct seq_file *m, void *v)
426{
427}
428
429const struct seq_operations cpuinfo_op = {
430 .start =c_start,
431 .next = c_next,
432 .stop = c_stop,
433 .show = show_cpuinfo,
434};
435
436#ifdef CONFIG_SPARC32
437static int __init cpu_type_probe(void)
317{ 438{
318 int psr_impl, psr_vers, fpu_vers; 439 int psr_impl, psr_vers, fpu_vers;
319 int psr; 440 int psr;
@@ -324,7 +445,7 @@ void __cpuinit cpu_probe(void)
324 psr = get_psr(); 445 psr = get_psr();
325 put_psr(psr | PSR_EF); 446 put_psr(psr | PSR_EF);
326#ifdef CONFIG_SPARC_LEON 447#ifdef CONFIG_SPARC_LEON
327 fpu_vers = 7; 448 fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
328#else 449#else
329 fpu_vers = ((get_fsr() >> 17) & 0x7); 450 fpu_vers = ((get_fsr() >> 17) & 0x7);
330#endif 451#endif
@@ -332,8 +453,12 @@ void __cpuinit cpu_probe(void)
332 put_psr(psr); 453 put_psr(psr);
333 454
334 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); 455 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
456
457 return 0;
335} 458}
336#else 459#endif /* CONFIG_SPARC32 */
460
461#ifdef CONFIG_SPARC64
337static void __init sun4v_cpu_probe(void) 462static void __init sun4v_cpu_probe(void)
338{ 463{
339 switch (sun4v_chip_type) { 464 switch (sun4v_chip_type) {
@@ -374,6 +499,6 @@ static int __init cpu_type_probe(void)
374 } 499 }
375 return 0; 500 return 0;
376} 501}
502#endif /* CONFIG_SPARC64 */
377 503
378arch_initcall(cpu_type_probe); 504early_initcall(cpu_type_probe);
379#endif
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 8de64c8126bc..d91fd782743a 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
202 new_tree->total_nodes = n; 202 new_tree->total_nodes = n;
203 memcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); 203 memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
204 204
205 prev_cpu = cpu = first_cpu(cpu_online_map); 205 prev_cpu = cpu = cpumask_first(cpu_online_mask);
206 206
207 /* Initialize all levels in the tree with the first CPU */ 207 /* Initialize all levels in the tree with the first CPU */
208 for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { 208 for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
@@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index)
381 } 381 }
382 382
383 /* Impossible, since num_online_cpus() <= num_possible_cpus() */ 383 /* Impossible, since num_online_cpus() <= num_possible_cpus() */
384 return first_cpu(cpu_online_map); 384 return cpumask_first(cpu_online_mask);
385} 385}
386 386
387static int _map_to_cpu(unsigned int index) 387static int _map_to_cpu(unsigned int index)
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index 62dc7a021413..113c052c3043 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -20,7 +20,6 @@
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/cpudata.h> 21#include <asm/cpudata.h>
22 22
23extern void cpu_probe(void);
24extern void clock_stop_probe(void); /* tadpole.c */ 23extern void clock_stop_probe(void); /* tadpole.c */
25extern void sun4c_probe_memerr_reg(void); 24extern void sun4c_probe_memerr_reg(void);
26 25
@@ -31,9 +30,9 @@ static char *cpu_mid_prop(void)
31 return "mid"; 30 return "mid";
32} 31}
33 32
34static int check_cpu_node(int nd, int *cur_inst, 33static int check_cpu_node(phandle nd, int *cur_inst,
35 int (*compare)(int, int, void *), void *compare_arg, 34 int (*compare)(phandle, int, void *), void *compare_arg,
36 int *prom_node, int *mid) 35 phandle *prom_node, int *mid)
37{ 36{
38 if (!compare(nd, *cur_inst, compare_arg)) { 37 if (!compare(nd, *cur_inst, compare_arg)) {
39 if (prom_node) 38 if (prom_node)
@@ -51,8 +50,8 @@ static int check_cpu_node(int nd, int *cur_inst,
51 return -ENODEV; 50 return -ENODEV;
52} 51}
53 52
54static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg, 53static int __cpu_find_by(int (*compare)(phandle, int, void *),
55 int *prom_node, int *mid) 54 void *compare_arg, phandle *prom_node, int *mid)
56{ 55{
57 struct device_node *dp; 56 struct device_node *dp;
58 int cur_inst; 57 int cur_inst;
@@ -71,7 +70,7 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
71 return -ENODEV; 70 return -ENODEV;
72} 71}
73 72
74static int cpu_instance_compare(int nd, int instance, void *_arg) 73static int cpu_instance_compare(phandle nd, int instance, void *_arg)
75{ 74{
76 int desired_instance = (int) _arg; 75 int desired_instance = (int) _arg;
77 76
@@ -80,13 +79,13 @@ static int cpu_instance_compare(int nd, int instance, void *_arg)
80 return -ENODEV; 79 return -ENODEV;
81} 80}
82 81
83int cpu_find_by_instance(int instance, int *prom_node, int *mid) 82int cpu_find_by_instance(int instance, phandle *prom_node, int *mid)
84{ 83{
85 return __cpu_find_by(cpu_instance_compare, (void *)instance, 84 return __cpu_find_by(cpu_instance_compare, (void *)instance,
86 prom_node, mid); 85 prom_node, mid);
87} 86}
88 87
89static int cpu_mid_compare(int nd, int instance, void *_arg) 88static int cpu_mid_compare(phandle nd, int instance, void *_arg)
90{ 89{
91 int desired_mid = (int) _arg; 90 int desired_mid = (int) _arg;
92 int this_mid; 91 int this_mid;
@@ -98,7 +97,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg)
98 return -ENODEV; 97 return -ENODEV;
99} 98}
100 99
101int cpu_find_by_mid(int mid, int *prom_node) 100int cpu_find_by_mid(int mid, phandle *prom_node)
102{ 101{
103 return __cpu_find_by(cpu_mid_compare, (void *)mid, 102 return __cpu_find_by(cpu_mid_compare, (void *)mid,
104 prom_node, NULL); 103 prom_node, NULL);
@@ -108,18 +107,19 @@ int cpu_find_by_mid(int mid, int *prom_node)
108 * address (0-3). This gives us the true hardware mid, which might have 107 * address (0-3). This gives us the true hardware mid, which might have
109 * some other bits set. On 4d hardware and software mids are the same. 108 * some other bits set. On 4d hardware and software mids are the same.
110 */ 109 */
111int cpu_get_hwmid(int prom_node) 110int cpu_get_hwmid(phandle prom_node)
112{ 111{
113 return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV); 112 return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
114} 113}
115 114
116void __init device_scan(void) 115void __init device_scan(void)
117{ 116{
118 prom_printf("Booting Linux...\n"); 117 printk(KERN_NOTICE "Booting Linux...\n");
119 118
120#ifndef CONFIG_SMP 119#ifndef CONFIG_SMP
121 { 120 {
122 int err, cpu_node; 121 phandle cpu_node;
122 int err;
123 err = cpu_find_by_instance(0, &cpu_node, NULL); 123 err = cpu_find_by_instance(0, &cpu_node, NULL);
124 if (err) { 124 if (err) {
125 /* Probably a sun4e, Sun is trying to trick us ;-) */ 125 /* Probably a sun4e, Sun is trying to trick us ;-) */
@@ -132,7 +132,6 @@ void __init device_scan(void)
132 } 132 }
133#endif /* !CONFIG_SMP */ 133#endif /* !CONFIG_SMP */
134 134
135 cpu_probe();
136 { 135 {
137 extern void auxio_probe(void); 136 extern void auxio_probe(void);
138 extern void auxio_power_probe(void); 137 extern void auxio_power_probe(void);
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 4a700f4b79ce..dd1342c0a3be 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
497 tag->num_records = ncpus; 497 tag->num_records = ncpus;
498 498
499 i = 0; 499 i = 0;
500 for_each_cpu_mask(cpu, *mask) { 500 for_each_cpu(cpu, mask) {
501 ent[i].cpu = cpu; 501 ent[i].cpu = cpu;
502 ent[i].result = DR_CPU_RES_OK; 502 ent[i].result = DR_CPU_RES_OK;
503 ent[i].stat = default_stat; 503 ent[i].stat = default_stat;
@@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
534 int resp_len, ncpus, cpu; 534 int resp_len, ncpus, cpu;
535 unsigned long flags; 535 unsigned long flags;
536 536
537 ncpus = cpus_weight(*mask); 537 ncpus = cpumask_weight(mask);
538 resp_len = dr_cpu_size_response(ncpus); 538 resp_len = dr_cpu_size_response(ncpus);
539 resp = kzalloc(resp_len, GFP_KERNEL); 539 resp = kzalloc(resp_len, GFP_KERNEL);
540 if (!resp) 540 if (!resp)
@@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
547 mdesc_populate_present_mask(mask); 547 mdesc_populate_present_mask(mask);
548 mdesc_fill_in_cpu_data(mask); 548 mdesc_fill_in_cpu_data(mask);
549 549
550 for_each_cpu_mask(cpu, *mask) { 550 for_each_cpu(cpu, mask) {
551 int err; 551 int err;
552 552
553 printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", 553 printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
@@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
593 int resp_len, ncpus, cpu; 593 int resp_len, ncpus, cpu;
594 unsigned long flags; 594 unsigned long flags;
595 595
596 ncpus = cpus_weight(*mask); 596 ncpus = cpumask_weight(mask);
597 resp_len = dr_cpu_size_response(ncpus); 597 resp_len = dr_cpu_size_response(ncpus);
598 resp = kzalloc(resp_len, GFP_KERNEL); 598 resp = kzalloc(resp_len, GFP_KERNEL);
599 if (!resp) 599 if (!resp)
@@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
603 resp_len, ncpus, mask, 603 resp_len, ncpus, mask,
604 DR_CPU_STAT_UNCONFIGURED); 604 DR_CPU_STAT_UNCONFIGURED);
605 605
606 for_each_cpu_mask(cpu, *mask) { 606 for_each_cpu(cpu, mask) {
607 int err; 607 int err;
608 608
609 printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", 609 printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
@@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,
649 649
650 purge_dups(cpu_list, tag->num_records); 650 purge_dups(cpu_list, tag->num_records);
651 651
652 cpus_clear(mask); 652 cpumask_clear(&mask);
653 for (i = 0; i < tag->num_records; i++) { 653 for (i = 0; i < tag->num_records; i++) {
654 if (cpu_list[i] == CPU_SENTINEL) 654 if (cpu_list[i] == CPU_SENTINEL)
655 continue; 655 continue;
656 656
657 if (cpu_list[i] < nr_cpu_ids) 657 if (cpu_list[i] < nr_cpu_ids)
658 cpu_set(cpu_list[i], mask); 658 cpumask_set_cpu(cpu_list[i], &mask);
659 } 659 }
660 660
661 if (tag->type == DR_CPU_CONFIGURE) 661 if (tag->type == DR_CPU_CONFIGURE)
@@ -1218,7 +1218,7 @@ static int ds_remove(struct vio_dev *vdev)
1218 return 0; 1218 return 0;
1219} 1219}
1220 1220
1221static struct vio_device_id __initdata ds_match[] = { 1221static const struct vio_device_id ds_match[] = {
1222 { 1222 {
1223 .type = "domain-services-port", 1223 .type = "domain-services-port",
1224 }, 1224 },
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 1504df8ddf70..f445e98463e6 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -229,7 +229,7 @@ real_irq_entry:
229#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
230 .globl patchme_maybe_smp_msg 230 .globl patchme_maybe_smp_msg
231 231
232 cmp %l7, 12 232 cmp %l7, 11
233patchme_maybe_smp_msg: 233patchme_maybe_smp_msg:
234 bgu maybe_smp4m_msg 234 bgu maybe_smp4m_msg
235 nop 235 nop
@@ -269,19 +269,22 @@ smp4m_ticker:
269 /* Here is where we check for possible SMP IPI passed to us 269 /* Here is where we check for possible SMP IPI passed to us
270 * on some level other than 15 which is the NMI and only used 270 * on some level other than 15 which is the NMI and only used
271 * for cross calls. That has a separate entry point below. 271 * for cross calls. That has a separate entry point below.
272 *
273 * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
272 */ 274 */
273maybe_smp4m_msg: 275maybe_smp4m_msg:
274 GET_PROCESSOR4M_ID(o3) 276 GET_PROCESSOR4M_ID(o3)
275 sethi %hi(sun4m_irq_percpu), %l5 277 sethi %hi(sun4m_irq_percpu), %l5
276 sll %o3, 2, %o3 278 sll %o3, 2, %o3
277 or %l5, %lo(sun4m_irq_percpu), %o5 279 or %l5, %lo(sun4m_irq_percpu), %o5
278 sethi %hi(0x40000000), %o2 280 sethi %hi(0x70000000), %o2 ! Check all soft-IRQs
279 ld [%o5 + %o3], %o1 281 ld [%o5 + %o3], %o1
280 ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending 282 ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
281 andcc %o3, %o2, %g0 283 andcc %o3, %o2, %g0
282 be,a smp4m_ticker 284 be,a smp4m_ticker
283 cmp %l7, 14 285 cmp %l7, 14
284 st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x40000000 286 /* Soft-IRQ IPI */
287 st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x70000000
285 WRITE_PAUSE 288 WRITE_PAUSE
286 ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending 289 ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
287 WRITE_PAUSE 290 WRITE_PAUSE
@@ -290,9 +293,27 @@ maybe_smp4m_msg:
290 WRITE_PAUSE 293 WRITE_PAUSE
291 wr %l4, PSR_ET, %psr 294 wr %l4, PSR_ET, %psr
292 WRITE_PAUSE 295 WRITE_PAUSE
293 call smp_reschedule_irq 296 srl %o3, 28, %o2 ! shift for simpler checks below
297maybe_smp4m_msg_check_single:
298 andcc %o2, 0x1, %g0
299 beq,a maybe_smp4m_msg_check_mask
300 andcc %o2, 0x2, %g0
301 call smp_call_function_single_interrupt
294 nop 302 nop
295 303 andcc %o2, 0x2, %g0
304maybe_smp4m_msg_check_mask:
305 beq,a maybe_smp4m_msg_check_resched
306 andcc %o2, 0x4, %g0
307 call smp_call_function_interrupt
308 nop
309 andcc %o2, 0x4, %g0
310maybe_smp4m_msg_check_resched:
311 /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
312 beq,a maybe_smp4m_msg_out
313 nop
314 call smp_resched_interrupt
315 nop
316maybe_smp4m_msg_out:
296 RESTORE_ALL 317 RESTORE_ALL
297 318
298 .align 4 319 .align 4
@@ -401,18 +422,18 @@ linux_trap_ipi15_sun4d:
4011: b,a 1b 4221: b,a 1b
402 423
403#ifdef CONFIG_SPARC_LEON 424#ifdef CONFIG_SPARC_LEON
404 425 .globl smpleon_ipi
405 .globl smpleon_ticker 426 .extern leon_ipi_interrupt
406 /* SMP per-cpu ticker interrupts are handled specially. */ 427 /* SMP per-cpu IPI interrupts are handled specially. */
407smpleon_ticker: 428smpleon_ipi:
408 SAVE_ALL 429 SAVE_ALL
409 or %l0, PSR_PIL, %g2 430 or %l0, PSR_PIL, %g2
410 wr %g2, 0x0, %psr 431 wr %g2, 0x0, %psr
411 WRITE_PAUSE 432 WRITE_PAUSE
412 wr %g2, PSR_ET, %psr 433 wr %g2, PSR_ET, %psr
413 WRITE_PAUSE 434 WRITE_PAUSE
414 call leon_percpu_timer_interrupt 435 call leonsmp_ipi_interrupt
415 add %sp, STACKFRAME_SZ, %o0 436 add %sp, STACKFRAME_SZ, %o1 ! pt_regs
416 wr %l0, PSR_ET, %psr 437 wr %l0, PSR_ET, %psr
417 WRITE_PAUSE 438 WRITE_PAUSE
418 RESTORE_ALL 439 RESTORE_ALL
@@ -801,7 +822,7 @@ vac_linesize_patch_32: subcc %l7, 32, %l7
801 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on 822 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
802 823
803/* 824/*
804 * Ugly, but we cant use hardware flushing on the sun4 and we'd require 825 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
805 * two instructions (Anton) 826 * two instructions (Anton)
806 */ 827 */
807vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 828vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
@@ -851,7 +872,7 @@ sun4c_fault:
851 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 872 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
852 873
853 /* If the kernel references a bum kernel pointer, or a pte which 874 /* If the kernel references a bum kernel pointer, or a pte which
854 * points to a non existant page in ram, we will run this code 875 * points to a non existent page in ram, we will run this code
855 * _forever_ and lock up the machine!!!!! So we must check for 876 * _forever_ and lock up the machine!!!!! So we must check for
856 * this condition, the AC_SYNC_ERR bits are what we must examine. 877 * this condition, the AC_SYNC_ERR bits are what we must examine.
857 * Also a parity error would make this happen as well. So we just 878 * Also a parity error would make this happen as well. So we just
@@ -1283,7 +1304,7 @@ linux_syscall_trace:
1283 .globl ret_from_fork 1304 .globl ret_from_fork
1284ret_from_fork: 1305ret_from_fork:
1285 call schedule_tail 1306 call schedule_tail
1286 mov %g3, %o0 1307 ld [%g3 + TI_TASK], %o0
1287 b ret_sys_call 1308 b ret_sys_call
1288 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 1309 ld [%sp + STACKFRAME_SZ + PT_I0], %o0
1289 1310
@@ -1583,7 +1604,7 @@ restore_current:
1583 retl 1604 retl
1584 nop 1605 nop
1585 1606
1586#ifdef CONFIG_PCI 1607#ifdef CONFIG_PCIC_PCI
1587#include <asm/pcic.h> 1608#include <asm/pcic.h>
1588 1609
1589 .align 4 1610 .align 4
@@ -1629,7 +1650,7 @@ pcic_nmi_trap_patch:
1629 rd %psr, %l0 1650 rd %psr, %l0
1630 .word 0 1651 .word 0
1631 1652
1632#endif /* CONFIG_PCI */ 1653#endif /* CONFIG_PCIC_PCI */
1633 1654
1634 .globl flushw_all 1655 .globl flushw_all
1635flushw_all: 1656flushw_all:
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index c011b932bb17..d1f1361c4167 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
213struct ino_bucket { 213struct ino_bucket {
214/*0x00*/unsigned long __irq_chain_pa; 214/*0x00*/unsigned long __irq_chain_pa;
215 215
216 /* Virtual interrupt number assigned to this INO. */ 216 /* Interrupt number assigned to this INO. */
217/*0x08*/unsigned int __virt_irq; 217/*0x08*/unsigned int __irq;
218/*0x0c*/unsigned int __pad; 218/*0x0c*/unsigned int __pad;
219}; 219};
220 220
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 21bb2590d4ae..587785759838 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -73,12 +73,11 @@ sun4e_notsup:
73 73
74 /* The Sparc trap table, bootloader gives us control at _start. */ 74 /* The Sparc trap table, bootloader gives us control at _start. */
75 __HEAD 75 __HEAD
76 .globl start, _stext, _start, __stext 76 .globl _stext, _start, __stext
77 .globl trapbase 77 .globl trapbase
78_start: /* danger danger */ 78_start: /* danger danger */
79__stext: 79__stext:
80_stext: 80_stext:
81start:
82trapbase: 81trapbase:
83#ifdef CONFIG_SMP 82#ifdef CONFIG_SMP
84trapbase_cpu0: 83trapbase_cpu0:
@@ -811,31 +810,25 @@ found_version:
811got_prop: 810got_prop:
812#ifdef CONFIG_SPARC_LEON 811#ifdef CONFIG_SPARC_LEON
813 /* no cpu-type check is needed, it is a SPARC-LEON */ 812 /* no cpu-type check is needed, it is a SPARC-LEON */
814#ifdef CONFIG_SMP
815 ba leon_smp_init
816 nop
817 813
818 .global leon_smp_init 814 sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
819leon_smp_init:
820 sethi %hi(boot_cpu_id), %g1 ! master always 0
821 stb %g0, [%g1 + %lo(boot_cpu_id)]
822 sethi %hi(boot_cpu_id4), %g1 ! master always 0
823 stb %g0, [%g1 + %lo(boot_cpu_id4)]
824 815
825 rd %asr17,%g1 816#ifdef CONFIG_SMP
826 srl %g1,28,%g1 817 ldub [%g2 + %lo(boot_cpu_id)], %g1
818 cmp %g1, 0xff ! unset means first CPU
819 bne leon_smp_cpu_startup ! continue only with master
820 nop
821#endif
822 /* Get CPU-ID from most significant 4-bit of ASR17 */
823 rd %asr17, %g1
824 srl %g1, 28, %g1
827 825
828 cmp %g0,%g1 826 /* Update boot_cpu_id only on boot cpu */
829 beq sun4c_continue_boot !continue with master 827 stub %g1, [%g2 + %lo(boot_cpu_id)]
830 nop
831 828
832 ba leon_smp_cpu_startup
833 nop
834#else
835 ba sun4c_continue_boot 829 ba sun4c_continue_boot
836 nop 830 nop
837#endif 831#endif
838#endif
839 set cputypval, %o2 832 set cputypval, %o2
840 ldub [%o2 + 0x4], %l1 833 ldub [%o2 + 0x4], %l1
841 834
@@ -894,9 +887,6 @@ sun4d_init:
894 sta %g4, [%g0] ASI_M_VIKING_TMP1 887 sta %g4, [%g0] ASI_M_VIKING_TMP1
895 sethi %hi(boot_cpu_id), %g5 888 sethi %hi(boot_cpu_id), %g5
896 stb %g4, [%g5 + %lo(boot_cpu_id)] 889 stb %g4, [%g5 + %lo(boot_cpu_id)]
897 sll %g4, 2, %g4
898 sethi %hi(boot_cpu_id4), %g5
899 stb %g4, [%g5 + %lo(boot_cpu_id4)]
900#endif 890#endif
901 891
902 /* Fall through to sun4m_init */ 892 /* Fall through to sun4m_init */
@@ -1025,14 +1015,28 @@ sun4c_continue_boot:
1025 bl 1b 1015 bl 1b
1026 add %o0, 0x1, %o0 1016 add %o0, 0x1, %o0
1027 1017
1018 /* If boot_cpu_id has not been setup by machine specific
1019 * init-code above we default it to zero.
1020 */
1021 sethi %hi(boot_cpu_id), %g2
1022 ldub [%g2 + %lo(boot_cpu_id)], %g3
1023 cmp %g3, 0xff
1024 bne 1f
1025 nop
1026 mov %g0, %g3
1027 stub %g3, [%g2 + %lo(boot_cpu_id)]
1028
10291: /* boot_cpu_id set. calculate boot_cpu_id4 = boot_cpu_id*4 */
1030 sll %g3, 2, %g3
1031 sethi %hi(boot_cpu_id4), %g2
1032 stub %g3, [%g2 + %lo(boot_cpu_id4)]
1033
1028 /* Initialize the uwinmask value for init task just in case. 1034 /* Initialize the uwinmask value for init task just in case.
1029 * But first make current_set[boot_cpu_id] point to something useful. 1035 * But first make current_set[boot_cpu_id] point to something useful.
1030 */ 1036 */
1031 set init_thread_union, %g6 1037 set init_thread_union, %g6
1032 set current_set, %g2 1038 set current_set, %g2
1033#ifdef CONFIG_SMP 1039#ifdef CONFIG_SMP
1034 sethi %hi(boot_cpu_id4), %g3
1035 ldub [%g3 + %lo(boot_cpu_id4)], %g3
1036 st %g6, [%g2] 1040 st %g6, [%g2]
1037 add %g2, %g3, %g2 1041 add %g2, %g3, %g2
1038#endif 1042#endif
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index f8f21050448b..aa594c792d19 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -85,7 +85,7 @@ sparc_ramdisk_image64:
85sparc64_boot: 85sparc64_boot:
86 mov %o4, %l7 86 mov %o4, %l7
87 87
88 /* We need to remap the kernel. Use position independant 88 /* We need to remap the kernel. Use position independent
89 * code to remap us to KERNBASE. 89 * code to remap us to KERNBASE.
90 * 90 *
91 * SILO can invoke us with 32-bit address masking enabled, 91 * SILO can invoke us with 32-bit address masking enabled,
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index 5fe3d65581f7..35f141a9f506 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -15,7 +15,7 @@ EXPORT_SYMBOL(init_task);
15 15
16/* .text section in head.S is aligned at 8k boundary and this gets linked 16/* .text section in head.S is aligned at 8k boundary and this gets linked
17 * right after that so that the init_thread_union is aligned properly as well. 17 * right after that so that the init_thread_union is aligned properly as well.
18 * If this is not aligned on a 8k boundry, then you should change code 18 * If this is not aligned on a 8k boundary, then you should change code
19 * in etrap.S which assumes it. 19 * in etrap.S which assumes it.
20 */ 20 */
21union thread_union init_thread_union __init_task_data = 21union thread_union init_thread_union __init_task_data =
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 47977a77f6c6..6f01e8c83197 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
255static int iommu_alloc_ctx(struct iommu *iommu) 255static int iommu_alloc_ctx(struct iommu *iommu)
256{ 256{
257 int lowest = iommu->ctx_lowest_free; 257 int lowest = iommu->ctx_lowest_free;
258 int sz = IOMMU_NUM_CTXS - lowest; 258 int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259 int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
260 259
261 if (unlikely(n == sz)) { 260 if (unlikely(n == IOMMU_NUM_CTXS)) {
262 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); 261 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
263 if (unlikely(n == lowest)) { 262 if (unlikely(n == lowest)) {
264 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); 263 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
@@ -334,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
334 void *cpu, dma_addr_t dvma) 333 void *cpu, dma_addr_t dvma)
335{ 334{
336 struct iommu *iommu; 335 struct iommu *iommu;
337 iopte_t *iopte;
338 unsigned long flags, order, npages; 336 unsigned long flags, order, npages;
339 337
340 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 338 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
341 iommu = dev->archdata.iommu; 339 iommu = dev->archdata.iommu;
342 iopte = iommu->page_table +
343 ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
344 340
345 spin_lock_irqsave(&iommu->lock, flags); 341 spin_lock_irqsave(&iommu->lock, flags);
346 342
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72a..1c9c80a1a86a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,19 @@
50#include <asm/io-unit.h> 50#include <asm/io-unit.h>
51#include <asm/leon.h> 51#include <asm/leon.h>
52 52
53#ifdef CONFIG_SPARC_LEON 53/* This function must make sure that caches and memory are coherent after DMA
54#define mmu_inval_dma_area(p, l) leon_flush_dcache_all() 54 * On LEON systems without cache snooping it flushes the entire D-CACHE.
55 */
56#ifndef CONFIG_SPARC_LEON
57static inline void dma_make_coherent(unsigned long pa, unsigned long len)
58{
59}
55#else 60#else
56#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ 61static inline void dma_make_coherent(unsigned long pa, unsigned long len)
62{
63 if (!sparc_leon3_snooping_enabled())
64 leon_flush_dcache_all();
65}
57#endif 66#endif
58 67
59static struct resource *_sparc_find_resource(struct resource *r, 68static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +263,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
254 dma_addr_t *dma_addrp, gfp_t gfp) 263 dma_addr_t *dma_addrp, gfp_t gfp)
255{ 264{
256 struct platform_device *op = to_platform_device(dev); 265 struct platform_device *op = to_platform_device(dev);
257 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 266 unsigned long len_total = PAGE_ALIGN(len);
258 unsigned long va; 267 unsigned long va;
259 struct resource *res; 268 struct resource *res;
260 int order; 269 int order;
@@ -280,7 +289,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
280 printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); 289 printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
281 goto err_nova; 290 goto err_nova;
282 } 291 }
283 mmu_inval_dma_area(va, len_total); 292
284 // XXX The mmu_map_dma_area does this for us below, see comments. 293 // XXX The mmu_map_dma_area does this for us below, see comments.
285 // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); 294 // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
286 /* 295 /*
@@ -297,9 +306,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
297err_noiommu: 306err_noiommu:
298 release_resource(res); 307 release_resource(res);
299err_nova: 308err_nova:
300 free_pages(va, order);
301err_nomem:
302 kfree(res); 309 kfree(res);
310err_nomem:
311 free_pages(va, order);
303err_nopages: 312err_nopages:
304 return NULL; 313 return NULL;
305} 314}
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
321 return; 330 return;
322 } 331 }
323 332
324 n = (n + PAGE_SIZE-1) & PAGE_MASK; 333 n = PAGE_ALIGN(n);
325 if ((res->end-res->start)+1 != n) { 334 if ((res->end-res->start)+1 != n) {
326 printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", 335 printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
327 (long)((res->end-res->start)+1), n); 336 (long)((res->end-res->start)+1), n);
@@ -331,7 +340,6 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
331 release_resource(res); 340 release_resource(res);
332 kfree(res); 341 kfree(res);
333 342
334 /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
335 pgv = virt_to_page(p); 343 pgv = virt_to_page(p);
336 mmu_unmap_dma_area(dev, ba, n); 344 mmu_unmap_dma_area(dev, ba, n);
337 345
@@ -408,9 +416,6 @@ struct dma_map_ops sbus_dma_ops = {
408 .sync_sg_for_device = sbus_sync_sg_for_device, 416 .sync_sg_for_device = sbus_sync_sg_for_device,
409}; 417};
410 418
411struct dma_map_ops *dma_ops = &sbus_dma_ops;
412EXPORT_SYMBOL(dma_ops);
413
414static int __init sparc_register_ioport(void) 419static int __init sparc_register_ioport(void)
415{ 420{
416 register_proc_sparc_ioport(); 421 register_proc_sparc_ioport();
@@ -422,7 +427,9 @@ arch_initcall(sparc_register_ioport);
422 427
423#endif /* CONFIG_SBUS */ 428#endif /* CONFIG_SBUS */
424 429
425#ifdef CONFIG_PCI 430
431/* LEON reuses PCI DMA ops */
432#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
426 433
427/* Allocate and map kernel buffer using consistent mode DMA for a device. 434/* Allocate and map kernel buffer using consistent mode DMA for a device.
428 * hwdev should be valid struct pci_dev pointer for PCI devices. 435 * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +437,8 @@ arch_initcall(sparc_register_ioport);
430static void *pci32_alloc_coherent(struct device *dev, size_t len, 437static void *pci32_alloc_coherent(struct device *dev, size_t len,
431 dma_addr_t *pba, gfp_t gfp) 438 dma_addr_t *pba, gfp_t gfp)
432{ 439{
433 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 440 unsigned long len_total = PAGE_ALIGN(len);
434 unsigned long va; 441 void *va;
435 struct resource *res; 442 struct resource *res;
436 int order; 443 int order;
437 444
@@ -443,34 +450,33 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
443 } 450 }
444 451
445 order = get_order(len_total); 452 order = get_order(len_total);
446 va = __get_free_pages(GFP_KERNEL, order); 453 va = (void *) __get_free_pages(GFP_KERNEL, order);
447 if (va == 0) { 454 if (va == NULL) {
448 printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); 455 printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
449 return NULL; 456 goto err_nopages;
450 } 457 }
451 458
452 if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { 459 if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
453 free_pages(va, order);
454 printk("pci_alloc_consistent: no core\n"); 460 printk("pci_alloc_consistent: no core\n");
455 return NULL; 461 goto err_nomem;
456 } 462 }
457 463
458 if (allocate_resource(&_sparc_dvma, res, len_total, 464 if (allocate_resource(&_sparc_dvma, res, len_total,
459 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { 465 _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
460 printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total); 466 printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
461 free_pages(va, order); 467 goto err_nova;
462 kfree(res);
463 return NULL;
464 } 468 }
465 mmu_inval_dma_area(va, len_total);
466#if 0
467/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
468 (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
469#endif
470 sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); 469 sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
471 470
472 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ 471 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
473 return (void *) res->start; 472 return (void *) res->start;
473
474err_nova:
475 kfree(res);
476err_nomem:
477 free_pages((unsigned long)va, order);
478err_nopages:
479 return NULL;
474} 480}
475 481
476/* Free and unmap a consistent DMA buffer. 482/* Free and unmap a consistent DMA buffer.
@@ -485,7 +491,6 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
485 dma_addr_t ba) 491 dma_addr_t ba)
486{ 492{
487 struct resource *res; 493 struct resource *res;
488 unsigned long pgp;
489 494
490 if ((res = _sparc_find_resource(&_sparc_dvma, 495 if ((res = _sparc_find_resource(&_sparc_dvma,
491 (unsigned long)p)) == NULL) { 496 (unsigned long)p)) == NULL) {
@@ -498,21 +503,19 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
498 return; 503 return;
499 } 504 }
500 505
501 n = (n + PAGE_SIZE-1) & PAGE_MASK; 506 n = PAGE_ALIGN(n);
502 if ((res->end-res->start)+1 != n) { 507 if ((res->end-res->start)+1 != n) {
503 printk("pci_free_consistent: region 0x%lx asked 0x%lx\n", 508 printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
504 (long)((res->end-res->start)+1), (long)n); 509 (long)((res->end-res->start)+1), (long)n);
505 return; 510 return;
506 } 511 }
507 512
508 pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */ 513 dma_make_coherent(ba, n);
509 mmu_inval_dma_area(pgp, n);
510 sparc_unmapiorange((unsigned long)p, n); 514 sparc_unmapiorange((unsigned long)p, n);
511 515
512 release_resource(res); 516 release_resource(res);
513 kfree(res); 517 kfree(res);
514 518 free_pages((unsigned long)phys_to_virt(ba), get_order(n));
515 free_pages(pgp, get_order(n));
516} 519}
517 520
518/* 521/*
@@ -527,6 +530,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
527 return page_to_phys(page) + offset; 530 return page_to_phys(page) + offset;
528} 531}
529 532
533static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
534 enum dma_data_direction dir, struct dma_attrs *attrs)
535{
536 if (dir != PCI_DMA_TODEVICE)
537 dma_make_coherent(ba, PAGE_ALIGN(size));
538}
539
530/* Map a set of buffers described by scatterlist in streaming 540/* Map a set of buffers described by scatterlist in streaming
531 * mode for DMA. This is the scather-gather version of the 541 * mode for DMA. This is the scather-gather version of the
532 * above pci_map_single interface. Here the scatter gather list 542 * above pci_map_single interface. Here the scatter gather list
@@ -551,8 +561,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
551 561
552 /* IIep is write-through, not flushing. */ 562 /* IIep is write-through, not flushing. */
553 for_each_sg(sgl, sg, nents, n) { 563 for_each_sg(sgl, sg, nents, n) {
554 BUG_ON(page_address(sg_page(sg)) == NULL); 564 sg->dma_address = sg_phys(sg);
555 sg->dma_address = virt_to_phys(sg_virt(sg));
556 sg->dma_length = sg->length; 565 sg->dma_length = sg->length;
557 } 566 }
558 return nents; 567 return nents;
@@ -571,10 +580,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
571 580
572 if (dir != PCI_DMA_TODEVICE) { 581 if (dir != PCI_DMA_TODEVICE) {
573 for_each_sg(sgl, sg, nents, n) { 582 for_each_sg(sgl, sg, nents, n) {
574 BUG_ON(page_address(sg_page(sg)) == NULL); 583 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
575 mmu_inval_dma_area(
576 (unsigned long) page_address(sg_page(sg)),
577 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
578 } 584 }
579 } 585 }
580} 586}
@@ -593,8 +599,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
593 size_t size, enum dma_data_direction dir) 599 size_t size, enum dma_data_direction dir)
594{ 600{
595 if (dir != PCI_DMA_TODEVICE) { 601 if (dir != PCI_DMA_TODEVICE) {
596 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 602 dma_make_coherent(ba, PAGE_ALIGN(size));
597 (size + PAGE_SIZE-1) & PAGE_MASK);
598 } 603 }
599} 604}
600 605
@@ -602,8 +607,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
602 size_t size, enum dma_data_direction dir) 607 size_t size, enum dma_data_direction dir)
603{ 608{
604 if (dir != PCI_DMA_TODEVICE) { 609 if (dir != PCI_DMA_TODEVICE) {
605 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 610 dma_make_coherent(ba, PAGE_ALIGN(size));
606 (size + PAGE_SIZE-1) & PAGE_MASK);
607 } 611 }
608} 612}
609 613
@@ -621,10 +625,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
621 625
622 if (dir != PCI_DMA_TODEVICE) { 626 if (dir != PCI_DMA_TODEVICE) {
623 for_each_sg(sgl, sg, nents, n) { 627 for_each_sg(sgl, sg, nents, n) {
624 BUG_ON(page_address(sg_page(sg)) == NULL); 628 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
625 mmu_inval_dma_area(
626 (unsigned long) page_address(sg_page(sg)),
627 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
628 } 629 }
629 } 630 }
630} 631}
@@ -637,10 +638,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
637 638
638 if (dir != PCI_DMA_TODEVICE) { 639 if (dir != PCI_DMA_TODEVICE) {
639 for_each_sg(sgl, sg, nents, n) { 640 for_each_sg(sgl, sg, nents, n) {
640 BUG_ON(page_address(sg_page(sg)) == NULL); 641 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
641 mmu_inval_dma_area(
642 (unsigned long) page_address(sg_page(sg)),
643 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
644 } 642 }
645 } 643 }
646} 644}
@@ -649,6 +647,7 @@ struct dma_map_ops pci32_dma_ops = {
649 .alloc_coherent = pci32_alloc_coherent, 647 .alloc_coherent = pci32_alloc_coherent,
650 .free_coherent = pci32_free_coherent, 648 .free_coherent = pci32_free_coherent,
651 .map_page = pci32_map_page, 649 .map_page = pci32_map_page,
650 .unmap_page = pci32_unmap_page,
652 .map_sg = pci32_map_sg, 651 .map_sg = pci32_map_sg,
653 .unmap_sg = pci32_unmap_sg, 652 .unmap_sg = pci32_unmap_sg,
654 .sync_single_for_cpu = pci32_sync_single_for_cpu, 653 .sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +657,16 @@ struct dma_map_ops pci32_dma_ops = {
658}; 657};
659EXPORT_SYMBOL(pci32_dma_ops); 658EXPORT_SYMBOL(pci32_dma_ops);
660 659
661#endif /* CONFIG_PCI */ 660#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
661
662#ifdef CONFIG_SPARC_LEON
663struct dma_map_ops *dma_ops = &pci32_dma_ops;
664#elif defined(CONFIG_SBUS)
665struct dma_map_ops *dma_ops = &sbus_dma_ops;
666#endif
667
668EXPORT_SYMBOL(dma_ops);
669
662 670
663/* 671/*
664 * Return whether the given PCI device DMA address mask can be 672 * Return whether the given PCI device DMA address mask can be
@@ -717,7 +725,7 @@ static const struct file_operations sparc_io_proc_fops = {
717static struct resource *_sparc_find_resource(struct resource *root, 725static struct resource *_sparc_find_resource(struct resource *root,
718 unsigned long hit) 726 unsigned long hit)
719{ 727{
720 struct resource *tmp; 728 struct resource *tmp;
721 729
722 for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { 730 for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
723 if (tmp->start <= hit && tmp->end >= hit) 731 if (tmp->start <= hit && tmp->end >= hit)
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index db7513881530..100b9c204e78 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,5 +1,62 @@
1#include <linux/platform_device.h>
2
1#include <asm/btfixup.h> 3#include <asm/btfixup.h>
2 4
5struct irq_bucket {
6 struct irq_bucket *next;
7 unsigned int real_irq;
8 unsigned int irq;
9 unsigned int pil;
10};
11
12#define SUN4D_MAX_BOARD 10
13#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5)
14
15/* Map between the irq identifier used in hw to the
16 * irq_bucket. The map is sufficient large to hold
17 * the sun4d hw identifiers.
18 */
19extern struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
20
21
22/* sun4m specific type definitions */
23
24/* This maps direct to CPU specific interrupt registers */
25struct sun4m_irq_percpu {
26 u32 pending;
27 u32 clear;
28 u32 set;
29};
30
31/* This maps direct to global interrupt registers */
32struct sun4m_irq_global {
33 u32 pending;
34 u32 mask;
35 u32 mask_clear;
36 u32 mask_set;
37 u32 interrupt_target;
38};
39
40extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
41extern struct sun4m_irq_global __iomem *sun4m_irq_global;
42
43/*
44 * Platform specific irq configuration
45 * The individual platforms assign their platform
46 * specifics in their init functions.
47 */
48struct sparc_irq_config {
49 void (*init_timers)(irq_handler_t);
50 unsigned int (*build_device_irq)(struct platform_device *op,
51 unsigned int real_irq);
52};
53extern struct sparc_irq_config sparc_irq_config;
54
55unsigned int irq_alloc(unsigned int real_irq, unsigned int pil);
56void irq_link(unsigned int irq);
57void irq_unlink(unsigned int irq);
58void handler_irq(unsigned int pil, struct pt_regs *regs);
59
3/* Dave Redman (djhr@tadpole.co.uk) 60/* Dave Redman (djhr@tadpole.co.uk)
4 * changed these to function pointers.. it saves cycles and will allow 61 * changed these to function pointers.. it saves cycles and will allow
5 * the irq dependencies to be split into different files at a later date 62 * the irq dependencies to be split into different files at a later date
@@ -8,33 +65,9 @@
8 * Changed these to btfixup entities... It saves cycles :) 65 * Changed these to btfixup entities... It saves cycles :)
9 */ 66 */
10 67
11BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
12BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
13BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
14BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
15BTFIXUPDEF_CALL(void, clear_clock_irq, void) 68BTFIXUPDEF_CALL(void, clear_clock_irq, void)
16BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int) 69BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
17 70
18static inline void __disable_irq(unsigned int irq)
19{
20 BTFIXUP_CALL(disable_irq)(irq);
21}
22
23static inline void __enable_irq(unsigned int irq)
24{
25 BTFIXUP_CALL(enable_irq)(irq);
26}
27
28static inline void disable_pil_irq(unsigned int irq)
29{
30 BTFIXUP_CALL(disable_pil_irq)(irq);
31}
32
33static inline void enable_pil_irq(unsigned int irq)
34{
35 BTFIXUP_CALL(enable_pil_irq)(irq);
36}
37
38static inline void clear_clock_irq(void) 71static inline void clear_clock_irq(void)
39{ 72{
40 BTFIXUP_CALL(clear_clock_irq)(); 73 BTFIXUP_CALL(clear_clock_irq)();
@@ -45,12 +78,6 @@ static inline void load_profile_irq(int cpu, int limit)
45 BTFIXUP_CALL(load_profile_irq)(cpu, limit); 78 BTFIXUP_CALL(load_profile_irq)(cpu, limit);
46} 79}
47 80
48extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
49
50extern void claim_ticker14(irq_handler_t irq_handler,
51 int irq,
52 unsigned int timeout);
53
54#ifdef CONFIG_SMP 81#ifdef CONFIG_SMP
55BTFIXUPDEF_CALL(void, set_cpu_int, int, int) 82BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
56BTFIXUPDEF_CALL(void, clear_cpu_int, int, int) 83BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
@@ -59,4 +86,10 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
59#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level) 86#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
60#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level) 87#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
61#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu) 88#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
89
90/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
91#define SUN4D_IPI_IRQ 14
92
93extern void sun4d_ipi_interrupt(void);
94
62#endif 95#endif
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index e1af43728329..9b89d842913c 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the 2 * Interrupt request handling routines. On the
3 * Sparc the IRQs are basically 'cast in stone' 3 * Sparc the IRQs are basically 'cast in stone'
4 * and you are supposed to probe the prom's device 4 * and you are supposed to probe the prom's device
5 * node trees to find out who's got which IRQ. 5 * node trees to find out who's got which IRQ.
6 * 6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) 8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
@@ -11,40 +11,12 @@
11 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org) 11 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
12 */ 12 */
13 13
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/ptrace.h>
17#include <linux/errno.h>
18#include <linux/linkage.h>
19#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
20#include <linux/signal.h>
21#include <linux/interrupt.h>
22#include <linux/slab.h>
23#include <linux/random.h>
24#include <linux/init.h>
25#include <linux/smp.h>
26#include <linux/delay.h>
27#include <linux/threads.h>
28#include <linux/spinlock.h>
29#include <linux/seq_file.h> 15#include <linux/seq_file.h>
30 16
31#include <asm/ptrace.h>
32#include <asm/processor.h>
33#include <asm/system.h>
34#include <asm/psr.h>
35#include <asm/smp.h>
36#include <asm/vaddrs.h>
37#include <asm/timer.h>
38#include <asm/openprom.h>
39#include <asm/oplib.h>
40#include <asm/traps.h>
41#include <asm/irq.h>
42#include <asm/io.h>
43#include <asm/pgalloc.h>
44#include <asm/pgtable.h>
45#include <asm/pcic.h>
46#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
47#include <asm/irq_regs.h> 18#include <asm/cpudata.h>
19#include <asm/pcic.h>
48#include <asm/leon.h> 20#include <asm/leon.h>
49 21
50#include "kernel.h" 22#include "kernel.h"
@@ -57,7 +29,11 @@
57#define SMP_NOP2 29#define SMP_NOP2
58#define SMP_NOP3 30#define SMP_NOP3
59#endif /* SMP */ 31#endif /* SMP */
60unsigned long __raw_local_irq_save(void) 32
33/* platform specific irq setup */
34struct sparc_irq_config sparc_irq_config;
35
36unsigned long arch_local_irq_save(void)
61{ 37{
62 unsigned long retval; 38 unsigned long retval;
63 unsigned long tmp; 39 unsigned long tmp;
@@ -74,8 +50,9 @@ unsigned long __raw_local_irq_save(void)
74 50
75 return retval; 51 return retval;
76} 52}
53EXPORT_SYMBOL(arch_local_irq_save);
77 54
78void raw_local_irq_enable(void) 55void arch_local_irq_enable(void)
79{ 56{
80 unsigned long tmp; 57 unsigned long tmp;
81 58
@@ -89,8 +66,9 @@ void raw_local_irq_enable(void)
89 : "i" (PSR_PIL) 66 : "i" (PSR_PIL)
90 : "memory"); 67 : "memory");
91} 68}
69EXPORT_SYMBOL(arch_local_irq_enable);
92 70
93void raw_local_irq_restore(unsigned long old_psr) 71void arch_local_irq_restore(unsigned long old_psr)
94{ 72{
95 unsigned long tmp; 73 unsigned long tmp;
96 74
@@ -105,10 +83,7 @@ void raw_local_irq_restore(unsigned long old_psr)
105 : "i" (PSR_PIL), "r" (old_psr) 83 : "i" (PSR_PIL), "r" (old_psr)
106 : "memory"); 84 : "memory");
107} 85}
108 86EXPORT_SYMBOL(arch_local_irq_restore);
109EXPORT_SYMBOL(__raw_local_irq_save);
110EXPORT_SYMBOL(raw_local_irq_enable);
111EXPORT_SYMBOL(raw_local_irq_restore);
112 87
113/* 88/*
114 * Dave Redman (djhr@tadpole.co.uk) 89 * Dave Redman (djhr@tadpole.co.uk)
@@ -127,309 +102,185 @@ EXPORT_SYMBOL(raw_local_irq_restore);
127 * directed CPU interrupts using the existing enable/disable irq code 102 * directed CPU interrupts using the existing enable/disable irq code
128 * with tweaks. 103 * with tweaks.
129 * 104 *
105 * Sun4d complicates things even further. IRQ numbers are arbitrary
106 * 32-bit values in that case. Since this is similar to sparc64,
107 * we adopt a virtual IRQ numbering scheme as is done there.
108 * Virutal interrupt numbers are allocated by build_irq(). So NR_IRQS
109 * just becomes a limit of how many interrupt sources we can handle in
110 * a single system. Even fully loaded SS2000 machines top off at
111 * about 32 interrupt sources or so, therefore a NR_IRQS value of 64
112 * is more than enough.
113 *
114 * We keep a map of per-PIL enable interrupts. These get wired
115 * up via the irq_chip->startup() method which gets invoked by
116 * the generic IRQ layer during request_irq().
130 */ 117 */
131 118
132static void irq_panic(void)
133{
134 extern char *cputypval;
135 prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
136 prom_halt();
137}
138 119
139void (*sparc_init_timers)(irq_handler_t ) = 120/* Table of allocated irqs. Unused entries has irq == 0 */
140 (void (*)(irq_handler_t )) irq_panic; 121static struct irq_bucket irq_table[NR_IRQS];
122/* Protect access to irq_table */
123static DEFINE_SPINLOCK(irq_table_lock);
141 124
142/* 125/* Map between the irq identifier used in hw to the irq_bucket. */
143 * Dave Redman (djhr@tadpole.co.uk) 126struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
144 * 127/* Protect access to irq_map */
145 * There used to be extern calls and hard coded values here.. very sucky! 128static DEFINE_SPINLOCK(irq_map_lock);
146 * instead, because some of the devices attach very early, I do something
147 * equally sucky but at least we'll never try to free statically allocated
148 * space or call kmalloc before kmalloc_init :(.
149 *
150 * In fact it's the timer10 that attaches first.. then timer14
151 * then kmalloc_init is called.. then the tty interrupts attach.
152 * hmmm....
153 *
154 */
155#define MAX_STATIC_ALLOC 4
156struct irqaction static_irqaction[MAX_STATIC_ALLOC];
157int static_irq_count;
158
159static struct {
160 struct irqaction *action;
161 int flags;
162} sparc_irq[NR_IRQS];
163#define SPARC_IRQ_INPROGRESS 1
164 129
165/* Used to protect the IRQ action lists */ 130/* Allocate a new irq from the irq_table */
166DEFINE_SPINLOCK(irq_action_lock); 131unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
167
168int show_interrupts(struct seq_file *p, void *v)
169{ 132{
170 int i = *(loff_t *) v;
171 struct irqaction * action;
172 unsigned long flags; 133 unsigned long flags;
173#ifdef CONFIG_SMP 134 unsigned int i;
174 int j;
175#endif
176 135
177 if (sparc_cpu_model == sun4d) { 136 spin_lock_irqsave(&irq_table_lock, flags);
178 extern int show_sun4d_interrupts(struct seq_file *, void *); 137 for (i = 1; i < NR_IRQS; i++) {
179 138 if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
180 return show_sun4d_interrupts(p, v); 139 goto found;
181 } 140 }
182 spin_lock_irqsave(&irq_action_lock, flags); 141
142 for (i = 1; i < NR_IRQS; i++) {
143 if (!irq_table[i].irq)
144 break;
145 }
146
183 if (i < NR_IRQS) { 147 if (i < NR_IRQS) {
184 action = sparc_irq[i].action; 148 irq_table[i].real_irq = real_irq;
185 if (!action) 149 irq_table[i].irq = i;
186 goto out_unlock; 150 irq_table[i].pil = pil;
187 seq_printf(p, "%3d: ", i); 151 } else {
188#ifndef CONFIG_SMP 152 printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
189 seq_printf(p, "%10u ", kstat_irqs(i)); 153 i = 0;
190#else
191 for_each_online_cpu(j) {
192 seq_printf(p, "%10u ",
193 kstat_cpu(j).irqs[i]);
194 }
195#endif
196 seq_printf(p, " %c %s",
197 (action->flags & IRQF_DISABLED) ? '+' : ' ',
198 action->name);
199 for (action=action->next; action; action = action->next) {
200 seq_printf(p, ",%s %s",
201 (action->flags & IRQF_DISABLED) ? " +" : "",
202 action->name);
203 }
204 seq_putc(p, '\n');
205 } 154 }
206out_unlock: 155found:
207 spin_unlock_irqrestore(&irq_action_lock, flags); 156 spin_unlock_irqrestore(&irq_table_lock, flags);
208 return 0; 157
158 return i;
209} 159}
210 160
211void free_irq(unsigned int irq, void *dev_id) 161/* Based on a single pil handler_irq may need to call several
162 * interrupt handlers. Use irq_map as entry to irq_table,
163 * and let each entry in irq_table point to the next entry.
164 */
165void irq_link(unsigned int irq)
212{ 166{
213 struct irqaction * action; 167 struct irq_bucket *p;
214 struct irqaction **actionp; 168 unsigned long flags;
215 unsigned long flags; 169 unsigned int pil;
216 unsigned int cpu_irq;
217
218 if (sparc_cpu_model == sun4d) {
219 extern void sun4d_free_irq(unsigned int, void *);
220
221 sun4d_free_irq(irq, dev_id);
222 return;
223 }
224 cpu_irq = irq & (NR_IRQS - 1);
225 if (cpu_irq > 14) { /* 14 irq levels on the sparc */
226 printk("Trying to free bogus IRQ %d\n", irq);
227 return;
228 }
229
230 spin_lock_irqsave(&irq_action_lock, flags);
231 170
232 actionp = &sparc_irq[cpu_irq].action; 171 BUG_ON(irq >= NR_IRQS);
233 action = *actionp;
234 172
235 if (!action->handler) { 173 spin_lock_irqsave(&irq_map_lock, flags);
236 printk("Trying to free free IRQ%d\n",irq);
237 goto out_unlock;
238 }
239 if (dev_id) {
240 for (; action; action = action->next) {
241 if (action->dev_id == dev_id)
242 break;
243 actionp = &action->next;
244 }
245 if (!action) {
246 printk("Trying to free free shared IRQ%d\n",irq);
247 goto out_unlock;
248 }
249 } else if (action->flags & IRQF_SHARED) {
250 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
251 goto out_unlock;
252 }
253 if (action->flags & SA_STATIC_ALLOC)
254 {
255 /* This interrupt is marked as specially allocated
256 * so it is a bad idea to free it.
257 */
258 printk("Attempt to free statically allocated IRQ%d (%s)\n",
259 irq, action->name);
260 goto out_unlock;
261 }
262 174
263 *actionp = action->next; 175 p = &irq_table[irq];
176 pil = p->pil;
177 BUG_ON(pil > SUN4D_MAX_IRQ);
178 p->next = irq_map[pil];
179 irq_map[pil] = p;
264 180
265 spin_unlock_irqrestore(&irq_action_lock, flags); 181 spin_unlock_irqrestore(&irq_map_lock, flags);
182}
266 183
267 synchronize_irq(irq); 184void irq_unlink(unsigned int irq)
185{
186 struct irq_bucket *p, **pnext;
187 unsigned long flags;
268 188
269 spin_lock_irqsave(&irq_action_lock, flags); 189 BUG_ON(irq >= NR_IRQS);
270 190
271 kfree(action); 191 spin_lock_irqsave(&irq_map_lock, flags);
272 192
273 if (!sparc_irq[cpu_irq].action) 193 p = &irq_table[irq];
274 __disable_irq(irq); 194 BUG_ON(p->pil > SUN4D_MAX_IRQ);
195 pnext = &irq_map[p->pil];
196 while (*pnext != p)
197 pnext = &(*pnext)->next;
198 *pnext = p->next;
275 199
276out_unlock: 200 spin_unlock_irqrestore(&irq_map_lock, flags);
277 spin_unlock_irqrestore(&irq_action_lock, flags);
278} 201}
279 202
280EXPORT_SYMBOL(free_irq);
281 203
282/* 204/* /proc/interrupts printing */
283 * This is called when we want to synchronize with 205int arch_show_interrupts(struct seq_file *p, int prec)
284 * interrupts. We may for example tell a device to
285 * stop sending interrupts: but to make sure there
286 * are no interrupts that are executing on another
287 * CPU we need to call this function.
288 */
289#ifdef CONFIG_SMP
290void synchronize_irq(unsigned int irq)
291{ 206{
292 unsigned int cpu_irq; 207 int j;
293
294 cpu_irq = irq & (NR_IRQS - 1);
295 while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
296 cpu_relax();
297}
298EXPORT_SYMBOL(synchronize_irq);
299#endif /* SMP */
300 208
301void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs) 209#ifdef CONFIG_SMP
302{ 210 seq_printf(p, "RES: ");
303 int i; 211 for_each_online_cpu(j)
304 struct irqaction * action; 212 seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
305 unsigned int cpu_irq; 213 seq_printf(p, " IPI rescheduling interrupts\n");
306 214 seq_printf(p, "CAL: ");
307 cpu_irq = irq & (NR_IRQS - 1); 215 for_each_online_cpu(j)
308 action = sparc_irq[cpu_irq].action; 216 seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
309 217 seq_printf(p, " IPI function call interrupts\n");
310 printk("IO device interrupt, irq = %d\n", irq); 218#endif
311 printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, 219 seq_printf(p, "NMI: ");
312 regs->npc, regs->u_regs[14]); 220 for_each_online_cpu(j)
313 if (action) { 221 seq_printf(p, "%10u ", cpu_data(j).counter);
314 printk("Expecting: "); 222 seq_printf(p, " Non-maskable interrupts\n");
315 for (i = 0; i < 16; i++) 223 return 0;
316 if (action->handler)
317 printk("[%s:%d:0x%x] ", action->name,
318 (int) i, (unsigned int) action->handler);
319 }
320 printk("AIEEE\n");
321 panic("bogus interrupt received");
322} 224}
323 225
324void handler_irq(int irq, struct pt_regs * regs) 226void handler_irq(unsigned int pil, struct pt_regs *regs)
325{ 227{
326 struct pt_regs *old_regs; 228 struct pt_regs *old_regs;
327 struct irqaction * action; 229 struct irq_bucket *p;
328 int cpu = smp_processor_id();
329#ifdef CONFIG_SMP
330 extern void smp4m_irq_rotate(int cpu);
331#endif
332 230
231 BUG_ON(pil > 15);
333 old_regs = set_irq_regs(regs); 232 old_regs = set_irq_regs(regs);
334 irq_enter(); 233 irq_enter();
335 disable_pil_irq(irq); 234
336#ifdef CONFIG_SMP 235 p = irq_map[pil];
337 /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */ 236 while (p) {
338 if((sparc_cpu_model==sun4m) && (irq < 10)) 237 struct irq_bucket *next = p->next;
339 smp4m_irq_rotate(cpu); 238
340#endif 239 generic_handle_irq(p->irq);
341 action = sparc_irq[irq].action; 240 p = next;
342 sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS; 241 }
343 kstat_cpu(cpu).irqs[irq]++;
344 do {
345 if (!action || !action->handler)
346 unexpected_irq(irq, NULL, regs);
347 action->handler(irq, action->dev_id);
348 action = action->next;
349 } while (action);
350 sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
351 enable_pil_irq(irq);
352 irq_exit(); 242 irq_exit();
353 set_irq_regs(old_regs); 243 set_irq_regs(old_regs);
354} 244}
355 245
356#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) 246#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
247static unsigned int floppy_irq;
357 248
358/* Fast IRQs on the Sparc can only have one routine attached to them, 249int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
359 * thus no sharing possible.
360 */
361static int request_fast_irq(unsigned int irq,
362 void (*handler)(void),
363 unsigned long irqflags, const char *devname)
364{ 250{
365 struct irqaction *action;
366 unsigned long flags;
367 unsigned int cpu_irq; 251 unsigned int cpu_irq;
368 int ret; 252 int err;
369#ifdef CONFIG_SMP 253
254#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
370 struct tt_entry *trap_table; 255 struct tt_entry *trap_table;
371 extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
372#endif 256#endif
373
374 cpu_irq = irq & (NR_IRQS - 1);
375 if(cpu_irq > 14) {
376 ret = -EINVAL;
377 goto out;
378 }
379 if(!handler) {
380 ret = -EINVAL;
381 goto out;
382 }
383 257
384 spin_lock_irqsave(&irq_action_lock, flags); 258 err = request_irq(irq, irq_handler, 0, "floppy", NULL);
259 if (err)
260 return -1;
385 261
386 action = sparc_irq[cpu_irq].action; 262 /* Save for later use in floppy interrupt handler */
387 if(action) { 263 floppy_irq = irq;
388 if(action->flags & IRQF_SHARED)
389 panic("Trying to register fast irq when already shared.\n");
390 if(irqflags & IRQF_SHARED)
391 panic("Trying to register fast irq as shared.\n");
392 264
393 /* Anyway, someone already owns it so cannot be made fast. */ 265 cpu_irq = (irq & (NR_IRQS - 1));
394 printk("request_fast_irq: Trying to register yet already owned.\n");
395 ret = -EBUSY;
396 goto out_unlock;
397 }
398
399 /* If this is flagged as statically allocated then we use our
400 * private struct which is never freed.
401 */
402 if (irqflags & SA_STATIC_ALLOC) {
403 if (static_irq_count < MAX_STATIC_ALLOC)
404 action = &static_irqaction[static_irq_count++];
405 else
406 printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
407 irq, devname);
408 }
409
410 if (action == NULL)
411 action = kmalloc(sizeof(struct irqaction),
412 GFP_ATOMIC);
413
414 if (!action) {
415 ret = -ENOMEM;
416 goto out_unlock;
417 }
418 266
419 /* Dork with trap table if we get this far. */ 267 /* Dork with trap table if we get this far. */
420#define INSTANTIATE(table) \ 268#define INSTANTIATE(table) \
421 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ 269 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
422 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ 270 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
423 SPARC_BRANCH((unsigned long) handler, \ 271 SPARC_BRANCH((unsigned long) floppy_hardint, \
424 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ 272 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
425 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ 273 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
426 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; 274 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
427 275
428 INSTANTIATE(sparc_ttable) 276 INSTANTIATE(sparc_ttable)
429#ifdef CONFIG_SMP 277#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
430 trap_table = &trapbase_cpu1; INSTANTIATE(trap_table) 278 trap_table = &trapbase_cpu1;
431 trap_table = &trapbase_cpu2; INSTANTIATE(trap_table) 279 INSTANTIATE(trap_table)
432 trap_table = &trapbase_cpu3; INSTANTIATE(trap_table) 280 trap_table = &trapbase_cpu2;
281 INSTANTIATE(trap_table)
282 trap_table = &trapbase_cpu3;
283 INSTANTIATE(trap_table)
433#endif 284#endif
434#undef INSTANTIATE 285#undef INSTANTIATE
435 /* 286 /*
@@ -438,24 +289,12 @@ static int request_fast_irq(unsigned int irq,
438 * writing we have no CPU-neutral interface to fine-grained flushes. 289 * writing we have no CPU-neutral interface to fine-grained flushes.
439 */ 290 */
440 flush_cache_all(); 291 flush_cache_all();
441 292 return 0;
442 action->flags = irqflags;
443 action->name = devname;
444 action->dev_id = NULL;
445 action->next = NULL;
446
447 sparc_irq[cpu_irq].action = action;
448
449 __enable_irq(irq);
450
451 ret = 0;
452out_unlock:
453 spin_unlock_irqrestore(&irq_action_lock, flags);
454out:
455 return ret;
456} 293}
294EXPORT_SYMBOL(sparc_floppy_request_irq);
457 295
458/* These variables are used to access state from the assembler 296/*
297 * These variables are used to access state from the assembler
459 * interrupt handler, floppy_hardint, so we cannot put these in 298 * interrupt handler, floppy_hardint, so we cannot put these in
460 * the floppy driver image because that would not work in the 299 * the floppy driver image because that would not work in the
461 * modular case. 300 * modular case.
@@ -478,155 +317,23 @@ EXPORT_SYMBOL(pdma_base);
478unsigned long pdma_areasize; 317unsigned long pdma_areasize;
479EXPORT_SYMBOL(pdma_areasize); 318EXPORT_SYMBOL(pdma_areasize);
480 319
481extern void floppy_hardint(void); 320/* Use the generic irq support to call floppy_interrupt
482 321 * which was setup using request_irq() in sparc_floppy_request_irq().
483static irq_handler_t floppy_irq_handler; 322 * We only have one floppy interrupt so we do not need to check
484 323 * for additional handlers being wired up by irq_link()
324 */
485void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs) 325void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
486{ 326{
487 struct pt_regs *old_regs; 327 struct pt_regs *old_regs;
488 int cpu = smp_processor_id();
489 328
490 old_regs = set_irq_regs(regs); 329 old_regs = set_irq_regs(regs);
491 disable_pil_irq(irq);
492 irq_enter(); 330 irq_enter();
493 kstat_cpu(cpu).irqs[irq]++; 331 generic_handle_irq(floppy_irq);
494 floppy_irq_handler(irq, dev_id);
495 irq_exit(); 332 irq_exit();
496 enable_pil_irq(irq);
497 set_irq_regs(old_regs); 333 set_irq_regs(old_regs);
498 // XXX Eek, it's totally changed with preempt_count() and such
499 // if (softirq_pending(cpu))
500 // do_softirq();
501} 334}
502
503int sparc_floppy_request_irq(int irq, unsigned long flags,
504 irq_handler_t irq_handler)
505{
506 floppy_irq_handler = irq_handler;
507 return request_fast_irq(irq, floppy_hardint, flags, "floppy");
508}
509EXPORT_SYMBOL(sparc_floppy_request_irq);
510
511#endif 335#endif
512 336
513int request_irq(unsigned int irq,
514 irq_handler_t handler,
515 unsigned long irqflags, const char * devname, void *dev_id)
516{
517 struct irqaction * action, **actionp;
518 unsigned long flags;
519 unsigned int cpu_irq;
520 int ret;
521
522 if (sparc_cpu_model == sun4d) {
523 extern int sun4d_request_irq(unsigned int,
524 irq_handler_t ,
525 unsigned long, const char *, void *);
526 return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
527 }
528 cpu_irq = irq & (NR_IRQS - 1);
529 if(cpu_irq > 14) {
530 ret = -EINVAL;
531 goto out;
532 }
533 if (!handler) {
534 ret = -EINVAL;
535 goto out;
536 }
537
538 spin_lock_irqsave(&irq_action_lock, flags);
539
540 actionp = &sparc_irq[cpu_irq].action;
541 action = *actionp;
542 if (action) {
543 if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
544 ret = -EBUSY;
545 goto out_unlock;
546 }
547 if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
548 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
549 ret = -EBUSY;
550 goto out_unlock;
551 }
552 for ( ; action; action = *actionp)
553 actionp = &action->next;
554 }
555
556 /* If this is flagged as statically allocated then we use our
557 * private struct which is never freed.
558 */
559 if (irqflags & SA_STATIC_ALLOC) {
560 if (static_irq_count < MAX_STATIC_ALLOC)
561 action = &static_irqaction[static_irq_count++];
562 else
563 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
564 }
565
566 if (action == NULL)
567 action = kmalloc(sizeof(struct irqaction),
568 GFP_ATOMIC);
569
570 if (!action) {
571 ret = -ENOMEM;
572 goto out_unlock;
573 }
574
575 action->handler = handler;
576 action->flags = irqflags;
577 action->name = devname;
578 action->next = NULL;
579 action->dev_id = dev_id;
580
581 *actionp = action;
582
583 __enable_irq(irq);
584
585 ret = 0;
586out_unlock:
587 spin_unlock_irqrestore(&irq_action_lock, flags);
588out:
589 return ret;
590}
591
592EXPORT_SYMBOL(request_irq);
593
594void disable_irq_nosync(unsigned int irq)
595{
596 __disable_irq(irq);
597}
598EXPORT_SYMBOL(disable_irq_nosync);
599
600void disable_irq(unsigned int irq)
601{
602 __disable_irq(irq);
603}
604EXPORT_SYMBOL(disable_irq);
605
606void enable_irq(unsigned int irq)
607{
608 __enable_irq(irq);
609}
610
611EXPORT_SYMBOL(enable_irq);
612
613/* We really don't need these at all on the Sparc. We only have
614 * stubs here because they are exported to modules.
615 */
616unsigned long probe_irq_on(void)
617{
618 return 0;
619}
620
621EXPORT_SYMBOL(probe_irq_on);
622
623int probe_irq_off(unsigned long mask)
624{
625 return 0;
626}
627
628EXPORT_SYMBOL(probe_irq_off);
629
630/* djhr 337/* djhr
631 * This could probably be made indirect too and assigned in the CPU 338 * This could probably be made indirect too and assigned in the CPU
632 * bits of the code. That would be much nicer I think and would also 339 * bits of the code. That would be much nicer I think and would also
@@ -637,27 +344,20 @@ EXPORT_SYMBOL(probe_irq_off);
637 344
638void __init init_IRQ(void) 345void __init init_IRQ(void)
639{ 346{
640 extern void sun4c_init_IRQ( void ); 347 switch (sparc_cpu_model) {
641 extern void sun4m_init_IRQ( void );
642 extern void sun4d_init_IRQ( void );
643
644 switch(sparc_cpu_model) {
645 case sun4c: 348 case sun4c:
646 case sun4: 349 case sun4:
647 sun4c_init_IRQ(); 350 sun4c_init_IRQ();
648 break; 351 break;
649 352
650 case sun4m: 353 case sun4m:
651#ifdef CONFIG_PCI
652 pcic_probe(); 354 pcic_probe();
653 if (pcic_present()) { 355 if (pcic_present())
654 sun4m_pci_init_IRQ(); 356 sun4m_pci_init_IRQ();
655 break; 357 else
656 } 358 sun4m_init_IRQ();
657#endif
658 sun4m_init_IRQ();
659 break; 359 break;
660 360
661 case sun4d: 361 case sun4d:
662 sun4d_init_IRQ(); 362 sun4d_init_IRQ();
663 break; 363 break;
@@ -673,9 +373,3 @@ void __init init_IRQ(void)
673 btfixup(); 373 btfixup();
674} 374}
675 375
676#ifdef CONFIG_PROC_FS
677void init_irq_proc(void)
678{
679 /* For now, nothing... */
680}
681#endif /* CONFIG_PROC_FS */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 830d70a3e20b..4e78862d12fd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
82 "i" (ASI_PHYS_USE_EC)); 82 "i" (ASI_PHYS_USE_EC));
83} 83}
84 84
85static unsigned int bucket_get_virt_irq(unsigned long bucket_pa) 85static unsigned int bucket_get_irq(unsigned long bucket_pa)
86{ 86{
87 unsigned int ret; 87 unsigned int ret;
88 88
@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
90 : "=&r" (ret) 90 : "=&r" (ret)
91 : "r" (bucket_pa + 91 : "r" (bucket_pa +
92 offsetof(struct ino_bucket, 92 offsetof(struct ino_bucket,
93 __virt_irq)), 93 __irq)),
94 "i" (ASI_PHYS_USE_EC)); 94 "i" (ASI_PHYS_USE_EC));
95 95
96 return ret; 96 return ret;
97} 97}
98 98
99static void bucket_set_virt_irq(unsigned long bucket_pa, 99static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
100 unsigned int virt_irq)
101{ 100{
102 __asm__ __volatile__("stwa %0, [%1] %2" 101 __asm__ __volatile__("stwa %0, [%1] %2"
103 : /* no outputs */ 102 : /* no outputs */
104 : "r" (virt_irq), 103 : "r" (irq),
105 "r" (bucket_pa + 104 "r" (bucket_pa +
106 offsetof(struct ino_bucket, 105 offsetof(struct ino_bucket,
107 __virt_irq)), 106 __irq)),
108 "i" (ASI_PHYS_USE_EC)); 107 "i" (ASI_PHYS_USE_EC));
109} 108}
110 109
@@ -114,97 +113,63 @@ static struct {
114 unsigned int dev_handle; 113 unsigned int dev_handle;
115 unsigned int dev_ino; 114 unsigned int dev_ino;
116 unsigned int in_use; 115 unsigned int in_use;
117} virt_irq_table[NR_IRQS]; 116} irq_table[NR_IRQS];
118static DEFINE_SPINLOCK(virt_irq_alloc_lock); 117static DEFINE_SPINLOCK(irq_alloc_lock);
119 118
120unsigned char virt_irq_alloc(unsigned int dev_handle, 119unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
121 unsigned int dev_ino)
122{ 120{
123 unsigned long flags; 121 unsigned long flags;
124 unsigned char ent; 122 unsigned char ent;
125 123
126 BUILD_BUG_ON(NR_IRQS >= 256); 124 BUILD_BUG_ON(NR_IRQS >= 256);
127 125
128 spin_lock_irqsave(&virt_irq_alloc_lock, flags); 126 spin_lock_irqsave(&irq_alloc_lock, flags);
129 127
130 for (ent = 1; ent < NR_IRQS; ent++) { 128 for (ent = 1; ent < NR_IRQS; ent++) {
131 if (!virt_irq_table[ent].in_use) 129 if (!irq_table[ent].in_use)
132 break; 130 break;
133 } 131 }
134 if (ent >= NR_IRQS) { 132 if (ent >= NR_IRQS) {
135 printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); 133 printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
136 ent = 0; 134 ent = 0;
137 } else { 135 } else {
138 virt_irq_table[ent].dev_handle = dev_handle; 136 irq_table[ent].dev_handle = dev_handle;
139 virt_irq_table[ent].dev_ino = dev_ino; 137 irq_table[ent].dev_ino = dev_ino;
140 virt_irq_table[ent].in_use = 1; 138 irq_table[ent].in_use = 1;
141 } 139 }
142 140
143 spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); 141 spin_unlock_irqrestore(&irq_alloc_lock, flags);
144 142
145 return ent; 143 return ent;
146} 144}
147 145
148#ifdef CONFIG_PCI_MSI 146#ifdef CONFIG_PCI_MSI
149void virt_irq_free(unsigned int virt_irq) 147void irq_free(unsigned int irq)
150{ 148{
151 unsigned long flags; 149 unsigned long flags;
152 150
153 if (virt_irq >= NR_IRQS) 151 if (irq >= NR_IRQS)
154 return; 152 return;
155 153
156 spin_lock_irqsave(&virt_irq_alloc_lock, flags); 154 spin_lock_irqsave(&irq_alloc_lock, flags);
157 155
158 virt_irq_table[virt_irq].in_use = 0; 156 irq_table[irq].in_use = 0;
159 157
160 spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); 158 spin_unlock_irqrestore(&irq_alloc_lock, flags);
161} 159}
162#endif 160#endif
163 161
164/* 162/*
165 * /proc/interrupts printing: 163 * /proc/interrupts printing:
166 */ 164 */
167 165int arch_show_interrupts(struct seq_file *p, int prec)
168int show_interrupts(struct seq_file *p, void *v)
169{ 166{
170 int i = *(loff_t *) v, j; 167 int j;
171 struct irqaction * action;
172 unsigned long flags;
173 168
174 if (i == 0) { 169 seq_printf(p, "NMI: ");
175 seq_printf(p, " "); 170 for_each_online_cpu(j)
176 for_each_online_cpu(j) 171 seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
177 seq_printf(p, "CPU%d ",j); 172 seq_printf(p, " Non-maskable interrupts\n");
178 seq_putc(p, '\n');
179 }
180
181 if (i < NR_IRQS) {
182 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
183 action = irq_desc[i].action;
184 if (!action)
185 goto skip;
186 seq_printf(p, "%3d: ",i);
187#ifndef CONFIG_SMP
188 seq_printf(p, "%10u ", kstat_irqs(i));
189#else
190 for_each_online_cpu(j)
191 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
192#endif
193 seq_printf(p, " %9s", irq_desc[i].chip->name);
194 seq_printf(p, " %s", action->name);
195
196 for (action=action->next; action; action = action->next)
197 seq_printf(p, ", %s", action->name);
198
199 seq_putc(p, '\n');
200skip:
201 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
202 } else if (i == NR_IRQS) {
203 seq_printf(p, "NMI: ");
204 for_each_online_cpu(j)
205 seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
206 seq_printf(p, " Non-maskable interrupts\n");
207 }
208 return 0; 173 return 0;
209} 174}
210 175
@@ -253,39 +218,38 @@ struct irq_handler_data {
253}; 218};
254 219
255#ifdef CONFIG_SMP 220#ifdef CONFIG_SMP
256static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity) 221static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
257{ 222{
258 cpumask_t mask; 223 cpumask_t mask;
259 int cpuid; 224 int cpuid;
260 225
261 cpumask_copy(&mask, affinity); 226 cpumask_copy(&mask, affinity);
262 if (cpus_equal(mask, cpu_online_map)) { 227 if (cpumask_equal(&mask, cpu_online_mask)) {
263 cpuid = map_to_cpu(virt_irq); 228 cpuid = map_to_cpu(irq);
264 } else { 229 } else {
265 cpumask_t tmp; 230 cpumask_t tmp;
266 231
267 cpus_and(tmp, cpu_online_map, mask); 232 cpumask_and(&tmp, cpu_online_mask, &mask);
268 cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp); 233 cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
269 } 234 }
270 235
271 return cpuid; 236 return cpuid;
272} 237}
273#else 238#else
274#define irq_choose_cpu(virt_irq, affinity) \ 239#define irq_choose_cpu(irq, affinity) \
275 real_hard_smp_processor_id() 240 real_hard_smp_processor_id()
276#endif 241#endif
277 242
278static void sun4u_irq_enable(unsigned int virt_irq) 243static void sun4u_irq_enable(struct irq_data *data)
279{ 244{
280 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 245 struct irq_handler_data *handler_data = data->handler_data;
281 246
282 if (likely(data)) { 247 if (likely(handler_data)) {
283 unsigned long cpuid, imap, val; 248 unsigned long cpuid, imap, val;
284 unsigned int tid; 249 unsigned int tid;
285 250
286 cpuid = irq_choose_cpu(virt_irq, 251 cpuid = irq_choose_cpu(data->irq, data->affinity);
287 irq_desc[virt_irq].affinity); 252 imap = handler_data->imap;
288 imap = data->imap;
289 253
290 tid = sun4u_compute_tid(imap, cpuid); 254 tid = sun4u_compute_tid(imap, cpuid);
291 255
@@ -294,21 +258,21 @@ static void sun4u_irq_enable(unsigned int virt_irq)
294 IMAP_AID_SAFARI | IMAP_NID_SAFARI); 258 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
295 val |= tid | IMAP_VALID; 259 val |= tid | IMAP_VALID;
296 upa_writeq(val, imap); 260 upa_writeq(val, imap);
297 upa_writeq(ICLR_IDLE, data->iclr); 261 upa_writeq(ICLR_IDLE, handler_data->iclr);
298 } 262 }
299} 263}
300 264
301static int sun4u_set_affinity(unsigned int virt_irq, 265static int sun4u_set_affinity(struct irq_data *data,
302 const struct cpumask *mask) 266 const struct cpumask *mask, bool force)
303{ 267{
304 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 268 struct irq_handler_data *handler_data = data->handler_data;
305 269
306 if (likely(data)) { 270 if (likely(handler_data)) {
307 unsigned long cpuid, imap, val; 271 unsigned long cpuid, imap, val;
308 unsigned int tid; 272 unsigned int tid;
309 273
310 cpuid = irq_choose_cpu(virt_irq, mask); 274 cpuid = irq_choose_cpu(data->irq, mask);
311 imap = data->imap; 275 imap = handler_data->imap;
312 276
313 tid = sun4u_compute_tid(imap, cpuid); 277 tid = sun4u_compute_tid(imap, cpuid);
314 278
@@ -317,7 +281,7 @@ static int sun4u_set_affinity(unsigned int virt_irq,
317 IMAP_AID_SAFARI | IMAP_NID_SAFARI); 281 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
318 val |= tid | IMAP_VALID; 282 val |= tid | IMAP_VALID;
319 upa_writeq(val, imap); 283 upa_writeq(val, imap);
320 upa_writeq(ICLR_IDLE, data->iclr); 284 upa_writeq(ICLR_IDLE, handler_data->iclr);
321 } 285 }
322 286
323 return 0; 287 return 0;
@@ -340,27 +304,22 @@ static int sun4u_set_affinity(unsigned int virt_irq,
340 * sees that, it also hooks up a default ->shutdown method which 304 * sees that, it also hooks up a default ->shutdown method which
341 * invokes ->mask() which we do not want. See irq_chip_set_defaults(). 305 * invokes ->mask() which we do not want. See irq_chip_set_defaults().
342 */ 306 */
343static void sun4u_irq_disable(unsigned int virt_irq) 307static void sun4u_irq_disable(struct irq_data *data)
344{ 308{
345} 309}
346 310
347static void sun4u_irq_eoi(unsigned int virt_irq) 311static void sun4u_irq_eoi(struct irq_data *data)
348{ 312{
349 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 313 struct irq_handler_data *handler_data = data->handler_data;
350 struct irq_desc *desc = irq_desc + virt_irq;
351 314
352 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 315 if (likely(handler_data))
353 return; 316 upa_writeq(ICLR_IDLE, handler_data->iclr);
354
355 if (likely(data))
356 upa_writeq(ICLR_IDLE, data->iclr);
357} 317}
358 318
359static void sun4v_irq_enable(unsigned int virt_irq) 319static void sun4v_irq_enable(struct irq_data *data)
360{ 320{
361 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 321 unsigned int ino = irq_table[data->irq].dev_ino;
362 unsigned long cpuid = irq_choose_cpu(virt_irq, 322 unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
363 irq_desc[virt_irq].affinity);
364 int err; 323 int err;
365 324
366 err = sun4v_intr_settarget(ino, cpuid); 325 err = sun4v_intr_settarget(ino, cpuid);
@@ -377,11 +336,11 @@ static void sun4v_irq_enable(unsigned int virt_irq)
377 ino, err); 336 ino, err);
378} 337}
379 338
380static int sun4v_set_affinity(unsigned int virt_irq, 339static int sun4v_set_affinity(struct irq_data *data,
381 const struct cpumask *mask) 340 const struct cpumask *mask, bool force)
382{ 341{
383 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 342 unsigned int ino = irq_table[data->irq].dev_ino;
384 unsigned long cpuid = irq_choose_cpu(virt_irq, mask); 343 unsigned long cpuid = irq_choose_cpu(data->irq, mask);
385 int err; 344 int err;
386 345
387 err = sun4v_intr_settarget(ino, cpuid); 346 err = sun4v_intr_settarget(ino, cpuid);
@@ -392,9 +351,9 @@ static int sun4v_set_affinity(unsigned int virt_irq,
392 return 0; 351 return 0;
393} 352}
394 353
395static void sun4v_irq_disable(unsigned int virt_irq) 354static void sun4v_irq_disable(struct irq_data *data)
396{ 355{
397 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 356 unsigned int ino = irq_table[data->irq].dev_ino;
398 int err; 357 int err;
399 358
400 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); 359 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -403,30 +362,26 @@ static void sun4v_irq_disable(unsigned int virt_irq)
403 "err(%d)\n", ino, err); 362 "err(%d)\n", ino, err);
404} 363}
405 364
406static void sun4v_irq_eoi(unsigned int virt_irq) 365static void sun4v_irq_eoi(struct irq_data *data)
407{ 366{
408 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 367 unsigned int ino = irq_table[data->irq].dev_ino;
409 struct irq_desc *desc = irq_desc + virt_irq;
410 int err; 368 int err;
411 369
412 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
413 return;
414
415 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); 370 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
416 if (err != HV_EOK) 371 if (err != HV_EOK)
417 printk(KERN_ERR "sun4v_intr_setstate(%x): " 372 printk(KERN_ERR "sun4v_intr_setstate(%x): "
418 "err(%d)\n", ino, err); 373 "err(%d)\n", ino, err);
419} 374}
420 375
421static void sun4v_virq_enable(unsigned int virt_irq) 376static void sun4v_virq_enable(struct irq_data *data)
422{ 377{
423 unsigned long cpuid, dev_handle, dev_ino; 378 unsigned long cpuid, dev_handle, dev_ino;
424 int err; 379 int err;
425 380
426 cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity); 381 cpuid = irq_choose_cpu(data->irq, data->affinity);
427 382
428 dev_handle = virt_irq_table[virt_irq].dev_handle; 383 dev_handle = irq_table[data->irq].dev_handle;
429 dev_ino = virt_irq_table[virt_irq].dev_ino; 384 dev_ino = irq_table[data->irq].dev_ino;
430 385
431 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); 386 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
432 if (err != HV_EOK) 387 if (err != HV_EOK)
@@ -447,16 +402,16 @@ static void sun4v_virq_enable(unsigned int virt_irq)
447 dev_handle, dev_ino, err); 402 dev_handle, dev_ino, err);
448} 403}
449 404
450static int sun4v_virt_set_affinity(unsigned int virt_irq, 405static int sun4v_virt_set_affinity(struct irq_data *data,
451 const struct cpumask *mask) 406 const struct cpumask *mask, bool force)
452{ 407{
453 unsigned long cpuid, dev_handle, dev_ino; 408 unsigned long cpuid, dev_handle, dev_ino;
454 int err; 409 int err;
455 410
456 cpuid = irq_choose_cpu(virt_irq, mask); 411 cpuid = irq_choose_cpu(data->irq, mask);
457 412
458 dev_handle = virt_irq_table[virt_irq].dev_handle; 413 dev_handle = irq_table[data->irq].dev_handle;
459 dev_ino = virt_irq_table[virt_irq].dev_ino; 414 dev_ino = irq_table[data->irq].dev_ino;
460 415
461 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); 416 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
462 if (err != HV_EOK) 417 if (err != HV_EOK)
@@ -467,13 +422,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
467 return 0; 422 return 0;
468} 423}
469 424
470static void sun4v_virq_disable(unsigned int virt_irq) 425static void sun4v_virq_disable(struct irq_data *data)
471{ 426{
472 unsigned long dev_handle, dev_ino; 427 unsigned long dev_handle, dev_ino;
473 int err; 428 int err;
474 429
475 dev_handle = virt_irq_table[virt_irq].dev_handle; 430 dev_handle = irq_table[data->irq].dev_handle;
476 dev_ino = virt_irq_table[virt_irq].dev_ino; 431 dev_ino = irq_table[data->irq].dev_ino;
477 432
478 err = sun4v_vintr_set_valid(dev_handle, dev_ino, 433 err = sun4v_vintr_set_valid(dev_handle, dev_ino,
479 HV_INTR_DISABLED); 434 HV_INTR_DISABLED);
@@ -483,17 +438,13 @@ static void sun4v_virq_disable(unsigned int virt_irq)
483 dev_handle, dev_ino, err); 438 dev_handle, dev_ino, err);
484} 439}
485 440
486static void sun4v_virq_eoi(unsigned int virt_irq) 441static void sun4v_virq_eoi(struct irq_data *data)
487{ 442{
488 struct irq_desc *desc = irq_desc + virt_irq;
489 unsigned long dev_handle, dev_ino; 443 unsigned long dev_handle, dev_ino;
490 int err; 444 int err;
491 445
492 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 446 dev_handle = irq_table[data->irq].dev_handle;
493 return; 447 dev_ino = irq_table[data->irq].dev_ino;
494
495 dev_handle = virt_irq_table[virt_irq].dev_handle;
496 dev_ino = virt_irq_table[virt_irq].dev_ino;
497 448
498 err = sun4v_vintr_set_state(dev_handle, dev_ino, 449 err = sun4v_vintr_set_state(dev_handle, dev_ino,
499 HV_INTR_STATE_IDLE); 450 HV_INTR_STATE_IDLE);
@@ -504,132 +455,128 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
504} 455}
505 456
506static struct irq_chip sun4u_irq = { 457static struct irq_chip sun4u_irq = {
507 .name = "sun4u", 458 .name = "sun4u",
508 .enable = sun4u_irq_enable, 459 .irq_enable = sun4u_irq_enable,
509 .disable = sun4u_irq_disable, 460 .irq_disable = sun4u_irq_disable,
510 .eoi = sun4u_irq_eoi, 461 .irq_eoi = sun4u_irq_eoi,
511 .set_affinity = sun4u_set_affinity, 462 .irq_set_affinity = sun4u_set_affinity,
463 .flags = IRQCHIP_EOI_IF_HANDLED,
512}; 464};
513 465
514static struct irq_chip sun4v_irq = { 466static struct irq_chip sun4v_irq = {
515 .name = "sun4v", 467 .name = "sun4v",
516 .enable = sun4v_irq_enable, 468 .irq_enable = sun4v_irq_enable,
517 .disable = sun4v_irq_disable, 469 .irq_disable = sun4v_irq_disable,
518 .eoi = sun4v_irq_eoi, 470 .irq_eoi = sun4v_irq_eoi,
519 .set_affinity = sun4v_set_affinity, 471 .irq_set_affinity = sun4v_set_affinity,
472 .flags = IRQCHIP_EOI_IF_HANDLED,
520}; 473};
521 474
522static struct irq_chip sun4v_virq = { 475static struct irq_chip sun4v_virq = {
523 .name = "vsun4v", 476 .name = "vsun4v",
524 .enable = sun4v_virq_enable, 477 .irq_enable = sun4v_virq_enable,
525 .disable = sun4v_virq_disable, 478 .irq_disable = sun4v_virq_disable,
526 .eoi = sun4v_virq_eoi, 479 .irq_eoi = sun4v_virq_eoi,
527 .set_affinity = sun4v_virt_set_affinity, 480 .irq_set_affinity = sun4v_virt_set_affinity,
481 .flags = IRQCHIP_EOI_IF_HANDLED,
528}; 482};
529 483
530static void pre_flow_handler(unsigned int virt_irq, 484static void pre_flow_handler(struct irq_data *d)
531 struct irq_desc *desc)
532{ 485{
533 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 486 struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
534 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 487 unsigned int ino = irq_table[d->irq].dev_ino;
535 488
536 data->pre_handler(ino, data->arg1, data->arg2); 489 handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
537
538 handle_fasteoi_irq(virt_irq, desc);
539} 490}
540 491
541void irq_install_pre_handler(int virt_irq, 492void irq_install_pre_handler(int irq,
542 void (*func)(unsigned int, void *, void *), 493 void (*func)(unsigned int, void *, void *),
543 void *arg1, void *arg2) 494 void *arg1, void *arg2)
544{ 495{
545 struct irq_handler_data *data = get_irq_chip_data(virt_irq); 496 struct irq_handler_data *handler_data = irq_get_handler_data(irq);
546 struct irq_desc *desc = irq_desc + virt_irq;
547 497
548 data->pre_handler = func; 498 handler_data->pre_handler = func;
549 data->arg1 = arg1; 499 handler_data->arg1 = arg1;
550 data->arg2 = arg2; 500 handler_data->arg2 = arg2;
551 501
552 desc->handle_irq = pre_flow_handler; 502 __irq_set_preflow_handler(irq, pre_flow_handler);
553} 503}
554 504
555unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) 505unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
556{ 506{
557 struct ino_bucket *bucket; 507 struct ino_bucket *bucket;
558 struct irq_handler_data *data; 508 struct irq_handler_data *handler_data;
559 unsigned int virt_irq; 509 unsigned int irq;
560 int ino; 510 int ino;
561 511
562 BUG_ON(tlb_type == hypervisor); 512 BUG_ON(tlb_type == hypervisor);
563 513
564 ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; 514 ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
565 bucket = &ivector_table[ino]; 515 bucket = &ivector_table[ino];
566 virt_irq = bucket_get_virt_irq(__pa(bucket)); 516 irq = bucket_get_irq(__pa(bucket));
567 if (!virt_irq) { 517 if (!irq) {
568 virt_irq = virt_irq_alloc(0, ino); 518 irq = irq_alloc(0, ino);
569 bucket_set_virt_irq(__pa(bucket), virt_irq); 519 bucket_set_irq(__pa(bucket), irq);
570 set_irq_chip_and_handler_name(virt_irq, 520 irq_set_chip_and_handler_name(irq, &sun4u_irq,
571 &sun4u_irq, 521 handle_fasteoi_irq, "IVEC");
572 handle_fasteoi_irq,
573 "IVEC");
574 } 522 }
575 523
576 data = get_irq_chip_data(virt_irq); 524 handler_data = irq_get_handler_data(irq);
577 if (unlikely(data)) 525 if (unlikely(handler_data))
578 goto out; 526 goto out;
579 527
580 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); 528 handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
581 if (unlikely(!data)) { 529 if (unlikely(!handler_data)) {
582 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); 530 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
583 prom_halt(); 531 prom_halt();
584 } 532 }
585 set_irq_chip_data(virt_irq, data); 533 irq_set_handler_data(irq, handler_data);
586 534
587 data->imap = imap; 535 handler_data->imap = imap;
588 data->iclr = iclr; 536 handler_data->iclr = iclr;
589 537
590out: 538out:
591 return virt_irq; 539 return irq;
592} 540}
593 541
594static unsigned int sun4v_build_common(unsigned long sysino, 542static unsigned int sun4v_build_common(unsigned long sysino,
595 struct irq_chip *chip) 543 struct irq_chip *chip)
596{ 544{
597 struct ino_bucket *bucket; 545 struct ino_bucket *bucket;
598 struct irq_handler_data *data; 546 struct irq_handler_data *handler_data;
599 unsigned int virt_irq; 547 unsigned int irq;
600 548
601 BUG_ON(tlb_type != hypervisor); 549 BUG_ON(tlb_type != hypervisor);
602 550
603 bucket = &ivector_table[sysino]; 551 bucket = &ivector_table[sysino];
604 virt_irq = bucket_get_virt_irq(__pa(bucket)); 552 irq = bucket_get_irq(__pa(bucket));
605 if (!virt_irq) { 553 if (!irq) {
606 virt_irq = virt_irq_alloc(0, sysino); 554 irq = irq_alloc(0, sysino);
607 bucket_set_virt_irq(__pa(bucket), virt_irq); 555 bucket_set_irq(__pa(bucket), irq);
608 set_irq_chip_and_handler_name(virt_irq, chip, 556 irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
609 handle_fasteoi_irq,
610 "IVEC"); 557 "IVEC");
611 } 558 }
612 559
613 data = get_irq_chip_data(virt_irq); 560 handler_data = irq_get_handler_data(irq);
614 if (unlikely(data)) 561 if (unlikely(handler_data))
615 goto out; 562 goto out;
616 563
617 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); 564 handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
618 if (unlikely(!data)) { 565 if (unlikely(!handler_data)) {
619 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); 566 prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
620 prom_halt(); 567 prom_halt();
621 } 568 }
622 set_irq_chip_data(virt_irq, data); 569 irq_set_handler_data(irq, handler_data);
623 570
624 /* Catch accidental accesses to these things. IMAP/ICLR handling 571 /* Catch accidental accesses to these things. IMAP/ICLR handling
625 * is done by hypervisor calls on sun4v platforms, not by direct 572 * is done by hypervisor calls on sun4v platforms, not by direct
626 * register accesses. 573 * register accesses.
627 */ 574 */
628 data->imap = ~0UL; 575 handler_data->imap = ~0UL;
629 data->iclr = ~0UL; 576 handler_data->iclr = ~0UL;
630 577
631out: 578out:
632 return virt_irq; 579 return irq;
633} 580}
634 581
635unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) 582unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -641,11 +588,10 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
641 588
642unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) 589unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
643{ 590{
644 struct irq_handler_data *data; 591 struct irq_handler_data *handler_data;
645 unsigned long hv_err, cookie; 592 unsigned long hv_err, cookie;
646 struct ino_bucket *bucket; 593 struct ino_bucket *bucket;
647 struct irq_desc *desc; 594 unsigned int irq;
648 unsigned int virt_irq;
649 595
650 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); 596 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
651 if (unlikely(!bucket)) 597 if (unlikely(!bucket))
@@ -662,32 +608,29 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
662 ((unsigned long) bucket + 608 ((unsigned long) bucket +
663 sizeof(struct ino_bucket))); 609 sizeof(struct ino_bucket)));
664 610
665 virt_irq = virt_irq_alloc(devhandle, devino); 611 irq = irq_alloc(devhandle, devino);
666 bucket_set_virt_irq(__pa(bucket), virt_irq); 612 bucket_set_irq(__pa(bucket), irq);
667 613
668 set_irq_chip_and_handler_name(virt_irq, &sun4v_virq, 614 irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
669 handle_fasteoi_irq,
670 "IVEC"); 615 "IVEC");
671 616
672 data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); 617 handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
673 if (unlikely(!data)) 618 if (unlikely(!handler_data))
674 return 0; 619 return 0;
675 620
676 /* In order to make the LDC channel startup sequence easier, 621 /* In order to make the LDC channel startup sequence easier,
677 * especially wrt. locking, we do not let request_irq() enable 622 * especially wrt. locking, we do not let request_irq() enable
678 * the interrupt. 623 * the interrupt.
679 */ 624 */
680 desc = irq_desc + virt_irq; 625 irq_set_status_flags(irq, IRQ_NOAUTOEN);
681 desc->status |= IRQ_NOAUTOEN; 626 irq_set_handler_data(irq, handler_data);
682
683 set_irq_chip_data(virt_irq, data);
684 627
685 /* Catch accidental accesses to these things. IMAP/ICLR handling 628 /* Catch accidental accesses to these things. IMAP/ICLR handling
686 * is done by hypervisor calls on sun4v platforms, not by direct 629 * is done by hypervisor calls on sun4v platforms, not by direct
687 * register accesses. 630 * register accesses.
688 */ 631 */
689 data->imap = ~0UL; 632 handler_data->imap = ~0UL;
690 data->iclr = ~0UL; 633 handler_data->iclr = ~0UL;
691 634
692 cookie = ~__pa(bucket); 635 cookie = ~__pa(bucket);
693 hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); 636 hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
@@ -697,30 +640,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
697 prom_halt(); 640 prom_halt();
698 } 641 }
699 642
700 return virt_irq; 643 return irq;
701} 644}
702 645
703void ack_bad_irq(unsigned int virt_irq) 646void ack_bad_irq(unsigned int irq)
704{ 647{
705 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 648 unsigned int ino = irq_table[irq].dev_ino;
706 649
707 if (!ino) 650 if (!ino)
708 ino = 0xdeadbeef; 651 ino = 0xdeadbeef;
709 652
710 printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n", 653 printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
711 ino, virt_irq); 654 ino, irq);
712} 655}
713 656
714void *hardirq_stack[NR_CPUS]; 657void *hardirq_stack[NR_CPUS];
715void *softirq_stack[NR_CPUS]; 658void *softirq_stack[NR_CPUS];
716 659
717void __irq_entry handler_irq(int irq, struct pt_regs *regs) 660void __irq_entry handler_irq(int pil, struct pt_regs *regs)
718{ 661{
719 unsigned long pstate, bucket_pa; 662 unsigned long pstate, bucket_pa;
720 struct pt_regs *old_regs; 663 struct pt_regs *old_regs;
721 void *orig_sp; 664 void *orig_sp;
722 665
723 clear_softint(1 << irq); 666 clear_softint(1 << pil);
724 667
725 old_regs = set_irq_regs(regs); 668 old_regs = set_irq_regs(regs);
726 irq_enter(); 669 irq_enter();
@@ -739,18 +682,14 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs)
739 orig_sp = set_hardirq_stack(); 682 orig_sp = set_hardirq_stack();
740 683
741 while (bucket_pa) { 684 while (bucket_pa) {
742 struct irq_desc *desc;
743 unsigned long next_pa; 685 unsigned long next_pa;
744 unsigned int virt_irq; 686 unsigned int irq;
745 687
746 next_pa = bucket_get_chain_pa(bucket_pa); 688 next_pa = bucket_get_chain_pa(bucket_pa);
747 virt_irq = bucket_get_virt_irq(bucket_pa); 689 irq = bucket_get_irq(bucket_pa);
748 bucket_clear_chain_pa(bucket_pa); 690 bucket_clear_chain_pa(bucket_pa);
749 691
750 desc = irq_desc + virt_irq; 692 generic_handle_irq(irq);
751
752 if (!(desc->status & IRQ_DISABLED))
753 desc->handle_irq(virt_irq, desc);
754 693
755 bucket_pa = next_pa; 694 bucket_pa = next_pa;
756 } 695 }
@@ -793,16 +732,18 @@ void fixup_irqs(void)
793 unsigned int irq; 732 unsigned int irq;
794 733
795 for (irq = 0; irq < NR_IRQS; irq++) { 734 for (irq = 0; irq < NR_IRQS; irq++) {
735 struct irq_desc *desc = irq_to_desc(irq);
736 struct irq_data *data = irq_desc_get_irq_data(desc);
796 unsigned long flags; 737 unsigned long flags;
797 738
798 raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); 739 raw_spin_lock_irqsave(&desc->lock, flags);
799 if (irq_desc[irq].action && 740 if (desc->action && !irqd_is_per_cpu(data)) {
800 !(irq_desc[irq].status & IRQ_PER_CPU)) { 741 if (data->chip->irq_set_affinity)
801 if (irq_desc[irq].chip->set_affinity) 742 data->chip->irq_set_affinity(data,
802 irq_desc[irq].chip->set_affinity(irq, 743 data->affinity,
803 irq_desc[irq].affinity); 744 false);
804 } 745 }
805 raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 746 raw_spin_unlock_irqrestore(&desc->lock, flags);
806 } 747 }
807 748
808 tick_ops->disable_irq(); 749 tick_ops->disable_irq();
@@ -1040,5 +981,5 @@ void __init init_IRQ(void)
1040 : "i" (PSTATE_IE) 981 : "i" (PSTATE_IE)
1041 : "g1"); 982 : "g1");
1042 983
1043 irq_desc[0].action = &timer_irq_action; 984 irq_to_desc(0)->action = &timer_irq_action;
1044} 985}
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644
index 000000000000..ea2dafc93d78
--- /dev/null
+++ b/arch/sparc/kernel/jump_label.c
@@ -0,0 +1,47 @@
1#include <linux/kernel.h>
2#include <linux/types.h>
3#include <linux/mutex.h>
4#include <linux/cpu.h>
5
6#include <linux/jump_label.h>
7#include <linux/memory.h>
8
9#ifdef HAVE_JUMP_LABEL
10
11void arch_jump_label_transform(struct jump_entry *entry,
12 enum jump_label_type type)
13{
14 u32 val;
15 u32 *insn = (u32 *) (unsigned long) entry->code;
16
17 if (type == JUMP_LABEL_ENABLE) {
18 s32 off = (s32)entry->target - (s32)entry->code;
19
20#ifdef CONFIG_SPARC64
21 /* ba,pt %xcc, . + (off << 2) */
22 val = 0x10680000 | ((u32) off >> 2);
23#else
24 /* ba . + (off << 2) */
25 val = 0x10800000 | ((u32) off >> 2);
26#endif
27 } else {
28 val = 0x01000000;
29 }
30
31 get_online_cpus();
32 mutex_lock(&text_mutex);
33 *insn = val;
34 flushi(insn);
35 mutex_unlock(&text_mutex);
36 put_online_cpus();
37}
38
39void arch_jump_label_text_poke_early(jump_label_t addr)
40{
41 u32 *insn_p = (u32 *) (unsigned long) addr;
42
43 *insn_p = 0x01000000;
44 flushi(insn_p);
45}
46
47#endif
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 15d8a3f645c9..6f6544cfa0ef 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -3,12 +3,12 @@
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5 5
6#include <asm/traps.h>
7
6/* cpu.c */ 8/* cpu.c */
7extern const char *sparc_cpu_type;
8extern const char *sparc_pmu_type; 9extern const char *sparc_pmu_type;
9extern const char *sparc_fpu_type;
10
11extern unsigned int fsr_storage; 10extern unsigned int fsr_storage;
11extern int ncpus_probed;
12 12
13#ifdef CONFIG_SPARC32 13#ifdef CONFIG_SPARC32
14/* cpu.c */ 14/* cpu.c */
@@ -26,6 +26,54 @@ extern int static_irq_count;
26extern spinlock_t irq_action_lock; 26extern spinlock_t irq_action_lock;
27 27
28extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs); 28extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
29extern void init_IRQ(void);
30
31/* sun4c_irq.c */
32extern void sun4c_init_IRQ(void);
33
34/* sun4m_irq.c */
35extern unsigned int lvl14_resolution;
36
37extern void sun4m_init_IRQ(void);
38extern void sun4m_unmask_profile_irq(void);
39extern void sun4m_clear_profile_irq(int cpu);
40
41/* sun4d_irq.c */
42extern spinlock_t sun4d_imsk_lock;
43
44extern void sun4d_init_IRQ(void);
45extern int sun4d_request_irq(unsigned int irq,
46 irq_handler_t handler,
47 unsigned long irqflags,
48 const char *devname, void *dev_id);
49extern int show_sun4d_interrupts(struct seq_file *, void *);
50extern void sun4d_distribute_irqs(void);
51extern void sun4d_free_irq(unsigned int irq, void *dev_id);
52
53/* head_32.S */
54extern unsigned int t_nmi[];
55extern unsigned int linux_trap_ipi15_sun4d[];
56extern unsigned int linux_trap_ipi15_sun4m[];
57
58extern struct tt_entry trapbase_cpu1;
59extern struct tt_entry trapbase_cpu2;
60extern struct tt_entry trapbase_cpu3;
61
62extern char cputypval[];
63
64/* entry.S */
65extern unsigned long lvl14_save[4];
66extern unsigned int real_irq_entry[];
67extern unsigned int smp4d_ticker[];
68extern unsigned int patchme_maybe_smp_msg[];
69
70extern void floppy_hardint(void);
71
72/* trampoline_32.S */
73extern int __smp4m_processor_id(void);
74extern int __smp4d_processor_id(void);
75extern unsigned long sun4m_cpu_startup;
76extern unsigned long sun4d_cpu_startup;
29 77
30#else /* CONFIG_SPARC32 */ 78#else /* CONFIG_SPARC32 */
31#endif /* CONFIG_SPARC32 */ 79#endif /* CONFIG_SPARC32 */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index df39a0f0d27a..732b0bce6001 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask)
790static irqreturn_t ldc_rx(int irq, void *dev_id) 790static irqreturn_t ldc_rx(int irq, void *dev_id)
791{ 791{
792 struct ldc_channel *lp = dev_id; 792 struct ldc_channel *lp = dev_id;
793 unsigned long orig_state, hv_err, flags; 793 unsigned long orig_state, flags;
794 unsigned int event_mask; 794 unsigned int event_mask;
795 795
796 spin_lock_irqsave(&lp->lock, flags); 796 spin_lock_irqsave(&lp->lock, flags);
797 797
798 orig_state = lp->chan_state; 798 orig_state = lp->chan_state;
799 hv_err = sun4v_ldc_rx_get_state(lp->id, 799
800 &lp->rx_head, 800 /* We should probably check for hypervisor errors here and
801 &lp->rx_tail, 801 * reset the LDC channel if we get one.
802 &lp->chan_state); 802 */
803 sun4v_ldc_rx_get_state(lp->id,
804 &lp->rx_head,
805 &lp->rx_tail,
806 &lp->chan_state);
803 807
804 ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", 808 ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
805 orig_state, lp->chan_state, lp->rx_head, lp->rx_tail); 809 orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
@@ -904,16 +908,20 @@ out:
904static irqreturn_t ldc_tx(int irq, void *dev_id) 908static irqreturn_t ldc_tx(int irq, void *dev_id)
905{ 909{
906 struct ldc_channel *lp = dev_id; 910 struct ldc_channel *lp = dev_id;
907 unsigned long flags, hv_err, orig_state; 911 unsigned long flags, orig_state;
908 unsigned int event_mask = 0; 912 unsigned int event_mask = 0;
909 913
910 spin_lock_irqsave(&lp->lock, flags); 914 spin_lock_irqsave(&lp->lock, flags);
911 915
912 orig_state = lp->chan_state; 916 orig_state = lp->chan_state;
913 hv_err = sun4v_ldc_tx_get_state(lp->id, 917
914 &lp->tx_head, 918 /* We should probably check for hypervisor errors here and
915 &lp->tx_tail, 919 * reset the LDC channel if we get one.
916 &lp->chan_state); 920 */
921 sun4v_ldc_tx_get_state(lp->id,
922 &lp->tx_head,
923 &lp->tx_tail,
924 &lp->chan_state);
917 925
918 ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", 926 ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
919 orig_state, lp->chan_state, lp->tx_head, lp->tx_tail); 927 orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 6a7b4dbc8e09..d17255a2bbac 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -19,51 +19,70 @@
19#include <asm/leon_amba.h> 19#include <asm/leon_amba.h>
20#include <asm/traps.h> 20#include <asm/traps.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/smp.h>
23#include <asm/setup.h>
22 24
23#include "prom.h" 25#include "prom.h"
24#include "irq.h" 26#include "irq.h"
25 27
26struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ 28struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
27struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ 29struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */
28struct amba_apb_device leon_percpu_timer_dev[16];
29 30
30int leondebug_irq_disable; 31int leondebug_irq_disable;
31int leon_debug_irqout; 32int leon_debug_irqout;
32static int dummy_master_l10_counter; 33static int dummy_master_l10_counter;
34unsigned long amba_system_id;
35static DEFINE_SPINLOCK(leon_irq_lock);
33 36
34unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */ 37unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
38unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
39int leon3_ticker_irq; /* Timer ticker IRQ */
35unsigned int sparc_leon_eirq; 40unsigned int sparc_leon_eirq;
36#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) 41#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
42#define LEON_IACK (&leon3_irqctrl_regs->iclear)
43#define LEON_DO_ACK_HW 1
37 44
38/* Return the IRQ of the pending IRQ on the extended IRQ controller */ 45/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
39int sparc_leon_eirq_get(int eirq, int cpu) 46 * been (automatically) ACKed when the CPU takes the trap.
47 */
48static inline unsigned int leon_eirq_get(int cpu)
40{ 49{
41 return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; 50 return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
42} 51}
43 52
44irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id) 53/* Handle one or multiple IRQs from the extended interrupt controller */
54static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
45{ 55{
46 printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n"); 56 unsigned int eirq;
47 return IRQ_HANDLED; 57 int cpu = sparc_leon3_cpuid();
58
59 eirq = leon_eirq_get(cpu);
60 if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
61 generic_handle_irq(irq_map[eirq]->irq);
48} 62}
49 63
50/* The extended IRQ controller has been found, this function registers it */ 64/* The extended IRQ controller has been found, this function registers it */
51void sparc_leon_eirq_register(int eirq) 65void leon_eirq_setup(unsigned int eirq)
52{ 66{
53 int irq; 67 unsigned long mask, oldmask;
54 68 unsigned int veirq;
55 /* Register a "BAD" handler for this interrupt, it should never happen */
56 irq = request_irq(eirq, sparc_leon_eirq_isr,
57 (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
58 69
59 if (irq) { 70 if (eirq < 1 || eirq > 0xf) {
60 printk(KERN_ERR 71 printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
61 "sparc_leon_eirq_register: unable to attach IRQ%d\n", 72 return;
62 eirq);
63 } else {
64 sparc_leon_eirq = eirq;
65 } 73 }
66 74
75 veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);
76
77 /*
78 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
79 * controller have a mask-bit of their own, so this is safe.
80 */
81 irq_link(veirq);
82 mask = 1 << eirq;
83 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
84 LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
85 sparc_leon_eirq = eirq;
67} 86}
68 87
69static inline unsigned long get_irqmask(unsigned int irq) 88static inline unsigned long get_irqmask(unsigned int irq)
@@ -81,108 +100,327 @@ static inline unsigned long get_irqmask(unsigned int irq)
81 return mask; 100 return mask;
82} 101}
83 102
84static void leon_enable_irq(unsigned int irq_nr) 103#ifdef CONFIG_SMP
104static int irq_choose_cpu(const struct cpumask *affinity)
85{ 105{
86 unsigned long mask, flags; 106 cpumask_t mask;
87 mask = get_irqmask(irq_nr); 107
88 local_irq_save(flags); 108 cpus_and(mask, cpu_online_map, *affinity);
89 LEON3_BYPASS_STORE_PA(LEON_IMASK, 109 if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
90 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask))); 110 return boot_cpu_id;
91 local_irq_restore(flags); 111 else
112 return first_cpu(mask);
113}
114#else
115#define irq_choose_cpu(affinity) boot_cpu_id
116#endif
117
118static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
119 bool force)
120{
121 unsigned long mask, oldmask, flags;
122 int oldcpu, newcpu;
123
124 mask = (unsigned long)data->chip_data;
125 oldcpu = irq_choose_cpu(data->affinity);
126 newcpu = irq_choose_cpu(dest);
127
128 if (oldcpu == newcpu)
129 goto out;
130
131 /* unmask on old CPU first before enabling on the selected CPU */
132 spin_lock_irqsave(&leon_irq_lock, flags);
133 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
134 LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
135 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
136 LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
137 spin_unlock_irqrestore(&leon_irq_lock, flags);
138out:
139 return IRQ_SET_MASK_OK;
92} 140}
93 141
94static void leon_disable_irq(unsigned int irq_nr) 142static void leon_unmask_irq(struct irq_data *data)
95{ 143{
96 unsigned long mask, flags; 144 unsigned long mask, oldmask, flags;
97 mask = get_irqmask(irq_nr); 145 int cpu;
98 local_irq_save(flags); 146
99 LEON3_BYPASS_STORE_PA(LEON_IMASK, 147 mask = (unsigned long)data->chip_data;
100 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask))); 148 cpu = irq_choose_cpu(data->affinity);
101 local_irq_restore(flags); 149 spin_lock_irqsave(&leon_irq_lock, flags);
150 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
151 LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
152 spin_unlock_irqrestore(&leon_irq_lock, flags);
153}
154
155static void leon_mask_irq(struct irq_data *data)
156{
157 unsigned long mask, oldmask, flags;
158 int cpu;
159
160 mask = (unsigned long)data->chip_data;
161 cpu = irq_choose_cpu(data->affinity);
162 spin_lock_irqsave(&leon_irq_lock, flags);
163 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
164 LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
165 spin_unlock_irqrestore(&leon_irq_lock, flags);
166}
167
168static unsigned int leon_startup_irq(struct irq_data *data)
169{
170 irq_link(data->irq);
171 leon_unmask_irq(data);
172 return 0;
173}
102 174
175static void leon_shutdown_irq(struct irq_data *data)
176{
177 leon_mask_irq(data);
178 irq_unlink(data->irq);
179}
180
181/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
182static void leon_eoi_irq(struct irq_data *data)
183{
184 unsigned long mask = (unsigned long)data->chip_data;
185
186 if (mask & LEON_DO_ACK_HW)
187 LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
188}
189
190static struct irq_chip leon_irq = {
191 .name = "leon",
192 .irq_startup = leon_startup_irq,
193 .irq_shutdown = leon_shutdown_irq,
194 .irq_mask = leon_mask_irq,
195 .irq_unmask = leon_unmask_irq,
196 .irq_eoi = leon_eoi_irq,
197 .irq_set_affinity = leon_set_affinity,
198};
199
200/*
201 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
202 * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack
203 * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR
204 * Per-CPU Edge - handle_percpu_irq, ack=0
205 */
206unsigned int leon_build_device_irq(unsigned int real_irq,
207 irq_flow_handler_t flow_handler,
208 const char *name, int do_ack)
209{
210 unsigned int irq;
211 unsigned long mask;
212
213 irq = 0;
214 mask = get_irqmask(real_irq);
215 if (mask == 0)
216 goto out;
217
218 irq = irq_alloc(real_irq, real_irq);
219 if (irq == 0)
220 goto out;
221
222 if (do_ack)
223 mask |= LEON_DO_ACK_HW;
224
225 irq_set_chip_and_handler_name(irq, &leon_irq,
226 flow_handler, name);
227 irq_set_chip_data(irq, (void *)mask);
228
229out:
230 return irq;
231}
232
233static unsigned int _leon_build_device_irq(struct platform_device *op,
234 unsigned int real_irq)
235{
236 return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
237}
238
239void leon_update_virq_handling(unsigned int virq,
240 irq_flow_handler_t flow_handler,
241 const char *name, int do_ack)
242{
243 unsigned long mask = (unsigned long)irq_get_chip_data(virq);
244
245 mask &= ~LEON_DO_ACK_HW;
246 if (do_ack)
247 mask |= LEON_DO_ACK_HW;
248
249 irq_set_chip_and_handler_name(virq, &leon_irq,
250 flow_handler, name);
251 irq_set_chip_data(virq, (void *)mask);
103} 252}
104 253
105void __init leon_init_timers(irq_handler_t counter_fn) 254void __init leon_init_timers(irq_handler_t counter_fn)
106{ 255{
107 int irq; 256 int irq, eirq;
257 struct device_node *rootnp, *np, *nnp;
258 struct property *pp;
259 int len;
260 int icsel;
261 int ampopts;
262 int err;
108 263
109 leondebug_irq_disable = 0; 264 leondebug_irq_disable = 0;
110 leon_debug_irqout = 0; 265 leon_debug_irqout = 0;
111 master_l10_counter = (unsigned int *)&dummy_master_l10_counter; 266 master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
112 dummy_master_l10_counter = 0; 267 dummy_master_l10_counter = 0;
113 268
114 if (leon3_gptimer_regs && leon3_irqctrl_regs) { 269 rootnp = of_find_node_by_path("/ambapp0");
115 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); 270 if (!rootnp)
116 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, 271 goto bad;
117 (((1000000 / 100) - 1))); 272
118 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); 273 /* Find System ID: GRLIB build ID and optional CHIP ID */
119 274 pp = of_find_property(rootnp, "systemid", &len);
120#ifdef CONFIG_SMP 275 if (pp)
121 leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs; 276 amba_system_id = *(unsigned long *)pp->value;
122 leon_percpu_timer_dev[0].irq = leon3_gptimer_irq+1; 277
278 /* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
279 np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
280 if (!np) {
281 np = of_find_node_by_name(rootnp, "01_00d");
282 if (!np)
283 goto bad;
284 }
285 pp = of_find_property(np, "reg", &len);
286 if (!pp)
287 goto bad;
288 leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;
289
290 /* Find GPTIMER Timer Registers base address otherwise bail out. */
291 nnp = rootnp;
292 do {
293 np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
294 if (!np) {
295 np = of_find_node_by_name(nnp, "01_011");
296 if (!np)
297 goto bad;
298 }
123 299
124 if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & 300 ampopts = 0;
125 (1<<LEON3_GPTIMER_SEPIRQ))) { 301 pp = of_find_property(np, "ampopts", &len);
126 prom_printf("irq timer not configured with separate irqs\n"); 302 if (pp) {
127 BUG(); 303 ampopts = *(int *)pp->value;
304 if (ampopts == 0) {
305 /* Skip this instance, resource already
306 * allocated by other OS */
307 nnp = np;
308 continue;
309 }
128 } 310 }
129 311
130 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); 312 /* Select Timer-Instance on Timer Core. Default is zero */
131 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); 313 leon3_gptimer_idx = ampopts & 0x7;
132 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0);
133# endif
134 314
135 } else { 315 pp = of_find_property(np, "reg", &len);
136 printk(KERN_ERR "No Timer/irqctrl found\n"); 316 if (pp)
317 leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
318 pp->value;
319 pp = of_find_property(np, "interrupts", &len);
320 if (pp)
321 leon3_gptimer_irq = *(unsigned int *)pp->value;
322 } while (0);
323
324 if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
325 goto bad;
326
327 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
328 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
329 (((1000000 / HZ) - 1)));
330 LEON3_BYPASS_STORE_PA(
331 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
332
333#ifdef CONFIG_SMP
334 leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
335
336 if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
337 (1<<LEON3_GPTIMER_SEPIRQ))) {
338 printk(KERN_ERR "timer not configured with separate irqs\n");
137 BUG(); 339 BUG();
138 } 340 }
139 341
140 irq = request_irq(leon3_gptimer_irq, 342 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
141 counter_fn, 343 0);
142 (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); 344 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
345 (((1000000/HZ) - 1)));
346 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
347 0);
348#endif
143 349
144 if (irq) { 350 /*
145 printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n", 351 * The IRQ controller may (if implemented) consist of multiple
146 LEON_INTERRUPT_TIMER1); 352 * IRQ controllers, each mapped on a 4Kb boundary.
353 * Each CPU may be routed to different IRQCTRLs, however
354 * we assume that all CPUs (in SMP system) is routed to the
355 * same IRQ Controller, and for non-SMP only one IRQCTRL is
356 * accessed anyway.
357 * In AMP systems, Linux must run on CPU0 for the time being.
358 */
359 icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
360 icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
361 leon3_irqctrl_regs += icsel;
362
363 /* Mask all IRQs on boot-cpu IRQ controller */
364 LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);
365
366 /* Probe extended IRQ controller */
367 eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
368 >> 16) & 0xf;
369 if (eirq != 0)
370 leon_eirq_setup(eirq);
371
372 irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
373 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
374 if (err) {
375 printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
147 prom_halt(); 376 prom_halt();
148 } 377 }
149 378
150# ifdef CONFIG_SMP 379#ifdef CONFIG_SMP
151 { 380 {
152 unsigned long flags; 381 unsigned long flags;
153 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)];
154 382
155 /* For SMP we use the level 14 ticker, however the bootup code 383 /*
156 * has copied the firmwares level 14 vector into boot cpu's 384 * In SMP, sun4m adds a IPI handler to IRQ trap handler that
157 * trap table, we must fix this now or we get squashed. 385 * LEON never must take, sun4d and LEON overwrites the branch
386 * with a NOP.
158 */ 387 */
159 local_irq_save(flags); 388 local_irq_save(flags);
160
161 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ 389 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
162
163 /* Adjust so that we jump directly to smpleon_ticker */
164 trap_table->inst_three += smpleon_ticker - real_irq_entry;
165
166 local_flush_cache_all(); 390 local_flush_cache_all();
167 local_irq_restore(flags); 391 local_irq_restore(flags);
168 } 392 }
169# endif 393#endif
170 394
171 if (leon3_gptimer_regs) { 395 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
172 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 396 LEON3_GPTIMER_EN |
173 LEON3_GPTIMER_EN | 397 LEON3_GPTIMER_RL |
174 LEON3_GPTIMER_RL | 398 LEON3_GPTIMER_LD |
175 LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); 399 LEON3_GPTIMER_IRQEN);
176 400
177#ifdef CONFIG_SMP 401#ifdef CONFIG_SMP
178 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 402 /* Install per-cpu IRQ handler for broadcasted ticker */
179 LEON3_GPTIMER_EN | 403 irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
180 LEON3_GPTIMER_RL | 404 "per-cpu", 0);
181 LEON3_GPTIMER_LD | 405 err = request_irq(irq, leon_percpu_timer_interrupt,
182 LEON3_GPTIMER_IRQEN); 406 IRQF_PERCPU | IRQF_TIMER, "ticker",
183#endif 407 NULL);
184 408 if (err) {
409 printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
410 prom_halt();
185 } 411 }
412
413 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
414 LEON3_GPTIMER_EN |
415 LEON3_GPTIMER_RL |
416 LEON3_GPTIMER_LD |
417 LEON3_GPTIMER_IRQEN);
418#endif
419 return;
420bad:
421 printk(KERN_ERR "No Timer/irqctrl found\n");
422 BUG();
423 return;
186} 424}
187 425
188void leon_clear_clock_irq(void) 426void leon_clear_clock_irq(void)
@@ -194,9 +432,6 @@ void leon_load_profile_irq(int cpu, unsigned int limit)
194 BUG(); 432 BUG();
195} 433}
196 434
197
198
199
200void __init leon_trans_init(struct device_node *dp) 435void __init leon_trans_init(struct device_node *dp)
201{ 436{
202 if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { 437 if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
@@ -250,22 +485,18 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
250{ 485{
251 unsigned long mask, flags, *addr; 486 unsigned long mask, flags, *addr;
252 mask = get_irqmask(irq_nr); 487 mask = get_irqmask(irq_nr);
253 local_irq_save(flags); 488 spin_lock_irqsave(&leon_irq_lock, flags);
254 addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]); 489 addr = (unsigned long *)LEON_IMASK(cpu);
255 LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask))); 490 LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
256 local_irq_restore(flags); 491 spin_unlock_irqrestore(&leon_irq_lock, flags);
257} 492}
258 493
259#endif 494#endif
260 495
261void __init leon_init_IRQ(void) 496void __init leon_init_IRQ(void)
262{ 497{
263 sparc_init_timers = leon_init_timers; 498 sparc_irq_config.init_timers = leon_init_timers;
264 499 sparc_irq_config.build_device_irq = _leon_build_device_irq;
265 BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
266 BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
267 BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
268 BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
269 500
270 BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq, 501 BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
271 BTFIXUPCALL_NORM); 502 BTFIXUPCALL_NORM);
@@ -282,5 +513,5 @@ void __init leon_init_IRQ(void)
282 513
283void __init leon_init(void) 514void __init leon_init(void)
284{ 515{
285 prom_build_more = &leon_node_init; 516 of_pdt_build_more = &leon_node_init;
286} 517}
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
new file mode 100644
index 000000000000..a8a9a275037d
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci.c
@@ -0,0 +1,253 @@
1/*
2 * leon_pci.c: LEON Host PCI support
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 * Code is partially derived from pcic.c
7 */
8
9#include <linux/of_device.h>
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <asm/leon.h>
13#include <asm/leon_pci.h>
14
15/* The LEON architecture does not rely on a BIOS or bootloader to setup
16 * PCI for us. The Linux generic routines are used to setup resources,
17 * reset values of confuration-space registers settings ae preseved.
18 */
19void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
20{
21 struct pci_bus *root_bus;
22
23 root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info);
24 if (root_bus) {
25 root_bus->resource[0] = &info->io_space;
26 root_bus->resource[1] = &info->mem_space;
27 root_bus->resource[2] = NULL;
28
29 /* Init all PCI devices into PCI tree */
30 pci_bus_add_devices(root_bus);
31
32 /* Setup IRQs of all devices using custom routines */
33 pci_fixup_irqs(pci_common_swizzle, info->map_irq);
34
35 /* Assign devices with resources */
36 pci_assign_unassigned_resources();
37 }
38}
39
40/* PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is
41 * accessed through a Window which is translated to low 64KB in PCI space, the
42 * first 4KB is not used so 60KB is available.
43 *
44 * This function is used by generic code to translate resource addresses into
45 * PCI addresses.
46 */
47void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
48 struct resource *res)
49{
50 struct leon_pci_info *info = dev->bus->sysdata;
51
52 region->start = res->start;
53 region->end = res->end;
54
55 if (res->flags & IORESOURCE_IO) {
56 region->start -= (info->io_space.start - 0x1000);
57 region->end -= (info->io_space.start - 0x1000);
58 }
59}
60EXPORT_SYMBOL(pcibios_resource_to_bus);
61
62/* see pcibios_resource_to_bus() comment */
63void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
64 struct pci_bus_region *region)
65{
66 struct leon_pci_info *info = dev->bus->sysdata;
67
68 res->start = region->start;
69 res->end = region->end;
70
71 if (res->flags & IORESOURCE_IO) {
72 res->start += (info->io_space.start - 0x1000);
73 res->end += (info->io_space.start - 0x1000);
74 }
75}
76EXPORT_SYMBOL(pcibios_bus_to_resource);
77
78void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
79{
80 struct leon_pci_info *info = pbus->sysdata;
81 struct pci_dev *dev;
82 int i, has_io, has_mem;
83 u16 cmd;
84
85 /* Generic PCI bus probing sets these to point at
86 * &io{port,mem}_resouce which is wrong for us.
87 */
88 if (pbus->self == NULL) {
89 pbus->resource[0] = &info->io_space;
90 pbus->resource[1] = &info->mem_space;
91 pbus->resource[2] = NULL;
92 }
93
94 list_for_each_entry(dev, &pbus->devices, bus_list) {
95 /*
96 * We can not rely on that the bootloader has enabled I/O
97 * or memory access to PCI devices. Instead we enable it here
98 * if the device has BARs of respective type.
99 */
100 has_io = has_mem = 0;
101 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
102 unsigned long f = dev->resource[i].flags;
103 if (f & IORESOURCE_IO)
104 has_io = 1;
105 else if (f & IORESOURCE_MEM)
106 has_mem = 1;
107 }
108 /* ROM BARs are mapped into 32-bit memory space */
109 if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
110 dev->resource[PCI_ROM_RESOURCE].flags |=
111 IORESOURCE_ROM_ENABLE;
112 has_mem = 1;
113 }
114 pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
115 if (has_io && !(cmd & PCI_COMMAND_IO)) {
116#ifdef CONFIG_PCI_DEBUG
117 printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
118 pci_name(dev));
119#endif
120 cmd |= PCI_COMMAND_IO;
121 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
122 cmd);
123 }
124 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
125#ifdef CONFIG_PCI_DEBUG
126 printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
127 "%s\n", pci_name(dev));
128#endif
129 cmd |= PCI_COMMAND_MEMORY;
130 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
131 cmd);
132 }
133 }
134}
135
136/*
137 * Other archs parse arguments here.
138 */
139char * __devinit pcibios_setup(char *str)
140{
141 return str;
142}
143
144resource_size_t pcibios_align_resource(void *data, const struct resource *res,
145 resource_size_t size, resource_size_t align)
146{
147 return res->start;
148}
149
150int pcibios_enable_device(struct pci_dev *dev, int mask)
151{
152 return pci_enable_resources(dev, mask);
153}
154
155struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
156{
157 /*
158 * Currently the OpenBoot nodes are not connected with the PCI device,
159 * this is because the LEON PROM does not create PCI nodes. Eventually
160 * this will change and the same approach as pcic.c can be used to
161 * match PROM nodes with pci devices.
162 */
163 return NULL;
164}
165EXPORT_SYMBOL(pci_device_to_OF_node);
166
167void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
168{
169#ifdef CONFIG_PCI_DEBUG
170 printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq,
171 pci_name(dev));
172#endif
173 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
174}
175
176/* in/out routines taken from pcic.c
177 *
178 * This probably belongs here rather than ioport.c because
179 * we do not want this crud linked into SBus kernels.
180 * Also, think for a moment about likes of floppy.c that
181 * include architecture specific parts. They may want to redefine ins/outs.
182 *
183 * We do not use horrible macros here because we want to
184 * advance pointer by sizeof(size).
185 */
186void outsb(unsigned long addr, const void *src, unsigned long count)
187{
188 while (count) {
189 count -= 1;
190 outb(*(const char *)src, addr);
191 src += 1;
192 /* addr += 1; */
193 }
194}
195EXPORT_SYMBOL(outsb);
196
197void outsw(unsigned long addr, const void *src, unsigned long count)
198{
199 while (count) {
200 count -= 2;
201 outw(*(const short *)src, addr);
202 src += 2;
203 /* addr += 2; */
204 }
205}
206EXPORT_SYMBOL(outsw);
207
208void outsl(unsigned long addr, const void *src, unsigned long count)
209{
210 while (count) {
211 count -= 4;
212 outl(*(const long *)src, addr);
213 src += 4;
214 /* addr += 4; */
215 }
216}
217EXPORT_SYMBOL(outsl);
218
219void insb(unsigned long addr, void *dst, unsigned long count)
220{
221 while (count) {
222 count -= 1;
223 *(unsigned char *)dst = inb(addr);
224 dst += 1;
225 /* addr += 1; */
226 }
227}
228EXPORT_SYMBOL(insb);
229
230void insw(unsigned long addr, void *dst, unsigned long count)
231{
232 while (count) {
233 count -= 2;
234 *(unsigned short *)dst = inw(addr);
235 dst += 2;
236 /* addr += 2; */
237 }
238}
239EXPORT_SYMBOL(insw);
240
241void insl(unsigned long addr, void *dst, unsigned long count)
242{
243 while (count) {
244 count -= 4;
245 /*
246 * XXX I am sure we are in for an unaligned trap here.
247 */
248 *(unsigned long *)dst = inl(addr);
249 dst += 4;
250 /* addr += 4; */
251 }
252}
253EXPORT_SYMBOL(insl);
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
new file mode 100644
index 000000000000..44dc093ee33a
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -0,0 +1,897 @@
1/*
2 * leon_pci_grpci2.c: GRPCI2 Host PCI driver
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 */
7
8#include <linux/of_device.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11#include <linux/delay.h>
12#include <linux/module.h>
13#include <asm/io.h>
14#include <asm/leon.h>
15#include <asm/vaddrs.h>
16#include <asm/sections.h>
17#include <asm/leon_pci.h>
18
19#include "irq.h"
20
21struct grpci2_barcfg {
22 unsigned long pciadr; /* PCI Space Address */
23 unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */
24};
25
26/* Device Node Configuration options:
27 * - barcfgs : Custom Configuration of Host's 6 target BARs
28 * - irq_mask : Limit which PCI interrupts are enabled
29 * - do_reset : Force PCI Reset on startup
30 *
31 * barcfgs
32 * =======
33 *
34 * Optional custom Target BAR configuration (see struct grpci2_barcfg). All
35 * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes)
36 *
37 * -1 means not configured (let host driver do default setup).
38 *
39 * [i*2+0] = PCI Address of BAR[i] on target interface
40 * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address
41 *
42 *
43 * irq_mask
44 * ========
45 *
46 * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
47 * all are enabled. Use this when PCI interrupt pins are floating on PCB.
48 * int, len=4.
49 * bit0 = PCI INTA#
50 * bit1 = PCI INTB#
51 * bit2 = PCI INTC#
52 * bit3 = PCI INTD#
53 *
54 *
55 * reset
56 * =====
57 *
58 * Force PCI reset on startup. int, len=4
59 */
60
61/* Enable Debugging Configuration Space Access */
62#undef GRPCI2_DEBUG_CFGACCESS
63
64/*
65 * GRPCI2 APB Register MAP
66 */
67struct grpci2_regs {
68 unsigned int ctrl; /* 0x00 Control */
69 unsigned int sts_cap; /* 0x04 Status / Capabilities */
70 int res1; /* 0x08 */
71 unsigned int io_map; /* 0x0C I/O Map address */
72 unsigned int dma_ctrl; /* 0x10 DMA */
73 unsigned int dma_bdbase; /* 0x14 DMA */
74 int res2[2]; /* 0x18 */
75 unsigned int bars[6]; /* 0x20 read-only PCI BARs */
76 int res3[2]; /* 0x38 */
77 unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */
78
79 /* PCI Trace Buffer Registers (OPTIONAL) */
80 unsigned int t_ctrl; /* 0x80 */
81 unsigned int t_cnt; /* 0x84 */
82 unsigned int t_adpat; /* 0x88 */
83 unsigned int t_admask; /* 0x8C */
84 unsigned int t_sigpat; /* 0x90 */
85 unsigned int t_sigmask; /* 0x94 */
86 unsigned int t_adstate; /* 0x98 */
87 unsigned int t_sigstate; /* 0x9C */
88};
89
90#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
91#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
92
93#define CTRL_BUS_BIT 16
94
95#define CTRL_RESET (1<<31)
96#define CTRL_SI (1<<27)
97#define CTRL_PE (1<<26)
98#define CTRL_EI (1<<25)
99#define CTRL_ER (1<<24)
100#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
101#define CTRL_HOSTINT 0xf
102
103#define STS_HOST_BIT 31
104#define STS_MST_BIT 30
105#define STS_TAR_BIT 29
106#define STS_DMA_BIT 28
107#define STS_DI_BIT 27
108#define STS_HI_BIT 26
109#define STS_IRQMODE_BIT 24
110#define STS_TRACE_BIT 23
111#define STS_CFGERRVALID_BIT 20
112#define STS_CFGERR_BIT 19
113#define STS_INTTYPE_BIT 12
114#define STS_INTSTS_BIT 8
115#define STS_FDEPTH_BIT 2
116#define STS_FNUM_BIT 0
117
118#define STS_HOST (1<<STS_HOST_BIT)
119#define STS_MST (1<<STS_MST_BIT)
120#define STS_TAR (1<<STS_TAR_BIT)
121#define STS_DMA (1<<STS_DMA_BIT)
122#define STS_DI (1<<STS_DI_BIT)
123#define STS_HI (1<<STS_HI_BIT)
124#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
125#define STS_TRACE (1<<STS_TRACE_BIT)
126#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
127#define STS_CFGERR (1<<STS_CFGERR_BIT)
128#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT)
129#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
130#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
131#define STS_FNUM (0x3<<STS_FNUM_BIT)
132
133#define STS_ISYSERR (1<<17)
134#define STS_IDMA (1<<16)
135#define STS_IDMAERR (1<<15)
136#define STS_IMSTABRT (1<<14)
137#define STS_ITGTABRT (1<<13)
138#define STS_IPARERR (1<<12)
139
140#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
141
142struct grpci2_bd_chan {
143 unsigned int ctrl; /* 0x00 DMA Control */
144 unsigned int nchan; /* 0x04 Next DMA Channel Address */
145 unsigned int nbd; /* 0x08 Next Data Descriptor in chan */
146 unsigned int res; /* 0x0C Reserved */
147};
148
149#define BD_CHAN_EN 0x80000000
150#define BD_CHAN_TYPE 0x00300000
151#define BD_CHAN_BDCNT 0x0000ffff
152#define BD_CHAN_EN_BIT 31
153#define BD_CHAN_TYPE_BIT 20
154#define BD_CHAN_BDCNT_BIT 0
155
156struct grpci2_bd_data {
157 unsigned int ctrl; /* 0x00 DMA Data Control */
158 unsigned int pci_adr; /* 0x04 PCI Start Address */
159 unsigned int ahb_adr; /* 0x08 AHB Start address */
160 unsigned int next; /* 0x0C Next Data Descriptor in chan */
161};
162
163#define BD_DATA_EN 0x80000000
164#define BD_DATA_IE 0x40000000
165#define BD_DATA_DR 0x20000000
166#define BD_DATA_TYPE 0x00300000
167#define BD_DATA_ER 0x00080000
168#define BD_DATA_LEN 0x0000ffff
169#define BD_DATA_EN_BIT 31
170#define BD_DATA_IE_BIT 30
171#define BD_DATA_DR_BIT 29
172#define BD_DATA_TYPE_BIT 20
173#define BD_DATA_ER_BIT 19
174#define BD_DATA_LEN_BIT 0
175
176/* GRPCI2 Capability */
177struct grpci2_cap_first {
178 unsigned int ctrl;
179 unsigned int pci2ahb_map[6];
180 unsigned int ext2ahb_map;
181 unsigned int io_map;
182 unsigned int pcibar_size[6];
183};
184#define CAP9_CTRL_OFS 0
185#define CAP9_BAR_OFS 0x4
186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24
188
189struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs;
192 char irq;
193 char irq_mode; /* IRQ Mode from CAPSTS REG */
194 char bt_enabled;
195 char do_reset;
196 char irq_mask;
197 u32 pciid; /* PCI ID of Host */
198 unsigned char irq_map[4];
199
200 /* Virtual IRQ numbers */
201 unsigned int virq_err;
202 unsigned int virq_dma;
203
204 /* AHB PCI Windows */
205 unsigned long pci_area; /* MEMORY */
206 unsigned long pci_area_end;
207 unsigned long pci_io; /* I/O */
208 unsigned long pci_conf; /* CONFIGURATION */
209 unsigned long pci_conf_end;
210 unsigned long pci_io_va;
211
212 struct grpci2_barcfg tgtbars[6];
213};
214
215DEFINE_SPINLOCK(grpci2_dev_lock);
216struct grpci2_priv *grpci2priv;
217
218int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
219{
220 struct grpci2_priv *priv = dev->bus->sysdata;
221 int irq_group;
222
223 /* Use default IRQ decoding on PCI BUS0 according slot numbering */
224 irq_group = slot & 0x3;
225 pin = ((pin - 1) + irq_group) & 0x3;
226
227 return priv->irq_map[pin];
228}
229
230static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
231 unsigned int devfn, int where, u32 *val)
232{
233 unsigned int *pci_conf;
234 unsigned long flags;
235 u32 tmp;
236
237 if (where & 0x3)
238 return -EINVAL;
239
240 if (bus == 0 && PCI_SLOT(devfn) != 0)
241 devfn += (0x8 * 6);
242
243 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags);
245 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
246 (bus << 16));
247 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
248
249 /* clear old status */
250 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
251
252 pci_conf = (unsigned int *) (priv->pci_conf |
253 (devfn << 8) | (where & 0xfc));
254 tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
255
256 /* Wait until GRPCI2 signals that CFG access is done, it should be
257 * done instantaneously unless a DMA operation is ongoing...
258 */
259 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
260 ;
261
262 if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
263 *val = 0xffffffff;
264 } else {
265 /* Bus always little endian (unaffected by byte-swapping) */
266 *val = flip_dword(tmp);
267 }
268
269 return 0;
270}
271
272static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
273 unsigned int devfn, int where, u32 *val)
274{
275 u32 v;
276 int ret;
277
278 if (where & 0x1)
279 return -EINVAL;
280 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
281 *val = 0xffff & (v >> (8 * (where & 0x3)));
282 return ret;
283}
284
285static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
286 unsigned int devfn, int where, u32 *val)
287{
288 u32 v;
289 int ret;
290
291 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
292 *val = 0xff & (v >> (8 * (where & 3)));
293
294 return ret;
295}
296
297static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
298 unsigned int devfn, int where, u32 val)
299{
300 unsigned int *pci_conf;
301 unsigned long flags;
302
303 if (where & 0x3)
304 return -EINVAL;
305
306 if (bus == 0 && PCI_SLOT(devfn) != 0)
307 devfn += (0x8 * 6);
308
309 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags);
311 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
312 (bus << 16));
313 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
314
315 /* clear old status */
316 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
317
318 pci_conf = (unsigned int *) (priv->pci_conf |
319 (devfn << 8) | (where & 0xfc));
320 LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
321
322 /* Wait until GRPCI2 signals that CFG access is done, it should be
323 * done instantaneously unless a DMA operation is ongoing...
324 */
325 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
326 ;
327
328 return 0;
329}
330
331static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
332 unsigned int devfn, int where, u32 val)
333{
334 int ret;
335 u32 v;
336
337 if (where & 0x1)
338 return -EINVAL;
339 ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
340 if (ret)
341 return ret;
342 v = (v & ~(0xffff << (8 * (where & 0x3)))) |
343 ((0xffff & val) << (8 * (where & 0x3)));
344 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
345}
346
347static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
348 unsigned int devfn, int where, u32 val)
349{
350 int ret;
351 u32 v;
352
353 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
354 if (ret != 0)
355 return ret;
356 v = (v & ~(0xff << (8 * (where & 0x3)))) |
357 ((0xff & val) << (8 * (where & 0x3)));
358 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
359}
360
361/* Read from Configuration Space. When entering here the PCI layer has taken
362 * the pci_lock spinlock and IRQ is off.
363 */
364static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
365 int where, int size, u32 *val)
366{
367 struct grpci2_priv *priv = grpci2priv;
368 unsigned int busno = bus->number;
369 int ret;
370
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
372 *val = ~0;
373 return 0;
374 }
375
376 switch (size) {
377 case 1:
378 ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
379 break;
380 case 2:
381 ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
382 break;
383 case 4:
384 ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
385 break;
386 default:
387 ret = -EINVAL;
388 break;
389 }
390
391#ifdef GRPCI2_DEBUG_CFGACCESS
392 printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
393 "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
394 *val, size);
395#endif
396
397 return ret;
398}
399
400/* Write to Configuration Space. When entering here the PCI layer has taken
401 * the pci_lock spinlock and IRQ is off.
402 */
403static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
404 int where, int size, u32 val)
405{
406 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number;
408
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
410 return 0;
411
412#ifdef GRPCI2_DEBUG_CFGACCESS
413 printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
414 "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
415 where, size, val);
416#endif
417
418 switch (size) {
419 default:
420 return -EINVAL;
421 case 1:
422 return grpci2_cfg_w8(priv, busno, devfn, where, val);
423 case 2:
424 return grpci2_cfg_w16(priv, busno, devfn, where, val);
425 case 4:
426 return grpci2_cfg_w32(priv, busno, devfn, where, val);
427 }
428}
429
/* Configuration space accessors handed to the generic PCI layer */
static struct pci_ops grpci2_ops = {
	.read = grpci2_read_config,
	.write = grpci2_write_config,
};
434
/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
 * 3, where all PCI interrupts have a separate IRQ on the system IRQ
 * controller, this is not needed and the standard IRQ controller can be used.
 */
439
440static void grpci2_mask_irq(struct irq_data *data)
441{
442 unsigned long flags;
443 unsigned int irqidx;
444 struct grpci2_priv *priv = grpci2priv;
445
446 irqidx = (unsigned int)data->chip_data - 1;
447 if (irqidx > 3) /* only mask PCI interrupts here */
448 return;
449
450 spin_lock_irqsave(&grpci2_dev_lock, flags);
451 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
452 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
453}
454
455static void grpci2_unmask_irq(struct irq_data *data)
456{
457 unsigned long flags;
458 unsigned int irqidx;
459 struct grpci2_priv *priv = grpci2priv;
460
461 irqidx = (unsigned int)data->chip_data - 1;
462 if (irqidx > 3) /* only unmask PCI interrupts here */
463 return;
464
465 spin_lock_irqsave(&grpci2_dev_lock, flags);
466 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
467 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
468}
469
/* irq_chip startup hook: starting an IRQ is simply unmasking it */
static unsigned int grpci2_startup_irq(struct irq_data *data)
{
	grpci2_unmask_irq(data);
	return 0;
}
475
/* irq_chip shutdown hook: shutting down an IRQ is simply masking it */
static void grpci2_shutdown_irq(struct irq_data *data)
{
	grpci2_mask_irq(data);
}
480
/* irq_chip for the virtual PCI interrupts created by
 * grpci2_build_device_irq(); mask/unmask operate on the GRPCI2 ctrl reg.
 */
static struct irq_chip grpci2_irq = {
	.name = "grpci2",
	.irq_startup = grpci2_startup_irq,
	.irq_shutdown = grpci2_shutdown_irq,
	.irq_mask = grpci2_mask_irq,
	.irq_unmask = grpci2_unmask_irq,
};
488
/* Handle one or multiple IRQs from the PCI core. Decodes the shared
 * system IRQ into the individual Error / PCI INTA#..INTD# / DMA sources
 * and dispatches each one to its virtual IRQ handler.
 */
static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
{
	struct grpci2_priv *priv = grpci2priv;
	int i, ack = 0;
	unsigned int ctrl, sts_cap, pci_ints;

	ctrl = REGLOAD(priv->regs->ctrl);
	sts_cap = REGLOAD(priv->regs->sts_cap);

	/* Error Interrupt? */
	if (sts_cap & STS_ERR_IRQ) {
		generic_handle_irq(priv->virq_err);
		ack = 1;
	}

	/* PCI Interrupt? (status bits are inverted, and gated by the
	 * per-line enables in ctrl)
	 */
	pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
	if (pci_ints) {
		/* Call respective PCI Interrupt handler */
		for (i = 0; i < 4; i++) {
			if (pci_ints & (1 << i))
				generic_handle_irq(priv->irq_map[i]);
		}
		ack = 1;
	}

	/*
	 * Decode the DMA interrupt only when it is shared with Err and PCI
	 * INTX# (irq_mode 0). When the DMA has a unique IRQ its interrupts
	 * do not end up here, they go directly to the DMA ISR.
	 */
	if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
		generic_handle_irq(priv->virq_dma);
		ack = 1;
	}

	/*
	 * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
	 * Controller, this must be done after IRQ sources have been handled to
	 * avoid double IRQ generation
	 */
	if (ack)
		desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
534
535/* Create a virtual IRQ */
536static unsigned int grpci2_build_device_irq(unsigned int irq)
537{
538 unsigned int virq = 0, pil;
539
540 pil = 1 << 8;
541 virq = irq_alloc(irq, pil);
542 if (virq == 0)
543 goto out;
544
545 irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
546 "pcilvl");
547 irq_set_chip_data(virq, (void *)irq);
548
549out:
550 return virq;
551}
552
/* Initialize the GRPCI2 core: optional bus reset, address maps, byte
 * twisting, the host's PCI target BARs, and finally enable error
 * response. Must run before the PCI bus is scanned.
 */
void grpci2_hw_init(struct grpci2_priv *priv)
{
	u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
	struct grpci2_regs *regs = priv->regs;
	int i;
	struct grpci2_barcfg *barcfg = priv->tgtbars;

	/* Reset any earlier setup */
	if (priv->do_reset) {
		printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
		REGSTORE(regs->ctrl, CTRL_RESET);
		ssleep(1); /* Wait for boards to settle */
	}
	REGSTORE(regs->ctrl, 0);
	REGSTORE(regs->sts_cap, ~0); /* Clear Status */
	REGSTORE(regs->dma_ctrl, 0);
	REGSTORE(regs->dma_bdbase, 0);

	/* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
	REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);

	/* set 1:1 mapping between AHB -> PCI memory space, for all Masters
	 * Each AHB master has it's own mapping registers. Max 16 AHB masters.
	 */
	for (i = 0; i < 16; i++)
		REGSTORE(regs->ahbmst_map[i], priv->pci_area);

	/* Get the GRPCI2 Host PCI ID */
	grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);

	/* Get address to first (always defined) capability structure */
	grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);

	/* Enable/Disable Byte twisting (bit 0 of the CAP9 I/O map word) */
	grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
	io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
	grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);

	/* Setup the Host's PCI Target BARs for other peripherals to access,
	 * and do DMA to the host's memory. The target BARs can be sized and
	 * enabled individually.
	 *
	 * User may set custom target BARs, but default is:
	 * The first BARs is used to map kernel low (DMA is part of normal
	 * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the
	 * PCI bus, the other BARs are disabled. We assume that the first BAR
	 * is always available.
	 */
	for (i = 0; i < 6; i++) {
		if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
			/* Target BARs must have the proper alignment */
			ahbadr = barcfg[i].ahbadr;
			pciadr = barcfg[i].pciadr;
			bar_sz = ((pciadr - 1) & ~pciadr) + 1;
		} else {
			if (i == 0) {
				/* Map main memory */
				bar_sz = 0xf0000008; /* 256MB prefetchable */
				ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
					(unsigned long) &_end));
				pciadr = ahbadr;
			} else {
				bar_sz = 0;
				ahbadr = 0;
				pciadr = 0;
			}
		}
		grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
		grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
		grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
		printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
			i, pciadr, ahbadr);
	}

	/* set as bus master and enable pci memory responses */
	grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
	data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);

	/* Enable Error response (CPU-TRAP) on illegal memory access. */
	REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
}
635
/* Placeholder handler for the base system IRQ; real work is done by the
 * flow handler, so a call landing here is unexpected and only logged.
 */
static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
{
	printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
	return IRQ_NONE;
}
641
/* Handle GRPCI2 Error Interrupt: report parity/abort/system errors and
 * acknowledge them in the status register. Returns IRQ_NONE when the
 * (shared) interrupt was not raised by an error condition.
 */
static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
{
	struct grpci2_priv *priv = arg;
	struct grpci2_regs *regs = priv->regs;
	unsigned int status;

	status = REGLOAD(regs->sts_cap);
	if ((status & STS_ERR_IRQ) == 0)
		return IRQ_NONE;

	if (status & STS_IPARERR)
		printk(KERN_ERR "GRPCI2: Parity Error\n");

	if (status & STS_ITGTABRT)
		printk(KERN_ERR "GRPCI2: Target Abort\n");

	if (status & STS_IMSTABRT)
		printk(KERN_ERR "GRPCI2: Master Abort\n");

	if (status & STS_ISYSERR)
		printk(KERN_ERR "GRPCI2: System Error\n");

	/* Clear handled INT TYPE IRQs */
	REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);

	return IRQ_HANDLED;
}
670
671static int __devinit grpci2_of_probe(struct platform_device *ofdev)
672{
673 struct grpci2_regs *regs;
674 struct grpci2_priv *priv;
675 int err, i, len;
676 const int *tmp;
677 unsigned int capability;
678
679 if (grpci2priv) {
680 printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
681 return -ENODEV;
682 }
683
684 if (ofdev->num_resources < 3) {
685 printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
686 return -EIO;
687 }
688
689 /* Find Device Address */
690 regs = of_ioremap(&ofdev->resource[0], 0,
691 resource_size(&ofdev->resource[0]),
692 "grlib-grpci2 regs");
693 if (regs == NULL) {
694 printk(KERN_ERR "GRPCI2: ioremap failed\n");
695 return -EIO;
696 }
697
698 /*
699 * Check that we're in Host Slot and that we can act as a Host Bridge
700 * and not only as target.
701 */
702 capability = REGLOAD(regs->sts_cap);
703 if ((capability & STS_HOST) || !(capability & STS_MST)) {
704 printk(KERN_INFO "GRPCI2: not in host system slot\n");
705 err = -EIO;
706 goto err1;
707 }
708
709 priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
710 if (grpci2priv == NULL) {
711 err = -ENOMEM;
712 goto err1;
713 }
714 memset(grpci2priv, 0, sizeof(*grpci2priv));
715 priv->regs = regs;
716 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
717 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
718
719 printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
720
721 /* Byte twisting should be made configurable from kernel command line */
722 priv->bt_enabled = 1;
723
724 /* Let user do custom Target BAR assignment */
725 tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
726 if (tmp && (len == 2*4*6))
727 memcpy(priv->tgtbars, tmp, 2*4*6);
728 else
729 memset(priv->tgtbars, -1, 2*4*6);
730
731 /* Limit IRQ unmasking in irq_mode 2 and 3 */
732 tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
733 if (tmp && (len == 4))
734 priv->do_reset = *tmp;
735 else
736 priv->irq_mask = 0xf;
737
738 /* Optional PCI reset. Force PCI reset on startup */
739 tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
740 if (tmp && (len == 4))
741 priv->do_reset = *tmp;
742 else
743 priv->do_reset = 0;
744
745 /* Find PCI Memory, I/O and Configuration Space Windows */
746 priv->pci_area = ofdev->resource[1].start;
747 priv->pci_area_end = ofdev->resource[1].end+1;
748 priv->pci_io = ofdev->resource[2].start;
749 priv->pci_conf = ofdev->resource[2].start + 0x10000;
750 priv->pci_conf_end = priv->pci_conf + 0x10000;
751 priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
752 if (!priv->pci_io_va) {
753 err = -EIO;
754 goto err2;
755 }
756
757 printk(KERN_INFO
758 "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
759 " I/O SPACE [0x%08lx - 0x%08lx]\n"
760 " CONFIG SPACE [0x%08lx - 0x%08lx]\n",
761 priv->pci_area, priv->pci_area_end-1,
762 priv->pci_io, priv->pci_conf-1,
763 priv->pci_conf, priv->pci_conf_end-1);
764
765 /*
766 * I/O Space resources in I/O Window mapped into Virtual Adr Space
767 * We never use low 4KB because some devices seem have problems using
768 * address 0.
769 */
770 memset(&priv->info.io_space, 0, sizeof(struct resource));
771 priv->info.io_space.name = "GRPCI2 PCI I/O Space";
772 priv->info.io_space.start = priv->pci_io_va + 0x1000;
773 priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
774 priv->info.io_space.flags = IORESOURCE_IO;
775
776 /*
777 * GRPCI2 has no prefetchable memory, map everything as
778 * non-prefetchable memory
779 */
780 memset(&priv->info.mem_space, 0, sizeof(struct resource));
781 priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
782 priv->info.mem_space.start = priv->pci_area;
783 priv->info.mem_space.end = priv->pci_area_end - 1;
784 priv->info.mem_space.flags = IORESOURCE_MEM;
785
786 if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
787 goto err3;
788 if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
789 goto err4;
790
791 grpci2_hw_init(priv);
792
793 /*
794 * Get PCI Interrupt to System IRQ mapping and setup IRQ handling
795 * Error IRQ always on PCI INTA.
796 */
797 if (priv->irq_mode < 2) {
798 /* All PCI interrupts are shared using the same system IRQ */
799 leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
800 "pcilvl", 0);
801
802 priv->irq_map[0] = grpci2_build_device_irq(1);
803 priv->irq_map[1] = grpci2_build_device_irq(2);
804 priv->irq_map[2] = grpci2_build_device_irq(3);
805 priv->irq_map[3] = grpci2_build_device_irq(4);
806
807 priv->virq_err = grpci2_build_device_irq(5);
808 if (priv->irq_mode & 1)
809 priv->virq_dma = ofdev->archdata.irqs[1];
810 else
811 priv->virq_dma = grpci2_build_device_irq(6);
812
813 /* Enable IRQs on LEON IRQ controller */
814 err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
815 "GRPCI2_JUMP", priv);
816 if (err)
817 printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
818 } else {
819 /* All PCI interrupts have an unique IRQ interrupt */
820 for (i = 0; i < 4; i++) {
821 /* Make LEON IRQ layer handle level IRQ by acking */
822 leon_update_virq_handling(ofdev->archdata.irqs[i],
823 handle_fasteoi_irq, "pcilvl",
824 1);
825 priv->irq_map[i] = ofdev->archdata.irqs[i];
826 }
827 priv->virq_err = priv->irq_map[0];
828 if (priv->irq_mode & 1)
829 priv->virq_dma = ofdev->archdata.irqs[4];
830 else
831 priv->virq_dma = priv->irq_map[0];
832
833 /* Unmask all PCI interrupts, request_irq will not do that */
834 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
835 }
836
837 /* Setup IRQ handler for non-configuration space access errors */
838 err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
839 "GRPCI2_ERR", priv);
840 if (err) {
841 printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
842 goto err5;
843 }
844
845 /*
846 * Enable Error Interrupts. PCI interrupts are unmasked once request_irq
847 * is called by the PCI Device drivers
848 */
849 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
850
851 /* Init common layer and scan buses */
852 priv->info.ops = &grpci2_ops;
853 priv->info.map_irq = grpci2_map_irq;
854 leon_pci_init(ofdev, &priv->info);
855
856 return 0;
857
858err5:
859 release_resource(&priv->info.io_space);
860err4:
861 release_resource(&priv->info.mem_space);
862err3:
863 err = -ENOMEM;
864 iounmap((void *)priv->pci_io_va);
865err2:
866 kfree(priv);
867err1:
868 of_iounmap(&ofdev->resource[0], regs,
869 resource_size(&ofdev->resource[0]));
870 return err;
871}
872
/* Match table: matched by node name. "01_07c" is presumably the raw
 * AMBA vendor:device encoding for the same core — confirm against the
 * GRLIB device naming convention.
 */
static struct of_device_id grpci2_of_match[] = {
	{
		.name = "GAISLER_GRPCI2",
	},
	{
		.name = "01_07c",
	},
	{},
};
882
/* Platform driver glue; grpci2_of_probe() accepts only one core */
static struct platform_driver grpci2_of_driver = {
	.driver = {
		.name = "grpci2",
		.owner = THIS_MODULE,
		.of_match_table = grpci2_of_match,
	},
	.probe = grpci2_of_probe,
};
891
/* Register the driver at subsys level so the host bridge is probed
 * before PCI device drivers initialize.
 */
static int __init grpci2_init(void)
{
	return platform_driver_register(&grpci2_of_driver);
}

subsys_initcall(grpci2_init);
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
new file mode 100644
index 000000000000..519ca923f59f
--- /dev/null
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -0,0 +1,82 @@
1/* leon_pmc.c: LEON Power-down cpu_idle() handler
2 *
3 * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
4 */
5
6#include <linux/init.h>
7#include <linux/pm.h>
8
9#include <asm/leon_amba.h>
10#include <asm/leon.h>
11
/* List of Systems that need fixup instructions around power-down
 * instruction. Zero-terminated; entries are compared against
 * (amba_system_id >> 16) in pmc_leon_need_fixup().
 */
unsigned int pmc_leon_fixup_ids[] = {
	AEROFLEX_UT699,
	GAISLER_GR712RC,
	LEON4_NEXTREME1,
	0
};
19
20int pmc_leon_need_fixup(void)
21{
22 unsigned int systemid = amba_system_id >> 16;
23 unsigned int *id;
24
25 id = &pmc_leon_fixup_ids[0];
26 while (*id != 0) {
27 if (*id == systemid)
28 return 1;
29 id++;
30 }
31
32 return 0;
33}
34
/*
 * CPU idle callback function for systems that need some extra handling
 * (see pmc_leon_fixup_ids). Enters power-down via %asr19 followed by a
 * dummy MMU-bypass load.
 * See .../arch/sparc/kernel/process.c
 */
void pmc_leon_idle_fixup(void)
{
	/* Prepare an address to a non-cachable region. APB is always
	 * none-cachable. One instruction is executed after the Sleep
	 * instruction, we make sure to read the bus and throw away the
	 * value by accessing a non-cachable area, also we make sure the
	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
	 */
	register unsigned int address = (unsigned int)leon3_irqctrl_regs;
	__asm__ __volatile__ (
		"mov %%g0, %%asr19\n"
		"lda [%0] %1, %%g0\n"
		:
		: "r"(address), "i"(ASI_LEON_BYPASS));
}
54
/*
 * CPU idle callback function: write %asr19 to enter power-down.
 * See .../arch/sparc/kernel/process.c
 */
void pmc_leon_idle(void)
{
	/* For systems without power-down, this will be no-op */
	__asm__ __volatile__ ("mov %g0, %asr19\n\t");
}
64
65/* Install LEON Power Down function */
66static int __init leon_pmc_install(void)
67{
68 /* Assign power management IDLE handler */
69 if (pmc_leon_need_fixup())
70 pm_idle = pmc_leon_idle_fixup;
71 else
72 pm_idle = pmc_leon_idle;
73
74 printk(KERN_INFO "leon: power management initialized\n");
75
76 return 0;
77}
78
79/* This driver is not critical to the boot process, don't care
80 * if initialized late.
81 */
82late_initcall(leon_pmc_install);
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index e1656fc41ccb..fe8fb44c609c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -12,9 +12,9 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/threads.h> 13#include <linux/threads.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/kernel_stat.h> 16#include <linux/kernel_stat.h>
17#include <linux/of.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
@@ -30,6 +30,7 @@
30#include <asm/ptrace.h> 30#include <asm/ptrace.h>
31#include <asm/atomic.h> 31#include <asm/atomic.h>
32#include <asm/irq_regs.h> 32#include <asm/irq_regs.h>
33#include <asm/traps.h>
33 34
34#include <asm/delay.h> 35#include <asm/delay.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
@@ -42,6 +43,8 @@
42#include <asm/leon.h> 43#include <asm/leon.h>
43#include <asm/leon_amba.h> 44#include <asm/leon_amba.h>
44 45
46#include "kernel.h"
47
45#ifdef CONFIG_SPARC_LEON 48#ifdef CONFIG_SPARC_LEON
46 49
47#include "irq.h" 50#include "irq.h"
@@ -49,15 +52,18 @@
49extern ctxd_t *srmmu_ctx_table_phys; 52extern ctxd_t *srmmu_ctx_table_phys;
50static int smp_processors_ready; 53static int smp_processors_ready;
51extern volatile unsigned long cpu_callin_map[NR_CPUS]; 54extern volatile unsigned long cpu_callin_map[NR_CPUS];
52extern unsigned char boot_cpu_id;
53extern cpumask_t smp_commenced_mask; 55extern cpumask_t smp_commenced_mask;
54void __init leon_configure_cache_smp(void); 56void __init leon_configure_cache_smp(void);
57static void leon_ipi_init(void);
58
59/* IRQ number of LEON IPIs */
60int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;
55 61
56static inline unsigned long do_swap(volatile unsigned long *ptr, 62static inline unsigned long do_swap(volatile unsigned long *ptr,
57 unsigned long val) 63 unsigned long val)
58{ 64{
59 __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val) 65 __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
60 : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS) 66 : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
61 : "memory"); 67 : "memory");
62 return val; 68 return val;
63} 69}
@@ -93,8 +99,6 @@ void __cpuinit leon_callin(void)
93 local_flush_cache_all(); 99 local_flush_cache_all();
94 local_flush_tlb_all(); 100 local_flush_tlb_all();
95 101
96 cpu_probe();
97
98 /* Fix idle thread fields. */ 102 /* Fix idle thread fields. */
99 __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) 103 __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
100 : "memory" /* paranoid */); 104 : "memory" /* paranoid */);
@@ -103,11 +107,11 @@ void __cpuinit leon_callin(void)
103 atomic_inc(&init_mm.mm_count); 107 atomic_inc(&init_mm.mm_count);
104 current->active_mm = &init_mm; 108 current->active_mm = &init_mm;
105 109
106 while (!cpu_isset(cpuid, smp_commenced_mask)) 110 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
107 mb(); 111 mb();
108 112
109 local_irq_enable(); 113 local_irq_enable();
110 cpu_set(cpuid, cpu_online_map); 114 set_cpu_online(cpuid, true);
111} 115}
112 116
113/* 117/*
@@ -178,13 +182,16 @@ void __init leon_boot_cpus(void)
178 int nrcpu = leon_smp_nrcpus(); 182 int nrcpu = leon_smp_nrcpus();
179 int me = smp_processor_id(); 183 int me = smp_processor_id();
180 184
185 /* Setup IPI */
186 leon_ipi_init();
187
181 printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me, 188 printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
182 (unsigned int)nrcpu, (unsigned int)NR_CPUS, 189 (unsigned int)nrcpu, (unsigned int)NR_CPUS,
183 (unsigned int)&(leon3_irqctrl_regs->mpstatus)); 190 (unsigned int)&(leon3_irqctrl_regs->mpstatus));
184 191
185 leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me); 192 leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
186 leon_enable_irq_cpu(LEON3_IRQ_TICKER, me); 193 leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
187 leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me); 194 leon_enable_irq_cpu(leon_ipi_irq, me);
188 195
189 leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); 196 leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
190 197
@@ -219,6 +226,10 @@ int __cpuinit leon_boot_one_cpu(int i)
219 (unsigned int)&leon3_irqctrl_regs->mpstatus); 226 (unsigned int)&leon3_irqctrl_regs->mpstatus);
220 local_flush_cache_all(); 227 local_flush_cache_all();
221 228
	/* Make sure all IRQs are off from the start for this new CPU */
230 LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
231
232 /* Wake one CPU */
222 LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i); 233 LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
223 234
224 /* wheee... it's going... */ 235 /* wheee... it's going... */
@@ -235,7 +246,7 @@ int __cpuinit leon_boot_one_cpu(int i)
235 } else { 246 } else {
236 leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); 247 leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
237 leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); 248 leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
238 leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i); 249 leon_enable_irq_cpu(leon_ipi_irq, i);
239 } 250 }
240 251
241 local_flush_cache_all(); 252 local_flush_cache_all();
@@ -261,24 +272,24 @@ void __init leon_smp_done(void)
261 local_flush_cache_all(); 272 local_flush_cache_all();
262 273
263 /* Free unneeded trap tables */ 274 /* Free unneeded trap tables */
264 if (!cpu_isset(1, cpu_present_map)) { 275 if (!cpu_present(1)) {
265 ClearPageReserved(virt_to_page(trapbase_cpu1)); 276 ClearPageReserved(virt_to_page(&trapbase_cpu1));
266 init_page_count(virt_to_page(trapbase_cpu1)); 277 init_page_count(virt_to_page(&trapbase_cpu1));
267 free_page((unsigned long)trapbase_cpu1); 278 free_page((unsigned long)&trapbase_cpu1);
268 totalram_pages++; 279 totalram_pages++;
269 num_physpages++; 280 num_physpages++;
270 } 281 }
271 if (!cpu_isset(2, cpu_present_map)) { 282 if (!cpu_present(2)) {
272 ClearPageReserved(virt_to_page(trapbase_cpu2)); 283 ClearPageReserved(virt_to_page(&trapbase_cpu2));
273 init_page_count(virt_to_page(trapbase_cpu2)); 284 init_page_count(virt_to_page(&trapbase_cpu2));
274 free_page((unsigned long)trapbase_cpu2); 285 free_page((unsigned long)&trapbase_cpu2);
275 totalram_pages++; 286 totalram_pages++;
276 num_physpages++; 287 num_physpages++;
277 } 288 }
278 if (!cpu_isset(3, cpu_present_map)) { 289 if (!cpu_present(3)) {
279 ClearPageReserved(virt_to_page(trapbase_cpu3)); 290 ClearPageReserved(virt_to_page(&trapbase_cpu3));
280 init_page_count(virt_to_page(trapbase_cpu3)); 291 init_page_count(virt_to_page(&trapbase_cpu3));
281 free_page((unsigned long)trapbase_cpu3); 292 free_page((unsigned long)&trapbase_cpu3);
282 totalram_pages++; 293 totalram_pages++;
283 num_physpages++; 294 num_physpages++;
284 } 295 }
@@ -291,6 +302,99 @@ void leon_irq_rotate(int cpu)
291{ 302{
292} 303}
293 304
305struct leon_ipi_work {
306 int single;
307 int msk;
308 int resched;
309};
310
311static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);
312
313/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ
314 * is used for all three types of IPIs.
315 */
316static void __init leon_ipi_init(void)
317{
318 int cpu, len;
319 struct leon_ipi_work *work;
320 struct property *pp;
321 struct device_node *rootnp;
322 struct tt_entry *trap_table;
323 unsigned long flags;
324
325 /* Find IPI IRQ or stick with default value */
326 rootnp = of_find_node_by_path("/ambapp0");
327 if (rootnp) {
328 pp = of_find_property(rootnp, "ipi_num", &len);
329 if (pp && (*(int *)pp->value))
330 leon_ipi_irq = *(int *)pp->value;
331 }
332 printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);
333
334 /* Adjust so that we jump directly to smpleon_ipi */
335 local_irq_save(flags);
336 trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
337 trap_table->inst_three += smpleon_ipi - real_irq_entry;
338 local_flush_cache_all();
339 local_irq_restore(flags);
340
341 for_each_possible_cpu(cpu) {
342 work = &per_cpu(leon_ipi_work, cpu);
343 work->single = work->msk = work->resched = 0;
344 }
345}
346
347static void leon_ipi_single(int cpu)
348{
349 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
350
351 /* Mark work */
352 work->single = 1;
353
354 /* Generate IRQ on the CPU */
355 set_cpu_int(cpu, leon_ipi_irq);
356}
357
358static void leon_ipi_mask_one(int cpu)
359{
360 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
361
362 /* Mark work */
363 work->msk = 1;
364
365 /* Generate IRQ on the CPU */
366 set_cpu_int(cpu, leon_ipi_irq);
367}
368
369static void leon_ipi_resched(int cpu)
370{
371 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
372
373 /* Mark work */
374 work->resched = 1;
375
376 /* Generate IRQ on the CPU (any IRQ will cause resched) */
377 set_cpu_int(cpu, leon_ipi_irq);
378}
379
380void leonsmp_ipi_interrupt(void)
381{
382 struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
383
384 if (work->single) {
385 work->single = 0;
386 smp_call_function_single_interrupt();
387 }
388 if (work->msk) {
389 work->msk = 0;
390 smp_call_function_interrupt();
391 }
392 if (work->resched) {
393 work->resched = 0;
394 smp_resched_interrupt();
395 }
396}
397
294static struct smp_funcall { 398static struct smp_funcall {
295 smpfunc_t func; 399 smpfunc_t func;
296 unsigned long arg1; 400 unsigned long arg1;
@@ -336,10 +440,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
336 { 440 {
337 register int i; 441 register int i;
338 442
339 cpu_clear(smp_processor_id(), mask); 443 cpumask_clear_cpu(smp_processor_id(), &mask);
340 cpus_and(mask, cpu_online_map, mask); 444 cpumask_and(&mask, cpu_online_mask, &mask);
341 for (i = 0; i <= high; i++) { 445 for (i = 0; i <= high; i++) {
342 if (cpu_isset(i, mask)) { 446 if (cpumask_test_cpu(i, &mask)) {
343 ccall_info.processors_in[i] = 0; 447 ccall_info.processors_in[i] = 0;
344 ccall_info.processors_out[i] = 0; 448 ccall_info.processors_out[i] = 0;
345 set_cpu_int(i, LEON3_IRQ_CROSS_CALL); 449 set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
@@ -353,7 +457,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
353 457
354 i = 0; 458 i = 0;
355 do { 459 do {
356 if (!cpu_isset(i, mask)) 460 if (!cpumask_test_cpu(i, &mask))
357 continue; 461 continue;
358 462
359 while (!ccall_info.processors_in[i]) 463 while (!ccall_info.processors_in[i])
@@ -362,7 +466,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
362 466
363 i = 0; 467 i = 0;
364 do { 468 do {
365 if (!cpu_isset(i, mask)) 469 if (!cpumask_test_cpu(i, &mask))
366 continue; 470 continue;
367 471
368 while (!ccall_info.processors_out[i]) 472 while (!ccall_info.processors_out[i])
@@ -385,27 +489,23 @@ void leon_cross_call_irq(void)
385 ccall_info.processors_out[i] = 1; 489 ccall_info.processors_out[i] = 1;
386} 490}
387 491
388void leon_percpu_timer_interrupt(struct pt_regs *regs) 492irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused)
389{ 493{
390 struct pt_regs *old_regs;
391 int cpu = smp_processor_id(); 494 int cpu = smp_processor_id();
392 495
393 old_regs = set_irq_regs(regs);
394
395 leon_clear_profile_irq(cpu); 496 leon_clear_profile_irq(cpu);
396 497
397 profile_tick(CPU_PROFILING); 498 profile_tick(CPU_PROFILING);
398 499
399 if (!--prof_counter(cpu)) { 500 if (!--prof_counter(cpu)) {
400 int user = user_mode(regs); 501 int user = user_mode(get_irq_regs());
401 502
402 irq_enter();
403 update_process_times(user); 503 update_process_times(user);
404 irq_exit();
405 504
406 prof_counter(cpu) = prof_multiplier(cpu); 505 prof_counter(cpu) = prof_multiplier(cpu);
407 } 506 }
408 set_irq_regs(old_regs); 507
508 return IRQ_HANDLED;
409} 509}
410 510
411static void __init smp_setup_percpu_timer(void) 511static void __init smp_setup_percpu_timer(void)
@@ -438,15 +538,6 @@ void __init leon_blackbox_current(unsigned *addr)
438 538
439} 539}
440 540
441/*
442 * CPU idle callback function
443 * See .../arch/sparc/kernel/process.c
444 */
445void pmc_leon_idle(void)
446{
447 __asm__ volatile ("mov %g0, %asr19");
448}
449
450void __init leon_init_smp(void) 541void __init leon_init_smp(void)
451{ 542{
452 /* Patch ipi15 trap table */ 543 /* Patch ipi15 trap table */
@@ -457,13 +548,9 @@ void __init leon_init_smp(void)
457 BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM); 548 BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
458 BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id, 549 BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
459 BTFIXUPCALL_NORM); 550 BTFIXUPCALL_NORM);
460 551 BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM);
461#ifndef PMC_NO_IDLE 552 BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM);
462 /* Assign power management IDLE handler */ 553 BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM);
463 pm_idle = pmc_leon_idle;
464 printk(KERN_INFO "leon: power management initialized\n");
465#endif
466
467} 554}
468 555
469#endif /* CONFIG_SPARC_LEON */ 556#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 83e85c2e802a..42f28c7420e1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
107 return hp; 107 return hp;
108} 108}
109 109
110static void mdesc_memblock_free(struct mdesc_handle *hp) 110static void __init mdesc_memblock_free(struct mdesc_handle *hp)
111{ 111{
112 unsigned int alloc_size; 112 unsigned int alloc_size;
113 unsigned long start; 113 unsigned long start;
@@ -768,7 +768,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl
768 cpuid, NR_CPUS); 768 cpuid, NR_CPUS);
769 continue; 769 continue;
770 } 770 }
771 if (!cpu_isset(cpuid, *mask)) 771 if (!cpumask_test_cpu(cpuid, mask))
772 continue; 772 continue;
773#endif 773#endif
774 774
@@ -890,6 +890,7 @@ static ssize_t mdesc_read(struct file *file, char __user *buf,
890static const struct file_operations mdesc_fops = { 890static const struct file_operations mdesc_fops = {
891 .read = mdesc_read, 891 .read = mdesc_read,
892 .owner = THIS_MODULE, 892 .owner = THIS_MODULE,
893 .llseek = noop_llseek,
893}; 894};
894 895
895static struct miscdevice mdesc_misc = { 896static struct miscdevice mdesc_misc = {
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f848aadf54dc..99ba5baa9497 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -18,19 +18,16 @@
18#include <asm/spitfire.h> 18#include <asm/spitfire.h>
19 19
20#ifdef CONFIG_SPARC64 20#ifdef CONFIG_SPARC64
21static void *module_map(unsigned long size)
22{
23 struct vm_struct *area;
24 21
25 size = PAGE_ALIGN(size); 22#include <linux/jump_label.h>
26 if (!size || size > MODULES_LEN)
27 return NULL;
28 23
29 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); 24static void *module_map(unsigned long size)
30 if (!area) 25{
26 if (PAGE_ALIGN(size) > MODULES_LEN)
31 return NULL; 27 return NULL;
32 28 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
33 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); 29 GFP_KERNEL, PAGE_KERNEL, -1,
30 __builtin_return_address(0));
34} 31}
35 32
36static char *dot2underscore(char *name) 33static char *dot2underscore(char *name)
@@ -217,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
217 me->name, 214 me->name,
218 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); 215 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
219 return -ENOEXEC; 216 return -ENOEXEC;
220 }; 217 }
221 } 218 }
222 return 0; 219 return 0;
223} 220}
@@ -227,6 +224,9 @@ int module_finalize(const Elf_Ehdr *hdr,
227 const Elf_Shdr *sechdrs, 224 const Elf_Shdr *sechdrs,
228 struct module *me) 225 struct module *me)
229{ 226{
227 /* make jump label nops */
228 jump_label_apply_nops(me);
229
230 /* Cheetah's I-cache is fully coherent. */ 230 /* Cheetah's I-cache is fully coherent. */
231 if (tlb_type == spitfire) { 231 if (tlb_type == spitfire) {
232 unsigned long va; 232 unsigned long va;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index a4bd7ba74c89..300f810142f5 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -270,8 +270,6 @@ int __init nmi_init(void)
270 atomic_set(&nmi_active, -1); 270 atomic_set(&nmi_active, -1);
271 } 271 }
272 } 272 }
273 if (!err)
274 init_hw_perf_events();
275 273
276 return err; 274 return err;
277} 275}
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 2d055a1e9cc2..a312af40ea84 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -13,6 +13,7 @@
13#include <asm/leon_amba.h> 13#include <asm/leon_amba.h>
14 14
15#include "of_device_common.h" 15#include "of_device_common.h"
16#include "irq.h"
16 17
17/* 18/*
18 * PCI bus specific translator 19 * PCI bus specific translator
@@ -355,7 +356,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
355 if (intr) { 356 if (intr) {
356 op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs); 357 op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
357 for (i = 0; i < op->archdata.num_irqs; i++) 358 for (i = 0; i < op->archdata.num_irqs; i++)
358 op->archdata.irqs[i] = intr[i].pri; 359 op->archdata.irqs[i] =
360 sparc_irq_config.build_device_irq(op, intr[i].pri);
359 } else { 361 } else {
360 const unsigned int *irq = 362 const unsigned int *irq =
361 of_get_property(dp, "interrupts", &len); 363 of_get_property(dp, "interrupts", &len);
@@ -363,64 +365,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
363 if (irq) { 365 if (irq) {
364 op->archdata.num_irqs = len / sizeof(unsigned int); 366 op->archdata.num_irqs = len / sizeof(unsigned int);
365 for (i = 0; i < op->archdata.num_irqs; i++) 367 for (i = 0; i < op->archdata.num_irqs; i++)
366 op->archdata.irqs[i] = irq[i]; 368 op->archdata.irqs[i] =
369 sparc_irq_config.build_device_irq(op, irq[i]);
367 } else { 370 } else {
368 op->archdata.num_irqs = 0; 371 op->archdata.num_irqs = 0;
369 } 372 }
370 } 373 }
371 if (sparc_cpu_model == sun4d) {
372 static int pil_to_sbus[] = {
373 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
374 };
375 struct device_node *io_unit, *sbi = dp->parent;
376 const struct linux_prom_registers *regs;
377 int board, slot;
378
379 while (sbi) {
380 if (!strcmp(sbi->name, "sbi"))
381 break;
382
383 sbi = sbi->parent;
384 }
385 if (!sbi)
386 goto build_resources;
387
388 regs = of_get_property(dp, "reg", NULL);
389 if (!regs)
390 goto build_resources;
391
392 slot = regs->which_io;
393
394 /* If SBI's parent is not io-unit or the io-unit lacks
395 * a "board#" property, something is very wrong.
396 */
397 if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
398 printk("%s: Error, parent is not io-unit.\n",
399 sbi->full_name);
400 goto build_resources;
401 }
402 io_unit = sbi->parent;
403 board = of_getintprop_default(io_unit, "board#", -1);
404 if (board == -1) {
405 printk("%s: Error, lacks board# property.\n",
406 io_unit->full_name);
407 goto build_resources;
408 }
409
410 for (i = 0; i < op->archdata.num_irqs; i++) {
411 int this_irq = op->archdata.irqs[i];
412 int sbusl = pil_to_sbus[this_irq];
413
414 if (sbusl)
415 this_irq = (((board + 1) << 5) +
416 (sbusl << 2) +
417 slot);
418
419 op->archdata.irqs[i] = this_irq;
420 }
421 }
422 374
423build_resources:
424 build_device_resources(op, parent); 375 build_device_resources(op, parent);
425 376
426 op->dev.parent = parent; 377 op->dev.parent = parent;
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 63cd4e5d47c2..3bb2eace58cf 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -459,7 +459,7 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp,
459 * 459 *
460 * Handle this by deciding that, if we didn't get a 460 * Handle this by deciding that, if we didn't get a
461 * match in the parent's 'interrupt-map', and the 461 * match in the parent's 'interrupt-map', and the
462 * parent is an IRQ translater, then use the parent as 462 * parent is an IRQ translator, then use the parent as
463 * our IRQ controller. 463 * our IRQ controller.
464 */ 464 */
465 if (pp->irq_trans) 465 if (pp->irq_trans)
@@ -622,8 +622,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
622out: 622out:
623 nid = of_node_to_nid(dp); 623 nid = of_node_to_nid(dp);
624 if (nid != -1) { 624 if (nid != -1) {
625 cpumask_t numa_mask = *cpumask_of_node(nid); 625 cpumask_t numa_mask;
626 626
627 cpumask_copy(&numa_mask, cpumask_of_node(nid));
627 irq_set_affinity(irq, &numa_mask); 628 irq_set_affinity(irq, &numa_mask);
628 } 629 }
629 630
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index 49ddff56cb04..cb15bbf8a201 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -22,6 +22,33 @@ unsigned int irq_of_parse_and_map(struct device_node *node, int index)
22} 22}
23EXPORT_SYMBOL(irq_of_parse_and_map); 23EXPORT_SYMBOL(irq_of_parse_and_map);
24 24
25int of_address_to_resource(struct device_node *node, int index,
26 struct resource *r)
27{
28 struct platform_device *op = of_find_device_by_node(node);
29
30 if (!op || index >= op->num_resources)
31 return -EINVAL;
32
33 memcpy(r, &op->archdata.resource[index], sizeof(*r));
34 return 0;
35}
36EXPORT_SYMBOL_GPL(of_address_to_resource);
37
38void __iomem *of_iomap(struct device_node *node, int index)
39{
40 struct platform_device *op = of_find_device_by_node(node);
41 struct resource *r;
42
43 if (!op || index >= op->num_resources)
44 return NULL;
45
46 r = &op->archdata.resource[index];
47
48 return of_ioremap(r, 0, resource_size(r), (char *) r->name);
49}
50EXPORT_SYMBOL(of_iomap);
51
25/* Take the archdata values for IOMMU, STC, and HOSTDATA found in 52/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
26 * BUS and propagate to all child platform_device objects. 53 * BUS and propagate to all child platform_device objects.
27 */ 54 */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 4137579d9adc..713dc91020a6 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
675 * humanoid. 675 * humanoid.
676 */ 676 */
677 err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); 677 err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
678 (void) err;
678 } 679 }
679 list_for_each_entry(child_bus, &bus->children, node) 680 list_for_each_entry(child_bus, &bus->children, node)
680 pci_bus_register_of_sysfs(child_bus); 681 pci_bus_register_of_sysfs(child_bus);
@@ -1001,22 +1002,22 @@ EXPORT_SYMBOL(pci_domain_nr);
1001int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) 1002int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1002{ 1003{
1003 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 1004 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1004 unsigned int virt_irq; 1005 unsigned int irq;
1005 1006
1006 if (!pbm->setup_msi_irq) 1007 if (!pbm->setup_msi_irq)
1007 return -EINVAL; 1008 return -EINVAL;
1008 1009
1009 return pbm->setup_msi_irq(&virt_irq, pdev, desc); 1010 return pbm->setup_msi_irq(&irq, pdev, desc);
1010} 1011}
1011 1012
1012void arch_teardown_msi_irq(unsigned int virt_irq) 1013void arch_teardown_msi_irq(unsigned int irq)
1013{ 1014{
1014 struct msi_desc *entry = get_irq_msi(virt_irq); 1015 struct msi_desc *entry = irq_get_msi_desc(irq);
1015 struct pci_dev *pdev = entry->dev; 1016 struct pci_dev *pdev = entry->dev;
1016 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 1017 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1017 1018
1018 if (pbm->teardown_msi_irq) 1019 if (pbm->teardown_msi_irq)
1019 pbm->teardown_msi_irq(virt_irq, pdev); 1020 pbm->teardown_msi_irq(irq, pdev);
1020} 1021}
1021#endif /* !(CONFIG_PCI_MSI) */ 1022#endif /* !(CONFIG_PCI_MSI) */
1022 1023
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6c7a33af3ba6..a6895987fb70 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
281 case 4: 281 case 4:
282 *value = ret & 0xffffffff; 282 *value = ret & 0xffffffff;
283 break; 283 break;
284 }; 284 }
285 285
286 286
287 return PCIBIOS_SUCCESSFUL; 287 return PCIBIOS_SUCCESSFUL;
@@ -295,14 +295,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
295 unsigned int bus = bus_dev->number; 295 unsigned int bus = bus_dev->number;
296 unsigned int device = PCI_SLOT(devfn); 296 unsigned int device = PCI_SLOT(devfn);
297 unsigned int func = PCI_FUNC(devfn); 297 unsigned int func = PCI_FUNC(devfn);
298 unsigned long ret;
299 298
300 if (config_out_of_range(pbm, bus, devfn, where)) { 299 if (config_out_of_range(pbm, bus, devfn, where)) {
301 /* Do nothing. */ 300 /* Do nothing. */
302 } else { 301 } else {
303 ret = pci_sun4v_config_put(devhandle, 302 /* We don't check for hypervisor errors here, but perhaps
304 HV_PCI_DEVICE_BUILD(bus, device, func), 303 * we should and influence our return value depending upon
305 where, size, value); 304 * what kind of error is thrown.
305 */
306 pci_sun4v_config_put(devhandle,
307 HV_PCI_DEVICE_BUILD(bus, device, func),
308 where, size, value);
306 } 309 }
307 return PCIBIOS_SUCCESSFUL; 310 return PCIBIOS_SUCCESSFUL;
308} 311}
@@ -453,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
453 456
454 default: 457 default:
455 break; 458 break;
456 }; 459 }
457 } 460 }
458 461
459 if (!saw_io || !saw_mem) { 462 if (!saw_io || !saw_mem) {
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index efb896d68754..d29a32fcc5e4 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
214 214
215static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) 215static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
216{ 216{
217 unsigned long msiqid;
218 u64 val; 217 u64 val;
219 218
220 val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); 219 val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
221 msiqid = (val & MSI_MAP_EQNUM);
222 220
223 val &= ~MSI_MAP_VALID; 221 val &= ~MSI_MAP_VALID;
224 222
@@ -277,7 +275,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
277{ 275{
278 unsigned long cregs = (unsigned long) pbm->pbm_regs; 276 unsigned long cregs = (unsigned long) pbm->pbm_regs;
279 unsigned long imap_reg, iclr_reg, int_ctrlr; 277 unsigned long imap_reg, iclr_reg, int_ctrlr;
280 unsigned int virt_irq; 278 unsigned int irq;
281 int fixup; 279 int fixup;
282 u64 val; 280 u64 val;
283 281
@@ -293,14 +291,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
293 291
294 fixup = ((pbm->portid << 6) | devino) - int_ctrlr; 292 fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
295 293
296 virt_irq = build_irq(fixup, iclr_reg, imap_reg); 294 irq = build_irq(fixup, iclr_reg, imap_reg);
297 if (!virt_irq) 295 if (!irq)
298 return -ENOMEM; 296 return -ENOMEM;
299 297
300 upa_writeq(EVENT_QUEUE_CONTROL_SET_EN, 298 upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
301 pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid)); 299 pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
302 300
303 return virt_irq; 301 return irq;
304} 302}
305 303
306static const struct sparc64_msiq_ops pci_fire_msiq_ops = { 304static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
@@ -455,8 +453,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm,
455 return 0; 453 return 0;
456} 454}
457 455
458static int __devinit fire_probe(struct platform_device *op, 456static int __devinit fire_probe(struct platform_device *op)
459 const struct of_device_id *match)
460{ 457{
461 struct device_node *dp = op->dev.of_node; 458 struct device_node *dp = op->dev.of_node;
462 struct pci_pbm_info *pbm; 459 struct pci_pbm_info *pbm;
@@ -499,7 +496,7 @@ out_err:
499 return err; 496 return err;
500} 497}
501 498
502static struct of_device_id __initdata fire_match[] = { 499static const struct of_device_id fire_match[] = {
503 { 500 {
504 .name = "pci", 501 .name = "pci",
505 .compatible = "pciex108e,80f0", 502 .compatible = "pciex108e,80f0",
@@ -507,7 +504,7 @@ static struct of_device_id __initdata fire_match[] = {
507 {}, 504 {},
508}; 505};
509 506
510static struct of_platform_driver fire_driver = { 507static struct platform_driver fire_driver = {
511 .driver = { 508 .driver = {
512 .name = DRIVER_NAME, 509 .name = DRIVER_NAME,
513 .owner = THIS_MODULE, 510 .owner = THIS_MODULE,
@@ -518,7 +515,7 @@ static struct of_platform_driver fire_driver = {
518 515
519static int __init fire_init(void) 516static int __init fire_init(void)
520{ 517{
521 return of_register_platform_driver(&fire_driver); 518 return platform_driver_register(&fire_driver);
522} 519}
523 520
524subsys_initcall(fire_init); 521subsys_initcall(fire_init);
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index e20ed5f06e9c..6beb60df31d0 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -131,9 +131,9 @@ struct pci_pbm_info {
131 void *msi_queues; 131 void *msi_queues;
132 unsigned long *msi_bitmap; 132 unsigned long *msi_bitmap;
133 unsigned int *msi_irq_table; 133 unsigned int *msi_irq_table;
134 int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, 134 int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev,
135 struct msi_desc *entry); 135 struct msi_desc *entry);
136 void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev); 136 void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev);
137 const struct sparc64_msiq_ops *msi_ops; 137 const struct sparc64_msiq_ops *msi_ops;
138#endif /* !(CONFIG_PCI_MSI) */ 138#endif /* !(CONFIG_PCI_MSI) */
139 139
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 548b8ca9c210..580651af73f2 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -30,13 +30,10 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
30 30
31 err = ops->dequeue_msi(pbm, msiqid, &head, &msi); 31 err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
32 if (likely(err > 0)) { 32 if (likely(err > 0)) {
33 struct irq_desc *desc; 33 unsigned int irq;
34 unsigned int virt_irq;
35 34
36 virt_irq = pbm->msi_irq_table[msi - pbm->msi_first]; 35 irq = pbm->msi_irq_table[msi - pbm->msi_first];
37 desc = irq_desc + virt_irq; 36 generic_handle_irq(irq);
38
39 desc->handle_irq(virt_irq, desc);
40 } 37 }
41 38
42 if (unlikely(err < 0)) 39 if (unlikely(err < 0))
@@ -114,14 +111,14 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
114 111
115static struct irq_chip msi_irq = { 112static struct irq_chip msi_irq = {
116 .name = "PCI-MSI", 113 .name = "PCI-MSI",
117 .mask = mask_msi_irq, 114 .irq_mask = mask_msi_irq,
118 .unmask = unmask_msi_irq, 115 .irq_unmask = unmask_msi_irq,
119 .enable = unmask_msi_irq, 116 .irq_enable = unmask_msi_irq,
120 .disable = mask_msi_irq, 117 .irq_disable = mask_msi_irq,
121 /* XXX affinity XXX */ 118 /* XXX affinity XXX */
122}; 119};
123 120
124static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, 121static int sparc64_setup_msi_irq(unsigned int *irq_p,
125 struct pci_dev *pdev, 122 struct pci_dev *pdev,
126 struct msi_desc *entry) 123 struct msi_desc *entry)
127{ 124{
@@ -131,17 +128,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
131 int msi, err; 128 int msi, err;
132 u32 msiqid; 129 u32 msiqid;
133 130
134 *virt_irq_p = virt_irq_alloc(0, 0); 131 *irq_p = irq_alloc(0, 0);
135 err = -ENOMEM; 132 err = -ENOMEM;
136 if (!*virt_irq_p) 133 if (!*irq_p)
137 goto out_err; 134 goto out_err;
138 135
139 set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq, 136 irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
140 handle_simple_irq, "MSI"); 137 "MSI");
141 138
142 err = alloc_msi(pbm); 139 err = alloc_msi(pbm);
143 if (unlikely(err < 0)) 140 if (unlikely(err < 0))
144 goto out_virt_irq_free; 141 goto out_irq_free;
145 142
146 msi = err; 143 msi = err;
147 144
@@ -152,7 +149,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
152 if (err) 149 if (err)
153 goto out_msi_free; 150 goto out_msi_free;
154 151
155 pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p; 152 pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
156 153
157 if (entry->msi_attrib.is_64) { 154 if (entry->msi_attrib.is_64) {
158 msg.address_hi = pbm->msi64_start >> 32; 155 msg.address_hi = pbm->msi64_start >> 32;
@@ -163,24 +160,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
163 } 160 }
164 msg.data = msi; 161 msg.data = msi;
165 162
166 set_irq_msi(*virt_irq_p, entry); 163 irq_set_msi_desc(*irq_p, entry);
167 write_msi_msg(*virt_irq_p, &msg); 164 write_msi_msg(*irq_p, &msg);
168 165
169 return 0; 166 return 0;
170 167
171out_msi_free: 168out_msi_free:
172 free_msi(pbm, msi); 169 free_msi(pbm, msi);
173 170
174out_virt_irq_free: 171out_irq_free:
175 set_irq_chip(*virt_irq_p, NULL); 172 irq_set_chip(*irq_p, NULL);
176 virt_irq_free(*virt_irq_p); 173 irq_free(*irq_p);
177 *virt_irq_p = 0; 174 *irq_p = 0;
178 175
179out_err: 176out_err:
180 return err; 177 return err;
181} 178}
182 179
183static void sparc64_teardown_msi_irq(unsigned int virt_irq, 180static void sparc64_teardown_msi_irq(unsigned int irq,
184 struct pci_dev *pdev) 181 struct pci_dev *pdev)
185{ 182{
186 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 183 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
@@ -189,12 +186,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
189 int i, err; 186 int i, err;
190 187
191 for (i = 0; i < pbm->msi_num; i++) { 188 for (i = 0; i < pbm->msi_num; i++) {
192 if (pbm->msi_irq_table[i] == virt_irq) 189 if (pbm->msi_irq_table[i] == irq)
193 break; 190 break;
194 } 191 }
195 if (i >= pbm->msi_num) { 192 if (i >= pbm->msi_num) {
196 printk(KERN_ERR "%s: teardown: No MSI for irq %u\n", 193 printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
197 pbm->name, virt_irq); 194 pbm->name, irq);
198 return; 195 return;
199 } 196 }
200 197
@@ -205,14 +202,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
205 if (err) { 202 if (err) {
206 printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, " 203 printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
207 "irq %u, gives error %d\n", 204 "irq %u, gives error %d\n",
208 pbm->name, msi_num, virt_irq, err); 205 pbm->name, msi_num, irq, err);
209 return; 206 return;
210 } 207 }
211 208
212 free_msi(pbm, msi_num); 209 free_msi(pbm, msi_num);
213 210
214 set_irq_chip(virt_irq, NULL); 211 irq_set_chip(irq, NULL);
215 virt_irq_free(virt_irq); 212 irq_free(irq);
216} 213}
217 214
218static int msi_bitmap_alloc(struct pci_pbm_info *pbm) 215static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
@@ -287,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
287 284
288 nid = pbm->numa_node; 285 nid = pbm->numa_node;
289 if (nid != -1) { 286 if (nid != -1) {
290 cpumask_t numa_mask = *cpumask_of_node(nid); 287 cpumask_t numa_mask;
291 288
289 cpumask_copy(&numa_mask, cpumask_of_node(nid));
292 irq_set_affinity(irq, &numa_mask); 290 irq_set_affinity(irq, &numa_mask);
293 } 291 }
294 err = request_irq(irq, sparc64_msiq_interrupt, 0, 292 err = request_irq(irq, sparc64_msiq_interrupt, 0,
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index 22eab7cf3b11..86ae08d9b6ee 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -503,8 +503,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid)
503 503
504#define PSYCHO_CONFIGSPACE 0x001000000UL 504#define PSYCHO_CONFIGSPACE 0x001000000UL
505 505
506static int __devinit psycho_probe(struct platform_device *op, 506static int __devinit psycho_probe(struct platform_device *op)
507 const struct of_device_id *match)
508{ 507{
509 const struct linux_prom64_registers *pr_regs; 508 const struct linux_prom64_registers *pr_regs;
510 struct device_node *dp = op->dev.of_node; 509 struct device_node *dp = op->dev.of_node;
@@ -593,7 +592,7 @@ out_err:
593 return err; 592 return err;
594} 593}
595 594
596static struct of_device_id __initdata psycho_match[] = { 595static const struct of_device_id psycho_match[] = {
597 { 596 {
598 .name = "pci", 597 .name = "pci",
599 .compatible = "pci108e,8000", 598 .compatible = "pci108e,8000",
@@ -601,7 +600,7 @@ static struct of_device_id __initdata psycho_match[] = {
601 {}, 600 {},
602}; 601};
603 602
604static struct of_platform_driver psycho_driver = { 603static struct platform_driver psycho_driver = {
605 .driver = { 604 .driver = {
606 .name = DRIVER_NAME, 605 .name = DRIVER_NAME,
607 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
@@ -612,7 +611,7 @@ static struct of_platform_driver psycho_driver = {
612 611
613static int __init psycho_init(void) 612static int __init psycho_init(void)
614{ 613{
615 return of_register_platform_driver(&psycho_driver); 614 return platform_driver_register(&psycho_driver);
616} 615}
617 616
618subsys_initcall(psycho_init); 617subsys_initcall(psycho_init);
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 5c3f5ec4cabc..d1840dbdaa2f 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,9 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
452 sabre_scan_bus(pbm, &op->dev); 452 sabre_scan_bus(pbm, &op->dev);
453} 453}
454 454
455static int __devinit sabre_probe(struct platform_device *op, 455static const struct of_device_id sabre_match[];
456 const struct of_device_id *match) 456static int __devinit sabre_probe(struct platform_device *op)
457{ 457{
458 const struct of_device_id *match;
458 const struct linux_prom64_registers *pr_regs; 459 const struct linux_prom64_registers *pr_regs;
459 struct device_node *dp = op->dev.of_node; 460 struct device_node *dp = op->dev.of_node;
460 struct pci_pbm_info *pbm; 461 struct pci_pbm_info *pbm;
@@ -464,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op,
464 const u32 *vdma; 465 const u32 *vdma;
465 u64 clear_irq; 466 u64 clear_irq;
466 467
467 hummingbird_p = (match->data != NULL); 468 match = of_match_device(sabre_match, &op->dev);
469 hummingbird_p = match && (match->data != NULL);
468 if (!hummingbird_p) { 470 if (!hummingbird_p) {
469 struct device_node *cpu_dp; 471 struct device_node *cpu_dp;
470 472
@@ -582,7 +584,7 @@ out_err:
582 return err; 584 return err;
583} 585}
584 586
585static struct of_device_id __initdata sabre_match[] = { 587static const struct of_device_id sabre_match[] = {
586 { 588 {
587 .name = "pci", 589 .name = "pci",
588 .compatible = "pci108e,a001", 590 .compatible = "pci108e,a001",
@@ -595,7 +597,7 @@ static struct of_device_id __initdata sabre_match[] = {
595 {}, 597 {},
596}; 598};
597 599
598static struct of_platform_driver sabre_driver = { 600static struct platform_driver sabre_driver = {
599 .driver = { 601 .driver = {
600 .name = DRIVER_NAME, 602 .name = DRIVER_NAME,
601 .owner = THIS_MODULE, 603 .owner = THIS_MODULE,
@@ -606,7 +608,7 @@ static struct of_platform_driver sabre_driver = {
606 608
607static int __init sabre_init(void) 609static int __init sabre_init(void)
608{ 610{
609 return of_register_platform_driver(&sabre_driver); 611 return platform_driver_register(&sabre_driver);
610} 612}
611 613
612subsys_initcall(sabre_init); 614subsys_initcall(sabre_init);
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 445a47a2fb3d..f030b02edddd 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
264 default: 264 default:
265 type_string = "ECC Error"; 265 type_string = "ECC Error";
266 break; 266 break;
267 }; 267 }
268 printk("%s: IOMMU Error, type[%s]\n", 268 printk("%s: IOMMU Error, type[%s]\n",
269 pbm->name, type_string); 269 pbm->name, type_string);
270 270
@@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
319 default: 319 default:
320 type_string = "ECC Error"; 320 type_string = "ECC Error";
321 break; 321 break;
322 }; 322 }
323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " 323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
324 "sz(%dK) vpg(%08lx)]\n", 324 "sz(%dK) vpg(%08lx)]\n",
325 pbm->name, i, type_string, 325 pbm->name, i, type_string,
@@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
1313 const struct linux_prom64_registers *regs; 1313 const struct linux_prom64_registers *regs;
1314 struct device_node *dp = op->dev.of_node; 1314 struct device_node *dp = op->dev.of_node;
1315 const char *chipset_name; 1315 const char *chipset_name;
1316 int is_pbm_a, err; 1316 int err;
1317 1317
1318 switch (chip_type) { 1318 switch (chip_type) {
1319 case PBM_CHIP_TYPE_TOMATILLO: 1319 case PBM_CHIP_TYPE_TOMATILLO:
@@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
1328 default: 1328 default:
1329 chipset_name = "SCHIZO"; 1329 chipset_name = "SCHIZO";
1330 break; 1330 break;
1331 }; 1331 }
1332 1332
1333 /* For SCHIZO, three OBP regs: 1333 /* For SCHIZO, three OBP regs:
1334 * 1) PBM controller regs 1334 * 1) PBM controller regs
@@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
1343 */ 1343 */
1344 regs = of_get_property(dp, "reg", NULL); 1344 regs = of_get_property(dp, "reg", NULL);
1345 1345
1346 is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
1347
1348 pbm->next = pci_pbm_root; 1346 pbm->next = pci_pbm_root;
1349 pci_pbm_root = pbm; 1347 pci_pbm_root = pbm;
1350 1348
@@ -1460,10 +1458,15 @@ out_err:
1460 return err; 1458 return err;
1461} 1459}
1462 1460
1463static int __devinit schizo_probe(struct platform_device *op, 1461static const struct of_device_id schizo_match[];
1464 const struct of_device_id *match) 1462static int __devinit schizo_probe(struct platform_device *op)
1465{ 1463{
1466 return __schizo_init(op, (unsigned long) match->data); 1464 const struct of_device_id *match;
1465
1466 match = of_match_device(schizo_match, &op->dev);
1467 if (!match)
1468 return -EINVAL;
1469 return __schizo_init(op, (unsigned long)match->data);
1467} 1470}
1468 1471
1469/* The ordering of this table is very important. Some Tomatillo 1472/* The ordering of this table is very important. Some Tomatillo
@@ -1471,7 +1474,7 @@ static int __devinit schizo_probe(struct platform_device *op,
1471 * and pci108e,8001. So list the chips in reverse chronological 1474 * and pci108e,8001. So list the chips in reverse chronological
1472 * order. 1475 * order.
1473 */ 1476 */
1474static struct of_device_id __initdata schizo_match[] = { 1477static const struct of_device_id schizo_match[] = {
1475 { 1478 {
1476 .name = "pci", 1479 .name = "pci",
1477 .compatible = "pci108e,a801", 1480 .compatible = "pci108e,a801",
@@ -1490,7 +1493,7 @@ static struct of_device_id __initdata schizo_match[] = {
1490 {}, 1493 {},
1491}; 1494};
1492 1495
1493static struct of_platform_driver schizo_driver = { 1496static struct platform_driver schizo_driver = {
1494 .driver = { 1497 .driver = {
1495 .name = DRIVER_NAME, 1498 .name = DRIVER_NAME,
1496 .owner = THIS_MODULE, 1499 .owner = THIS_MODULE,
@@ -1501,7 +1504,7 @@ static struct of_platform_driver schizo_driver = {
1501 1504
1502static int __init schizo_init(void) 1505static int __init schizo_init(void)
1503{ 1506{
1504 return of_register_platform_driver(&schizo_driver); 1507 return platform_driver_register(&schizo_driver);
1505} 1508}
1506 1509
1507subsys_initcall(schizo_init); 1510subsys_initcall(schizo_init);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 743344aa6d8a..b01a06e9ae4e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
580{ 580{
581 static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; 581 static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
582 struct iommu *iommu = pbm->iommu; 582 struct iommu *iommu = pbm->iommu;
583 unsigned long num_tsb_entries, sz, tsbsize; 583 unsigned long num_tsb_entries, sz;
584 u32 dma_mask, dma_offset; 584 u32 dma_mask, dma_offset;
585 const u32 *vdma; 585 const u32 *vdma;
586 586
@@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
596 596
597 dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); 597 dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
598 num_tsb_entries = vdma[1] / IO_PAGE_SIZE; 598 num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
599 tsbsize = num_tsb_entries * sizeof(iopte_t);
600 599
601 dma_offset = vdma[0]; 600 dma_offset = vdma[0];
602 601
@@ -844,9 +843,9 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
844 unsigned long msiqid, 843 unsigned long msiqid,
845 unsigned long devino) 844 unsigned long devino)
846{ 845{
847 unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino); 846 unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
848 847
849 if (!virt_irq) 848 if (!irq)
850 return -ENOMEM; 849 return -ENOMEM;
851 850
852 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) 851 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
@@ -854,7 +853,7 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
854 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) 853 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
855 return -EINVAL; 854 return -EINVAL;
856 855
857 return virt_irq; 856 return irq;
858} 857}
859 858
860static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = { 859static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
@@ -918,8 +917,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
918 return 0; 917 return 0;
919} 918}
920 919
921static int __devinit pci_sun4v_probe(struct platform_device *op, 920static int __devinit pci_sun4v_probe(struct platform_device *op)
922 const struct of_device_id *match)
923{ 921{
924 const struct linux_prom64_registers *regs; 922 const struct linux_prom64_registers *regs;
925 static int hvapi_negotiated = 0; 923 static int hvapi_negotiated = 0;
@@ -1000,7 +998,7 @@ out_err:
1000 return err; 998 return err;
1001} 999}
1002 1000
1003static struct of_device_id __initdata pci_sun4v_match[] = { 1001static const struct of_device_id pci_sun4v_match[] = {
1004 { 1002 {
1005 .name = "pci", 1003 .name = "pci",
1006 .compatible = "SUNW,sun4v-pci", 1004 .compatible = "SUNW,sun4v-pci",
@@ -1008,7 +1006,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = {
1008 {}, 1006 {},
1009}; 1007};
1010 1008
1011static struct of_platform_driver pci_sun4v_driver = { 1009static struct platform_driver pci_sun4v_driver = {
1012 .driver = { 1010 .driver = {
1013 .name = DRIVER_NAME, 1011 .name = DRIVER_NAME,
1014 .owner = THIS_MODULE, 1012 .owner = THIS_MODULE,
@@ -1019,7 +1017,7 @@ static struct of_platform_driver pci_sun4v_driver = {
1019 1017
1020static int __init pci_sun4v_init(void) 1018static int __init pci_sun4v_init(void)
1021{ 1019{
1022 return of_register_platform_driver(&pci_sun4v_driver); 1020 return platform_driver_register(&pci_sun4v_driver);
1023} 1021}
1024 1022
1025subsys_initcall(pci_sun4v_init); 1023subsys_initcall(pci_sun4v_init);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index d36a8d391ca0..948601a066ff 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -164,6 +164,9 @@ void __iomem *pcic_regs;
164volatile int pcic_speculative; 164volatile int pcic_speculative;
165volatile int pcic_trapped; 165volatile int pcic_trapped;
166 166
167/* forward */
168unsigned int pcic_build_device_irq(struct platform_device *op,
169 unsigned int real_irq);
167 170
168#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) 171#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
169 172
@@ -284,7 +287,7 @@ int __init pcic_probe(void)
284 struct linux_prom_registers regs[PROMREG_MAX]; 287 struct linux_prom_registers regs[PROMREG_MAX];
285 struct linux_pbm_info* pbm; 288 struct linux_pbm_info* pbm;
286 char namebuf[64]; 289 char namebuf[64];
287 int node; 290 phandle node;
288 int err; 291 int err;
289 292
290 if (pcic0_up) { 293 if (pcic0_up) {
@@ -440,7 +443,7 @@ static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm,
440{ 443{
441 struct linux_prom_pci_registers regs[PROMREG_MAX]; 444 struct linux_prom_pci_registers regs[PROMREG_MAX];
442 int err; 445 int err;
443 int node = prom_getchild(pbm->prom_node); 446 phandle node = prom_getchild(pbm->prom_node);
444 447
445 while(node) { 448 while(node) {
446 err = prom_getproperty(node, "reg", 449 err = prom_getproperty(node, "reg",
@@ -523,6 +526,7 @@ static void
523pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) 526pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
524{ 527{
525 struct pcic_ca2irq *p; 528 struct pcic_ca2irq *p;
529 unsigned int real_irq;
526 int i, ivec; 530 int i, ivec;
527 char namebuf[64]; 531 char namebuf[64];
528 532
@@ -551,26 +555,25 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
551 i = p->pin; 555 i = p->pin;
552 if (i >= 0 && i < 4) { 556 if (i >= 0 && i < 4) {
553 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); 557 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
554 dev->irq = ivec >> (i << 2) & 0xF; 558 real_irq = ivec >> (i << 2) & 0xF;
555 } else if (i >= 4 && i < 8) { 559 } else if (i >= 4 && i < 8) {
556 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); 560 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
557 dev->irq = ivec >> ((i-4) << 2) & 0xF; 561 real_irq = ivec >> ((i-4) << 2) & 0xF;
558 } else { /* Corrupted map */ 562 } else { /* Corrupted map */
559 printk("PCIC: BAD PIN %d\n", i); for (;;) {} 563 printk("PCIC: BAD PIN %d\n", i); for (;;) {}
560 } 564 }
561/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ 565/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
562 566
563 /* 567 /* real_irq means PROM did not bother to program the upper
564 * dev->irq=0 means PROM did not bother to program the upper
565 * half of PCIC. This happens on JS-E with PROM 3.11, for instance. 568 * half of PCIC. This happens on JS-E with PROM 3.11, for instance.
566 */ 569 */
567 if (dev->irq == 0 || p->force) { 570 if (real_irq == 0 || p->force) {
568 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ 571 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
569 printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} 572 printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
570 } 573 }
571 printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", 574 printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
572 p->irq, p->pin, dev->bus->number, dev->devfn); 575 p->irq, p->pin, dev->bus->number, dev->devfn);
573 dev->irq = p->irq; 576 real_irq = p->irq;
574 577
575 i = p->pin; 578 i = p->pin;
576 if (i >= 4) { 579 if (i >= 4) {
@@ -584,7 +587,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
584 ivec |= p->irq << (i << 2); 587 ivec |= p->irq << (i << 2);
585 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); 588 writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
586 } 589 }
587 } 590 }
591 dev->irq = pcic_build_device_irq(NULL, real_irq);
588} 592}
589 593
590/* 594/*
@@ -700,10 +704,8 @@ static void pcic_clear_clock_irq(void)
700 704
701static irqreturn_t pcic_timer_handler (int irq, void *h) 705static irqreturn_t pcic_timer_handler (int irq, void *h)
702{ 706{
703 write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
704 pcic_clear_clock_irq(); 707 pcic_clear_clock_irq();
705 do_timer(1); 708 xtime_update(1);
706 write_sequnlock(&xtime_lock);
707#ifndef CONFIG_SMP 709#ifndef CONFIG_SMP
708 update_process_times(user_mode(get_irq_regs())); 710 update_process_times(user_mode(get_irq_regs()));
709#endif 711#endif
@@ -731,6 +733,7 @@ void __init pci_time_init(void)
731 struct linux_pcic *pcic = &pcic0; 733 struct linux_pcic *pcic = &pcic0;
732 unsigned long v; 734 unsigned long v;
733 int timer_irq, irq; 735 int timer_irq, irq;
736 int err;
734 737
735 do_arch_gettimeoffset = pci_gettimeoffset; 738 do_arch_gettimeoffset = pci_gettimeoffset;
736 739
@@ -742,9 +745,10 @@ void __init pci_time_init(void)
742 timer_irq = PCI_COUNTER_IRQ_SYS(v); 745 timer_irq = PCI_COUNTER_IRQ_SYS(v);
743 writel (PCI_COUNTER_IRQ_SET(timer_irq, 0), 746 writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
744 pcic->pcic_regs+PCI_COUNTER_IRQ); 747 pcic->pcic_regs+PCI_COUNTER_IRQ);
745 irq = request_irq(timer_irq, pcic_timer_handler, 748 irq = pcic_build_device_irq(NULL, timer_irq);
746 (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); 749 err = request_irq(irq, pcic_timer_handler,
747 if (irq) { 750 IRQF_TIMER, "timer", NULL);
751 if (err) {
748 prom_printf("time_init: unable to attach IRQ%d\n", timer_irq); 752 prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
749 prom_halt(); 753 prom_halt();
750 } 754 }
@@ -805,50 +809,73 @@ static inline unsigned long get_irqmask(int irq_nr)
805 return 1 << irq_nr; 809 return 1 << irq_nr;
806} 810}
807 811
808static void pcic_disable_irq(unsigned int irq_nr) 812static void pcic_mask_irq(struct irq_data *data)
809{ 813{
810 unsigned long mask, flags; 814 unsigned long mask, flags;
811 815
812 mask = get_irqmask(irq_nr); 816 mask = (unsigned long)data->chip_data;
813 local_irq_save(flags); 817 local_irq_save(flags);
814 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); 818 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
815 local_irq_restore(flags); 819 local_irq_restore(flags);
816} 820}
817 821
818static void pcic_enable_irq(unsigned int irq_nr) 822static void pcic_unmask_irq(struct irq_data *data)
819{ 823{
820 unsigned long mask, flags; 824 unsigned long mask, flags;
821 825
822 mask = get_irqmask(irq_nr); 826 mask = (unsigned long)data->chip_data;
823 local_irq_save(flags); 827 local_irq_save(flags);
824 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); 828 writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
825 local_irq_restore(flags); 829 local_irq_restore(flags);
826} 830}
827 831
828static void pcic_load_profile_irq(int cpu, unsigned int limit) 832static unsigned int pcic_startup_irq(struct irq_data *data)
829{ 833{
830 printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); 834 irq_link(data->irq);
835 pcic_unmask_irq(data);
836 return 0;
831} 837}
832 838
833/* We assume the caller has disabled local interrupts when these are called, 839static struct irq_chip pcic_irq = {
834 * or else very bizarre behavior will result. 840 .name = "pcic",
835 */ 841 .irq_startup = pcic_startup_irq,
836static void pcic_disable_pil_irq(unsigned int pil) 842 .irq_mask = pcic_mask_irq,
843 .irq_unmask = pcic_unmask_irq,
844};
845
846unsigned int pcic_build_device_irq(struct platform_device *op,
847 unsigned int real_irq)
837{ 848{
838 writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); 849 unsigned int irq;
850 unsigned long mask;
851
852 irq = 0;
853 mask = get_irqmask(real_irq);
854 if (mask == 0)
855 goto out;
856
857 irq = irq_alloc(real_irq, real_irq);
858 if (irq == 0)
859 goto out;
860
861 irq_set_chip_and_handler_name(irq, &pcic_irq,
862 handle_level_irq, "PCIC");
863 irq_set_chip_data(irq, (void *)mask);
864
865out:
866 return irq;
839} 867}
840 868
841static void pcic_enable_pil_irq(unsigned int pil) 869
870static void pcic_load_profile_irq(int cpu, unsigned int limit)
842{ 871{
843 writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); 872 printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
844} 873}
845 874
846void __init sun4m_pci_init_IRQ(void) 875void __init sun4m_pci_init_IRQ(void)
847{ 876{
848 BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM); 877 sparc_irq_config.build_device_irq = pcic_build_device_irq;
849 BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM); 878
850 BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM);
851 BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM);
852 BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM); 879 BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
853 BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM); 880 BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
854} 881}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4a6a50b4849..8ac23e660080 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/irq.h> 8#include <linux/irq.h>
9 9
10#include <linux/perf_event.h> 10#include <linux/irq_work.h>
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12 12
13#include <asm/pil.h> 13#include <asm/pil.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
43 43
44 old_regs = set_irq_regs(regs); 44 old_regs = set_irq_regs(regs);
45 irq_enter(); 45 irq_enter();
46#ifdef CONFIG_PERF_EVENTS 46#ifdef CONFIG_IRQ_WORK
47 perf_event_do_pending(); 47 irq_work_run();
48#endif 48#endif
49 irq_exit(); 49 irq_exit();
50 set_irq_regs(old_regs); 50 set_irq_regs(old_regs);
51} 51}
52 52
53void set_perf_event_pending(void) 53void arch_irq_work_raise(void)
54{ 54{
55 set_softint(1 << PIL_DEFERRED_PCR_WORK); 55 set_softint(1 << PIL_DEFERRED_PCR_WORK);
56} 56}
@@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val)
81 unsigned long ret; 81 unsigned long ret;
82 82
83 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); 83 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
84 if (val != HV_EOK) 84 if (ret != HV_EOK)
85 write_pcr(val); 85 write_pcr(val);
86} 86}
87 87
@@ -167,5 +167,3 @@ out_unregister:
167 unregister_perf_hsvc(); 167 unregister_perf_hsvc();
168 return err; 168 return err;
169} 169}
170
171arch_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6318e622cfb0..2cb0e1c001e2 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -26,6 +26,7 @@
26#include <asm/nmi.h> 26#include <asm/nmi.h>
27#include <asm/pcr.h> 27#include <asm/pcr.h>
28 28
29#include "kernel.h"
29#include "kstack.h" 30#include "kstack.h"
30 31
31/* Sparc64 chips have two performance counters, 32-bits each, with 32/* Sparc64 chips have two performance counters, 32-bits each, with
@@ -658,13 +659,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
658 659
659 enc = perf_event_get_enc(cpuc->events[i]); 660 enc = perf_event_get_enc(cpuc->events[i]);
660 pcr &= ~mask_for_index(idx); 661 pcr &= ~mask_for_index(idx);
661 pcr |= event_encoding(enc, idx); 662 if (hwc->state & PERF_HES_STOPPED)
663 pcr |= nop_for_index(idx);
664 else
665 pcr |= event_encoding(enc, idx);
662 } 666 }
663out: 667out:
664 return pcr; 668 return pcr;
665} 669}
666 670
667void hw_perf_enable(void) 671static void sparc_pmu_enable(struct pmu *pmu)
668{ 672{
669 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 673 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
670 u64 pcr; 674 u64 pcr;
@@ -691,7 +695,7 @@ void hw_perf_enable(void)
691 pcr_ops->write(cpuc->pcr); 695 pcr_ops->write(cpuc->pcr);
692} 696}
693 697
694void hw_perf_disable(void) 698static void sparc_pmu_disable(struct pmu *pmu)
695{ 699{
696 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 700 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
697 u64 val; 701 u64 val;
@@ -710,19 +714,65 @@ void hw_perf_disable(void)
710 pcr_ops->write(cpuc->pcr); 714 pcr_ops->write(cpuc->pcr);
711} 715}
712 716
713static void sparc_pmu_disable(struct perf_event *event) 717static int active_event_index(struct cpu_hw_events *cpuc,
718 struct perf_event *event)
719{
720 int i;
721
722 for (i = 0; i < cpuc->n_events; i++) {
723 if (cpuc->event[i] == event)
724 break;
725 }
726 BUG_ON(i == cpuc->n_events);
727 return cpuc->current_idx[i];
728}
729
730static void sparc_pmu_start(struct perf_event *event, int flags)
731{
732 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
733 int idx = active_event_index(cpuc, event);
734
735 if (flags & PERF_EF_RELOAD) {
736 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
737 sparc_perf_event_set_period(event, &event->hw, idx);
738 }
739
740 event->hw.state = 0;
741
742 sparc_pmu_enable_event(cpuc, &event->hw, idx);
743}
744
745static void sparc_pmu_stop(struct perf_event *event, int flags)
746{
747 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
748 int idx = active_event_index(cpuc, event);
749
750 if (!(event->hw.state & PERF_HES_STOPPED)) {
751 sparc_pmu_disable_event(cpuc, &event->hw, idx);
752 event->hw.state |= PERF_HES_STOPPED;
753 }
754
755 if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
756 sparc_perf_event_update(event, &event->hw, idx);
757 event->hw.state |= PERF_HES_UPTODATE;
758 }
759}
760
761static void sparc_pmu_del(struct perf_event *event, int _flags)
714{ 762{
715 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 763 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
716 struct hw_perf_event *hwc = &event->hw;
717 unsigned long flags; 764 unsigned long flags;
718 int i; 765 int i;
719 766
720 local_irq_save(flags); 767 local_irq_save(flags);
721 perf_disable(); 768 perf_pmu_disable(event->pmu);
722 769
723 for (i = 0; i < cpuc->n_events; i++) { 770 for (i = 0; i < cpuc->n_events; i++) {
724 if (event == cpuc->event[i]) { 771 if (event == cpuc->event[i]) {
725 int idx = cpuc->current_idx[i]; 772 /* Absorb the final count and turn off the
773 * event.
774 */
775 sparc_pmu_stop(event, PERF_EF_UPDATE);
726 776
727 /* Shift remaining entries down into 777 /* Shift remaining entries down into
728 * the existing slot. 778 * the existing slot.
@@ -734,13 +784,6 @@ static void sparc_pmu_disable(struct perf_event *event)
734 cpuc->current_idx[i]; 784 cpuc->current_idx[i];
735 } 785 }
736 786
737 /* Absorb the final count and turn off the
738 * event.
739 */
740 sparc_pmu_disable_event(cpuc, hwc, idx);
741 barrier();
742 sparc_perf_event_update(event, hwc, idx);
743
744 perf_event_update_userpage(event); 787 perf_event_update_userpage(event);
745 788
746 cpuc->n_events--; 789 cpuc->n_events--;
@@ -748,23 +791,10 @@ static void sparc_pmu_disable(struct perf_event *event)
748 } 791 }
749 } 792 }
750 793
751 perf_enable(); 794 perf_pmu_enable(event->pmu);
752 local_irq_restore(flags); 795 local_irq_restore(flags);
753} 796}
754 797
755static int active_event_index(struct cpu_hw_events *cpuc,
756 struct perf_event *event)
757{
758 int i;
759
760 for (i = 0; i < cpuc->n_events; i++) {
761 if (cpuc->event[i] == event)
762 break;
763 }
764 BUG_ON(i == cpuc->n_events);
765 return cpuc->current_idx[i];
766}
767
768static void sparc_pmu_read(struct perf_event *event) 798static void sparc_pmu_read(struct perf_event *event)
769{ 799{
770 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 800 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +804,6 @@ static void sparc_pmu_read(struct perf_event *event)
774 sparc_perf_event_update(event, hwc, idx); 804 sparc_perf_event_update(event, hwc, idx);
775} 805}
776 806
777static void sparc_pmu_unthrottle(struct perf_event *event)
778{
779 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
780 int idx = active_event_index(cpuc, event);
781 struct hw_perf_event *hwc = &event->hw;
782
783 sparc_pmu_enable_event(cpuc, hwc, idx);
784}
785
786static atomic_t active_events = ATOMIC_INIT(0); 807static atomic_t active_events = ATOMIC_INIT(0);
787static DEFINE_MUTEX(pmc_grab_mutex); 808static DEFINE_MUTEX(pmc_grab_mutex);
788 809
@@ -877,7 +898,7 @@ static int sparc_check_constraints(struct perf_event **evts,
877 if (!n_ev) 898 if (!n_ev)
878 return 0; 899 return 0;
879 900
880 if (n_ev > perf_max_events) 901 if (n_ev > MAX_HWEVENTS)
881 return -1; 902 return -1;
882 903
883 msk0 = perf_event_get_msk(events[0]); 904 msk0 = perf_event_get_msk(events[0]);
@@ -984,26 +1005,30 @@ static int collect_events(struct perf_event *group, int max_count,
984 return n; 1005 return n;
985} 1006}
986 1007
987static int sparc_pmu_enable(struct perf_event *event) 1008static int sparc_pmu_add(struct perf_event *event, int ef_flags)
988{ 1009{
989 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1010 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
990 int n0, ret = -EAGAIN; 1011 int n0, ret = -EAGAIN;
991 unsigned long flags; 1012 unsigned long flags;
992 1013
993 local_irq_save(flags); 1014 local_irq_save(flags);
994 perf_disable(); 1015 perf_pmu_disable(event->pmu);
995 1016
996 n0 = cpuc->n_events; 1017 n0 = cpuc->n_events;
997 if (n0 >= perf_max_events) 1018 if (n0 >= MAX_HWEVENTS)
998 goto out; 1019 goto out;
999 1020
1000 cpuc->event[n0] = event; 1021 cpuc->event[n0] = event;
1001 cpuc->events[n0] = event->hw.event_base; 1022 cpuc->events[n0] = event->hw.event_base;
1002 cpuc->current_idx[n0] = PIC_NO_INDEX; 1023 cpuc->current_idx[n0] = PIC_NO_INDEX;
1003 1024
1025 event->hw.state = PERF_HES_UPTODATE;
1026 if (!(ef_flags & PERF_EF_START))
1027 event->hw.state |= PERF_HES_STOPPED;
1028
1004 /* 1029 /*
1005 * If group events scheduling transaction was started, 1030 * If group events scheduling transaction was started,
1006 * skip the schedulability test here, it will be peformed 1031 * skip the schedulability test here, it will be performed
1007 * at commit time(->commit_txn) as a whole 1032 * at commit time(->commit_txn) as a whole
1008 */ 1033 */
1009 if (cpuc->group_flag & PERF_EVENT_TXN) 1034 if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1020,12 +1045,12 @@ nocheck:
1020 1045
1021 ret = 0; 1046 ret = 0;
1022out: 1047out:
1023 perf_enable(); 1048 perf_pmu_enable(event->pmu);
1024 local_irq_restore(flags); 1049 local_irq_restore(flags);
1025 return ret; 1050 return ret;
1026} 1051}
1027 1052
1028static int __hw_perf_event_init(struct perf_event *event) 1053static int sparc_pmu_event_init(struct perf_event *event)
1029{ 1054{
1030 struct perf_event_attr *attr = &event->attr; 1055 struct perf_event_attr *attr = &event->attr;
1031 struct perf_event *evts[MAX_HWEVENTS]; 1056 struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,22 +1063,33 @@ static int __hw_perf_event_init(struct perf_event *event)
1038 if (atomic_read(&nmi_active) < 0) 1063 if (atomic_read(&nmi_active) < 0)
1039 return -ENODEV; 1064 return -ENODEV;
1040 1065
1041 pmap = NULL; 1066 switch (attr->type) {
1042 if (attr->type == PERF_TYPE_HARDWARE) { 1067 case PERF_TYPE_HARDWARE:
1043 if (attr->config >= sparc_pmu->max_events) 1068 if (attr->config >= sparc_pmu->max_events)
1044 return -EINVAL; 1069 return -EINVAL;
1045 pmap = sparc_pmu->event_map(attr->config); 1070 pmap = sparc_pmu->event_map(attr->config);
1046 } else if (attr->type == PERF_TYPE_HW_CACHE) { 1071 break;
1072
1073 case PERF_TYPE_HW_CACHE:
1047 pmap = sparc_map_cache_event(attr->config); 1074 pmap = sparc_map_cache_event(attr->config);
1048 if (IS_ERR(pmap)) 1075 if (IS_ERR(pmap))
1049 return PTR_ERR(pmap); 1076 return PTR_ERR(pmap);
1050 } else if (attr->type != PERF_TYPE_RAW) 1077 break;
1051 return -EOPNOTSUPP; 1078
1079 case PERF_TYPE_RAW:
1080 pmap = NULL;
1081 break;
1082
1083 default:
1084 return -ENOENT;
1085
1086 }
1052 1087
1053 if (pmap) { 1088 if (pmap) {
1054 hwc->event_base = perf_event_encode(pmap); 1089 hwc->event_base = perf_event_encode(pmap);
1055 } else { 1090 } else {
1056 /* User gives us "(encoding << 16) | pic_mask" for 1091 /*
1092 * User gives us "(encoding << 16) | pic_mask" for
1057 * PERF_TYPE_RAW events. 1093 * PERF_TYPE_RAW events.
1058 */ 1094 */
1059 hwc->event_base = attr->config; 1095 hwc->event_base = attr->config;
@@ -1071,7 +1107,7 @@ static int __hw_perf_event_init(struct perf_event *event)
1071 n = 0; 1107 n = 0;
1072 if (event->group_leader != event) { 1108 if (event->group_leader != event) {
1073 n = collect_events(event->group_leader, 1109 n = collect_events(event->group_leader,
1074 perf_max_events - 1, 1110 MAX_HWEVENTS - 1,
1075 evts, events, current_idx_dmy); 1111 evts, events, current_idx_dmy);
1076 if (n < 0) 1112 if (n < 0)
1077 return -EINVAL; 1113 return -EINVAL;
@@ -1107,10 +1143,11 @@ static int __hw_perf_event_init(struct perf_event *event)
1107 * Set the flag to make pmu::enable() not perform the 1143 * Set the flag to make pmu::enable() not perform the
1108 * schedulability test, it will be performed at commit time 1144 * schedulability test, it will be performed at commit time
1109 */ 1145 */
1110static void sparc_pmu_start_txn(const struct pmu *pmu) 1146static void sparc_pmu_start_txn(struct pmu *pmu)
1111{ 1147{
1112 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1148 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1113 1149
1150 perf_pmu_disable(pmu);
1114 cpuhw->group_flag |= PERF_EVENT_TXN; 1151 cpuhw->group_flag |= PERF_EVENT_TXN;
1115} 1152}
1116 1153
@@ -1119,11 +1156,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
1119 * Clear the flag and pmu::enable() will perform the 1156 * Clear the flag and pmu::enable() will perform the
1120 * schedulability test. 1157 * schedulability test.
1121 */ 1158 */
1122static void sparc_pmu_cancel_txn(const struct pmu *pmu) 1159static void sparc_pmu_cancel_txn(struct pmu *pmu)
1123{ 1160{
1124 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1161 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1125 1162
1126 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1163 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1164 perf_pmu_enable(pmu);
1127} 1165}
1128 1166
1129/* 1167/*
@@ -1131,7 +1169,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
1131 * Perform the group schedulability test as a whole 1169 * Perform the group schedulability test as a whole
1132 * Return 0 if success 1170 * Return 0 if success
1133 */ 1171 */
1134static int sparc_pmu_commit_txn(const struct pmu *pmu) 1172static int sparc_pmu_commit_txn(struct pmu *pmu)
1135{ 1173{
1136 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1174 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1137 int n; 1175 int n;
@@ -1147,28 +1185,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
1147 return -EAGAIN; 1185 return -EAGAIN;
1148 1186
1149 cpuc->group_flag &= ~PERF_EVENT_TXN; 1187 cpuc->group_flag &= ~PERF_EVENT_TXN;
1188 perf_pmu_enable(pmu);
1150 return 0; 1189 return 0;
1151} 1190}
1152 1191
1153static const struct pmu pmu = { 1192static struct pmu pmu = {
1154 .enable = sparc_pmu_enable, 1193 .pmu_enable = sparc_pmu_enable,
1155 .disable = sparc_pmu_disable, 1194 .pmu_disable = sparc_pmu_disable,
1195 .event_init = sparc_pmu_event_init,
1196 .add = sparc_pmu_add,
1197 .del = sparc_pmu_del,
1198 .start = sparc_pmu_start,
1199 .stop = sparc_pmu_stop,
1156 .read = sparc_pmu_read, 1200 .read = sparc_pmu_read,
1157 .unthrottle = sparc_pmu_unthrottle,
1158 .start_txn = sparc_pmu_start_txn, 1201 .start_txn = sparc_pmu_start_txn,
1159 .cancel_txn = sparc_pmu_cancel_txn, 1202 .cancel_txn = sparc_pmu_cancel_txn,
1160 .commit_txn = sparc_pmu_commit_txn, 1203 .commit_txn = sparc_pmu_commit_txn,
1161}; 1204};
1162 1205
1163const struct pmu *hw_perf_event_init(struct perf_event *event)
1164{
1165 int err = __hw_perf_event_init(event);
1166
1167 if (err)
1168 return ERR_PTR(err);
1169 return &pmu;
1170}
1171
1172void perf_event_print_debug(void) 1206void perf_event_print_debug(void)
1173{ 1207{
1174 unsigned long flags; 1208 unsigned long flags;
@@ -1244,7 +1278,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1244 continue; 1278 continue;
1245 1279
1246 if (perf_event_overflow(event, 1, &data, regs)) 1280 if (perf_event_overflow(event, 1, &data, regs))
1247 sparc_pmu_disable_event(cpuc, hwc, idx); 1281 sparc_pmu_stop(event, 0);
1248 } 1282 }
1249 1283
1250 return NOTIFY_STOP; 1284 return NOTIFY_STOP;
@@ -1274,39 +1308,35 @@ static bool __init supported_pmu(void)
1274 return false; 1308 return false;
1275} 1309}
1276 1310
1277void __init init_hw_perf_events(void) 1311int __init init_hw_perf_events(void)
1278{ 1312{
1279 pr_info("Performance events: "); 1313 pr_info("Performance events: ");
1280 1314
1281 if (!supported_pmu()) { 1315 if (!supported_pmu()) {
1282 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); 1316 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
1283 return; 1317 return 0;
1284 } 1318 }
1285 1319
1286 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 1320 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1287 1321
1288 /* All sparc64 PMUs currently have 2 events. */ 1322 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1289 perf_max_events = 2;
1290
1291 register_die_notifier(&perf_event_nmi_notifier); 1323 register_die_notifier(&perf_event_nmi_notifier);
1292}
1293 1324
1294static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) 1325 return 0;
1295{
1296 if (entry->nr < PERF_MAX_STACK_DEPTH)
1297 entry->ip[entry->nr++] = ip;
1298} 1326}
1327early_initcall(init_hw_perf_events);
1299 1328
1300static void perf_callchain_kernel(struct pt_regs *regs, 1329void perf_callchain_kernel(struct perf_callchain_entry *entry,
1301 struct perf_callchain_entry *entry) 1330 struct pt_regs *regs)
1302{ 1331{
1303 unsigned long ksp, fp; 1332 unsigned long ksp, fp;
1304#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1333#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1305 int graph = 0; 1334 int graph = 0;
1306#endif 1335#endif
1307 1336
1308 callchain_store(entry, PERF_CONTEXT_KERNEL); 1337 stack_trace_flush();
1309 callchain_store(entry, regs->tpc); 1338
1339 perf_callchain_store(entry, regs->tpc);
1310 1340
1311 ksp = regs->u_regs[UREG_I6]; 1341 ksp = regs->u_regs[UREG_I6];
1312 fp = ksp + STACK_BIAS; 1342 fp = ksp + STACK_BIAS;
@@ -1330,13 +1360,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
1330 pc = sf->callers_pc; 1360 pc = sf->callers_pc;
1331 fp = (unsigned long)sf->fp + STACK_BIAS; 1361 fp = (unsigned long)sf->fp + STACK_BIAS;
1332 } 1362 }
1333 callchain_store(entry, pc); 1363 perf_callchain_store(entry, pc);
1334#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1364#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1335 if ((pc + 8UL) == (unsigned long) &return_to_handler) { 1365 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1336 int index = current->curr_ret_stack; 1366 int index = current->curr_ret_stack;
1337 if (current->ret_stack && index >= graph) { 1367 if (current->ret_stack && index >= graph) {
1338 pc = current->ret_stack[index - graph].ret; 1368 pc = current->ret_stack[index - graph].ret;
1339 callchain_store(entry, pc); 1369 perf_callchain_store(entry, pc);
1340 graph++; 1370 graph++;
1341 } 1371 }
1342 } 1372 }
@@ -1344,13 +1374,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
1344 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1374 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1345} 1375}
1346 1376
1347static void perf_callchain_user_64(struct pt_regs *regs, 1377static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1348 struct perf_callchain_entry *entry) 1378 struct pt_regs *regs)
1349{ 1379{
1350 unsigned long ufp; 1380 unsigned long ufp;
1351 1381
1352 callchain_store(entry, PERF_CONTEXT_USER); 1382 perf_callchain_store(entry, regs->tpc);
1353 callchain_store(entry, regs->tpc);
1354 1383
1355 ufp = regs->u_regs[UREG_I6] + STACK_BIAS; 1384 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1356 do { 1385 do {
@@ -1363,17 +1392,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
1363 1392
1364 pc = sf.callers_pc; 1393 pc = sf.callers_pc;
1365 ufp = (unsigned long)sf.fp + STACK_BIAS; 1394 ufp = (unsigned long)sf.fp + STACK_BIAS;
1366 callchain_store(entry, pc); 1395 perf_callchain_store(entry, pc);
1367 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1396 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1368} 1397}
1369 1398
1370static void perf_callchain_user_32(struct pt_regs *regs, 1399static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1371 struct perf_callchain_entry *entry) 1400 struct pt_regs *regs)
1372{ 1401{
1373 unsigned long ufp; 1402 unsigned long ufp;
1374 1403
1375 callchain_store(entry, PERF_CONTEXT_USER); 1404 perf_callchain_store(entry, regs->tpc);
1376 callchain_store(entry, regs->tpc);
1377 1405
1378 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; 1406 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1379 do { 1407 do {
@@ -1386,34 +1414,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
1386 1414
1387 pc = sf.callers_pc; 1415 pc = sf.callers_pc;
1388 ufp = (unsigned long)sf.fp; 1416 ufp = (unsigned long)sf.fp;
1389 callchain_store(entry, pc); 1417 perf_callchain_store(entry, pc);
1390 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1418 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1391} 1419}
1392 1420
1393/* Like powerpc we can't get PMU interrupts within the PMU handler, 1421void
1394 * so no need for separate NMI and IRQ chains as on x86. 1422perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1395 */
1396static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
1397
1398struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1399{ 1423{
1400 struct perf_callchain_entry *entry = &__get_cpu_var(callchain); 1424 flushw_user();
1401 1425 if (test_thread_flag(TIF_32BIT))
1402 entry->nr = 0; 1426 perf_callchain_user_32(entry, regs);
1403 if (!user_mode(regs)) { 1427 else
1404 stack_trace_flush(); 1428 perf_callchain_user_64(entry, regs);
1405 perf_callchain_kernel(regs, entry);
1406 if (current->mm)
1407 regs = task_pt_regs(current);
1408 else
1409 regs = NULL;
1410 }
1411 if (regs) {
1412 flushw_user();
1413 if (test_thread_flag(TIF_32BIT))
1414 perf_callchain_user_32(regs, entry);
1415 else
1416 perf_callchain_user_64(regs, entry);
1417 }
1418 return entry;
1419} 1429}
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 94536a85f161..6a585d393580 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -51,8 +51,7 @@ static void pmc_swift_idle(void)
51#endif 51#endif
52} 52}
53 53
54static int __devinit pmc_probe(struct platform_device *op, 54static int __devinit pmc_probe(struct platform_device *op)
55 const struct of_device_id *match)
56{ 55{
57 regs = of_ioremap(&op->resource[0], 0, 56 regs = of_ioremap(&op->resource[0], 0,
58 resource_size(&op->resource[0]), PMC_OBPNAME); 57 resource_size(&op->resource[0]), PMC_OBPNAME);
@@ -70,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op,
70 return 0; 69 return 0;
71} 70}
72 71
73static struct of_device_id __initdata pmc_match[] = { 72static struct of_device_id pmc_match[] = {
74 { 73 {
75 .name = PMC_OBPNAME, 74 .name = PMC_OBPNAME,
76 }, 75 },
@@ -78,7 +77,7 @@ static struct of_device_id __initdata pmc_match[] = {
78}; 77};
79MODULE_DEVICE_TABLE(of, pmc_match); 78MODULE_DEVICE_TABLE(of, pmc_match);
80 79
81static struct of_platform_driver pmc_driver = { 80static struct platform_driver pmc_driver = {
82 .driver = { 81 .driver = {
83 .name = "pmc", 82 .name = "pmc",
84 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
@@ -89,7 +88,7 @@ static struct of_platform_driver pmc_driver = {
89 88
90static int __init pmc_init(void) 89static int __init pmc_init(void)
91{ 90{
92 return of_register_platform_driver(&pmc_driver); 91 return platform_driver_register(&pmc_driver);
93} 92}
94 93
95/* This driver is not critical to the boot process 94/* This driver is not critical to the boot process
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 2c59f4d387dd..cb4c0f57c024 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node *
33 return 1; 33 return 1;
34} 34}
35 35
36static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match) 36static int __devinit power_probe(struct platform_device *op)
37{ 37{
38 struct resource *res = &op->resource[0]; 38 struct resource *res = &op->resource[0];
39 unsigned int irq = op->archdata.irqs[0]; 39 unsigned int irq = op->archdata.irqs[0];
@@ -52,14 +52,14 @@ static int __devinit power_probe(struct platform_device *op, const struct of_dev
52 return 0; 52 return 0;
53} 53}
54 54
55static struct of_device_id __initdata power_match[] = { 55static const struct of_device_id power_match[] = {
56 { 56 {
57 .name = "power", 57 .name = "power",
58 }, 58 },
59 {}, 59 {},
60}; 60};
61 61
62static struct of_platform_driver power_driver = { 62static struct platform_driver power_driver = {
63 .probe = power_probe, 63 .probe = power_probe,
64 .driver = { 64 .driver = {
65 .name = "power", 65 .name = "power",
@@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = {
70 70
71static int __init power_init(void) 71static int __init power_init(void)
72{ 72{
73 return of_register_platform_driver(&power_driver); 73 return platform_driver_register(&power_driver);
74} 74}
75 75
76device_initcall(power_init); 76device_initcall(power_init);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 17529298c50a..c8cc461ff75f 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -128,8 +128,16 @@ void cpu_idle(void)
128 set_thread_flag(TIF_POLLING_NRFLAG); 128 set_thread_flag(TIF_POLLING_NRFLAG);
129 /* endless idle loop with no priority at all */ 129 /* endless idle loop with no priority at all */
130 while(1) { 130 while(1) {
131 while (!need_resched()) 131#ifdef CONFIG_SPARC_LEON
132 cpu_relax(); 132 if (pm_idle) {
133 while (!need_resched())
134 (*pm_idle)();
135 } else
136#endif
137 {
138 while (!need_resched())
139 cpu_relax();
140 }
133 preempt_enable_no_resched(); 141 preempt_enable_no_resched();
134 schedule(); 142 schedule();
135 preempt_disable(); 143 preempt_disable();
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index eeb04a782ec8..cf5fe1c0b024 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -4,12 +4,6 @@
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <asm/prom.h> 5#include <asm/prom.h>
6 6
7extern void * prom_early_alloc(unsigned long size);
8extern void irq_trans_init(struct device_node *dp);
9
10extern unsigned int prom_unique_id;
11
12extern char *build_path_component(struct device_node *dp);
13extern void of_console_init(void); 7extern void of_console_init(void);
14 8
15extern unsigned int prom_early_allocated; 9extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 0a37e8cfd160..5ce3d15a99b0 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -136,18 +136,29 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
136/* "name:vendor:device@irq,addrlo" */ 136/* "name:vendor:device@irq,addrlo" */
137static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) 137static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
138{ 138{
139 struct amba_prom_registers *regs; unsigned int *intr; 139 struct amba_prom_registers *regs;
140 unsigned int *device, *vendor; 140 unsigned int *intr, *device, *vendor, reg0;
141 struct property *prop; 141 struct property *prop;
142 int interrupt = 0;
142 143
144 /* In order to get a unique ID in the device tree (multiple AMBA devices
145 * may have the same name) the node number is printed
146 */
143 prop = of_find_property(dp, "reg", NULL); 147 prop = of_find_property(dp, "reg", NULL);
144 if (!prop) 148 if (!prop) {
145 return; 149 reg0 = (unsigned int)dp->phandle;
146 regs = prop->value; 150 } else {
151 regs = prop->value;
152 reg0 = regs->phys_addr;
153 }
154
155 /* Not all cores have Interrupt */
147 prop = of_find_property(dp, "interrupts", NULL); 156 prop = of_find_property(dp, "interrupts", NULL);
148 if (!prop) 157 if (!prop)
149 return; 158 intr = &interrupt; /* IRQ0 does not exist */
150 intr = prop->value; 159 else
160 intr = prop->value;
161
151 prop = of_find_property(dp, "vendor", NULL); 162 prop = of_find_property(dp, "vendor", NULL);
152 if (!prop) 163 if (!prop)
153 return; 164 return;
@@ -159,7 +170,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
159 170
160 sprintf(tmp_buf, "%s:%d:%d@%x,%x", 171 sprintf(tmp_buf, "%s:%d:%d@%x,%x",
161 dp->name, *vendor, *device, 172 dp->name, *vendor, *device,
162 *intr, regs->phys_addr); 173 *intr, reg0);
163} 174}
164 175
165static void __init __build_path_component(struct device_node *dp, char *tmp_buf) 176static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
@@ -315,7 +326,6 @@ void __init of_console_init(void)
315 of_console_options = NULL; 326 of_console_options = NULL;
316 } 327 }
317 328
318 prom_printf(msg, of_console_path);
319 printk(msg, of_console_path); 329 printk(msg, of_console_path);
320} 330}
321 331
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 1f830da2ddf2..ed25834328f4 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -20,14 +20,13 @@
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_pdt.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/oplib.h> 25#include <asm/oplib.h>
25#include <asm/leon.h> 26#include <asm/leon.h>
26 27
27#include "prom.h" 28#include "prom.h"
28 29
29void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
30
31struct device_node *of_console_device; 30struct device_node *of_console_device;
32EXPORT_SYMBOL(of_console_device); 31EXPORT_SYMBOL(of_console_device);
33 32
@@ -119,192 +118,47 @@ int of_find_in_proplist(const char *list, const char *match, int len)
119} 118}
120EXPORT_SYMBOL(of_find_in_proplist); 119EXPORT_SYMBOL(of_find_in_proplist);
121 120
122unsigned int prom_unique_id; 121/*
123 122 * SPARC32 and SPARC64's prom_nextprop() do things differently
124static struct property * __init build_one_prop(phandle node, char *prev, 123 * here, despite sharing the same interface. SPARC32 doesn't fill in 'buf',
125 char *special_name, 124 * returning NULL on an error. SPARC64 fills in 'buf', but sets it to an
126 void *special_val, 125 * empty string upon error.
127 int special_len) 126 */
127static int __init handle_nextprop_quirks(char *buf, const char *name)
128{ 128{
129 static struct property *tmp = NULL; 129 if (!name || strlen(name) == 0)
130 struct property *p; 130 return -1;
131 const char *name;
132
133 if (tmp) {
134 p = tmp;
135 memset(p, 0, sizeof(*p) + 32);
136 tmp = NULL;
137 } else {
138 p = prom_early_alloc(sizeof(struct property) + 32);
139 p->unique_id = prom_unique_id++;
140 }
141
142 p->name = (char *) (p + 1);
143 if (special_name) {
144 strcpy(p->name, special_name);
145 p->length = special_len;
146 p->value = prom_early_alloc(special_len);
147 memcpy(p->value, special_val, special_len);
148 } else {
149 if (prev == NULL) {
150 name = prom_firstprop(node, p->name);
151 } else {
152 name = prom_nextprop(node, prev, p->name);
153 }
154 131
155 if (!name || strlen(name) == 0) {
156 tmp = p;
157 return NULL;
158 }
159#ifdef CONFIG_SPARC32 132#ifdef CONFIG_SPARC32
160 strcpy(p->name, name); 133 strcpy(buf, name);
161#endif 134#endif
162 p->length = prom_getproplen(node, p->name); 135 return 0;
163 if (p->length <= 0) {
164 p->length = 0;
165 } else {
166 int len;
167
168 p->value = prom_early_alloc(p->length + 1);
169 len = prom_getproperty(node, p->name, p->value,
170 p->length);
171 if (len <= 0)
172 p->length = 0;
173 ((unsigned char *)p->value)[p->length] = '\0';
174 }
175 }
176 return p;
177}
178
179static struct property * __init build_prop_list(phandle node)
180{
181 struct property *head, *tail;
182
183 head = tail = build_one_prop(node, NULL,
184 ".node", &node, sizeof(node));
185
186 tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
187 tail = tail->next;
188 while(tail) {
189 tail->next = build_one_prop(node, tail->name,
190 NULL, NULL, 0);
191 tail = tail->next;
192 }
193
194 return head;
195}
196
197static char * __init get_one_property(phandle node, const char *name)
198{
199 char *buf = "<NULL>";
200 int len;
201
202 len = prom_getproplen(node, name);
203 if (len > 0) {
204 buf = prom_early_alloc(len);
205 len = prom_getproperty(node, name, buf, len);
206 }
207
208 return buf;
209}
210
211static struct device_node * __init prom_create_node(phandle node,
212 struct device_node *parent)
213{
214 struct device_node *dp;
215
216 if (!node)
217 return NULL;
218
219 dp = prom_early_alloc(sizeof(*dp));
220 dp->unique_id = prom_unique_id++;
221 dp->parent = parent;
222
223 kref_init(&dp->kref);
224
225 dp->name = get_one_property(node, "name");
226 dp->type = get_one_property(node, "device_type");
227 dp->phandle = node;
228
229 dp->properties = build_prop_list(node);
230
231 irq_trans_init(dp);
232
233 return dp;
234}
235
236char * __init build_full_name(struct device_node *dp)
237{
238 int len, ourlen, plen;
239 char *n;
240
241 plen = strlen(dp->parent->full_name);
242 ourlen = strlen(dp->path_component_name);
243 len = ourlen + plen + 2;
244
245 n = prom_early_alloc(len);
246 strcpy(n, dp->parent->full_name);
247 if (!of_node_is_root(dp->parent)) {
248 strcpy(n + plen, "/");
249 plen++;
250 }
251 strcpy(n + plen, dp->path_component_name);
252
253 return n;
254} 136}
255 137
256static struct device_node * __init prom_build_tree(struct device_node *parent, 138static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
257 phandle node,
258 struct device_node ***nextp)
259{ 139{
260 struct device_node *ret = NULL, *prev_sibling = NULL; 140 const char *name;
261 struct device_node *dp;
262
263 while (1) {
264 dp = prom_create_node(node, parent);
265 if (!dp)
266 break;
267
268 if (prev_sibling)
269 prev_sibling->sibling = dp;
270
271 if (!ret)
272 ret = dp;
273 prev_sibling = dp;
274
275 *(*nextp) = dp;
276 *nextp = &dp->allnext;
277
278 dp->path_component_name = build_path_component(dp);
279 dp->full_name = build_full_name(dp);
280
281 dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
282
283 if (prom_build_more)
284 prom_build_more(dp, nextp);
285
286 node = prom_getsibling(node);
287 }
288 141
289 return ret; 142 buf[0] = '\0';
143 name = prom_nextprop(node, prev, buf);
144 return handle_nextprop_quirks(buf, name);
290} 145}
291 146
292unsigned int prom_early_allocated __initdata; 147unsigned int prom_early_allocated __initdata;
293 148
149static struct of_pdt_ops prom_sparc_ops __initdata = {
150 .nextprop = prom_common_nextprop,
151 .getproplen = prom_getproplen,
152 .getproperty = prom_getproperty,
153 .getchild = prom_getchild,
154 .getsibling = prom_getsibling,
155};
156
294void __init prom_build_devicetree(void) 157void __init prom_build_devicetree(void)
295{ 158{
296 struct device_node **nextp; 159 of_pdt_build_devicetree(prom_root_node, &prom_sparc_ops);
297
298 allnodes = prom_create_node(prom_root_node, NULL);
299 allnodes->path_component_name = "";
300 allnodes->full_name = "/";
301
302 nextp = &allnodes->allnext;
303 allnodes->child = prom_build_tree(allnodes,
304 prom_getchild(allnodes->phandle),
305 &nextp);
306 of_console_init(); 160 of_console_init();
307 161
308 printk("PROM: Built device tree with %u bytes of memory.\n", 162 pr_info("PROM: Built device tree with %u bytes of memory.\n",
309 prom_early_allocated); 163 prom_early_allocated);
310} 164}
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index ce651147fabc..40e4936bd479 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
227 unsigned long imap, iclr; 227 unsigned long imap, iclr;
228 unsigned long imap_off, iclr_off; 228 unsigned long imap_off, iclr_off;
229 int inofixup = 0; 229 int inofixup = 0;
230 int virt_irq; 230 int irq;
231 231
232 ino &= 0x3f; 232 ino &= 0x3f;
233 if (ino < SABRE_ONBOARD_IRQ_BASE) { 233 if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
247 if ((ino & 0x20) == 0) 247 if ((ino & 0x20) == 0)
248 inofixup = ino & 0x03; 248 inofixup = ino & 0x03;
249 249
250 virt_irq = build_irq(inofixup, iclr, imap); 250 irq = build_irq(inofixup, iclr, imap);
251 251
252 /* If the parent device is a PCI<->PCI bridge other than 252 /* If the parent device is a PCI<->PCI bridge other than
253 * APB, we have to install a pre-handler to ensure that 253 * APB, we have to install a pre-handler to ensure that
@@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp,
256 */ 256 */
257 regs = of_get_property(dp, "reg", NULL); 257 regs = of_get_property(dp, "reg", NULL);
258 if (regs && sabre_device_needs_wsync(dp)) { 258 if (regs && sabre_device_needs_wsync(dp)) {
259 irq_install_pre_handler(virt_irq, 259 irq_install_pre_handler(irq,
260 sabre_wsync_handler, 260 sabre_wsync_handler,
261 (void *) (long) regs->phys_hi, 261 (void *) (long) regs->phys_hi,
262 (void *) irq_data); 262 (void *) irq_data);
263 } 263 }
264 264
265 return virt_irq; 265 return irq;
266} 266}
267 267
268static void __init sabre_irq_trans_init(struct device_node *dp) 268static void __init sabre_irq_trans_init(struct device_node *dp)
@@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
382 unsigned long pbm_regs = irq_data->pbm_regs; 382 unsigned long pbm_regs = irq_data->pbm_regs;
383 unsigned long imap, iclr; 383 unsigned long imap, iclr;
384 int ign_fixup; 384 int ign_fixup;
385 int virt_irq; 385 int irq;
386 int is_tomatillo; 386 int is_tomatillo;
387 387
388 ino &= 0x3f; 388 ino &= 0x3f;
@@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp,
409 ign_fixup = (1 << 6); 409 ign_fixup = (1 << 6);
410 } 410 }
411 411
412 virt_irq = build_irq(ign_fixup, iclr, imap); 412 irq = build_irq(ign_fixup, iclr, imap);
413 413
414 if (is_tomatillo) { 414 if (is_tomatillo) {
415 irq_install_pre_handler(virt_irq, 415 irq_install_pre_handler(irq,
416 tomatillo_wsync_handler, 416 tomatillo_wsync_handler,
417 ((irq_data->chip_version <= 4) ? 417 ((irq_data->chip_version <= 4) ?
418 (void *) 1 : (void *) 0), 418 (void *) 1 : (void *) 0),
419 (void *) irq_data->sync_reg); 419 (void *) irq_data->sync_reg);
420 } 420 }
421 421
422 return virt_irq; 422 return irq;
423} 423}
424 424
425static void __init __schizo_irq_trans_init(struct device_node *dp, 425static void __init __schizo_irq_trans_init(struct device_node *dp,
@@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
694 case 3: 694 case 3:
695 iclr = reg_base + SYSIO_ICLR_SLOT3; 695 iclr = reg_base + SYSIO_ICLR_SLOT3;
696 break; 696 break;
697 }; 697 }
698 698
699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
700 } 700 }
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index fe2af66bb198..8db48e808ed4 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm,
228 default: 228 default:
229 type_str = "ECC Error"; 229 type_str = "ECC Error";
230 break; 230 break;
231 }; 231 }
232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", 232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
233 pbm->name, type_str); 233 pbm->name, type_str);
234 234
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index e608f397e11f..27b9e93d0121 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
323 return &user_sparc32_view; 323 return &user_sparc32_view;
324} 324}
325 325
326long arch_ptrace(struct task_struct *child, long request, long addr, long data) 326struct fps {
327 unsigned long regs[32];
328 unsigned long fsr;
329 unsigned long flags;
330 unsigned long extra;
331 unsigned long fpqd;
332 struct fq {
333 unsigned long *insnaddr;
334 unsigned long insn;
335 } fpq[16];
336};
337
338long arch_ptrace(struct task_struct *child, long request,
339 unsigned long addr, unsigned long data)
327{ 340{
328 unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4]; 341 unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
342 void __user *addr2p;
329 const struct user_regset_view *view; 343 const struct user_regset_view *view;
344 struct pt_regs __user *pregs;
345 struct fps __user *fps;
330 int ret; 346 int ret;
331 347
332 view = task_user_regset_view(current); 348 view = task_user_regset_view(current);
349 addr2p = (void __user *) addr2;
350 pregs = (struct pt_regs __user *) addr;
351 fps = (struct fps __user *) addr;
333 352
334 switch(request) { 353 switch(request) {
335 case PTRACE_GETREGS: { 354 case PTRACE_GETREGS: {
336 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
337
338 ret = copy_regset_to_user(child, view, REGSET_GENERAL, 355 ret = copy_regset_to_user(child, view, REGSET_GENERAL,
339 32 * sizeof(u32), 356 32 * sizeof(u32),
340 4 * sizeof(u32), 357 4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
348 } 365 }
349 366
350 case PTRACE_SETREGS: { 367 case PTRACE_SETREGS: {
351 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
352
353 ret = copy_regset_from_user(child, view, REGSET_GENERAL, 368 ret = copy_regset_from_user(child, view, REGSET_GENERAL,
354 32 * sizeof(u32), 369 32 * sizeof(u32),
355 4 * sizeof(u32), 370 4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
363 } 378 }
364 379
365 case PTRACE_GETFPREGS: { 380 case PTRACE_GETFPREGS: {
366 struct fps {
367 unsigned long regs[32];
368 unsigned long fsr;
369 unsigned long flags;
370 unsigned long extra;
371 unsigned long fpqd;
372 struct fq {
373 unsigned long *insnaddr;
374 unsigned long insn;
375 } fpq[16];
376 };
377 struct fps __user *fps = (struct fps __user *) addr;
378
379 ret = copy_regset_to_user(child, view, REGSET_FP, 381 ret = copy_regset_to_user(child, view, REGSET_FP,
380 0 * sizeof(u32), 382 0 * sizeof(u32),
381 32 * sizeof(u32), 383 32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
397 } 399 }
398 400
399 case PTRACE_SETFPREGS: { 401 case PTRACE_SETFPREGS: {
400 struct fps {
401 unsigned long regs[32];
402 unsigned long fsr;
403 unsigned long flags;
404 unsigned long extra;
405 unsigned long fpqd;
406 struct fq {
407 unsigned long *insnaddr;
408 unsigned long insn;
409 } fpq[16];
410 };
411 struct fps __user *fps = (struct fps __user *) addr;
412
413 ret = copy_regset_from_user(child, view, REGSET_FP, 402 ret = copy_regset_from_user(child, view, REGSET_FP,
414 0 * sizeof(u32), 403 0 * sizeof(u32),
415 32 * sizeof(u32), 404 32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
424 413
425 case PTRACE_READTEXT: 414 case PTRACE_READTEXT:
426 case PTRACE_READDATA: 415 case PTRACE_READDATA:
427 ret = ptrace_readdata(child, addr, 416 ret = ptrace_readdata(child, addr, addr2p, data);
428 (void __user *) addr2, data);
429 417
430 if (ret == data) 418 if (ret == data)
431 ret = 0; 419 ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
435 423
436 case PTRACE_WRITETEXT: 424 case PTRACE_WRITETEXT:
437 case PTRACE_WRITEDATA: 425 case PTRACE_WRITEDATA:
438 ret = ptrace_writedata(child, (void __user *) addr2, 426 ret = ptrace_writedata(child, addr2p, addr, data);
439 addr, data);
440 427
441 if (ret == data) 428 if (ret == data)
442 ret = 0; 429 ret = 0;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index aa90da08bf61..96ee50a80661 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -969,16 +969,19 @@ struct fps {
969 unsigned long fsr; 969 unsigned long fsr;
970}; 970};
971 971
972long arch_ptrace(struct task_struct *child, long request, long addr, long data) 972long arch_ptrace(struct task_struct *child, long request,
973 unsigned long addr, unsigned long data)
973{ 974{
974 const struct user_regset_view *view = task_user_regset_view(current); 975 const struct user_regset_view *view = task_user_regset_view(current);
975 unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4]; 976 unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
976 struct pt_regs __user *pregs; 977 struct pt_regs __user *pregs;
977 struct fps __user *fps; 978 struct fps __user *fps;
979 void __user *addr2p;
978 int ret; 980 int ret;
979 981
980 pregs = (struct pt_regs __user *) (unsigned long) addr; 982 pregs = (struct pt_regs __user *) addr;
981 fps = (struct fps __user *) (unsigned long) addr; 983 fps = (struct fps __user *) addr;
984 addr2p = (void __user *) addr2;
982 985
983 switch (request) { 986 switch (request) {
984 case PTRACE_PEEKUSR: 987 case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1029 1032
1030 case PTRACE_READTEXT: 1033 case PTRACE_READTEXT:
1031 case PTRACE_READDATA: 1034 case PTRACE_READDATA:
1032 ret = ptrace_readdata(child, addr, 1035 ret = ptrace_readdata(child, addr, addr2p, data);
1033 (char __user *)addr2, data);
1034 if (ret == data) 1036 if (ret == data)
1035 ret = 0; 1037 ret = 0;
1036 else if (ret >= 0) 1038 else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1039 1041
1040 case PTRACE_WRITETEXT: 1042 case PTRACE_WRITETEXT:
1041 case PTRACE_WRITEDATA: 1043 case PTRACE_WRITEDATA:
1042 ret = ptrace_writedata(child, (char __user *) addr2, 1044 ret = ptrace_writedata(child, addr2p, addr, data);
1043 addr, data);
1044 if (ret == data) 1045 if (ret == data)
1045 ret = 0; 1046 ret = 0;
1046 else if (ret >= 0) 1047 else if (ret >= 0)
@@ -1085,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1085 1086
1086asmlinkage void syscall_trace_leave(struct pt_regs *regs) 1087asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1087{ 1088{
1089#ifdef CONFIG_AUDITSYSCALL
1088 if (unlikely(current->audit_context)) { 1090 if (unlikely(current->audit_context)) {
1089 unsigned long tstate = regs->tstate; 1091 unsigned long tstate = regs->tstate;
1090 int result = AUDITSC_SUCCESS; 1092 int result = AUDITSC_SUCCESS;
@@ -1094,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1094 1096
1095 audit_syscall_exit(result, regs->u_regs[UREG_I0]); 1097 audit_syscall_exit(result, regs->u_regs[UREG_I0]);
1096 } 1098 }
1097 1099#endif
1098 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1100 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1099 trace_sys_exit(regs, regs->u_regs[UREG_G1]); 1101 trace_sys_exit(regs, regs->u_regs[UREG_G1]);
1100 1102
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 4da2e1f66290..5f5f74c2c2ca 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -78,9 +78,9 @@ signal_p:
78 call do_notify_resume 78 call do_notify_resume
79 add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr 79 add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
80 80
81 /* Fall through. */ 81 b signal_p
82 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr 82 ld [%curptr + TI_FLAGS], %g2
83 clr %l6 83
84ret_trap_continue: 84ret_trap_continue:
85 sethi %hi(PSR_SYSCALL), %g1 85 sethi %hi(PSR_SYSCALL), %g1
86 andn %t_psr, %g1, %t_psr 86 andn %t_psr, %g1, %t_psr
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 090b9e9ad5e3..77f1b95e0806 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -34,37 +34,9 @@ __handle_preemption:
34__handle_user_windows: 34__handle_user_windows:
35 call fault_in_user_windows 35 call fault_in_user_windows
36 wrpr %g0, RTRAP_PSTATE, %pstate 36 wrpr %g0, RTRAP_PSTATE, %pstate
37 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate 37 ba,pt %xcc, __handle_preemption_continue
38 /* Redo sched+sig checks */ 38 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
39 ldx [%g6 + TI_FLAGS], %l0
40 andcc %l0, _TIF_NEED_RESCHED, %g0
41
42 be,pt %xcc, 1f
43 nop
44 call schedule
45 wrpr %g0, RTRAP_PSTATE, %pstate
46 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
47 ldx [%g6 + TI_FLAGS], %l0
48
491: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
50 be,pt %xcc, __handle_user_windows_continue
51 nop
52 mov %l5, %o1
53 add %sp, PTREGS_OFF, %o0
54 mov %l0, %o2
55
56 call do_notify_resume
57 wrpr %g0, RTRAP_PSTATE, %pstate
58 wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
59 /* Signal delivery can modify pt_regs tstate, so we must
60 * reload it.
61 */
62 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
63 sethi %hi(0xf << 20), %l4
64 and %l1, %l4, %l4
65 ba,pt %xcc, __handle_user_windows_continue
66 39
67 andn %l1, %l4, %l1
68__handle_userfpu: 40__handle_userfpu:
69 rd %fprs, %l5 41 rd %fprs, %l5
70 andcc %l5, FPRS_FEF, %g0 42 andcc %l5, FPRS_FEF, %g0
@@ -87,7 +59,7 @@ __handle_signal:
87 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 59 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
88 sethi %hi(0xf << 20), %l4 60 sethi %hi(0xf << 20), %l4
89 and %l1, %l4, %l4 61 and %l1, %l4, %l4
90 ba,pt %xcc, __handle_signal_continue 62 ba,pt %xcc, __handle_preemption_continue
91 andn %l1, %l4, %l1 63 andn %l1, %l4, %l1
92 64
93 /* When returning from a NMI (%pil==15) interrupt we want to 65 /* When returning from a NMI (%pil==15) interrupt we want to
@@ -177,11 +149,9 @@ __handle_preemption_continue:
177 bne,pn %xcc, __handle_preemption 149 bne,pn %xcc, __handle_preemption
178 andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0 150 andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
179 bne,pn %xcc, __handle_signal 151 bne,pn %xcc, __handle_signal
180__handle_signal_continue:
181 ldub [%g6 + TI_WSAVED], %o2 152 ldub [%g6 + TI_WSAVED], %o2
182 brnz,pn %o2, __handle_user_windows 153 brnz,pn %o2, __handle_user_windows
183 nop 154 nop
184__handle_user_windows_continue:
185 sethi %hi(TSTATE_PEF), %o0 155 sethi %hi(TSTATE_PEF), %o0
186 andcc %l1, %o0, %g0 156 andcc %l1, %o0, %g0
187 157
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ca32d13abcf..a161b9c77f05 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
97 97
98 default: 98 default:
99 return; 99 return;
100 }; 100 }
101 101
102 val = upa_readq(cfg_reg); 102 val = upa_readq(cfg_reg);
103 if (val & (1UL << 14UL)) { 103 if (val & (1UL << 14UL)) {
@@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
244 case 3: 244 case 3:
245 iclr = reg_base + SYSIO_ICLR_SLOT3; 245 iclr = reg_base + SYSIO_ICLR_SLOT3;
246 break; 246 break;
247 }; 247 }
248 248
249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
250 } 250 }
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index b22ce6100403..d26e1f6c717a 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -82,7 +82,7 @@ static void prom_sync_me(void)
82 "nop\n\t" : : "r" (&trapbase)); 82 "nop\n\t" : : "r" (&trapbase));
83 83
84 prom_printf("PROM SYNC COMMAND...\n"); 84 prom_printf("PROM SYNC COMMAND...\n");
85 show_free_areas(); 85 show_free_areas(0);
86 if(current->pid != 0) { 86 if(current->pid != 0) {
87 local_irq_enable(); 87 local_irq_enable();
88 sys_sync(); 88 sys_sync();
@@ -103,16 +103,20 @@ static unsigned int boot_flags __initdata = 0;
103/* Exported for mm/init.c:paging_init. */ 103/* Exported for mm/init.c:paging_init. */
104unsigned long cmdline_memory_size __initdata = 0; 104unsigned long cmdline_memory_size __initdata = 0;
105 105
106/* which CPU booted us (0xff = not set) */
107unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
108unsigned char boot_cpu_id4; /* boot_cpu_id << 2 */
109
106static void 110static void
107prom_console_write(struct console *con, const char *s, unsigned n) 111prom_console_write(struct console *con, const char *s, unsigned n)
108{ 112{
109 prom_write(s, n); 113 prom_write(s, n);
110} 114}
111 115
112static struct console prom_debug_console = { 116static struct console prom_early_console = {
113 .name = "debug", 117 .name = "earlyprom",
114 .write = prom_console_write, 118 .write = prom_console_write,
115 .flags = CON_PRINTBUFFER, 119 .flags = CON_PRINTBUFFER | CON_BOOT,
116 .index = -1, 120 .index = -1,
117}; 121};
118 122
@@ -133,8 +137,7 @@ static void __init process_switch(char c)
133 prom_halt(); 137 prom_halt();
134 break; 138 break;
135 case 'p': 139 case 'p':
136 /* Use PROM debug console. */ 140 /* Just ignore, this behavior is now the default. */
137 register_console(&prom_debug_console);
138 break; 141 break;
139 default: 142 default:
140 printk("Unknown boot switch (-%c)\n", c); 143 printk("Unknown boot switch (-%c)\n", c);
@@ -184,8 +187,6 @@ static void __init boot_flags_init(char *commands)
184 */ 187 */
185 188
186extern void sun4c_probe_vac(void); 189extern void sun4c_probe_vac(void);
187extern char cputypval;
188extern unsigned long start, end;
189 190
190extern unsigned short root_flags; 191extern unsigned short root_flags;
191extern unsigned short root_dev; 192extern unsigned short root_dev;
@@ -210,30 +211,34 @@ void __init setup_arch(char **cmdline_p)
210 int i; 211 int i;
211 unsigned long highest_paddr; 212 unsigned long highest_paddr;
212 213
213 sparc_ttable = (struct tt_entry *) &start; 214 sparc_ttable = (struct tt_entry *) &trapbase;
214 215
215 /* Initialize PROM console and command line. */ 216 /* Initialize PROM console and command line. */
216 *cmdline_p = prom_getbootargs(); 217 *cmdline_p = prom_getbootargs();
217 strcpy(boot_command_line, *cmdline_p); 218 strcpy(boot_command_line, *cmdline_p);
218 parse_early_param(); 219 parse_early_param();
219 220
221 boot_flags_init(*cmdline_p);
222
223 register_console(&prom_early_console);
224
220 /* Set sparc_cpu_model */ 225 /* Set sparc_cpu_model */
221 sparc_cpu_model = sun_unknown; 226 sparc_cpu_model = sun_unknown;
222 if (!strcmp(&cputypval,"sun4 ")) 227 if (!strcmp(&cputypval[0], "sun4 "))
223 sparc_cpu_model = sun4; 228 sparc_cpu_model = sun4;
224 if (!strcmp(&cputypval,"sun4c")) 229 if (!strcmp(&cputypval[0], "sun4c"))
225 sparc_cpu_model = sun4c; 230 sparc_cpu_model = sun4c;
226 if (!strcmp(&cputypval,"sun4m")) 231 if (!strcmp(&cputypval[0], "sun4m"))
227 sparc_cpu_model = sun4m; 232 sparc_cpu_model = sun4m;
228 if (!strcmp(&cputypval,"sun4s")) 233 if (!strcmp(&cputypval[0], "sun4s"))
229 sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */ 234 sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
230 if (!strcmp(&cputypval,"sun4d")) 235 if (!strcmp(&cputypval[0], "sun4d"))
231 sparc_cpu_model = sun4d; 236 sparc_cpu_model = sun4d;
232 if (!strcmp(&cputypval,"sun4e")) 237 if (!strcmp(&cputypval[0], "sun4e"))
233 sparc_cpu_model = sun4e; 238 sparc_cpu_model = sun4e;
234 if (!strcmp(&cputypval,"sun4u")) 239 if (!strcmp(&cputypval[0], "sun4u"))
235 sparc_cpu_model = sun4u; 240 sparc_cpu_model = sun4u;
236 if (!strncmp(&cputypval, "leon" , 4)) 241 if (!strncmp(&cputypval[0], "leon" , 4))
237 sparc_cpu_model = sparc_leon; 242 sparc_cpu_model = sparc_leon;
238 243
239 printk("ARCH: "); 244 printk("ARCH: ");
@@ -262,12 +267,11 @@ void __init setup_arch(char **cmdline_p)
262 default: 267 default:
263 printk("UNKNOWN!\n"); 268 printk("UNKNOWN!\n");
264 break; 269 break;
265 }; 270 }
266 271
267#ifdef CONFIG_DUMMY_CONSOLE 272#ifdef CONFIG_DUMMY_CONSOLE
268 conswitchp = &dummy_con; 273 conswitchp = &dummy_con;
269#endif 274#endif
270 boot_flags_init(*cmdline_p);
271 275
272 idprom_init(); 276 idprom_init();
273 if (ARCH_SUN4C) 277 if (ARCH_SUN4C)
@@ -313,75 +317,6 @@ void __init setup_arch(char **cmdline_p)
313 smp_setup_cpu_possible_map(); 317 smp_setup_cpu_possible_map();
314} 318}
315 319
316static int ncpus_probed;
317
318static int show_cpuinfo(struct seq_file *m, void *__unused)
319{
320 seq_printf(m,
321 "cpu\t\t: %s\n"
322 "fpu\t\t: %s\n"
323 "promlib\t\t: Version %d Revision %d\n"
324 "prom\t\t: %d.%d\n"
325 "type\t\t: %s\n"
326 "ncpus probed\t: %d\n"
327 "ncpus active\t: %d\n"
328#ifndef CONFIG_SMP
329 "CPU0Bogo\t: %lu.%02lu\n"
330 "CPU0ClkTck\t: %ld\n"
331#endif
332 ,
333 sparc_cpu_type,
334 sparc_fpu_type ,
335 romvec->pv_romvers,
336 prom_rev,
337 romvec->pv_printrev >> 16,
338 romvec->pv_printrev & 0xffff,
339 &cputypval,
340 ncpus_probed,
341 num_online_cpus()
342#ifndef CONFIG_SMP
343 , cpu_data(0).udelay_val/(500000/HZ),
344 (cpu_data(0).udelay_val/(5000/HZ)) % 100,
345 cpu_data(0).clock_tick
346#endif
347 );
348
349#ifdef CONFIG_SMP
350 smp_bogo(m);
351#endif
352 mmu_info(m);
353#ifdef CONFIG_SMP
354 smp_info(m);
355#endif
356 return 0;
357}
358
359static void *c_start(struct seq_file *m, loff_t *pos)
360{
361 /* The pointer we are returning is arbitrary,
362 * it just has to be non-NULL and not IS_ERR
363 * in the success case.
364 */
365 return *pos == 0 ? &c_start : NULL;
366}
367
368static void *c_next(struct seq_file *m, void *v, loff_t *pos)
369{
370 ++*pos;
371 return c_start(m, pos);
372}
373
374static void c_stop(struct seq_file *m, void *v)
375{
376}
377
378const struct seq_operations cpuinfo_op = {
379 .start =c_start,
380 .next = c_next,
381 .stop = c_stop,
382 .show = show_cpuinfo,
383};
384
385extern int stop_a_enabled; 320extern int stop_a_enabled;
386 321
387void sun_do_break(void) 322void sun_do_break(void)
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 5f72de67588b..c4dd0999da86 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -209,7 +209,7 @@ void __init per_cpu_patch(void)
209 default: 209 default:
210 prom_printf("Unknown cpu type, halting.\n"); 210 prom_printf("Unknown cpu type, halting.\n");
211 prom_halt(); 211 prom_halt();
212 }; 212 }
213 213
214 *(unsigned int *) (addr + 0) = insns[0]; 214 *(unsigned int *) (addr + 0) = insns[0];
215 wmb(); 215 wmb();
@@ -315,7 +315,7 @@ void __init setup_arch(char **cmdline_p)
315 315
316#ifdef CONFIG_IP_PNP 316#ifdef CONFIG_IP_PNP
317 if (!ic_set_manually) { 317 if (!ic_set_manually) {
318 int chosen = prom_finddevice ("/chosen"); 318 phandle chosen = prom_finddevice("/chosen");
319 u32 cl, sv, gw; 319 u32 cl, sv, gw;
320 320
321 cl = prom_getintdefault (chosen, "client-ip", 0); 321 cl = prom_getintdefault (chosen, "client-ip", 0);
@@ -339,84 +339,6 @@ void __init setup_arch(char **cmdline_p)
339 paging_init(); 339 paging_init();
340} 340}
341 341
342/* BUFFER is PAGE_SIZE bytes long. */
343
344extern void smp_info(struct seq_file *);
345extern void smp_bogo(struct seq_file *);
346extern void mmu_info(struct seq_file *);
347
348unsigned int dcache_parity_tl1_occurred;
349unsigned int icache_parity_tl1_occurred;
350
351int ncpus_probed;
352
353static int show_cpuinfo(struct seq_file *m, void *__unused)
354{
355 seq_printf(m,
356 "cpu\t\t: %s\n"
357 "fpu\t\t: %s\n"
358 "pmu\t\t: %s\n"
359 "prom\t\t: %s\n"
360 "type\t\t: %s\n"
361 "ncpus probed\t: %d\n"
362 "ncpus active\t: %d\n"
363 "D$ parity tl1\t: %u\n"
364 "I$ parity tl1\t: %u\n"
365#ifndef CONFIG_SMP
366 "Cpu0ClkTck\t: %016lx\n"
367#endif
368 ,
369 sparc_cpu_type,
370 sparc_fpu_type,
371 sparc_pmu_type,
372 prom_version,
373 ((tlb_type == hypervisor) ?
374 "sun4v" :
375 "sun4u"),
376 ncpus_probed,
377 num_online_cpus(),
378 dcache_parity_tl1_occurred,
379 icache_parity_tl1_occurred
380#ifndef CONFIG_SMP
381 , cpu_data(0).clock_tick
382#endif
383 );
384#ifdef CONFIG_SMP
385 smp_bogo(m);
386#endif
387 mmu_info(m);
388#ifdef CONFIG_SMP
389 smp_info(m);
390#endif
391 return 0;
392}
393
394static void *c_start(struct seq_file *m, loff_t *pos)
395{
396 /* The pointer we are returning is arbitrary,
397 * it just has to be non-NULL and not IS_ERR
398 * in the success case.
399 */
400 return *pos == 0 ? &c_start : NULL;
401}
402
403static void *c_next(struct seq_file *m, void *v, loff_t *pos)
404{
405 ++*pos;
406 return c_start(m, pos);
407}
408
409static void c_stop(struct seq_file *m, void *v)
410{
411}
412
413const struct seq_operations cpuinfo_op = {
414 .start =c_start,
415 .next = c_next,
416 .stop = c_stop,
417 .show = show_cpuinfo,
418};
419
420extern int stop_a_enabled; 342extern int stop_a_enabled;
421 343
422void sun_do_break(void) 344void sun_do_break(void)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..21b125341bf7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -37,8 +37,6 @@
37#include "irq.h" 37#include "irq.h"
38 38
39volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; 39volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
40unsigned char boot_cpu_id = 0;
41unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
42 40
43cpumask_t smp_commenced_mask = CPU_MASK_NONE; 41cpumask_t smp_commenced_mask = CPU_MASK_NONE;
44 42
@@ -53,6 +51,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
53void __cpuinit smp_store_cpu_info(int id) 51void __cpuinit smp_store_cpu_info(int id)
54{ 52{
55 int cpu_node; 53 int cpu_node;
54 int mid;
56 55
57 cpu_data(id).udelay_val = loops_per_jiffy; 56 cpu_data(id).udelay_val = loops_per_jiffy;
58 57
@@ -60,10 +59,13 @@ void __cpuinit smp_store_cpu_info(int id)
60 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 59 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
61 "clock-frequency", 0); 60 "clock-frequency", 0);
62 cpu_data(id).prom_node = cpu_node; 61 cpu_data(id).prom_node = cpu_node;
63 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 62 mid = cpu_get_hwmid(cpu_node);
64 63
65 if (cpu_data(id).mid < 0) 64 if (mid < 0) {
66 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 65 printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
66 mid = 0;
67 }
68 cpu_data(id).mid = mid;
67} 69}
68 70
69void __init smp_cpus_done(unsigned int max_cpus) 71void __init smp_cpus_done(unsigned int max_cpus)
@@ -112,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
112 printk("UNKNOWN!\n"); 114 printk("UNKNOWN!\n");
113 BUG(); 115 BUG();
114 break; 116 break;
115 }; 117 }
116} 118}
117 119
118void cpu_panic(void) 120void cpu_panic(void)
@@ -125,13 +127,58 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
125 127
126void smp_send_reschedule(int cpu) 128void smp_send_reschedule(int cpu)
127{ 129{
128 /* See sparc64 */ 130 /*
131 * CPU model dependent way of implementing IPI generation targeting
132 * a single CPU. The trap handler needs only to do trap entry/return
133 * to call schedule.
134 */
135 BTFIXUP_CALL(smp_ipi_resched)(cpu);
129} 136}
130 137
131void smp_send_stop(void) 138void smp_send_stop(void)
132{ 139{
133} 140}
134 141
142void arch_send_call_function_single_ipi(int cpu)
143{
144 /* trigger one IPI single call on one CPU */
145 BTFIXUP_CALL(smp_ipi_single)(cpu);
146}
147
148void arch_send_call_function_ipi_mask(const struct cpumask *mask)
149{
150 int cpu;
151
152 /* trigger IPI mask call on each CPU */
153 for_each_cpu(cpu, mask)
154 BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
155}
156
157void smp_resched_interrupt(void)
158{
159 irq_enter();
160 scheduler_ipi();
161 local_cpu_data().irq_resched_count++;
162 irq_exit();
163 /* re-schedule routine called by interrupt return code. */
164}
165
166void smp_call_function_single_interrupt(void)
167{
168 irq_enter();
169 generic_smp_call_function_single_interrupt();
170 local_cpu_data().irq_call_count++;
171 irq_exit();
172}
173
174void smp_call_function_interrupt(void)
175{
176 irq_enter();
177 generic_smp_call_function_interrupt();
178 local_cpu_data().irq_call_count++;
179 irq_exit();
180}
181
135void smp_flush_cache_all(void) 182void smp_flush_cache_all(void)
136{ 183{
137 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all)); 184 xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
@@ -147,9 +194,10 @@ void smp_flush_tlb_all(void)
147void smp_flush_cache_mm(struct mm_struct *mm) 194void smp_flush_cache_mm(struct mm_struct *mm)
148{ 195{
149 if(mm->context != NO_CONTEXT) { 196 if(mm->context != NO_CONTEXT) {
150 cpumask_t cpu_mask = *mm_cpumask(mm); 197 cpumask_t cpu_mask;
151 cpu_clear(smp_processor_id(), cpu_mask); 198 cpumask_copy(&cpu_mask, mm_cpumask(mm));
152 if (!cpus_empty(cpu_mask)) 199 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
200 if (!cpumask_empty(&cpu_mask))
153 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm); 201 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
154 local_flush_cache_mm(mm); 202 local_flush_cache_mm(mm);
155 } 203 }
@@ -158,9 +206,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
158void smp_flush_tlb_mm(struct mm_struct *mm) 206void smp_flush_tlb_mm(struct mm_struct *mm)
159{ 207{
160 if(mm->context != NO_CONTEXT) { 208 if(mm->context != NO_CONTEXT) {
161 cpumask_t cpu_mask = *mm_cpumask(mm); 209 cpumask_t cpu_mask;
162 cpu_clear(smp_processor_id(), cpu_mask); 210 cpumask_copy(&cpu_mask, mm_cpumask(mm));
163 if (!cpus_empty(cpu_mask)) { 211 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
212 if (!cpumask_empty(&cpu_mask)) {
164 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm); 213 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
165 if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) 214 if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
166 cpumask_copy(mm_cpumask(mm), 215 cpumask_copy(mm_cpumask(mm),
@@ -176,9 +225,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
176 struct mm_struct *mm = vma->vm_mm; 225 struct mm_struct *mm = vma->vm_mm;
177 226
178 if (mm->context != NO_CONTEXT) { 227 if (mm->context != NO_CONTEXT) {
179 cpumask_t cpu_mask = *mm_cpumask(mm); 228 cpumask_t cpu_mask;
180 cpu_clear(smp_processor_id(), cpu_mask); 229 cpumask_copy(&cpu_mask, mm_cpumask(mm));
181 if (!cpus_empty(cpu_mask)) 230 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
231 if (!cpumask_empty(&cpu_mask))
182 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end); 232 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
183 local_flush_cache_range(vma, start, end); 233 local_flush_cache_range(vma, start, end);
184 } 234 }
@@ -190,9 +240,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
190 struct mm_struct *mm = vma->vm_mm; 240 struct mm_struct *mm = vma->vm_mm;
191 241
192 if (mm->context != NO_CONTEXT) { 242 if (mm->context != NO_CONTEXT) {
193 cpumask_t cpu_mask = *mm_cpumask(mm); 243 cpumask_t cpu_mask;
194 cpu_clear(smp_processor_id(), cpu_mask); 244 cpumask_copy(&cpu_mask, mm_cpumask(mm));
195 if (!cpus_empty(cpu_mask)) 245 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
246 if (!cpumask_empty(&cpu_mask))
196 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end); 247 xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
197 local_flush_tlb_range(vma, start, end); 248 local_flush_tlb_range(vma, start, end);
198 } 249 }
@@ -203,9 +254,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
203 struct mm_struct *mm = vma->vm_mm; 254 struct mm_struct *mm = vma->vm_mm;
204 255
205 if(mm->context != NO_CONTEXT) { 256 if(mm->context != NO_CONTEXT) {
206 cpumask_t cpu_mask = *mm_cpumask(mm); 257 cpumask_t cpu_mask;
207 cpu_clear(smp_processor_id(), cpu_mask); 258 cpumask_copy(&cpu_mask, mm_cpumask(mm));
208 if (!cpus_empty(cpu_mask)) 259 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
260 if (!cpumask_empty(&cpu_mask))
209 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page); 261 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
210 local_flush_cache_page(vma, page); 262 local_flush_cache_page(vma, page);
211 } 263 }
@@ -216,19 +268,15 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
216 struct mm_struct *mm = vma->vm_mm; 268 struct mm_struct *mm = vma->vm_mm;
217 269
218 if(mm->context != NO_CONTEXT) { 270 if(mm->context != NO_CONTEXT) {
219 cpumask_t cpu_mask = *mm_cpumask(mm); 271 cpumask_t cpu_mask;
220 cpu_clear(smp_processor_id(), cpu_mask); 272 cpumask_copy(&cpu_mask, mm_cpumask(mm));
221 if (!cpus_empty(cpu_mask)) 273 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
274 if (!cpumask_empty(&cpu_mask))
222 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page); 275 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
223 local_flush_tlb_page(vma, page); 276 local_flush_tlb_page(vma, page);
224 } 277 }
225} 278}
226 279
227void smp_reschedule_irq(void)
228{
229 set_need_resched();
230}
231
232void smp_flush_page_to_ram(unsigned long page) 280void smp_flush_page_to_ram(unsigned long page)
233{ 281{
234 /* Current theory is that those who call this are the one's 282 /* Current theory is that those who call this are the one's
@@ -245,9 +293,10 @@ void smp_flush_page_to_ram(unsigned long page)
245 293
246void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) 294void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
247{ 295{
248 cpumask_t cpu_mask = *mm_cpumask(mm); 296 cpumask_t cpu_mask;
249 cpu_clear(smp_processor_id(), cpu_mask); 297 cpumask_copy(&cpu_mask, mm_cpumask(mm));
250 if (!cpus_empty(cpu_mask)) 298 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
299 if (!cpumask_empty(&cpu_mask))
251 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr); 300 xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
252 local_flush_sig_insns(mm, insn_addr); 301 local_flush_sig_insns(mm, insn_addr);
253} 302}
@@ -325,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
325 printk("UNKNOWN!\n"); 374 printk("UNKNOWN!\n");
326 BUG(); 375 BUG();
327 break; 376 break;
328 }; 377 }
329} 378}
330 379
331/* Set this up early so that things like the scheduler can init 380/* Set this up early so that things like the scheduler can init
@@ -398,10 +447,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
398 printk("UNKNOWN!\n"); 447 printk("UNKNOWN!\n");
399 BUG(); 448 BUG();
400 break; 449 break;
401 }; 450 }
402 451
403 if (!ret) { 452 if (!ret) {
404 cpu_set(cpu, smp_commenced_mask); 453 cpumask_set_cpu(cpu, &smp_commenced_mask);
405 while (!cpu_online(cpu)) 454 while (!cpu_online(cpu))
406 mb(); 455 mb();
407 } 456 }
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f47040..99cb17251bb5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,6 +49,7 @@
49#include <asm/mdesc.h> 49#include <asm/mdesc.h>
50#include <asm/ldc.h> 50#include <asm/ldc.h>
51#include <asm/hypervisor.h> 51#include <asm/hypervisor.h>
52#include <asm/pcr.h>
52 53
53#include "cpumap.h" 54#include "cpumap.h"
54 55
@@ -120,11 +121,11 @@ void __cpuinit smp_callin(void)
120 /* inform the notifiers about the new cpu */ 121 /* inform the notifiers about the new cpu */
121 notify_cpu_starting(cpuid); 122 notify_cpu_starting(cpuid);
122 123
123 while (!cpu_isset(cpuid, smp_commenced_mask)) 124 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
124 rmb(); 125 rmb();
125 126
126 ipi_call_lock_irq(); 127 ipi_call_lock_irq();
127 cpu_set(cpuid, cpu_online_map); 128 set_cpu_online(cpuid, true);
128 ipi_call_unlock_irq(); 129 ipi_call_unlock_irq();
129 130
130 /* idle thread is expected to have preempt disabled */ 131 /* idle thread is expected to have preempt disabled */
@@ -188,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
188void smp_synchronize_tick_client(void) 189void smp_synchronize_tick_client(void)
189{ 190{
190 long i, delta, adj, adjust_latency = 0, done = 0; 191 long i, delta, adj, adjust_latency = 0, done = 0;
191 unsigned long flags, rt, master_time_stamp, bound; 192 unsigned long flags, rt, master_time_stamp;
192#if DEBUG_TICK_SYNC 193#if DEBUG_TICK_SYNC
193 struct { 194 struct {
194 long rt; /* roundtrip time */ 195 long rt; /* roundtrip time */
@@ -207,10 +208,8 @@ void smp_synchronize_tick_client(void)
207 { 208 {
208 for (i = 0; i < NUM_ROUNDS; i++) { 209 for (i = 0; i < NUM_ROUNDS; i++) {
209 delta = get_delta(&rt, &master_time_stamp); 210 delta = get_delta(&rt, &master_time_stamp);
210 if (delta == 0) { 211 if (delta == 0)
211 done = 1; /* let's lock on to this... */ 212 done = 1; /* let's lock on to this... */
212 bound = rt;
213 }
214 213
215 if (!done) { 214 if (!done) {
216 if (i > 0) { 215 if (i > 0) {
@@ -786,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
786 785
787/* Send cross call to all processors mentioned in MASK_P 786/* Send cross call to all processors mentioned in MASK_P
788 * except self. Really, there are only two cases currently, 787 * except self. Really, there are only two cases currently,
789 * "&cpu_online_map" and "&mm->cpu_vm_mask". 788 * "cpu_online_mask" and "mm_cpumask(mm)".
790 */ 789 */
791static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) 790static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
792{ 791{
@@ -798,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
798/* Send cross call to all processors except self. */ 797/* Send cross call to all processors except self. */
799static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) 798static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
800{ 799{
801 smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map); 800 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
802} 801}
803 802
804extern unsigned long xcall_sync_tick; 803extern unsigned long xcall_sync_tick;
@@ -806,7 +805,7 @@ extern unsigned long xcall_sync_tick;
806static void smp_start_sync_tick_client(int cpu) 805static void smp_start_sync_tick_client(int cpu)
807{ 806{
808 xcall_deliver((u64) &xcall_sync_tick, 0, 0, 807 xcall_deliver((u64) &xcall_sync_tick, 0, 0,
809 &cpumask_of_cpu(cpu)); 808 cpumask_of(cpu));
810} 809}
811 810
812extern unsigned long xcall_call_function; 811extern unsigned long xcall_call_function;
@@ -821,7 +820,7 @@ extern unsigned long xcall_call_function_single;
821void arch_send_call_function_single_ipi(int cpu) 820void arch_send_call_function_single_ipi(int cpu)
822{ 821{
823 xcall_deliver((u64) &xcall_call_function_single, 0, 0, 822 xcall_deliver((u64) &xcall_call_function_single, 0, 0,
824 &cpumask_of_cpu(cpu)); 823 cpumask_of(cpu));
825} 824}
826 825
827void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) 826void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -919,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
919 } 918 }
920 if (data0) { 919 if (data0) {
921 xcall_deliver(data0, __pa(pg_addr), 920 xcall_deliver(data0, __pa(pg_addr),
922 (u64) pg_addr, &cpumask_of_cpu(cpu)); 921 (u64) pg_addr, cpumask_of(cpu));
923#ifdef CONFIG_DEBUG_DCFLUSH 922#ifdef CONFIG_DEBUG_DCFLUSH
924 atomic_inc(&dcpage_flushes_xcall); 923 atomic_inc(&dcpage_flushes_xcall);
925#endif 924#endif
@@ -932,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
932void flush_dcache_page_all(struct mm_struct *mm, struct page *page) 931void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
933{ 932{
934 void *pg_addr; 933 void *pg_addr;
935 int this_cpu;
936 u64 data0; 934 u64 data0;
937 935
938 if (tlb_type == hypervisor) 936 if (tlb_type == hypervisor)
939 return; 937 return;
940 938
941 this_cpu = get_cpu(); 939 preempt_disable();
942 940
943#ifdef CONFIG_DEBUG_DCFLUSH 941#ifdef CONFIG_DEBUG_DCFLUSH
944 atomic_inc(&dcpage_flushes); 942 atomic_inc(&dcpage_flushes);
@@ -956,14 +954,14 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
956 } 954 }
957 if (data0) { 955 if (data0) {
958 xcall_deliver(data0, __pa(pg_addr), 956 xcall_deliver(data0, __pa(pg_addr),
959 (u64) pg_addr, &cpu_online_map); 957 (u64) pg_addr, cpu_online_mask);
960#ifdef CONFIG_DEBUG_DCFLUSH 958#ifdef CONFIG_DEBUG_DCFLUSH
961 atomic_inc(&dcpage_flushes_xcall); 959 atomic_inc(&dcpage_flushes_xcall);
962#endif 960#endif
963 } 961 }
964 __local_flush_dcache_page(page); 962 __local_flush_dcache_page(page);
965 963
966 put_cpu(); 964 preempt_enable();
967} 965}
968 966
969void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) 967void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
@@ -1199,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
1199 for_each_present_cpu(i) { 1197 for_each_present_cpu(i) {
1200 unsigned int j; 1198 unsigned int j;
1201 1199
1202 cpus_clear(cpu_core_map[i]); 1200 cpumask_clear(&cpu_core_map[i]);
1203 if (cpu_data(i).core_id == 0) { 1201 if (cpu_data(i).core_id == 0) {
1204 cpu_set(i, cpu_core_map[i]); 1202 cpumask_set_cpu(i, &cpu_core_map[i]);
1205 continue; 1203 continue;
1206 } 1204 }
1207 1205
1208 for_each_present_cpu(j) { 1206 for_each_present_cpu(j) {
1209 if (cpu_data(i).core_id == 1207 if (cpu_data(i).core_id ==
1210 cpu_data(j).core_id) 1208 cpu_data(j).core_id)
1211 cpu_set(j, cpu_core_map[i]); 1209 cpumask_set_cpu(j, &cpu_core_map[i]);
1212 } 1210 }
1213 } 1211 }
1214 1212
1215 for_each_present_cpu(i) { 1213 for_each_present_cpu(i) {
1216 unsigned int j; 1214 unsigned int j;
1217 1215
1218 cpus_clear(per_cpu(cpu_sibling_map, i)); 1216 cpumask_clear(&per_cpu(cpu_sibling_map, i));
1219 if (cpu_data(i).proc_id == -1) { 1217 if (cpu_data(i).proc_id == -1) {
1220 cpu_set(i, per_cpu(cpu_sibling_map, i)); 1218 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1221 continue; 1219 continue;
1222 } 1220 }
1223 1221
1224 for_each_present_cpu(j) { 1222 for_each_present_cpu(j) {
1225 if (cpu_data(i).proc_id == 1223 if (cpu_data(i).proc_id ==
1226 cpu_data(j).proc_id) 1224 cpu_data(j).proc_id)
1227 cpu_set(j, per_cpu(cpu_sibling_map, i)); 1225 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1228 } 1226 }
1229 } 1227 }
1230} 1228}
@@ -1234,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
1234 int ret = smp_boot_one_cpu(cpu); 1232 int ret = smp_boot_one_cpu(cpu);
1235 1233
1236 if (!ret) { 1234 if (!ret) {
1237 cpu_set(cpu, smp_commenced_mask); 1235 cpumask_set_cpu(cpu, &smp_commenced_mask);
1238 while (!cpu_isset(cpu, cpu_online_map)) 1236 while (!cpu_online(cpu))
1239 mb(); 1237 mb();
1240 if (!cpu_isset(cpu, cpu_online_map)) { 1238 if (!cpu_online(cpu)) {
1241 ret = -ENODEV; 1239 ret = -ENODEV;
1242 } else { 1240 } else {
1243 /* On SUN4V, writes to %tick and %stick are 1241 /* On SUN4V, writes to %tick and %stick are
@@ -1271,7 +1269,7 @@ void cpu_play_dead(void)
1271 tb->nonresum_mondo_pa, 0); 1269 tb->nonresum_mondo_pa, 0);
1272 } 1270 }
1273 1271
1274 cpu_clear(cpu, smp_commenced_mask); 1272 cpumask_clear_cpu(cpu, &smp_commenced_mask);
1275 membar_safe("#Sync"); 1273 membar_safe("#Sync");
1276 1274
1277 local_irq_disable(); 1275 local_irq_disable();
@@ -1292,13 +1290,13 @@ int __cpu_disable(void)
1292 cpuinfo_sparc *c; 1290 cpuinfo_sparc *c;
1293 int i; 1291 int i;
1294 1292
1295 for_each_cpu_mask(i, cpu_core_map[cpu]) 1293 for_each_cpu(i, &cpu_core_map[cpu])
1296 cpu_clear(cpu, cpu_core_map[i]); 1294 cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1297 cpus_clear(cpu_core_map[cpu]); 1295 cpumask_clear(&cpu_core_map[cpu]);
1298 1296
1299 for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) 1297 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1300 cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); 1298 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1301 cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1299 cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1302 1300
1303 c = &cpu_data(cpu); 1301 c = &cpu_data(cpu);
1304 1302
@@ -1315,7 +1313,7 @@ int __cpu_disable(void)
1315 local_irq_disable(); 1313 local_irq_disable();
1316 1314
1317 ipi_call_lock(); 1315 ipi_call_lock();
1318 cpu_clear(cpu, cpu_online_map); 1316 set_cpu_online(cpu, false);
1319 ipi_call_unlock(); 1317 ipi_call_unlock();
1320 1318
1321 cpu_map_rebuild(); 1319 cpu_map_rebuild();
@@ -1329,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
1329 1327
1330 for (i = 0; i < 100; i++) { 1328 for (i = 0; i < 100; i++) {
1331 smp_rmb(); 1329 smp_rmb();
1332 if (!cpu_isset(cpu, smp_commenced_mask)) 1330 if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1333 break; 1331 break;
1334 msleep(100); 1332 msleep(100);
1335 } 1333 }
1336 if (cpu_isset(cpu, smp_commenced_mask)) { 1334 if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1337 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 1335 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1338 } else { 1336 } else {
1339#if defined(CONFIG_SUN_LDOMS) 1337#if defined(CONFIG_SUN_LDOMS)
@@ -1343,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
1343 do { 1341 do {
1344 hv_err = sun4v_cpu_stop(cpu); 1342 hv_err = sun4v_cpu_stop(cpu);
1345 if (hv_err == HV_EOK) { 1343 if (hv_err == HV_EOK) {
1346 cpu_clear(cpu, cpu_present_map); 1344 set_cpu_present(cpu, false);
1347 break; 1345 break;
1348 } 1346 }
1349 } while (--limit > 0); 1347 } while (--limit > 0);
@@ -1358,17 +1356,19 @@ void __cpu_die(unsigned int cpu)
1358 1356
1359void __init smp_cpus_done(unsigned int max_cpus) 1357void __init smp_cpus_done(unsigned int max_cpus)
1360{ 1358{
1359 pcr_arch_init();
1361} 1360}
1362 1361
1363void smp_send_reschedule(int cpu) 1362void smp_send_reschedule(int cpu)
1364{ 1363{
1365 xcall_deliver((u64) &xcall_receive_signal, 0, 0, 1364 xcall_deliver((u64) &xcall_receive_signal, 0, 0,
1366 &cpumask_of_cpu(cpu)); 1365 cpumask_of(cpu));
1367} 1366}
1368 1367
1369void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) 1368void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1370{ 1369{
1371 clear_softint(1 << irq); 1370 clear_softint(1 << irq);
1371 scheduler_ipi();
1372} 1372}
1373 1373
1374/* This is a nop because we capture all other cpus 1374/* This is a nop because we capture all other cpus
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 060d0f3a6151..82281a566bb8 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -23,8 +23,8 @@ int this_is_starfire = 0;
23 23
24void check_if_starfire(void) 24void check_if_starfire(void)
25{ 25{
26 int ssnode = prom_finddevice("/ssp-serial"); 26 phandle ssnode = prom_finddevice("/ssp-serial");
27 if (ssnode != 0 && ssnode != -1) 27 if (ssnode != 0 && (s32)ssnode != -1)
28 this_is_starfire = 1; 28 this_is_starfire = 1;
29} 29}
30 30
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 892fb884910a..f6bf25a2ff80 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -1,5 +1,5 @@
1/* sun4c_irq.c 1/*
2 * arch/sparc/kernel/sun4c_irq.c: 2 * sun4c irq support
3 * 3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version. 4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 * 5 *
@@ -9,31 +9,41 @@
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) 9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */ 10 */
11 11
12#include <linux/errno.h>
13#include <linux/linkage.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/ptrace.h>
18#include <linux/interrupt.h>
19#include <linux/init.h> 12#include <linux/init.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include "irq.h"
23 13
24#include <asm/ptrace.h>
25#include <asm/processor.h>
26#include <asm/system.h>
27#include <asm/psr.h>
28#include <asm/vaddrs.h>
29#include <asm/timer.h>
30#include <asm/openprom.h>
31#include <asm/oplib.h> 14#include <asm/oplib.h>
32#include <asm/traps.h> 15#include <asm/timer.h>
33#include <asm/irq.h> 16#include <asm/irq.h>
34#include <asm/io.h> 17#include <asm/io.h>
35#include <asm/idprom.h> 18
36#include <asm/machines.h> 19#include "irq.h"
20
21/* Sun4c interrupts are typically laid out as follows:
22 *
23 * 1 - Software interrupt, SBUS level 1
24 * 2 - SBUS level 2
25 * 3 - ESP SCSI, SBUS level 3
26 * 4 - Software interrupt
27 * 5 - Lance ethernet, SBUS level 4
28 * 6 - Software interrupt
29 * 7 - Graphics card, SBUS level 5
30 * 8 - SBUS level 6
31 * 9 - SBUS level 7
32 * 10 - Counter timer
33 * 11 - Floppy
34 * 12 - Zilog uart
35 * 13 - CS4231 audio
36 * 14 - Profiling timer
37 * 15 - NMI
38 *
39 * The interrupt enable bits in the interrupt mask register are
40 * really only used to enable/disable the timer interrupts, and
41 * for signalling software interrupts. There is also a master
42 * interrupt enable bit in this register.
43 *
44 * Interrupts are enabled by setting the SUN4C_INT_* bits, they
45 * are disabled by clearing those bits.
46 */
37 47
38/* 48/*
39 * Bit field defines for the interrupt registers on various 49 * Bit field defines for the interrupt registers on various
@@ -49,73 +59,100 @@
49#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */ 59#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
50#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */ 60#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
51 61
52/* Pointer to the interrupt enable byte 62/*
53 * 63 * Pointer to the interrupt enable byte
54 * Dave Redman (djhr@tadpole.co.uk) 64 * Used by entry.S
55 * What you may not be aware of is that entry.S requires this variable.
56 *
57 * --- linux_trap_nmi_sun4c --
58 *
59 * so don't go making it static, like I tried. sigh.
60 */ 65 */
61unsigned char __iomem *interrupt_enable = NULL; 66unsigned char __iomem *interrupt_enable;
62 67
63static void sun4c_disable_irq(unsigned int irq_nr) 68static void sun4c_mask_irq(struct irq_data *data)
64{ 69{
65 unsigned long flags; 70 unsigned long mask = (unsigned long)data->chip_data;
66 unsigned char current_mask, new_mask; 71
67 72 if (mask) {
68 local_irq_save(flags); 73 unsigned long flags;
69 irq_nr &= (NR_IRQS - 1); 74
70 current_mask = sbus_readb(interrupt_enable); 75 local_irq_save(flags);
71 switch(irq_nr) { 76 mask = sbus_readb(interrupt_enable) & ~mask;
72 case 1: 77 sbus_writeb(mask, interrupt_enable);
73 new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
74 break;
75 case 8:
76 new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
77 break;
78 case 10:
79 new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
80 break;
81 case 14:
82 new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
83 break;
84 default:
85 local_irq_restore(flags); 78 local_irq_restore(flags);
86 return;
87 } 79 }
88 sbus_writeb(new_mask, interrupt_enable);
89 local_irq_restore(flags);
90} 80}
91 81
92static void sun4c_enable_irq(unsigned int irq_nr) 82static void sun4c_unmask_irq(struct irq_data *data)
93{ 83{
94 unsigned long flags; 84 unsigned long mask = (unsigned long)data->chip_data;
95 unsigned char current_mask, new_mask; 85
96 86 if (mask) {
97 local_irq_save(flags); 87 unsigned long flags;
98 irq_nr &= (NR_IRQS - 1); 88
99 current_mask = sbus_readb(interrupt_enable); 89 local_irq_save(flags);
100 switch(irq_nr) { 90 mask = sbus_readb(interrupt_enable) | mask;
101 case 1: 91 sbus_writeb(mask, interrupt_enable);
102 new_mask = ((current_mask) | SUN4C_INT_E1);
103 break;
104 case 8:
105 new_mask = ((current_mask) | SUN4C_INT_E8);
106 break;
107 case 10:
108 new_mask = ((current_mask) | SUN4C_INT_E10);
109 break;
110 case 14:
111 new_mask = ((current_mask) | SUN4C_INT_E14);
112 break;
113 default:
114 local_irq_restore(flags); 92 local_irq_restore(flags);
115 return;
116 } 93 }
117 sbus_writeb(new_mask, interrupt_enable); 94}
118 local_irq_restore(flags); 95
96static unsigned int sun4c_startup_irq(struct irq_data *data)
97{
98 irq_link(data->irq);
99 sun4c_unmask_irq(data);
100
101 return 0;
102}
103
104static void sun4c_shutdown_irq(struct irq_data *data)
105{
106 sun4c_mask_irq(data);
107 irq_unlink(data->irq);
108}
109
110static struct irq_chip sun4c_irq = {
111 .name = "sun4c",
112 .irq_startup = sun4c_startup_irq,
113 .irq_shutdown = sun4c_shutdown_irq,
114 .irq_mask = sun4c_mask_irq,
115 .irq_unmask = sun4c_unmask_irq,
116};
117
118static unsigned int sun4c_build_device_irq(struct platform_device *op,
119 unsigned int real_irq)
120{
121 unsigned int irq;
122
123 if (real_irq >= 16) {
124 prom_printf("Bogus sun4c IRQ %u\n", real_irq);
125 prom_halt();
126 }
127
128 irq = irq_alloc(real_irq, real_irq);
129 if (irq) {
130 unsigned long mask = 0UL;
131
132 switch (real_irq) {
133 case 1:
134 mask = SUN4C_INT_E1;
135 break;
136 case 8:
137 mask = SUN4C_INT_E8;
138 break;
139 case 10:
140 mask = SUN4C_INT_E10;
141 break;
142 case 14:
143 mask = SUN4C_INT_E14;
144 break;
145 default:
146 /* All the rest are either always enabled,
147 * or are for signalling software interrupts.
148 */
149 break;
150 }
151 irq_set_chip_and_handler_name(irq, &sun4c_irq,
152 handle_level_irq, "level");
153 irq_set_chip_data(irq, (void *)mask);
154 }
155 return irq;
119} 156}
120 157
121struct sun4c_timer_info { 158struct sun4c_timer_info {
@@ -139,8 +176,9 @@ static void sun4c_load_profile_irq(int cpu, unsigned int limit)
139 176
140static void __init sun4c_init_timers(irq_handler_t counter_fn) 177static void __init sun4c_init_timers(irq_handler_t counter_fn)
141{ 178{
142 const struct linux_prom_irqs *irq; 179 const struct linux_prom_irqs *prom_irqs;
143 struct device_node *dp; 180 struct device_node *dp;
181 unsigned int irq;
144 const u32 *addr; 182 const u32 *addr;
145 int err; 183 int err;
146 184
@@ -158,9 +196,9 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
158 196
159 sun4c_timers = (void __iomem *) (unsigned long) addr[0]; 197 sun4c_timers = (void __iomem *) (unsigned long) addr[0];
160 198
161 irq = of_get_property(dp, "intr", NULL); 199 prom_irqs = of_get_property(dp, "intr", NULL);
162 of_node_put(dp); 200 of_node_put(dp);
163 if (!irq) { 201 if (!prom_irqs) {
164 prom_printf("sun4c_init_timers: No intr property\n"); 202 prom_printf("sun4c_init_timers: No intr property\n");
165 prom_halt(); 203 prom_halt();
166 } 204 }
@@ -173,19 +211,21 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
173 211
174 master_l10_counter = &sun4c_timers->l10_count; 212 master_l10_counter = &sun4c_timers->l10_count;
175 213
176 err = request_irq(irq[0].pri, counter_fn, 214 irq = sun4c_build_device_irq(NULL, prom_irqs[0].pri);
177 (IRQF_DISABLED | SA_STATIC_ALLOC), 215 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
178 "timer", NULL);
179 if (err) { 216 if (err) {
180 prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err); 217 prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
181 prom_halt(); 218 prom_halt();
182 } 219 }
183 220
184 sun4c_disable_irq(irq[1].pri); 221 /* disable timer interrupt */
222 sun4c_mask_irq(irq_get_irq_data(irq));
185} 223}
186 224
187#ifdef CONFIG_SMP 225#ifdef CONFIG_SMP
188static void sun4c_nop(void) {} 226static void sun4c_nop(void)
227{
228}
189#endif 229#endif
190 230
191void __init sun4c_init_IRQ(void) 231void __init sun4c_init_IRQ(void)
@@ -208,13 +248,12 @@ void __init sun4c_init_IRQ(void)
208 248
209 interrupt_enable = (void __iomem *) (unsigned long) addr[0]; 249 interrupt_enable = (void __iomem *) (unsigned long) addr[0];
210 250
211 BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
212 BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
213 BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
214 BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
215 BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM); 251 BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
216 BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP); 252 BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
217 sparc_init_timers = sun4c_init_timers; 253
254 sparc_irq_config.init_timers = sun4c_init_timers;
255 sparc_irq_config.build_device_irq = sun4c_build_device_irq;
256
218#ifdef CONFIG_SMP 257#ifdef CONFIG_SMP
219 BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP); 258 BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
220 BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP); 259 BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index e11b4612dabb..1d13c5bda0b1 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,50 +1,41 @@
1/* 1/*
2 * arch/sparc/kernel/sun4d_irq.c: 2 * SS1000/SC2000 interrupt handling.
3 * SS1000/SC2000 interrupt handling.
4 * 3 *
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Heavily based on arch/sparc/kernel/irq.c. 5 * Heavily based on arch/sparc/kernel/irq.c.
7 */ 6 */
8 7
9#include <linux/errno.h>
10#include <linux/linkage.h>
11#include <linux/kernel_stat.h> 8#include <linux/kernel_stat.h>
12#include <linux/signal.h>
13#include <linux/sched.h>
14#include <linux/ptrace.h>
15#include <linux/interrupt.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/init.h>
19#include <linux/smp.h>
20#include <linux/spinlock.h>
21#include <linux/seq_file.h> 9#include <linux/seq_file.h>
22#include <linux/of.h> 10
23#include <linux/of_device.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/smp.h>
30#include <asm/vaddrs.h>
31#include <asm/timer.h> 11#include <asm/timer.h>
32#include <asm/openprom.h>
33#include <asm/oplib.h>
34#include <asm/traps.h> 12#include <asm/traps.h>
35#include <asm/irq.h> 13#include <asm/irq.h>
36#include <asm/io.h> 14#include <asm/io.h>
37#include <asm/pgalloc.h>
38#include <asm/pgtable.h>
39#include <asm/sbi.h> 15#include <asm/sbi.h>
40#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
41#include <asm/irq_regs.h> 17#include <asm/setup.h>
42 18
43#include "kernel.h" 19#include "kernel.h"
44#include "irq.h" 20#include "irq.h"
45 21
46/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ 22/* Sun4d interrupts fall roughly into two categories. SBUS and
47/* #define DISTRIBUTE_IRQS */ 23 * cpu local. CPU local interrupts cover the timer interrupts
24 * and whatnot, and we encode those as normal PILs between
25 * 0 and 15.
26 * SBUS interrupts are encodes as a combination of board, level and slot.
27 */
28
29struct sun4d_handler_data {
30 unsigned int cpuid; /* target cpu */
31 unsigned int real_irq; /* interrupt level */
32};
33
34
35static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
36{
37 return (board + 1) << 5 | (lvl << 2) | slot;
38}
48 39
49struct sun4d_timer_regs { 40struct sun4d_timer_regs {
50 u32 l10_timer_limit; 41 u32 l10_timer_limit;
@@ -56,320 +47,201 @@ struct sun4d_timer_regs {
56 47
57static struct sun4d_timer_regs __iomem *sun4d_timers; 48static struct sun4d_timer_regs __iomem *sun4d_timers;
58 49
59#define TIMER_IRQ 10 50#define SUN4D_TIMER_IRQ 10
60
61#define MAX_STATIC_ALLOC 4
62extern int static_irq_count;
63static unsigned char sbus_tid[32];
64
65static struct irqaction *irq_action[NR_IRQS];
66extern spinlock_t irq_action_lock;
67 51
68static struct sbus_action { 52/* Specify which cpu handle interrupts from which board.
69 struct irqaction *action; 53 * Index is board - value is cpu.
70 /* For SMP this needs to be extended */ 54 */
71} *sbus_actions; 55static unsigned char board_to_cpu[32];
72 56
73static int pil_to_sbus[] = { 57static int pil_to_sbus[] = {
74 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, 58 0,
75}; 59 0,
76 60 1,
77static int sbus_to_pil[] = { 61 2,
78 0, 2, 3, 5, 7, 9, 11, 13, 62 0,
63 3,
64 0,
65 4,
66 0,
67 5,
68 0,
69 6,
70 0,
71 7,
72 0,
73 0,
79}; 74};
80 75
81static int nsbi;
82
83/* Exported for sun4d_smp.c */ 76/* Exported for sun4d_smp.c */
84DEFINE_SPINLOCK(sun4d_imsk_lock); 77DEFINE_SPINLOCK(sun4d_imsk_lock);
85 78
86int show_sun4d_interrupts(struct seq_file *p, void *v) 79/* SBUS interrupts are encoded integers including the board number
87{ 80 * (plus one), the SBUS level, and the SBUS slot number. Sun4D
88 int i = *(loff_t *) v, j = 0, k = 0, sbusl; 81 * IRQ dispatch is done by:
89 struct irqaction * action; 82 *
90 unsigned long flags; 83 * 1) Reading the BW local interrupt table in order to get the bus
91#ifdef CONFIG_SMP 84 * interrupt mask.
92 int x; 85 *
93#endif 86 * This table is indexed by SBUS interrupt level which can be
94 87 * derived from the PIL we got interrupted on.
95 spin_lock_irqsave(&irq_action_lock, flags); 88 *
96 if (i < NR_IRQS) { 89 * 2) For each bus showing interrupt pending from #1, read the
97 sbusl = pil_to_sbus[i]; 90 * SBI interrupt state register. This will indicate which slots
98 if (!sbusl) { 91 * have interrupts pending for that SBUS interrupt level.
99 action = *(i + irq_action); 92 *
100 if (!action) 93 * 3) Call the genreric IRQ support.
101 goto out_unlock; 94 */
102 } else { 95static void sun4d_sbus_handler_irq(int sbusl)
103 for (j = 0; j < nsbi; j++) {
104 for (k = 0; k < 4; k++)
105 if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
106 goto found_it;
107 }
108 goto out_unlock;
109 }
110found_it: seq_printf(p, "%3d: ", i);
111#ifndef CONFIG_SMP
112 seq_printf(p, "%10u ", kstat_irqs(i));
113#else
114 for_each_online_cpu(x)
115 seq_printf(p, "%10u ",
116 kstat_cpu(cpu_logical_map(x)).irqs[i]);
117#endif
118 seq_printf(p, "%c %s",
119 (action->flags & IRQF_DISABLED) ? '+' : ' ',
120 action->name);
121 action = action->next;
122 for (;;) {
123 for (; action; action = action->next) {
124 seq_printf(p, ",%s %s",
125 (action->flags & IRQF_DISABLED) ? " +" : "",
126 action->name);
127 }
128 if (!sbusl) break;
129 k++;
130 if (k < 4)
131 action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
132 else {
133 j++;
134 if (j == nsbi) break;
135 k = 0;
136 action = sbus_actions [(j << 5) + (sbusl << 2)].action;
137 }
138 }
139 seq_putc(p, '\n');
140 }
141out_unlock:
142 spin_unlock_irqrestore(&irq_action_lock, flags);
143 return 0;
144}
145
146void sun4d_free_irq(unsigned int irq, void *dev_id)
147{ 96{
148 struct irqaction *action, **actionp; 97 unsigned int bus_mask;
149 struct irqaction *tmp = NULL; 98 unsigned int sbino, slot;
150 unsigned long flags; 99 unsigned int sbil;
151 100
152 spin_lock_irqsave(&irq_action_lock, flags); 101 bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
153 if (irq < 15) 102 bw_clear_intr_mask(sbusl, bus_mask);
154 actionp = irq + irq_action; 103
155 else 104 sbil = (sbusl << 2);
156 actionp = &(sbus_actions[irq - (1 << 5)].action); 105 /* Loop for each pending SBI */
157 action = *actionp; 106 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
158 if (!action) { 107 unsigned int idx, mask;
159 printk("Trying to free free IRQ%d\n",irq); 108
160 goto out_unlock; 109 if (!(bus_mask & 1))
161 } 110 continue;
162 if (dev_id) { 111 /* XXX This seems to ACK the irq twice. acquire_sbi()
163 for (; action; action = action->next) { 112 * XXX uses swap, therefore this writes 0xf << sbil,
164 if (action->dev_id == dev_id) 113 * XXX then later release_sbi() will write the individual
165 break; 114 * XXX bits which were set again.
166 tmp = action;
167 }
168 if (!action) {
169 printk("Trying to free free shared IRQ%d\n",irq);
170 goto out_unlock;
171 }
172 } else if (action->flags & IRQF_SHARED) {
173 printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
174 goto out_unlock;
175 }
176 if (action->flags & SA_STATIC_ALLOC)
177 {
178 /* This interrupt is marked as specially allocated
179 * so it is a bad idea to free it.
180 */ 115 */
181 printk("Attempt to free statically allocated IRQ%d (%s)\n", 116 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
182 irq, action->name); 117 mask &= (0xf << sbil);
183 goto out_unlock;
184 }
185
186 if (tmp)
187 tmp->next = action->next;
188 else
189 *actionp = action->next;
190
191 spin_unlock_irqrestore(&irq_action_lock, flags);
192 118
193 synchronize_irq(irq); 119 /* Loop for each pending SBI slot */
120 slot = (1 << sbil);
121 for (idx = 0; mask != 0; idx++, slot <<= 1) {
122 unsigned int pil;
123 struct irq_bucket *p;
194 124
195 spin_lock_irqsave(&irq_action_lock, flags); 125 if (!(mask & slot))
126 continue;
196 127
197 kfree(action); 128 mask &= ~slot;
129 pil = sun4d_encode_irq(sbino, sbusl, idx);
198 130
199 if (!(*actionp)) 131 p = irq_map[pil];
200 __disable_irq(irq); 132 while (p) {
133 struct irq_bucket *next;
201 134
202out_unlock: 135 next = p->next;
203 spin_unlock_irqrestore(&irq_action_lock, flags); 136 generic_handle_irq(p->irq);
137 p = next;
138 }
139 release_sbi(SBI2DEVID(sbino), slot);
140 }
141 }
204} 142}
205 143
206extern void unexpected_irq(int, void *, struct pt_regs *); 144void sun4d_handler_irq(int pil, struct pt_regs *regs)
207
208void sun4d_handler_irq(int irq, struct pt_regs * regs)
209{ 145{
210 struct pt_regs *old_regs; 146 struct pt_regs *old_regs;
211 struct irqaction * action;
212 int cpu = smp_processor_id();
213 /* SBUS IRQ level (1 - 7) */ 147 /* SBUS IRQ level (1 - 7) */
214 int sbusl = pil_to_sbus[irq]; 148 int sbusl = pil_to_sbus[pil];
215 149
216 /* FIXME: Is this necessary?? */ 150 /* FIXME: Is this necessary?? */
217 cc_get_ipen(); 151 cc_get_ipen();
218 152
219 cc_set_iclr(1 << irq); 153 cc_set_iclr(1 << pil);
220 154
155#ifdef CONFIG_SMP
156 /*
157 * Check IPI data structures after IRQ has been cleared. Hard and Soft
158 * IRQ can happen at the same time, so both cases are always handled.
159 */
160 if (pil == SUN4D_IPI_IRQ)
161 sun4d_ipi_interrupt();
162#endif
163
221 old_regs = set_irq_regs(regs); 164 old_regs = set_irq_regs(regs);
222 irq_enter(); 165 irq_enter();
223 kstat_cpu(cpu).irqs[irq]++; 166 if (sbusl == 0) {
224 if (!sbusl) { 167 /* cpu interrupt */
225 action = *(irq + irq_action); 168 struct irq_bucket *p;
226 if (!action) 169
227 unexpected_irq(irq, NULL, regs); 170 p = irq_map[pil];
228 do { 171 while (p) {
229 action->handler(irq, action->dev_id); 172 struct irq_bucket *next;
230 action = action->next; 173
231 } while (action); 174 next = p->next;
175 generic_handle_irq(p->irq);
176 p = next;
177 }
232 } else { 178 } else {
233 int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; 179 /* SBUS interrupt */
234 int sbino; 180 sun4d_sbus_handler_irq(sbusl);
235 struct sbus_action *actionp;
236 unsigned mask, slot;
237 int sbil = (sbusl << 2);
238
239 bw_clear_intr_mask(sbusl, bus_mask);
240
241 /* Loop for each pending SBI */
242 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
243 if (bus_mask & 1) {
244 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
245 mask &= (0xf << sbil);
246 actionp = sbus_actions + (sbino << 5) + (sbil);
247 /* Loop for each pending SBI slot */
248 for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
249 if (mask & slot) {
250 mask &= ~slot;
251 action = actionp->action;
252
253 if (!action)
254 unexpected_irq(irq, NULL, regs);
255 do {
256 action->handler(irq, action->dev_id);
257 action = action->next;
258 } while (action);
259 release_sbi(SBI2DEVID(sbino), slot);
260 }
261 }
262 } 181 }
263 irq_exit(); 182 irq_exit();
264 set_irq_regs(old_regs); 183 set_irq_regs(old_regs);
265} 184}
266 185
267int sun4d_request_irq(unsigned int irq, 186
268 irq_handler_t handler, 187static void sun4d_mask_irq(struct irq_data *data)
269 unsigned long irqflags, const char * devname, void *dev_id)
270{ 188{
271 struct irqaction *action, *tmp = NULL, **actionp; 189 struct sun4d_handler_data *handler_data = data->handler_data;
190 unsigned int real_irq;
191#ifdef CONFIG_SMP
192 int cpuid = handler_data->cpuid;
272 unsigned long flags; 193 unsigned long flags;
273 int ret; 194#endif
274 195 real_irq = handler_data->real_irq;
275 if(irq > 14 && irq < (1 << 5)) { 196#ifdef CONFIG_SMP
276 ret = -EINVAL; 197 spin_lock_irqsave(&sun4d_imsk_lock, flags);
277 goto out; 198 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq));
278 } 199 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
279 200#else
280 if (!handler) { 201 cc_set_imsk(cc_get_imsk() | (1 << real_irq));
281 ret = -EINVAL; 202#endif
282 goto out;
283 }
284
285 spin_lock_irqsave(&irq_action_lock, flags);
286
287 if (irq >= (1 << 5))
288 actionp = &(sbus_actions[irq - (1 << 5)].action);
289 else
290 actionp = irq + irq_action;
291 action = *actionp;
292
293 if (action) {
294 if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
295 for (tmp = action; tmp->next; tmp = tmp->next);
296 } else {
297 ret = -EBUSY;
298 goto out_unlock;
299 }
300 if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
301 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
302 ret = -EBUSY;
303 goto out_unlock;
304 }
305 action = NULL; /* Or else! */
306 }
307
308 /* If this is flagged as statically allocated then we use our
309 * private struct which is never freed.
310 */
311 if (irqflags & SA_STATIC_ALLOC) {
312 if (static_irq_count < MAX_STATIC_ALLOC)
313 action = &static_irqaction[static_irq_count++];
314 else
315 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
316 }
317
318 if (action == NULL)
319 action = kmalloc(sizeof(struct irqaction),
320 GFP_ATOMIC);
321
322 if (!action) {
323 ret = -ENOMEM;
324 goto out_unlock;
325 }
326
327 action->handler = handler;
328 action->flags = irqflags;
329 action->name = devname;
330 action->next = NULL;
331 action->dev_id = dev_id;
332
333 if (tmp)
334 tmp->next = action;
335 else
336 *actionp = action;
337
338 __enable_irq(irq);
339
340 ret = 0;
341out_unlock:
342 spin_unlock_irqrestore(&irq_action_lock, flags);
343out:
344 return ret;
345} 203}
346 204
347static void sun4d_disable_irq(unsigned int irq) 205static void sun4d_unmask_irq(struct irq_data *data)
348{ 206{
349 int tid = sbus_tid[(irq >> 5) - 1]; 207 struct sun4d_handler_data *handler_data = data->handler_data;
208 unsigned int real_irq;
209#ifdef CONFIG_SMP
210 int cpuid = handler_data->cpuid;
350 unsigned long flags; 211 unsigned long flags;
351 212#endif
352 if (irq < NR_IRQS) 213 real_irq = handler_data->real_irq;
353 return;
354 214
215#ifdef CONFIG_SMP
355 spin_lock_irqsave(&sun4d_imsk_lock, flags); 216 spin_lock_irqsave(&sun4d_imsk_lock, flags);
356 cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7])); 217 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
357 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 218 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
219#else
220 cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
221#endif
358} 222}
359 223
360static void sun4d_enable_irq(unsigned int irq) 224static unsigned int sun4d_startup_irq(struct irq_data *data)
361{ 225{
362 int tid = sbus_tid[(irq >> 5) - 1]; 226 irq_link(data->irq);
363 unsigned long flags; 227 sun4d_unmask_irq(data);
364 228 return 0;
365 if (irq < NR_IRQS) 229}
366 return;
367 230
368 spin_lock_irqsave(&sun4d_imsk_lock, flags); 231static void sun4d_shutdown_irq(struct irq_data *data)
369 cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 232{
370 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 233 sun4d_mask_irq(data);
234 irq_unlink(data->irq);
371} 235}
372 236
237struct irq_chip sun4d_irq = {
238 .name = "sun4d",
239 .irq_startup = sun4d_startup_irq,
240 .irq_shutdown = sun4d_shutdown_irq,
241 .irq_unmask = sun4d_unmask_irq,
242 .irq_mask = sun4d_mask_irq,
243};
244
373#ifdef CONFIG_SMP 245#ifdef CONFIG_SMP
374static void sun4d_set_cpu_int(int cpu, int level) 246static void sun4d_set_cpu_int(int cpu, int level)
375{ 247{
@@ -389,44 +261,6 @@ void __init sun4d_distribute_irqs(void)
389{ 261{
390 struct device_node *dp; 262 struct device_node *dp;
391 263
392#ifdef DISTRIBUTE_IRQS
393 cpumask_t sbus_serving_map;
394
395 sbus_serving_map = cpu_present_map;
396 for_each_node_by_name(dp, "sbi") {
397 int board = of_getintprop_default(dp, "board#", 0);
398
399 if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
400 sbus_tid[board] = (board * 2 + 1);
401 else if (cpu_isset(board * 2, cpu_present_map))
402 sbus_tid[board] = (board * 2);
403 else if (cpu_isset(board * 2 + 1, cpu_present_map))
404 sbus_tid[board] = (board * 2 + 1);
405 else
406 sbus_tid[board] = 0xff;
407 if (sbus_tid[board] != 0xff)
408 cpu_clear(sbus_tid[board], sbus_serving_map);
409 }
410 for_each_node_by_name(dp, "sbi") {
411 int board = of_getintprop_default(dp, "board#", 0);
412 if (sbus_tid[board] == 0xff) {
413 int i = 31;
414
415 if (cpus_empty(sbus_serving_map))
416 sbus_serving_map = cpu_present_map;
417 while (cpu_isset(i, sbus_serving_map))
418 i--;
419 sbus_tid[board] = i;
420 cpu_clear(i, sbus_serving_map);
421 }
422 }
423 for_each_node_by_name(dp, "sbi") {
424 int devid = of_getintprop_default(dp, "device-id", 0);
425 int board = of_getintprop_default(dp, "board#", 0);
426 printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
427 set_sbi_tid(devid, sbus_tid[board] << 3);
428 }
429#else
430 int cpuid = cpu_logical_map(1); 264 int cpuid = cpu_logical_map(1);
431 265
432 if (cpuid == -1) 266 if (cpuid == -1)
@@ -434,14 +268,13 @@ void __init sun4d_distribute_irqs(void)
434 for_each_node_by_name(dp, "sbi") { 268 for_each_node_by_name(dp, "sbi") {
435 int devid = of_getintprop_default(dp, "device-id", 0); 269 int devid = of_getintprop_default(dp, "device-id", 0);
436 int board = of_getintprop_default(dp, "board#", 0); 270 int board = of_getintprop_default(dp, "board#", 0);
437 sbus_tid[board] = cpuid; 271 board_to_cpu[board] = cpuid;
438 set_sbi_tid(devid, cpuid << 3); 272 set_sbi_tid(devid, cpuid << 3);
439 } 273 }
440 printk("All sbus IRQs directed to CPU%d\n", cpuid); 274 printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
441#endif
442} 275}
443#endif 276#endif
444 277
445static void sun4d_clear_clock_irq(void) 278static void sun4d_clear_clock_irq(void)
446{ 279{
447 sbus_readl(&sun4d_timers->l10_timer_limit); 280 sbus_readl(&sun4d_timers->l10_timer_limit);
@@ -462,14 +295,115 @@ static void __init sun4d_load_profile_irqs(void)
462 } 295 }
463} 296}
464 297
298unsigned int _sun4d_build_device_irq(unsigned int real_irq,
299 unsigned int pil,
300 unsigned int board)
301{
302 struct sun4d_handler_data *handler_data;
303 unsigned int irq;
304
305 irq = irq_alloc(real_irq, pil);
306 if (irq == 0) {
307 prom_printf("IRQ: allocate for %d %d %d failed\n",
308 real_irq, pil, board);
309 goto err_out;
310 }
311
312 handler_data = irq_get_handler_data(irq);
313 if (unlikely(handler_data))
314 goto err_out;
315
316 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
317 if (unlikely(!handler_data)) {
318 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
319 prom_halt();
320 }
321 handler_data->cpuid = board_to_cpu[board];
322 handler_data->real_irq = real_irq;
323 irq_set_chip_and_handler_name(irq, &sun4d_irq,
324 handle_level_irq, "level");
325 irq_set_handler_data(irq, handler_data);
326
327err_out:
328 return irq;
329}
330
331
332
333unsigned int sun4d_build_device_irq(struct platform_device *op,
334 unsigned int real_irq)
335{
336 struct device_node *dp = op->dev.of_node;
337 struct device_node *board_parent, *bus = dp->parent;
338 char *bus_connection;
339 const struct linux_prom_registers *regs;
340 unsigned int pil;
341 unsigned int irq;
342 int board, slot;
343 int sbusl;
344
345 irq = real_irq;
346 while (bus) {
347 if (!strcmp(bus->name, "sbi")) {
348 bus_connection = "io-unit";
349 break;
350 }
351
352 if (!strcmp(bus->name, "bootbus")) {
353 bus_connection = "cpu-unit";
354 break;
355 }
356
357 bus = bus->parent;
358 }
359 if (!bus)
360 goto err_out;
361
362 regs = of_get_property(dp, "reg", NULL);
363 if (!regs)
364 goto err_out;
365
366 slot = regs->which_io;
367
368 /*
369 * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit
370 * lacks a "board#" property, something is very wrong.
371 */
372 if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
373 printk(KERN_ERR "%s: Error, parent is not %s.\n",
374 bus->full_name, bus_connection);
375 goto err_out;
376 }
377 board_parent = bus->parent;
378 board = of_getintprop_default(board_parent, "board#", -1);
379 if (board == -1) {
380 printk(KERN_ERR "%s: Error, lacks board# property.\n",
381 board_parent->full_name);
382 goto err_out;
383 }
384
385 sbusl = pil_to_sbus[real_irq];
386 if (sbusl)
387 pil = sun4d_encode_irq(board, sbusl, slot);
388 else
389 pil = real_irq;
390
391 irq = _sun4d_build_device_irq(real_irq, pil, board);
392err_out:
393 return irq;
394}
395
396unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
397{
398 return _sun4d_build_device_irq(real_irq, real_irq, board);
399}
400
401
465static void __init sun4d_fixup_trap_table(void) 402static void __init sun4d_fixup_trap_table(void)
466{ 403{
467#ifdef CONFIG_SMP 404#ifdef CONFIG_SMP
468 unsigned long flags; 405 unsigned long flags;
469 extern unsigned long lvl14_save[4];
470 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; 406 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
471 extern unsigned int real_irq_entry[], smp4d_ticker[];
472 extern unsigned int patchme_maybe_smp_msg[];
473 407
474 /* Adjust so that we jump directly to smp4d_ticker */ 408 /* Adjust so that we jump directly to smp4d_ticker */
475 lvl14_save[2] += smp4d_ticker - real_irq_entry; 409 lvl14_save[2] += smp4d_ticker - real_irq_entry;
@@ -493,8 +427,10 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
493{ 427{
494 struct device_node *dp; 428 struct device_node *dp;
495 struct resource res; 429 struct resource res;
430 unsigned int irq;
496 const u32 *reg; 431 const u32 *reg;
497 int err; 432 int err;
433 int board;
498 434
499 dp = of_find_node_by_name(NULL, "cpu-unit"); 435 dp = of_find_node_by_name(NULL, "cpu-unit");
500 if (!dp) { 436 if (!dp) {
@@ -507,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
507 * bootbus. 443 * bootbus.
508 */ 444 */
509 reg = of_get_property(dp, "reg", NULL); 445 reg = of_get_property(dp, "reg", NULL);
510 of_node_put(dp);
511 if (!reg) { 446 if (!reg) {
512 prom_printf("sun4d_init_timers: No reg property\n"); 447 prom_printf("sun4d_init_timers: No reg property\n");
513 prom_halt(); 448 prom_halt();
514 } 449 }
515 450
451 board = of_getintprop_default(dp, "board#", -1);
452 if (board == -1) {
453 prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
454 prom_halt();
455 }
456
457 of_node_put(dp);
458
516 res.start = reg[1]; 459 res.start = reg[1];
517 res.end = reg[2] - 1; 460 res.end = reg[2] - 1;
518 res.flags = reg[0] & 0xff; 461 res.flags = reg[0] & 0xff;
@@ -527,11 +470,11 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
527 470
528 master_l10_counter = &sun4d_timers->l10_cur_count; 471 master_l10_counter = &sun4d_timers->l10_cur_count;
529 472
530 err = request_irq(TIMER_IRQ, counter_fn, 473 irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
531 (IRQF_DISABLED | SA_STATIC_ALLOC), 474 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
532 "timer", NULL);
533 if (err) { 475 if (err) {
534 prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err); 476 prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
477 err);
535 prom_halt(); 478 prom_halt();
536 } 479 }
537 sun4d_load_profile_irqs(); 480 sun4d_load_profile_irqs();
@@ -541,32 +484,22 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
541void __init sun4d_init_sbi_irq(void) 484void __init sun4d_init_sbi_irq(void)
542{ 485{
543 struct device_node *dp; 486 struct device_node *dp;
544 int target_cpu = 0; 487 int target_cpu;
545 488
546#ifdef CONFIG_SMP
547 target_cpu = boot_cpu_id; 489 target_cpu = boot_cpu_id;
548#endif
549
550 nsbi = 0;
551 for_each_node_by_name(dp, "sbi")
552 nsbi++;
553 sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
554 if (!sbus_actions) {
555 prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
556 prom_halt();
557 }
558 for_each_node_by_name(dp, "sbi") { 490 for_each_node_by_name(dp, "sbi") {
559 int devid = of_getintprop_default(dp, "device-id", 0); 491 int devid = of_getintprop_default(dp, "device-id", 0);
560 int board = of_getintprop_default(dp, "board#", 0); 492 int board = of_getintprop_default(dp, "board#", 0);
561 unsigned int mask; 493 unsigned int mask;
562 494
563 set_sbi_tid(devid, target_cpu << 3); 495 set_sbi_tid(devid, target_cpu << 3);
564 sbus_tid[board] = target_cpu; 496 board_to_cpu[board] = target_cpu;
565 497
566 /* Get rid of pending irqs from PROM */ 498 /* Get rid of pending irqs from PROM */
567 mask = acquire_sbi(devid, 0xffffffff); 499 mask = acquire_sbi(devid, 0xffffffff);
568 if (mask) { 500 if (mask) {
569 printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board); 501 printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
502 mask, board);
570 release_sbi(devid, mask); 503 release_sbi(devid, mask);
571 } 504 }
572 } 505 }
@@ -576,11 +509,12 @@ void __init sun4d_init_IRQ(void)
576{ 509{
577 local_irq_disable(); 510 local_irq_disable();
578 511
579 BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
580 BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
581 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM); 512 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
582 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); 513 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
583 sparc_init_timers = sun4d_init_timers; 514
515 sparc_irq_config.init_timers = sun4d_init_timers;
516 sparc_irq_config.build_device_irq = sun4d_build_device_irq;
517
584#ifdef CONFIG_SMP 518#ifdef CONFIG_SMP
585 BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM); 519 BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
586 BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP); 520 BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 482f2ab92692..133387980b56 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -1,4 +1,4 @@
1/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support. 1/* Sparc SS1000/SC2000 SMP support.
2 * 2 *
3 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 3 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 * 4 *
@@ -6,59 +6,23 @@
6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 6 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
7 */ 7 */
8 8
9#include <asm/head.h>
10
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/threads.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h> 9#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include <linux/mm.h>
20#include <linux/swap.h>
21#include <linux/profile.h> 10#include <linux/profile.h>
22#include <linux/delay.h> 11#include <linux/delay.h>
23#include <linux/cpu.h> 12#include <linux/cpu.h>
24 13
25#include <asm/ptrace.h>
26#include <asm/atomic.h>
27#include <asm/irq_regs.h>
28
29#include <asm/irq.h>
30#include <asm/page.h>
31#include <asm/pgalloc.h>
32#include <asm/pgtable.h>
33#include <asm/oplib.h>
34#include <asm/sbi.h> 14#include <asm/sbi.h>
15#include <asm/mmu.h>
35#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
36#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
37#include <asm/cpudata.h>
38 18
19#include "kernel.h"
39#include "irq.h" 20#include "irq.h"
40#define IRQ_CROSS_CALL 15
41 21
42extern ctxd_t *srmmu_ctx_table_phys; 22#define IRQ_CROSS_CALL 15
43 23
44static volatile int smp_processors_ready = 0; 24static volatile int smp_processors_ready;
45static int smp_highest_cpu; 25static int smp_highest_cpu;
46extern volatile unsigned long cpu_callin_map[NR_CPUS];
47extern cpuinfo_sparc cpu_data[NR_CPUS];
48extern unsigned char boot_cpu_id;
49extern volatile int smp_process_available;
50
51extern cpumask_t smp_commenced_mask;
52
53extern int __smp4d_processor_id(void);
54
55/* #define SMP_DEBUG */
56
57#ifdef SMP_DEBUG
58#define SMP_PRINTK(x) printk x
59#else
60#define SMP_PRINTK(x)
61#endif
62 26
63static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val) 27static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
64{ 28{
@@ -68,9 +32,8 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
68 return val; 32 return val;
69} 33}
70 34
35static void smp4d_ipi_init(void);
71static void smp_setup_percpu_timer(void); 36static void smp_setup_percpu_timer(void);
72extern void cpu_probe(void);
73extern void sun4d_distribute_irqs(void);
74 37
75static unsigned char cpu_leds[32]; 38static unsigned char cpu_leds[32];
76 39
@@ -86,9 +49,8 @@ static inline void show_leds(int cpuid)
86void __cpuinit smp4d_callin(void) 49void __cpuinit smp4d_callin(void)
87{ 50{
88 int cpuid = hard_smp4d_processor_id(); 51 int cpuid = hard_smp4d_processor_id();
89 extern spinlock_t sun4d_imsk_lock;
90 unsigned long flags; 52 unsigned long flags;
91 53
92 /* Show we are alive */ 54 /* Show we are alive */
93 cpu_leds[cpuid] = 0x6; 55 cpu_leds[cpuid] = 0x6;
94 show_leds(cpuid); 56 show_leds(cpuid);
@@ -118,15 +80,13 @@ void __cpuinit smp4d_callin(void)
118 sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); 80 sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
119 local_flush_cache_all(); 81 local_flush_cache_all();
120 local_flush_tlb_all(); 82 local_flush_tlb_all();
121
122 cpu_probe();
123 83
124 while((unsigned long)current_set[cpuid] < PAGE_OFFSET) 84 while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
125 barrier(); 85 barrier();
126 86
127 while(current_set[cpuid]->cpu != cpuid) 87 while (current_set[cpuid]->cpu != cpuid)
128 barrier(); 88 barrier();
129 89
130 /* Fix idle thread fields. */ 90 /* Fix idle thread fields. */
131 __asm__ __volatile__("ld [%0], %%g6\n\t" 91 __asm__ __volatile__("ld [%0], %%g6\n\t"
132 : : "r" (&current_set[cpuid]) 92 : : "r" (&current_set[cpuid])
@@ -134,17 +94,17 @@ void __cpuinit smp4d_callin(void)
134 94
135 cpu_leds[cpuid] = 0x9; 95 cpu_leds[cpuid] = 0x9;
136 show_leds(cpuid); 96 show_leds(cpuid);
137 97
138 /* Attach to the address space of init_task. */ 98 /* Attach to the address space of init_task. */
139 atomic_inc(&init_mm.mm_count); 99 atomic_inc(&init_mm.mm_count);
140 current->active_mm = &init_mm; 100 current->active_mm = &init_mm;
141 101
142 local_flush_cache_all(); 102 local_flush_cache_all();
143 local_flush_tlb_all(); 103 local_flush_tlb_all();
144 104
145 local_irq_enable(); /* We don't allow PIL 14 yet */ 105 local_irq_enable(); /* We don't allow PIL 14 yet */
146 106
147 while (!cpu_isset(cpuid, smp_commenced_mask)) 107 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
148 barrier(); 108 barrier();
149 109
150 spin_lock_irqsave(&sun4d_imsk_lock, flags); 110 spin_lock_irqsave(&sun4d_imsk_lock, flags);
@@ -154,17 +114,12 @@ void __cpuinit smp4d_callin(void)
154 114
155} 115}
156 116
157extern void init_IRQ(void);
158extern void cpu_panic(void);
159
160/* 117/*
161 * Cycle through the processors asking the PROM to start each one. 118 * Cycle through the processors asking the PROM to start each one.
162 */ 119 */
163
164extern struct linux_prom_registers smp_penguin_ctable;
165
166void __init smp4d_boot_cpus(void) 120void __init smp4d_boot_cpus(void)
167{ 121{
122 smp4d_ipi_init();
168 if (boot_cpu_id) 123 if (boot_cpu_id)
169 current_set[0] = NULL; 124 current_set[0] = NULL;
170 smp_setup_percpu_timer(); 125 smp_setup_percpu_timer();
@@ -173,43 +128,42 @@ void __init smp4d_boot_cpus(void)
173 128
174int __cpuinit smp4d_boot_one_cpu(int i) 129int __cpuinit smp4d_boot_one_cpu(int i)
175{ 130{
176 extern unsigned long sun4d_cpu_startup; 131 unsigned long *entry = &sun4d_cpu_startup;
177 unsigned long *entry = &sun4d_cpu_startup; 132 struct task_struct *p;
178 struct task_struct *p; 133 int timeout;
179 int timeout; 134 int cpu_node;
180 int cpu_node;
181 135
182 cpu_find_by_instance(i, &cpu_node,NULL); 136 cpu_find_by_instance(i, &cpu_node, NULL);
183 /* Cook up an idler for this guy. */ 137 /* Cook up an idler for this guy. */
184 p = fork_idle(i); 138 p = fork_idle(i);
185 current_set[i] = task_thread_info(p); 139 current_set[i] = task_thread_info(p);
140
141 /*
142 * Initialize the contexts table
143 * Since the call to prom_startcpu() trashes the structure,
144 * we need to re-initialize it for each cpu
145 */
146 smp_penguin_ctable.which_io = 0;
147 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
148 smp_penguin_ctable.reg_size = 0;
149
150 /* whirrr, whirrr, whirrrrrrrrr... */
151 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
152 local_flush_cache_all();
153 prom_startcpu(cpu_node,
154 &smp_penguin_ctable, 0, (char *)entry);
155
156 printk(KERN_INFO "prom_startcpu returned :)\n");
157
158 /* wheee... it's going... */
159 for (timeout = 0; timeout < 10000; timeout++) {
160 if (cpu_callin_map[i])
161 break;
162 udelay(200);
163 }
186 164
187 /*
188 * Initialize the contexts table
189 * Since the call to prom_startcpu() trashes the structure,
190 * we need to re-initialize it for each cpu
191 */
192 smp_penguin_ctable.which_io = 0;
193 smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
194 smp_penguin_ctable.reg_size = 0;
195
196 /* whirrr, whirrr, whirrrrrrrrr... */
197 SMP_PRINTK(("Starting CPU %d at %p\n", i, entry));
198 local_flush_cache_all();
199 prom_startcpu(cpu_node,
200 &smp_penguin_ctable, 0, (char *)entry);
201
202 SMP_PRINTK(("prom_startcpu returned :)\n"));
203
204 /* wheee... it's going... */
205 for(timeout = 0; timeout < 10000; timeout++) {
206 if(cpu_callin_map[i])
207 break;
208 udelay(200);
209 }
210
211 if (!(cpu_callin_map[i])) { 165 if (!(cpu_callin_map[i])) {
212 printk("Processor %d is stuck.\n", i); 166 printk(KERN_ERR "Processor %d is stuck.\n", i);
213 return -ENODEV; 167 return -ENODEV;
214 168
215 } 169 }
@@ -237,6 +191,80 @@ void __init smp4d_smp_done(void)
237 sun4d_distribute_irqs(); 191 sun4d_distribute_irqs();
238} 192}
239 193
194/* Memory structure giving interrupt handler information about IPI generated */
195struct sun4d_ipi_work {
196 int single;
197 int msk;
198 int resched;
199};
200
201static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);
202
203/* Initialize IPIs on the SUN4D SMP machine */
204static void __init smp4d_ipi_init(void)
205{
206 int cpu;
207 struct sun4d_ipi_work *work;
208
209 printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);
210
211 for_each_possible_cpu(cpu) {
212 work = &per_cpu(sun4d_ipi_work, cpu);
213 work->single = work->msk = work->resched = 0;
214 }
215}
216
217void sun4d_ipi_interrupt(void)
218{
219 struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
220
221 if (work->single) {
222 work->single = 0;
223 smp_call_function_single_interrupt();
224 }
225 if (work->msk) {
226 work->msk = 0;
227 smp_call_function_interrupt();
228 }
229 if (work->resched) {
230 work->resched = 0;
231 smp_resched_interrupt();
232 }
233}
234
235static void smp4d_ipi_single(int cpu)
236{
237 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
238
239 /* Mark work */
240 work->single = 1;
241
242 /* Generate IRQ on the CPU */
243 sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
244}
245
246static void smp4d_ipi_mask_one(int cpu)
247{
248 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
249
250 /* Mark work */
251 work->msk = 1;
252
253 /* Generate IRQ on the CPU */
254 sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
255}
256
257static void smp4d_ipi_resched(int cpu)
258{
259 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
260
261 /* Mark work */
262 work->resched = 1;
263
264 /* Generate IRQ on the CPU (any IRQ will cause resched) */
265 sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
266}
267
240static struct smp_funcall { 268static struct smp_funcall {
241 smpfunc_t func; 269 smpfunc_t func;
242 unsigned long arg1; 270 unsigned long arg1;
@@ -255,14 +283,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
255 unsigned long arg2, unsigned long arg3, 283 unsigned long arg2, unsigned long arg3,
256 unsigned long arg4) 284 unsigned long arg4)
257{ 285{
258 if(smp_processors_ready) { 286 if (smp_processors_ready) {
259 register int high = smp_highest_cpu; 287 register int high = smp_highest_cpu;
260 unsigned long flags; 288 unsigned long flags;
261 289
262 spin_lock_irqsave(&cross_call_lock, flags); 290 spin_lock_irqsave(&cross_call_lock, flags);
263 291
264 { 292 {
265 /* If you make changes here, make sure gcc generates proper code... */ 293 /*
294 * If you make changes here, make sure
295 * gcc generates proper code...
296 */
266 register smpfunc_t f asm("i0") = func; 297 register smpfunc_t f asm("i0") = func;
267 register unsigned long a1 asm("i1") = arg1; 298 register unsigned long a1 asm("i1") = arg1;
268 register unsigned long a2 asm("i2") = arg2; 299 register unsigned long a2 asm("i2") = arg2;
@@ -282,10 +313,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
282 { 313 {
283 register int i; 314 register int i;
284 315
285 cpu_clear(smp_processor_id(), mask); 316 cpumask_clear_cpu(smp_processor_id(), &mask);
286 cpus_and(mask, cpu_online_map, mask); 317 cpumask_and(&mask, cpu_online_mask, &mask);
287 for(i = 0; i <= high; i++) { 318 for (i = 0; i <= high; i++) {
288 if (cpu_isset(i, mask)) { 319 if (cpumask_test_cpu(i, &mask)) {
289 ccall_info.processors_in[i] = 0; 320 ccall_info.processors_in[i] = 0;
290 ccall_info.processors_out[i] = 0; 321 ccall_info.processors_out[i] = 0;
291 sun4d_send_ipi(i, IRQ_CROSS_CALL); 322 sun4d_send_ipi(i, IRQ_CROSS_CALL);
@@ -298,19 +329,19 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
298 329
299 i = 0; 330 i = 0;
300 do { 331 do {
301 if (!cpu_isset(i, mask)) 332 if (!cpumask_test_cpu(i, &mask))
302 continue; 333 continue;
303 while(!ccall_info.processors_in[i]) 334 while (!ccall_info.processors_in[i])
304 barrier(); 335 barrier();
305 } while(++i <= high); 336 } while (++i <= high);
306 337
307 i = 0; 338 i = 0;
308 do { 339 do {
309 if (!cpu_isset(i, mask)) 340 if (!cpumask_test_cpu(i, &mask))
310 continue; 341 continue;
311 while(!ccall_info.processors_out[i]) 342 while (!ccall_info.processors_out[i])
312 barrier(); 343 barrier();
313 } while(++i <= high); 344 } while (++i <= high);
314 } 345 }
315 346
316 spin_unlock_irqrestore(&cross_call_lock, flags); 347 spin_unlock_irqrestore(&cross_call_lock, flags);
@@ -336,7 +367,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
336 static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd }; 367 static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
337 368
338 old_regs = set_irq_regs(regs); 369 old_regs = set_irq_regs(regs);
339 bw_get_prof_limit(cpu); 370 bw_get_prof_limit(cpu);
340 bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */ 371 bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
341 372
342 cpu_tick[cpu]++; 373 cpu_tick[cpu]++;
@@ -349,7 +380,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
349 380
350 profile_tick(CPU_PROFILING); 381 profile_tick(CPU_PROFILING);
351 382
352 if(!--prof_counter(cpu)) { 383 if (!--prof_counter(cpu)) {
353 int user = user_mode(regs); 384 int user = user_mode(regs);
354 385
355 irq_enter(); 386 irq_enter();
@@ -361,8 +392,6 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
361 set_irq_regs(old_regs); 392 set_irq_regs(old_regs);
362} 393}
363 394
364extern unsigned int lvl14_resolution;
365
366static void __cpuinit smp_setup_percpu_timer(void) 395static void __cpuinit smp_setup_percpu_timer(void)
367{ 396{
368 int cpu = hard_smp4d_processor_id(); 397 int cpu = hard_smp4d_processor_id();
@@ -374,16 +403,16 @@ static void __cpuinit smp_setup_percpu_timer(void)
374void __init smp4d_blackbox_id(unsigned *addr) 403void __init smp4d_blackbox_id(unsigned *addr)
375{ 404{
376 int rd = *addr & 0x3e000000; 405 int rd = *addr & 0x3e000000;
377 406
378 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */ 407 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
379 addr[1] = 0x01000000; /* nop */ 408 addr[1] = 0x01000000; /* nop */
380 addr[2] = 0x01000000; /* nop */ 409 addr[2] = 0x01000000; /* nop */
381} 410}
382 411
383void __init smp4d_blackbox_current(unsigned *addr) 412void __init smp4d_blackbox_current(unsigned *addr)
384{ 413{
385 int rd = *addr & 0x3e000000; 414 int rd = *addr & 0x3e000000;
386 415
387 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */ 416 addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
388 addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */ 417 addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
389 addr[4] = 0x01000000; /* nop */ 418 addr[4] = 0x01000000; /* nop */
@@ -392,17 +421,19 @@ void __init smp4d_blackbox_current(unsigned *addr)
392void __init sun4d_init_smp(void) 421void __init sun4d_init_smp(void)
393{ 422{
394 int i; 423 int i;
395 extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
396 424
397 /* Patch ipi15 trap table */ 425 /* Patch ipi15 trap table */
398 t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m); 426 t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
399 427
400 /* And set btfixup... */ 428 /* And set btfixup... */
401 BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id); 429 BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
402 BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current); 430 BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
403 BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM); 431 BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
404 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM); 432 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
405 433 BTFIXUPSET_CALL(smp_ipi_resched, smp4d_ipi_resched, BTFIXUPCALL_NORM);
434 BTFIXUPSET_CALL(smp_ipi_single, smp4d_ipi_single, BTFIXUPCALL_NORM);
435 BTFIXUPSET_CALL(smp_ipi_mask_one, smp4d_ipi_mask_one, BTFIXUPCALL_NORM);
436
406 for (i = 0; i < NR_CPUS; i++) { 437 for (i = 0; i < NR_CPUS; i++) {
407 ccall_info.processors_in[i] = 1; 438 ccall_info.processors_in[i] = 1;
408 ccall_info.processors_out[i] = 1; 439 ccall_info.processors_out[i] = 1;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 7f3b97ff62c1..422c16dad1f6 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -1,5 +1,5 @@
1/* sun4m_irq.c 1/*
2 * arch/sparc/kernel/sun4m_irq.c: 2 * sun4m irq support
3 * 3 *
4 * djhr: Hacked out of irq.c into a CPU dependent version. 4 * djhr: Hacked out of irq.c into a CPU dependent version.
5 * 5 *
@@ -9,101 +9,44 @@
9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) 9 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
10 */ 10 */
11 11
12#include <linux/errno.h>
13#include <linux/linkage.h>
14#include <linux/kernel_stat.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/ptrace.h>
18#include <linux/smp.h>
19#include <linux/interrupt.h>
20#include <linux/init.h>
21#include <linux/ioport.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24
25#include <asm/ptrace.h>
26#include <asm/processor.h>
27#include <asm/system.h>
28#include <asm/psr.h>
29#include <asm/vaddrs.h>
30#include <asm/timer.h> 12#include <asm/timer.h>
31#include <asm/openprom.h>
32#include <asm/oplib.h>
33#include <asm/traps.h> 13#include <asm/traps.h>
34#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
35#include <asm/pgtable.h> 15#include <asm/pgtable.h>
36#include <asm/smp.h>
37#include <asm/irq.h> 16#include <asm/irq.h>
38#include <asm/io.h> 17#include <asm/io.h>
39#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
40 19
41#include "irq.h" 20#include "irq.h"
21#include "kernel.h"
42 22
43struct sun4m_irq_percpu { 23/* Sample sun4m IRQ layout:
44 u32 pending; 24 *
45 u32 clear; 25 * 0x22 - Power
46 u32 set; 26 * 0x24 - ESP SCSI
47}; 27 * 0x26 - Lance ethernet
48 28 * 0x2b - Floppy
49struct sun4m_irq_global { 29 * 0x2c - Zilog uart
50 u32 pending; 30 * 0x32 - SBUS level 0
51 u32 mask; 31 * 0x33 - Parallel port, SBUS level 1
52 u32 mask_clear; 32 * 0x35 - SBUS level 2
53 u32 mask_set; 33 * 0x37 - SBUS level 3
54 u32 interrupt_target; 34 * 0x39 - Audio, Graphics card, SBUS level 4
55}; 35 * 0x3b - SBUS level 5
56 36 * 0x3d - SBUS level 6
57/* Code in entry.S needs to get at these register mappings. */ 37 *
58struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; 38 * Each interrupt source has a mask bit in the interrupt registers.
59struct sun4m_irq_global __iomem *sun4m_irq_global; 39 * When the mask bit is set, this blocks interrupt deliver. So you
60 40 * clear the bit to enable the interrupt.
61/* Dave Redman (djhr@tadpole.co.uk) 41 *
62 * The sun4m interrupt registers. 42 * Interrupts numbered less than 0x10 are software triggered interrupts
63 */ 43 * and unused by Linux.
64#define SUN4M_INT_ENABLE 0x80000000 44 *
65#define SUN4M_INT_E14 0x00000080 45 * Interrupt level assignment on sun4m:
66#define SUN4M_INT_E10 0x00080000
67
68#define SUN4M_HARD_INT(x) (0x000000001 << (x))
69#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
70
71#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
72#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
73#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
74#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
75#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
76#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
77#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
78#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
79#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
80#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
81#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
82#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
83#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
84#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
85#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
86#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
87
88#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
89 SUN4M_INT_M2S_WRITE_ERR | \
90 SUN4M_INT_ECC_ERR | \
91 SUN4M_INT_VME_ERR)
92
93#define SUN4M_INT_SBUS(x) (1 << (x+7))
94#define SUN4M_INT_VME(x) (1 << (x))
95
96/* Interrupt levels used by OBP */
97#define OBP_INT_LEVEL_SOFT 0x10
98#define OBP_INT_LEVEL_ONBOARD 0x20
99#define OBP_INT_LEVEL_SBUS 0x30
100#define OBP_INT_LEVEL_VME 0x40
101
102/* Interrupt level assignment on sun4m:
103 * 46 *
104 * level source 47 * level source
105 * ------------------------------------------------------------ 48 * ------------------------------------------------------------
106 * 1 softint-1 49 * 1 softint-1
107 * 2 softint-2, VME/SBUS level 1 50 * 2 softint-2, VME/SBUS level 1
108 * 3 softint-3, VME/SBUS level 2 51 * 3 softint-3, VME/SBUS level 2
109 * 4 softint-4, onboard SCSI 52 * 4 softint-4, onboard SCSI
@@ -138,10 +81,10 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
138 * 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and 81 * 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and
139 * Tadpole S3 GX systems. 82 * Tadpole S3 GX systems.
140 * 83 *
141 * esp: 0x24 onboard ESP SCSI 84 * esp: 0x24 onboard ESP SCSI
142 * le: 0x26 onboard Lance ETHERNET 85 * le: 0x26 onboard Lance ETHERNET
143 * p9100: 0x32 SBUS level 1 P9100 video 86 * p9100: 0x32 SBUS level 1 P9100 video
144 * bpp: 0x33 SBUS level 2 BPP parallel port device 87 * bpp: 0x33 SBUS level 2 BPP parallel port device
145 * DBRI: 0x39 SBUS level 5 DBRI ISDN audio 88 * DBRI: 0x39 SBUS level 5 DBRI ISDN audio
146 * SUNW,leo: 0x39 SBUS level 5 LEO video 89 * SUNW,leo: 0x39 SBUS level 5 LEO video
147 * pcmcia: 0x3b SBUS level 6 PCMCIA controller 90 * pcmcia: 0x3b SBUS level 6 PCMCIA controller
@@ -152,8 +95,62 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
152 * power: 0x22 onboard power device (XXX unknown mask bit XXX) 95 * power: 0x22 onboard power device (XXX unknown mask bit XXX)
153 */ 96 */
154 97
155static unsigned long irq_mask[0x50] = { 98
156 /* SMP */ 99/* Code in entry.S needs to get at these register mappings. */
100struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
101struct sun4m_irq_global __iomem *sun4m_irq_global;
102
103struct sun4m_handler_data {
104 bool percpu;
105 long mask;
106};
107
108/* Dave Redman (djhr@tadpole.co.uk)
109 * The sun4m interrupt registers.
110 */
111#define SUN4M_INT_ENABLE 0x80000000
112#define SUN4M_INT_E14 0x00000080
113#define SUN4M_INT_E10 0x00080000
114
115#define SUN4M_HARD_INT(x) (0x000000001 << (x))
116#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
117
118#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
119#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
120#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
121#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
122#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
123#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
124#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
125#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
126#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
127#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
128#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
129#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
130#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
131#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
132#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
133#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
134
135#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
136 SUN4M_INT_M2S_WRITE_ERR | \
137 SUN4M_INT_ECC_ERR | \
138 SUN4M_INT_VME_ERR)
139
140#define SUN4M_INT_SBUS(x) (1 << (x+7))
141#define SUN4M_INT_VME(x) (1 << (x))
142
143/* Interrupt levels used by OBP */
144#define OBP_INT_LEVEL_SOFT 0x10
145#define OBP_INT_LEVEL_ONBOARD 0x20
146#define OBP_INT_LEVEL_SBUS 0x30
147#define OBP_INT_LEVEL_VME 0x40
148
149#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
150#define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
151
152static unsigned long sun4m_imask[0x50] = {
153 /* 0x00 - SMP */
157 0, SUN4M_SOFT_INT(1), 154 0, SUN4M_SOFT_INT(1),
158 SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), 155 SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
159 SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), 156 SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -162,7 +159,7 @@ static unsigned long irq_mask[0x50] = {
162 SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), 159 SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
163 SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), 160 SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
164 SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), 161 SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
165 /* soft */ 162 /* 0x10 - soft */
166 0, SUN4M_SOFT_INT(1), 163 0, SUN4M_SOFT_INT(1),
167 SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), 164 SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
168 SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), 165 SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -171,122 +168,129 @@ static unsigned long irq_mask[0x50] = {
171 SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), 168 SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
172 SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), 169 SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
173 SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), 170 SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
174 /* onboard */ 171 /* 0x20 - onboard */
175 0, 0, 0, 0, 172 0, 0, 0, 0,
176 SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0, 173 SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
177 SUN4M_INT_VIDEO, SUN4M_INT_MODULE, 174 SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
178 SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY, 175 SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
179 (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), 176 (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
180 SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR, 177 SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR,
181 /* sbus */ 178 /* 0x30 - sbus */
182 0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1), 179 0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
183 0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3), 180 0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
184 0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5), 181 0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
185 0, SUN4M_INT_SBUS(6), 0, 0, 182 0, SUN4M_INT_SBUS(6), 0, 0,
186 /* vme */ 183 /* 0x40 - vme */
187 0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1), 184 0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
188 0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3), 185 0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
189 0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5), 186 0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
190 0, SUN4M_INT_VME(6), 0, 0 187 0, SUN4M_INT_VME(6), 0, 0
191}; 188};
192 189
193static unsigned long sun4m_get_irqmask(unsigned int irq) 190static void sun4m_mask_irq(struct irq_data *data)
194{
195 unsigned long mask;
196
197 if (irq < 0x50)
198 mask = irq_mask[irq];
199 else
200 mask = 0;
201
202 if (!mask)
203 printk(KERN_ERR "sun4m_get_irqmask: IRQ%d has no valid mask!\n",
204 irq);
205
206 return mask;
207}
208
209static void sun4m_disable_irq(unsigned int irq_nr)
210{ 191{
211 unsigned long mask, flags; 192 struct sun4m_handler_data *handler_data = data->handler_data;
212 int cpu = smp_processor_id(); 193 int cpu = smp_processor_id();
213 194
214 mask = sun4m_get_irqmask(irq_nr); 195 if (handler_data->mask) {
215 local_irq_save(flags); 196 unsigned long flags;
216 if (irq_nr > 15) 197
217 sbus_writel(mask, &sun4m_irq_global->mask_set); 198 local_irq_save(flags);
218 else 199 if (handler_data->percpu) {
219 sbus_writel(mask, &sun4m_irq_percpu[cpu]->set); 200 sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
220 local_irq_restore(flags); 201 } else {
202 sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set);
203 }
204 local_irq_restore(flags);
205 }
221} 206}
222 207
223static void sun4m_enable_irq(unsigned int irq_nr) 208static void sun4m_unmask_irq(struct irq_data *data)
224{ 209{
225 unsigned long mask, flags; 210 struct sun4m_handler_data *handler_data = data->handler_data;
226 int cpu = smp_processor_id(); 211 int cpu = smp_processor_id();
227 212
228 /* Dreadful floppy hack. When we use 0x2b instead of 213 if (handler_data->mask) {
229 * 0x0b the system blows (it starts to whistle!). 214 unsigned long flags;
230 * So we continue to use 0x0b. Fixme ASAP. --P3 215
231 */
232 if (irq_nr != 0x0b) {
233 mask = sun4m_get_irqmask(irq_nr);
234 local_irq_save(flags);
235 if (irq_nr > 15)
236 sbus_writel(mask, &sun4m_irq_global->mask_clear);
237 else
238 sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
239 local_irq_restore(flags);
240 } else {
241 local_irq_save(flags); 216 local_irq_save(flags);
242 sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear); 217 if (handler_data->percpu) {
218 sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
219 } else {
220 sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear);
221 }
243 local_irq_restore(flags); 222 local_irq_restore(flags);
244 } 223 }
245} 224}
246 225
247static unsigned long cpu_pil_to_imask[16] = { 226static unsigned int sun4m_startup_irq(struct irq_data *data)
248/*0*/ 0x00000000, 227{
249/*1*/ 0x00000000, 228 irq_link(data->irq);
250/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0), 229 sun4m_unmask_irq(data);
251/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1), 230 return 0;
252/*4*/ SUN4M_INT_SCSI, 231}
253/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
254/*6*/ SUN4M_INT_ETHERNET,
255/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
256/*8*/ SUN4M_INT_VIDEO,
257/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
258/*10*/ SUN4M_INT_REALTIME,
259/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
260/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
261/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
262/*14*/ SUN4M_INT_E14,
263/*15*/ SUN4M_INT_ERROR
264};
265 232
266/* We assume the caller has disabled local interrupts when these are called, 233static void sun4m_shutdown_irq(struct irq_data *data)
267 * or else very bizarre behavior will result.
268 */
269static void sun4m_disable_pil_irq(unsigned int pil)
270{ 234{
271 sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_set); 235 sun4m_mask_irq(data);
236 irq_unlink(data->irq);
272} 237}
273 238
274static void sun4m_enable_pil_irq(unsigned int pil) 239static struct irq_chip sun4m_irq = {
240 .name = "sun4m",
241 .irq_startup = sun4m_startup_irq,
242 .irq_shutdown = sun4m_shutdown_irq,
243 .irq_mask = sun4m_mask_irq,
244 .irq_unmask = sun4m_unmask_irq,
245};
246
247
248static unsigned int sun4m_build_device_irq(struct platform_device *op,
249 unsigned int real_irq)
275{ 250{
276 sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_clear); 251 struct sun4m_handler_data *handler_data;
252 unsigned int irq;
253 unsigned int pil;
254
255 if (real_irq >= OBP_INT_LEVEL_VME) {
256 prom_printf("Bogus sun4m IRQ %u\n", real_irq);
257 prom_halt();
258 }
259 pil = (real_irq & 0xf);
260 irq = irq_alloc(real_irq, pil);
261
262 if (irq == 0)
263 goto out;
264
265 handler_data = irq_get_handler_data(irq);
266 if (unlikely(handler_data))
267 goto out;
268
269 handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC);
270 if (unlikely(!handler_data)) {
271 prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n");
272 prom_halt();
273 }
274
275 handler_data->mask = sun4m_imask[real_irq];
276 handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;
277 irq_set_chip_and_handler_name(irq, &sun4m_irq,
278 handle_level_irq, "level");
279 irq_set_handler_data(irq, handler_data);
280
281out:
282 return irq;
277} 283}
278 284
279#ifdef CONFIG_SMP 285#ifdef CONFIG_SMP
280static void sun4m_send_ipi(int cpu, int level) 286static void sun4m_send_ipi(int cpu, int level)
281{ 287{
282 unsigned long mask = sun4m_get_irqmask(level); 288 sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
283 sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
284} 289}
285 290
286static void sun4m_clear_ipi(int cpu, int level) 291static void sun4m_clear_ipi(int cpu, int level)
287{ 292{
288 unsigned long mask = sun4m_get_irqmask(level); 293 sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear);
289 sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
290} 294}
291 295
292static void sun4m_set_udt(int cpu) 296static void sun4m_set_udt(int cpu)
@@ -314,7 +318,6 @@ struct sun4m_timer_global {
314 318
315static struct sun4m_timer_global __iomem *timers_global; 319static struct sun4m_timer_global __iomem *timers_global;
316 320
317#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
318 321
319unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10); 322unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
320 323
@@ -350,7 +353,15 @@ void sun4m_nmi(struct pt_regs *regs)
350 prom_halt(); 353 prom_halt();
351} 354}
352 355
353/* Exported for sun4m_smp.c */ 356void sun4m_unmask_profile_irq(void)
357{
358 unsigned long flags;
359
360 local_irq_save(flags);
361 sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear);
362 local_irq_restore(flags);
363}
364
354void sun4m_clear_profile_irq(int cpu) 365void sun4m_clear_profile_irq(int cpu)
355{ 366{
356 sbus_readl(&timers_percpu[cpu]->l14_limit); 367 sbus_readl(&timers_percpu[cpu]->l14_limit);
@@ -365,6 +376,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
365{ 376{
366 struct device_node *dp = of_find_node_by_name(NULL, "counter"); 377 struct device_node *dp = of_find_node_by_name(NULL, "counter");
367 int i, err, len, num_cpu_timers; 378 int i, err, len, num_cpu_timers;
379 unsigned int irq;
368 const u32 *addr; 380 const u32 *addr;
369 381
370 if (!dp) { 382 if (!dp) {
@@ -391,8 +403,9 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
391 403
392 master_l10_counter = &timers_global->l10_count; 404 master_l10_counter = &timers_global->l10_count;
393 405
394 err = request_irq(TIMER_IRQ, counter_fn, 406 irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
395 (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); 407
408 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
396 if (err) { 409 if (err) {
397 printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n", 410 printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
398 err); 411 err);
@@ -407,7 +420,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
407#ifdef CONFIG_SMP 420#ifdef CONFIG_SMP
408 { 421 {
409 unsigned long flags; 422 unsigned long flags;
410 extern unsigned long lvl14_save[4];
411 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; 423 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
412 424
413 /* For SMP we use the level 14 ticker, however the bootup code 425 /* For SMP we use the level 14 ticker, however the bootup code
@@ -460,13 +472,12 @@ void __init sun4m_init_IRQ(void)
460 if (num_cpu_iregs == 4) 472 if (num_cpu_iregs == 4)
461 sbus_writel(0, &sun4m_irq_global->interrupt_target); 473 sbus_writel(0, &sun4m_irq_global->interrupt_target);
462 474
463 BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
464 BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
465 BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
466 BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
467 BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM); 475 BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
468 BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM); 476 BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
469 sparc_init_timers = sun4m_init_timers; 477
478 sparc_irq_config.init_timers = sun4m_init_timers;
479 sparc_irq_config.build_device_irq = sun4m_build_device_irq;
480
470#ifdef CONFIG_SMP 481#ifdef CONFIG_SMP
471 BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM); 482 BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
472 BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM); 483 BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 762d6eedd944..594768686525 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -1,59 +1,25 @@
1/* sun4m_smp.c: Sparc SUN4M SMP support. 1/*
2 * sun4m SMP support.
2 * 3 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */ 5 */
5 6
6#include <asm/head.h>
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/threads.h>
11#include <linux/smp.h>
12#include <linux/interrupt.h> 7#include <linux/interrupt.h>
13#include <linux/kernel_stat.h>
14#include <linux/init.h>
15#include <linux/spinlock.h>
16#include <linux/mm.h>
17#include <linux/swap.h>
18#include <linux/profile.h> 8#include <linux/profile.h>
19#include <linux/delay.h> 9#include <linux/delay.h>
20#include <linux/cpu.h> 10#include <linux/cpu.h>
21 11
22#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
23#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
24#include <asm/irq_regs.h>
25
26#include <asm/ptrace.h>
27#include <asm/atomic.h>
28
29#include <asm/irq.h>
30#include <asm/page.h>
31#include <asm/pgalloc.h>
32#include <asm/pgtable.h>
33#include <asm/oplib.h>
34#include <asm/cpudata.h>
35 14
36#include "irq.h" 15#include "irq.h"
16#include "kernel.h"
37 17
18#define IRQ_IPI_SINGLE 12
19#define IRQ_IPI_MASK 13
20#define IRQ_IPI_RESCHED 14
38#define IRQ_CROSS_CALL 15 21#define IRQ_CROSS_CALL 15
39 22
40extern ctxd_t *srmmu_ctx_table_phys;
41
42extern volatile unsigned long cpu_callin_map[NR_CPUS];
43extern unsigned char boot_cpu_id;
44
45extern cpumask_t smp_commenced_mask;
46
47extern int __smp4m_processor_id(void);
48
49/*#define SMP_DEBUG*/
50
51#ifdef SMP_DEBUG
52#define SMP_PRINTK(x) printk x
53#else
54#define SMP_PRINTK(x)
55#endif
56
57static inline unsigned long 23static inline unsigned long
58swap_ulong(volatile unsigned long *ptr, unsigned long val) 24swap_ulong(volatile unsigned long *ptr, unsigned long val)
59{ 25{
@@ -63,8 +29,8 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
63 return val; 29 return val;
64} 30}
65 31
32static void smp4m_ipi_init(void);
66static void smp_setup_percpu_timer(void); 33static void smp_setup_percpu_timer(void);
67extern void cpu_probe(void);
68 34
69void __cpuinit smp4m_callin(void) 35void __cpuinit smp4m_callin(void)
70{ 36{
@@ -96,8 +62,6 @@ void __cpuinit smp4m_callin(void)
96 /* XXX: What's up with all the flushes? */ 62 /* XXX: What's up with all the flushes? */
97 local_flush_cache_all(); 63 local_flush_cache_all();
98 local_flush_tlb_all(); 64 local_flush_tlb_all();
99
100 cpu_probe();
101 65
102 /* Fix idle thread fields. */ 66 /* Fix idle thread fields. */
103 __asm__ __volatile__("ld [%0], %%g6\n\t" 67 __asm__ __volatile__("ld [%0], %%g6\n\t"
@@ -108,7 +72,7 @@ void __cpuinit smp4m_callin(void)
108 atomic_inc(&init_mm.mm_count); 72 atomic_inc(&init_mm.mm_count);
109 current->active_mm = &init_mm; 73 current->active_mm = &init_mm;
110 74
111 while (!cpu_isset(cpuid, smp_commenced_mask)) 75 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
112 mb(); 76 mb();
113 77
114 local_irq_enable(); 78 local_irq_enable();
@@ -119,18 +83,15 @@ void __cpuinit smp4m_callin(void)
119/* 83/*
120 * Cycle through the processors asking the PROM to start each one. 84 * Cycle through the processors asking the PROM to start each one.
121 */ 85 */
122
123extern struct linux_prom_registers smp_penguin_ctable;
124
125void __init smp4m_boot_cpus(void) 86void __init smp4m_boot_cpus(void)
126{ 87{
88 smp4m_ipi_init();
127 smp_setup_percpu_timer(); 89 smp_setup_percpu_timer();
128 local_flush_cache_all(); 90 local_flush_cache_all();
129} 91}
130 92
131int __cpuinit smp4m_boot_one_cpu(int i) 93int __cpuinit smp4m_boot_one_cpu(int i)
132{ 94{
133 extern unsigned long sun4m_cpu_startup;
134 unsigned long *entry = &sun4m_cpu_startup; 95 unsigned long *entry = &sun4m_cpu_startup;
135 struct task_struct *p; 96 struct task_struct *p;
136 int timeout; 97 int timeout;
@@ -142,7 +103,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
142 p = fork_idle(i); 103 p = fork_idle(i);
143 current_set[i] = task_thread_info(p); 104 current_set[i] = task_thread_info(p);
144 /* See trampoline.S for details... */ 105 /* See trampoline.S for details... */
145 entry += ((i-1) * 3); 106 entry += ((i - 1) * 3);
146 107
147 /* 108 /*
148 * Initialize the contexts table 109 * Initialize the contexts table
@@ -154,20 +115,19 @@ int __cpuinit smp4m_boot_one_cpu(int i)
154 smp_penguin_ctable.reg_size = 0; 115 smp_penguin_ctable.reg_size = 0;
155 116
156 /* whirrr, whirrr, whirrrrrrrrr... */ 117 /* whirrr, whirrr, whirrrrrrrrr... */
157 printk("Starting CPU %d at %p\n", i, entry); 118 printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
158 local_flush_cache_all(); 119 local_flush_cache_all();
159 prom_startcpu(cpu_node, 120 prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
160 &smp_penguin_ctable, 0, (char *)entry);
161 121
162 /* wheee... it's going... */ 122 /* wheee... it's going... */
163 for(timeout = 0; timeout < 10000; timeout++) { 123 for (timeout = 0; timeout < 10000; timeout++) {
164 if(cpu_callin_map[i]) 124 if (cpu_callin_map[i])
165 break; 125 break;
166 udelay(200); 126 udelay(200);
167 } 127 }
168 128
169 if (!(cpu_callin_map[i])) { 129 if (!(cpu_callin_map[i])) {
170 printk("Processor %d is stuck.\n", i); 130 printk(KERN_ERR "Processor %d is stuck.\n", i);
171 return -ENODEV; 131 return -ENODEV;
172 } 132 }
173 133
@@ -193,17 +153,25 @@ void __init smp4m_smp_done(void)
193 /* Ok, they are spinning and ready to go. */ 153 /* Ok, they are spinning and ready to go. */
194} 154}
195 155
196/* At each hardware IRQ, we get this called to forward IRQ reception 156
197 * to the next processor. The caller must disable the IRQ level being 157/* Initialize IPIs on the SUN4M SMP machine */
198 * serviced globally so that there are no double interrupts received. 158static void __init smp4m_ipi_init(void)
199 *
200 * XXX See sparc64 irq.c.
201 */
202void smp4m_irq_rotate(int cpu)
203{ 159{
204 int next = cpu_data(cpu).next; 160}
205 if (next != cpu) 161
206 set_irq_udt(next); 162static void smp4m_ipi_resched(int cpu)
163{
164 set_cpu_int(cpu, IRQ_IPI_RESCHED);
165}
166
167static void smp4m_ipi_single(int cpu)
168{
169 set_cpu_int(cpu, IRQ_IPI_SINGLE);
170}
171
172static void smp4m_ipi_mask_one(int cpu)
173{
174 set_cpu_int(cpu, IRQ_IPI_MASK);
207} 175}
208 176
209static struct smp_funcall { 177static struct smp_funcall {
@@ -241,10 +209,10 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
241 { 209 {
242 register int i; 210 register int i;
243 211
244 cpu_clear(smp_processor_id(), mask); 212 cpumask_clear_cpu(smp_processor_id(), &mask);
245 cpus_and(mask, cpu_online_map, mask); 213 cpumask_and(&mask, cpu_online_mask, &mask);
246 for(i = 0; i < ncpus; i++) { 214 for (i = 0; i < ncpus; i++) {
247 if (cpu_isset(i, mask)) { 215 if (cpumask_test_cpu(i, &mask)) {
248 ccall_info.processors_in[i] = 0; 216 ccall_info.processors_in[i] = 0;
249 ccall_info.processors_out[i] = 0; 217 ccall_info.processors_out[i] = 0;
250 set_cpu_int(i, IRQ_CROSS_CALL); 218 set_cpu_int(i, IRQ_CROSS_CALL);
@@ -260,21 +228,20 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
260 228
261 i = 0; 229 i = 0;
262 do { 230 do {
263 if (!cpu_isset(i, mask)) 231 if (!cpumask_test_cpu(i, &mask))
264 continue; 232 continue;
265 while(!ccall_info.processors_in[i]) 233 while (!ccall_info.processors_in[i])
266 barrier(); 234 barrier();
267 } while(++i < ncpus); 235 } while (++i < ncpus);
268 236
269 i = 0; 237 i = 0;
270 do { 238 do {
271 if (!cpu_isset(i, mask)) 239 if (!cpumask_test_cpu(i, &mask))
272 continue; 240 continue;
273 while(!ccall_info.processors_out[i]) 241 while (!ccall_info.processors_out[i])
274 barrier(); 242 barrier();
275 } while(++i < ncpus); 243 } while (++i < ncpus);
276 } 244 }
277
278 spin_unlock_irqrestore(&cross_call_lock, flags); 245 spin_unlock_irqrestore(&cross_call_lock, flags);
279} 246}
280 247
@@ -289,8 +256,6 @@ void smp4m_cross_call_irq(void)
289 ccall_info.processors_out[i] = 1; 256 ccall_info.processors_out[i] = 1;
290} 257}
291 258
292extern void sun4m_clear_profile_irq(int cpu);
293
294void smp4m_percpu_timer_interrupt(struct pt_regs *regs) 259void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
295{ 260{
296 struct pt_regs *old_regs; 261 struct pt_regs *old_regs;
@@ -302,7 +267,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
302 267
303 profile_tick(CPU_PROFILING); 268 profile_tick(CPU_PROFILING);
304 269
305 if(!--prof_counter(cpu)) { 270 if (!--prof_counter(cpu)) {
306 int user = user_mode(regs); 271 int user = user_mode(regs);
307 272
308 irq_enter(); 273 irq_enter();
@@ -314,8 +279,6 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
314 set_irq_regs(old_regs); 279 set_irq_regs(old_regs);
315} 280}
316 281
317extern unsigned int lvl14_resolution;
318
319static void __cpuinit smp_setup_percpu_timer(void) 282static void __cpuinit smp_setup_percpu_timer(void)
320{ 283{
321 int cpu = smp_processor_id(); 284 int cpu = smp_processor_id();
@@ -323,17 +286,17 @@ static void __cpuinit smp_setup_percpu_timer(void)
323 prof_counter(cpu) = prof_multiplier(cpu) = 1; 286 prof_counter(cpu) = prof_multiplier(cpu) = 1;
324 load_profile_irq(cpu, lvl14_resolution); 287 load_profile_irq(cpu, lvl14_resolution);
325 288
326 if(cpu == boot_cpu_id) 289 if (cpu == boot_cpu_id)
327 enable_pil_irq(14); 290 sun4m_unmask_profile_irq();
328} 291}
329 292
330static void __init smp4m_blackbox_id(unsigned *addr) 293static void __init smp4m_blackbox_id(unsigned *addr)
331{ 294{
332 int rd = *addr & 0x3e000000; 295 int rd = *addr & 0x3e000000;
333 int rs1 = rd >> 11; 296 int rs1 = rd >> 11;
334 297
335 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */ 298 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
336 addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */ 299 addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
337 addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */ 300 addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
338} 301}
339 302
@@ -341,9 +304,9 @@ static void __init smp4m_blackbox_current(unsigned *addr)
341{ 304{
342 int rd = *addr & 0x3e000000; 305 int rd = *addr & 0x3e000000;
343 int rs1 = rd >> 11; 306 int rs1 = rd >> 11;
344 307
345 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */ 308 addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
346 addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */ 309 addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
347 addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */ 310 addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */
348} 311}
349 312
@@ -353,4 +316,7 @@ void __init sun4m_init_smp(void)
353 BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current); 316 BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
354 BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM); 317 BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
355 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM); 318 BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
319 BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
320 BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
321 BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
356} 322}
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index e6375a750d9a..170cd8e8eb2a 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -17,7 +17,6 @@
17#include <linux/resource.h> 17#include <linux/resource.h>
18#include <linux/times.h> 18#include <linux/times.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/sem.h> 20#include <linux/sem.h>
22#include <linux/msg.h> 21#include <linux/msg.h>
23#include <linux/shm.h> 22#include <linux/shm.h>
@@ -110,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa
110 109
111 default: 110 default:
112 return -ENOSYS; 111 return -ENOSYS;
113 }; 112 }
114 113
115 return -ENOSYS; 114 return -ENOSYS;
116} 115}
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 675c9e11ada5..42b282fa6112 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -19,7 +19,6 @@
19#include <linux/mman.h> 19#include <linux/mman.h>
20#include <linux/utsname.h> 20#include <linux/utsname.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/smp_lock.h>
23#include <linux/ipc.h> 22#include <linux/ipc.h>
24 23
25#include <asm/uaccess.h> 24#include <asm/uaccess.h>
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index f836f4e93afe..908b47a5ee24 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -360,20 +360,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
360} 360}
361EXPORT_SYMBOL(get_fb_unmapped_area); 361EXPORT_SYMBOL(get_fb_unmapped_area);
362 362
363/* Essentially the same as PowerPC... */ 363/* Essentially the same as PowerPC. */
364void arch_pick_mmap_layout(struct mm_struct *mm) 364static unsigned long mmap_rnd(void)
365{ 365{
366 unsigned long random_factor = 0UL; 366 unsigned long rnd = 0UL;
367 unsigned long gap;
368 367
369 if (current->flags & PF_RANDOMIZE) { 368 if (current->flags & PF_RANDOMIZE) {
370 random_factor = get_random_int(); 369 unsigned long val = get_random_int();
371 if (test_thread_flag(TIF_32BIT)) 370 if (test_thread_flag(TIF_32BIT))
372 random_factor &= ((1 * 1024 * 1024) - 1); 371 rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
373 else 372 else
374 random_factor = ((random_factor << PAGE_SHIFT) & 373 rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
375 0xffffffffUL);
376 } 374 }
375 return (rnd << PAGE_SHIFT) * 2;
376}
377
378void arch_pick_mmap_layout(struct mm_struct *mm)
379{
380 unsigned long random_factor = mmap_rnd();
381 unsigned long gap;
377 382
378 /* 383 /*
379 * Fall back to the standard layout if the personality 384 * Fall back to the standard layout if the personality
@@ -455,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
455 default: 460 default:
456 err = -ENOSYS; 461 err = -ENOSYS;
457 goto out; 462 goto out;
458 }; 463 }
459 } 464 }
460 if (call <= MSGCTL) { 465 if (call <= MSGCTL) {
461 switch (call) { 466 switch (call) {
@@ -476,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
476 default: 481 default:
477 err = -ENOSYS; 482 err = -ENOSYS;
478 goto out; 483 goto out;
479 }; 484 }
480 } 485 }
481 if (call <= SHMCTL) { 486 if (call <= SHMCTL) {
482 switch (call) { 487 switch (call) {
@@ -502,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
502 default: 507 default:
503 err = -ENOSYS; 508 err = -ENOSYS;
504 goto out; 509 goto out;
505 }; 510 }
506 } else { 511 } else {
507 err = -ENOSYS; 512 err = -ENOSYS;
508 } 513 }
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 1eb8b00aed75..7408201d7efb 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu,
103 unsigned long (*func)(unsigned long), 103 unsigned long (*func)(unsigned long),
104 unsigned long arg) 104 unsigned long arg)
105{ 105{
106 cpumask_t old_affinity = current->cpus_allowed; 106 cpumask_t old_affinity;
107 unsigned long ret; 107 unsigned long ret;
108 108
109 cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
109 /* should return -EINVAL to userspace */ 110 /* should return -EINVAL to userspace */
110 if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) 111 if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
111 return 0; 112 return 0;
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ec396e1916b9..6e492d59f6b1 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -83,5 +83,5 @@ sys_call_table:
83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
86/*330*/ .long sys_fanotify_mark, sys_prlimit64 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 8cfcaa549580..f566518483b5 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -84,7 +84,8 @@ sys_call_table32:
84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64 87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns
88 89
89#endif /* CONFIG_COMPAT */ 90#endif /* CONFIG_COMPAT */
90 91
@@ -160,4 +161,5 @@ sys_call_table:
160 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 161 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
161/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
162 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
163/*330*/ .word sys_fanotify_mark, sys_prlimit64 164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
165 .word sys_syncfs, sys_sendmmsg, sys_setns
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
index f476a5f4af6a..9aba8bd5a78b 100644
--- a/arch/sparc/kernel/tadpole.c
+++ b/arch/sparc/kernel/tadpole.c
@@ -100,7 +100,7 @@ static void swift_clockstop(void)
100 100
101void __init clock_stop_probe(void) 101void __init clock_stop_probe(void)
102{ 102{
103 unsigned int node, clk_nd; 103 phandle node, clk_nd;
104 char name[20]; 104 char name[20];
105 105
106 prom_getstring(prom_root_node, "name", name, sizeof(name)); 106 prom_getstring(prom_root_node, "name", name, sizeof(name));
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
deleted file mode 100644
index 138bbf5f8724..000000000000
--- a/arch/sparc/kernel/tick14.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/* tick14.c
2 *
3 * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
4 *
5 * This file handles the Sparc specific level14 ticker
6 * This is really useful for profiling OBP uses it for keyboard
7 * aborts and other stuff.
8 */
9#include <linux/kernel.h>
10
11extern unsigned long lvl14_save[5];
12static unsigned long *linux_lvl14 = NULL;
13static unsigned long obp_lvl14[4];
14
15/*
16 * Call with timer IRQ closed.
17 * First time we do it with disable_irq, later prom code uses spin_lock_irq().
18 */
19void install_linux_ticker(void)
20{
21
22 if (!linux_lvl14)
23 return;
24 linux_lvl14[0] = lvl14_save[0];
25 linux_lvl14[1] = lvl14_save[1];
26 linux_lvl14[2] = lvl14_save[2];
27 linux_lvl14[3] = lvl14_save[3];
28}
29
30void install_obp_ticker(void)
31{
32
33 if (!linux_lvl14)
34 return;
35 linux_lvl14[0] = obp_lvl14[0];
36 linux_lvl14[1] = obp_lvl14[1];
37 linux_lvl14[2] = obp_lvl14[2];
38 linux_lvl14[3] = obp_lvl14[3];
39}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 9c743b1886ff..1060e0672a4b 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
85 85
86/* 86/*
87 * timer_interrupt() needs to keep up the real-time clock, 87 * timer_interrupt() needs to keep up the real-time clock,
88 * as well as call the "do_timer()" routine every clocktick 88 * as well as call the "xtime_update()" routine every clocktick
89 */ 89 */
90 90
91#define TICK_SIZE (tick_nsec / 1000) 91#define TICK_SIZE (tick_nsec / 1000)
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
96 profile_tick(CPU_PROFILING); 96 profile_tick(CPU_PROFILING);
97#endif 97#endif
98 98
99 /* Protect counter clear so that do_gettimeoffset works */
100 write_seqlock(&xtime_lock);
101
102 clear_clock_irq(); 99 clear_clock_irq();
103 100
104 do_timer(1); 101 xtime_update(1);
105
106 write_sequnlock(&xtime_lock);
107 102
108#ifndef CONFIG_SMP 103#ifndef CONFIG_SMP
109 update_process_times(user_mode(get_irq_regs())); 104 update_process_times(user_mode(get_irq_regs()));
@@ -142,7 +137,7 @@ static struct platform_device m48t59_rtc = {
142 }, 137 },
143}; 138};
144 139
145static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match) 140static int __devinit clock_probe(struct platform_device *op)
146{ 141{
147 struct device_node *dp = op->dev.of_node; 142 struct device_node *dp = op->dev.of_node;
148 const char *model = of_get_property(dp, "model", NULL); 143 const char *model = of_get_property(dp, "model", NULL);
@@ -150,6 +145,10 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev
150 if (!model) 145 if (!model)
151 return -ENODEV; 146 return -ENODEV;
152 147
148 /* Only the primary RTC has an address property */
149 if (!of_find_property(dp, "address", NULL))
150 return -ENODEV;
151
153 m48t59_rtc.resource = &op->resource[0]; 152 m48t59_rtc.resource = &op->resource[0];
154 if (!strcmp(model, "mk48t02")) { 153 if (!strcmp(model, "mk48t02")) {
155 /* Map the clock register io area read-only */ 154 /* Map the clock register io area read-only */
@@ -169,14 +168,14 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev
169 return 0; 168 return 0;
170} 169}
171 170
172static struct of_device_id __initdata clock_match[] = { 171static struct of_device_id clock_match[] = {
173 { 172 {
174 .name = "eeprom", 173 .name = "eeprom",
175 }, 174 },
176 {}, 175 {},
177}; 176};
178 177
179static struct of_platform_driver clock_driver = { 178static struct platform_driver clock_driver = {
180 .probe = clock_probe, 179 .probe = clock_probe,
181 .driver = { 180 .driver = {
182 .name = "rtc", 181 .name = "rtc",
@@ -189,7 +188,7 @@ static struct of_platform_driver clock_driver = {
189/* Probe for the mostek real time clock chip. */ 188/* Probe for the mostek real time clock chip. */
190static int __init clock_init(void) 189static int __init clock_init(void)
191{ 190{
192 return of_register_platform_driver(&clock_driver); 191 return platform_driver_register(&clock_driver);
193} 192}
194/* Must be after subsys_initcall() so that busses are probed. Must 193/* Must be after subsys_initcall() so that busses are probed. Must
195 * be before device_initcall() because things like the RTC driver 194 * be before device_initcall() because things like the RTC driver
@@ -224,19 +223,15 @@ static void __init sbus_time_init(void)
224 223
225 btfixup(); 224 btfixup();
226 225
227 sparc_init_timers(timer_interrupt); 226 sparc_irq_config.init_timers(timer_interrupt);
228} 227}
229 228
230void __init time_init(void) 229void __init time_init(void)
231{ 230{
232#ifdef CONFIG_PCI 231 if (pcic_present())
233 extern void pci_time_init(void);
234 if (pcic_present()) {
235 pci_time_init(); 232 pci_time_init();
236 return; 233 else
237 } 234 sbus_time_init();
238#endif
239 sbus_time_init();
240} 235}
241 236
242 237
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3bc9c9979b92..1db6b18964d2 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = {
419 .num_resources = 1, 419 .num_resources = 1,
420}; 420};
421 421
422static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match) 422static int __devinit rtc_probe(struct platform_device *op)
423{ 423{
424 struct resource *r; 424 struct resource *r;
425 425
@@ -442,7 +442,7 @@ static int __devinit rtc_probe(struct platform_device *op, const struct of_devic
442 return platform_device_register(&rtc_cmos_device); 442 return platform_device_register(&rtc_cmos_device);
443} 443}
444 444
445static struct of_device_id __initdata rtc_match[] = { 445static const struct of_device_id rtc_match[] = {
446 { 446 {
447 .name = "rtc", 447 .name = "rtc",
448 .compatible = "m5819", 448 .compatible = "m5819",
@@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = {
462 {}, 462 {},
463}; 463};
464 464
465static struct of_platform_driver rtc_driver = { 465static struct platform_driver rtc_driver = {
466 .probe = rtc_probe, 466 .probe = rtc_probe,
467 .driver = { 467 .driver = {
468 .name = "rtc", 468 .name = "rtc",
@@ -477,7 +477,7 @@ static struct platform_device rtc_bq4802_device = {
477 .num_resources = 1, 477 .num_resources = 1,
478}; 478};
479 479
480static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match) 480static int __devinit bq4802_probe(struct platform_device *op)
481{ 481{
482 482
483 printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", 483 printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
@@ -487,7 +487,7 @@ static int __devinit bq4802_probe(struct platform_device *op, const struct of_de
487 return platform_device_register(&rtc_bq4802_device); 487 return platform_device_register(&rtc_bq4802_device);
488} 488}
489 489
490static struct of_device_id __initdata bq4802_match[] = { 490static const struct of_device_id bq4802_match[] = {
491 { 491 {
492 .name = "rtc", 492 .name = "rtc",
493 .compatible = "bq4802", 493 .compatible = "bq4802",
@@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = {
495 {}, 495 {},
496}; 496};
497 497
498static struct of_platform_driver bq4802_driver = { 498static struct platform_driver bq4802_driver = {
499 .probe = bq4802_probe, 499 .probe = bq4802_probe,
500 .driver = { 500 .driver = {
501 .name = "bq4802", 501 .name = "bq4802",
@@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = {
534 }, 534 },
535}; 535};
536 536
537static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match) 537static int __devinit mostek_probe(struct platform_device *op)
538{ 538{
539 struct device_node *dp = op->dev.of_node; 539 struct device_node *dp = op->dev.of_node;
540 540
@@ -552,14 +552,14 @@ static int __devinit mostek_probe(struct platform_device *op, const struct of_de
552 return platform_device_register(&m48t59_rtc); 552 return platform_device_register(&m48t59_rtc);
553} 553}
554 554
555static struct of_device_id __initdata mostek_match[] = { 555static const struct of_device_id mostek_match[] = {
556 { 556 {
557 .name = "eeprom", 557 .name = "eeprom",
558 }, 558 },
559 {}, 559 {},
560}; 560};
561 561
562static struct of_platform_driver mostek_driver = { 562static struct platform_driver mostek_driver = {
563 .probe = mostek_probe, 563 .probe = mostek_probe,
564 .driver = { 564 .driver = {
565 .name = "mostek", 565 .name = "mostek",
@@ -586,9 +586,9 @@ static int __init clock_init(void)
586 if (tlb_type == hypervisor) 586 if (tlb_type == hypervisor)
587 return platform_device_register(&rtc_sun4v_device); 587 return platform_device_register(&rtc_sun4v_device);
588 588
589 (void) of_register_platform_driver(&rtc_driver); 589 (void) platform_driver_register(&rtc_driver);
590 (void) of_register_platform_driver(&mostek_driver); 590 (void) platform_driver_register(&mostek_driver);
591 (void) of_register_platform_driver(&bq4802_driver); 591 (void) platform_driver_register(&bq4802_driver);
592 592
593 return 0; 593 return 0;
594} 594}
@@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
708 case CLOCK_EVT_MODE_UNUSED: 708 case CLOCK_EVT_MODE_UNUSED:
709 WARN_ON(1); 709 WARN_ON(1);
710 break; 710 break;
711 }; 711 }
712} 712}
713 713
714static struct clock_event_device sparc64_clockevent = { 714static struct clock_event_device sparc64_clockevent = {
@@ -816,14 +816,12 @@ void __init time_init(void)
816 clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); 816 clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
817 817
818 clocksource_tick.name = tick_ops->name; 818 clocksource_tick.name = tick_ops->name;
819 clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
820 clocksource_tick.read = clocksource_tick_read; 819 clocksource_tick.read = clocksource_tick_read;
821 820
821 clocksource_register_hz(&clocksource_tick, freq);
822 printk("clocksource: mult[%x] shift[%d]\n", 822 printk("clocksource: mult[%x] shift[%d]\n",
823 clocksource_tick.mult, clocksource_tick.shift); 823 clocksource_tick.mult, clocksource_tick.shift);
824 824
825 clocksource_register(&clocksource_tick);
826
827 sparc64_clockevent.name = tick_ops->name; 825 sparc64_clockevent.name = tick_ops->name;
828 clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); 826 clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
829 827
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 42ad2ba85010..0cbdaa41cd1e 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -622,7 +622,7 @@ static const char CHAFSR_PERR_msg[] =
622static const char CHAFSR_IERR_msg[] = 622static const char CHAFSR_IERR_msg[] =
623 "Internal processor error"; 623 "Internal processor error";
624static const char CHAFSR_ISAP_msg[] = 624static const char CHAFSR_ISAP_msg[] =
625 "System request parity error on incoming addresss"; 625 "System request parity error on incoming address";
626static const char CHAFSR_UCU_msg[] = 626static const char CHAFSR_UCU_msg[] =
627 "Uncorrectable E-cache ECC error for ifetch/data"; 627 "Uncorrectable E-cache ECC error for ifetch/data";
628static const char CHAFSR_UCC_msg[] = 628static const char CHAFSR_UCC_msg[] =
@@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type)
1804 return "warning resumable"; 1804 return "warning resumable";
1805 default: 1805 default:
1806 return "unknown"; 1806 return "unknown";
1807 }; 1807 }
1808} 1808}
1809 1809
1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) 1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
@@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc)
2152 2152
2153void show_stack(struct task_struct *tsk, unsigned long *_ksp) 2153void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2154{ 2154{
2155 unsigned long fp, thread_base, ksp; 2155 unsigned long fp, ksp;
2156 struct thread_info *tp; 2156 struct thread_info *tp;
2157 int count = 0; 2157 int count = 0;
2158#ifdef CONFIG_FUNCTION_GRAPH_TRACER 2158#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2173 flushw_all(); 2173 flushw_all();
2174 2174
2175 fp = ksp + STACK_BIAS; 2175 fp = ksp + STACK_BIAS;
2176 thread_base = (unsigned long) tp;
2177 2176
2178 printk("Call Trace:\n"); 2177 printk("Call Trace:\n");
2179 do { 2178 do {
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8cc03458eb7e..8f096e84a937 100644
--- a/arch/sparc/kernel/una_asm_32.S
+++ b/arch/sparc/kernel/una_asm_32.S
@@ -24,9 +24,9 @@ retl_efault:
24 .globl __do_int_store 24 .globl __do_int_store
25__do_int_store: 25__do_int_store:
26 ld [%o2], %g1 26 ld [%o2], %g1
27 cmp %1, 2 27 cmp %o1, 2
28 be 2f 28 be 2f
29 cmp %1, 4 29 cmp %o1, 4
30 be 1f 30 be 1f
31 srl %g1, 24, %g2 31 srl %g1, 24, %g2
32 srl %g1, 16, %g7 32 srl %g1, 16, %g7
diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
index be183fe41443..1c8d33228b2a 100644
--- a/arch/sparc/kernel/una_asm_64.S
+++ b/arch/sparc/kernel/una_asm_64.S
@@ -127,7 +127,7 @@ do_int_load:
127 wr %o5, 0x0, %asi 127 wr %o5, 0x0, %asi
128 retl 128 retl
129 mov 0, %o0 129 mov 0, %o0
130 .size __do_int_load, .-__do_int_load 130 .size do_int_load, .-do_int_load
131 131
132 .section __ex_table,"a" 132 .section __ex_table,"a"
133 .word 4b, __retl_efault 133 .word 4b, __retl_efault
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 12b9f352595f..4491f4cb2695 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,7 +16,6 @@
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/perf_event.h> 19#include <linux/perf_event.h>
21 20
22enum direction { 21enum direction {
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index c752c4c479bd..b2b019ea8caa 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
211 default: 211 default:
212 BUG(); 212 BUG();
213 break; 213 break;
214 }; 214 }
215 } 215 }
216 return __do_int_store(dst_addr, size, src_val, asi); 216 return __do_int_store(dst_addr, size, src_val, asi);
217} 217}
@@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
328 case ASI_SNFL: 328 case ASI_SNFL:
329 asi &= ~0x08; 329 asi &= ~0x08;
330 break; 330 break;
331 }; 331 }
332 switch (dir) { 332 switch (dir) {
333 case load: 333 case load:
334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); 334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
@@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
351 default: 351 default:
352 BUG(); 352 BUG();
353 break; 353 break;
354 }; 354 }
355 *reg_addr = val_in; 355 *reg_addr = val_in;
356 } 356 }
357 break; 357 break;
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 8f982b76c712..489fc15f3194 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index)
176 176
177 default: 177 default:
178 BUG(); 178 BUG();
179 }; 179 }
180} 180}
181 181
182static unsigned long index_to_divisor(unsigned int index) 182static unsigned long index_to_divisor(unsigned int index)
@@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index)
199 199
200 default: 200 default:
201 BUG(); 201 BUG();
202 }; 202 }
203} 203}
204 204
205static unsigned long estar_to_divisor(unsigned long estar) 205static unsigned long estar_to_divisor(unsigned long estar)
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar)
224 break; 224 break;
225 default: 225 default:
226 BUG(); 226 BUG();
227 }; 227 }
228 228
229 return ret; 229 return ret;
230} 230}
@@ -237,7 +237,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
237 if (!cpu_online(cpu)) 237 if (!cpu_online(cpu))
238 return 0; 238 return 0;
239 239
240 cpus_allowed = current->cpus_allowed; 240 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
241 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 241 set_cpus_allowed_ptr(current, cpumask_of(cpu));
242 242
243 clock_tick = sparc64_get_clock_tick(cpu) / 1000; 243 clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -258,7 +258,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
258 if (!cpu_online(cpu)) 258 if (!cpu_online(cpu))
259 return; 259 return;
260 260
261 cpus_allowed = current->cpus_allowed; 261 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
262 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 262 set_cpus_allowed_ptr(current, cpumask_of(cpu));
263 263
264 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; 264 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index f35d1e794548..eb1624b931d9 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
71 break; 71 break;
72 default: 72 default:
73 BUG(); 73 BUG();
74 }; 74 }
75 75
76 return ret; 76 return ret;
77} 77}
@@ -85,7 +85,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
85 if (!cpu_online(cpu)) 85 if (!cpu_online(cpu))
86 return 0; 86 return 0;
87 87
88 cpus_allowed = current->cpus_allowed; 88 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
89 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 89 set_cpus_allowed_ptr(current, cpumask_of(cpu));
90 90
91 reg = read_safari_cfg(); 91 reg = read_safari_cfg();
@@ -105,7 +105,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
105 if (!cpu_online(cpu)) 105 if (!cpu_online(cpu))
106 return; 106 return;
107 107
108 cpus_allowed = current->cpus_allowed; 108 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
109 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 109 set_cpus_allowed_ptr(current, cpumask_of(cpu));
110 110
111 new_freq = sparc64_get_clock_tick(cpu) / 1000; 111 new_freq = sparc64_get_clock_tick(cpu) / 1000;
@@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
125 125
126 default: 126 default:
127 BUG(); 127 BUG();
128 }; 128 }
129 129
130 reg = read_safari_cfg(); 130 reg = read_safari_cfg();
131 131
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index aa6ac70d4fd5..29348ea139c3 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
363 363
364 default: 364 default:
365 return handshake_failure(vio); 365 return handshake_failure(vio);
366 }; 366 }
367} 367}
368 368
369static int process_attr(struct vio_driver_state *vio, void *pkt) 369static int process_attr(struct vio_driver_state *vio, void *pkt)
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 9dfd2ebcb157..36357717d691 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left; 334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right; 335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
336 break; 336 break;
337 }; 337 }
338 338
339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) 339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
340 rd_val = right & left; 340 rd_val = right & left;
@@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); 360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
361 regs->tstate = tstate | (ccr << 32UL); 361 regs->tstate = tstate | (ccr << 32UL);
362 } 362 }
363 }; 363 }
364} 364}
365 365
366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) 366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
392 392
393 case ARRAY32_OPF: 393 case ARRAY32_OPF:
394 rd_val <<= 2; 394 rd_val <<= 2;
395 }; 395 }
396 396
397 store_reg(regs, rd_val, RD(insn)); 397 store_reg(regs, rd_val, RD(insn));
398} 398}
@@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
577 *fpd_regaddr(f, RD(insn)) = rd_val; 577 *fpd_regaddr(f, RD(insn)) = rd_val;
578 break; 578 break;
579 } 579 }
580 }; 580 }
581} 581}
582 582
583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) 583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
693 *fpd_regaddr(f, RD(insn)) = rd_val; 693 *fpd_regaddr(f, RD(insn)) = rd_val;
694 break; 694 break;
695 } 695 }
696 }; 696 }
697} 697}
698 698
699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) 699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
786 rd_val |= 1 << i; 786 rd_val |= 1 << i;
787 } 787 }
788 break; 788 break;
789 }; 789 }
790 790
791 maybe_flush_windows(0, 0, RD(insn), 0); 791 maybe_flush_windows(0, 0, RD(insn), 0);
792 store_reg(regs, rd_val, RD(insn)); 792 store_reg(regs, rd_val, RD(insn));
@@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
885 case BSHUFFLE_OPF: 885 case BSHUFFLE_OPF:
886 bshuffle(regs, insn); 886 bshuffle(regs, insn);
887 break; 887 break;
888 }; 888 }
889 889
890 regs->tpc = regs->tnpc; 890 regs->tpc = regs->tnpc;
891 regs->tnpc += 4; 891 regs->tnpc += 4;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e6783657f..c0220759003e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
108 __sun4v_2insn_patch_end = .; 108 __sun4v_2insn_patch_end = .;
109 } 109 }
110 110
111 PERCPU(PAGE_SIZE) 111 PERCPU_SECTION(SMP_CACHE_BYTES)
112 112
113 . = ALIGN(PAGE_SIZE); 113 . = ALIGN(PAGE_SIZE);
114 __init_end = .; 114 __init_end = .;
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index b351770cbdd6..3107381e576d 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -9,7 +9,6 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/smp_lock.h>
13 12
14#include <asm/uaccess.h> 13#include <asm/uaccess.h>
15 14