Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig  35
-rw-r--r--  arch/mips/au1000/common/gpio.c  6
-rw-r--r--  arch/mips/emma2rh/markeins/setup.c  16
-rw-r--r--  arch/mips/jazz/setup.c  13
-rw-r--r--  arch/mips/kernel/.gitignore  1
-rw-r--r--  arch/mips/kernel/Makefile  1
-rw-r--r--  arch/mips/kernel/cevt-r4k.c  173
-rw-r--r--  arch/mips/kernel/cevt-smtc.c  321
-rw-r--r--  arch/mips/kernel/cpu-probe.c  26
-rw-r--r--  arch/mips/kernel/entry.S  10
-rw-r--r--  arch/mips/kernel/genex.S  41
-rw-r--r--  arch/mips/kernel/kgdb.c  10
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c  2
-rw-r--r--  arch/mips/kernel/process.c  19
-rw-r--r--  arch/mips/kernel/ptrace.c  2
-rw-r--r--  arch/mips/kernel/scall32-o32.S  6
-rw-r--r--  arch/mips/kernel/scall64-64.S  6
-rw-r--r--  arch/mips/kernel/scall64-n32.S  6
-rw-r--r--  arch/mips/kernel/scall64-o32.S  6
-rw-r--r--  arch/mips/kernel/setup.c  75
-rw-r--r--  arch/mips/kernel/smtc.c  260
-rw-r--r--  arch/mips/kernel/traps.c  46
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  1
-rw-r--r--  arch/mips/lib/csum_partial.S  21
-rw-r--r--  arch/mips/mm/c-r3k.c  1
-rw-r--r--  arch/mips/mm/c-r4k.c  18
-rw-r--r--  arch/mips/mm/c-tx39.c  2
-rw-r--r--  arch/mips/mm/cache.c  1
-rw-r--r--  arch/mips/mm/tlbex.c  6
-rw-r--r--  arch/mips/mti-malta/Makefile  2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c  9
-rw-r--r--  arch/mips/pci/Makefile  1
-rw-r--r--  arch/mips/pci/pci-bcm47xx.c  60
-rw-r--r--  arch/mips/pci/pci-ip27.c  41
-rw-r--r--  arch/mips/rb532/devices.c  16
-rw-r--r--  arch/mips/sgi-ip22/ip22-platform.c  2
-rw-r--r--  arch/mips/txx9/generic/setup.c  4
-rw-r--r--  arch/mips/vr41xx/common/irq.c  6
38 files changed, 838 insertions(+), 434 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4da736e2533..c930b8ceb41 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.
 
-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.
 
 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code. Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.
 
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
@@ -1886,6 +1868,15 @@ config STACKTRACE_SUPPORT
 
 source "init/Kconfig"
 
+config PROBE_INITRD_HEADER
+	bool "Probe initrd header created by addinitrd"
+	depends on BLK_DEV_INITRD
+	help
+	  Probe initrd header at the last page of kernel image.
+	  Say Y here if you are using arch/mips/boot/addinitrd.c to
+	  add initrd or initramfs image to the kernel image.
+	  Otherwise, say N.
+
 menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
 
 config HW_HAS_EISA
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c
index b485d94ce8a..e660ddd611c 100644
--- a/arch/mips/au1000/common/gpio.c
+++ b/arch/mips/au1000/common/gpio.c
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;
 
-	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio);
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 }
 
 static int au1xxx_gpio2_direction_input(unsigned gpio)
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio)
 static int au1xxx_gpio2_direction_output(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;
-	gpio2->dir = (0x01 << gpio) | (value << gpio);
+	gpio2->dir |= 0x01 << gpio;
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 	return 0;
 }
 
@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio)
 static int au1xxx_gpio1_direction_output(unsigned gpio, int value)
 {
 	gpio1->trioutclr = (0x01 & gpio);
+	au1xxx_gpio1_write(gpio, value);
 	return 0;
 }
 
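The !!value normalization above is the substantive fix: gpio_set_value() callers may pass any nonzero value to mean "high", and the old code shifted that raw value into the register. A tiny illustration of the failure mode, under the assumption of a caller passing a bitmask rather than 0/1 (values here are hypothetical):

	/* gpio = 2, value = 4 (nonzero, meaning "drive high") */
	(value << gpio)      /* == 0x10: sets bit 4, the wrong pin      */
	((!!value) << gpio)  /* == 0x04: sets bit 2, the intended pin   */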
diff --git a/arch/mips/emma2rh/markeins/setup.c b/arch/mips/emma2rh/markeins/setup.c
index 822a20e21fa..b6a23ad539f 100644
--- a/arch/mips/emma2rh/markeins/setup.c
+++ b/arch/mips/emma2rh/markeins/setup.c
@@ -25,23 +25,9 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/initrd.h>
-#include <linux/irq.h>
-#include <linux/ioport.h>
-#include <linux/param.h>	/* for HZ */
-#include <linux/root_dev.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/addrspace.h>
+
 #include <asm/time.h>
-#include <asm/bcache.h>
-#include <asm/irq.h>
 #include <asm/reboot.h>
-#include <asm/traps.h>
-#include <asm/debug.h>
 
 #include <asm/emma2rh/emma2rh.h>
 
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index b59ba6b93cd..7043f6b9ff3 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -5,33 +5,22 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 1997, 1998, 2001, 07 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
  * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2007 by Thomas Bogendoerfer
  */
 #include <linux/eisa.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
 #include <linux/console.h>
-#include <linux/fb.h>
-#include <linux/pm.h>
 #include <linux/screen_info.h>
 #include <linux/platform_device.h>
 #include <linux/serial_8250.h>
 
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
 #include <asm/jazz.h>
 #include <asm/jazzdma.h>
 #include <asm/reboot.h>
-#include <asm/io.h>
 #include <asm/pgtable.h>
-#include <asm/time.h>
-#include <asm/traps.h>
-#include <asm/mc146818-time.h>
 
 extern asmlinkage void jazz_handle_int(void);
 
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f9397479..25775cb5400 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0..4a4c59f2737 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }
 
-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
 
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt. Being the paranoiacs we are we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
 
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name		= "SMTC";
-	cd->features		= CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult	= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift	= 0; //32;
-	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating		= 200;
-	cd->irq			= 17; //-1;
-//	if (cpu)
-//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-		cd->cpumask	= cpumask_of_cpu(cpu);
-
-	cd->set_mode		= smtc_set_mode;
-
-	cd->broadcast		= mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 		    break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry */
 
+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;
 
 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
 
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating		= 300;
 	cd->irq			= irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask		= CPU_MASK_ALL;
-#else
 	cd->cpumask		= cpumask_of_cpu(cpu);
-#endif
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_mode;
+	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif
 
 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
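The compare_change_hazard() introduced above exists because a write to CP0 Compare can be observed late in Cause.TI when the timer interrupt is latched outside the core, so one irq_disable_hazard() is not always enough. A minimal sketch of the ack-then-verify idiom that c0_compare_int_usable() builds on (the helper name is hypothetical; the accessors are the ones used in the patch):

	/* Ack the Count/Compare interrupt, then check that Cause.TI
	 * (bit 30) really dropped once the Compare write has had
	 * time to propagate through compare_change_hazard(). */
	static int timer_ack_settled(void)
	{
		write_c0_compare(read_c0_compare());	/* ack */
		compare_change_hazard();		/* settle */
		return !(read_c0_cause() & (1 << 30));	/* 1 = cleared */
	}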
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 00000000000..5162fe4b595
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register. The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior. So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
+ * is always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register. Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days. If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant;
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long timestamp, reference, previous;
+	unsigned long nextcomp = 0L;
+	int vpe = current_cpu_data.vpe_id;
+	int cpu = smp_processor_id();
+	local_irq_save(flags);
+	mtflags = dmt();
+
+	/*
+	 * Maintain the per-TC virtual timer
+	 * and program the per-VPE shared Count register
+	 * as appropriate here...
+	 */
+	reference = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(reference + delta);
+	/*
+	 * To really model the clock, we have to catch the case
+	 * where the current next-in-VPE timestamp is the old
+	 * timestamp for the calling CPU, but the new value is
+	 * in fact later. In that case, we have to do a full
+	 * scan and discover the new next-in-VPE CPU id and
+	 * timestamp.
+	 */
+	previous = smtc_nexttime[vpe][cpu];
+	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+	    && IS_SOONER(previous, timestamp, reference)) {
+		int i;
+		int soonest = cpu;
+
+		/*
+		 * Update timestamp array here, so that new
+		 * value gets considered along with those of
+		 * other virtual CPUs on the VPE.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+		for_each_online_cpu(i) {
+			if (ISVALID(smtc_nexttime[vpe][i])
+			    && IS_SOONER(smtc_nexttime[vpe][i],
+				smtc_nexttime[vpe][soonest], reference)) {
+				soonest = i;
+			}
+		}
+		smtc_nextinvpe[vpe] = soonest;
+		nextcomp = smtc_nexttime[vpe][soonest];
+	/*
+	 * Otherwise, we don't have to process the whole array rank,
+	 * we just have to see if the event horizon has gotten closer.
+	 */
+	} else {
+		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+		    IS_SOONER(timestamp,
+			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+			smtc_nextinvpe[vpe] = cpu;
+			nextcomp = timestamp;
+		}
+		/*
+		 * Since next-in-VPE may be the same as the executing
+		 * virtual CPU, we update the array *after* checking
+		 * its value.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+	}
+
+	/*
+	 * It may be that, in fact, we don't need to update Compare,
+	 * but if we do, we want to make sure we didn't fall into
+	 * a crack just behind Count.
+	 */
+	if (ISVALID(nextcomp)) {
+		write_c0_compare(nextcomp);
+		ehb();
+		/*
+		 * We never return an error, we just make sure
+		 * that we trigger the handlers as quickly as
+		 * we can if we fell behind.
+		 */
+		while ((nextcomp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX) {
+			nextcomp += CATCHUP_INCREMENT;
+			write_c0_compare(nextcomp);
+			ehb();
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	int cpu;
+	struct clock_event_device *cd;
+	unsigned long nextstamp = 0L;
+	unsigned long reference;
+
+
+repeat:
+	for_each_online_cpu(cpu) {
+		/*
+		 * Find virtual CPUs within the current VPE who have
+		 * unserviced timer requests whose time is now past.
+		 */
+		local_irq_save(flags);
+		mtflags = dmt();
+		if (cpu_data[cpu].vpe_id == vpe &&
+		    ISVALID(smtc_nexttime[vpe][cpu])) {
+			reference = (unsigned long)read_c0_count();
+			if ((smtc_nexttime[vpe][cpu] - reference)
+				 > (unsigned long)LONG_MAX) {
+				smtc_nexttime[vpe][cpu] = 0L;
+				emt(mtflags);
+				local_irq_restore(flags);
+				/*
+				 * We don't send IPIs to ourself.
+				 */
+				if (cpu != smp_processor_id()) {
+					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+				} else {
+					cd = &per_cpu(mips_clockevent_device, cpu);
+					cd->event_handler(cd);
+				}
+			} else {
+				/* Local to VPE but Valid Time not yet reached. */
+				if (!ISVALID(nextstamp) ||
+				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+				    reference)) {
+					smtc_nextinvpe[vpe] = cpu;
+					nextstamp = smtc_nexttime[vpe][cpu];
+				}
+				emt(mtflags);
+				local_irq_restore(flags);
+			}
+		} else {
+			emt(mtflags);
+			local_irq_restore(flags);
+
+		}
+	}
+	/* Reprogram for interrupt at next soonest timestamp for VPE */
+	if (ISVALID(nextstamp)) {
+		write_c0_compare(nextstamp);
+		ehb();
+		if ((nextstamp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX)
+			goto repeat;
+	}
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+	handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
+		smtc_distribute_timer(cpu_data[cpu].vpe_id);
+	}
+	return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+	int i;
+	int j;
+
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+	if (cpu == 0) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			smtc_nextinvpe[i] = 0;
+			for (j = 0; j < num_possible_cpus(); j++)
+				smtc_nexttime[i][j] = 0L;
+		}
+		/*
+		 * SMTC also can't have the usability test
+		 * run by secondary TCs once Compare is in use.
+		 */
+		if (!c0_compare_int_usable())
+			return -ENXIO;
+	}
+
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	if (get_c0_compare_int)
+		irq = get_c0_compare_int();
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name		= "MIPS";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift	= 32;
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= mips_next_event;
+	cd->set_mode		= mips_set_clock_mode;
+	cd->event_handler	= mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	/*
+	 * On SMTC we only want to do the data structure
+	 * initialization and IRQ setup once.
+	 */
+	if (cpu)
+		return 0;
+	/*
+	 * And we need the hwmask associated with the c0_compare
+	 * vector to be initialized.
+	 */
+	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+	if (cp0_timer_irq_installed)
+		return 0;
+
+	cp0_timer_irq_installed = 1;
+
+	setup_irq(irq, &c0_compare_irqaction);
+
+	return 0;
+}
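The IS_SOONER() macro in cevt-smtc.c above orders timestamps on the free-running 32-bit Count register by comparing unsigned distances from a common reference point, which stays correct across Count wraparound. A standalone illustration (host-side C; the uint32_t narrowing is added here to model the 32-bit register and is not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* a is sooner than b if it lies a shorter unsigned distance
	 * ahead of the reference point. */
	#define IS_SOONER(a, b, ref) \
		(((a) - (uint32_t)(ref)) < ((b) - (uint32_t)(ref)))

	int main(void)
	{
		uint32_t ref = 0xfffffff0u;	/* Count about to wrap */
		uint32_t a = 0x00000010u;	/* 0x20 ticks past ref, after wrap */
		uint32_t b = 0xfffffff8u;	/* 8 ticks past ref */
		printf("%d %d\n", IS_SOONER(a, b, ref), IS_SOONER(b, a, ref));
		return 0;	/* prints "0 1": b expires first */
	}

A naive numeric comparison would pick the numerically smaller a as the next deadline and miss b's nearly-due expiry until the catch-up loop fired.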
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 335a6ae3d59..e621fda8ab3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -45,18 +45,7 @@ static void r39xx_wait(void)
 	local_irq_enable();
 }
 
-/*
- * There is a race when WAIT instruction executed with interrupt
- * enabled.
- * But it is implementation-dependent whether the pipeline restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
-{
-	__asm__("	.set	mips3			\n"
-		"	wait				\n"
-		"	.set	mips0			\n");
-}
+extern void r4k_wait(void);
 
 /*
  * This variant is preferable as it allows testing need_resched and going to
@@ -65,14 +54,18 @@ static void r4k_wait(void)
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
 	local_irq_disable();
 	if (!need_resched())
-		__asm__("	.set	mips3		\n"
+		__asm__("	.set	push		\n"
+			"	.set	mips3		\n"
 			"	wait			\n"
-			"	.set	mips0		\n");
+			"	.set	pop		\n");
 	local_irq_enable();
+	__asm__("	.globl __pastwait	\n"
+		"__pastwait:			\n");
+	return;
 }
 
 /*
@@ -128,7 +121,7 @@ static int __init wait_disable(char *s)
 
 __setup("nowait", wait_disable);
 
-static inline void check_wait(void)
+void __init check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
@@ -242,7 +235,6 @@ static inline void check_errata(void)
 
 void __init check_bugs32(void)
 {
-	check_wait();
 	check_errata();
 }
 
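The new __pastwait label at the end of r4k_wait_irqoff() gives other TCs a way to tell whether a target is parked, interrupts masked, inside the wait idiom: its restart PC falls in [r4k_wait_irqoff, __pastwait). The SMTC IPI path later in this patch (smtc.c) uses exactly that range test; a sketch of the predicate, using only names this patch introduces:

	extern void r4k_wait_irqoff(void), __pastwait(void);

	/* True if a TC's saved restart PC is inside the irq-off wait. */
	static int tc_parked_in_wait(unsigned long tcrestart)
	{
		return tcrestart >= (unsigned long)r4k_wait_irqoff &&
		       tcrestart < (unsigned long)__pastwait;
	}

When the predicate holds, the sender rewrites TCRestart to __pastwait and clears IXMT, so the deferred IPI is serviced as soon as the target resumes.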
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939..ffa331029e0 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
 
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all)			# restore full frame
 	xor	t0, t0, t3
 	mtc0	t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee04..01dcbe38fa0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -20,6 +20,7 @@
 #include <asm/stackframe.h>
 #include <asm/war.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 #define PANIC_PIC(msg)					\
 		.set push;				\
@@ -126,7 +127,42 @@ handle_vcei:
 
 	__FINIT
 
+	.align	5	/* 32 byte rollback region */
+LEAF(r4k_wait)
+	.set	push
+	.set	noreorder
+	/* start of rollback region */
+	LONG_L	t0, TI_FLAGS($28)
+	nop
+	andi	t0, _TIF_NEED_RESCHED
+	bnez	t0, 1f
+	 nop
+	nop
+	nop
+	.set	mips3
+	wait
+	/* end of rollback region (the region size must be a power of two) */
+	.set	pop
+1:
+	jr	ra
+	END(r4k_wait)
+
+	.macro	BUILD_ROLLBACK_PROLOGUE handler
+	FEXPORT(rollback_\handler)
+	.set	push
+	.set	noat
+	MFC0	k0, CP0_EPC
+	PTR_LA	k1, r4k_wait
+	ori	k0, 0x1f	/* 32 byte rollback region */
+	xori	k0, 0x1f
+	bne	k0, k1, 9f
+	 MTC0	k0, CP0_EPC
+9:
+	.set	pop
+	.endm
+
 	.align	5
+BUILD_ROLLBACK_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/*
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
  * This prototype is copied to ebase + n*IntCtl.VS and patched
  * to invoke the handler
  */
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
 NESTED(except_vec_vi, 0, sp)
 	SAVE_SOME
 	SAVE_AT
@@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
 	and	t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 	mfc0	t2, CP0_TCCONTEXT
-	or	t0, t0, t2
-	mtc0	t0, CP0_TCCONTEXT
+	or	t2, t0, t2
+	mtc0	t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
 	xor	t1, t1, t0
 	mtc0	t1, CP0_STATUS
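BUILD_ROLLBACK_PROLOGUE relies on r4k_wait being 32-byte aligned (.align 5) and its rollback region being exactly 32 bytes, so clearing the low five bits of EPC (the ori/xori pair above) maps any PC inside the region back to the region start. A sketch of the same check in C, mirroring the assembly:

	/* True if the interrupted PC lies in r4k_wait's rollback region:
	 * rounding EPC down to its 32-byte block must land on r4k_wait. */
	static int interrupted_in_rollback_region(unsigned long epc,
						  unsigned long r4k_wait_addr)
	{
		return (epc & ~0x1fUL) == r4k_wait_addr;
	}

When it matches, the prologue rewinds EPC to r4k_wait itself, so the need_resched test at the top of the region is re-run after the interrupt instead of racing with it.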
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index c5a8b2d21ca..6e152c80cd4 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -62,13 +62,13 @@ void arch_kgdb_breakpoint(void)
 
 static void kgdb_call_nmi_hook(void *ignored)
 {
-	kgdb_nmicallback(raw_smp_processor_id(), (void *)0);
+	kgdb_nmicallback(raw_smp_processor_id(), NULL);
 }
 
 void kgdb_roundup_cpus(unsigned long flags)
 {
 	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, NULL);
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
 	local_irq_disable();
 }
 
@@ -190,9 +190,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
 	struct pt_regs *regs = args->regs;
 	int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-	if (fixup_exception(regs))
-		return NOTIFY_DONE;
-
 	/* Userspace events, ignore. */
 	if (user_mode(regs))
 		return NOTIFY_DONE;
@@ -239,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 
 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		if (remcom_in_buffer[0] == 's')
-			if (kgdb_contthread)
-				atomic_set(&kgdb_cpu_doing_single_step, cpu);
+			atomic_set(&kgdb_cpu_doing_single_step, cpu);
 
 		return 0;
 	}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740..dc9eb72ed9d 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
 
 static __init int mt_fp_affinity_init(void)
 {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b16facd9ea8..22fc19bbe87 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);
 
 			smtc_idle_loop_hook();
@@ -145,17 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	 */
 	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
-	clear_tsk_thread_flag(p, TIF_USEDFPU);
 
-#ifdef CONFIG_MIPS_MT_FPAFF
+#ifdef CONFIG_MIPS_MT_SMTC
 	/*
-	 * FPU affinity support is cleaner if we track the
-	 * user-visible CPU affinity from the very beginning.
-	 * The generic cpus_allowed mask will already have
-	 * been copied from the parent before copy_thread
-	 * is invoked.
+	 * SMTC restores TCStatus after Status, and the CU bits
+	 * are aliased there.
 	 */
-	p->thread.user_cpus_allowed = p->cpus_allowed;
+	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
+	clear_tsk_thread_flag(p, TIF_USEDFPU);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	clear_tsk_thread_flag(p, TIF_FPUBOUND);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a..96ffc9c6d19 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-		unsigned int irqflags;
+		unsigned long irqflags;
 		unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */
 
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fc4fd4d705e..5e75a316f6b 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -647,6 +647,12 @@ einval:	li	v0, -EINVAL
 	sys	sys_timerfd_create	2
 	sys	sys_timerfd_gettime	2
 	sys	sys_timerfd_settime	4
+	sys	sys_signalfd4		4
+	sys	sys_eventfd2		2	/* 4325 */
+	sys	sys_epoll_create1	1
+	sys	sys_dup3		3
+	sys	sys_pipe2		2
+	sys	sys_inotify_init1	1
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b73fd1e452..3d58204c9d4 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -481,4 +481,10 @@ sys_call_table:
 	PTR	sys_timerfd_create		/* 5280 */
 	PTR	sys_timerfd_gettime
 	PTR	sys_timerfd_settime
+	PTR	sys_signalfd4
+	PTR	sys_eventfd2
+	PTR	sys_epoll_create1		/* 5285 */
+	PTR	sys_dup3
+	PTR	sys_pipe2
+	PTR	sys_inotify_init1
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2654e75d2fe..da7f1b6ea0f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -407,4 +407,10 @@ EXPORT(sysn32_call_table)
 	PTR	sys_timerfd_create
 	PTR	sys_timerfd_gettime		/* 5285 */
 	PTR	sys_timerfd_settime
+	PTR	sys_signalfd4
+	PTR	sys_eventfd2
+	PTR	sys_epoll_create1
+	PTR	sys_dup3			/* 5290 */
+	PTR	sys_pipe2
+	PTR	sys_inotify_init1
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 76167bea5a7..d7cd1aac9ad 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -529,4 +529,10 @@ sys_call_table:
 	PTR	sys_timerfd_create
 	PTR	sys_timerfd_gettime
 	PTR	sys_timerfd_settime
+	PTR	compat_sys_signalfd4
+	PTR	sys_eventfd2			/* 4325 */
+	PTR	sys_epoll_create1
+	PTR	sys_dup3
+	PTR	sys_pipe2
+	PTR	sys_inotify_init1
 	.size	sys_call_table,.-sys_call_table
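The same six syscalls are wired into all four tables; the numbering follows each ABI's base (o32 counts from 4000, 64-bit and n32 from 5000). Going by the /* 4325 */ marker above, on o32 sys_pipe2 lands at 4328, which a userspace sketch can exercise directly (runs on MIPS o32 only; the number is inferred from the table comments, not stated in the patch):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		int fds[2];
		long ret = syscall(4328, fds, 0);	/* pipe2(fds, 0) */
		printf("pipe2 -> %ld (fds %d,%d)\n", ret, fds[0], fds[1]);
		return ret != 0;
	}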
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8af84867e74..16f8edfe5cd 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -78,7 +78,7 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
 
 	/* Sanity check */
 	if (start + size < start) {
-		printk("Trying to add an invalid memory region, skipped\n");
+		pr_warning("Trying to add an invalid memory region, skipped\n");
 		return;
 	}
 
@@ -92,7 +92,7 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
 	}
 
 	if (x == BOOT_MEM_MAP_MAX) {
-		printk("Ooops! Too many entries in the memory map!\n");
+		pr_err("Ooops! Too many entries in the memory map!\n");
 		return;
 	}
 
@@ -108,22 +108,22 @@ static void __init print_memory_map(void)
 	const int field = 2 * sizeof(unsigned long);
 
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		printk(" memory: %0*Lx @ %0*Lx ",
+		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
 		       field, (unsigned long long) boot_mem_map.map[i].size,
 		       field, (unsigned long long) boot_mem_map.map[i].addr);
 
 		switch (boot_mem_map.map[i].type) {
 		case BOOT_MEM_RAM:
-			printk("(usable)\n");
+			printk(KERN_CONT "(usable)\n");
 			break;
 		case BOOT_MEM_ROM_DATA:
-			printk("(ROM data)\n");
+			printk(KERN_CONT "(ROM data)\n");
 			break;
 		case BOOT_MEM_RESERVED:
-			printk("(reserved)\n");
+			printk(KERN_CONT "(reserved)\n");
 			break;
 		default:
-			printk("type %lu\n", boot_mem_map.map[i].type);
+			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
 			break;
 		}
 	}
@@ -160,36 +160,39 @@ early_param("rd_size", rd_size_early);
 static unsigned long __init init_initrd(void)
 {
 	unsigned long end;
-	u32 *initrd_header;
 
 	/*
 	 * Board specific code or command line parser should have
 	 * already set up initrd_start and initrd_end. In these cases
 	 * perform sanity checks and use them if all looks good.
 	 */
-	if (initrd_start && initrd_end > initrd_start)
-		goto sanitize;
+	if (!initrd_start || initrd_end <= initrd_start) {
+#ifdef CONFIG_PROBE_INITRD_HEADER
+		u32 *initrd_header;
 
 	/*
 	 * See if initrd has been added to the kernel image by
 	 * arch/mips/boot/addinitrd.c. In that case a header is
-	 * prepended to initrd and is made up by 8 bytes. The fisrt
+	 * prepended to initrd and is made up by 8 bytes. The first
 	 * word is a magic number and the second one is the size of
 	 * initrd. Initrd start must be page aligned in any cases.
 	 */
 	initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
 	if (initrd_header[0] != 0x494E5244)
 		goto disable;
 	initrd_start = (unsigned long)(initrd_header + 2);
 	initrd_end = initrd_start + initrd_header[1];
+#else
+		goto disable;
+#endif
+	}
 
-sanitize:
 	if (initrd_start & ~PAGE_MASK) {
-		printk(KERN_ERR "initrd start must be page aligned\n");
+		pr_err("initrd start must be page aligned\n");
 		goto disable;
 	}
 	if (initrd_start < PAGE_OFFSET) {
-		printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
+		pr_err("initrd start < PAGE_OFFSET\n");
 		goto disable;
 	}
 
@@ -221,18 +224,18 @@ static void __init finalize_initrd(void)
 		goto disable;
 	}
 	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
-		printk("Initrd extends beyond end of memory");
+		printk(KERN_ERR "Initrd extends beyond end of memory");
 		goto disable;
 	}
 
 	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
 	initrd_below_start_ok = 1;
 
-	printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
-	       initrd_start, size);
+	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
+		initrd_start, size);
 	return;
 disable:
-	printk(" - disabling initrd\n");
+	printk(KERN_CONT " - disabling initrd\n");
 	initrd_start = 0;
 	initrd_end = 0;
 }
@@ -310,14 +313,12 @@ static void __init bootmem_init(void)
 	if (min_low_pfn >= max_low_pfn)
 		panic("Incorrect memory mapping !!!");
 	if (min_low_pfn > ARCH_PFN_OFFSET) {
-		printk(KERN_INFO
-		       "Wasting %lu bytes for tracking %lu unused pages\n",
-		       (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
-		       min_low_pfn - ARCH_PFN_OFFSET);
+		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
+			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
+			min_low_pfn - ARCH_PFN_OFFSET);
 	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
-		printk(KERN_INFO
-		       "%lu free pages won't be used\n",
-		       ARCH_PFN_OFFSET - min_low_pfn);
+		pr_info("%lu free pages won't be used\n",
+			ARCH_PFN_OFFSET - min_low_pfn);
 	}
 	min_low_pfn = ARCH_PFN_OFFSET;
 
@@ -471,7 +472,7 @@ static void __init arch_mem_init(char **cmdline_p)
 	/* call board setup routine */
 	plat_mem_setup();
 
-	printk("Determined physical RAM map:\n");
+	pr_info("Determined physical RAM map:\n");
 	print_memory_map();
 
 	strlcpy(command_line, arcs_cmdline, sizeof(command_line));
@@ -482,7 +483,7 @@ static void __init arch_mem_init(char **cmdline_p)
 	parse_early_param();
 
 	if (usermem) {
-		printk("User-defined physical RAM map:\n");
+		pr_info("User-defined physical RAM map:\n");
 		print_memory_map();
 	}
 
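The PROBE_INITRD_HEADER path above expects the 8-byte header that arch/mips/boot/addinitrd.c prepends: one magic word, 0x494E5244 ("INRD"), then the payload size. A sketch of the layout being parsed (descriptive only; the kernel reads it through a raw u32 pointer):

	#include <stdint.h>

	struct addinitrd_header {
		uint32_t magic;	/* 0x494E5244, the bytes "INRD" */
		uint32_t size;	/* initrd bytes that follow */
	};

That is why initrd_start is computed as (unsigned long)(initrd_header + 2): u32 pointer arithmetic skips the two header words and lands on the first payload byte.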
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532a..897fb2b4751 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */ 1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2004 Mips Technologies, Inc
17 * Copyright (C) 2008 Kevin D. Kissell
18 */
2 19
3#include <linux/clockchips.h> 20#include <linux/clockchips.h>
4#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -21,7 +38,6 @@
21#include <asm/time.h> 38#include <asm/time.h>
22#include <asm/addrspace.h> 39#include <asm/addrspace.h>
23#include <asm/smtc.h> 40#include <asm/smtc.h>
24#include <asm/smtc_ipi.h>
25#include <asm/smtc_proc.h> 41#include <asm/smtc_proc.h>
26 42
27/* 43/*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
58 74
59asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 75asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
60 76
61/*
62 * Clock interrupt "latch" buffers, per "CPU"
63 */
64
65static atomic_t ipi_timer_latch[NR_CPUS];
66 77
67/* 78/*
68 * Number of InterProcessor Interrupt (IPI) message buffers to allocate 79 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
70 81
71#define IPIBUF_PER_CPU 4 82#define IPIBUF_PER_CPU 4
72 83
73static struct smtc_ipi_q IPIQ[NR_CPUS]; 84struct smtc_ipi_q IPIQ[NR_CPUS];
74static struct smtc_ipi_q freeIPIq; 85static struct smtc_ipi_q freeIPIq;
75 86
76 87
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
282 * phys_cpu_present_map and the logical/physical mappings. 293 * phys_cpu_present_map and the logical/physical mappings.
283 */ 294 */
284 295
285int __init mipsmt_build_cpu_map(int start_cpu_slot) 296int __init smtc_build_cpu_map(int start_cpu_slot)
286{ 297{
287 int i, ntcs; 298 int i, ntcs;
288 299
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
325 write_tc_c0_tcstatus((read_tc_c0_tcstatus() 336 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
326 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) 337 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
327 | TCSTATUS_A); 338 | TCSTATUS_A);
328 write_tc_c0_tccontext(0); 339 /*
340 * TCContext gets an offset from the base of the IPIQ array
341 * to be used in low-level code to detect the presence of
342 * an active IPI queue
343 */
344 write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
329 /* Bind tc to vpe */ 345 /* Bind tc to vpe */
330 write_tc_c0_tcbind(vpe); 346 write_tc_c0_tcbind(vpe);
331 /* In general, all TCs should have the same cpu_data indications */ 347 /* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
336 cpu_data[cpu].options &= ~MIPS_CPU_FPU; 352 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
337 cpu_data[cpu].vpe_id = vpe; 353 cpu_data[cpu].vpe_id = vpe;
338 cpu_data[cpu].tc_id = tc; 354 cpu_data[cpu].tc_id = tc;
355 /* Multi-core SMTC hasn't been tested, but be prepared */
356 cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
339} 357}
340 358
359/*
360 * Tweak to get Count registes in as close a sync as possible.
361 * Value seems good for 34K-class cores.
362 */
363
364#define CP0_SKEW 8
341 365
342void mipsmt_prepare_cpus(void) 366void smtc_prepare_cpus(int cpus)
343{ 367{
344 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; 368 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
345 unsigned long flags; 369 unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
363 IPIQ[i].head = IPIQ[i].tail = NULL; 387 IPIQ[i].head = IPIQ[i].tail = NULL;
364 spin_lock_init(&IPIQ[i].lock); 388 spin_lock_init(&IPIQ[i].lock);
365 IPIQ[i].depth = 0; 389 IPIQ[i].depth = 0;
366 atomic_set(&ipi_timer_latch[i], 0);
367 } 390 }
368 391
369 /* cpu_data index starts at zero */ 392 /* cpu_data index starts at zero */
370 cpu = 0; 393 cpu = 0;
371 cpu_data[cpu].vpe_id = 0; 394 cpu_data[cpu].vpe_id = 0;
372 cpu_data[cpu].tc_id = 0; 395 cpu_data[cpu].tc_id = 0;
396 cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
373 cpu++; 397 cpu++;
374 398
375 /* Report on boot-time options */ 399 /* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
484 write_vpe_c0_compare(0); 508 write_vpe_c0_compare(0);
485 /* Propagate Config7 */ 509 /* Propagate Config7 */
486 write_vpe_c0_config7(read_c0_config7()); 510 write_vpe_c0_config7(read_c0_config7());
487 write_vpe_c0_count(read_c0_count()); 511 write_vpe_c0_count(read_c0_count() + CP0_SKEW);
512 ehb();
488 } 513 }
489 /* enable multi-threading within VPE */ 514 /* enable multi-threading within VPE */
490 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); 515 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
556void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) 581void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
557{ 582{
558 extern u32 kernelsp[NR_CPUS]; 583 extern u32 kernelsp[NR_CPUS];
559 long flags; 584 unsigned long flags;
560 int mtflags; 585 int mtflags;
561 586
562 LOCK_MT_PRA(); 587 LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
585 610
586void smtc_init_secondary(void) 611void smtc_init_secondary(void)
587{ 612{
588 /*
589 * Start timer on secondary VPEs if necessary.
590 * plat_timer_setup has already have been invoked by init/main
591 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
592 * SMTC init code assigns TCs consdecutively and in ascending order
593 * to across available VPEs.
594 */
595 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
596 ((read_c0_tcbind() & TCBIND_CURVPE)
597 != cpu_data[smp_processor_id() - 1].vpe_id)){
598 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
599 }
600
601 local_irq_enable(); 613 local_irq_enable();
602} 614}
603 615
604void smtc_smp_finish(void) 616void smtc_smp_finish(void)
605{ 617{
618 int cpu = smp_processor_id();
619
620 /*
621 * Lowest-numbered CPU per VPE starts a clock tick.
622 * Like per_cpu_trap_init() hack, this assumes that
 623 * SMTC init code assigns TCs consecutively and
624 * in ascending order across available VPEs.
625 */
626 if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
627 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
628
606 printk("TC %d going on-line as CPU %d\n", 629 printk("TC %d going on-line as CPU %d\n",
607 cpu_data[smp_processor_id()].tc_id, smp_processor_id()); 630 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
608} 631}
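The predicate in smtc_smp_finish() fires exactly once per VPE because, as the comment notes, TCs are assigned consecutively across VPEs. A worked example for two VPEs with two TCs each (illustrative):

	/* cpu:     0   1   2   3
	 * vpe_id:  0   0   1   1
	 *
	 * cpu 0: boot TC, clock tick already running
	 * cpu 1: same vpe_id as cpu 0      -> no Compare write
	 * cpu 2: vpe_id differs from cpu 1 -> starts the VPE 1 tick
	 * cpu 3: same vpe_id as cpu 2      -> no Compare write
	 */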
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
753{ 776{
754 int tcstatus; 777 int tcstatus;
755 struct smtc_ipi *pipi; 778 struct smtc_ipi *pipi;
756 long flags; 779 unsigned long flags;
757 int mtflags; 780 int mtflags;
781 unsigned long tcrestart;
782 extern void r4k_wait_irqoff(void), __pastwait(void);
758 783
759 if (cpu == smp_processor_id()) { 784 if (cpu == smp_processor_id()) {
760 printk("Cannot Send IPI to self!\n"); 785 printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
771 pipi->arg = (void *)action; 796 pipi->arg = (void *)action;
772 pipi->dest = cpu; 797 pipi->dest = cpu;
773 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 798 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
774 if (type == SMTC_CLOCK_TICK)
775 atomic_inc(&ipi_timer_latch[cpu]);
776 /* If not on same VPE, enqueue and send cross-VPE interrupt */ 799 /* If not on same VPE, enqueue and send cross-VPE interrupt */
777 smtc_ipi_nq(&IPIQ[cpu], pipi); 800 smtc_ipi_nq(&IPIQ[cpu], pipi);
778 LOCK_CORE_PRA(); 801 LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
800 823
801 if ((tcstatus & TCSTATUS_IXMT) != 0) { 824 if ((tcstatus & TCSTATUS_IXMT) != 0) {
802 /* 825 /*
 803 * Spin-waiting here can deadlock, 826 * If we're in the irq-off version of the wait
804 * so we queue the message for the target TC. 827 * loop, we need to force exit from the wait and
828 * do a direct post of the IPI.
829 */
830 if (cpu_wait == r4k_wait_irqoff) {
831 tcrestart = read_tc_c0_tcrestart();
832 if (tcrestart >= (unsigned long)r4k_wait_irqoff
833 && tcrestart < (unsigned long)__pastwait) {
834 write_tc_c0_tcrestart(__pastwait);
835 tcstatus &= ~TCSTATUS_IXMT;
836 write_tc_c0_tcstatus(tcstatus);
837 goto postdirect;
838 }
839 }
840 /*
841 * Otherwise we queue the message for the target TC
 842 * to pick up when it does a local_irq_restore()
805 */ 843 */
806 write_tc_c0_tchalt(0); 844 write_tc_c0_tchalt(0);
807 UNLOCK_CORE_PRA(); 845 UNLOCK_CORE_PRA();
808 /* Try to reduce redundant timer interrupt messages */
809 if (type == SMTC_CLOCK_TICK) {
810 if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
811 smtc_ipi_nq(&freeIPIq, pipi);
812 return;
813 }
814 }
815 smtc_ipi_nq(&IPIQ[cpu], pipi); 846 smtc_ipi_nq(&IPIQ[cpu], pipi);
816 } else { 847 } else {
817 if (type == SMTC_CLOCK_TICK) 848postdirect:
818 atomic_inc(&ipi_timer_latch[cpu]);
819 post_direct_ipi(cpu, pipi); 849 post_direct_ipi(cpu, pipi);
820 write_tc_c0_tchalt(0); 850 write_tc_c0_tchalt(0);
821 UNLOCK_CORE_PRA(); 851 UNLOCK_CORE_PRA();
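The direct-post escape above works because r4k_wait_irqoff and the __pastwait label bracket the whole interrupts-off wait sequence. The same range test, factored into a helper for clarity (a sketch, not part of the patch):

	static inline int tc_stalled_in_irqoff_wait(unsigned long tcrestart)
	{
		/* A halted TC whose restart PC lies in [r4k_wait_irqoff,
		 * __pastwait) is sleeping with interrupts masked; bumping
		 * TCRestart to __pastwait makes it skip the wait. */
		return tcrestart >= (unsigned long)r4k_wait_irqoff &&
		       tcrestart < (unsigned long)__pastwait;
	}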
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
883 smp_call_function_interrupt(); 913 smp_call_function_interrupt();
884} 914}
885 915
886DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); 916DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
887 917
888void ipi_decode(struct smtc_ipi *pipi) 918void ipi_decode(struct smtc_ipi *pipi)
889{ 919{
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
891 struct clock_event_device *cd; 921 struct clock_event_device *cd;
892 void *arg_copy = pipi->arg; 922 void *arg_copy = pipi->arg;
893 int type_copy = pipi->type; 923 int type_copy = pipi->type;
894 int ticks;
895
896 smtc_ipi_nq(&freeIPIq, pipi); 924 smtc_ipi_nq(&freeIPIq, pipi);
897 switch (type_copy) { 925 switch (type_copy) {
898 case SMTC_CLOCK_TICK: 926 case SMTC_CLOCK_TICK:
899 irq_enter(); 927 irq_enter();
900 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; 928 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
901 cd = &per_cpu(smtc_dummy_clockevent_device, cpu); 929 cd = &per_cpu(mips_clockevent_device, cpu);
902 ticks = atomic_read(&ipi_timer_latch[cpu]); 930 cd->event_handler(cd);
903 atomic_sub(ticks, &ipi_timer_latch[cpu]);
904 while (ticks) {
905 cd->event_handler(cd);
906 ticks--;
907 }
908 irq_exit(); 931 irq_exit();
909 break; 932 break;
910 933
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
937 } 960 }
938} 961}
939 962
963/*
964 * Similar to smtc_ipi_replay(), but invoked from context restore,
 965 * so it reuses the current exception frame rather than setting up a
966 * new one with self_ipi.
967 */
968
940void deferred_smtc_ipi(void) 969void deferred_smtc_ipi(void)
941{ 970{
942 struct smtc_ipi *pipi; 971 int cpu = smp_processor_id();
943 unsigned long flags;
944/* DEBUG */
945 int q = smp_processor_id();
946 972
947 /* 973 /*
948 * Test is not atomic, but much faster than a dequeue, 974 * Test is not atomic, but much faster than a dequeue,
949 * and the vast majority of invocations will have a null queue. 975 * and the vast majority of invocations will have a null queue.
 976 * If irqs were disabled when this was called, then any IPIs queued
 977 * after our last test will be taken on the next irq_enable/restore.
978 * If interrupts were enabled, then any IPIs added after the
979 * last test will be taken directly.
950 */ 980 */
951 if (IPIQ[q].head != NULL) { 981
952 while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { 982 while (IPIQ[cpu].head != NULL) {
953 /* ipi_decode() should be called with interrupts off */ 983 struct smtc_ipi_q *q = &IPIQ[cpu];
954 local_irq_save(flags); 984 struct smtc_ipi *pipi;
985 unsigned long flags;
986
987 /*
988 * It may be possible we'll come in with interrupts
989 * already enabled.
990 */
991 local_irq_save(flags);
992
993 spin_lock(&q->lock);
994 pipi = __smtc_ipi_dq(q);
995 spin_unlock(&q->lock);
996 if (pipi != NULL)
955 ipi_decode(pipi); 997 ipi_decode(pipi);
956 local_irq_restore(flags); 998 /*
957 } 999 * The use of the __raw_local restore isn't
1000 * as obviously necessary here as in smtc_ipi_replay(),
1001 * but it's more efficient, given that we're already
1002 * running down the IPI queue.
1003 */
1004 __raw_local_irq_restore(flags);
958 } 1005 }
959} 1006}
960 1007
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
975 struct smtc_ipi *pipi; 1022 struct smtc_ipi *pipi;
976 unsigned long tcstatus; 1023 unsigned long tcstatus;
977 int sent; 1024 int sent;
978 long flags; 1025 unsigned long flags;
979 unsigned int mtflags; 1026 unsigned int mtflags;
980 unsigned int vpflags; 1027 unsigned int vpflags;
981 1028
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
1066 1113
1067/* 1114/*
1068 * SMTC-specific hacks invoked from elsewhere in the kernel. 1115 * SMTC-specific hacks invoked from elsewhere in the kernel.
1069 *
1070 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
1071 * called with interrupts disabled. We do rely on interrupts being disabled
1072 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
1073 * result in a recursive call to raw_local_irq_restore().
1074 */ 1116 */
1075 1117
1076static void __smtc_ipi_replay(void) 1118 /*
1119 * smtc_ipi_replay is called from raw_local_irq_restore
1120 */
1121
1122void smtc_ipi_replay(void)
1077{ 1123{
1078 unsigned int cpu = smp_processor_id(); 1124 unsigned int cpu = smp_processor_id();
1079 1125
1080 /* 1126 /*
1081 * To the extent that we've ever turned interrupts off, 1127 * To the extent that we've ever turned interrupts off,
1082 * we may have accumulated deferred IPIs. This is subtle. 1128 * we may have accumulated deferred IPIs. This is subtle.
1083 * If we use the smtc_ipi_qdepth() macro, we'll get an
1084 * exact number - but we'll also disable interrupts
1085 * and create a window of failure where a new IPI gets
1086 * queued after we test the depth but before we re-enable
1087 * interrupts. So long as IXMT never gets set, however,
1088 * we should be OK: If we pick up something and dispatch 1129 * we should be OK: If we pick up something and dispatch
1089 * it here, that's great. If we see nothing, but concurrent 1130 * it here, that's great. If we see nothing, but concurrent
1090 * with this operation, another TC sends us an IPI, IXMT 1131 * with this operation, another TC sends us an IPI, IXMT
1091 * is clear, and we'll handle it as a real pseudo-interrupt 1132 * is clear, and we'll handle it as a real pseudo-interrupt
1092 * and not a pseudo-pseudo interrupt. 1133 * and not a pseudo-pseudo interrupt. The important thing
1134 * is to do the last check for queued message *after* the
1135 * re-enabling of interrupts.
1093 */ 1136 */
1094 if (IPIQ[cpu].depth > 0) { 1137 while (IPIQ[cpu].head != NULL) {
1095 while (1) { 1138 struct smtc_ipi_q *q = &IPIQ[cpu];
1096 struct smtc_ipi_q *q = &IPIQ[cpu]; 1139 struct smtc_ipi *pipi;
1097 struct smtc_ipi *pipi; 1140 unsigned long flags;
1098 extern void self_ipi(struct smtc_ipi *); 1141
1099 1142 /*
1100 spin_lock(&q->lock); 1143 * It's just possible we'll come in with interrupts
1101 pipi = __smtc_ipi_dq(q); 1144 * already enabled.
1102 spin_unlock(&q->lock); 1145 */
1103 if (!pipi) 1146 local_irq_save(flags);
1104 break; 1147
1148 spin_lock(&q->lock);
1149 pipi = __smtc_ipi_dq(q);
1150 spin_unlock(&q->lock);
1151 /*
 1152 * But use a raw restore here to avoid recursion.
1153 */
1154 __raw_local_irq_restore(flags);
1105 1155
1156 if (pipi) {
1106 self_ipi(pipi); 1157 self_ipi(pipi);
1107 smtc_cpu_stats[cpu].selfipis++; 1158 smtc_cpu_stats[cpu].selfipis++;
1108 } 1159 }
1109 } 1160 }
1110} 1161}
1111 1162
1112void smtc_ipi_replay(void)
1113{
1114 raw_local_irq_disable();
1115 __smtc_ipi_replay();
1116}
1117
1118EXPORT_SYMBOL(smtc_ipi_replay); 1163EXPORT_SYMBOL(smtc_ipi_replay);
1119 1164
1120void smtc_idle_loop_hook(void) 1165void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
1193 } 1238 }
1194 } 1239 }
1195 1240
1196 /*
1197 * Now that we limit outstanding timer IPIs, check for hung TC
1198 */
1199 for (tc = 0; tc < NR_CPUS; tc++) {
1200 /* Don't check ourself - we'll dequeue IPIs just below */
1201 if ((tc != smp_processor_id()) &&
1202 atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
1203 if (clock_hang_reported[tc] == 0) {
1204 pdb_msg += sprintf(pdb_msg,
1205 "TC %d looks hung with timer latch at %d\n",
1206 tc, atomic_read(&ipi_timer_latch[tc]));
1207 clock_hang_reported[tc]++;
1208 }
1209 }
1210 }
1211 emt(mtflags); 1241 emt(mtflags);
1212 local_irq_restore(flags); 1242 local_irq_restore(flags);
1213 if (pdb_msg != &id_ho_db_msg[0]) 1243 if (pdb_msg != &id_ho_db_msg[0])
1214 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); 1244 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1215#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 1245#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1216 1246
1217 /* 1247 smtc_ipi_replay();
1218 * Replay any accumulated deferred IPIs. If "Instant Replay"
1219 * is in use, there should never be any.
1220 */
1221#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
1222 {
1223 unsigned long flags;
1224
1225 local_irq_save(flags);
1226 __smtc_ipi_replay();
1227 local_irq_restore(flags);
1228 }
1229#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
1230} 1248}
1231 1249
1232void smtc_soft_dump(void) 1250void smtc_soft_dump(void)
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
1242 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1260 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1243 } 1261 }
1244 smtc_ipi_qdump(); 1262 smtc_ipi_qdump();
1245 printk("Timer IPI Backlogs:\n");
1246 for (i=0; i < NR_CPUS; i++) {
1247 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1248 }
1249 printk("%d Recoveries of \"stolen\" FPU\n", 1263 printk("%d Recoveries of \"stolen\" FPU\n",
1250 atomic_read(&smtc_fpu_recoveries)); 1264 atomic_read(&smtc_fpu_recoveries));
1251} 1265}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 426cced1e9d..b602ac6eb47 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,9 @@
46#include <asm/types.h> 46#include <asm/types.h>
47#include <asm/stacktrace.h> 47#include <asm/stacktrace.h>
48 48
49extern void check_wait(void);
50extern asmlinkage void r4k_wait(void);
51extern asmlinkage void rollback_handle_int(void);
49extern asmlinkage void handle_int(void); 52extern asmlinkage void handle_int(void);
50extern asmlinkage void handle_tlbm(void); 53extern asmlinkage void handle_tlbm(void);
51extern asmlinkage void handle_tlbl(void); 54extern asmlinkage void handle_tlbl(void);
@@ -373,8 +376,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
373 do_exit(SIGSEGV); 376 do_exit(SIGSEGV);
374} 377}
375 378
376extern const struct exception_table_entry __start___dbe_table[]; 379extern struct exception_table_entry __start___dbe_table[];
377extern const struct exception_table_entry __stop___dbe_table[]; 380extern struct exception_table_entry __stop___dbe_table[];
378 381
379__asm__( 382__asm__(
380" .section __dbe_table, \"a\"\n" 383" .section __dbe_table, \"a\"\n"
@@ -822,8 +825,10 @@ static void mt_ase_fp_affinity(void)
822 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 825 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
823 cpumask_t tmask; 826 cpumask_t tmask;
824 827
825 cpus_and(tmask, current->thread.user_cpus_allowed, 828 current->thread.user_cpus_allowed
826 mt_fpu_cpumask); 829 = current->cpus_allowed;
830 cpus_and(tmask, current->cpus_allowed,
831 mt_fpu_cpumask);
827 set_cpus_allowed(current, tmask); 832 set_cpus_allowed(current, tmask);
828 set_thread_flag(TIF_FPUBOUND); 833 set_thread_flag(TIF_FPUBOUND);
829 } 834 }
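The reordering above snapshots the task's full affinity into thread.user_cpus_allowed before narrowing cpus_allowed to FPU-equipped CPUs, so the original mask survives the binding. A sketch of what an unbind path can then do (illustrative; the actual restore logic lives in the FP affinity code, not in this hunk):

	if (test_thread_flag(TIF_FPUBOUND)) {
		clear_thread_flag(TIF_FPUBOUND);
		/* give back what the user originally asked for */
		set_cpus_allowed(current, current->thread.user_cpus_allowed);
	}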
@@ -1200,7 +1205,7 @@ void *set_except_vector(int n, void *addr)
1200 if (n == 0 && cpu_has_divec) { 1205 if (n == 0 && cpu_has_divec) {
1201 *(u32 *)(ebase + 0x200) = 0x08000000 | 1206 *(u32 *)(ebase + 0x200) = 0x08000000 |
1202 (0x03ffffff & (handler >> 2)); 1207 (0x03ffffff & (handler >> 2));
1203 flush_icache_range(ebase + 0x200, ebase + 0x204); 1208 local_flush_icache_range(ebase + 0x200, ebase + 0x204);
1204 } 1209 }
1205 return (void *)old_handler; 1210 return (void *)old_handler;
1206} 1211}
@@ -1251,6 +1256,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1251 1256
1252 extern char except_vec_vi, except_vec_vi_lui; 1257 extern char except_vec_vi, except_vec_vi_lui;
1253 extern char except_vec_vi_ori, except_vec_vi_end; 1258 extern char except_vec_vi_ori, except_vec_vi_end;
1259 extern char rollback_except_vec_vi;
1260 char *vec_start = (cpu_wait == r4k_wait) ?
1261 &rollback_except_vec_vi : &except_vec_vi;
1254#ifdef CONFIG_MIPS_MT_SMTC 1262#ifdef CONFIG_MIPS_MT_SMTC
1255 /* 1263 /*
1256 * We need to provide the SMTC vectored interrupt handler 1264 * We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1266,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1258 * Status.IM bit to be masked before going there. 1266 * Status.IM bit to be masked before going there.
1259 */ 1267 */
1260 extern char except_vec_vi_mori; 1268 extern char except_vec_vi_mori;
1261 const int mori_offset = &except_vec_vi_mori - &except_vec_vi; 1269 const int mori_offset = &except_vec_vi_mori - vec_start;
1262#endif /* CONFIG_MIPS_MT_SMTC */ 1270#endif /* CONFIG_MIPS_MT_SMTC */
1263 const int handler_len = &except_vec_vi_end - &except_vec_vi; 1271 const int handler_len = &except_vec_vi_end - vec_start;
1264 const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1272 const int lui_offset = &except_vec_vi_lui - vec_start;
1265 const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1273 const int ori_offset = &except_vec_vi_ori - vec_start;
1266 1274
1267 if (handler_len > VECTORSPACING) { 1275 if (handler_len > VECTORSPACING) {
1268 /* 1276 /*
@@ -1272,7 +1280,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1272 panic("VECTORSPACING too small"); 1280 panic("VECTORSPACING too small");
1273 } 1281 }
1274 1282
1275 memcpy(b, &except_vec_vi, handler_len); 1283 memcpy(b, vec_start, handler_len);
1276#ifdef CONFIG_MIPS_MT_SMTC 1284#ifdef CONFIG_MIPS_MT_SMTC
1277 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1285 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1278 1286
@@ -1283,7 +1291,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1283 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1291 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1284 w = (u32 *)(b + ori_offset); 1292 w = (u32 *)(b + ori_offset);
1285 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); 1293 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
1286 flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); 1294 local_flush_icache_range((unsigned long)b,
1295 (unsigned long)(b+handler_len));
1287 } 1296 }
1288 else { 1297 else {
1289 /* 1298 /*
@@ -1295,7 +1304,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1295 w = (u32 *)b; 1304 w = (u32 *)b;
1296 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ 1305 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
1297 *w = 0; 1306 *w = 0;
1298 flush_icache_range((unsigned long)b, (unsigned long)(b+8)); 1307 local_flush_icache_range((unsigned long)b,
1308 (unsigned long)(b+8));
1299 } 1309 }
1300 1310
1301 return (void *)old_handler; 1311 return (void *)old_handler;
@@ -1515,7 +1525,7 @@ void __cpuinit per_cpu_trap_init(void)
1515void __init set_handler(unsigned long offset, void *addr, unsigned long size) 1525void __init set_handler(unsigned long offset, void *addr, unsigned long size)
1516{ 1526{
1517 memcpy((void *)(ebase + offset), addr, size); 1527 memcpy((void *)(ebase + offset), addr, size);
1518 flush_icache_range(ebase + offset, ebase + offset + size); 1528 local_flush_icache_range(ebase + offset, ebase + offset + size);
1519} 1529}
1520 1530
1521static char panic_null_cerr[] __cpuinitdata = 1531static char panic_null_cerr[] __cpuinitdata =
@@ -1552,6 +1562,10 @@ void __init trap_init(void)
1552 extern char except_vec3_generic, except_vec3_r4000; 1562 extern char except_vec3_generic, except_vec3_r4000;
1553 extern char except_vec4; 1563 extern char except_vec4;
1554 unsigned long i; 1564 unsigned long i;
1565 int rollback;
1566
1567 check_wait();
1568 rollback = (cpu_wait == r4k_wait);
1555 1569
1556#if defined(CONFIG_KGDB) 1570#if defined(CONFIG_KGDB)
1557 if (kgdb_early_setup) 1571 if (kgdb_early_setup)
@@ -1616,7 +1630,7 @@ void __init trap_init(void)
1616 if (board_be_init) 1630 if (board_be_init)
1617 board_be_init(); 1631 board_be_init();
1618 1632
1619 set_except_vector(0, handle_int); 1633 set_except_vector(0, rollback ? rollback_handle_int : handle_int);
1620 set_except_vector(1, handle_tlbm); 1634 set_except_vector(1, handle_tlbm);
1621 set_except_vector(2, handle_tlbl); 1635 set_except_vector(2, handle_tlbl);
1622 set_except_vector(3, handle_tlbs); 1636 set_except_vector(3, handle_tlbs);
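rollback_handle_int pairs with r4k_wait: if an interrupt arrives while a CPU is parked in the wait sequence, the handler rolls EPC back to the start of the sequence so the pending-work tests rerun before the wait is re-entered. Pseudo-C for the idea (the real check is in genex.S; the region-size constant here is illustrative):

	if (regs->cp0_epc >= (unsigned long)r4k_wait &&
	    regs->cp0_epc < (unsigned long)r4k_wait + ROLLBACK_REGION)
		regs->cp0_epc = (unsigned long)r4k_wait;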
@@ -1680,6 +1694,8 @@ void __init trap_init(void)
1680 signal32_init(); 1694 signal32_init();
1681#endif 1695#endif
1682 1696
1683 flush_icache_range(ebase, ebase + 0x400); 1697 local_flush_icache_range(ebase, ebase + 0x400);
1684 flush_tlb_handlers(); 1698 flush_tlb_handlers();
1699
1700 sort_extable(__start___dbe_table, __stop___dbe_table);
1685} 1701}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index b5470ceb418..afb119f3568 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
36 SCHED_TEXT 36 SCHED_TEXT
37 LOCK_TEXT 37 LOCK_TEXT
38 KPROBES_TEXT 38 KPROBES_TEXT
39 *(.text.*)
39 *(.fixup) 40 *(.fixup)
40 *(.gnu.warning) 41 *(.gnu.warning)
41 } :text = 0 42 } :text = 0
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 8d7784122c1..edac9892c51 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -39,12 +39,14 @@
39#ifdef USE_DOUBLE 39#ifdef USE_DOUBLE
40 40
41#define LOAD ld 41#define LOAD ld
42#define LOAD32 lwu
42#define ADD daddu 43#define ADD daddu
43#define NBYTES 8 44#define NBYTES 8
44 45
45#else 46#else
46 47
47#define LOAD lw 48#define LOAD lw
49#define LOAD32 lw
48#define ADD addu 50#define ADD addu
49#define NBYTES 4 51#define NBYTES 4
50 52
@@ -60,6 +62,14 @@
60 ADD sum, v1; \ 62 ADD sum, v1; \
61 .set pop 63 .set pop
62 64
65#define ADDC32(sum,reg) \
66 .set push; \
67 .set noat; \
68 addu sum, reg; \
69 sltu v1, sum, reg; \
70 addu sum, v1; \
71 .set pop
72
63#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ 73#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
64 LOAD _t0, (offset + UNIT(0))(src); \ 74 LOAD _t0, (offset + UNIT(0))(src); \
65 LOAD _t1, (offset + UNIT(1))(src); \ 75 LOAD _t1, (offset + UNIT(1))(src); \
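ADDC32 mirrors ADDC but pins the carry fold to 32-bit arithmetic, which is what the caller-supplied partial checksum needs even when internal accumulation is 64-bit. A C model of the macro (illustrative):

	static inline unsigned int addc32(unsigned int sum, unsigned int v)
	{
		sum += v;	/* addu sum, reg      */
		if (sum < v)	/* sltu v1, sum, reg  */
			sum++;	/* addu sum, v1       */
		return sum;	/* end-around carry   */
	}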
@@ -132,7 +142,7 @@ LEAF(csum_partial)
132 beqz t8, .Lqword_align 142 beqz t8, .Lqword_align
133 andi t8, src, 0x8 143 andi t8, src, 0x8
134 144
135 lw t0, 0x00(src) 145 LOAD32 t0, 0x00(src)
136 LONG_SUBU a1, a1, 0x4 146 LONG_SUBU a1, a1, 0x4
137 ADDC(sum, t0) 147 ADDC(sum, t0)
138 PTR_ADDU src, src, 0x4 148 PTR_ADDU src, src, 0x4
@@ -211,7 +221,7 @@ LEAF(csum_partial)
211 LONG_SRL t8, t8, 0x2 221 LONG_SRL t8, t8, 0x2
212 222
213.Lend_words: 223.Lend_words:
214 lw t0, (src) 224 LOAD32 t0, (src)
215 LONG_SUBU t8, t8, 0x1 225 LONG_SUBU t8, t8, 0x1
216 ADDC(sum, t0) 226 ADDC(sum, t0)
217 .set reorder /* DADDI_WAR */ 227 .set reorder /* DADDI_WAR */
@@ -230,6 +240,9 @@ LEAF(csum_partial)
230 /* Still a full word to go */ 240 /* Still a full word to go */
231 ulw t1, (src) 241 ulw t1, (src)
232 PTR_ADDIU src, 4 242 PTR_ADDIU src, 4
243#ifdef USE_DOUBLE
244 dsll t1, t1, 32 /* clear lower 32bit */
245#endif
233 ADDC(sum, t1) 246 ADDC(sum, t1)
234 247
2351: move t1, zero 2481: move t1, zero
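The dsll is needed because lw/ulw sign-extend on 64-bit CPUs, and the 0xffffffff extension bits can pollute folds that are not end-around at the full 64-bit width; shifting the word into the upper half keeps the low 32 bits clean. The shift itself is checksum-neutral, since 2^32 ≡ 1 (mod 2^16 - 1). A C sketch demonstrating the neutrality (illustrative):

	static inline unsigned int fold64(unsigned long long s)
	{
		s = (s & 0xffffffffULL) + (s >> 32);	/* 64 -> 33 bits */
		s = (s & 0xffffffffULL) + (s >> 32);	/* 33 -> 32 bits */
		return (unsigned int)s;
	}

	/* For any 32-bit w, fold64(w) == fold64((unsigned long long)w << 32),
	 * so accumulating w << 32 instead of a sign-extended w is safe. */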
@@ -280,7 +293,7 @@ LEAF(csum_partial)
2801: 2931:
281 .set reorder 294 .set reorder
282 /* Add the passed partial csum. */ 295 /* Add the passed partial csum. */
283 ADDC(sum, a2) 296 ADDC32(sum, a2)
284 jr ra 297 jr ra
285 .set noreorder 298 .set noreorder
286 END(csum_partial) 299 END(csum_partial)
@@ -681,7 +694,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
681 .set pop 694 .set pop
6821: 6951:
683 .set reorder 696 .set reorder
684 ADDC(sum, psum) 697 ADDC32(sum, psum)
685 jr ra 698 jr ra
686 .set noreorder 699 .set noreorder
687 700
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 27a5b466c85..5500c20c79a 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -320,6 +320,7 @@ void __cpuinit r3k_cache_init(void)
320 flush_cache_range = r3k_flush_cache_range; 320 flush_cache_range = r3k_flush_cache_range;
321 flush_cache_page = r3k_flush_cache_page; 321 flush_cache_page = r3k_flush_cache_page;
322 flush_icache_range = r3k_flush_icache_range; 322 flush_icache_range = r3k_flush_icache_range;
323 local_flush_icache_range = r3k_flush_icache_range;
323 324
324 flush_cache_sigtramp = r3k_flush_cache_sigtramp; 325 flush_cache_sigtramp = r3k_flush_cache_sigtramp;
325 local_flush_data_cache_page = local_r3k_flush_data_cache_page; 326 local_flush_data_cache_page = local_r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 71df3390c07..6e99665ae86 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -543,12 +543,8 @@ struct flush_icache_range_args {
543 unsigned long end; 543 unsigned long end;
544}; 544};
545 545
546static inline void local_r4k_flush_icache_range(void *args) 546static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
547{ 547{
548 struct flush_icache_range_args *fir_args = args;
549 unsigned long start = fir_args->start;
550 unsigned long end = fir_args->end;
551
552 if (!cpu_has_ic_fills_f_dc) { 548 if (!cpu_has_ic_fills_f_dc) {
553 if (end - start >= dcache_size) { 549 if (end - start >= dcache_size) {
554 r4k_blast_dcache(); 550 r4k_blast_dcache();
@@ -564,6 +560,15 @@ static inline void local_r4k_flush_icache_range(void *args)
564 protected_blast_icache_range(start, end); 560 protected_blast_icache_range(start, end);
565} 561}
566 562
563static inline void local_r4k_flush_icache_range_ipi(void *args)
564{
565 struct flush_icache_range_args *fir_args = args;
566 unsigned long start = fir_args->start;
567 unsigned long end = fir_args->end;
568
569 local_r4k_flush_icache_range(start, end);
570}
571
567static void r4k_flush_icache_range(unsigned long start, unsigned long end) 572static void r4k_flush_icache_range(unsigned long start, unsigned long end)
568{ 573{
569 struct flush_icache_range_args args; 574 struct flush_icache_range_args args;
@@ -571,7 +576,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
571 args.start = start; 576 args.start = start;
572 args.end = end; 577 args.end = end;
573 578
574 r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); 579 r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
575 instruction_hazard(); 580 instruction_hazard();
576} 581}
577 582
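The split above is the usual marshalling pattern: r4k_on_each_cpu() callbacks must take a void *, while the new local_flush_icache_range hook wants the typed signature for direct local calls. The shape of the pattern (names illustrative):

	struct range_args { unsigned long start, end; };

	static void typed_op(unsigned long start, unsigned long end)
	{
		/* the real work, callable directly on the local CPU */
	}

	static void typed_op_ipi(void *info)	/* on_each_cpu() shim */
	{
		struct range_args *a = info;

		typed_op(a->start, a->end);
	}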
@@ -1375,6 +1380,7 @@ void __cpuinit r4k_cache_init(void)
1375 local_flush_data_cache_page = local_r4k_flush_data_cache_page; 1380 local_flush_data_cache_page = local_r4k_flush_data_cache_page;
1376 flush_data_cache_page = r4k_flush_data_cache_page; 1381 flush_data_cache_page = r4k_flush_data_cache_page;
1377 flush_icache_range = r4k_flush_icache_range; 1382 flush_icache_range = r4k_flush_icache_range;
1383 local_flush_icache_range = local_r4k_flush_icache_range;
1378 1384
1379#if defined(CONFIG_DMA_NONCOHERENT) 1385#if defined(CONFIG_DMA_NONCOHERENT)
1380 if (coherentio) { 1386 if (coherentio) {
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index a9f7f1f5e9b..f7c8f9ce39c 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -362,6 +362,7 @@ void __cpuinit tx39_cache_init(void)
362 flush_cache_range = (void *) tx39h_flush_icache_all; 362 flush_cache_range = (void *) tx39h_flush_icache_all;
363 flush_cache_page = (void *) tx39h_flush_icache_all; 363 flush_cache_page = (void *) tx39h_flush_icache_all;
364 flush_icache_range = (void *) tx39h_flush_icache_all; 364 flush_icache_range = (void *) tx39h_flush_icache_all;
365 local_flush_icache_range = (void *) tx39h_flush_icache_all;
365 366
366 flush_cache_sigtramp = (void *) tx39h_flush_icache_all; 367 flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
367 local_flush_data_cache_page = (void *) tx39h_flush_icache_all; 368 local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
@@ -390,6 +391,7 @@ void __cpuinit tx39_cache_init(void)
390 flush_cache_range = tx39_flush_cache_range; 391 flush_cache_range = tx39_flush_cache_range;
391 flush_cache_page = tx39_flush_cache_page; 392 flush_cache_page = tx39_flush_cache_page;
392 flush_icache_range = tx39_flush_icache_range; 393 flush_icache_range = tx39_flush_icache_range;
394 local_flush_icache_range = tx39_flush_icache_range;
393 395
394 flush_cache_sigtramp = tx39_flush_cache_sigtramp; 396 flush_cache_sigtramp = tx39_flush_cache_sigtramp;
395 local_flush_data_cache_page = local_tx39_flush_data_cache_page; 397 local_flush_data_cache_page = local_tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 034e8506f6e..1eb7c71e3d6 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -29,6 +29,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
29void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, 29void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
30 unsigned long pfn); 30 unsigned long pfn);
31void (*flush_icache_range)(unsigned long start, unsigned long end); 31void (*flush_icache_range)(unsigned long start, unsigned long end);
32void (*local_flush_icache_range)(unsigned long start, unsigned long end);
32 33
33void (*__flush_cache_vmap)(void); 34void (*__flush_cache_vmap)(void);
34void (*__flush_cache_vunmap)(void); 35void (*__flush_cache_vunmap)(void);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 76da73a5ab3..979cf919728 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1273,10 +1273,10 @@ void __cpuinit build_tlb_refill_handler(void)
1273 1273
1274void __cpuinit flush_tlb_handlers(void) 1274void __cpuinit flush_tlb_handlers(void)
1275{ 1275{
1276 flush_icache_range((unsigned long)handle_tlbl, 1276 local_flush_icache_range((unsigned long)handle_tlbl,
1277 (unsigned long)handle_tlbl + sizeof(handle_tlbl)); 1277 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
1278 flush_icache_range((unsigned long)handle_tlbs, 1278 local_flush_icache_range((unsigned long)handle_tlbs,
1279 (unsigned long)handle_tlbs + sizeof(handle_tlbs)); 1279 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
1280 flush_icache_range((unsigned long)handle_tlbm, 1280 local_flush_icache_range((unsigned long)handle_tlbm,
1281 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 1281 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
1282} 1282}
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32..cef2db8d222 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
15obj-$(CONFIG_PCI) += malta-pci.o 15obj-$(CONFIG_PCI) += malta-pci.o
16 16
17# FIXME FIXME FIXME 17# FIXME FIXME FIXME
18obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o 18obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
19 19
20EXTRA_CFLAGS += -Werror 20EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e4945..f84a46a8ae6 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
84 84
85static void __init msmtc_smp_setup(void) 85static void __init msmtc_smp_setup(void)
86{ 86{
87 mipsmt_build_cpu_map(0); 87 /*
88 * we won't get the definitive value until
89 * we've run smtc_prepare_cpus later, but
90 * we would appear to need an upper bound now.
91 */
92 smp_num_siblings = smtc_build_cpu_map(0);
88} 93}
89 94
90static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95static void __init msmtc_prepare_cpus(unsigned int max_cpus)
91{ 96{
92 mipsmt_prepare_cpus(); 97 smtc_prepare_cpus(max_cpus);
93} 98}
94 99
95struct plat_smp_ops msmtc_smp_ops = { 100struct plat_smp_ops msmtc_smp_ops = {
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 15e01aec37f..c8c32f417b6 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o 15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o 16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o
17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o 17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
18obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
18 19
19# 20#
20# These are still pretty much in the old state, watch, go blind. 21# These are still pretty much in the old state, watch, go blind.
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
new file mode 100644
index 00000000000..bea9b6cdfdb
--- /dev/null
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
10 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
12 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
13 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
15 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
16 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
17 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
18 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/ssb/ssb.h>
28
29int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
30{
31 return 0;
32}
33
34int pcibios_plat_dev_init(struct pci_dev *dev)
35{
36 int res;
37 u8 slot, pin;
38
39 res = ssb_pcibios_plat_dev_init(dev);
40 if (res < 0) {
41 printk(KERN_ALERT "PCI: Failed to init device %s\n",
42 pci_name(dev));
43 return res;
44 }
45
46 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
47 slot = PCI_SLOT(dev->devfn);
48 res = ssb_pcibios_map_irq(dev, slot, pin);
49
50 /* IRQ-0 and IRQ-1 are software interrupts. */
51 if (res < 2) {
52 printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
53 pci_name(dev));
54 return res;
55 }
56
57 dev->irq = res;
58 return 0;
59}
60
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index ce92f82b16d..f97ab146101 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
143 */ 143 */
144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
145{ 145{
146 return 0;
147}
148
 149/* Most MIPS systems have straightforward swizzling needs. */
150static inline u8 bridge_swizzle(u8 pin, u8 slot)
151{
152 return (((pin - 1) + slot) % 4) + 1;
153}
154
155static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
156{
157 while (dev->bus->parent) {
158 /* Move up the chain of bridges. */
159 dev = dev->bus->self;
160 }
161
162 return dev;
163}
164
165/* Do platform specific device initialization at pci_enable_device() time */
166int pcibios_plat_dev_init(struct pci_dev *dev)
167{
146 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); 168 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
147 int irq = bc->pci_int[slot]; 169 struct pci_dev *rdev = bridge_root_dev(dev);
170 int slot = PCI_SLOT(rdev->devfn);
171 int irq;
148 172
173 irq = bc->pci_int[slot];
149 if (irq == -1) { 174 if (irq == -1) {
150 irq = bc->pci_int[slot] = request_bridge_irq(bc); 175 irq = request_bridge_irq(bc);
151 if (irq < 0) 176 if (irq < 0)
152 panic("Can't allocate interrupt for PCI device %s\n", 177 return irq;
153 pci_name(dev)); 178
179 bc->pci_int[slot] = irq;
154 } 180 }
155 181
156 irq_to_bridge[irq] = bc; 182 irq_to_bridge[irq] = bc;
157 irq_to_slot[irq] = slot; 183 irq_to_slot[irq] = slot;
158 184
159 return irq; 185 dev->irq = irq;
160}
161 186
162/* Do platform specific device initialization at pci_enable_device() time */
163int pcibios_plat_dev_init(struct pci_dev *dev)
164{
165 return 0; 187 return 0;
166} 188}
167 189
@@ -205,6 +227,7 @@ int pcibus_to_node(struct pci_bus *bus)
205 227
206 return bc->nasid; 228 return bc->nasid;
207} 229}
230EXPORT_SYMBOL(pcibus_to_node);
208 231
209DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, 232DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
210 pci_fixup_ioc3); 233 pci_fixup_ioc3);
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 44fb0a62877..82ab395efa3 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -49,8 +49,6 @@
49#define GPIO_ALE (1 << 0x0a) 49#define GPIO_ALE (1 << 0x0a)
50#define GPIO_CLE (1 << 0x0b) 50#define GPIO_CLE (1 << 0x0b)
51 51
52extern char *board_type;
53
54static struct resource korina_dev0_res[] = { 52static struct resource korina_dev0_res[] = {
55 { 53 {
56 .name = "korina_regs", 54 .name = "korina_regs",
@@ -265,14 +263,6 @@ static void __init parse_mac_addr(char *macstr)
265} 263}
266 264
267 265
268/* DEVICE CONTROLLER 1 */
269#define CFG_DC_DEV1 ((void *)0xb8010010)
270#define CFG_DC_DEV2 ((void *)0xb8010020)
271#define CFG_DC_DEVBASE 0x0
272#define CFG_DC_DEVMASK 0x4
273#define CFG_DC_DEVC 0x8
274#define CFG_DC_DEVTC 0xC
275
276/* NAND definitions */ 266/* NAND definitions */
277#define NAND_CHIP_DELAY 25 267#define NAND_CHIP_DELAY 25
278 268
@@ -301,16 +291,16 @@ static void __init rb532_nand_setup(void)
301static int __init plat_setup_devices(void) 291static int __init plat_setup_devices(void)
302{ 292{
303 /* Look for the CF card reader */ 293 /* Look for the CF card reader */
304 if (!readl(CFG_DC_DEV1 + CFG_DC_DEVMASK)) 294 if (!readl(IDT434_REG_BASE + DEV1MASK))
305 rb532_devs[1] = NULL; 295 rb532_devs[1] = NULL;
306 else { 296 else {
307 cf_slot0_res[0].start = 297 cf_slot0_res[0].start =
308 readl(CFG_DC_DEV1 + CFG_DC_DEVBASE); 298 readl(IDT434_REG_BASE + DEV1BASE);
309 cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000; 299 cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000;
310 } 300 }
311 301
312 /* Read the NAND resources from the device controller */ 302 /* Read the NAND resources from the device controller */
313 nand_slot0_res[0].start = readl(CFG_DC_DEV2 + CFG_DC_DEVBASE); 303 nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE);
314 nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000; 304 nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;
315 305
316 /* Initialise the NAND device */ 306 /* Initialise the NAND device */
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 60141235ec4..52486c4d2b0 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -150,7 +150,7 @@ static int __init sgiseeq_devinit(void)
150 return res; 150 return res;
151 151
152 /* Second HPC is missing? */ 152 /* Second HPC is missing? */
153 if (!ip22_is_fullhouse() || 153 if (ip22_is_fullhouse() ||
154 get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) 154 get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1]))
155 return 0; 155 return 0;
156 156
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 1bc57d0f4c5..fe6bee09cec 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -53,6 +53,7 @@ txx9_reg_res_init(unsigned int pcode, unsigned long base, unsigned long size)
53 txx9_ce_res[i].name = txx9_ce_res_name[i]; 53 txx9_ce_res[i].name = txx9_ce_res_name[i];
54 } 54 }
55 55
56 txx9_pcode = pcode;
56 sprintf(txx9_pcode_str, "TX%x", pcode); 57 sprintf(txx9_pcode_str, "TX%x", pcode);
57 if (base) { 58 if (base) {
58 txx9_reg_res.start = base & 0xfffffffffULL; 59 txx9_reg_res.start = base & 0xfffffffffULL;
@@ -328,6 +329,9 @@ void __init arch_init_irq(void)
328 329
329void __init plat_time_init(void) 330void __init plat_time_init(void)
330{ 331{
332#ifdef CONFIG_CPU_TX49XX
333 mips_hpt_frequency = txx9_cpu_clock / 2;
334#endif
331 txx9_board_vec->time_init(); 335 txx9_board_vec->time_init();
332} 336}
333 337
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index cba36a247e3..92dd1a0ca35 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -72,6 +72,7 @@ static void irq_dispatch(unsigned int irq)
72 cascade = irq_cascade + irq; 72 cascade = irq_cascade + irq;
73 if (cascade->get_irq != NULL) { 73 if (cascade->get_irq != NULL) {
74 unsigned int source_irq = irq; 74 unsigned int source_irq = irq;
75 int ret;
75 desc = irq_desc + source_irq; 76 desc = irq_desc + source_irq;
76 if (desc->chip->mask_ack) 77 if (desc->chip->mask_ack)
77 desc->chip->mask_ack(source_irq); 78 desc->chip->mask_ack(source_irq);
@@ -79,8 +80,9 @@ static void irq_dispatch(unsigned int irq)
79 desc->chip->mask(source_irq); 80 desc->chip->mask(source_irq);
80 desc->chip->ack(source_irq); 81 desc->chip->ack(source_irq);
81 } 82 }
82 irq = cascade->get_irq(irq); 83 ret = cascade->get_irq(irq);
83 if (irq < 0) 84 irq = ret;
85 if (ret < 0)
84 atomic_inc(&irq_err_count); 86 atomic_inc(&irq_err_count);
85 else 87 else
86 irq_dispatch(irq); 88 irq_dispatch(irq);
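The vr41xx change fixes a signedness bug: irq_dispatch() takes an unsigned int, so the old `if (irq < 0)` was always false and an error return from cascade->get_irq() would be dispatched as a huge IRQ number. The bug class in miniature (handle_error() is a hypothetical stand-in):

	unsigned int irq = (unsigned int)-1;	/* helper's error return */

	if (irq < 0)		/* always false: irq is unsigned */
		handle_error();	/* unreachable */

Routing the return value through a signed int, as the patch does, makes the error check meaningful.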