Diffstat (limited to 'arch/mips/kernel')
 arch/mips/kernel/Makefile            |    8
 arch/mips/kernel/cevt-bcm1480.c      |    1
 arch/mips/kernel/cevt-ds1287.c       |    1
 arch/mips/kernel/cevt-gt641xx.c      |    1
 arch/mips/kernel/cevt-r4k.c          |    3
 arch/mips/kernel/cevt-sb1250.c       |    1
 arch/mips/kernel/cevt-smtc.c         |    1
 arch/mips/kernel/cevt-txx9.c         |    4
 arch/mips/kernel/cpu-bugs64.c        |    2
 arch/mips/kernel/cpu-probe.c         |  162
 arch/mips/kernel/csrc-bcm1480.c      |    3
 arch/mips/kernel/csrc-ioasic.c       |    4
 arch/mips/kernel/csrc-powertv.c      |   35
 arch/mips/kernel/csrc-r4k.c          |    4
 arch/mips/kernel/csrc-sb1250.c       |    3
 arch/mips/kernel/entry.S             |    7
 arch/mips/kernel/ftrace.c            |  184
 arch/mips/kernel/i8253.c             |   79
 arch/mips/kernel/i8259.c             |   66
 arch/mips/kernel/irq-gic.c           |   47
 arch/mips/kernel/irq-gt641xx.c       |   30
 arch/mips/kernel/irq-msc01.c         |   63
 arch/mips/kernel/irq-rm7000.c        |   21
 arch/mips/kernel/irq-rm9000.c        |   54
 arch/mips/kernel/irq.c               |   75
 arch/mips/kernel/irq_cpu.c           |   51
 arch/mips/kernel/irq_txx9.c          |   33
 arch/mips/kernel/jump_label.c        |   54
 arch/mips/kernel/linux32.c           |   13
 arch/mips/kernel/mips-mt-fpaff.c     |    2
 arch/mips/kernel/mips_machine.c      |   86
 arch/mips/kernel/module.c            |   19
 arch/mips/kernel/octeon_switch.S     |    2
 arch/mips/kernel/perf_event.c        |  588
 arch/mips/kernel/perf_event_mipsxx.c | 1054
 arch/mips/kernel/proc.c              |    9
 arch/mips/kernel/process.c           |    3
 arch/mips/kernel/prom.c              |  111
 arch/mips/kernel/ptrace.c            |   68
 arch/mips/kernel/r2300_fpu.S         |    2
 arch/mips/kernel/r2300_switch.S      |    2
 arch/mips/kernel/r4k_fpu.S           |    2
 arch/mips/kernel/r4k_switch.S        |    2
 arch/mips/kernel/r6000_fpu.S         |    2
 arch/mips/kernel/rtlx.c              |    3
 arch/mips/kernel/scall32-o32.S       |   10
 arch/mips/kernel/scall64-64.S        |   10
 arch/mips/kernel/scall64-n32.S       |   10
 arch/mips/kernel/scall64-o32.S       |   10
 arch/mips/kernel/setup.c             |    5
 arch/mips/kernel/signal.c            |    2
 arch/mips/kernel/signal32.c          |    2
 arch/mips/kernel/smp-mt.c            |    4
 arch/mips/kernel/smp.c               |   31
 arch/mips/kernel/smtc.c              |   21
 arch/mips/kernel/syscall.c           |  125
 arch/mips/kernel/time.c              |    2
 arch/mips/kernel/traps.c             |   85
 arch/mips/kernel/unaligned.c         |    7
 arch/mips/kernel/vmlinux.lds.S       |   12
 arch/mips/kernel/vpe.c               |   23
 61 files changed, 2602 insertions(+), 722 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 06f848299785..83bba332bbfc 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_TX49XX)        += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_VR41XX)        += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
+obj-$(CONFIG_CPU_XLR)           += r4k_fpu.o r4k_switch.o

 obj-$(CONFIG_SMP)               += smp.o
 obj-$(CONFIG_SMP_UP)            += smp-up.o
@@ -95,6 +96,9 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
 obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)      += early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)     += spinlock_test.o
+obj-$(CONFIG_MIPS_MACHINE)      += mips_machine.o
+
+obj-$(CONFIG_OF)                += prom.o

 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)

@@ -102,4 +106,8 @@ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o

 obj-$(CONFIG_MIPS_CPUFREQ)      += cpufreq/

+obj-$(CONFIG_HW_PERF_EVENTS)    += perf_event.o
+
+obj-$(CONFIG_JUMP_LABEL)        += jump_label.o
+
 CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index bfea327c636c..36c3898b76db 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/smp.h>
+#include <linux/irq.h>

 #include <asm/addrspace.h>
 #include <asm/io.h>
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 00a4da277cbb..939157e397b9 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mc146818rtc.h>
+#include <linux/irq.h>

 #include <asm/time.h>

diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 392ef3756c56..339f3639b90e 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>

 #include <asm/gt64120.h>
 #include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 2a4d50ff5e2c..98c5a9737c14 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/smp.h>
+#include <linux/irq.h>

 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
@@ -31,7 +32,7 @@ static int mips_next_event(unsigned long delta,
         cnt = read_c0_count();
         cnt += delta;
         write_c0_compare(cnt);
-        res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+        res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
         return res;
 }

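The comparison change in mips_next_event() above is the substantive fix: if Count has already caught up with the freshly written Compare by the time it is re-read, the interrupt for that tick can be lost, so equality must also report -ETIME. A small user-space sketch of the signed-difference idiom (illustrative values, not kernel code):

    #include <stdio.h>

    /* (int)(a - b) stays correct across 32-bit Count wraparound: it is
     * >= 0 exactly when a has caught up with or passed b. */
    static int next_event_missed(unsigned int count_now, unsigned int compare)
    {
            return (int)(count_now - compare) >= 0; /* equal now counts as missed */
    }

    int main(void)
    {
            printf("%d\n", next_event_missed(100, 100)); /* 1 -> report -ETIME */
            printf("%d\n", next_event_missed(99, 100));  /* 0 -> armed in time */
            return 0;
    }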
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index da78eeaea6e8..590c54f28a81 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -17,6 +17,7 @@
  */
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/smp.h>

diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index b102e4f1630e..2e72d30b2f05 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/smp.h>
+#include <linux/irq.h>

 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 218ee6bda935..f0ab92a1b057 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -13,6 +13,7 @@
  */
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <asm/time.h>
 #include <asm/txx9tmr.h>

@@ -50,8 +51,7 @@ void __init txx9_clocksource_init(unsigned long baseaddr,
 {
         struct txx9_tmr_reg __iomem *tmrptr;

-        clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
-        clocksource_register(&txx9_clocksource.cs);
+        clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));

         tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
         __raw_writel(TCR_BASE, &tmrptr->tcr);
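This and the csrc-* hunks below follow the same conversion: the old clocksource_set_clock() + clocksource_register() pair is folded into clocksource_register_hz(), which derives the mult/shift scaling from the frequency itself. A rough sketch of that arithmetic (the shift value here is illustrative; the kernel picks its own):

    #include <stdio.h>
    #include <stdint.h>

    /* ns = (cycles * mult) >> shift, with mult ~= (1e9 << shift) / hz. */
    int main(void)
    {
            uint32_t hz = 27000000 / 8;     /* e.g. the TIM_C rate below */
            uint32_t shift = 22;            /* illustrative choice */
            uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / hz);
            uint64_t cycles = hz;           /* one second worth of cycles */

            printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
                   (unsigned long long)((cycles * (uint64_t)mult) >> shift));
            return 0;                       /* prints ~1e9 ns, one second */
    }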
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index b8bb8ba60869..f305ca14351b 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -73,7 +73,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
                 : "0" (5), "1" (8), "2" (5));
         align_mod(align, mod);
         /*
-         * The trailing nop is needed to fullfill the two-instruction
+         * The trailing nop is needed to fulfill the two-instruction
          * requirement between reading hi/lo and staring a mult/div.
          * Leaving it out may cause gas insert a nop itself breaking
          * the desired alignment of the next chunk.
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b1b304ea2128..bb133d10b145 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -25,6 +25,8 @@
 #include <asm/system.h>
 #include <asm/watch.h>
 #include <asm/spram.h>
+#include <asm/uaccess.h>
+
 /*
  * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
  * the implementation of the "wait" feature differs between CPU families. This
@@ -181,12 +183,13 @@ void __init check_wait(void)
         case CPU_5KC:
         case CPU_25KF:
         case CPU_PR4450:
-        case CPU_BCM3302:
-        case CPU_BCM6338:
-        case CPU_BCM6348:
-        case CPU_BCM6358:
+        case CPU_BMIPS3300:
+        case CPU_BMIPS4350:
+        case CPU_BMIPS4380:
+        case CPU_BMIPS5000:
         case CPU_CAVIUM_OCTEON:
         case CPU_CAVIUM_OCTEON_PLUS:
+        case CPU_CAVIUM_OCTEON2:
         case CPU_JZRISC:
                 cpu_wait = r4k_wait;
                 break;
@@ -288,6 +291,12 @@ static inline int cpu_has_confreg(void)
 #endif
 }

+static inline void set_elf_platform(int cpu, const char *plat)
+{
+        if (cpu == 0)
+                __elf_platform = plat;
+}
+
 /*
  * Get the FPU Implementation/Revision.
  */
@@ -611,6 +620,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
         case PRID_IMP_LOONGSON2:
                 c->cputype = CPU_LOONGSON2;
                 __cpu_name[cpu] = "ICT Loongson-2";
+
+                switch (c->processor_id & PRID_REV_MASK) {
+                case PRID_REV_LOONGSON2E:
+                        set_elf_platform(cpu, "loongson2e");
+                        break;
+                case PRID_REV_LOONGSON2F:
+                        set_elf_platform(cpu, "loongson2f");
+                        break;
+                }
+
                 c->isa_level = MIPS_CPU_ISA_III;
                 c->options = R4K_OPTS |
                              MIPS_CPU_FPU | MIPS_CPU_LLSC |
@@ -736,6 +755,8 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
             && cpu_has_tlb)
                 c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;

+        c->kscratch_mask = (config4 >> 16) & 0xff;
+
         return config4 & MIPS_CONF_M;
 }

@@ -902,35 +923,41 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
 {
         decode_configs(c);
         switch (c->processor_id & 0xff00) {
-        case PRID_IMP_BCM3302:
-         /* same as PRID_IMP_BCM6338 */
-                c->cputype = CPU_BCM3302;
-                __cpu_name[cpu] = "Broadcom BCM3302";
-                break;
-        case PRID_IMP_BCM4710:
-                c->cputype = CPU_BCM4710;
-                __cpu_name[cpu] = "Broadcom BCM4710";
-                break;
-        case PRID_IMP_BCM6345:
-                c->cputype = CPU_BCM6345;
-                __cpu_name[cpu] = "Broadcom BCM6345";
-                break;
-        case PRID_IMP_BCM6348:
-                c->cputype = CPU_BCM6348;
-                __cpu_name[cpu] = "Broadcom BCM6348";
-                break;
-        case PRID_IMP_BCM4350:
-                switch (c->processor_id & 0xf0) {
-                case PRID_REV_BCM6358:
-                        c->cputype = CPU_BCM6358;
-                        __cpu_name[cpu] = "Broadcom BCM6358";
-                        break;
-                default:
-                        c->cputype = CPU_UNKNOWN;
-                        break;
-                }
-                break;
-        }
+        case PRID_IMP_BMIPS32_REV4:
+        case PRID_IMP_BMIPS32_REV8:
+                c->cputype = CPU_BMIPS32;
+                __cpu_name[cpu] = "Broadcom BMIPS32";
+                set_elf_platform(cpu, "bmips32");
+                break;
+        case PRID_IMP_BMIPS3300:
+        case PRID_IMP_BMIPS3300_ALT:
+        case PRID_IMP_BMIPS3300_BUG:
+                c->cputype = CPU_BMIPS3300;
+                __cpu_name[cpu] = "Broadcom BMIPS3300";
+                set_elf_platform(cpu, "bmips3300");
+                break;
+        case PRID_IMP_BMIPS43XX: {
+                int rev = c->processor_id & 0xff;
+
+                if (rev >= PRID_REV_BMIPS4380_LO &&
+                    rev <= PRID_REV_BMIPS4380_HI) {
+                        c->cputype = CPU_BMIPS4380;
+                        __cpu_name[cpu] = "Broadcom BMIPS4380";
+                        set_elf_platform(cpu, "bmips4380");
+                } else {
+                        c->cputype = CPU_BMIPS4350;
+                        __cpu_name[cpu] = "Broadcom BMIPS4350";
+                        set_elf_platform(cpu, "bmips4350");
+                }
+                break;
+        }
+        case PRID_IMP_BMIPS5000:
+                c->cputype = CPU_BMIPS5000;
+                __cpu_name[cpu] = "Broadcom BMIPS5000";
+                set_elf_platform(cpu, "bmips5000");
+                c->options |= MIPS_CPU_ULRI;
+                break;
+        }
 }

 static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
@@ -950,8 +977,12 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
                 c->cputype = CPU_CAVIUM_OCTEON_PLUS;
                 __cpu_name[cpu] = "Cavium Octeon+";
 platform:
-                if (cpu == 0)
-                        __elf_platform = "octeon";
+                set_elf_platform(cpu, "octeon");
+                break;
+        case PRID_IMP_CAVIUM_CN63XX:
+                c->cputype = CPU_CAVIUM_OCTEON2;
+                __cpu_name[cpu] = "Cavium Octeon II";
+                set_elf_platform(cpu, "octeon2");
                 break;
         default:
                 printk(KERN_INFO "Unknown Octeon chip!\n");
@@ -976,6 +1007,65 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
         }
 }

+static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
+{
+        decode_configs(c);
+
+        c->options = (MIPS_CPU_TLB |
+                        MIPS_CPU_4KEX |
+                        MIPS_CPU_COUNTER |
+                        MIPS_CPU_DIVEC |
+                        MIPS_CPU_WATCH |
+                        MIPS_CPU_EJTAG |
+                        MIPS_CPU_LLSC);
+
+        switch (c->processor_id & 0xff00) {
+        case PRID_IMP_NETLOGIC_XLR732:
+        case PRID_IMP_NETLOGIC_XLR716:
+        case PRID_IMP_NETLOGIC_XLR532:
+        case PRID_IMP_NETLOGIC_XLR308:
+        case PRID_IMP_NETLOGIC_XLR532C:
+        case PRID_IMP_NETLOGIC_XLR516C:
+        case PRID_IMP_NETLOGIC_XLR508C:
+        case PRID_IMP_NETLOGIC_XLR308C:
+                c->cputype = CPU_XLR;
+                __cpu_name[cpu] = "Netlogic XLR";
+                break;
+
+        case PRID_IMP_NETLOGIC_XLS608:
+        case PRID_IMP_NETLOGIC_XLS408:
+        case PRID_IMP_NETLOGIC_XLS404:
+        case PRID_IMP_NETLOGIC_XLS208:
+        case PRID_IMP_NETLOGIC_XLS204:
+        case PRID_IMP_NETLOGIC_XLS108:
+        case PRID_IMP_NETLOGIC_XLS104:
+        case PRID_IMP_NETLOGIC_XLS616B:
+        case PRID_IMP_NETLOGIC_XLS608B:
+        case PRID_IMP_NETLOGIC_XLS416B:
+        case PRID_IMP_NETLOGIC_XLS412B:
+        case PRID_IMP_NETLOGIC_XLS408B:
+        case PRID_IMP_NETLOGIC_XLS404B:
+                c->cputype = CPU_XLR;
+                __cpu_name[cpu] = "Netlogic XLS";
+                break;
+
+        default:
+                printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n",
+                       c->processor_id);
+                c->cputype = CPU_XLR;
+                break;
+        }
+
+        c->isa_level = MIPS_CPU_ISA_M64R1;
+        c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
 const char *__cpu_name[NR_CPUS];
 const char *__elf_platform;

@@ -1017,6 +1107,9 @@ __cpuinit void cpu_probe(void)
         case PRID_COMP_INGENIC:
                 cpu_probe_ingenic(c, cpu);
                 break;
+        case PRID_COMP_NETLOGIC:
+                cpu_probe_netlogic(c, cpu);
+                break;
         }

         BUG_ON(!__cpu_name[cpu]);
@@ -1053,6 +1146,11 @@ __cpuinit void cpu_probe(void)
         c->srsets = 1;

         cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+        if (cpu == 0)
+                __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
 }

 __cpuinit void cpu_report(void)
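The new __ua_limit is derived from the probed virtual-address width: every address whose bits above cpu_vmbits are clear is a candidate user address. A standalone sketch of the computation from the hunk above (the vmbits value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int vmbits = 40;        /* illustrative; probed per CPU */
            uint64_t ua_limit = ~((1ULL << vmbits) - 1);

            /* a user pointer p is plausible when (p & ua_limit) == 0 */
            printf("vmbits=%d ua_limit=%#llx\n", vmbits,
                   (unsigned long long)ua_limit);
            return 0;               /* prints ua_limit=0xffffff0000000000 */
    }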
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index 51489f8a825e..f96f99c794a3 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -49,6 +49,5 @@ void __init sb1480_clocksource_init(void)

         plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
         zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
-        clocksource_set_clock(cs, zbbus);
-        clocksource_register(cs);
+        clocksource_register_hz(cs, zbbus);
 }
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 23da108506b0..46bd7fa98d6c 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -59,7 +59,5 @@ void __init dec_ioasic_clocksource_init(void)
         printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);

         clocksource_dec.rating = 200 + freq / 10000000;
-        clocksource_set_clock(&clocksource_dec, freq);
-
-        clocksource_register(&clocksource_dec);
+        clocksource_register_hz(&clocksource_dec, freq);
 }
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
index a27c16c8690e..2e7c5232da8d 100644
--- a/arch/mips/kernel/csrc-powertv.c
+++ b/arch/mips/kernel/csrc-powertv.c
@@ -78,9 +78,7 @@ static void __init powertv_c0_hpt_clocksource_init(void)

         clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

-        clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
-
-        clocksource_register(&clocksource_mips);
+        clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
 }

 /**
@@ -130,43 +128,16 @@ static struct clocksource clocksource_tim_c = {
 /**
  * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
  *
- * The hard part here is coming up with a constant k and shift s such that
- * the 48-bit TIM_C value multiplied by k doesn't overflow and that value,
- * when shifted right by s, yields the corresponding number of nanoseconds.
  * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
- * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the
- * number of nanoseconds. Since the TIM_C value has 48 bits and the math is
- * done in 64 bits, avoiding an overflow means that k must be less than
- * 64 - 48 = 16 bits.
+ * 1 / (27,000,000/8) seconds.
  */
 static void __init powertv_tim_c_clocksource_init(void)
 {
-        int prescale;
-        unsigned long dividend;
-        unsigned long k;
-        int s;
-        const int max_k_bits = (64 - 48) - 1;
-        const unsigned long billion = 1000000000;
         const unsigned long counts_per_second = 27000000 / 8;

-        prescale = BITS_PER_LONG - ilog2(billion) - 1;
-        dividend = billion << prescale;
-        k = dividend / counts_per_second;
-        s = ilog2(k) - max_k_bits;
-
-        if (s < 0)
-                s = prescale;
-
-        else {
-                k >>= s;
-                s += prescale;
-        }
-
-        clocksource_tim_c.mult = k;
-        clocksource_tim_c.shift = s;
         clocksource_tim_c.rating = 200;

-        clocksource_register(&clocksource_tim_c);
+        clocksource_register_hz(&clocksource_tim_c, counts_per_second);
         tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
 }

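The deleted comment derived a mult/shift pair by hand; with clocksource_register_hz() only the rate matters. The underlying arithmetic from the removed comment, as a one-line check:

    #include <stdio.h>

    /* One TIM_C tick is 1 / (27 MHz / 8) seconds, i.e. 8/27 us ~= 296.3 ns. */
    int main(void)
    {
            double ns_per_tick = 1e9 / (27000000.0 / 8.0);
            printf("TIM_C tick = %.1f ns\n", ns_per_tick); /* ~296.3 */
            return 0;
    }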
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index e95a3cd48eea..decd1fa38d55 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -30,9 +30,7 @@ int __init init_r4k_clocksource(void)
         /* Calculate a somewhat reasonable rating value */
         clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

-        clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
-
-        clocksource_register(&clocksource_mips);
+        clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

         return 0;
 }
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index d14d3d1907fa..e9606d907685 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -65,6 +65,5 @@ void __init sb1250_clocksource_init(void)
                       IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
                                                   R_SCD_TIMER_CFG)));

-        clocksource_set_clock(cs, V_SCD_TIMER_FREQ);
-        clocksource_register(cs);
+        clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
 }
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ffa331029e08..37acfa036d44 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -167,14 +167,13 @@ work_notifysig: # deal with pending signals and
 FEXPORT(syscall_exit_work_partial)
         SAVE_STATIC
 syscall_exit_work:
-        li      t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+        li      t0, _TIF_WORK_SYSCALL_EXIT
         and     t0, a2                  # a2 is preloaded with TI_FLAGS
         beqz    t0, work_pending        # trace bit set?
-        local_irq_enable                # could let do_syscall_trace()
+        local_irq_enable                # could let syscall_trace_leave()
                                         # call schedule() instead
         move    a0, sp
-        li      a1, 1
-        jal     do_syscall_trace
+        jal     syscall_trace_leave
         b       resume_userspace

 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
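_TIF_WORK_SYSCALL_EXIT bundles the exit-work thread flags so the assembly tests one mask instead of listing flags inline. A sketch of the idea; the flag names and bit values here are illustrative assumptions, not MIPS's actual definitions:

    #include <stdio.h>

    /* Illustrative flag bits (not the real MIPS values). */
    #define _TIF_SYSCALL_TRACE      (1 << 0)
    #define _TIF_SYSCALL_AUDIT      (1 << 1)
    #define _TIF_SYSCALL_TRACEPOINT (1 << 2)

    /* One mask, tested once on exit; adding a new exit-work flag then
     * needs no change in the assembly path. */
    #define _TIF_WORK_SYSCALL_EXIT \
            (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)

    int main(void)
    {
            unsigned int ti_flags = _TIF_SYSCALL_AUDIT;
            printf("exit work pending: %d\n",
                   !!(ti_flags & _TIF_WORK_SYSCALL_EXIT));
            return 0;
    }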
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f11231..feb8021a305f 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,14 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>

-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-        return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>

 #ifdef CONFIG_DYNAMIC_FTRACE

 #define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff    /*  op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)

-#define INSN_B_1F_4 0x10000004  /* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005  /* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000     /* nop */
 #define INSN_JAL(addr)  \
         ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -60,15 +45,29 @@ static inline void ftrace_dyn_arch_init_insns(void)

         /* jal (ftrace_caller + 8), jump over the first two instruction */
         buf = (u32 *)&insn_jal_ftrace_caller;
-        uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         /* j ftrace_graph_caller */
         buf = (u32 *)&insn_j_ftrace_graph_caller;
-        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
 #endif
 }

+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+        if (ip >= (unsigned long)_stext &&
+            ip <= (unsigned long)_etext)
+                return 1;
+        return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
         int faulted;
@@ -84,6 +83,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
         return 0;
 }

+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount          --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                              1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                              1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
                     struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +126,11 @@ int ftrace_make_nop(struct module *mod,
         unsigned long ip = rec->ip;

         /*
-         * We have compiled module with -mlong-calls, but compiled the kernel
-         * without it, we need to cope with them respectively.
+         * If ip is in kernel space, no long call, otherwise, long call is
+         * needed.
          */
-        if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-                /*
-                 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-                 * addiu v1, v1, low_16bit_of_mcount
-                 * move at, ra
-                 * move $12, ra_address
-                 * jalr v1
-                 *  sub sp, sp, 8
-                 *                                  1: offset = 5 instructions
-                 */
-                new = INSN_B_1F_5;
-#else
-                /*
-                 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-                 * addiu v1, v1, low_16bit_of_mcount
-                 * move at, ra
-                 * jalr v1
-                 *  nop | move $12, ra_address | sub sp, sp, 8
-                 *                                  1: offset = 4 instructions
-                 */
-                new = INSN_B_1F_4;
-#endif
-        } else {
-                /*
-                 * move at, ra
-                 * jal _mcount          --> nop
-                 */
-                new = INSN_NOP;
-        }
+        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
         return ftrace_modify_code(ip, new);
 }

@@ -132,8 +139,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         unsigned int new;
         unsigned long ip = rec->ip;

-        /* ip, module: 0xc0000000, kernel: 0x80000000 */
-        new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+        new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+                insn_lui_v1_hi16_mcount;

         return ftrace_modify_code(ip, new);
 }
@@ -190,29 +197,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */

-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-                                     unsigned long parent,
-                                     unsigned long parent_addr,
-                                     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-        unsigned long sp, ip, ra;
+        unsigned long sp, ip, tmp;
         unsigned int code;
         int faulted;

         /*
-         * For module, move the ip from calling site of mcount to the
-         * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-         * kernel, move to the instruction "move ra, at"(offset is 12)
+         * For module, move the ip from the return address after the
+         * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+         * kernel, move after the instruction "move ra, at"(offset is 16)
          */
-        ip = self_addr - (in_module(self_addr) ? 20 : 12);
+        ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

         /*
          * search the text until finding the non-store instruction or "s{d,w}
          * ra, offset(sp)" instruction
          */
         do {
-                ip -= 4;
-
                 /* get the code at "ip": code = *(unsigned int *)ip; */
                 safe_load_code(code, ip, faulted);

@@ -224,18 +227,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
                  * store the ra on the stack
                  */
                 if ((code & S_R_SP) != S_R_SP)
-                        return parent_addr;
+                        return parent_ra_addr;

-        } while (((code & S_RA_SP) != S_RA_SP));
+                /* Move to the next instruction */
+                ip -= 4;
+        } while ((code & S_RA_SP) != S_RA_SP);

         sp = fp + (code & OFFSET_MASK);

-        /* ra = *(unsigned long *)sp; */
-        safe_load_stack(ra, sp, faulted);
+        /* tmp = *(unsigned long *)sp; */
+        safe_load_stack(tmp, sp, faulted);
         if (unlikely(faulted))
                 return 0;

-        if (ra == parent)
+        if (tmp == old_parent_ra)
                 return sp;
         return 0;
 }
@@ -246,21 +251,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                            unsigned long fp)
 {
-        unsigned long old;
+        unsigned long old_parent_ra;
         struct ftrace_graph_ent trace;
         unsigned long return_hooker = (unsigned long)
             &return_to_handler;
-        int faulted;
+        int faulted, insns;

         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 return;

         /*
-         * "parent" is the stack address saved the return address of the caller
-         * of _mcount.
+         * "parent_ra_addr" is the stack address saved the return address of
+         * the caller of _mcount.
          *
          * if the gcc < 4.5, a leaf function does not save the return address
          * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +280,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
          * do it in ftrace_graph_caller of mcount.S.
          */

-        /* old = *parent; */
-        safe_load_stack(old, parent, faulted);
+        /* old_parent_ra = *parent_ra_addr; */
+        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
         if (unlikely(faulted))
                 goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-        parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-                        (unsigned long)parent, fp);
+        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
         /*
          * If fails when getting the stack address of the non-leaf function's
          * ra, stop function graph tracer and return
          */
-        if (parent == 0)
+        if (parent_ra_addr == 0)
                 goto out;
 #endif
-        /* *parent = return_hooker; */
-        safe_store_stack(return_hooker, parent, faulted);
+        /* *parent_ra_addr = return_hooker; */
+        safe_store_stack(return_hooker, parent_ra_addr, faulted);
         if (unlikely(faulted))
                 goto out;

-        if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-            -EBUSY) {
-                *parent = old;
+        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+            == -EBUSY) {
+                *parent_ra_addr = old_parent_ra;
                 return;
         }

-        trace.func = self_addr;
+        /*
+         * Get the recorded ip of the current mcount calling site in the
+         * __mcount_loc section, which will be used to filter the function
+         * entries configured through the tracing/set_graph_function interface.
+         */
+
+        insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

         /* Only trace if the calling function expects to */
         if (!ftrace_graph_entry(&trace)) {
                 current->curr_ret_stack--;
-                *parent = old;
+                *parent_ra_addr = old_parent_ra;
         }
         return;
 out:
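JUMP_RANGE_MASK reflects how a MIPS j/jal encodes its target: 26 bits shifted left by 2, with the top 4 bits supplied by the PC, giving a 256 MB reach. A sketch of the INSN_JAL() encoding used above (the target address is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define JAL             0x0c000000u
    #define ADDR_MASK       0x03ffffffu             /* low 26 bits of the field */
    #define JUMP_RANGE_MASK ((1ul << 28) - 1)       /* j/jal reach: 256 MB */

    /* Mirrors INSN_JAL() from the hunk above: a jal encodes target bits
     * [27:2]; the CPU takes the top 4 bits from the PC of the delay slot. */
    static uint32_t insn_jal(unsigned long addr)
    {
            return JAL | (uint32_t)(((addr & JUMP_RANGE_MASK) >> 2) & ADDR_MASK);
    }

    int main(void)
    {
            unsigned long target = 0x80123454;      /* KSEG0-style address */
            printf("jal encoding: %#010x\n", insn_jal(target));
            return 0;                               /* prints 0x0c048d15 */
    }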
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 94794062a177..391221b6a6aa 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/smp.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>

 #include <asm/delay.h>
 #include <asm/i8253.h>
@@ -124,87 +125,11 @@ void __init setup_pit_timer(void)
         setup_irq(0, &irq0);
 }

-/*
- * Since the PIT overflows every tick, its not very useful
- * to just read by itself. So use jiffies to emulate a free
- * running counter:
- */
-static cycle_t pit_read(struct clocksource *cs)
-{
-        unsigned long flags;
-        int count;
-        u32 jifs;
-        static int old_count;
-        static u32 old_jifs;
-
-        raw_spin_lock_irqsave(&i8253_lock, flags);
-        /*
-         * Although our caller may have the read side of xtime_lock,
-         * this is now a seqlock, and we are cheating in this routine
-         * by having side effects on state that we cannot undo if
-         * there is a collision on the seqlock and our caller has to
-         * retry. (Namely, old_jifs and old_count.) So we must treat
-         * jiffies as volatile despite the lock. We read jiffies
-         * before latching the timer count to guarantee that although
-         * the jiffies value might be older than the count (that is,
-         * the counter may underflow between the last point where
-         * jiffies was incremented and the point where we latch the
-         * count), it cannot be newer.
-         */
-        jifs = jiffies;
-        outb_p(0x00, PIT_MODE); /* latch the count ASAP */
-        count = inb_p(PIT_CH0); /* read the latched count */
-        count |= inb_p(PIT_CH0) << 8;
-
-        /* VIA686a test code... reset the latch if count > max + 1 */
-        if (count > LATCH) {
-                outb_p(0x34, PIT_MODE);
-                outb_p(LATCH & 0xff, PIT_CH0);
-                outb(LATCH >> 8, PIT_CH0);
-                count = LATCH - 1;
-        }
-
-        /*
-         * It's possible for count to appear to go the wrong way for a
-         * couple of reasons:
-         *
-         *  1. The timer counter underflows, but we haven't handled the
-         *     resulting interrupt and incremented jiffies yet.
-         *  2. Hardware problem with the timer, not giving us continuous time,
-         *     the counter does small "jumps" upwards on some Pentium systems,
-         *     (see c't 95/10 page 335 for Neptun bug.)
-         *
-         * Previous attempts to handle these cases intelligently were
-         * buggy, so we just do the simple thing now.
-         */
-        if (count > old_count && jifs == old_jifs) {
-                count = old_count;
-        }
-        old_count = count;
-        old_jifs = jifs;
-
-        raw_spin_unlock_irqrestore(&i8253_lock, flags);
-
-        count = (LATCH - 1) - count;
-
-        return (cycle_t)(jifs * LATCH) + count;
-}
-
-static struct clocksource clocksource_pit = {
-        .name = "pit",
-        .rating = 110,
-        .read = pit_read,
-        .mask = CLOCKSOURCE_MASK(32),
-        .mult = 0,
-        .shift = 20,
-};
-
 static int __init init_pit_clocksource(void)
 {
         if (num_possible_cpus() > 1) /* PIT does not scale! */
                 return 0;

-        clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
-        return clocksource_register(&clocksource_pit);
+        return clocksource_i8253_init();
 }
 arch_initcall(init_pit_clocksource);
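The deleted pit_read() emulated a free-running counter from a timer that wraps every tick; that logic now lives in the shared i8253 clocksource registered by clocksource_i8253_init(). A reduced model of the deleted computation (the LATCH value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define LATCH 11932     /* illustrative: PIT_TICK_RATE / HZ for HZ=100 */

    /* Full ticks (jiffies) * LATCH, plus how far the downcounter has
     * travelled inside the current tick. */
    static uint64_t pit_cycles(uint32_t jiffies, int downcount)
    {
            int elapsed = (LATCH - 1) - downcount;
            return (uint64_t)jiffies * LATCH + elapsed;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)pit_cycles(5, 11931)); /* tick start */
            printf("%llu\n", (unsigned long long)pit_cycles(5, 0));     /* tick end */
            return 0;
    }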
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 27799113332c..5c74eb797f08 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,7 +14,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>

 #include <asm/i8259.h>
 #include <asm/io.h>
@@ -30,19 +31,19 @@

 static int i8259A_auto_eoi = -1;
 DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
-static void mask_and_ack_8259A(unsigned int irq);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
 static void init_8259A(int auto_eoi);

 static struct irq_chip i8259A_chip = {
         .name = "XT-PIC",
-        .mask = disable_8259A_irq,
-        .disable = disable_8259A_irq,
-        .unmask = enable_8259A_irq,
-        .mask_ack = mask_and_ack_8259A,
+        .irq_mask = disable_8259A_irq,
+        .irq_disable = disable_8259A_irq,
+        .irq_unmask = enable_8259A_irq,
+        .irq_mask_ack = mask_and_ack_8259A,
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-        .set_affinity = plat_set_irq_affinity,
+        .irq_set_affinity = plat_set_irq_affinity,
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };

@@ -58,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff;
 #define cached_master_mask      (cached_irq_mask)
 #define cached_slave_mask       (cached_irq_mask >> 8)

-static void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *d)
 {
-        unsigned int mask;
+        unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
         unsigned long flags;

-        irq -= I8259A_IRQ_BASE;
         mask = 1 << irq;
         raw_spin_lock_irqsave(&i8259A_lock, flags);
         cached_irq_mask |= mask;
@@ -74,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq)
         raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }

-static void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_data *d)
 {
-        unsigned int mask;
+        unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
         unsigned long flags;

-        irq -= I8259A_IRQ_BASE;
         mask = ~(1 << irq);
         raw_spin_lock_irqsave(&i8259A_lock, flags);
         cached_irq_mask &= mask;
@@ -111,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq)
 void make_8259A_irq(unsigned int irq)
 {
         disable_irq_nosync(irq);
-        set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+        irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
         enable_irq(irq);
 }

@@ -144,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq)
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *d)
 {
-        unsigned int irqmask;
+        unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
         unsigned long flags;

-        irq -= I8259A_IRQ_BASE;
         irqmask = 1 << irq;
         raw_spin_lock_irqsave(&i8259A_lock, flags);
         /*
@@ -217,14 +215,13 @@ spurious_8259A_irq:
         }
 }

-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
         if (i8259A_auto_eoi >= 0)
                 init_8259A(i8259A_auto_eoi);
-        return 0;
 }

-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
         /* Put the i8259A into a quiescent state that
          * the kernel initialization code can get it
@@ -234,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
                 outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
                 outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-1 */
         }
-        return 0;
 }

-static struct sysdev_class i8259_sysdev_class = {
-        .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
         .resume = i8259A_resume,
         .shutdown = i8259A_shutdown,
 };

-static struct sys_device device_i8259A = {
-        .id     = 0,
-        .cls    = &i8259_sysdev_class,
-};
-
 static int __init i8259A_init_sysfs(void)
 {
-        int error = sysdev_class_register(&i8259_sysdev_class);
-        if (!error)
-                error = sysdev_register(&device_i8259A);
-        return error;
+        register_syscore_ops(&i8259_syscore_ops);
+        return 0;
 }

 device_initcall(i8259A_init_sysfs);
@@ -289,9 +277,9 @@ static void init_8259A(int auto_eoi)
                  * In AEOI mode we just have to mask the interrupt
                  * when acking.
                  */
-                i8259A_chip.mask_ack = disable_8259A_irq;
+                i8259A_chip.irq_mask_ack = disable_8259A_irq;
         else
-                i8259A_chip.mask_ack = mask_and_ack_8259A;
+                i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

         udelay(100);            /* wait for 8259A to initialize */

@@ -338,8 +326,8 @@ void __init init_i8259_irqs(void)
         init_8259A(0);

         for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
-                set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
-                set_irq_probe(i);
+                irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
+                irq_set_probe(i);
         }

         setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
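These hunks track the genirq conversion from irq-number callbacks to struct irq_data ones. A reduced model of the new callback shape (the struct here is a stand-in, not the kernel's full irq_data; the mask starts empty only for the demo, the real driver starts fully masked):

    #include <stdio.h>

    struct irq_data {
            unsigned int irq;
    };

    #define I8259A_IRQ_BASE 0       /* illustrative */

    static unsigned int cached_irq_mask;        /* demo: starts at 0 */

    /* Chip callbacks now derive the local line from d->irq instead of
     * mutating a bare irq-number argument. */
    static void disable_8259A_irq(struct irq_data *d)
    {
            unsigned int irq = d->irq - I8259A_IRQ_BASE;

            cached_irq_mask |= 1 << irq;        /* real code also pokes the IMR */
    }

    int main(void)
    {
            struct irq_data d = { .irq = 3 };

            disable_8259A_irq(&d);
            printf("mask=%#x\n", cached_irq_mask);      /* 0x8 */
            return 0;
    }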
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 82ba9f62f49e..0c527f652196 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -3,11 +3,11 @@
 #include <linux/bitmap.h>
 #include <linux/init.h>
 #include <linux/smp.h>
+#include <linux/irq.h>

 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>

@@ -87,17 +87,10 @@ unsigned int gic_get_int(void)
         return i;
 }

-static unsigned int gic_irq_startup(unsigned int irq)
+static void gic_irq_ack(struct irq_data *d)
 {
-        irq -= _irqbase;
-        pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
-        GIC_SET_INTR_MASK(irq);
-        return 0;
-}
-
-static void gic_irq_ack(unsigned int irq)
-{
-        irq -= _irqbase;
+        unsigned int irq = d->irq - _irqbase;

         pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
         GIC_CLR_INTR_MASK(irq);

@@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq)
         GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
 }

-static void gic_mask_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-        irq -= _irqbase;
+        unsigned int irq = d->irq - _irqbase;
         pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
         GIC_CLR_INTR_MASK(irq);
 }

-static void gic_unmask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-        irq -= _irqbase;
+        unsigned int irq = d->irq - _irqbase;
         pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
         GIC_SET_INTR_MASK(irq);
 }
@@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq)

 static DEFINE_SPINLOCK(gic_lock);

-static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+                            bool force)
 {
+        unsigned int irq = d->irq - _irqbase;
         cpumask_t tmp = CPU_MASK_NONE;
         unsigned long flags;
         int i;

-        irq -= _irqbase;
         pr_debug("%s(%d) called\n", __func__, irq);
         cpumask_and(&tmp, cpumask, cpu_online_mask);
         if (cpus_empty(tmp))
@@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

         }
-        cpumask_copy(irq_desc[irq].affinity, cpumask);
+        cpumask_copy(d->affinity, cpumask);
         spin_unlock_irqrestore(&gic_lock, flags);

-        return 0;
+        return IRQ_SET_MASK_OK_NOCOPY;
 }
 #endif

 static struct irq_chip gic_irq_controller = {
         .name           =       "MIPS GIC",
-        .startup        =       gic_irq_startup,
-        .ack            =       gic_irq_ack,
-        .mask           =       gic_mask_irq,
-        .mask_ack       =       gic_mask_irq,
-        .unmask         =       gic_unmask_irq,
-        .eoi            =       gic_unmask_irq,
+        .irq_ack        =       gic_irq_ack,
+        .irq_mask       =       gic_mask_irq,
+        .irq_mask_ack   =       gic_mask_irq,
+        .irq_unmask     =       gic_unmask_irq,
+        .irq_eoi        =       gic_unmask_irq,
 #ifdef CONFIG_SMP
-        .set_affinity   =       gic_set_affinity,
+        .irq_set_affinity =     gic_set_affinity,
 #endif
 };

@@ -236,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
         vpe_local_setup(numvpes);

         for (i = _irqbase; i < (_irqbase + numintrs); i++)
-                set_irq_chip(i, &gic_irq_controller);
+                irq_set_chip(i, &gic_irq_controller);
 }

 void __init gic_init(unsigned long gic_base_addr,
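Returning IRQ_SET_MASK_OK_NOCOPY tells the genirq core that gic_set_affinity() already stored the new mask via cpumask_copy(), so the core must not copy it a second time. A toy model of that contract (the enum names mirror genirq; everything else is illustrative):

    #include <stdio.h>
    #include <string.h>

    enum { IRQ_SET_MASK_OK, IRQ_SET_MASK_OK_NOCOPY };

    static char core_mask[8], chip_mask[8];

    static int chip_set_affinity(const char *mask)
    {
            strncpy(chip_mask, mask, sizeof(chip_mask) - 1);    /* chip copies */
            return IRQ_SET_MASK_OK_NOCOPY;
    }

    int main(void)
    {
            if (chip_set_affinity("0x3") == IRQ_SET_MASK_OK)
                    strncpy(core_mask, "0x3", sizeof(core_mask) - 1); /* skipped */
            printf("chip=%s core=%s\n", chip_mask, core_mask);
            return 0;
    }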
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 42ef81461bfc..883fc6cead36 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -29,64 +29,64 @@

 static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);

-static void ack_gt641xx_irq(unsigned int irq)
+static void ack_gt641xx_irq(struct irq_data *d)
 {
         unsigned long flags;
         u32 cause;

         raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
         cause = GT_READ(GT_INTRCAUSE_OFS);
-        cause &= ~GT641XX_IRQ_TO_BIT(irq);
+        cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
         GT_WRITE(GT_INTRCAUSE_OFS, cause);
         raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }

-static void mask_gt641xx_irq(unsigned int irq)
+static void mask_gt641xx_irq(struct irq_data *d)
 {
         unsigned long flags;
         u32 mask;

         raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
         mask = GT_READ(GT_INTRMASK_OFS);
-        mask &= ~GT641XX_IRQ_TO_BIT(irq);
+        mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
         GT_WRITE(GT_INTRMASK_OFS, mask);
         raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }

-static void mask_ack_gt641xx_irq(unsigned int irq)
+static void mask_ack_gt641xx_irq(struct irq_data *d)
 {
         unsigned long flags;
         u32 cause, mask;

         raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
         mask = GT_READ(GT_INTRMASK_OFS);
-        mask &= ~GT641XX_IRQ_TO_BIT(irq);
+        mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
         GT_WRITE(GT_INTRMASK_OFS, mask);

         cause = GT_READ(GT_INTRCAUSE_OFS);
-        cause &= ~GT641XX_IRQ_TO_BIT(irq);
+        cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
         GT_WRITE(GT_INTRCAUSE_OFS, cause);
         raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }

-static void unmask_gt641xx_irq(unsigned int irq)
+static void unmask_gt641xx_irq(struct irq_data *d)
 {
         unsigned long flags;
         u32 mask;

         raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
         mask = GT_READ(GT_INTRMASK_OFS);
-        mask |= GT641XX_IRQ_TO_BIT(irq);
+        mask |= GT641XX_IRQ_TO_BIT(d->irq);
         GT_WRITE(GT_INTRMASK_OFS, mask);
         raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }

 static struct irq_chip gt641xx_irq_chip = {
         .name = "GT641xx",
-        .ack = ack_gt641xx_irq,
-        .mask = mask_gt641xx_irq,
-        .mask_ack = mask_ack_gt641xx_irq,
-        .unmask = unmask_gt641xx_irq,
+        .irq_ack = ack_gt641xx_irq,
+        .irq_mask = mask_gt641xx_irq,
+        .irq_mask_ack = mask_ack_gt641xx_irq,
+        .irq_unmask = unmask_gt641xx_irq,
 };

 void gt641xx_irq_dispatch(void)
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void)
          * bit31: logical or of bits[25:1].
          */
         for (i = 1; i < 30; i++)
-                set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
+                irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
                                          &gt641xx_irq_chip, handle_level_irq);
 }
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 6a8cd28133d5..0c6afeed89d2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -28,8 +28,10 @@ static unsigned long _icctrl_msc;
 static unsigned int irq_base;

 /* mask off an interrupt */
-static inline void mask_msc_irq(unsigned int irq)
+static inline void mask_msc_irq(struct irq_data *d)
 {
+        unsigned int irq = d->irq;
+
         if (irq < (irq_base + 32))
                 MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
         else
@@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq)
37} 39}
38 40
39/* unmask an interrupt */ 41/* unmask an interrupt */
40static inline void unmask_msc_irq(unsigned int irq) 42static inline void unmask_msc_irq(struct irq_data *d)
41{ 43{
44 unsigned int irq = d->irq;
45
42 if (irq < (irq_base + 32)) 46 if (irq < (irq_base + 32))
43 MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); 47 MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
44 else 48 else
@@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq)
48/* 52/*
49 * Masks and ACKs an IRQ 53 * Masks and ACKs an IRQ
50 */ 54 */
51static void level_mask_and_ack_msc_irq(unsigned int irq) 55static void level_mask_and_ack_msc_irq(struct irq_data *d)
52{ 56{
53 mask_msc_irq(irq); 57 unsigned int irq = d->irq;
58
59 mask_msc_irq(d);
54 if (!cpu_has_veic) 60 if (!cpu_has_veic)
55 MSCIC_WRITE(MSC01_IC_EOI, 0); 61 MSCIC_WRITE(MSC01_IC_EOI, 0);
56 /* This actually needs to be a call into platform code */ 62 /* This actually needs to be a call into platform code */
@@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
60/* 66/*
61 * Masks and ACKs an IRQ 67 * Masks and ACKs an IRQ
62 */ 68 */
63static void edge_mask_and_ack_msc_irq(unsigned int irq) 69static void edge_mask_and_ack_msc_irq(struct irq_data *d)
64{ 70{
65 mask_msc_irq(irq); 71 unsigned int irq = d->irq;
72
73 mask_msc_irq(d);
66 if (!cpu_has_veic) 74 if (!cpu_has_veic)
67 MSCIC_WRITE(MSC01_IC_EOI, 0); 75 MSCIC_WRITE(MSC01_IC_EOI, 0);
68 else { 76 else {
@@ -75,15 +83,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
75} 83}
76 84
77/* 85/*
78 * End IRQ processing
79 */
80static void end_msc_irq(unsigned int irq)
81{
82 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
83 unmask_msc_irq(irq);
84}
85
86/*
87 * Interrupt handler for interrupts coming from SOC-it. 86 * Interrupt handler for interrupts coming from SOC-it.
88 */ 87 */
89void ll_msc_irq(void) 88void ll_msc_irq(void)
@@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set)
107 106
108static struct irq_chip msc_levelirq_type = { 107static struct irq_chip msc_levelirq_type = {
109 .name = "SOC-it-Level", 108 .name = "SOC-it-Level",
110 .ack = level_mask_and_ack_msc_irq, 109 .irq_ack = level_mask_and_ack_msc_irq,
111 .mask = mask_msc_irq, 110 .irq_mask = mask_msc_irq,
112 .mask_ack = level_mask_and_ack_msc_irq, 111 .irq_mask_ack = level_mask_and_ack_msc_irq,
113 .unmask = unmask_msc_irq, 112 .irq_unmask = unmask_msc_irq,
114 .eoi = unmask_msc_irq, 113 .irq_eoi = unmask_msc_irq,
115 .end = end_msc_irq,
116}; 114};
117 115
118static struct irq_chip msc_edgeirq_type = { 116static struct irq_chip msc_edgeirq_type = {
119 .name = "SOC-it-Edge", 117 .name = "SOC-it-Edge",
120 .ack = edge_mask_and_ack_msc_irq, 118 .irq_ack = edge_mask_and_ack_msc_irq,
121 .mask = mask_msc_irq, 119 .irq_mask = mask_msc_irq,
122 .mask_ack = edge_mask_and_ack_msc_irq, 120 .irq_mask_ack = edge_mask_and_ack_msc_irq,
123 .unmask = unmask_msc_irq, 121 .irq_unmask = unmask_msc_irq,
124 .eoi = unmask_msc_irq, 122 .irq_eoi = unmask_msc_irq,
125 .end = end_msc_irq,
126}; 123};
127 124
128 125
@@ -140,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
140 137
141 switch (imp->im_type) { 138 switch (imp->im_type) {
142 case MSC01_IRQ_EDGE: 139 case MSC01_IRQ_EDGE:
143 set_irq_chip_and_handler_name(irqbase + n, 140 irq_set_chip_and_handler_name(irqbase + n,
144 &msc_edgeirq_type, handle_edge_irq, "edge"); 141 &msc_edgeirq_type,
142 handle_edge_irq,
143 "edge");
145 if (cpu_has_veic) 144 if (cpu_has_veic)
146 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); 145 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
147 else 146 else
148 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); 147 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
149 break; 148 break;
150 case MSC01_IRQ_LEVEL: 149 case MSC01_IRQ_LEVEL:
151 set_irq_chip_and_handler_name(irqbase+n, 150 irq_set_chip_and_handler_name(irqbase + n,
152 &msc_levelirq_type, handle_level_irq, "level"); 151 &msc_levelirq_type,
152 handle_level_irq,
153 "level");
153 if (cpu_has_veic) 154 if (cpu_has_veic)
154 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); 155 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
155 else 156 else
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index fb50cc78b28b..a8a8977d5887 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -11,29 +11,30 @@
11 */ 11 */
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/irq.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15 16
16#include <asm/irq_cpu.h> 17#include <asm/irq_cpu.h>
17#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
18#include <asm/system.h> 19#include <asm/system.h>
19 20
20static inline void unmask_rm7k_irq(unsigned int irq) 21static inline void unmask_rm7k_irq(struct irq_data *d)
21{ 22{
22 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); 23 set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
23} 24}
24 25
25static inline void mask_rm7k_irq(unsigned int irq) 26static inline void mask_rm7k_irq(struct irq_data *d)
26{ 27{
27 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); 28 clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
28} 29}
29 30
30static struct irq_chip rm7k_irq_controller = { 31static struct irq_chip rm7k_irq_controller = {
31 .name = "RM7000", 32 .name = "RM7000",
32 .ack = mask_rm7k_irq, 33 .irq_ack = mask_rm7k_irq,
33 .mask = mask_rm7k_irq, 34 .irq_mask = mask_rm7k_irq,
34 .mask_ack = mask_rm7k_irq, 35 .irq_mask_ack = mask_rm7k_irq,
35 .unmask = unmask_rm7k_irq, 36 .irq_unmask = unmask_rm7k_irq,
36 .eoi = unmask_rm7k_irq 37 .irq_eoi = unmask_rm7k_irq
37}; 38};
38 39
39void __init rm7k_cpu_irq_init(void) 40void __init rm7k_cpu_irq_init(void)
@@ -44,6 +45,6 @@ void __init rm7k_cpu_irq_init(void)
44 clear_c0_intcontrol(0x00000f00); /* Mask all */ 45 clear_c0_intcontrol(0x00000f00); /* Mask all */
45 46
46 for (i = base; i < base + 4; i++) 47 for (i = base; i < base + 4; i++)
47 set_irq_chip_and_handler(i, &rm7k_irq_controller, 48 irq_set_chip_and_handler(i, &rm7k_irq_controller,
48 handle_percpu_irq); 49 handle_percpu_irq);
49} 50}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index b47e4615ec12..38874a4b9255 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -11,6 +11,7 @@
11 */ 11 */
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/irq.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/module.h> 16#include <linux/module.h>
16 17
@@ -18,22 +19,22 @@
18#include <asm/mipsregs.h> 19#include <asm/mipsregs.h>
19#include <asm/system.h> 20#include <asm/system.h>
20 21
21static inline void unmask_rm9k_irq(unsigned int irq) 22static inline void unmask_rm9k_irq(struct irq_data *d)
22{ 23{
23 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); 24 set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
24} 25}
25 26
26static inline void mask_rm9k_irq(unsigned int irq) 27static inline void mask_rm9k_irq(struct irq_data *d)
27{ 28{
28 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); 29 clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
29} 30}
30 31
31static inline void rm9k_cpu_irq_enable(unsigned int irq) 32static inline void rm9k_cpu_irq_enable(struct irq_data *d)
32{ 33{
33 unsigned long flags; 34 unsigned long flags;
34 35
35 local_irq_save(flags); 36 local_irq_save(flags);
36 unmask_rm9k_irq(irq); 37 unmask_rm9k_irq(d);
37 local_irq_restore(flags); 38 local_irq_restore(flags);
38} 39}
39 40
@@ -42,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
42 */ 43 */
43static void local_rm9k_perfcounter_irq_startup(void *args) 44static void local_rm9k_perfcounter_irq_startup(void *args)
44{ 45{
45 unsigned int irq = (unsigned int) args; 46 rm9k_cpu_irq_enable(args);
46
47 rm9k_cpu_irq_enable(irq);
48} 47}
49 48
50static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) 49static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
51{ 50{
52 on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); 51 on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
53 52
54 return 0; 53 return 0;
55} 54}
56 55
57static void local_rm9k_perfcounter_irq_shutdown(void *args) 56static void local_rm9k_perfcounter_irq_shutdown(void *args)
58{ 57{
59 unsigned int irq = (unsigned int) args;
60 unsigned long flags; 58 unsigned long flags;
61 59
62 local_irq_save(flags); 60 local_irq_save(flags);
63 mask_rm9k_irq(irq); 61 mask_rm9k_irq(args);
64 local_irq_restore(flags); 62 local_irq_restore(flags);
65} 63}
66 64
67static void rm9k_perfcounter_irq_shutdown(unsigned int irq) 65static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
68{ 66{
69 on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); 67 on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
70} 68}
71 69
72static struct irq_chip rm9k_irq_controller = { 70static struct irq_chip rm9k_irq_controller = {
73 .name = "RM9000", 71 .name = "RM9000",
74 .ack = mask_rm9k_irq, 72 .irq_ack = mask_rm9k_irq,
75 .mask = mask_rm9k_irq, 73 .irq_mask = mask_rm9k_irq,
76 .mask_ack = mask_rm9k_irq, 74 .irq_mask_ack = mask_rm9k_irq,
77 .unmask = unmask_rm9k_irq, 75 .irq_unmask = unmask_rm9k_irq,
78 .eoi = unmask_rm9k_irq 76 .irq_eoi = unmask_rm9k_irq
79}; 77};
80 78
81static struct irq_chip rm9k_perfcounter_irq = { 79static struct irq_chip rm9k_perfcounter_irq = {
82 .name = "RM9000", 80 .name = "RM9000",
83 .startup = rm9k_perfcounter_irq_startup, 81 .irq_startup = rm9k_perfcounter_irq_startup,
84 .shutdown = rm9k_perfcounter_irq_shutdown, 82 .irq_shutdown = rm9k_perfcounter_irq_shutdown,
85 .ack = mask_rm9k_irq, 83 .irq_ack = mask_rm9k_irq,
86 .mask = mask_rm9k_irq, 84 .irq_mask = mask_rm9k_irq,
87 .mask_ack = mask_rm9k_irq, 85 .irq_mask_ack = mask_rm9k_irq,
88 .unmask = unmask_rm9k_irq, 86 .irq_unmask = unmask_rm9k_irq,
89}; 87};
90 88
91unsigned int rm9000_perfcount_irq; 89unsigned int rm9000_perfcount_irq;
@@ -100,10 +98,10 @@ void __init rm9k_cpu_irq_init(void)
100 clear_c0_intcontrol(0x0000f000); /* Mask all */ 98 clear_c0_intcontrol(0x0000f000); /* Mask all */
101 99
102 for (i = base; i < base + 4; i++) 100 for (i = base; i < base + 4; i++)
103 set_irq_chip_and_handler(i, &rm9k_irq_controller, 101 irq_set_chip_and_handler(i, &rm9k_irq_controller,
104 handle_level_irq); 102 handle_level_irq);
105 103
106 rm9000_perfcount_irq = base + 1; 104 rm9000_perfcount_irq = base + 1;
107 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 105 irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
108 handle_percpu_irq); 106 handle_percpu_irq);
109} 107}
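
With irq_data in hand, the RM9000 perf-counter startup/shutdown paths can pass the pointer straight through on_each_cpu() instead of round-tripping the IRQ number through a void * cast. A toy model of that pointer-passing pattern, with on_each_cpu() mocked as a plain loop over fake CPUs:

#include <stdio.h>

struct irq_data {
	unsigned int irq;
};

/* Illustrative mock: the real on_each_cpu() runs fn on every CPU. */
static void on_each_cpu(void (*fn)(void *), void *arg, int wait)
{
	for (int cpu = 0; cpu < 2; cpu++)	/* pretend two CPUs */
		fn(arg);
	(void)wait;
}

/* The per-CPU helper receives the same pointer the caller passed. */
static void local_startup(void *args)
{
	struct irq_data *d = args;

	printf("enable IRQ %u on this cpu\n", d->irq);
}

int main(void)
{
	struct irq_data d = { .irq = 5 };

	/* Passing &d avoids the old int-to-pointer round trip. */
	on_each_cpu(local_startup, &d, 1);
	return 0;
}
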
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index c6345f579a8a..9b734d74ae8e 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq)
81 81
82atomic_t irq_err_count; 82atomic_t irq_err_count;
83 83
84/* 84int arch_show_interrupts(struct seq_file *p, int prec)
85 * Generic, controller-independent functions:
86 */
87
88int show_interrupts(struct seq_file *p, void *v)
89{ 85{
90 int i = *(loff_t *) v, j; 86 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
91 struct irqaction * action;
92 unsigned long flags;
93
94 if (i == 0) {
95 seq_printf(p, " ");
96 for_each_online_cpu(j)
97 seq_printf(p, "CPU%d ", j);
98 seq_putc(p, '\n');
99 }
100
101 if (i < NR_IRQS) {
102 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
103 action = irq_desc[i].action;
104 if (!action)
105 goto skip;
106 seq_printf(p, "%3d: ", i);
107#ifndef CONFIG_SMP
108 seq_printf(p, "%10u ", kstat_irqs(i));
109#else
110 for_each_online_cpu(j)
111 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
112#endif
113 seq_printf(p, " %14s", irq_desc[i].chip->name);
114 seq_printf(p, " %s", action->name);
115
116 for (action=action->next; action; action = action->next)
117 seq_printf(p, ", %s", action->name);
118
119 seq_putc(p, '\n');
120skip:
121 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
122 } else if (i == NR_IRQS) {
123 seq_putc(p, '\n');
124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
125 }
126 return 0; 87 return 0;
127} 88}
128 89
@@ -141,7 +102,7 @@ void __init init_IRQ(void)
141#endif 102#endif
142 103
143 for (i = 0; i < NR_IRQS; i++) 104 for (i = 0; i < NR_IRQS; i++)
144 set_irq_noprobe(i); 105 irq_set_noprobe(i);
145 106
146 arch_init_irq(); 107 arch_init_irq();
147 108
@@ -151,6 +112,29 @@ void __init init_IRQ(void)
151#endif 112#endif
152} 113}
153 114
115#ifdef DEBUG_STACKOVERFLOW
116static inline void check_stack_overflow(void)
117{
118 unsigned long sp;
119
120 __asm__ __volatile__("move %0, $sp" : "=r" (sp));
121 sp &= THREAD_MASK;
122
123 /*
124 * Check for stack overflow: is there less than STACK_WARN free?
125 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
126 */
127 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
128 printk("do_IRQ: stack overflow: %ld\n",
129 sp - sizeof(struct thread_info));
130 dump_stack();
131 }
132}
133#else
134static inline void check_stack_overflow(void) {}
135#endif
136
137
154/* 138/*
155 * do_IRQ handles all normal device IRQ's (the special 139 * do_IRQ handles all normal device IRQ's (the special
156 * SMP cross-CPU interrupts have their own specific 140 * SMP cross-CPU interrupts have their own specific
@@ -159,8 +143,9 @@ void __init init_IRQ(void)
159void __irq_entry do_IRQ(unsigned int irq) 143void __irq_entry do_IRQ(unsigned int irq)
160{ 144{
161 irq_enter(); 145 irq_enter();
162 __DO_IRQ_SMTC_HOOK(irq); 146 check_stack_overflow();
163 generic_handle_irq(irq); 147 if (!smtc_handle_on_other_cpu(irq))
148 generic_handle_irq(irq);
164 irq_exit(); 149 irq_exit();
165} 150}
166 151
@@ -173,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq)
173void __irq_entry do_IRQ_no_affinity(unsigned int irq) 158void __irq_entry do_IRQ_no_affinity(unsigned int irq)
174{ 159{
175 irq_enter(); 160 irq_enter();
176 __NO_AFFINITY_IRQ_SMTC_HOOK(irq); 161 smtc_im_backstop(irq);
177 generic_handle_irq(irq); 162 generic_handle_irq(irq);
178 irq_exit(); 163 irq_exit();
179} 164}
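
The new check_stack_overflow() relies on kernel stacks being THREAD_SIZE-aligned: masking $sp with THREAD_MASK yields the offset into the current stack area, and anything below sizeof(struct thread_info) + STACK_WARN means the red zone has been reached. A stand-alone sketch of the arithmetic, with assumed sizes:

#include <stdio.h>

/* Illustrative values; the real ones come from the kernel config. */
#define THREAD_SIZE	(8 * 1024)		/* assumed stack size */
#define THREAD_MASK	(THREAD_SIZE - 1)
#define STACK_WARN	(THREAD_SIZE / 8)	/* default: 1/8 of THREAD_SIZE */
#define THREAD_INFO_SZ	64	/* stand-in for sizeof(struct thread_info) */

/* Mirrors the check in do_IRQ: how deep into the stack area are we? */
static void check_stack_overflow(unsigned long sp)
{
	unsigned long off = sp & THREAD_MASK;	/* offset within the stack */

	if (off < THREAD_INFO_SZ + STACK_WARN)
		printf("sp offset %#lx: less than STACK_WARN free -> warn\n",
		       off);
	else
		printf("sp offset %#lx: ok (%lu bytes above the red zone)\n",
		       off, off - (THREAD_INFO_SZ + STACK_WARN));
}

int main(void)
{
	check_stack_overflow(0x12340100);	/* deep: near thread_info */
	check_stack_overflow(0x12341f00);	/* shallow: plenty of room */
	return 0;
}
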
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 55c8a3ca507b..6e71b284f6c9 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -30,48 +30,45 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/irq.h>
33 34
34#include <asm/irq_cpu.h> 35#include <asm/irq_cpu.h>
35#include <asm/mipsregs.h> 36#include <asm/mipsregs.h>
36#include <asm/mipsmtregs.h> 37#include <asm/mipsmtregs.h>
37#include <asm/system.h> 38#include <asm/system.h>
38 39
39static inline void unmask_mips_irq(unsigned int irq) 40static inline void unmask_mips_irq(struct irq_data *d)
40{ 41{
41 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 42 set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
42 irq_enable_hazard(); 43 irq_enable_hazard();
43} 44}
44 45
45static inline void mask_mips_irq(unsigned int irq) 46static inline void mask_mips_irq(struct irq_data *d)
46{ 47{
47 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 48 clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
48 irq_disable_hazard(); 49 irq_disable_hazard();
49} 50}
50 51
51static struct irq_chip mips_cpu_irq_controller = { 52static struct irq_chip mips_cpu_irq_controller = {
52 .name = "MIPS", 53 .name = "MIPS",
53 .ack = mask_mips_irq, 54 .irq_ack = mask_mips_irq,
54 .mask = mask_mips_irq, 55 .irq_mask = mask_mips_irq,
55 .mask_ack = mask_mips_irq, 56 .irq_mask_ack = mask_mips_irq,
56 .unmask = unmask_mips_irq, 57 .irq_unmask = unmask_mips_irq,
57 .eoi = unmask_mips_irq, 58 .irq_eoi = unmask_mips_irq,
58}; 59};
59 60
60/* 61/*
61 * Basically the same as above but taking care of all the MT stuff 62 * Basically the same as above but taking care of all the MT stuff
62 */ 63 */
63 64
64#define unmask_mips_mt_irq unmask_mips_irq 65static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
65#define mask_mips_mt_irq mask_mips_irq
66
67static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
68{ 66{
69 unsigned int vpflags = dvpe(); 67 unsigned int vpflags = dvpe();
70 68
71 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 69 clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
72 evpe(vpflags); 70 evpe(vpflags);
73 unmask_mips_mt_irq(irq); 71 unmask_mips_irq(d);
74
75 return 0; 72 return 0;
76} 73}
77 74
@@ -79,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 79 * While we ack the interrupt, interrupts are disabled and thus we don't need 76 * While we ack the interrupt, interrupts are disabled and thus we don't need
80 * to deal with concurrency issues. Same for mips_cpu_irq_end. 77 * to deal with concurrency issues. Same for mips_cpu_irq_end.
81 */ 78 */
82static void mips_mt_cpu_irq_ack(unsigned int irq) 79static void mips_mt_cpu_irq_ack(struct irq_data *d)
83{ 80{
84 unsigned int vpflags = dvpe(); 81 unsigned int vpflags = dvpe();
85 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 82 clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
86 evpe(vpflags); 83 evpe(vpflags);
87 mask_mips_mt_irq(irq); 84 mask_mips_irq(d);
88} 85}
89 86
90static struct irq_chip mips_mt_cpu_irq_controller = { 87static struct irq_chip mips_mt_cpu_irq_controller = {
91 .name = "MIPS", 88 .name = "MIPS",
92 .startup = mips_mt_cpu_irq_startup, 89 .irq_startup = mips_mt_cpu_irq_startup,
93 .ack = mips_mt_cpu_irq_ack, 90 .irq_ack = mips_mt_cpu_irq_ack,
94 .mask = mask_mips_mt_irq, 91 .irq_mask = mask_mips_irq,
95 .mask_ack = mips_mt_cpu_irq_ack, 92 .irq_mask_ack = mips_mt_cpu_irq_ack,
96 .unmask = unmask_mips_mt_irq, 93 .irq_unmask = unmask_mips_irq,
97 .eoi = unmask_mips_mt_irq, 94 .irq_eoi = unmask_mips_irq,
98}; 95};
99 96
100void __init mips_cpu_irq_init(void) 97void __init mips_cpu_irq_init(void)
@@ -112,10 +109,10 @@ void __init mips_cpu_irq_init(void)
112 */ 109 */
113 if (cpu_has_mipsmt) 110 if (cpu_has_mipsmt)
114 for (i = irq_base; i < irq_base + 2; i++) 111 for (i = irq_base; i < irq_base + 2; i++)
115 set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, 112 irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
116 handle_percpu_irq); 113 handle_percpu_irq);
117 114
118 for (i = irq_base + 2; i < irq_base + 8; i++) 115 for (i = irq_base + 2; i < irq_base + 8; i++)
119 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 116 irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
120 handle_percpu_irq); 117 handle_percpu_irq);
121} 118}
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index 9b78029bea70..b0c55b50218e 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/irq.h>
19#include <asm/txx9irq.h> 20#include <asm/txx9irq.h>
20 21
21struct txx9_irc_reg { 22struct txx9_irc_reg {
@@ -62,9 +63,9 @@ static struct {
62 unsigned char mode; 63 unsigned char mode;
63} txx9irq[TXx9_MAX_IR] __read_mostly; 64} txx9irq[TXx9_MAX_IR] __read_mostly;
64 65
65static void txx9_irq_unmask(unsigned int irq) 66static void txx9_irq_unmask(struct irq_data *d)
66{ 67{
67 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 68 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
68 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; 69 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2];
69 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; 70 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
70 71
@@ -78,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq)
78#endif 79#endif
79} 80}
80 81
81static inline void txx9_irq_mask(unsigned int irq) 82static inline void txx9_irq_mask(struct irq_data *d)
82{ 83{
83 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 84 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
84 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; 85 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
85 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; 86 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
86 87
@@ -98,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq)
98#endif 99#endif
99} 100}
100 101
101static void txx9_irq_mask_ack(unsigned int irq) 102static void txx9_irq_mask_ack(struct irq_data *d)
102{ 103{
103 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 104 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
104 105
105 txx9_irq_mask(irq); 106 txx9_irq_mask(d);
106 /* clear edge detection */ 107 /* clear edge detection */
107 if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) 108 if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
108 __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); 109 __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
109} 110}
110 111
111static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) 112static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
112{ 113{
113 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 114 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
114 u32 cr; 115 u32 cr;
115 u32 __iomem *crp; 116 u32 __iomem *crp;
116 int ofs; 117 int ofs;
@@ -138,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
138 139
139static struct irq_chip txx9_irq_chip = { 140static struct irq_chip txx9_irq_chip = {
140 .name = "TXX9", 141 .name = "TXX9",
141 .ack = txx9_irq_mask_ack, 142 .irq_ack = txx9_irq_mask_ack,
142 .mask = txx9_irq_mask, 143 .irq_mask = txx9_irq_mask,
143 .mask_ack = txx9_irq_mask_ack, 144 .irq_mask_ack = txx9_irq_mask_ack,
144 .unmask = txx9_irq_unmask, 145 .irq_unmask = txx9_irq_unmask,
145 .set_type = txx9_irq_set_type, 146 .irq_set_type = txx9_irq_set_type,
146}; 147};
147 148
148void __init txx9_irq_init(unsigned long baseaddr) 149void __init txx9_irq_init(unsigned long baseaddr)
@@ -153,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr)
153 for (i = 0; i < TXx9_MAX_IR; i++) { 154 for (i = 0; i < TXx9_MAX_IR; i++) {
154 txx9irq[i].level = 4; /* middle level */ 155 txx9irq[i].level = 4; /* middle level */
155 txx9irq[i].mode = TXx9_IRCR_LOW; 156 txx9irq[i].mode = TXx9_IRCR_LOW;
156 set_irq_chip_and_handler(TXX9_IRQ_BASE + i, 157 irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
157 &txx9_irq_chip, handle_level_irq); 158 handle_level_irq);
158 } 159 }
159 160
160 /* mask all IRC interrupts */ 161 /* mask all IRC interrupts */
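
The TXx9 ILR access pattern packs four 8-bit level fields per 32-bit ilr register: (irq_nr % 16) / 2 selects the register, and irq_nr / 16 * 16 + (irq_nr & 1) * 8 selects the bit lane within it. A quick stand-alone check of that mapping:

#include <stdio.h>

/* Recomputes the ILR word/bit-lane mapping used by txx9_irq_unmask(). */
int main(void)
{
	static const unsigned int samples[] = { 0, 1, 2, 15, 16, 17, 31 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int irq_nr = samples[i];
		unsigned int word = (irq_nr % 16) / 2;	/* ilr[] index */
		unsigned int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;

		printf("irq_nr %2u -> ilr[%u], bits %2u..%2u\n",
		       irq_nr, word, ofs, ofs + 7);
	}
	return 0;
}
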
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 000000000000..6001610cfe55
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,54 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2010 Cavium Networks, Inc.
7 */
8
9#include <linux/jump_label.h>
10#include <linux/kernel.h>
11#include <linux/memory.h>
12#include <linux/mutex.h>
13#include <linux/types.h>
14#include <linux/cpu.h>
15
16#include <asm/cacheflush.h>
17#include <asm/inst.h>
18
19#ifdef HAVE_JUMP_LABEL
20
21#define J_RANGE_MASK ((1ul << 28) - 1)
22
23void arch_jump_label_transform(struct jump_entry *e,
24 enum jump_label_type type)
25{
26 union mips_instruction insn;
27 union mips_instruction *insn_p =
28 (union mips_instruction *)(unsigned long)e->code;
29
30 /* Jump only works within a 256MB aligned region. */
31 BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
32
33 /* Target must have 4 byte alignment. */
34 BUG_ON((e->target & 3) != 0);
35
36 if (type == JUMP_LABEL_ENABLE) {
37 insn.j_format.opcode = j_op;
38 insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
39 } else {
40 insn.word = 0; /* nop */
41 }
42
43 get_online_cpus();
44 mutex_lock(&text_mutex);
45 *insn_p = insn;
46
47 flush_icache_range((unsigned long)insn_p,
48 (unsigned long)insn_p + sizeof(*insn_p));
49
50 mutex_unlock(&text_mutex);
51 put_online_cpus();
52}
53
54#endif /* HAVE_JUMP_LABEL */
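
arch_jump_label_transform() patches each jump-label site to either a J-format jump or a nop. The J instruction carries only bits 27..2 of the destination, which is why code and target must share the same 256 MB aligned region and the target must be word aligned. A stand-alone sketch of the encoding; the addresses below are made up:

#include <stdio.h>
#include <stdint.h>

#define J_RANGE_MASK	((1ul << 28) - 1)	/* 256 MB region, as above */
#define J_OP		2u			/* MIPS J-format opcode */

/* Builds the 'j target' word the way arch_jump_label_transform() does. */
static uint32_t mips_j_insn(unsigned long code, unsigned long target)
{
	/* Both addresses must share the same 256 MB aligned region ... */
	if ((target & ~J_RANGE_MASK) != (code & ~J_RANGE_MASK))
		return 0;	/* out of range: the kernel would BUG() */
	/* ... and the target must be 4-byte aligned. */
	if (target & 3)
		return 0;

	return (J_OP << 26) | ((target & J_RANGE_MASK) >> 2);
}

int main(void)
{
	unsigned long code = 0x80123450UL, target = 0x80123470UL;
	uint32_t insn = mips_j_insn(code, target);

	printf("j 0x%lx encodes as 0x%08x (nop when disabled: 0x00000000)\n",
	       target, insn);
	return 0;
}
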
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 6343b4a5b835..876a75cc376f 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -251,14 +251,15 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
251 251
252SYSCALL_DEFINE1(32_personality, unsigned long, personality) 252SYSCALL_DEFINE1(32_personality, unsigned long, personality)
253{ 253{
254 unsigned int p = personality & 0xffffffff;
254 int ret; 255 int ret;
255 personality &= 0xffffffff; 256
256 if (personality(current->personality) == PER_LINUX32 && 257 if (personality(current->personality) == PER_LINUX32 &&
257 personality == PER_LINUX) 258 personality(p) == PER_LINUX)
258 personality = PER_LINUX32; 259 p = (p & ~PER_MASK) | PER_LINUX32;
259 ret = sys_personality(personality); 260 ret = sys_personality(p);
260 if (ret == PER_LINUX32) 261 if (ret != -1 && personality(ret) == PER_LINUX32)
261 ret = PER_LINUX; 262 ret = (ret & ~PER_MASK) | PER_LINUX;
262 return ret; 263 return ret;
263} 264}
264 265
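
The 32_personality change fixes two bugs: replacing the whole personality word discarded flag bits such as ADDR_LIMIT_3GB, and translating the raw return value whenever it equalled PER_LINUX32 could mangle an error return. Rewriting only the low PER_MASK bits preserves the flags. A small demonstration, using the PER_* values from <linux/personality.h> of this era:

#include <stdio.h>

#define PER_LINUX	0x0000
#define PER_LINUX32	0x0008
#define PER_MASK	0x00ff
#define ADDR_LIMIT_3GB	0x8000000	/* one of the high flag bits */

#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned int p = PER_LINUX | ADDR_LIMIT_3GB;

	/* Old code replaced the whole word, losing ADDR_LIMIT_3GB: */
	unsigned int old_way = PER_LINUX32;

	/* New code rewrites only the low personality bits: */
	unsigned int new_way = (p & ~PER_MASK) | PER_LINUX32;

	printf("old: %#x (flags lost), new: %#x (flags kept)\n",
	       old_way, new_way);

	/* And an error return from sys_personality() passes through: */
	int ret = -1;
	if (ret != -1 && personality(ret) == PER_LINUX32)
		ret = (ret & ~PER_MASK) | PER_LINUX;
	printf("error return stays %d\n", ret);
	return 0;
}
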
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 2340f11dc29c..802e6160f37e 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
103 if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) 103 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
104 goto out_unlock; 104 goto out_unlock;
105 105
106 retval = security_task_setscheduler(p, 0, NULL); 106 retval = security_task_setscheduler(p);
107 if (retval) 107 if (retval)
108 goto out_unlock; 108 goto out_unlock;
109 109
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
new file mode 100644
index 000000000000..411a058d2c53
--- /dev/null
+++ b/arch/mips/kernel/mips_machine.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
7 *
8 */
9#include <linux/mm.h>
10#include <linux/string.h>
11#include <linux/slab.h>
12
13#include <asm/mips_machine.h>
14
15static struct mips_machine *mips_machine __initdata;
16static char *mips_machine_name = "Unknown";
17
18#define for_each_machine(mach) \
19 for ((mach) = (struct mips_machine *)&__mips_machines_start; \
20 (mach) && \
21 (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
22 (mach)++)
23
24__init void mips_set_machine_name(const char *name)
25{
26 char *p;
27
28 if (name == NULL)
29 return;
30
31 p = kstrdup(name, GFP_KERNEL);
32 if (!p)
33 pr_err("MIPS: no memory for machine_name\n");
34
35 mips_machine_name = p;
36}
37
38char *mips_get_machine_name(void)
39{
40 return mips_machine_name;
41}
42
43__init int mips_machtype_setup(char *id)
44{
45 struct mips_machine *mach;
46
47 for_each_machine(mach) {
48 if (mach->mach_id == NULL)
49 continue;
50
51 if (strcmp(mach->mach_id, id) == 0) {
52 mips_machtype = mach->mach_type;
53 return 0;
54 }
55 }
56
57 pr_err("MIPS: no machine found for id '%s', supported machines:\n", id);
58 pr_err("%-24s %s\n", "id", "name");
59 for_each_machine(mach)
60 pr_err("%-24s %s\n", mach->mach_id, mach->mach_name);
61
62 return 1;
63}
64
65__setup("machtype=", mips_machtype_setup);
66
67__init void mips_machine_setup(void)
68{
69 struct mips_machine *mach;
70
71 for_each_machine(mach) {
72 if (mips_machtype == mach->mach_type) {
73 mips_machine = mach;
74 break;
75 }
76 }
77
78 if (!mips_machine)
79 return;
80
81 mips_set_machine_name(mips_machine->mach_name);
82 pr_info("MIPS: machine is %s\n", mips_machine_name);
83
84 if (mips_machine->mach_setup)
85 mips_machine->mach_setup();
86}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 6f51dda87fce..dd940b701963 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -30,6 +30,8 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/jump_label.h>
34
33#include <asm/pgtable.h> /* MODULE_START */ 35#include <asm/pgtable.h> /* MODULE_START */
34 36
35struct mips_hi16 { 37struct mips_hi16 {
@@ -46,17 +48,9 @@ static DEFINE_SPINLOCK(dbe_lock);
46void *module_alloc(unsigned long size) 48void *module_alloc(unsigned long size)
47{ 49{
48#ifdef MODULE_START 50#ifdef MODULE_START
49 struct vm_struct *area; 51 return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
50 52 GFP_KERNEL, PAGE_KERNEL, -1,
51 size = PAGE_ALIGN(size); 53 __builtin_return_address(0));
52 if (!size)
53 return NULL;
54
55 area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
56 if (!area)
57 return NULL;
58
59 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
60#else 54#else
61 if (size == 0) 55 if (size == 0)
62 return NULL; 56 return NULL;
@@ -390,6 +384,9 @@ int module_finalize(const Elf_Ehdr *hdr,
390 const Elf_Shdr *s; 384 const Elf_Shdr *s;
391 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 385 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
392 386
387 /* Make jump label nops. */
388 jump_label_apply_nops(me);
389
393 INIT_LIST_HEAD(&me->arch.dbe_list); 390 INIT_LIST_HEAD(&me->arch.dbe_list);
394 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 391 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
395 if (strcmp("__dbe_table", secstrings + s->sh_name) != 0) 392 if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index dd18b26a358a..ce89c8061708 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -4,7 +4,7 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle 6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 7 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse 8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc. 10 * Copyright (C) 2000 MIPS Technologies, Inc.
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000000..a8244854d3dc
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,588 @@
1/*
2 * Linux performance counter support for MIPS.
3 *
4 * Copyright (C) 2010 MIPS Technologies, Inc.
5 * Author: Deng-Cheng Zhu
6 *
7 * This code is based on the implementation for ARM, which is in turn
8 * based on the sparc64 perf event code and the x86 code. Performance
9 * counter access is based on the MIPS Oprofile code. And the callchain
10 * support references the code of MIPS stacktrace.c.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/cpumask.h>
18#include <linux/interrupt.h>
19#include <linux/smp.h>
20#include <linux/kernel.h>
21#include <linux/perf_event.h>
22#include <linux/uaccess.h>
23
24#include <asm/irq.h>
25#include <asm/irq_regs.h>
26#include <asm/stacktrace.h>
27#include <asm/time.h> /* For perf_irq */
28
29/* These are for 32bit counters. For 64bit ones, define them accordingly. */
30#define MAX_PERIOD ((1ULL << 32) - 1)
31#define VALID_COUNT 0x7fffffff
32#define TOTAL_BITS 32
33#define HIGHEST_BIT 31
34
35#define MIPS_MAX_HWEVENTS 4
36
37struct cpu_hw_events {
38 /* Array of events on this cpu. */
39 struct perf_event *events[MIPS_MAX_HWEVENTS];
40
41 /*
42 * Set the bit (indexed by the counter number) when the counter
43 * is used for an event.
44 */
45 unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
46
47 /*
48 * The borrowed MSB for the performance counter. A MIPS performance
 49	 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
 50	 * counters) to decide whether a counter overflow
51 * should be signaled. So here we use a separate MSB for each
52 * counter to make things easy.
53 */
54 unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
55
56 /*
57 * Software copy of the control register for each performance counter.
 58	 * MIPS CPUs vary in their performance counter implementations; each
 59	 * uses this copy differently, and some may not use it at all.
60 */
61 unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
62};
63DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
64 .saved_ctrl = {0},
65};
66
67/* The description of MIPS performance events. */
68struct mips_perf_event {
69 unsigned int event_id;
70 /*
71 * MIPS performance counters are indexed starting from 0.
 72	 * CNTR_EVEN indicates that the counters to be used must have even
 73	 * indexes.
74 */
75 unsigned int cntr_mask;
76 #define CNTR_EVEN 0x55555555
77 #define CNTR_ODD 0xaaaaaaaa
78#ifdef CONFIG_MIPS_MT_SMP
79 enum {
80 T = 0,
81 V = 1,
82 P = 2,
83 } range;
84#else
85 #define T
86 #define V
87 #define P
88#endif
89};
90
91static struct mips_perf_event raw_event;
92static DEFINE_MUTEX(raw_event_mutex);
93
94#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
95#define C(x) PERF_COUNT_HW_CACHE_##x
96
97struct mips_pmu {
98 const char *name;
99 int irq;
100 irqreturn_t (*handle_irq)(int irq, void *dev);
101 int (*handle_shared_irq)(void);
102 void (*start)(void);
103 void (*stop)(void);
104 int (*alloc_counter)(struct cpu_hw_events *cpuc,
105 struct hw_perf_event *hwc);
106 u64 (*read_counter)(unsigned int idx);
107 void (*write_counter)(unsigned int idx, u64 val);
108 void (*enable_event)(struct hw_perf_event *evt, int idx);
109 void (*disable_event)(int idx);
110 const struct mips_perf_event *(*map_raw_event)(u64 config);
111 const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
112 const struct mips_perf_event (*cache_event_map)
113 [PERF_COUNT_HW_CACHE_MAX]
114 [PERF_COUNT_HW_CACHE_OP_MAX]
115 [PERF_COUNT_HW_CACHE_RESULT_MAX];
116 unsigned int num_counters;
117};
118
119static const struct mips_pmu *mipspmu;
120
121static int
122mipspmu_event_set_period(struct perf_event *event,
123 struct hw_perf_event *hwc,
124 int idx)
125{
126 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
127 s64 left = local64_read(&hwc->period_left);
128 s64 period = hwc->sample_period;
129 int ret = 0;
130 u64 uleft;
131 unsigned long flags;
132
133 if (unlikely(left <= -period)) {
134 left = period;
135 local64_set(&hwc->period_left, left);
136 hwc->last_period = period;
137 ret = 1;
138 }
139
140 if (unlikely(left <= 0)) {
141 left += period;
142 local64_set(&hwc->period_left, left);
143 hwc->last_period = period;
144 ret = 1;
145 }
146
147 if (left > (s64)MAX_PERIOD)
148 left = MAX_PERIOD;
149
150 local64_set(&hwc->prev_count, (u64)-left);
151
152 local_irq_save(flags);
153 uleft = (u64)(-left) & MAX_PERIOD;
154 uleft > VALID_COUNT ?
155 set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
156 mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
157 local_irq_restore(flags);
158
159 perf_event_update_userpage(event);
160
161 return ret;
162}
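
mipspmu_event_set_period() arms a counter by preloading it with the negative of the remaining period, so the overflow condition is reached after exactly `left` events. Since bit 31 of a 32-bit MIPS counter signals overflow, only 31 bits (VALID_COUNT) are written to hardware and the 32nd bit is tracked in software via cpuc->msbs. A stand-alone sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff	/* hardware keeps bit 31 for overflow */

int main(void)
{
	int64_t left = 100000;	/* events until the next sample */
	uint64_t uleft = (uint64_t)(-left) & MAX_PERIOD;

	/* The software MSB is set when the preload spills past 31 bits. */
	int msb = uleft > VALID_COUNT;
	uint32_t preload = (uint32_t)((uint64_t)(-left) & VALID_COUNT);

	printf("preload=%#x software-msb=%d\n", preload, msb);

	/* After 'left' increments the value crosses VALID_COUNT, which is
	 * exactly when the overflow interrupt should fire. */
	uint64_t final = (uint64_t)preload + (uint64_t)left;

	printf("counter after %lld events: %#llx (> VALID_COUNT: %s)\n",
	       (long long)left, (unsigned long long)final,
	       final > VALID_COUNT ? "yes" : "no");
	return 0;
}
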
163
164static void mipspmu_event_update(struct perf_event *event,
165 struct hw_perf_event *hwc,
166 int idx)
167{
168 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
169 unsigned long flags;
170 int shift = 64 - TOTAL_BITS;
171 s64 prev_raw_count, new_raw_count;
172 u64 delta;
173
174again:
175 prev_raw_count = local64_read(&hwc->prev_count);
176 local_irq_save(flags);
 177	 /* Rebuild the full counter value from the raw hardware read. */
178 new_raw_count = mipspmu->read_counter(idx);
179 if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
180 new_raw_count &= VALID_COUNT;
181 clear_bit(idx, cpuc->msbs);
182 } else
183 new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
184 local_irq_restore(flags);
185
186 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
187 new_raw_count) != prev_raw_count)
188 goto again;
189
190 delta = (new_raw_count << shift) - (prev_raw_count << shift);
191 delta >>= shift;
192
193 local64_add(delta, &event->count);
194 local64_sub(delta, &hwc->period_left);
195
196 return;
197}
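
mipspmu_event_update() computes the event delta with a shift trick: moving both raw values into the top TOTAL_BITS of a u64 before subtracting makes the difference wrap modulo 2^TOTAL_BITS, so a counter that wrapped between two reads still yields the correct count. A minimal demonstration:

#include <stdio.h>
#include <stdint.h>

#define TOTAL_BITS 32

/* The shift trick from mipspmu_event_update(): new - prev is taken
 * modulo 2^TOTAL_BITS, so a counter wrap between reads is harmless. */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	int shift = 64 - TOTAL_BITS;
	uint64_t delta = (new_raw << shift) - (prev_raw << shift);

	return delta >> shift;
}

int main(void)
{
	/* prev near the top of the 32-bit range, new just past a wrap: */
	uint64_t prev_raw = 0xfffffff0ULL, new_raw = 0x00000005ULL;

	printf("delta = %llu events\n",	/* prints 21 */
	       (unsigned long long)counter_delta(prev_raw, new_raw));
	return 0;
}
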
198
199static void mipspmu_start(struct perf_event *event, int flags)
200{
201 struct hw_perf_event *hwc = &event->hw;
202
203 if (!mipspmu)
204 return;
205
206 if (flags & PERF_EF_RELOAD)
207 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
208
209 hwc->state = 0;
210
211 /* Set the period for the event. */
212 mipspmu_event_set_period(event, hwc, hwc->idx);
213
214 /* Enable the event. */
215 mipspmu->enable_event(hwc, hwc->idx);
216}
217
218static void mipspmu_stop(struct perf_event *event, int flags)
219{
220 struct hw_perf_event *hwc = &event->hw;
221
222 if (!mipspmu)
223 return;
224
225 if (!(hwc->state & PERF_HES_STOPPED)) {
226 /* We are working on a local event. */
227 mipspmu->disable_event(hwc->idx);
228 barrier();
229 mipspmu_event_update(event, hwc, hwc->idx);
230 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
231 }
232}
233
234static int mipspmu_add(struct perf_event *event, int flags)
235{
236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 struct hw_perf_event *hwc = &event->hw;
238 int idx;
239 int err = 0;
240
241 perf_pmu_disable(event->pmu);
242
 243	 /* Look for a free counter for this event. */
244 idx = mipspmu->alloc_counter(cpuc, hwc);
245 if (idx < 0) {
246 err = idx;
247 goto out;
248 }
249
250 /*
251 * If there is an event in the counter we are going to use then
252 * make sure it is disabled.
253 */
254 event->hw.idx = idx;
255 mipspmu->disable_event(idx);
256 cpuc->events[idx] = event;
257
258 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
259 if (flags & PERF_EF_START)
260 mipspmu_start(event, PERF_EF_RELOAD);
261
262 /* Propagate our changes to the userspace mapping. */
263 perf_event_update_userpage(event);
264
265out:
266 perf_pmu_enable(event->pmu);
267 return err;
268}
269
270static void mipspmu_del(struct perf_event *event, int flags)
271{
272 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
273 struct hw_perf_event *hwc = &event->hw;
274 int idx = hwc->idx;
275
276 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
277
278 mipspmu_stop(event, PERF_EF_UPDATE);
279 cpuc->events[idx] = NULL;
280 clear_bit(idx, cpuc->used_mask);
281
282 perf_event_update_userpage(event);
283}
284
285static void mipspmu_read(struct perf_event *event)
286{
287 struct hw_perf_event *hwc = &event->hw;
288
289 /* Don't read disabled counters! */
290 if (hwc->idx < 0)
291 return;
292
293 mipspmu_event_update(event, hwc, hwc->idx);
294}
295
296static void mipspmu_enable(struct pmu *pmu)
297{
298 if (mipspmu)
299 mipspmu->start();
300}
301
302static void mipspmu_disable(struct pmu *pmu)
303{
304 if (mipspmu)
305 mipspmu->stop();
306}
307
308static atomic_t active_events = ATOMIC_INIT(0);
309static DEFINE_MUTEX(pmu_reserve_mutex);
310static int (*save_perf_irq)(void);
311
312static int mipspmu_get_irq(void)
313{
314 int err;
315
316 if (mipspmu->irq >= 0) {
317 /* Request my own irq handler. */
318 err = request_irq(mipspmu->irq, mipspmu->handle_irq,
319 IRQF_DISABLED | IRQF_NOBALANCING,
320 "mips_perf_pmu", NULL);
321 if (err) {
322 pr_warning("Unable to request IRQ%d for MIPS "
323 "performance counters!\n", mipspmu->irq);
324 }
325 } else if (cp0_perfcount_irq < 0) {
326 /*
327 * We are sharing the irq number with the timer interrupt.
328 */
329 save_perf_irq = perf_irq;
330 perf_irq = mipspmu->handle_shared_irq;
331 err = 0;
332 } else {
333 pr_warning("The platform hasn't properly defined its "
334 "interrupt controller.\n");
335 err = -ENOENT;
336 }
337
338 return err;
339}
340
341static void mipspmu_free_irq(void)
342{
343 if (mipspmu->irq >= 0)
344 free_irq(mipspmu->irq, NULL);
345 else if (cp0_perfcount_irq < 0)
346 perf_irq = save_perf_irq;
347}
348
349/*
 350 * mipsxx/rm9000/loongson2 have different performance counters; each has
 351 * its own specific low-level init routine.
352 */
353static void reset_counters(void *arg);
354static int __hw_perf_event_init(struct perf_event *event);
355
356static void hw_perf_event_destroy(struct perf_event *event)
357{
358 if (atomic_dec_and_mutex_lock(&active_events,
359 &pmu_reserve_mutex)) {
360 /*
361 * We must not call the destroy function with interrupts
362 * disabled.
363 */
364 on_each_cpu(reset_counters,
365 (void *)(long)mipspmu->num_counters, 1);
366 mipspmu_free_irq();
367 mutex_unlock(&pmu_reserve_mutex);
368 }
369}
370
371static int mipspmu_event_init(struct perf_event *event)
372{
373 int err = 0;
374
375 switch (event->attr.type) {
376 case PERF_TYPE_RAW:
377 case PERF_TYPE_HARDWARE:
378 case PERF_TYPE_HW_CACHE:
379 break;
380
381 default:
382 return -ENOENT;
383 }
384
385 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
386 (event->cpu >= 0 && !cpu_online(event->cpu)))
387 return -ENODEV;
388
389 if (!atomic_inc_not_zero(&active_events)) {
390 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
391 atomic_dec(&active_events);
392 return -ENOSPC;
393 }
394
395 mutex_lock(&pmu_reserve_mutex);
396 if (atomic_read(&active_events) == 0)
397 err = mipspmu_get_irq();
398
399 if (!err)
400 atomic_inc(&active_events);
401 mutex_unlock(&pmu_reserve_mutex);
402 }
403
404 if (err)
405 return err;
406
407 err = __hw_perf_event_init(event);
408 if (err)
409 hw_perf_event_destroy(event);
410
411 return err;
412}
413
414static struct pmu pmu = {
415 .pmu_enable = mipspmu_enable,
416 .pmu_disable = mipspmu_disable,
417 .event_init = mipspmu_event_init,
418 .add = mipspmu_add,
419 .del = mipspmu_del,
420 .start = mipspmu_start,
421 .stop = mipspmu_stop,
422 .read = mipspmu_read,
423};
424
425static inline unsigned int
426mipspmu_perf_event_encode(const struct mips_perf_event *pev)
427{
428/*
429 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
430 * event_id.
431 */
432#ifdef CONFIG_MIPS_MT_SMP
433 return ((unsigned int)pev->range << 24) |
434 (pev->cntr_mask & 0xffff00) |
435 (pev->event_id & 0xff);
436#else
437 return (pev->cntr_mask & 0xffff00) |
438 (pev->event_id & 0xff);
439#endif
440}
441
442static const struct mips_perf_event *
443mipspmu_map_general_event(int idx)
444{
445 const struct mips_perf_event *pev;
446
447 pev = ((*mipspmu->general_event_map)[idx].event_id ==
448 UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
449 &(*mipspmu->general_event_map)[idx]);
450
451 return pev;
452}
453
454static const struct mips_perf_event *
455mipspmu_map_cache_event(u64 config)
456{
457 unsigned int cache_type, cache_op, cache_result;
458 const struct mips_perf_event *pev;
459
460 cache_type = (config >> 0) & 0xff;
461 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
462 return ERR_PTR(-EINVAL);
463
464 cache_op = (config >> 8) & 0xff;
465 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
466 return ERR_PTR(-EINVAL);
467
468 cache_result = (config >> 16) & 0xff;
469 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
470 return ERR_PTR(-EINVAL);
471
472 pev = &((*mipspmu->cache_event_map)
473 [cache_type]
474 [cache_op]
475 [cache_result]);
476
477 if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
478 return ERR_PTR(-EOPNOTSUPP);
479
480 return pev;
481
482}
483
484static int validate_event(struct cpu_hw_events *cpuc,
485 struct perf_event *event)
486{
487 struct hw_perf_event fake_hwc = event->hw;
488
489 /* Allow mixed event group. So return 1 to pass validation. */
490 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
491 return 1;
492
493 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
494}
495
496static int validate_group(struct perf_event *event)
497{
498 struct perf_event *sibling, *leader = event->group_leader;
499 struct cpu_hw_events fake_cpuc;
500
501 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
502
503 if (!validate_event(&fake_cpuc, leader))
504 return -ENOSPC;
505
506 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
507 if (!validate_event(&fake_cpuc, sibling))
508 return -ENOSPC;
509 }
510
511 if (!validate_event(&fake_cpuc, event))
512 return -ENOSPC;
513
514 return 0;
515}
516
517/* This is needed by specific irq handlers in perf_event_*.c */
518static void
519handle_associated_event(struct cpu_hw_events *cpuc,
520 int idx, struct perf_sample_data *data, struct pt_regs *regs)
521{
522 struct perf_event *event = cpuc->events[idx];
523 struct hw_perf_event *hwc = &event->hw;
524
525 mipspmu_event_update(event, hwc, idx);
526 data->period = event->hw.last_period;
527 if (!mipspmu_event_set_period(event, hwc, idx))
528 return;
529
530 if (perf_event_overflow(event, 0, data, regs))
531 mipspmu->disable_event(idx);
532}
533
534#include "perf_event_mipsxx.c"
535
536/* Callchain handling code. */
537
538/*
 539 * Leave the userspace callchain empty for now. When we find a way to
 540 * trace user stack callchains, the support will be added here.
541 */
542void perf_callchain_user(struct perf_callchain_entry *entry,
543 struct pt_regs *regs)
544{
545}
546
547static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
548 unsigned long reg29)
549{
550 unsigned long *sp = (unsigned long *)reg29;
551 unsigned long addr;
552
553 while (!kstack_end(sp)) {
554 addr = *sp++;
555 if (__kernel_text_address(addr)) {
556 perf_callchain_store(entry, addr);
557 if (entry->nr >= PERF_MAX_STACK_DEPTH)
558 break;
559 }
560 }
561}
562
563void perf_callchain_kernel(struct perf_callchain_entry *entry,
564 struct pt_regs *regs)
565{
566 unsigned long sp = regs->regs[29];
567#ifdef CONFIG_KALLSYMS
568 unsigned long ra = regs->regs[31];
569 unsigned long pc = regs->cp0_epc;
570
571 if (raw_show_trace || !__kernel_text_address(pc)) {
572 unsigned long stack_page =
573 (unsigned long)task_stack_page(current);
574 if (stack_page && sp >= stack_page &&
575 sp <= stack_page + THREAD_SIZE - 32)
576 save_raw_perf_callchain(entry, sp);
577 return;
578 }
579 do {
580 perf_callchain_store(entry, pc);
581 if (entry->nr >= PERF_MAX_STACK_DEPTH)
582 break;
583 pc = unwind_stack(current, &sp, pc, &ra);
584 } while (pc);
585#else
586 save_raw_perf_callchain(entry, sp);
587#endif
588}
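
For kernel callchains, the code prefers frame-accurate unwinding via unwind_stack() when KALLSYMS is available; otherwise (or when the PC is not in kernel text) save_raw_perf_callchain() simply scans every word on the stack and records those that look like kernel text addresses — heuristic, but cheap. A user-space sketch of that raw scan, with made-up text-segment bounds:

#include <stdio.h>

/* Illustrative stand-ins for kstack_end()/__kernel_text_address(). */
#define STACK_WORDS	8
#define TEXT_LO		0x80100000UL
#define TEXT_HI		0x80200000UL

static int looks_like_text(unsigned long addr)
{
	return addr >= TEXT_LO && addr < TEXT_HI;
}

int main(void)
{
	/* A fake raw stack: a mix of data values and return addresses. */
	unsigned long stack[STACK_WORDS] = {
		0x0000002a, 0x8012abcd, 0x00000000, 0x80134567,
		0xdeadbeef, 0x801fffff, 0x00000001, 0x00000000,
	};

	/* The raw scan: record every word that could be a kernel PC. */
	for (int i = 0; i < STACK_WORDS; i++)
		if (looks_like_text(stack[i]))
			printf("callchain entry: 0x%08lx\n", stack[i]);
	return 0;
}
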
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000000..75266ff4cc33
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1054 @@
1#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
2 defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
3
4#define M_CONFIG1_PC (1 << 4)
5
6#define M_PERFCTL_EXL (1UL << 0)
7#define M_PERFCTL_KERNEL (1UL << 1)
8#define M_PERFCTL_SUPERVISOR (1UL << 2)
9#define M_PERFCTL_USER (1UL << 3)
10#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
11#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
12#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
13#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
14#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
15#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
16#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
17#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
18#define M_PERFCTL_WIDE (1UL << 30)
19#define M_PERFCTL_MORE (1UL << 31)
20
21#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
22 M_PERFCTL_KERNEL | \
23 M_PERFCTL_USER | \
24 M_PERFCTL_SUPERVISOR | \
25 M_PERFCTL_INTERRUPT_ENABLE)
26
27#ifdef CONFIG_MIPS_MT_SMP
28#define M_PERFCTL_CONFIG_MASK 0x3fff801f
29#else
30#define M_PERFCTL_CONFIG_MASK 0x1f
31#endif
32#define M_PERFCTL_EVENT_MASK 0xfe0
33
34#define M_COUNTER_OVERFLOW (1UL << 31)
35
36#ifdef CONFIG_MIPS_MT_SMP
37static int cpu_has_mipsmt_pertccounters;
38
39/*
40 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
41 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
42 */
43#if defined(CONFIG_HW_PERF_EVENTS)
44#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
45 0 : smp_processor_id())
46#else
47#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
48 0 : cpu_data[smp_processor_id()].vpe_id)
49#endif
50
51/* Copied from op_model_mipsxx.c */
52static inline unsigned int vpe_shift(void)
53{
54 if (num_possible_cpus() > 1)
55 return 1;
56
57 return 0;
58}
59#else /* !CONFIG_MIPS_MT_SMP */
60#define vpe_id() 0
61
62static inline unsigned int vpe_shift(void)
63{
64 return 0;
65}
66#endif /* CONFIG_MIPS_MT_SMP */
67
68static inline unsigned int
69counters_total_to_per_cpu(unsigned int counters)
70{
71 return counters >> vpe_shift();
72}
73
74static inline unsigned int
75counters_per_cpu_to_total(unsigned int counters)
76{
77 return counters << vpe_shift();
78}
79
80#define __define_perf_accessors(r, n, np) \
81 \
82static inline unsigned int r_c0_ ## r ## n(void) \
83{ \
84 unsigned int cpu = vpe_id(); \
85 \
86 switch (cpu) { \
87 case 0: \
88 return read_c0_ ## r ## n(); \
89 case 1: \
90 return read_c0_ ## r ## np(); \
91 default: \
92 BUG(); \
93 } \
94 return 0; \
95} \
96 \
97static inline void w_c0_ ## r ## n(unsigned int value) \
98{ \
99 unsigned int cpu = vpe_id(); \
100 \
101 switch (cpu) { \
102 case 0: \
103 write_c0_ ## r ## n(value); \
104 return; \
105 case 1: \
106 write_c0_ ## r ## np(value); \
107 return; \
108 default: \
109 BUG(); \
110 } \
111 return; \
112} \
113
114__define_perf_accessors(perfcntr, 0, 2)
115__define_perf_accessors(perfcntr, 1, 3)
116__define_perf_accessors(perfcntr, 2, 0)
117__define_perf_accessors(perfcntr, 3, 1)
118
119__define_perf_accessors(perfctrl, 0, 2)
120__define_perf_accessors(perfctrl, 1, 3)
121__define_perf_accessors(perfctrl, 2, 0)
122__define_perf_accessors(perfctrl, 3, 1)
123
124static inline int __n_counters(void)
125{
126 if (!(read_c0_config1() & M_CONFIG1_PC))
127 return 0;
128 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
129 return 1;
130 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
131 return 2;
132 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
133 return 3;
134
135 return 4;
136}
137
138static inline int n_counters(void)
139{
140 int counters;
141
142 switch (current_cpu_type()) {
143 case CPU_R10000:
144 counters = 2;
145 break;
146
147 case CPU_R12000:
148 case CPU_R14000:
149 counters = 4;
150 break;
151
152 default:
153 counters = __n_counters();
154 }
155
156 return counters;
157}
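
__n_counters() probes how many counters the core implements: Config1.PC gates the whole feature, and bit 31 (M_PERFCTL_MORE) of each control register announces that another counter/control pair follows, so the walk stops at the first register with MORE clear. A compact restatement of that walk over fake register values:

#include <stdio.h>

#define M_CONFIG1_PC	(1u << 4)
#define M_PERFCTL_MORE	(1u << 31)

/* Same walk as __n_counters(): each set MORE bit promises one more
 * counter; stop at the first control register whose MORE bit is clear. */
static int n_counters(unsigned int config1, const unsigned int ctrl[4])
{
	if (!(config1 & M_CONFIG1_PC))
		return 0;
	for (int i = 0; i < 3; i++)
		if (!(ctrl[i] & M_PERFCTL_MORE))
			return i + 1;
	return 4;
}

int main(void)
{
	/* Fake a core with two counters: ctrl0.MORE set, ctrl1.MORE clear. */
	unsigned int ctrl[4] = { M_PERFCTL_MORE, 0, 0, 0 };

	printf("%d counters\n", n_counters(M_CONFIG1_PC, ctrl));
	return 0;
}
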
158
159static void reset_counters(void *arg)
160{
161 int counters = (int)(long)arg;
162 switch (counters) {
163 case 4:
164 w_c0_perfctrl3(0);
165 w_c0_perfcntr3(0);
166 case 3:
167 w_c0_perfctrl2(0);
168 w_c0_perfcntr2(0);
169 case 2:
170 w_c0_perfctrl1(0);
171 w_c0_perfcntr1(0);
172 case 1:
173 w_c0_perfctrl0(0);
174 w_c0_perfcntr0(0);
175 }
176}
177
178static inline u64
179mipsxx_pmu_read_counter(unsigned int idx)
180{
181 switch (idx) {
182 case 0:
183 return r_c0_perfcntr0();
184 case 1:
185 return r_c0_perfcntr1();
186 case 2:
187 return r_c0_perfcntr2();
188 case 3:
189 return r_c0_perfcntr3();
190 default:
191 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
192 return 0;
193 }
194}
195
196static inline void
197mipsxx_pmu_write_counter(unsigned int idx, u64 val)
198{
199 switch (idx) {
200 case 0:
201 w_c0_perfcntr0(val);
202 return;
203 case 1:
204 w_c0_perfcntr1(val);
205 return;
206 case 2:
207 w_c0_perfcntr2(val);
208 return;
209 case 3:
210 w_c0_perfcntr3(val);
211 return;
212 }
213}
214
215static inline unsigned int
216mipsxx_pmu_read_control(unsigned int idx)
217{
218 switch (idx) {
219 case 0:
220 return r_c0_perfctrl0();
221 case 1:
222 return r_c0_perfctrl1();
223 case 2:
224 return r_c0_perfctrl2();
225 case 3:
226 return r_c0_perfctrl3();
227 default:
228 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
229 return 0;
230 }
231}
232
233static inline void
234mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
235{
236 switch (idx) {
237 case 0:
238 w_c0_perfctrl0(val);
239 return;
240 case 1:
241 w_c0_perfctrl1(val);
242 return;
243 case 2:
244 w_c0_perfctrl2(val);
245 return;
246 case 3:
247 w_c0_perfctrl3(val);
248 return;
249 }
250}
251
252#ifdef CONFIG_MIPS_MT_SMP
253static DEFINE_RWLOCK(pmuint_rwlock);
254#endif
255
256/* 24K/34K/1004K cores can share the same event map. */
257static const struct mips_perf_event mipsxxcore_event_map
258 [PERF_COUNT_HW_MAX] = {
259 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
260 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
261 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
262 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
263 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
264 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
265 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
266};
267
268/* 74K core has different branch event code. */
269static const struct mips_perf_event mipsxx74Kcore_event_map
270 [PERF_COUNT_HW_MAX] = {
271 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
272 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
273 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
274 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
275 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
276 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
277 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
278};
279
280/* 24K/34K/1004K cores can share the same cache event map. */
281static const struct mips_perf_event mipsxxcore_cache_map
282 [PERF_COUNT_HW_CACHE_MAX]
283 [PERF_COUNT_HW_CACHE_OP_MAX]
284 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
285[C(L1D)] = {
286 /*
287 * Like some other architectures (e.g. ARM), the performance
288 * counters don't differentiate between read and write
289 * accesses/misses, so this isn't strictly correct, but it's the
290 * best we can do. Writes and reads get combined.
291 */
292 [C(OP_READ)] = {
293 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
294 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
295 },
296 [C(OP_WRITE)] = {
297 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
298 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
299 },
300 [C(OP_PREFETCH)] = {
301 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
302 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
303 },
304},
305[C(L1I)] = {
306 [C(OP_READ)] = {
307 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
308 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
309 },
310 [C(OP_WRITE)] = {
311 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
312 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
313 },
314 [C(OP_PREFETCH)] = {
315 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
316 /*
317 * Note that MIPS has only "hit" events countable for
318 * the prefetch operation.
319 */
320 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
321 },
322},
323[C(LL)] = {
324 [C(OP_READ)] = {
325 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
326 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
327 },
328 [C(OP_WRITE)] = {
329 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
330 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
331 },
332 [C(OP_PREFETCH)] = {
333 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
334 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
335 },
336},
337[C(DTLB)] = {
338 [C(OP_READ)] = {
339 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
340 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
341 },
342 [C(OP_WRITE)] = {
343 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
344 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
345 },
346 [C(OP_PREFETCH)] = {
347 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
348 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
349 },
350},
351[C(ITLB)] = {
352 [C(OP_READ)] = {
353 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
354 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
355 },
356 [C(OP_WRITE)] = {
357 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
358 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
359 },
360 [C(OP_PREFETCH)] = {
361 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
362 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
363 },
364},
365[C(BPU)] = {
366 /* Using the same code for *HW_BRANCH* */
367 [C(OP_READ)] = {
368 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
369 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
370 },
371 [C(OP_WRITE)] = {
372 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
373 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
374 },
375 [C(OP_PREFETCH)] = {
376 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
377 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
378 },
379},
380};
381
382/* 74K core has completely different cache event map. */
383static const struct mips_perf_event mipsxx74Kcore_cache_map
384 [PERF_COUNT_HW_CACHE_MAX]
385 [PERF_COUNT_HW_CACHE_OP_MAX]
386 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
387[C(L1D)] = {
388 /*
389 * Like some other architectures (e.g. ARM), the performance
390 * counters don't differentiate between read and write
391 * accesses/misses, so this isn't strictly correct, but it's the
392 * best we can do. Writes and reads get combined.
393 */
394 [C(OP_READ)] = {
395 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
396 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
397 },
398 [C(OP_WRITE)] = {
399 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
400 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
401 },
402 [C(OP_PREFETCH)] = {
403 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
404 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
405 },
406},
407[C(L1I)] = {
408 [C(OP_READ)] = {
409 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
410 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
411 },
412 [C(OP_WRITE)] = {
413 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
414 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
415 },
416 [C(OP_PREFETCH)] = {
417 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
418 /*
419 * Note that MIPS has only "hit" events countable for
420 * the prefetch operation.
421 */
422 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
423 },
424},
425[C(LL)] = {
426 [C(OP_READ)] = {
427 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
428 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
429 },
430 [C(OP_WRITE)] = {
431 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
432 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
433 },
434 [C(OP_PREFETCH)] = {
435 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
436 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
437 },
438},
439[C(DTLB)] = {
440 /* 74K core does not have specific DTLB events. */
441 [C(OP_READ)] = {
442 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
443 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
444 },
445 [C(OP_WRITE)] = {
446 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
447 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
448 },
449 [C(OP_PREFETCH)] = {
450 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
451 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
452 },
453},
454[C(ITLB)] = {
455 [C(OP_READ)] = {
456 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
457 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
458 },
459 [C(OP_WRITE)] = {
460 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
461 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
462 },
463 [C(OP_PREFETCH)] = {
464 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
465 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
466 },
467},
468[C(BPU)] = {
469 /* Using the same code for *HW_BRANCH* */
470 [C(OP_READ)] = {
471 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
472 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
473 },
474 [C(OP_WRITE)] = {
475 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
476 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
477 },
478 [C(OP_PREFETCH)] = {
479 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
480 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
481 },
482},
483};
484
485#ifdef CONFIG_MIPS_MT_SMP
486static void
487check_and_calc_range(struct perf_event *event,
488 const struct mips_perf_event *pev)
489{
490 struct hw_perf_event *hwc = &event->hw;
491
492 if (event->cpu >= 0) {
493 if (pev->range > V) {
494 /*
495 * The user selected an event that is processor
496 * wide, while expecting it to be VPE wide.
497 */
498 hwc->config_base |= M_TC_EN_ALL;
499 } else {
500 /*
501 * FIXME: cpu_data[event->cpu].vpe_id reports 0
502 * for both CPUs.
503 */
504 hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
505 hwc->config_base |= M_TC_EN_VPE;
506 }
507 } else
508 hwc->config_base |= M_TC_EN_ALL;
509}
510#else
511static void
512check_and_calc_range(struct perf_event *event,
513 const struct mips_perf_event *pev)
514{
515}
516#endif
517
518static int __hw_perf_event_init(struct perf_event *event)
519{
520 struct perf_event_attr *attr = &event->attr;
521 struct hw_perf_event *hwc = &event->hw;
522 const struct mips_perf_event *pev;
523 int err;
524
525	/* Return the MIPS event descriptor for a generic perf event. */
526 if (PERF_TYPE_HARDWARE == event->attr.type) {
527 if (event->attr.config >= PERF_COUNT_HW_MAX)
528 return -EINVAL;
529 pev = mipspmu_map_general_event(event->attr.config);
530 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
531 pev = mipspmu_map_cache_event(event->attr.config);
532 } else if (PERF_TYPE_RAW == event->attr.type) {
533 /* We are working on the global raw event. */
534 mutex_lock(&raw_event_mutex);
535 pev = mipspmu->map_raw_event(event->attr.config);
536 } else {
537 /* The event type is not (yet) supported. */
538 return -EOPNOTSUPP;
539 }
540
541 if (IS_ERR(pev)) {
542 if (PERF_TYPE_RAW == event->attr.type)
543 mutex_unlock(&raw_event_mutex);
544 return PTR_ERR(pev);
545 }
546
547 /*
548	 * We allow maximum flexibility in how each individual counter
549	 * shared by a single CPU operates (the mode exclusion and the range).
550 */
551 hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
552
553 /* Calculate range bits and validate it. */
554 if (num_possible_cpus() > 1)
555 check_and_calc_range(event, pev);
556
557 hwc->event_base = mipspmu_perf_event_encode(pev);
558 if (PERF_TYPE_RAW == event->attr.type)
559 mutex_unlock(&raw_event_mutex);
560
561 if (!attr->exclude_user)
562 hwc->config_base |= M_PERFCTL_USER;
563 if (!attr->exclude_kernel) {
564 hwc->config_base |= M_PERFCTL_KERNEL;
565 /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
566 hwc->config_base |= M_PERFCTL_EXL;
567 }
568 if (!attr->exclude_hv)
569 hwc->config_base |= M_PERFCTL_SUPERVISOR;
570
571 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
572 /*
573 * The event can belong to another cpu. We do not assign a local
574 * counter for it for now.
575 */
576 hwc->idx = -1;
577 hwc->config = 0;
578
579 if (!hwc->sample_period) {
580 hwc->sample_period = MAX_PERIOD;
581 hwc->last_period = hwc->sample_period;
582 local64_set(&hwc->period_left, hwc->sample_period);
583 }
584
585 err = 0;
586 if (event->group_leader != event) {
587 err = validate_group(event);
588 if (err)
589 return -EINVAL;
590 }
591
592 event->destroy = hw_perf_event_destroy;
593
594 return err;
595}
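
As a usage sketch (user space, illustrative only, not part of this patch): given the mapping above, setting exclude_user and exclude_hv while leaving exclude_kernel clear selects only M_PERFCTL_KERNEL | M_PERFCTL_EXL, i.e. the counter runs only in MIPS kernel mode (KSU == 00b || EXL == 1 || ERL == 1).

#include <linux/perf_event.h>

/* Hypothetical attribute setup; the event choice is arbitrary. */
static struct perf_event_attr kernel_only_cycles = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.exclude_user	= 1,	/* do not set M_PERFCTL_USER */
	.exclude_hv	= 1,	/* do not set M_PERFCTL_SUPERVISOR */
};
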
596
597static void pause_local_counters(void)
598{
599 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
600 int counters = mipspmu->num_counters;
601 unsigned long flags;
602
603 local_irq_save(flags);
604 switch (counters) {
605 case 4:
606 cpuc->saved_ctrl[3] = r_c0_perfctrl3();
607 w_c0_perfctrl3(cpuc->saved_ctrl[3] &
608 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
609 case 3:
610 cpuc->saved_ctrl[2] = r_c0_perfctrl2();
611 w_c0_perfctrl2(cpuc->saved_ctrl[2] &
612 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
613 case 2:
614 cpuc->saved_ctrl[1] = r_c0_perfctrl1();
615 w_c0_perfctrl1(cpuc->saved_ctrl[1] &
616 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
617 case 1:
618 cpuc->saved_ctrl[0] = r_c0_perfctrl0();
619 w_c0_perfctrl0(cpuc->saved_ctrl[0] &
620 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
621 }
622 local_irq_restore(flags);
623}
624
625static void resume_local_counters(void)
626{
627 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
628 int counters = mipspmu->num_counters;
629 unsigned long flags;
630
631 local_irq_save(flags);
632 switch (counters) {
633 case 4:
634 w_c0_perfctrl3(cpuc->saved_ctrl[3]);
635 case 3:
636 w_c0_perfctrl2(cpuc->saved_ctrl[2]);
637 case 2:
638 w_c0_perfctrl1(cpuc->saved_ctrl[1]);
639 case 1:
640 w_c0_perfctrl0(cpuc->saved_ctrl[0]);
641 }
642 local_irq_restore(flags);
643}
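
The switch statements in pause_local_counters() and resume_local_counters() rely on deliberate case fall-through: entering at case N saves or restores counters N-1 down to 0. A loop-form sketch of the same logic, assuming the indexed mipsxx_pmu_read_control()/mipsxx_pmu_write_control() helpers used later in this file:

/* Sketch only: loop-form equivalent of pause_local_counters(). */
static void pause_local_counters_sketch(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = mipspmu->num_counters - 1; i >= 0; i--) {
		cpuc->saved_ctrl[i] = mipsxx_pmu_read_control(i);
		mipsxx_pmu_write_control(i, cpuc->saved_ctrl[i] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	}
	local_irq_restore(flags);
}
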
644
645static int mipsxx_pmu_handle_shared_irq(void)
646{
647 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
648 struct perf_sample_data data;
649 unsigned int counters = mipspmu->num_counters;
650 unsigned int counter;
651 int handled = IRQ_NONE;
652 struct pt_regs *regs;
653
654 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
655 return handled;
656
657 /*
658	 * First we pause the local counters, so that if we get blocked
659	 * here, the counters are all paused. When the lock is held due to
660	 * perf_disable(), the timer interrupt handler will be delayed.
661 *
662 * See also mipsxx_pmu_start().
663 */
664 pause_local_counters();
665#ifdef CONFIG_MIPS_MT_SMP
666 read_lock(&pmuint_rwlock);
667#endif
668
669 regs = get_irq_regs();
670
671 perf_sample_data_init(&data, 0);
672
673 switch (counters) {
674#define HANDLE_COUNTER(n) \
675 case n + 1: \
676 if (test_bit(n, cpuc->used_mask)) { \
677 counter = r_c0_perfcntr ## n(); \
678 if (counter & M_COUNTER_OVERFLOW) { \
679 w_c0_perfcntr ## n(counter & \
680 VALID_COUNT); \
681 if (test_and_change_bit(n, cpuc->msbs)) \
682 handle_associated_event(cpuc, \
683 n, &data, regs); \
684 handled = IRQ_HANDLED; \
685 } \
686 }
687 HANDLE_COUNTER(3)
688 HANDLE_COUNTER(2)
689 HANDLE_COUNTER(1)
690 HANDLE_COUNTER(0)
691 }
692
693 /*
694 * Do all the work for the pending perf events. We can do this
695 * in here because the performance counter interrupt is a regular
696	 * interrupt, not an NMI.
697 */
698 if (handled == IRQ_HANDLED)
699 irq_work_run();
700
701#ifdef CONFIG_MIPS_MT_SMP
702 read_unlock(&pmuint_rwlock);
703#endif
704 resume_local_counters();
705 return handled;
706}
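
For reference, with counters == 4 the switch above enters at case 4 (from HANDLE_COUNTER(3)) and falls through to check every counter; HANDLE_COUNTER(0) expands to roughly this fragment (sketch, whitespace added):

	case 1:
		if (test_bit(0, cpuc->used_mask)) {
			counter = r_c0_perfcntr0();
			if (counter & M_COUNTER_OVERFLOW) {
				w_c0_perfcntr0(counter & VALID_COUNT);
				if (test_and_change_bit(0, cpuc->msbs))
					handle_associated_event(cpuc,
							0, &data, regs);
				handled = IRQ_HANDLED;
			}
		}
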
707
708static irqreturn_t
709mipsxx_pmu_handle_irq(int irq, void *dev)
710{
711 return mipsxx_pmu_handle_shared_irq();
712}
713
714static void mipsxx_pmu_start(void)
715{
716#ifdef CONFIG_MIPS_MT_SMP
717 write_unlock(&pmuint_rwlock);
718#endif
719 resume_local_counters();
720}
721
722/*
723 * MIPS performance counters can be per-TC. The control registers
724 * cannot be accessed directly across CPUs, so if we want to do
725 * global control, we need cross-CPU calls. on_each_cpu() can help,
726 * but we cannot make sure this function is called with interrupts
727 * enabled. So here we pause local counters and then grab a rwlock
728 * and leave the counters on other CPUs alone. If a counter interrupt
729 * is raised while we own the write lock, that CPU simply pauses its
730 * local counters and spins in the handler. Also we know we won't be
731 * switched to another CPU after pausing local counters and before grabbing the lock.
732 */
733static void mipsxx_pmu_stop(void)
734{
735 pause_local_counters();
736#ifdef CONFIG_MIPS_MT_SMP
737 write_lock(&pmuint_rwlock);
738#endif
739}
740
741static int
742mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
743 struct hw_perf_event *hwc)
744{
745 int i;
746
747 /*
748	 * We only need to care about the counter mask; the range has
749	 * already been validated.
750 */
751 unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
752
753 for (i = mipspmu->num_counters - 1; i >= 0; i--) {
754 /*
755		 * Note that some MIPS perf events can be counted by both
756		 * even and odd counters, whereas many others can use only
757		 * even _or_ odd counters. This introduces an issue: when an
758		 * event of the former kind takes the counter that an event
759		 * of the latter kind needs, "counter allocation" for the
760		 * latter event will fail. If the two could be swapped
761		 * dynamically, both would be satisfied, but we leave this
762		 * issue alone for now.
763 */
764 if (test_bit(i, &cntr_mask) &&
765 !test_and_set_bit(i, cpuc->used_mask))
766 return i;
767 }
768
769 return -EAGAIN;
770}
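
A self-contained toy (user space, not part of this patch) that reproduces the allocation-order issue described in the comment above, for a hypothetical two-counter PMU where bit 0 is the even counter and bit 1 the odd counter:

#include <stdio.h>

static int alloc_counter(unsigned long cntr_mask, unsigned long *used)
{
	int i;

	for (i = 1; i >= 0; i--)	/* highest counter first */
		if ((cntr_mask & (1UL << i)) && !(*used & (1UL << i))) {
			*used |= 1UL << i;
			return i;
		}
	return -1;			/* -EAGAIN in the kernel */
}

int main(void)
{
	unsigned long used = 0;

	/* An even|odd event grabs the odd counter first ... */
	printf("both-counters event -> counter %d\n",
	       alloc_counter(0x3, &used));
	/* ... so an odd-only event now fails to allocate. */
	printf("odd-only event      -> counter %d\n",
	       alloc_counter(0x2, &used));
	return 0;
}
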
771
772static void
773mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
774{
775 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
776 unsigned long flags;
777
778 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
779
780 local_irq_save(flags);
781 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
782 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
783		/* Make sure the interrupt is enabled. */
784 M_PERFCTL_INTERRUPT_ENABLE;
785 /*
786 * We do not actually let the counter run. Leave it until start().
787 */
788 local_irq_restore(flags);
789}
790
791static void
792mipsxx_pmu_disable_event(int idx)
793{
794 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
795 unsigned long flags;
796
797 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
798
799 local_irq_save(flags);
800 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
801 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
802 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
803 local_irq_restore(flags);
804}
805
806/* 24K */
807#define IS_UNSUPPORTED_24K_EVENT(r, b) \
808 ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
809 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
810 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
811 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
812 ((b) >= 68 && (b) <= 127))
813#define IS_BOTH_COUNTERS_24K_EVENT(b) \
814 ((b) == 0 || (b) == 1 || (b) == 11)
815
816/* 34K */
817#define IS_UNSUPPORTED_34K_EVENT(r, b) \
818 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
819 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
820 ((b) >= 68 && (b) <= 127))
821#define IS_BOTH_COUNTERS_34K_EVENT(b) \
822 ((b) == 0 || (b) == 1 || (b) == 11)
823#ifdef CONFIG_MIPS_MT_SMP
824#define IS_RANGE_P_34K_EVENT(r, b) \
825 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
826 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
827 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
828 ((b) >= 64 && (b) <= 67))
829#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
830#endif
831
832/* 74K */
833#define IS_UNSUPPORTED_74K_EVENT(r, b) \
834 ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
835 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
836 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
837 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
838 (b) == 61 || (r) == 62 || (r) == 191 || \
839 ((b) >= 64 && (b) <= 127))
840#define IS_BOTH_COUNTERS_74K_EVENT(b) \
841 ((b) == 0 || (b) == 1)
842
843/* 1004K */
844#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
845 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
846 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
847#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
848 ((b) == 0 || (b) == 1 || (b) == 11)
849#ifdef CONFIG_MIPS_MT_SMP
850#define IS_RANGE_P_1004K_EVENT(r, b) \
851 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
852 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
853 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
854 (r) == 188 || (b) == 61 || (b) == 62 || \
855 ((b) >= 64 && (b) <= 67))
856#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
857#endif
858
859/*
860 * Users can specify raw events 0-255, where 0-127 select events on
861 * the even counters and 128-255 select events on the odd counters.
862 * Bit 7 indicates the parity. So, for example, to count the Event
863 * Num of 15 on an odd counter (by referring to the user manual),
864 * 128 needs to be added to 15 as the input for the event config,
865 * i.e., 143 (0x8F) is used.
866 */
867static const struct mips_perf_event *
868mipsxx_pmu_map_raw_event(u64 config)
869{
870 unsigned int raw_id = config & 0xff;
871 unsigned int base_id = raw_id & 0x7f;
872
873 switch (current_cpu_type()) {
874 case CPU_24K:
875 if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
876 return ERR_PTR(-EOPNOTSUPP);
877 raw_event.event_id = base_id;
878 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
879 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
880 else
881 raw_event.cntr_mask =
882 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
883#ifdef CONFIG_MIPS_MT_SMP
884 /*
885		 * This actually does nothing: non-multithreading
886		 * CPUs will not check or calculate the range.
887 */
888 raw_event.range = P;
889#endif
890 break;
891 case CPU_34K:
892 if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
893 return ERR_PTR(-EOPNOTSUPP);
894 raw_event.event_id = base_id;
895 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
896 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
897 else
898 raw_event.cntr_mask =
899 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
900#ifdef CONFIG_MIPS_MT_SMP
901 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
902 raw_event.range = P;
903 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
904 raw_event.range = V;
905 else
906 raw_event.range = T;
907#endif
908 break;
909 case CPU_74K:
910 if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
911 return ERR_PTR(-EOPNOTSUPP);
912 raw_event.event_id = base_id;
913 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
914 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
915 else
916 raw_event.cntr_mask =
917 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
918#ifdef CONFIG_MIPS_MT_SMP
919 raw_event.range = P;
920#endif
921 break;
922 case CPU_1004K:
923 if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
924 return ERR_PTR(-EOPNOTSUPP);
925 raw_event.event_id = base_id;
926 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
927 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
928 else
929 raw_event.cntr_mask =
930 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
931#ifdef CONFIG_MIPS_MT_SMP
932 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
933 raw_event.range = P;
934 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
935 raw_event.range = V;
936 else
937 raw_event.range = T;
938#endif
939 break;
940 }
941
942 return &raw_event;
943}
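
A minimal user-space sketch (not part of this patch) of the encoding described above: counting Event Num 15 on an odd counter means passing 15 + 128 = 0x8f as the raw config.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* Sketch: open raw event 15 on an odd counter (bit 7 = parity). */
static int open_odd_raw_event_15(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_RAW;
	attr.config = 0x8f;	/* 15 | 0x80 -> odd-counter event 15 */

	/* current task, any CPU, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

With the perf tool this corresponds to a raw hex event such as perf stat -e r8f, assuming a tool version that accepts raw event codes.
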
944
945static struct mips_pmu mipsxxcore_pmu = {
946 .handle_irq = mipsxx_pmu_handle_irq,
947 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
948 .start = mipsxx_pmu_start,
949 .stop = mipsxx_pmu_stop,
950 .alloc_counter = mipsxx_pmu_alloc_counter,
951 .read_counter = mipsxx_pmu_read_counter,
952 .write_counter = mipsxx_pmu_write_counter,
953 .enable_event = mipsxx_pmu_enable_event,
954 .disable_event = mipsxx_pmu_disable_event,
955 .map_raw_event = mipsxx_pmu_map_raw_event,
956 .general_event_map = &mipsxxcore_event_map,
957 .cache_event_map = &mipsxxcore_cache_map,
958};
959
960static struct mips_pmu mipsxx74Kcore_pmu = {
961 .handle_irq = mipsxx_pmu_handle_irq,
962 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
963 .start = mipsxx_pmu_start,
964 .stop = mipsxx_pmu_stop,
965 .alloc_counter = mipsxx_pmu_alloc_counter,
966 .read_counter = mipsxx_pmu_read_counter,
967 .write_counter = mipsxx_pmu_write_counter,
968 .enable_event = mipsxx_pmu_enable_event,
969 .disable_event = mipsxx_pmu_disable_event,
970 .map_raw_event = mipsxx_pmu_map_raw_event,
971 .general_event_map = &mipsxx74Kcore_event_map,
972 .cache_event_map = &mipsxx74Kcore_cache_map,
973};
974
975static int __init
976init_hw_perf_events(void)
977{
978 int counters, irq;
979
980 pr_info("Performance counters: ");
981
982 counters = n_counters();
983 if (counters == 0) {
984 pr_cont("No available PMU.\n");
985 return -ENODEV;
986 }
987
988#ifdef CONFIG_MIPS_MT_SMP
989 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
990 if (!cpu_has_mipsmt_pertccounters)
991 counters = counters_total_to_per_cpu(counters);
992#endif
993
994#ifdef MSC01E_INT_BASE
995 if (cpu_has_veic) {
996 /*
997		 * Using platform-specific interrupt controller defines.
998 */
999 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1000 } else {
1001#endif
1002 if (cp0_perfcount_irq >= 0)
1003 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1004 else
1005 irq = -1;
1006#ifdef MSC01E_INT_BASE
1007 }
1008#endif
1009
1010 on_each_cpu(reset_counters, (void *)(long)counters, 1);
1011
1012 switch (current_cpu_type()) {
1013 case CPU_24K:
1014 mipsxxcore_pmu.name = "mips/24K";
1015 mipsxxcore_pmu.num_counters = counters;
1016 mipsxxcore_pmu.irq = irq;
1017 mipspmu = &mipsxxcore_pmu;
1018 break;
1019 case CPU_34K:
1020 mipsxxcore_pmu.name = "mips/34K";
1021 mipsxxcore_pmu.num_counters = counters;
1022 mipsxxcore_pmu.irq = irq;
1023 mipspmu = &mipsxxcore_pmu;
1024 break;
1025 case CPU_74K:
1026 mipsxx74Kcore_pmu.name = "mips/74K";
1027 mipsxx74Kcore_pmu.num_counters = counters;
1028 mipsxx74Kcore_pmu.irq = irq;
1029 mipspmu = &mipsxx74Kcore_pmu;
1030 break;
1031 case CPU_1004K:
1032 mipsxxcore_pmu.name = "mips/1004K";
1033 mipsxxcore_pmu.num_counters = counters;
1034 mipsxxcore_pmu.irq = irq;
1035 mipspmu = &mipsxxcore_pmu;
1036 break;
1037 default:
1038 pr_cont("Either hardware does not support performance "
1039 "counters, or not yet implemented.\n");
1040 return -ENODEV;
1041 }
1042
1043 if (mipspmu)
1044 pr_cont("%s PMU enabled, %d counters available to each "
1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 irq < 0 ? " (share with timer interrupt)" : "");
1047
1048 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1049
1050 return 0;
1051}
1052early_initcall(init_hw_perf_events);
1053
1054#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 26109c4d5170..e309665b6c81 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,6 +12,7 @@
12#include <asm/cpu-features.h> 12#include <asm/cpu-features.h>
13#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/mips_machine.h>
15 16
16unsigned int vced_count, vcei_count; 17unsigned int vced_count, vcei_count;
17 18
@@ -31,8 +32,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
31 /* 32 /*
32 * For the first processor also print the system type 33 * For the first processor also print the system type
33 */ 34 */
34 if (n == 0) 35 if (n == 0) {
35 seq_printf(m, "system type\t\t: %s\n", get_system_type()); 36 seq_printf(m, "system type\t\t: %s\n", get_system_type());
37 if (mips_get_machine_name())
38 seq_printf(m, "machine\t\t\t: %s\n",
39 mips_get_machine_name());
40 }
36 41
37 seq_printf(m, "processor\t\t: %ld\n", n); 42 seq_printf(m, "processor\t\t: %ld\n", n);
38 sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", 43 sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
@@ -69,6 +74,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
69 ); 74 );
70 seq_printf(m, "shadow register sets\t: %d\n", 75 seq_printf(m, "shadow register sets\t: %d\n",
71 cpu_data[n].srsets); 76 cpu_data[n].srsets);
77 seq_printf(m, "kscratch registers\t: %d\n",
78 hweight8(cpu_data[n].kscratch_mask));
72 seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); 79 seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
73 80
74 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", 81 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 99960940d4a4..d2112d3cf115 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -142,7 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
142 childregs->regs[7] = 0; /* Clear error flag */ 142 childregs->regs[7] = 0; /* Clear error flag */
143 143
144 childregs->regs[2] = 0; /* Child gets zero as return value */ 144 childregs->regs[2] = 0; /* Child gets zero as return value */
145 regs->regs[2] = p->pid;
146 145
147 if (childregs->cp0_status & ST0_CU0) { 146 if (childregs->cp0_status & ST0_CU0) {
148 childregs->regs[28] = (unsigned long) ti; 147 childregs->regs[28] = (unsigned long) ti;
@@ -411,7 +410,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
411 if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) 410 if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
412 return 0; 411 return 0;
413 /* 412 /*
414 * Return ra if an exception occured at the first instruction 413 * Return ra if an exception occurred at the first instruction
415 */ 414 */
416 if (unlikely(ofs == 0)) { 415 if (unlikely(ofs == 0)) {
417 pc = *ra; 416 pc = *ra;
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 000000000000..5b7eade41fa3
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,111 @@
1/*
2 * MIPS support for CONFIG_OF device tree support
3 *
4 * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/types.h>
15#include <linux/bootmem.h>
16#include <linux/initrd.h>
17#include <linux/debugfs.h>
18#include <linux/of.h>
19#include <linux/of_fdt.h>
20#include <linux/of_irq.h>
21#include <linux/of_platform.h>
22
23#include <asm/page.h>
24#include <asm/prom.h>
25
26int __init early_init_dt_scan_memory_arch(unsigned long node,
27 const char *uname, int depth,
28 void *data)
29{
30 return early_init_dt_scan_memory(node, uname, depth, data);
31}
32
33void __init early_init_dt_add_memory_arch(u64 base, u64 size)
34{
35 return add_memory_region(base, size, BOOT_MEM_RAM);
36}
37
38int __init reserve_mem_mach(unsigned long addr, unsigned long size)
39{
40 return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
41}
42
43void __init free_mem_mach(unsigned long addr, unsigned long size)
44{
45 return free_bootmem(addr, size);
46}
47
48void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
49{
50 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
51}
52
53#ifdef CONFIG_BLK_DEV_INITRD
54void __init early_init_dt_setup_initrd_arch(unsigned long start,
55 unsigned long end)
56{
57 initrd_start = (unsigned long)__va(start);
58 initrd_end = (unsigned long)__va(end);
59 initrd_below_start_ok = 1;
60}
61#endif
62
63/*
64 * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
65 *
66 * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
67 * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
68 * supported.
69 */
70unsigned int irq_create_of_mapping(struct device_node *controller,
71 const u32 *intspec, unsigned int intsize)
72{
73 return intspec[0];
74}
75EXPORT_SYMBOL_GPL(irq_create_of_mapping);
76
77void __init early_init_devtree(void *params)
78{
79 /* Setup flat device-tree pointer */
80 initial_boot_params = params;
81
 82	/* Retrieve various information from the /chosen node of the
83 * device-tree, including the platform type, initrd location and
84 * size, and more ...
85 */
86 of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
87
88
89 /* Scan memory nodes */
90 of_scan_flat_dt(early_init_dt_scan_root, NULL);
91 of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
92}
93
94void __init device_tree_init(void)
95{
96 unsigned long base, size;
97
98 if (!initial_boot_params)
99 return;
100
101 base = virt_to_phys((void *)initial_boot_params);
102 size = be32_to_cpu(initial_boot_params->totalsize);
103
104	/* Before we do anything, let's reserve the dt blob */
105 reserve_mem_mach(base, size);
106
107 unflatten_device_tree();
108
109 /* free the space reserved for the dt blob */
110 free_mem_mach(base, size);
111}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c8777333e198..4e6ea1ffad46 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
255 return 0; 255 return 0;
256} 256}
257 257
258long arch_ptrace(struct task_struct *child, long request, long addr, long data) 258long arch_ptrace(struct task_struct *child, long request,
259 unsigned long addr, unsigned long data)
259{ 260{
260 int ret; 261 int ret;
262 void __user *addrp = (void __user *) addr;
263 void __user *datavp = (void __user *) data;
264 unsigned long __user *datalp = (void __user *) data;
261 265
262 switch (request) { 266 switch (request) {
263 /* when I and D space are separate, these will need to be fixed. */ 267 /* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
386 ret = -EIO; 390 ret = -EIO;
387 goto out; 391 goto out;
388 } 392 }
389 ret = put_user(tmp, (unsigned long __user *) data); 393 ret = put_user(tmp, datalp);
390 break; 394 break;
391 } 395 }
392 396
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
478 } 482 }
479 483
480 case PTRACE_GETREGS: 484 case PTRACE_GETREGS:
481 ret = ptrace_getregs(child, (__s64 __user *) data); 485 ret = ptrace_getregs(child, datavp);
482 break; 486 break;
483 487
484 case PTRACE_SETREGS: 488 case PTRACE_SETREGS:
485 ret = ptrace_setregs(child, (__s64 __user *) data); 489 ret = ptrace_setregs(child, datavp);
486 break; 490 break;
487 491
488 case PTRACE_GETFPREGS: 492 case PTRACE_GETFPREGS:
489 ret = ptrace_getfpregs(child, (__u32 __user *) data); 493 ret = ptrace_getfpregs(child, datavp);
490 break; 494 break;
491 495
492 case PTRACE_SETFPREGS: 496 case PTRACE_SETFPREGS:
493 ret = ptrace_setfpregs(child, (__u32 __user *) data); 497 ret = ptrace_setfpregs(child, datavp);
494 break; 498 break;
495 499
496 case PTRACE_GET_THREAD_AREA: 500 case PTRACE_GET_THREAD_AREA:
497 ret = put_user(task_thread_info(child)->tp_value, 501 ret = put_user(task_thread_info(child)->tp_value, datalp);
498 (unsigned long __user *) data);
499 break; 502 break;
500 503
501 case PTRACE_GET_WATCH_REGS: 504 case PTRACE_GET_WATCH_REGS:
502 ret = ptrace_get_watch_regs(child, 505 ret = ptrace_get_watch_regs(child, addrp);
503 (struct pt_watch_regs __user *) addr);
504 break; 506 break;
505 507
506 case PTRACE_SET_WATCH_REGS: 508 case PTRACE_SET_WATCH_REGS:
507 ret = ptrace_set_watch_regs(child, 509 ret = ptrace_set_watch_regs(child, addrp);
508 (struct pt_watch_regs __user *) addr);
509 break; 510 break;
510 511
511 default: 512 default:
@@ -532,15 +533,10 @@ static inline int audit_arch(void)
532 * Notification of system call entry/exit 533 * Notification of system call entry/exit
533 * - triggered by current->work.syscall_trace 534 * - triggered by current->work.syscall_trace
534 */ 535 */
535asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) 536asmlinkage void syscall_trace_enter(struct pt_regs *regs)
536{ 537{
537 /* do the secure computing check first */ 538 /* do the secure computing check first */
538 if (!entryexit) 539 secure_computing(regs->regs[2]);
539 secure_computing(regs->regs[2]);
540
541 if (unlikely(current->audit_context) && entryexit)
542 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
543 regs->regs[2]);
544 540
545 if (!(current->ptrace & PT_PTRACED)) 541 if (!(current->ptrace & PT_PTRACED))
546 goto out; 542 goto out;
@@ -564,8 +560,40 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
564 } 560 }
565 561
566out: 562out:
567 if (unlikely(current->audit_context) && !entryexit) 563 if (unlikely(current->audit_context))
568 audit_syscall_entry(audit_arch(), regs->regs[2], 564 audit_syscall_entry(audit_arch(), regs->regs[2],
569 regs->regs[4], regs->regs[5], 565 regs->regs[4], regs->regs[5],
570 regs->regs[6], regs->regs[7]); 566 regs->regs[6], regs->regs[7]);
571} 567}
568
569/*
570 * Notification of system call entry/exit
571 * - triggered by current->work.syscall_trace
572 */
573asmlinkage void syscall_trace_leave(struct pt_regs *regs)
574{
575 if (unlikely(current->audit_context))
576 audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
577 -regs->regs[2]);
578
579 if (!(current->ptrace & PT_PTRACED))
580 return;
581
582 if (!test_thread_flag(TIF_SYSCALL_TRACE))
583 return;
584
585 /* The 0x80 provides a way for the tracing parent to distinguish
586 between a syscall stop and SIGTRAP delivery */
587 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
588 0x80 : 0));
589
590 /*
591 * this isn't the same as continuing with a signal, but it will do
592 * for normal use. strace only continues with a signal if the
593 * stopping signal is not SIGTRAP. -brl
594 */
595 if (current->exit_code) {
596 send_sig(current->exit_code, current, 1);
597 current->exit_code = 0;
598 }
599}
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index ac68e68339db..61c8a0f2a60c 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -6,7 +6,7 @@
6 * Copyright (C) 1996, 1998 by Ralf Baechle 6 * Copyright (C) 1996, 1998 by Ralf Baechle
7 * 7 *
8 * Multi-arch abstraction and asm macros for easier reading: 8 * Multi-arch abstraction and asm macros for easier reading:
9 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 9 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
10 * 10 *
11 * Further modifications to make this work: 11 * Further modifications to make this work:
12 * Copyright (c) 1998 Harald Koerfgen 12 * Copyright (c) 1998 Harald Koerfgen
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 698414b7a253..293898391e67 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -5,7 +5,7 @@
5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse 5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
6 * 6 *
7 * Multi-cpu abstraction and macros for easier reading: 7 * Multi-cpu abstraction and macros for easier reading:
8 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
9 * 9 *
10 * Further modifications to make this work: 10 * Further modifications to make this work:
11 * Copyright (c) 1998-2000 Harald Koerfgen 11 * Copyright (c) 1998-2000 Harald Koerfgen
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index dbd42adc52ed..55ffe149dae9 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -6,7 +6,7 @@
6 * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle 6 * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
7 * 7 *
8 * Multi-arch abstraction and asm macros for easier reading: 8 * Multi-arch abstraction and asm macros for easier reading:
9 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 9 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
10 * 10 *
11 * Carsten Langgaard, carstenl@mips.com 11 * Carsten Langgaard, carstenl@mips.com
12 * Copyright (C) 2000 MIPS Technologies, Inc. 12 * Copyright (C) 2000 MIPS Technologies, Inc.
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 8893ee1a2368..9414f9354469 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -4,7 +4,7 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle 6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 7 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse 8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc. 10 * Copyright (C) 2000 MIPS Technologies, Inc.
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 43cda53f5af6..da0fbe46d83b 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -8,7 +8,7 @@
8 * Copyright (C) 1996 by Ralf Baechle 8 * Copyright (C) 1996 by Ralf Baechle
9 * 9 *
10 * Multi-arch abstraction and asm macros for easier reading: 10 * Multi-arch abstraction and asm macros for easier reading:
11 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 11 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
12 */ 12 */
13#include <asm/asm.h> 13#include <asm/asm.h>
14#include <asm/fpregdef.h> 14#include <asm/fpregdef.h>
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 26f9b9ab19cc..557ef72472e0 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -468,7 +468,8 @@ static const struct file_operations rtlx_fops = {
468 .release = file_release, 468 .release = file_release,
469 .write = file_write, 469 .write = file_write,
470 .read = file_read, 470 .read = file_read,
471 .poll = file_poll 471 .poll = file_poll,
472 .llseek = noop_llseek,
472}; 473};
473 474
474static struct irqaction rtlx_irq = { 475static struct irqaction rtlx_irq = {
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fbaabad0e6e2..99e656e425f3 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -88,8 +88,7 @@ syscall_trace_entry:
88 SAVE_STATIC 88 SAVE_STATIC
89 move s0, t2 89 move s0, t2
90 move a0, sp 90 move a0, sp
91 li a1, 0 91 jal syscall_trace_enter
92 jal do_syscall_trace
93 92
94 move t0, s0 93 move t0, s0
95 RESTORE_STATIC 94 RESTORE_STATIC
@@ -565,7 +564,7 @@ einval: li v0, -ENOSYS
565 sys sys_ioprio_get 2 /* 4315 */ 564 sys sys_ioprio_get 2 /* 4315 */
566 sys sys_utimensat 4 565 sys sys_utimensat 4
567 sys sys_signalfd 3 566 sys sys_signalfd 3
568 sys sys_ni_syscall 0 567 sys sys_ni_syscall 0 /* was timerfd */
569 sys sys_eventfd 1 568 sys sys_eventfd 1
570 sys sys_fallocate 6 /* 4320 */ 569 sys sys_fallocate 6 /* 4320 */
571 sys sys_timerfd_create 2 570 sys sys_timerfd_create 2
@@ -586,6 +585,11 @@ einval: li v0, -ENOSYS
586 sys sys_fanotify_init 2 585 sys sys_fanotify_init 2
587 sys sys_fanotify_mark 6 586 sys sys_fanotify_mark 6
588 sys sys_prlimit64 4 587 sys sys_prlimit64 4
588 sys sys_name_to_handle_at 5
589 sys sys_open_by_handle_at 3 /* 4340 */
590 sys sys_clock_adjtime 2
591 sys sys_syncfs 1
592 sys sys_setns 2
589 .endm 593 .endm
590 594
591 /* We pre-compute the number of _instruction_ bytes needed to 595 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3f4179283207..fb0575f47f3d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -91,8 +91,7 @@ syscall_trace_entry:
91 SAVE_STATIC 91 SAVE_STATIC
92 move s0, t2 92 move s0, t2
93 move a0, sp 93 move a0, sp
94 li a1, 0 94 jal syscall_trace_enter
95 jal do_syscall_trace
96 95
97 move t0, s0 96 move t0, s0
98 RESTORE_STATIC 97 RESTORE_STATIC
@@ -404,7 +403,7 @@ sys_call_table:
404 PTR sys_ioprio_get 403 PTR sys_ioprio_get
405 PTR sys_utimensat /* 5275 */ 404 PTR sys_utimensat /* 5275 */
406 PTR sys_signalfd 405 PTR sys_signalfd
407 PTR sys_ni_syscall 406 PTR sys_ni_syscall /* was timerfd */
408 PTR sys_eventfd 407 PTR sys_eventfd
409 PTR sys_fallocate 408 PTR sys_fallocate
410 PTR sys_timerfd_create /* 5280 */ 409 PTR sys_timerfd_create /* 5280 */
@@ -425,4 +424,9 @@ sys_call_table:
425 PTR sys_fanotify_init /* 5295 */ 424 PTR sys_fanotify_init /* 5295 */
426 PTR sys_fanotify_mark 425 PTR sys_fanotify_mark
427 PTR sys_prlimit64 426 PTR sys_prlimit64
427 PTR sys_name_to_handle_at
428 PTR sys_open_by_handle_at
429 PTR sys_clock_adjtime /* 5300 */
430 PTR sys_syncfs
431 PTR sys_setns
428 .size sys_call_table,.-sys_call_table 432 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f08ece6d8acc..4de0c5534e73 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -89,8 +89,7 @@ n32_syscall_trace_entry:
89 SAVE_STATIC 89 SAVE_STATIC
90 move s0, t2 90 move s0, t2
91 move a0, sp 91 move a0, sp
92 li a1, 0 92 jal syscall_trace_enter
93 jal do_syscall_trace
94 93
95 move t0, s0 94 move t0, s0
96 RESTORE_STATIC 95 RESTORE_STATIC
@@ -403,7 +402,7 @@ EXPORT(sysn32_call_table)
403 PTR sys_ioprio_get 402 PTR sys_ioprio_get
404 PTR compat_sys_utimensat 403 PTR compat_sys_utimensat
405 PTR compat_sys_signalfd /* 6280 */ 404 PTR compat_sys_signalfd /* 6280 */
406 PTR sys_ni_syscall 405 PTR sys_ni_syscall /* was timerfd */
407 PTR sys_eventfd 406 PTR sys_eventfd
408 PTR sys_fallocate 407 PTR sys_fallocate
409 PTR sys_timerfd_create 408 PTR sys_timerfd_create
@@ -425,4 +424,9 @@ EXPORT(sysn32_call_table)
425 PTR sys_fanotify_init /* 6300 */ 424 PTR sys_fanotify_init /* 6300 */
426 PTR sys_fanotify_mark 425 PTR sys_fanotify_mark
427 PTR sys_prlimit64 426 PTR sys_prlimit64
427 PTR sys_name_to_handle_at
428 PTR sys_open_by_handle_at
429 PTR compat_sys_clock_adjtime /* 6305 */
430 PTR sys_syncfs
431 PTR sys_setns
428 .size sysn32_call_table,.-sysn32_call_table 432 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 78d768a3e19d..4a387de08bfa 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -123,8 +123,7 @@ trace_a_syscall:
123 123
124 move s0, t2 # Save syscall pointer 124 move s0, t2 # Save syscall pointer
125 move a0, sp 125 move a0, sp
126 li a1, 0 126 jal syscall_trace_enter
127 jal do_syscall_trace
128 127
129 move t0, s0 128 move t0, s0
130 RESTORE_STATIC 129 RESTORE_STATIC
@@ -522,7 +521,7 @@ sys_call_table:
522 PTR sys_ioprio_get /* 4315 */ 521 PTR sys_ioprio_get /* 4315 */
523 PTR compat_sys_utimensat 522 PTR compat_sys_utimensat
524 PTR compat_sys_signalfd 523 PTR compat_sys_signalfd
525 PTR sys_ni_syscall 524 PTR sys_ni_syscall /* was timerfd */
526 PTR sys_eventfd 525 PTR sys_eventfd
527 PTR sys32_fallocate /* 4320 */ 526 PTR sys32_fallocate /* 4320 */
528 PTR sys_timerfd_create 527 PTR sys_timerfd_create
@@ -543,4 +542,9 @@ sys_call_table:
543 PTR sys_fanotify_init 542 PTR sys_fanotify_init
544 PTR sys_32_fanotify_mark 543 PTR sys_32_fanotify_mark
545 PTR sys_prlimit64 544 PTR sys_prlimit64
545 PTR sys_name_to_handle_at
546 PTR compat_sys_open_by_handle_at /* 4340 */
547 PTR compat_sys_clock_adjtime
548 PTR sys_syncfs
549 PTR sys_setns
546 .size sys_call_table,.-sys_call_table 550 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85aef3fc6716..8ad1d5679f14 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -31,6 +31,7 @@
31#include <asm/setup.h> 31#include <asm/setup.h>
32#include <asm/smp-ops.h> 32#include <asm/smp-ops.h>
33#include <asm/system.h> 33#include <asm/system.h>
34#include <asm/prom.h>
34 35
35struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; 36struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
36 37
@@ -69,7 +70,7 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
69 * mips_io_port_base is the begin of the address space to which x86 style 70 * mips_io_port_base is the begin of the address space to which x86 style
70 * I/O ports are mapped. 71 * I/O ports are mapped.
71 */ 72 */
72const unsigned long mips_io_port_base __read_mostly = -1; 73const unsigned long mips_io_port_base = -1;
73EXPORT_SYMBOL(mips_io_port_base); 74EXPORT_SYMBOL(mips_io_port_base);
74 75
75static struct resource code_resource = { .name = "Kernel code", }; 76static struct resource code_resource = { .name = "Kernel code", };
@@ -487,7 +488,9 @@ static void __init arch_mem_init(char **cmdline_p)
487 } 488 }
488 489
489 bootmem_init(); 490 bootmem_init();
491 device_tree_init();
490 sparse_init(); 492 sparse_init();
493 plat_swiotlb_setup();
491 paging_init(); 494 paging_init();
492} 495}
493 496
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca39..dbbe0ce48d89 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 84
85static int protected_restore_fp_context(struct sigcontext __user *sc) 85static int protected_restore_fp_context(struct sigcontext __user *sc)
86{ 86{
87 int err, tmp; 87 int err, tmp __maybe_unused;
88 while (1) { 88 while (1) {
89 lock_fpu_owner(); 89 lock_fpu_owner();
90 own_fpu_inatomic(0); 90 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2e..aae986613795 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
115 115
116static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 116static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
117{ 117{
118 int err, tmp; 118 int err, tmp __maybe_unused;
119 while (1) { 119 while (1) {
120 lock_fpu_owner(); 120 lock_fpu_owner();
121 own_fpu_inatomic(0); 121 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 43e7cdc5ded2..1ec56e635d04 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -120,7 +120,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
120 120
121 local_irq_save(flags); 121 local_irq_save(flags);
122 122
123 vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ 123 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
124 124
125 switch (action) { 125 switch (action) {
126 case SMP_CALL_FUNCTION: 126 case SMP_CALL_FUNCTION:
@@ -153,7 +153,7 @@ static void __cpuinit vsmp_init_secondary(void)
153{ 153{
154 extern int gic_present; 154 extern int gic_present;
155 155
156 /* This is Malta specific: IPI,performance and timer inetrrupts */ 156 /* This is Malta specific: IPI,performance and timer interrupts */
157 if (gic_present) 157 if (gic_present)
158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
159 STATUSF_IP6 | STATUSF_IP7); 159 STATUSF_IP6 | STATUSF_IP7);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb49..32a256101082 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
193 */ 193 */
194static struct task_struct *cpu_idle_thread[NR_CPUS]; 194static struct task_struct *cpu_idle_thread[NR_CPUS];
195 195
196struct create_idle {
197 struct work_struct work;
198 struct task_struct *idle;
199 struct completion done;
200 int cpu;
201};
202
203static void __cpuinit do_fork_idle(struct work_struct *work)
204{
205 struct create_idle *c_idle =
206 container_of(work, struct create_idle, work);
207
208 c_idle->idle = fork_idle(c_idle->cpu);
209 complete(&c_idle->done);
210}
211
196int __cpuinit __cpu_up(unsigned int cpu) 212int __cpuinit __cpu_up(unsigned int cpu)
197{ 213{
198 struct task_struct *idle; 214 struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
203 * Linux can schedule processes on this slave. 219 * Linux can schedule processes on this slave.
204 */ 220 */
205 if (!cpu_idle_thread[cpu]) { 221 if (!cpu_idle_thread[cpu]) {
206 idle = fork_idle(cpu); 222 /*
207 cpu_idle_thread[cpu] = idle; 223 * Schedule work item to avoid forking user task
224 * Ported from arch/x86/kernel/smpboot.c
225 */
226 struct create_idle c_idle = {
227 .cpu = cpu,
228 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
229 };
230
231 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
232 schedule_work(&c_idle.work);
233 wait_for_completion(&c_idle.done);
234 idle = cpu_idle_thread[cpu] = c_idle.idle;
208 235
209 if (IS_ERR(idle)) 236 if (IS_ERR(idle))
210 panic(KERN_ERR "Fork failed for CPU %d", cpu); 237 panic(KERN_ERR "Fork failed for CPU %d", cpu);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index cfeb2c155896..cedac4633741 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
677 */ 677 */
678} 678}
679 679
680void smtc_forward_irq(unsigned int irq) 680void smtc_forward_irq(struct irq_data *d)
681{ 681{
682 unsigned int irq = d->irq;
682 int target; 683 int target;
683 684
684 /* 685 /*
@@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq)
692 * and efficiency, we just pick the easiest one to find. 693 * and efficiency, we just pick the easiest one to find.
693 */ 694 */
694 695
695 target = cpumask_first(irq_desc[irq].affinity); 696 target = cpumask_first(d->affinity);
696 697
697 /* 698 /*
698 * We depend on the platform code to have correctly processed 699 * We depend on the platform code to have correctly processed
@@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
707 */ 708 */
708 709
709 /* If no one is eligible, service locally */ 710 /* If no one is eligible, service locally */
710 if (target >= NR_CPUS) { 711 if (target >= NR_CPUS)
711 do_IRQ_no_affinity(irq); 712 do_IRQ_no_affinity(irq);
712 return; 713 else
713 } 714 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
714
715 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
716} 715}
717 716
718#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 717#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
@@ -930,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
930 929
931static void ipi_resched_interrupt(void) 930static void ipi_resched_interrupt(void)
932{ 931{
933 /* Return from interrupt should be enough to cause scheduler check */ 932 scheduler_ipi();
934} 933}
935 934
936static void ipi_call_interrupt(void) 935static void ipi_call_interrupt(void)
@@ -1038,7 +1037,7 @@ void deferred_smtc_ipi(void)
1038 * but it's more efficient, given that we're already 1037 * but it's more efficient, given that we're already
1039 * running down the IPI queue. 1038 * running down the IPI queue.
1040 */ 1039 */
1041 __raw_local_irq_restore(flags); 1040 __arch_local_irq_restore(flags);
1042 } 1041 }
1043} 1042}
1044 1043
@@ -1147,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
1147 1146
1148 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1147 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1149 1148
1150 set_irq_handler(cpu_ipi_irq, handle_percpu_irq); 1149 irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
1151} 1150}
1152 1151
1153/* 1152/*
@@ -1190,7 +1189,7 @@ void smtc_ipi_replay(void)
1190 /* 1189 /*
1191 ** But use a raw restore here to avoid recursion. 1190 ** But use a raw restore here to avoid recursion.
1192 */ 1191 */
1193 __raw_local_irq_restore(flags); 1192 __arch_local_irq_restore(flags);
1194 1193
1195 if (pipi) { 1194 if (pipi) {
1196 self_ipi(pipi); 1195 self_ipi(pipi);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e0..d02765708ddb 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -10,12 +10,9 @@
10#include <linux/capability.h> 10#include <linux/capability.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/mm.h>
14#include <linux/fs.h> 13#include <linux/fs.h>
15#include <linux/smp.h> 14#include <linux/smp.h>
16#include <linux/mman.h>
17#include <linux/ptrace.h> 15#include <linux/ptrace.h>
18#include <linux/sched.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/syscalls.h> 17#include <linux/syscalls.h>
21#include <linux/file.h> 18#include <linux/file.h>
@@ -25,11 +22,9 @@
25#include <linux/msg.h> 22#include <linux/msg.h>
26#include <linux/shm.h> 23#include <linux/shm.h>
27#include <linux/compiler.h> 24#include <linux/compiler.h>
28#include <linux/module.h>
29#include <linux/ipc.h> 25#include <linux/ipc.h>
30#include <linux/uaccess.h> 26#include <linux/uaccess.h>
31#include <linux/slab.h> 27#include <linux/slab.h>
32#include <linux/random.h>
33#include <linux/elf.h> 28#include <linux/elf.h>
34 29
35#include <asm/asm.h> 30#include <asm/asm.h>
@@ -66,121 +61,6 @@ out:
66 return res; 61 return res;
67} 62}
68 63
69unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
70
71EXPORT_SYMBOL(shm_align_mask);
72
73#define COLOUR_ALIGN(addr,pgoff) \
74 ((((addr) + shm_align_mask) & ~shm_align_mask) + \
75 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
76
77unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 unsigned long len, unsigned long pgoff, unsigned long flags)
79{
80 struct vm_area_struct * vmm;
81 int do_color_align;
82 unsigned long task_size;
83
84#ifdef CONFIG_32BIT
85 task_size = TASK_SIZE;
86#else /* Must be CONFIG_64BIT*/
87 task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
88#endif
89
90 if (len > task_size)
91 return -ENOMEM;
92
93 if (flags & MAP_FIXED) {
94 /* Even MAP_FIXED mappings must reside within task_size. */
95 if (task_size - len < addr)
96 return -EINVAL;
97
98 /*
99 * We do not accept a shared mapping if it would violate
100 * cache aliasing constraints.
101 */
102 if ((flags & MAP_SHARED) &&
103 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
104 return -EINVAL;
105 return addr;
106 }
107
108 do_color_align = 0;
109 if (filp || (flags & MAP_SHARED))
110 do_color_align = 1;
111 if (addr) {
112 if (do_color_align)
113 addr = COLOUR_ALIGN(addr, pgoff);
114 else
115 addr = PAGE_ALIGN(addr);
116 vmm = find_vma(current->mm, addr);
117 if (task_size - len >= addr &&
118 (!vmm || addr + len <= vmm->vm_start))
119 return addr;
120 }
121 addr = current->mm->mmap_base;
122 if (do_color_align)
123 addr = COLOUR_ALIGN(addr, pgoff);
124 else
125 addr = PAGE_ALIGN(addr);
126
127 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
128 /* At this point: (!vmm || addr < vmm->vm_end). */
129 if (task_size - len < addr)
130 return -ENOMEM;
131 if (!vmm || addr + len <= vmm->vm_start)
132 return addr;
133 addr = vmm->vm_end;
134 if (do_color_align)
135 addr = COLOUR_ALIGN(addr, pgoff);
136 }
137}
138
139void arch_pick_mmap_layout(struct mm_struct *mm)
140{
141 unsigned long random_factor = 0UL;
142
143 if (current->flags & PF_RANDOMIZE) {
144 random_factor = get_random_int();
145 random_factor = random_factor << PAGE_SHIFT;
146 if (TASK_IS_32BIT_ADDR)
147 random_factor &= 0xfffffful;
148 else
149 random_factor &= 0xffffffful;
150 }
151
152 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
153 mm->get_unmapped_area = arch_get_unmapped_area;
154 mm->unmap_area = arch_unmap_area;
155}
156
157static inline unsigned long brk_rnd(void)
158{
159 unsigned long rnd = get_random_int();
160
161 rnd = rnd << PAGE_SHIFT;
162 /* 8MB for 32bit, 256MB for 64bit */
163 if (TASK_IS_32BIT_ADDR)
164 rnd = rnd & 0x7ffffful;
165 else
166 rnd = rnd & 0xffffffful;
167
168 return rnd;
169}
170
171unsigned long arch_randomize_brk(struct mm_struct *mm)
172{
173 unsigned long base = mm->brk;
174 unsigned long ret;
175
176 ret = PAGE_ALIGN(base + brk_rnd());
177
178 if (ret < mm->brk)
179 return mm->brk;
180
181 return ret;
182}
183
184SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, 64SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
185 unsigned long, prot, unsigned long, flags, unsigned long, 65 unsigned long, prot, unsigned long, flags, unsigned long,
186 fd, off_t, offset) 66 fd, off_t, offset)
@@ -383,12 +263,11 @@ save_static_function(sys_sysmips);
383static int __used noinline 263static int __used noinline
384_sys_sysmips(nabi_no_regargs struct pt_regs regs) 264_sys_sysmips(nabi_no_regargs struct pt_regs regs)
385{ 265{
386 long cmd, arg1, arg2, arg3; 266 long cmd, arg1, arg2;
387 267
388 cmd = regs.regs[4]; 268 cmd = regs.regs[4];
389 arg1 = regs.regs[5]; 269 arg1 = regs.regs[5];
390 arg2 = regs.regs[6]; 270 arg2 = regs.regs[6];
391 arg3 = regs.regs[7];
392 271
393 switch (cmd) { 272 switch (cmd) {
394 case MIPS_ATOMIC_SET: 273 case MIPS_ATOMIC_SET:
@@ -405,7 +284,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
405 if (arg1 & 2) 284 if (arg1 & 2)
406 set_thread_flag(TIF_LOGADE); 285 set_thread_flag(TIF_LOGADE);
407 else 286 else
408 clear_thread_flag(TIF_FIXADE); 287 clear_thread_flag(TIF_LOGADE);
409 288
410 return 0; 289 return 0;
411 290
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index fb7497405510..1083ad4e1017 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -102,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void)
102 case CPU_R4400SC: 102 case CPU_R4400SC:
103 case CPU_R4400MC: 103 case CPU_R4400MC:
104 /* 104 /*
105 * The published errata for the R4400 upto 3.0 say the CPU 105 * The published errata for the R4400 up to 3.0 say the CPU
106 * has the mfc0 from count bug. 106 * has the mfc0 from count bug.
107 */ 107 */
108 if ((current_cpu_data.processor_id & 0xff) <= 0x30) 108 if ((current_cpu_data.processor_id & 0xff) <= 0x30)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 03ec0019032b..e9b3af27d844 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -28,6 +28,8 @@
28#include <linux/kprobes.h> 28#include <linux/kprobes.h>
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30#include <linux/kdb.h> 30#include <linux/kdb.h>
31#include <linux/irq.h>
32#include <linux/perf_event.h>
31 33
32#include <asm/bootinfo.h> 34#include <asm/bootinfo.h>
33#include <asm/branch.h> 35#include <asm/branch.h>
@@ -51,7 +53,6 @@
51#include <asm/mmu_context.h> 53#include <asm/mmu_context.h>
52#include <asm/types.h> 54#include <asm/types.h>
53#include <asm/stacktrace.h> 55#include <asm/stacktrace.h>
54#include <asm/irq.h>
55#include <asm/uasm.h> 56#include <asm/uasm.h>
56 57
57extern void check_wait(void); 58extern void check_wait(void);
@@ -82,7 +83,8 @@ extern asmlinkage void handle_mcheck(void);
82extern asmlinkage void handle_reserved(void); 83extern asmlinkage void handle_reserved(void);
83 84
84extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 85extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
85 struct mips_fpu_struct *ctx, int has_fpu); 86 struct mips_fpu_struct *ctx, int has_fpu,
87 void *__user *fault_addr);
86 88
87void (*board_be_init)(void); 89void (*board_be_init)(void);
88int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 90int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -372,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 	unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
+	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+		sig = 0;
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
@@ -381,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 	mips_mt_regdump(dvpret);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
-		sig = 0;
-
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	add_taint(TAINT_DIE);
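Reordering die() so the DIE_OOPS notification fires once, before the console output, also starts honouring NOTIFY_STOP: a registered handler (kgdb/kdb, a kprobes client) can now swallow the SIGSEGV. A minimal sketch of such a handler, using the generic notifier API (the handler body is illustrative):

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	static int demo_die_handler(struct notifier_block *nb,
				    unsigned long val, void *data)
	{
		struct die_args *args = data;

		if (val == DIE_OOPS)
			pr_info("oops: %s, epc=%lx\n",
				args->str, args->regs->cp0_epc);
		/* Returning NOTIFY_STOP here would zero out the signal. */
		return NOTIFY_DONE;
	}

	static struct notifier_block demo_die_nb = {
		.notifier_call = demo_die_handler,
	};

	/* somewhere in init code: */
	register_die_notifier(&demo_die_nb);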
@@ -576,10 +576,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
  */
 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
-	if ((opcode & OPCODE) == LL)
+	if ((opcode & OPCODE) == LL) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_ll(regs, opcode);
-	if ((opcode & OPCODE) == SC)
+	}
+	if ((opcode & OPCODE) == SC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_sc(regs, opcode);
+	}
 
 	return -1;			/* Must be something else ... */
 }
@@ -595,6 +601,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
 		int rt = (opcode & RT) >> 16;
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		switch (rd) {
 		case 0:		/* CPU number */
 			regs->regs[rt] = smp_processor_id();
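simulate_rdhwr() is the slow path for cores that trap on a user-mode RDHWR; counting each emulation as PERF_COUNT_SW_EMULATION_FAULTS makes that hidden cost measurable. A MIPS-only userspace probe of the fast/slow path (hardware register $0 is CPUNum):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cpu;

		/* Traps and is emulated by simulate_rdhwr() on cores without
		 * RDHWR support (or with the bit masked in HWREna). */
		__asm__ __volatile__(
			".set	push\n\t"
			".set	mips32r2\n\t"
			"rdhwr	%0, $0\n\t"
			".set	pop"
			: "=r" (cpu));
		printf("running on cpu %lu\n", cpu);
		return 0;
	}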
@@ -630,8 +638,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 
 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
-	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return 0;
+	}
 
 	return -1;			/* Must be something else ... */
 }
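By this point the same two-line perf_sw_event() call appears in all three simulate_* helpers. Note the argument list: in kernels of this vintage the signature is perf_sw_event(event_id, nr, nmi, regs, addr), so the extra 0 is the since-removed NMI flag. A hypothetical consolidation (not part of this patch):

	static inline void account_emulated_insn(struct pt_regs *regs)
	{
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
	}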
@@ -649,12 +660,36 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	force_sig_info(SIGFPE, &info, current);
 }
 
+static int process_fpemu_return(int sig, void __user *fault_addr)
+{
+	if (sig == SIGSEGV || sig == SIGBUS) {
+		struct siginfo si = {0};
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		if (sig == SIGSEGV) {
+			if (find_vma(current->mm, (unsigned long)fault_addr))
+				si.si_code = SEGV_ACCERR;
+			else
+				si.si_code = SEGV_MAPERR;
+		} else {
+			si.si_code = BUS_ADRERR;
+		}
+		force_sig_info(sig, &si, current);
+		return 1;
+	} else if (sig) {
+		force_sig(sig, current);
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
 /*
  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
  */
 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 {
-	siginfo_t info;
+	siginfo_t info = {0};
 
 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
 	    == NOTIFY_STOP)
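process_fpemu_return() gives FPU-emulator faults proper siginfo instead of a bare force_sig(): find_vma() distinguishes a protection fault (SEGV_ACCERR, mapping exists) from a missing mapping (SEGV_MAPERR), and si_addr carries the faulting address. Userspace can observe exactly this via SA_SIGINFO; a small, architecture-neutral demo:

	#define _POSIX_C_SOURCE 200809L
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void handler(int sig, siginfo_t *si, void *ctx)
	{
		/* printf in a signal handler is fine for a throwaway demo */
		printf("SIGSEGV at %p, si_code=%s\n", si->si_addr,
		       si->si_code == SEGV_MAPERR ? "SEGV_MAPERR"
						  : "SEGV_ACCERR");
		exit(0);
	}

	int main(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGSEGV, &sa, NULL);

		*(volatile int *)8 = 1;	/* unmapped address -> SEGV_MAPERR */
		return 0;
	}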
@@ -663,6 +698,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 
 	if (fcr31 & FPU_CSR_UNI_X) {
 		int sig;
+		void __user *fault_addr = NULL;
 
 		/*
 		 * Unimplemented operation exception.  If we've got the full
@@ -678,7 +714,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		lose_fpu(1);
 
 		/* Run the emulator */
-		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);
 
 		/*
 		 * We can't allow the emulated instruction to leave any of
@@ -690,8 +727,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		own_fpu(1);	/* Using the FPU again.  */
 
 		/* If something went wrong, signal */
-		if (sig)
-			force_sig(sig, current);
+		process_fpemu_return(sig, fault_addr);
 
 		return;
 	} else if (fcr31 & FPU_CSR_INV_X)
@@ -984,11 +1020,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 	if (!raw_cpu_has_fpu) {
 		int sig;
+		void __user *fault_addr = NULL;
 		sig = fpu_emulator_cop1Handler(regs,
-					       &current->thread.fpu, 0);
-		if (sig)
-			force_sig(sig, current);
-		else
+					       &current->thread.fpu,
+					       0, &fault_addr);
+		if (!process_fpemu_return(sig, fault_addr))
 			mt_ase_fp_affinity();
 	}
 
@@ -1469,6 +1505,7 @@ void __cpuinit per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+	unsigned int hwrena = cpu_hwrena_impl_bits;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1501,14 +1538,14 @@ void __cpuinit per_cpu_trap_init(void)
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);
 
-	if (cpu_has_mips_r2) {
-		unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+	if (cpu_has_mips_r2)
+		hwrena |= 0x0000000f;
 
-		if (!noulri && cpu_has_userlocal)
-			enable |= (1 << 29);
+	if (!noulri && cpu_has_userlocal)
+		hwrena |= (1 << 29);
 
-		write_c0_hwrena(enable);
-	}
+	if (hwrena)
+		write_c0_hwrena(hwrena);
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
@@ -1553,7 +1590,6 @@ void __cpuinit per_cpu_trap_init(void)
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
-	TLBMISS_HANDLER_SETUP();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -1575,6 +1611,7 @@ void __cpuinit per_cpu_trap_init(void)
 		write_c0_wired(0);
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
+	TLBMISS_HANDLER_SETUP();
 }
 
 /* Install CPU exception handler */
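The hwrena rework above folds cpu_hwrena_impl_bits into one mask: bits 0-3 expose the standard hardware registers (CPUNum, SYNCI_Step, CC, CCRes) to user-mode RDHWR on MIPS R2 cores, and bit 29 exposes UserLocal, the per-thread register the kernel loads on context switch and libc uses for TLS. What bit 29 buys, from userspace (MIPS-only sketch):

	unsigned long tls;

	/* One instruction when HWREna bit 29 is set; a trap into
	 * simulate_rdhwr() otherwise. */
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips32r2\n\t"
		"rdhwr	%0, $29\n\t"
		".set	pop"
		: "=r" (tls));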
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 33d5a5ce4a29..cfea1adfa153 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
 #include <asm/asm.h>
 #include <asm/branch.h>
 #include <asm/byteorder.h>
@@ -109,6 +111,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	unsigned long value;
 	unsigned int res;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+			1, 0, regs, 0);
+
 	/*
 	 * This load never faults.
 	 */
@@ -511,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	unsigned int __user *pc;
 	mm_segment_t seg;
 
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+			1, 0, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
 	 * Or are we running in MIPS16 mode?
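With do_ade() reporting PERF_COUNT_SW_ALIGNMENT_FAULTS (and the bad virtual address as the event address), unaligned-access fixups become countable per process. A self-contained counter using only the documented perf_event_open(2) interface:

	#define _GNU_SOURCE
	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_SOFTWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

		/* count for this process, on any CPU */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* ... do unaligned work here ... */

		read(fd, &count, sizeof(count));
		printf("alignment faults: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}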
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f25df73db923..a81176f44c74 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -65,15 +65,18 @@ SECTIONS
 	NOTES :text :note
 	.dummy : { *(.dummy) } :text
 
-	_sdata = .;			/* Start of data section */
 	RODATA
 
 	/* writeable */
+	_sdata = .;			/* Start of data section */
 	.data : {			/* Data */
 		. = . + DATAOFFSET;	/* for CONFIG_MAPPED_KERNEL */
 
 		INIT_TASK_DATA(PAGE_SIZE)
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+		READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 		DATA_DATA
 		CONSTRUCTORS
 	}
@@ -98,6 +101,13 @@ SECTIONS
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
 
+	. = ALIGN(4);
+	.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+		__mips_machines_start = .;
+		*(.mips.machines.init)
+		__mips_machines_end = .;
+	}
+
 	/* .exit.text is discarded at runtime, not link time, to deal with
 	 * references from .rodata
 	 */
@@ -108,7 +118,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
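.mips.machines.init is the classic table-in-a-section pattern: registration macros drop struct mips_machine records into the section, and the two linker-provided symbols bound the array at runtime. Roughly how the consumer side (mips_machine.c, added by this series per the diffstat) walks it; the struct layout here is illustrative:

	struct mips_machine {			/* illustrative layout */
		unsigned long	mach_type;
		const char	*mach_name;
		void		(*mach_setup)(void);
	};

	extern struct mips_machine __mips_machines_start[];
	extern struct mips_machine __mips_machines_end[];

	static struct mips_machine * __init find_machine(unsigned long mach_type)
	{
		struct mips_machine *m;

		for (m = __mips_machines_start; m < __mips_machines_end; m++)
			if (m->mach_type == mach_type)
				return m;
		return NULL;
	}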
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2bd2151c586a..dbb6b408f001 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -19,7 +19,7 @@
  * VPE support module
  *
  * Provides support for loading a MIPS SP program on VPE1.
- * The SP enviroment is rather simple, no tlb's.  It needs to be relocatable
+ * The SP environment is rather simple, no tlb's.  It needs to be relocatable
  * (or partially linked). You should initialise your stack in the startup
  * code. This loader looks for the symbol __start and sets up
  * execution to resume from there. The MIPS SDE kit contains suitable examples.
@@ -148,9 +148,9 @@ struct {
 	spinlock_t tc_list_lock;
 	struct list_head tc_list;	/* Thread contexts */
 } vpecontrol = {
-	.vpe_list_lock	= SPIN_LOCK_UNLOCKED,
+	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
-	.tc_list_lock	= SPIN_LOCK_UNLOCKED,
+	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
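SPIN_LOCK_UNLOCKED had to go because a single shared initializer gives every lock the same lockdep class; __SPIN_LOCK_UNLOCKED(name) keys the class (and the lock's reported name) to the variable. The two idiomatic forms:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* standalone lock */
	static DEFINE_SPINLOCK(demo_lock);

	/* lock embedded in a statically initialized object, as in
	 * vpecontrol above */
	static struct {
		spinlock_t lock;
		struct list_head list;
	} demo_state = {
		.lock = __SPIN_LOCK_UNLOCKED(demo_state.lock),
		.list = LIST_HEAD_INIT(demo_state.list),
	};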
@@ -1092,6 +1092,10 @@ static int vpe_open(struct inode *inode, struct file *filp)
 
 	/* this of-course trashes what was there before... */
 	v->pbuffer = vmalloc(P_SIZE);
+	if (!v->pbuffer) {
+		pr_warning("VPE loader: unable to allocate memory\n");
+		return -ENOMEM;
+	}
 	v->plen = P_SIZE;
 	v->load_addr = NULL;
 	v->len = 0;
@@ -1149,10 +1153,9 @@ static int vpe_release(struct inode *inode, struct file *filp)
 	if (ret < 0)
 		v->shared_ptr = NULL;
 
-	// cleanup any temp buffers
-	if (v->pbuffer)
-		vfree(v->pbuffer);
+	vfree(v->pbuffer);
 	v->plen = 0;
+
 	return ret;
 }
 
@@ -1169,11 +1172,6 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	if (v == NULL)
 		return -ENODEV;
 
-	if (v->pbuffer == NULL) {
-		printk(KERN_ERR "VPE loader: no buffer for program\n");
-		return -ENOMEM;
-	}
-
 	if ((count + v->len) > v->plen) {
 		printk(KERN_WARNING
 		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
@@ -1192,7 +1190,8 @@ static const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
-	.write = vpe_write
+	.write = vpe_write,
+	.llseek = noop_llseek,
 };
 
 /* module wrapper entry points */
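The fops change is part of the tree-wide push to make every driver state its llseek behaviour explicitly (a prerequisite of the BKL removal): noop_llseek() reports success without ever moving the file position, whereas no_llseek() fails with -ESPIPE. A sketch with hypothetical demo_* handlers:

	#include <linux/fs.h>
	#include <linux/module.h>

	static int demo_open(struct inode *inode, struct file *filp)
	{
		return 0;
	}

	static ssize_t demo_write(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
	{
		return count;		/* pretend all input was consumed */
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_open,
		.write		= demo_write,
		.llseek		= noop_llseek,	/* seekable in name only */
	};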