commit 8dea78da5cee153b8af9c07a2745f6c55057fe12
Author:    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
Committer: Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
Tree:      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/mips/kernel
Parent:    406089d01562f1e2bf9f089fd7637009ebaad589

    Patched in Tegra support.

Diffstat (limited to 'arch/mips/kernel'):
 74 files changed, 2116 insertions, 3369 deletions
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c
index 5c6b2ab1f56..cbf3fe20ad1 100644
--- a/arch/mips/kernel/8250-platform.c
+++ b/arch/mips/kernel/8250-platform.c
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
  */
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 007c33d7371..83bba332bbf 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux/MIPS kernel.
 #
 
-extra-y			:= head.o vmlinux.lds
+extra-y			:= head.o init_task.o vmlinux.lds
 
 obj-y			+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 			   ptrace.o reset.o setup.o signal.o syscall.o \
@@ -11,12 +11,10 @@ obj-y			+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
-CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
 endif
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
-obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
@@ -25,25 +23,39 @@ obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)	+= csrc-powertv.o
-obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
+obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
-obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
 
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 
-obj-$(CONFIG_CPU_R4K_FPU)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R3000)		+= r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_R4300)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R4X00)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R5000)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R6000)		+= r6000_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R5432)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R5500)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R8000)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_RM7000)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_RM9000)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_NEVADA)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R10000)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_SB1)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_TX39XX)	+= r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_TX49XX)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_VR41XX)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= octeon_switch.o
+obj-$(CONFIG_CPU_XLR)		+= r4k_fpu.o r4k_switch.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SMP_UP)		+= smp-up.o
-obj-$(CONFIG_CPU_BMIPS)		+= smp-bmips.o bmips_vec.o
 
 obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
@@ -54,10 +66,12 @@ obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
+obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
+obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
 obj-$(CONFIG_MIPS_MSC)		+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
@@ -79,22 +93,20 @@ obj-$(CONFIG_I8253)		+= i8253.o
 
 obj-$(CONFIG_GPIO_TXX9)		+= gpio_txx9.o
 
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
 
 obj-$(CONFIG_OF)		+= prom.o
 
-CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
 
 obj-$(CONFIG_MIPS_CPUFREQ)	+= cpufreq/
 
-obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2c7ff..6b30fb2caa6 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/interrupt.h>
 #include <linux/kbuild.h>
 #include <linux/suspend.h>
 #include <asm/ptrace.h>
@@ -125,6 +126,10 @@ void output_thread_defines(void)
 	       thread.cp0_baduaddr);
 	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
+	OFFSET(THREAD_TRAMP, task_struct, \
+	       thread.irix_trampoline);
+	OFFSET(THREAD_OLDCTX, task_struct, \
+	       thread.irix_oldctx);
 	BLANK();
 }
 
@@ -200,9 +205,6 @@ void output_mm_defines(void)
 	DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
 	DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
 	BLANK();
-	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
-	DEFINE(_PAGE_SIZE, PAGE_SIZE);
-	BLANK();
 }
 
 #ifdef CONFIG_32BIT
@@ -290,6 +292,15 @@ void output_signal_defined(void)
 	BLANK();
 }
 
+void output_irq_cpustat_t_defines(void)
+{
+	COMMENT("Linux irq_cpustat_t offsets.");
+	DEFINE(IC_SOFTIRQ_PENDING,
+	       offsetof(irq_cpustat_t, __softirq_pending));
+	DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
+	BLANK();
+}
+
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 void output_octeon_cop2_state_defines(void)
 {
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
deleted file mode 100644
index e908e81330b..00000000000
--- a/arch/mips/kernel/bmips_vec.S
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
- *
- * Reset/NMI/re-entry vectors for BMIPS processors
- */
-
-#include <linux/init.h>
-
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/cacheops.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/addrspace.h>
-#include <asm/hazards.h>
-#include <asm/bmips.h>
-
-	.macro	BARRIER
-	.set	mips32
-	_ssnop
-	_ssnop
-	_ssnop
-	.set	mips0
-	.endm
-
-	__CPUINIT
-
-/***********************************************************************
- * Alternate CPU1 startup vector for BMIPS4350
- *
- * On some systems the bootloader has already started CPU1 and configured
- * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
- * triggered by the SW1 interrupt.  If that is the case we try to move
- * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
- ***********************************************************************/
-
-LEAF(bmips_smp_movevec)
-	la	k0, 1f
-	li	k1, CKSEG1
-	or	k0, k1
-	jr	k0
-
-1:
-	/* clear IV, pending IPIs */
-	mtc0	zero, CP0_CAUSE
-
-	/* re-enable IRQs to wait for SW1 */
-	li	k0, ST0_IE | ST0_BEV | STATUSF_IP1
-	mtc0	k0, CP0_STATUS
-
-	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
-	li	k0, 0xff400000
-	mtc0	k0, $22, 6
-	li	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
-	or	k0, k1
-	li	k1, 0xa0080000
-	sw	k1, 0(k0)
-
-	/* wait here for SW1 interrupt from bmips_boot_secondary() */
-	wait
-
-	la	k0, bmips_reset_nmi_vec
-	li	k1, CKSEG1
-	or	k0, k1
-	jr	k0
-END(bmips_smp_movevec)
-
-/***********************************************************************
- * Reset/NMI vector
- * For BMIPS processors that can relocate their exception vectors, this
- * entire function gets copied to 0x8000_0000.
- ***********************************************************************/
-
-NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
-	.set	push
-	.set	noat
-	.align	4
-
-#ifdef CONFIG_SMP
-	/* if the NMI bit is clear, assume this is a CPU1 reset instead */
-	li	k1, (1 << 19)
-	mfc0	k0, CP0_STATUS
-	and	k0, k1
-	beqz	k0, bmips_smp_entry
-
-#if defined(CONFIG_CPU_BMIPS5000)
-	/* if we're not on core 0, this must be the SMP boot signal */
-	li	k1, (3 << 25)
-	mfc0	k0, $22
-	and	k0, k1
-	bnez	k0, bmips_smp_entry
-#endif
-#endif /* CONFIG_SMP */
-
-	/* nope, it's just a regular NMI */
-	SAVE_ALL
-	move	a0, sp
-
-	/* clear EXL, ERL, BEV so that TLB refills still work */
-	mfc0	k0, CP0_STATUS
-	li	k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
-	or	k0, k1
-	xor	k0, k1
-	mtc0	k0, CP0_STATUS
-	BARRIER
-
-	/* jump to the NMI handler function */
-	la	k0, nmi_handler
-	jr	k0
-
-	RESTORE_ALL
-	.set	mips3
-	eret
-
-/***********************************************************************
- * CPU1 reset vector (used for the initial boot only)
- * This is still part of bmips_reset_nmi_vec().
- ***********************************************************************/
-
-#ifdef CONFIG_SMP
-
-bmips_smp_entry:
-
-	/* set up CP0 STATUS; enable FPU */
-	li	k0, 0x30000000
-	mtc0	k0, CP0_STATUS
-	BARRIER
-
-	/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
-	mfc0	k0, CP0_CONFIG
-	ori	k0, 0x07
-	xori	k0, 0x04
-	mtc0	k0, CP0_CONFIG
-
-#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
-	/* initialize CPU1's local I-cache */
-	li	k0, 0x80000000
-	li	k1, 0x80010000
-	mtc0	zero, $28
-	mtc0	zero, $28, 1
-	BARRIER
-
-1:	cache	Index_Store_Tag_I, 0(k0)
-	addiu	k0, 16
-	bne	k0, k1, 1b
-#elif defined(CONFIG_CPU_BMIPS5000)
-	/* set exception vector base */
-	la	k0, ebase
-	lw	k0, 0(k0)
-	mtc0	k0, $15, 1
-	BARRIER
-#endif
-
-	/* jump back to kseg0 in case we need to remap the kseg1 area */
-	la	k0, 1f
-	jr	k0
-1:
-	la	k0, bmips_enable_xks01
-	jalr	k0
-
-	/* use temporary stack to set up upper memory TLB */
-	li	sp, BMIPS_WARM_RESTART_VEC
-	la	k0, plat_wired_tlb_setup
-	jalr	k0
-
-	/* switch to permanent stack and continue booting */
-
-	.global	bmips_secondary_reentry
-bmips_secondary_reentry:
-	la	k0, bmips_smp_boot_sp
-	lw	sp, 0(k0)
-	la	k0, bmips_smp_boot_gp
-	lw	gp, 0(k0)
-	la	k0, start_secondary
-	jr	k0
-
-#endif /* CONFIG_SMP */
-
-	.align	4
-	.global	bmips_reset_nmi_vec_end
-bmips_reset_nmi_vec_end:
-
-END(bmips_reset_nmi_vec)
-
-	.set	pop
-	.previous
-
-/***********************************************************************
- * CPU1 warm restart vector (used for second and subsequent boots).
- * Also used for S2 standby recovery (PM).
- * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
- ***********************************************************************/
-
-LEAF(bmips_smp_int_vec)
-
-	.align	4
-	mfc0	k0, CP0_STATUS
-	ori	k0, 0x01
-	xori	k0, 0x01
-	mtc0	k0, CP0_STATUS
-	eret
-
-	.align	4
-	.global	bmips_smp_int_vec_end
-bmips_smp_int_vec_end:
-
-END(bmips_smp_int_vec)
-
-/***********************************************************************
- * XKS01 support
- * Certain CPUs support extending kseg0 to 1024MB.
- ***********************************************************************/
-
-	__CPUINIT
-
-LEAF(bmips_enable_xks01)
-
-#if defined(CONFIG_XKS01)
-
-#if defined(CONFIG_CPU_BMIPS4380)
-	mfc0	t0, $22, 3
-	li	t1, 0x1ff0
-	li	t2, (1 << 12) | (1 << 9)
-	or	t0, t1
-	xor	t0, t1
-	or	t0, t2
-	mtc0	t0, $22, 3
-	BARRIER
-#elif defined(CONFIG_CPU_BMIPS5000)
-	mfc0	t0, $22, 5
-	li	t1, 0x01ff
-	li	t2, (1 << 8) | (1 << 5)
-	or	t0, t1
-	xor	t0, t1
-	or	t0, t2
-	mtc0	t0, $22, 5
-	BARRIER
-#else
-
-#error Missing XKS01 setup
-
-#endif
-
-#endif /* defined(CONFIG_XKS01) */
-
-	jr	ra
-
-END(bmips_enable_xks01)
-
-	.previous
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d735d0e58f..32103cc2a25 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,7 +9,6 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
-#include <linux/module.h>
 #include <asm/branch.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -18,22 +17,28 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
-/**
- * __compute_return_epc_for_insn - Computes the return address and do emulate
- *			branch simulation, if required.
- *
- * @regs:	Pointer to pt_regs
- * @insn:	branch instruction to decode
- * @returns:	-EFAULT on error and forces SIGBUS, and on success
- *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
- *		evaluating the branch.
+/*
+ * Compute the return address and do emulate branch simulation, if required.
  */
-int __compute_return_epc_for_insn(struct pt_regs *regs,
-				  union mips_instruction insn)
+int __compute_return_epc(struct pt_regs *regs)
 {
+	unsigned int __user *addr;
 	unsigned int bit, fcr31, dspcontrol;
-	long epc = regs->cp0_epc;
-	int ret = 0;
+	long epc;
+	union mips_instruction insn;
+
+	epc = regs->cp0_epc;
+	if (epc & 3)
+		goto unaligned;
+
+	/*
+	 * Read the instruction
+	 */
+	addr = (unsigned int __user *) epc;
+	if (__get_user(insn.word, addr)) {
+		force_sig(SIGSEGV, current);
+		return -EFAULT;
+	}
 
 	switch (insn.i_format.opcode) {
 	/*
@@ -59,22 +64,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		switch (insn.i_format.rt) {
 		case bltz_op:
 		case bltzl_op:
-			if ((long)regs->regs[insn.i_format.rs] < 0) {
+			if ((long)regs->regs[insn.i_format.rs] < 0)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == bltzl_op)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
 
 		case bgez_op:
 		case bgezl_op:
-			if ((long)regs->regs[insn.i_format.rs] >= 0) {
+			if ((long)regs->regs[insn.i_format.rs] >= 0)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == bgezl_op)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
@@ -82,11 +83,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		case bltzal_op:
 		case bltzall_op:
 			regs->regs[31] = epc + 8;
-			if ((long)regs->regs[insn.i_format.rs] < 0) {
+			if ((long)regs->regs[insn.i_format.rs] < 0)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == bltzall_op)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
@@ -94,15 +93,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		case bgezal_op:
 		case bgezall_op:
 			regs->regs[31] = epc + 8;
-			if ((long)regs->regs[insn.i_format.rs] >= 0) {
+			if ((long)regs->regs[insn.i_format.rs] >= 0)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == bgezall_op)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
-
 		case bposge32_op:
 			if (!cpu_has_dsp)
 				goto sigill;
@@ -137,11 +133,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	case beq_op:
 	case beql_op:
 		if (regs->regs[insn.i_format.rs] ==
-		    regs->regs[insn.i_format.rt]) {
+		    regs->regs[insn.i_format.rt])
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
-				ret = BRANCH_LIKELY_TAKEN;
-		} else
+		else
 			epc += 8;
 		regs->cp0_epc = epc;
 		break;
@@ -149,11 +143,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	case bne_op:
 	case bnel_op:
 		if (regs->regs[insn.i_format.rs] !=
-		    regs->regs[insn.i_format.rt]) {
+		    regs->regs[insn.i_format.rt])
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
-				ret = BRANCH_LIKELY_TAKEN;
-		} else
+		else
 			epc += 8;
 		regs->cp0_epc = epc;
 		break;
@@ -161,11 +153,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	case blez_op: /* not really i_format */
 	case blezl_op:
 		/* rt field assumed to be zero */
-		if ((long)regs->regs[insn.i_format.rs] <= 0) {
+		if ((long)regs->regs[insn.i_format.rs] <= 0)
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
-				ret = BRANCH_LIKELY_TAKEN;
-		} else
+		else
 			epc += 8;
 		regs->cp0_epc = epc;
 		break;
@@ -173,11 +163,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 	case bgtz_op:
 	case bgtzl_op:
 		/* rt field assumed to be zero */
-		if ((long)regs->regs[insn.i_format.rs] > 0) {
+		if ((long)regs->regs[insn.i_format.rs] > 0)
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
-				ret = BRANCH_LIKELY_TAKEN;
-		} else
+		else
 			epc += 8;
 		regs->cp0_epc = epc;
 		break;
@@ -199,22 +187,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		switch (insn.i_format.rt & 3) {
 		case 0:	/* bc1f */
 		case 2:	/* bc1fl */
-			if (~fcr31 & (1 << bit)) {
+			if (~fcr31 & (1 << bit))
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 2)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
 
 		case 1:	/* bc1t */
 		case 3:	/* bc1tl */
-			if (fcr31 & (1 << bit)) {
+			if (fcr31 & (1 << bit))
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 3)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
 			break;
@@ -255,39 +239,15 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 #endif
 	}
 
-	return ret;
-
-sigill:
-	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
-	force_sig(SIGBUS, current);
-	return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
-
-int __compute_return_epc(struct pt_regs *regs)
-{
-	unsigned int __user *addr;
-	long epc;
-	union mips_instruction insn;
-
-	epc = regs->cp0_epc;
-	if (epc & 3)
-		goto unaligned;
-
-	/*
-	 * Read the instruction
-	 */
-	addr = (unsigned int __user *) epc;
-	if (__get_user(insn.word, addr)) {
-		force_sig(SIGSEGV, current);
-		return -EFAULT;
-	}
-
-	return __compute_return_epc_for_insn(regs, insn);
+	return 0;
 
 unaligned:
 	printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
 	force_sig(SIGBUS, current);
 	return -EFAULT;
 
+sigill:
+	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+	force_sig(SIGBUS, current);
+	return -EFAULT;
 }
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 69bbfae183b..36c3898b76d 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -145,7 +145,7 @@ void __cpuinit sb1480_clockevent_init(void)
 	bcm1480_unmask_irq(cpu, irq);
 
 	action->handler	= sibyte_counter_handler;
-	action->flags	= IRQF_PERCPU | IRQF_TIMER;
+	action->flags	= IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER;
 	action->name	= name;
 	action->dev_id	= cd;
 
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ed648cb5a69..939157e397b 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -108,7 +108,7 @@ static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
 
 static struct irqaction ds1287_irqaction = {
 	.handler	= ds1287_interrupt,
-	.flags		= IRQF_PERCPU | IRQF_TIMER,
+	.flags		= IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
 	.name		= "ds1287",
 };
 
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 831b47585b7..339f3639b90 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -114,7 +114,7 @@ static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
 
 static struct irqaction gt641xx_timer0_irqaction = {
 	.handler	= gt641xx_timer0_interrupt,
-	.flags		= IRQF_PERCPU | IRQF_TIMER,
+	.flags		= IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
 	.name		= "gt641xx_timer0",
 };
 
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 75323925e53..98c5a9737c1 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -15,7 +15,6 @@
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
-#include <asm/gic.h>
 
 /*
  * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
@@ -85,7 +84,7 @@ out:
 
 struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-	.flags = IRQF_PERCPU | IRQF_TIMER,
+	.flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
 	.name = "timer",
 };
 
@@ -99,19 +98,24 @@ void mips_event_handler(struct clock_event_device *dev)
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_IRQ_GIC
-	if (cpu_has_veic)
-		return gic_get_timer_pending();
-#endif
 	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
 }
 
 /*
  * Compare interrupt can be routed and latched outside the core,
- * so wait up to worst case number of cycle counter ticks for timer interrupt
- * changes to propagate to the cause register.
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 time the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
  */
-#define COMPARE_INT_SEEN_TICKS	50
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
 
 int c0_compare_int_usable(void)
 {
@@ -122,12 +126,8 @@ int c0_compare_int_usable(void)
 	 * IP7 already pending?  Try to clear it by acking the timer.
 	 */
 	if (c0_compare_int_pending()) {
-		cnt = read_c0_count();
-		write_c0_compare(cnt);
-		back_to_back_c0_hazard();
-		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
-			if (!c0_compare_int_pending())
-				break;
+		write_c0_compare(read_c0_count());
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -136,7 +136,7 @@ int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		back_to_back_c0_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 		    break;
 		/* increase delta if the timer was already expired */
@@ -145,17 +145,12 @@ int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */
 
-	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
-		if (c0_compare_int_pending())
-			break;
+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;
-	cnt = read_c0_count();
-	write_c0_compare(cnt);
-	back_to_back_c0_hazard();
-	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
-		if (!c0_compare_int_pending())
-			break;
+
+	write_c0_compare(read_c0_count());
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
 
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index e73439fd685..590c54f28a8 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -144,7 +144,7 @@ void __cpuinit sb1250_clockevent_init(void)
 	sb1250_unmask_irq(cpu, irq);
 
 	action->handler	= sibyte_counter_handler;
-	action->flags	= IRQF_PERCPU | IRQF_TIMER;
+	action->flags	= IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER;
 	action->name	= name;
 	action->dev_id	= cd;
 
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index e5c30b1d086..f0ab92a1b05 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -146,7 +146,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
 
 static struct irqaction txx9tmr_irq = {
 	.handler	= txx9tmr_interrupt,
-	.flags		= IRQF_PERCPU | IRQF_TIMER,
+	.flags		= IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
 	.name		= "txx9tmr",
 	.dev_id		= &txx9_clock_event_device,
 };
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index d6a18644365..f305ca14351 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -16,7 +16,7 @@
 #include <asm/cpu.h>
 #include <asm/fpu.h>
 #include <asm/mipsregs.h>
-#include <asm/setup.h>
+#include <asm/system.h>
 
 static char bug64hit[] __initdata =
 	"reliable operation impossible!\n%s";
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index cce3782c96c..ebc0cd20b35 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
  * Copyright (C) xxxx  the Anonymous
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004  Maciej W. Rozycki
- * Copyright (C) 2001, 2004, 2011, 2012	 MIPS Technologies, Inc.
+ * Copyright (C) 2001, 2004  MIPS Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -16,14 +16,14 @@
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 #include <linux/stddef.h>
-#include <linux/export.h>
+#include <linux/module.h>
 
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/fpu.h>
 #include <asm/mipsregs.h>
+#include <asm/system.h>
 #include <asm/watch.h>
-#include <asm/elf.h>
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
@@ -142,7 +142,7 @@ int __cpuinitdata mips_dsp_disabled;
 
 static int __init dsp_disable(char *s)
 {
-	cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+	cpu_data[0].ases &= ~MIPS_ASE_DSP;
 	mips_dsp_disabled = 1;
 
 	return 1;
@@ -190,9 +190,6 @@ void __init check_wait(void)
 	case CPU_CAVIUM_OCTEON_PLUS:
 	case CPU_CAVIUM_OCTEON2:
 	case CPU_JZRISC:
-	case CPU_LOONGSON1:
-	case CPU_XLR:
-	case CPU_XLP:
 		cpu_wait = r4k_wait;
 		break;
 
@@ -200,7 +197,6 @@ void __init check_wait(void)
 		cpu_wait = rm7k_wait_irqoff;
 		break;
 
-	case CPU_M14KC:
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_1004K:
@@ -331,160 +327,6 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 #endif
 }
 
-static char unknown_isa[] __cpuinitdata = KERN_ERR \
-	"Unsupported ISA type, c0.config0: %d.";
-
-static inline unsigned int decode_config0(struct cpuinfo_mips *c)
-{
-	unsigned int config0;
-	int isa;
-
-	config0 = read_c0_config();
-
-	if (((config0 & MIPS_CONF_MT) >> 7) == 1)
-		c->options |= MIPS_CPU_TLB;
-	isa = (config0 & MIPS_CONF_AT) >> 13;
-	switch (isa) {
-	case 0:
-		switch ((config0 & MIPS_CONF_AR) >> 10) {
-		case 0:
-			c->isa_level = MIPS_CPU_ISA_M32R1;
-			break;
-		case 1:
-			c->isa_level = MIPS_CPU_ISA_M32R2;
-			break;
-		default:
-			goto unknown;
-		}
-		break;
-	case 2:
-		switch ((config0 & MIPS_CONF_AR) >> 10) {
-		case 0:
-			c->isa_level = MIPS_CPU_ISA_M64R1;
-			break;
-		case 1:
-			c->isa_level = MIPS_CPU_ISA_M64R2;
-			break;
-		default:
-			goto unknown;
-		}
-		break;
-	default:
-		goto unknown;
-	}
-
-	return config0 & MIPS_CONF_M;
-
-unknown:
-	panic(unknown_isa, config0);
-}
-
-static inline unsigned int decode_config1(struct cpuinfo_mips *c)
-{
-	unsigned int config1;
-
-	config1 = read_c0_config1();
-
-	if (config1 & MIPS_CONF1_MD)
-		c->ases |= MIPS_ASE_MDMX;
-	if (config1 & MIPS_CONF1_WR)
-		c->options |= MIPS_CPU_WATCH;
-	if (config1 & MIPS_CONF1_CA)
-		c->ases |= MIPS_ASE_MIPS16;
-	if (config1 & MIPS_CONF1_EP)
-		c->options |= MIPS_CPU_EJTAG;
-	if (config1 & MIPS_CONF1_FP) {
-		c->options |= MIPS_CPU_FPU;
-		c->options |= MIPS_CPU_32FPR;
-	}
-	if (cpu_has_tlb)
-		c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
-
-	return config1 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config2(struct cpuinfo_mips *c)
-{
-	unsigned int config2;
-
-	config2 = read_c0_config2();
-
-	if (config2 & MIPS_CONF2_SL)
-		c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-
-	return config2 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config3(struct cpuinfo_mips *c)
-{
-	unsigned int config3;
-
-	config3 = read_c0_config3();
-
-	if (config3 & MIPS_CONF3_SM) {
-		c->ases |= MIPS_ASE_SMARTMIPS;
-		c->options |= MIPS_CPU_RIXI;
-	}
-	if (config3 & MIPS_CONF3_RXI)
-		c->options |= MIPS_CPU_RIXI;
-	if (config3 & MIPS_CONF3_DSP)
-		c->ases |= MIPS_ASE_DSP;
-	if (config3 & MIPS_CONF3_DSP2P)
-		c->ases |= MIPS_ASE_DSP2P;
-	if (config3 & MIPS_CONF3_VINT)
-		c->options |= MIPS_CPU_VINT;
-	if (config3 & MIPS_CONF3_VEIC)
-		c->options |= MIPS_CPU_VEIC;
-	if (config3 & MIPS_CONF3_MT)
-		c->ases |= MIPS_ASE_MIPSMT;
-	if (config3 & MIPS_CONF3_ULRI)
-		c->options |= MIPS_CPU_ULRI;
-
-	return config3 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config4(struct cpuinfo_mips *c)
-{
-	unsigned int config4;
-
-	config4 = read_c0_config4();
-
-	if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
-	    && cpu_has_tlb)
-		c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
-
-	c->kscratch_mask = (config4 >> 16) & 0xff;
-
-	return config4 & MIPS_CONF_M;
-}
-
-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
-{
-	int ok;
-
-	/* MIPS32 or MIPS64 compliant CPU.  */
-	c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
-		     MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
-
-	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
-
-	ok = decode_config0(c);			/* Read Config registers.  */
-	BUG_ON(!ok);				/* Arch spec violation!  */
-	if (ok)
-		ok = decode_config1(c);
-	if (ok)
-		ok = decode_config2(c);
-	if (ok)
-		ok = decode_config3(c);
-	if (ok)
-		ok = decode_config4(c);
-
-	mips_probe_watch_registers(c);
-
-	if (cpu_has_mips_r2)
-		c->core = read_c0_ebase() & 0x3ff;
-}
-
 #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
 		| MIPS_CPU_COUNTER)
 
@@ -496,7 +338,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R2000";
 		c->isa_level = MIPS_CPU_ISA_I;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-			     MIPS_CPU_NOFPUEX;
+		             MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
 			c->options |= MIPS_CPU_FPU;
 		c->tlbsize = 64;
@@ -510,13 +352,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 				c->cputype = CPU_R3000A;
 				__cpu_name[cpu] = "R3000A";
 			}
+			break;
 		} else {
 			c->cputype = CPU_R3000;
 			__cpu_name[cpu] = "R3000";
 		}
 		c->isa_level = MIPS_CPU_ISA_I;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
-			     MIPS_CPU_NOFPUEX;
+		             MIPS_CPU_NOFPUEX;
 		if (__cpu_has_fpu())
 			c->options |= MIPS_CPU_FPU;
 		c->tlbsize = 64;
@@ -542,8 +385,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_WATCH | MIPS_CPU_VCE |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_WATCH | MIPS_CPU_VCE |
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_VR41XX:
@@ -589,7 +432,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R4300";
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_R4600:
@@ -601,7 +444,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		c->tlbsize = 48;
 		break;
 	#if 0
-	case PRID_IMP_R4650:
+ 	case PRID_IMP_R4650:
 		/*
 		 * This processor doesn't have an MMU, so it's not
 		 * "real easy" to run Linux on it. It is left purely
@@ -610,9 +453,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		 */
 		c->cputype = CPU_R4650;
 		__cpu_name[cpu] = "R4650";
-		c->isa_level = MIPS_CPU_ISA_III;
-		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
-		c->tlbsize = 48;
+	 	c->isa_level = MIPS_CPU_ISA_III;
+	 	c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
+	 	c->tlbsize = 48;
 		break;
 	#endif
 	case PRID_IMP_TX39:
@@ -643,7 +486,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R4700";
 		c->isa_level = MIPS_CPU_ISA_III;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_TX49:
@@ -660,7 +503,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R5000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R5432:
@@ -668,7 +511,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R5432";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+		             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R5500:
@@ -676,7 +519,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R5500";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+		             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_NEVADA:
@@ -684,7 +527,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "Nevada";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+		             MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
 		c->tlbsize = 48;
 		break;
 	case PRID_IMP_R6000:
@@ -692,7 +535,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R6000";
 		c->isa_level = MIPS_CPU_ISA_II;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_R6000A:
@@ -700,7 +543,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R6000A";
 		c->isa_level = MIPS_CPU_ISA_II;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 32;
 		break;
 	case PRID_IMP_RM7000:
@@ -708,7 +551,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "RM7000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		/*
 		 * Undocumented RM7000:  Bit 29 in the info register of
 		 * the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -724,7 +567,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "RM9000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_LLSC;
 		/*
 		 * Bit 29 in the info register of the RM9000
 		 * indicates if the TLB has 48 or 64 entries.
@@ -739,8 +582,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "RM8000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
-			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 384;      /* has weird TLB: 3-way x 128 */
 		break;
 	case PRID_IMP_R10000:
@@ -748,9 +591,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R10000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+		             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
 	case PRID_IMP_R12000:
@@ -758,9 +601,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R12000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+		             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
	case PRID_IMP_R14000:
@@ -768,9 +611,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "R14000";
 		c->isa_level = MIPS_CPU_ISA_IV;
 		c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
-			     MIPS_CPU_FPU | MIPS_CPU_32FPR |
-			     MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
-			     MIPS_CPU_LLSC;
+		             MIPS_CPU_FPU | MIPS_CPU_32FPR |
+		             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+		             MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
 	case PRID_IMP_LOONGSON2:
@@ -792,19 +635,155 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			     MIPS_CPU_32FPR;
 		c->tlbsize = 64;
 		break;
-	case PRID_IMP_LOONGSON1:
-		decode_configs(c);
+	}
+}
 
-		c->cputype = CPU_LOONGSON1;
+static char unknown_isa[] __cpuinitdata = KERN_ERR \
+	"Unsupported ISA type, c0.config0: %d.";
 
-		switch (c->processor_id & PRID_REV_MASK) {
-		case PRID_REV_LOONGSON1B:
-			__cpu_name[cpu] = "Loongson 1B";
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+	unsigned int config0;
+	int isa;
+
+	config0 = read_c0_config();
+
+	if (((config0 & MIPS_CONF_MT) >> 7) == 1)
+		c->options |= MIPS_CPU_TLB;
+	isa = (config0 & MIPS_CONF_AT) >> 13;
+	switch (isa) {
+	case 0:
+		switch ((config0 & MIPS_CONF_AR) >> 10) {
+		case 0:
+			c->isa_level = MIPS_CPU_ISA_M32R1;
+			break;
+		case 1:
+			c->isa_level = MIPS_CPU_ISA_M32R2;
 			break;
+		default:
+			goto unknown;
 		}
-
 		break;
+	case 2:
+		switch ((config0 & MIPS_CONF_AR) >> 10) {
+		case 0:
+			c->isa_level = MIPS_CPU_ISA_M64R1;
+			break;
+		case 1:
+			c->isa_level = MIPS_CPU_ISA_M64R2;
+			break;
+		default:
+			goto unknown;
+		}
+		break;
+	default:
+		goto unknown;
 	}
+
+	return config0 & MIPS_CONF_M;
+
+unknown:
+	panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+	unsigned int config1;
+
+	config1 = read_c0_config1();
+
+	if (config1 & MIPS_CONF1_MD)
+		c->ases |= MIPS_ASE_MDMX;
+	if (config1 & MIPS_CONF1_WR)
698 | c->options |= MIPS_CPU_WATCH; | ||
699 | if (config1 & MIPS_CONF1_CA) | ||
700 | c->ases |= MIPS_ASE_MIPS16; | ||
701 | if (config1 & MIPS_CONF1_EP) | ||
702 | c->options |= MIPS_CPU_EJTAG; | ||
703 | if (config1 & MIPS_CONF1_FP) { | ||
704 | c->options |= MIPS_CPU_FPU; | ||
705 | c->options |= MIPS_CPU_32FPR; | ||
706 | } | ||
707 | if (cpu_has_tlb) | ||
708 | c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; | ||
709 | |||
710 | return config1 & MIPS_CONF_M; | ||
711 | } | ||
712 | |||
713 | static inline unsigned int decode_config2(struct cpuinfo_mips *c) | ||
714 | { | ||
715 | unsigned int config2; | ||
716 | |||
717 | config2 = read_c0_config2(); | ||
718 | |||
719 | if (config2 & MIPS_CONF2_SL) | ||
720 | c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; | ||
721 | |||
722 | return config2 & MIPS_CONF_M; | ||
723 | } | ||
724 | |||
725 | static inline unsigned int decode_config3(struct cpuinfo_mips *c) | ||
726 | { | ||
727 | unsigned int config3; | ||
728 | |||
729 | config3 = read_c0_config3(); | ||
730 | |||
731 | if (config3 & MIPS_CONF3_SM) | ||
732 | c->ases |= MIPS_ASE_SMARTMIPS; | ||
733 | if (config3 & MIPS_CONF3_DSP) | ||
734 | c->ases |= MIPS_ASE_DSP; | ||
735 | if (config3 & MIPS_CONF3_VINT) | ||
736 | c->options |= MIPS_CPU_VINT; | ||
737 | if (config3 & MIPS_CONF3_VEIC) | ||
738 | c->options |= MIPS_CPU_VEIC; | ||
739 | if (config3 & MIPS_CONF3_MT) | ||
740 | c->ases |= MIPS_ASE_MIPSMT; | ||
741 | if (config3 & MIPS_CONF3_ULRI) | ||
742 | c->options |= MIPS_CPU_ULRI; | ||
743 | |||
744 | return config3 & MIPS_CONF_M; | ||
745 | } | ||
746 | |||
747 | static inline unsigned int decode_config4(struct cpuinfo_mips *c) | ||
748 | { | ||
749 | unsigned int config4; | ||
750 | |||
751 | config4 = read_c0_config4(); | ||
752 | |||
753 | if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT | ||
754 | && cpu_has_tlb) | ||
755 | c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; | ||
756 | |||
757 | c->kscratch_mask = (config4 >> 16) & 0xff; | ||
758 | |||
759 | return config4 & MIPS_CONF_M; | ||
760 | } | ||
761 | |||
762 | static void __cpuinit decode_configs(struct cpuinfo_mips *c) | ||
763 | { | ||
764 | int ok; | ||
765 | |||
766 | /* MIPS32 or MIPS64 compliant CPU. */ | ||
767 | c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | | ||
768 | MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; | ||
769 | |||
770 | c->scache.flags = MIPS_CACHE_NOT_PRESENT; | ||
771 | |||
772 | ok = decode_config0(c); /* Read Config registers. */ | ||
773 | BUG_ON(!ok); /* Arch spec violation! */ | ||
774 | if (ok) | ||
775 | ok = decode_config1(c); | ||
776 | if (ok) | ||
777 | ok = decode_config2(c); | ||
778 | if (ok) | ||
779 | ok = decode_config3(c); | ||
780 | if (ok) | ||
781 | ok = decode_config4(c); | ||
782 | |||
783 | mips_probe_watch_registers(c); | ||
784 | |||
785 | if (cpu_has_mips_r2) | ||
786 | c->core = read_c0_ebase() & 0x3ff; | ||
808 | } | 787 | } |
809 | 788 | ||
810 | static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | 789 | static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) |
@@ -829,10 +808,6 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | |||
829 | c->cputype = CPU_5KC; | 808 | c->cputype = CPU_5KC; |
830 | __cpu_name[cpu] = "MIPS 5Kc"; | 809 | __cpu_name[cpu] = "MIPS 5Kc"; |
831 | break; | 810 | break; |
832 | case PRID_IMP_5KE: | ||
833 | c->cputype = CPU_5KE; | ||
834 | __cpu_name[cpu] = "MIPS 5KE"; | ||
835 | break; | ||
836 | case PRID_IMP_20KC: | 811 | case PRID_IMP_20KC: |
837 | c->cputype = CPU_20KC; | 812 | c->cputype = CPU_20KC; |
838 | __cpu_name[cpu] = "MIPS 20Kc"; | 813 | __cpu_name[cpu] = "MIPS 20Kc"; |
@@ -854,18 +829,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | |||
854 | c->cputype = CPU_74K; | 829 | c->cputype = CPU_74K; |
855 | __cpu_name[cpu] = "MIPS 74Kc"; | 830 | __cpu_name[cpu] = "MIPS 74Kc"; |
856 | break; | 831 | break; |
857 | case PRID_IMP_M14KC: | ||
858 | c->cputype = CPU_M14KC; | ||
859 | __cpu_name[cpu] = "MIPS M14Kc"; | ||
860 | break; | ||
861 | case PRID_IMP_1004K: | 832 | case PRID_IMP_1004K: |
862 | c->cputype = CPU_1004K; | 833 | c->cputype = CPU_1004K; |
863 | __cpu_name[cpu] = "MIPS 1004Kc"; | 834 | __cpu_name[cpu] = "MIPS 1004Kc"; |
864 | break; | 835 | break; |
865 | case PRID_IMP_1074K: | ||
866 | c->cputype = CPU_74K; | ||
867 | __cpu_name[cpu] = "MIPS 1074Kc"; | ||
868 | break; | ||
869 | } | 836 | } |
870 | 837 | ||
871 | spram_config(); | 838 | spram_config(); |
@@ -1011,10 +978,7 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) | |||
1011 | platform: | 978 | platform: |
1012 | set_elf_platform(cpu, "octeon"); | 979 | set_elf_platform(cpu, "octeon"); |
1013 | break; | 980 | break; |
1014 | case PRID_IMP_CAVIUM_CN61XX: | ||
1015 | case PRID_IMP_CAVIUM_CN63XX: | 981 | case PRID_IMP_CAVIUM_CN63XX: |
1016 | case PRID_IMP_CAVIUM_CN66XX: | ||
1017 | case PRID_IMP_CAVIUM_CN68XX: | ||
1018 | c->cputype = CPU_CAVIUM_OCTEON2; | 982 | c->cputype = CPU_CAVIUM_OCTEON2; |
1019 | __cpu_name[cpu] = "Cavium Octeon II"; | 983 | __cpu_name[cpu] = "Cavium Octeon II"; |
1020 | set_elf_platform(cpu, "octeon2"); | 984 | set_elf_platform(cpu, "octeon2"); |
@@ -1046,13 +1010,6 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1046 | { | 1010 | { |
1047 | decode_configs(c); | 1011 | decode_configs(c); |
1048 | 1012 | ||
1049 | if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { | ||
1050 | c->cputype = CPU_ALCHEMY; | ||
1051 | __cpu_name[cpu] = "Au1300"; | ||
1052 | /* following stuff is not for Alchemy */ | ||
1053 | return; | ||
1054 | } | ||
1055 | |||
1056 | c->options = (MIPS_CPU_TLB | | 1013 | c->options = (MIPS_CPU_TLB | |
1057 | MIPS_CPU_4KEX | | 1014 | MIPS_CPU_4KEX | |
1058 | MIPS_CPU_COUNTER | | 1015 | MIPS_CPU_COUNTER | |
@@ -1062,12 +1019,6 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1062 | MIPS_CPU_LLSC); | 1019 | MIPS_CPU_LLSC); |
1063 | 1020 | ||
1064 | switch (c->processor_id & 0xff00) { | 1021 | switch (c->processor_id & 0xff00) { |
1065 | case PRID_IMP_NETLOGIC_XLP8XX: | ||
1066 | case PRID_IMP_NETLOGIC_XLP3XX: | ||
1067 | c->cputype = CPU_XLP; | ||
1068 | __cpu_name[cpu] = "Netlogic XLP"; | ||
1069 | break; | ||
1070 | |||
1071 | case PRID_IMP_NETLOGIC_XLR732: | 1022 | case PRID_IMP_NETLOGIC_XLR732: |
1072 | case PRID_IMP_NETLOGIC_XLR716: | 1023 | case PRID_IMP_NETLOGIC_XLR716: |
1073 | case PRID_IMP_NETLOGIC_XLR532: | 1024 | case PRID_IMP_NETLOGIC_XLR532: |
@@ -1098,21 +1049,14 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1098 | break; | 1049 | break; |
1099 | 1050 | ||
1100 | default: | 1051 | default: |
1101 | pr_info("Unknown Netlogic chip id [%02x]!\n", | 1052 | printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", |
1102 | c->processor_id); | 1053 | c->processor_id); |
1103 | c->cputype = CPU_XLR; | 1054 | c->cputype = CPU_XLR; |
1104 | break; | 1055 | break; |
1105 | } | 1056 | } |
1106 | 1057 | ||
1107 | if (c->cputype == CPU_XLP) { | 1058 | c->isa_level = MIPS_CPU_ISA_M64R1; |
1108 | c->isa_level = MIPS_CPU_ISA_M64R2; | 1059 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; |
1109 | c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); | ||
1110 | /* This will be updated again after all threads are woken up */ | ||
1111 | c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; | ||
1112 | } else { | ||
1113 | c->isa_level = MIPS_CPU_ISA_M64R1; | ||
1114 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | ||
1115 | } | ||
1116 | } | 1060 | } |
1117 | 1061 | ||
1118 | #ifdef CONFIG_64BIT | 1062 | #ifdef CONFIG_64BIT |
@@ -1181,7 +1125,7 @@ __cpuinit void cpu_probe(void) | |||
1181 | c->options &= ~MIPS_CPU_FPU; | 1125 | c->options &= ~MIPS_CPU_FPU; |
1182 | 1126 | ||
1183 | if (mips_dsp_disabled) | 1127 | if (mips_dsp_disabled) |
1184 | c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); | 1128 | c->ases &= ~MIPS_ASE_DSP; |
1185 | 1129 | ||
1186 | if (c->options & MIPS_CPU_FPU) { | 1130 | if (c->options & MIPS_CPU_FPU) { |
1187 | c->fpu_id = cpu_get_fpu_id(); | 1131 | c->fpu_id = cpu_get_fpu_id(); |
@@ -1195,11 +1139,8 @@ __cpuinit void cpu_probe(void) | |||
1195 | } | 1139 | } |
1196 | } | 1140 | } |
1197 | 1141 | ||
1198 | if (cpu_has_mips_r2) { | 1142 | if (cpu_has_mips_r2) |
1199 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | 1143 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; |
1200 | /* R2 has Performance Counter Interrupt indicator */ | ||
1201 | c->options |= MIPS_CPU_PCI; | ||
1202 | } | ||
1203 | else | 1144 | else |
1204 | c->srsets = 1; | 1145 | c->srsets = 1; |
1205 | 1146 | ||
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile index 05a5715ee38..c3479a432ef 100644 --- a/arch/mips/kernel/cpufreq/Makefile +++ b/arch/mips/kernel/cpufreq/Makefile | |||
@@ -2,4 +2,4 @@ | |||
2 | # Makefile for the Linux/MIPS cpufreq. | 2 | # Makefile for the Linux/MIPS cpufreq. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o | 5 | obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o |
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index e7c98e2b78b..ae5db206347 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #include <asm/clock.h> | 20 | #include <asm/clock.h> |
21 | 21 | ||
22 | #include <asm/mach-loongson/loongson.h> | 22 | #include <loongson.h> |
23 | 23 | ||
24 | static uint nowait; | 24 | static uint nowait; |
25 | 25 | ||
@@ -181,25 +181,6 @@ static struct platform_driver platform_driver = { | |||
181 | .id_table = platform_device_ids, | 181 | .id_table = platform_device_ids, |
182 | }; | 182 | }; |
183 | 183 | ||
184 | /* | ||
185 | * This is the simple version of the Loongson-2 wait. Maybe we need to | ||
186 | * do this in interrupt-disabled context. | ||
187 | */ | ||
188 | |||
189 | static DEFINE_SPINLOCK(loongson2_wait_lock); | ||
190 | |||
191 | static void loongson2_cpu_wait(void) | ||
192 | { | ||
193 | unsigned long flags; | ||
194 | u32 cpu_freq; | ||
195 | |||
196 | spin_lock_irqsave(&loongson2_wait_lock, flags); | ||
197 | cpu_freq = LOONGSON_CHIPCFG0; | ||
198 | LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ | ||
199 | LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ | ||
200 | spin_unlock_irqrestore(&loongson2_wait_lock, flags); | ||
201 | } | ||
202 | |||
203 | static int __init cpufreq_init(void) | 184 | static int __init cpufreq_init(void) |
204 | { | 185 | { |
205 | int ret; | 186 | int ret; |
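
For context on the loongson2_cpu_wait() removal above: the helper saved LOONGSON_CHIPCFG0, cleared its low three bits to drop the core into wait mode, then wrote the saved value back, all under a spinlock with interrupts off. The save/modify/restore idiom on a memory-mapped register, as a sketch (chipcfg0 is a hypothetical mapping, not the real register accessor):

#include <stdint.h>

/* Hypothetical pointer to the memory-mapped clock-control register;
 * on Loongson-2 this would be LOONGSON_CHIPCFG0. */
static volatile uint32_t *chipcfg0;

static void cpu_wait_sketch(void)
{
	uint32_t saved = *chipcfg0;	/* remember current state */
	*chipcfg0 = saved & ~0x7u;	/* clear low bits: enter wait mode */
	*chipcfg0 = saved;		/* restore: resume at old setting */
}
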
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c deleted file mode 100644 index 0f53c39324b..00000000000 --- a/arch/mips/kernel/crash.c +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/smp.h> | ||
3 | #include <linux/reboot.h> | ||
4 | #include <linux/kexec.h> | ||
5 | #include <linux/bootmem.h> | ||
6 | #include <linux/crash_dump.h> | ||
7 | #include <linux/delay.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/irq.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | /* This keeps track of which cpu is crashing. */ | ||
14 | static int crashing_cpu = -1; | ||
15 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; | ||
16 | |||
17 | #ifdef CONFIG_SMP | ||
18 | static void crash_shutdown_secondary(void *ignore) | ||
19 | { | ||
20 | struct pt_regs *regs; | ||
21 | int cpu = smp_processor_id(); | ||
22 | |||
23 | regs = task_pt_regs(current); | ||
24 | |||
25 | if (!cpu_online(cpu)) | ||
26 | return; | ||
27 | |||
28 | local_irq_disable(); | ||
29 | if (!cpu_isset(cpu, cpus_in_crash)) | ||
30 | crash_save_cpu(regs, cpu); | ||
31 | cpu_set(cpu, cpus_in_crash); | ||
32 | |||
33 | while (!atomic_read(&kexec_ready_to_reboot)) | ||
34 | cpu_relax(); | ||
35 | relocated_kexec_smp_wait(NULL); | ||
36 | /* NOTREACHED */ | ||
37 | } | ||
38 | |||
39 | static void crash_kexec_prepare_cpus(void) | ||
40 | { | ||
41 | unsigned int msecs; | ||
42 | |||
43 | unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */ | ||
44 | |||
45 | dump_send_ipi(crash_shutdown_secondary); | ||
46 | smp_wmb(); | ||
47 | |||
48 | /* | ||
49 | * The crash CPU sends an IPI and waits for the other CPUs to | ||
50 | * respond. Allow a delay of at least 10 seconds. | ||
51 | */ | ||
52 | pr_emerg("Sending IPI to other cpus...\n"); | ||
53 | msecs = 10000; | ||
54 | while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { | ||
55 | cpu_relax(); | ||
56 | mdelay(1); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | #else /* !defined(CONFIG_SMP) */ | ||
61 | static void crash_kexec_prepare_cpus(void) {} | ||
62 | #endif /* !defined(CONFIG_SMP) */ | ||
63 | |||
64 | void default_machine_crash_shutdown(struct pt_regs *regs) | ||
65 | { | ||
66 | local_irq_disable(); | ||
67 | crashing_cpu = smp_processor_id(); | ||
68 | crash_save_cpu(regs, crashing_cpu); | ||
69 | crash_kexec_prepare_cpus(); | ||
70 | cpu_set(crashing_cpu, cpus_in_crash); | ||
71 | } | ||
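
The deleted crash.c above is a two-sided handshake: the panicking CPU IPIs the secondaries and polls cpus_in_crash for up to ~10 seconds (10000 passes of mdelay(1)), while each secondary saves its registers, checks in, and parks until kexec_ready_to_reboot flips. The same handshake reduced to C11 atomics, as a sketch (the kernel uses a cpumask and real IPIs; the 1 ms delay per pass is elided here):

#include <stdatomic.h>

static atomic_int cpus_in_crash;	/* stands in for the cpumask */
static atomic_bool ready_to_reboot;	/* kexec_ready_to_reboot */

/* Secondary CPU: save state, check in, then spin until reboot time. */
static void secondary_crash_path(void)
{
	/* crash_save_cpu(regs, cpu) would go here */
	atomic_fetch_add(&cpus_in_crash, 1);
	while (!atomic_load(&ready_to_reboot))
		;	/* cpu_relax() in the kernel */
}

/* Panicking CPU: wait up to ~10 s for ncpus secondaries to check in. */
static void wait_for_secondaries(int ncpus)
{
	int msecs = 10000;
	while (atomic_load(&cpus_in_crash) < ncpus && --msecs > 0)
		;	/* mdelay(1) in the kernel */
}
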
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c deleted file mode 100644 index 35bed0d2342..00000000000 --- a/arch/mips/kernel/crash_dump.c +++ /dev/null | |||
@@ -1,75 +0,0 @@ | |||
1 | #include <linux/highmem.h> | ||
2 | #include <linux/bootmem.h> | ||
3 | #include <linux/crash_dump.h> | ||
4 | #include <asm/uaccess.h> | ||
5 | |||
6 | static int __init parse_savemaxmem(char *p) | ||
7 | { | ||
8 | if (p) | ||
9 | saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; | ||
10 | |||
11 | return 1; | ||
12 | } | ||
13 | __setup("savemaxmem=", parse_savemaxmem); | ||
14 | |||
15 | |||
16 | static void *kdump_buf_page; | ||
17 | |||
18 | /** | ||
19 | * copy_oldmem_page - copy one page from "oldmem" | ||
20 | * @pfn: page frame number to be copied | ||
21 | * @buf: target memory address for the copy; this can be in kernel address | ||
22 | * space or user address space (see @userbuf) | ||
23 | * @csize: number of bytes to copy | ||
24 | * @offset: offset in bytes into the page (based on pfn) to begin the copy | ||
25 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), | ||
26 | * otherwise @buf is in kernel address space, use memcpy(). | ||
27 | * | ||
28 | * Copy a page from "oldmem". For this page, there is no pte mapped | ||
29 | * in the current kernel. | ||
30 | * | ||
31 | * Calling copy_to_user() in atomic context is not desirable, so we first | ||
32 | * copy the data to a pre-allocated kernel page and then copy it to user | ||
33 | * space in non-atomic context. | ||
34 | */ | ||
35 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
36 | size_t csize, unsigned long offset, int userbuf) | ||
37 | { | ||
38 | void *vaddr; | ||
39 | |||
40 | if (!csize) | ||
41 | return 0; | ||
42 | |||
43 | vaddr = kmap_atomic_pfn(pfn); | ||
44 | |||
45 | if (!userbuf) { | ||
46 | memcpy(buf, (vaddr + offset), csize); | ||
47 | kunmap_atomic(vaddr); | ||
48 | } else { | ||
49 | if (!kdump_buf_page) { | ||
50 | pr_warning("Kdump: Kdump buffer page not allocated\n"); | ||
51 | |||
52 | return -EFAULT; | ||
53 | } | ||
54 | copy_page(kdump_buf_page, vaddr); | ||
55 | kunmap_atomic(vaddr); | ||
56 | if (copy_to_user(buf, (kdump_buf_page + offset), csize)) | ||
57 | return -EFAULT; | ||
58 | } | ||
59 | |||
60 | return csize; | ||
61 | } | ||
62 | |||
63 | static int __init kdump_buf_page_init(void) | ||
64 | { | ||
65 | int ret = 0; | ||
66 | |||
67 | kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
68 | if (!kdump_buf_page) { | ||
69 | pr_warning("Kdump: Failed to allocate kdump buffer page\n"); | ||
70 | ret = -ENOMEM; | ||
71 | } | ||
72 | |||
73 | return ret; | ||
74 | } | ||
75 | arch_initcall(kdump_buf_page_init); | ||
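
copy_oldmem_page() above is the arch hook behind reading the crashed kernel's memory: a generic reader walks "oldmem" in page-sized pieces and calls it once per chunk. A hypothetical caller might look like the sketch below; read_oldmem_range() is illustrative, not the kernel's actual /proc/vmcore read path:

/* Illustrative caller of copy_oldmem_page() above. Copies
 * [addr, addr + count) from the crashed kernel's memory into a
 * kernel buffer, one page-bounded chunk at a time. */
static ssize_t read_oldmem_range(char *buf, unsigned long addr, size_t count)
{
	size_t done = 0;

	while (done < count) {
		unsigned long pfn = (addr + done) >> PAGE_SHIFT;
		unsigned long offset = (addr + done) & (PAGE_SIZE - 1);
		size_t chunk = count - done;
		ssize_t ret;

		if (chunk > PAGE_SIZE - offset)
			chunk = PAGE_SIZE - offset;
		ret = copy_oldmem_page(pfn, buf + done, chunk, offset, 0);
		if (ret < 0)
			return ret;
		done += ret;
	}
	return done;
}
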
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e5786858cdb..37acfa036d4 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -36,11 +36,6 @@ FEXPORT(ret_from_exception) | |||
36 | FEXPORT(ret_from_irq) | 36 | FEXPORT(ret_from_irq) |
37 | LONG_S s0, TI_REGS($28) | 37 | LONG_S s0, TI_REGS($28) |
38 | FEXPORT(__ret_from_irq) | 38 | FEXPORT(__ret_from_irq) |
39 | /* | ||
40 | * We can be coming here from a syscall done in the kernel space, | ||
41 | * e.g. a failed kernel_execve(). | ||
42 | */ | ||
43 | resume_userspace_check: | ||
44 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | 39 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? |
45 | andi t0, t0, KU_USER | 40 | andi t0, t0, KU_USER |
46 | beqz t0, resume_kernel | 41 | beqz t0, resume_kernel |
@@ -70,12 +65,6 @@ need_resched: | |||
70 | b need_resched | 65 | b need_resched |
71 | #endif | 66 | #endif |
72 | 67 | ||
73 | FEXPORT(ret_from_kernel_thread) | ||
74 | jal schedule_tail # a0 = struct task_struct *prev | ||
75 | move a0, s1 | ||
76 | jal s0 | ||
77 | j syscall_exit | ||
78 | |||
79 | FEXPORT(ret_from_fork) | 68 | FEXPORT(ret_from_fork) |
80 | jal schedule_tail # a0 = struct task_struct *prev | 69 | jal schedule_tail # a0 = struct task_struct *prev |
81 | 70 | ||
@@ -88,7 +77,7 @@ FEXPORT(syscall_exit) | |||
88 | and t0, a2, t0 | 77 | and t0, a2, t0 |
89 | bnez t0, syscall_exit_work | 78 | bnez t0, syscall_exit_work |
90 | 79 | ||
91 | restore_all: # restore full frame | 80 | FEXPORT(restore_all) # restore full frame |
92 | #ifdef CONFIG_MIPS_MT_SMTC | 81 | #ifdef CONFIG_MIPS_MT_SMTC |
93 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 82 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
94 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | 83 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ |
@@ -128,7 +117,7 @@ restore_all: # restore full frame | |||
128 | RESTORE_TEMP | 117 | RESTORE_TEMP |
129 | RESTORE_AT | 118 | RESTORE_AT |
130 | RESTORE_STATIC | 119 | RESTORE_STATIC |
131 | restore_partial: # restore partial frame | 120 | FEXPORT(restore_partial) # restore partial frame |
132 | #ifdef CONFIG_TRACE_IRQFLAGS | 121 | #ifdef CONFIG_TRACE_IRQFLAGS |
133 | SAVE_STATIC | 122 | SAVE_STATIC |
134 | SAVE_AT | 123 | SAVE_AT |
@@ -173,20 +162,11 @@ work_notifysig: # deal with pending signals and | |||
173 | move a0, sp | 162 | move a0, sp |
174 | li a1, 0 | 163 | li a1, 0 |
175 | jal do_notify_resume # a2 already loaded | 164 | jal do_notify_resume # a2 already loaded |
176 | j resume_userspace_check | 165 | j resume_userspace |
177 | 166 | ||
178 | FEXPORT(syscall_exit_partial) | 167 | FEXPORT(syscall_exit_work_partial) |
179 | local_irq_disable # make sure need_resched doesn't | ||
180 | # change between here and return | ||
181 | LONG_L a2, TI_FLAGS($28) # current->work | ||
182 | li t0, _TIF_ALLWORK_MASK | ||
183 | and t0, a2 | ||
184 | beqz t0, restore_partial | ||
185 | SAVE_STATIC | 168 | SAVE_STATIC |
186 | syscall_exit_work: | 169 | syscall_exit_work: |
187 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | ||
188 | andi t0, t0, KU_USER | ||
189 | beqz t0, resume_kernel | ||
190 | li t0, _TIF_WORK_SYSCALL_EXIT | 170 | li t0, _TIF_WORK_SYSCALL_EXIT |
191 | and t0, a2 # a2 is preloaded with TI_FLAGS | 171 | and t0, a2 # a2 is preloaded with TI_FLAGS |
192 | beqz t0, work_pending # trace bit set? | 172 | beqz t0, work_pending # trace bit set? |
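
Reading the entry.S assembly above as C, the post-patch syscall exit boils down to: disable interrupts so the flags cannot change under us, re-read the thread flags, and either fall through to the register-frame restore or divert into the work loop. A rough, purely illustrative rendering (the called names mirror the assembly labels, not real functions):

/* Illustrative only: C shape of the syscall_exit path above. */
void syscall_exit_sketch(void)
{
	unsigned long work;

	local_irq_disable();	/* flags must not change from here on */
	work = current_thread_info()->flags;

	if (work & _TIF_ALLWORK_MASK)
		syscall_exit_work();	/* tracing, signals, reschedule */
	else
		restore_all();		/* pop the saved register frame */
}
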
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 8a0096d6281..8882e5766f2 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/mipsregs.h> | 19 | #include <asm/mipsregs.h> |
20 | #include <asm/stackframe.h> | 20 | #include <asm/stackframe.h> |
21 | #include <asm/war.h> | 21 | #include <asm/war.h> |
22 | #include <asm/page.h> | ||
22 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
23 | 24 | ||
24 | #define PANIC_PIC(msg) \ | 25 | #define PANIC_PIC(msg) \ |
@@ -482,8 +483,8 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
482 | MFC0 k1, CP0_ENTRYHI | 483 | MFC0 k1, CP0_ENTRYHI |
483 | andi k1, 0xff /* ASID_MASK */ | 484 | andi k1, 0xff /* ASID_MASK */ |
484 | MFC0 k0, CP0_EPC | 485 | MFC0 k0, CP0_EPC |
485 | PTR_SRL k0, _PAGE_SHIFT + 1 | 486 | PTR_SRL k0, PAGE_SHIFT + 1 |
486 | PTR_SLL k0, _PAGE_SHIFT + 1 | 487 | PTR_SLL k0, PAGE_SHIFT + 1 |
487 | or k1, k0 | 488 | or k1, k0 |
488 | MTC0 k1, CP0_ENTRYHI | 489 | MTC0 k1, CP0_ENTRYHI |
489 | mtc0_tlbw_hazard | 490 | mtc0_tlbw_hazard |
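
The PTR_SRL/PTR_SLL pair in the NMI handler above is the classic shift-down/shift-up trick: it clears the low PAGE_SHIFT + 1 bits of the EPC so that only the page-aligned (VPN2) part is OR-ed with the ASID into EntryHi. The equivalent C, as a sketch:

/* Equivalent C for the PTR_SRL/PTR_SLL pair above: drop the low
 * PAGE_SHIFT + 1 bits of the EPC, keeping only the VPN2 part. */
static unsigned long epc_to_vpn2(unsigned long epc)
{
	return epc & ~((1UL << (PAGE_SHIFT + 1)) - 1);
}
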
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index fcf97312f32..ea695d9605e 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/asmmacro.h> | 21 | #include <asm/asmmacro.h> |
22 | #include <asm/irqflags.h> | 22 | #include <asm/irqflags.h> |
23 | #include <asm/regdef.h> | 23 | #include <asm/regdef.h> |
24 | #include <asm/page.h> | ||
24 | #include <asm/pgtable-bits.h> | 25 | #include <asm/pgtable-bits.h> |
25 | #include <asm/mipsregs.h> | 26 | #include <asm/mipsregs.h> |
26 | #include <asm/stackframe.h> | 27 | #include <asm/stackframe.h> |
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index c5bc344fc74..be4ee7d63e0 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -4,7 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | #include <linux/clockchips.h> | 5 | #include <linux/clockchips.h> |
6 | #include <linux/i8253.h> | 6 | #include <linux/i8253.h> |
7 | #include <linux/export.h> | 7 | #include <linux/module.h> |
8 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | 10 | ||
@@ -19,7 +19,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
19 | 19 | ||
20 | static struct irqaction irq0 = { | 20 | static struct irqaction irq0 = { |
21 | .handler = timer_interrupt, | 21 | .handler = timer_interrupt, |
22 | .flags = IRQF_NOBALANCING | IRQF_TIMER, | 22 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, |
23 | .name = "timer" | 23 | .name = "timer" |
24 | }; | 24 | }; |
25 | 25 | ||
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 485e6a961b3..0c527f65219 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -1,11 +1,5 @@ | |||
1 | /* | 1 | #undef DEBUG |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | |
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) | ||
7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
8 | */ | ||
9 | #include <linux/bitmap.h> | 3 | #include <linux/bitmap.h> |
10 | #include <linux/init.h> | 4 | #include <linux/init.h> |
11 | #include <linux/smp.h> | 5 | #include <linux/smp.h> |
@@ -13,80 +7,33 @@ | |||
13 | 7 | ||
14 | #include <asm/io.h> | 8 | #include <asm/io.h> |
15 | #include <asm/gic.h> | 9 | #include <asm/gic.h> |
16 | #include <asm/setup.h> | ||
17 | #include <asm/traps.h> | ||
18 | #include <asm/gcmpregs.h> | 10 | #include <asm/gcmpregs.h> |
19 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
20 | #include <asm-generic/bitops/find.h> | 12 | #include <asm-generic/bitops/find.h> |
21 | 13 | ||
22 | unsigned long _gic_base; | ||
23 | unsigned int gic_irq_base; | ||
24 | unsigned int gic_irq_flags[GIC_NUM_INTRS]; | ||
25 | 14 | ||
26 | /* The index into this array is the vector # of the interrupt. */ | 15 | static unsigned long _gic_base; |
27 | struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS]; | 16 | static unsigned int _irqbase; |
17 | static unsigned int gic_irq_flags[GIC_NUM_INTRS]; | ||
18 | #define GIC_IRQ_FLAG_EDGE 0x0001 | ||
28 | 19 | ||
29 | static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; | 20 | struct gic_pcpu_mask pcpu_masks[NR_CPUS]; |
30 | static struct gic_pending_regs pending_regs[NR_CPUS]; | 21 | static struct gic_pending_regs pending_regs[NR_CPUS]; |
31 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; | 22 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; |
32 | 23 | ||
33 | unsigned int gic_get_timer_pending(void) | ||
34 | { | ||
35 | unsigned int vpe_pending; | ||
36 | |||
37 | GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0); | ||
38 | GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending); | ||
39 | return (vpe_pending & GIC_VPE_PEND_TIMER_MSK); | ||
40 | } | ||
41 | |||
42 | void gic_bind_eic_interrupt(int irq, int set) | ||
43 | { | ||
44 | /* Convert irq vector # to hw int # */ | ||
45 | irq -= GIC_PIN_TO_VEC_OFFSET; | ||
46 | |||
47 | /* Set irq to use shadow set */ | ||
48 | GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set); | ||
49 | } | ||
50 | |||
51 | void gic_send_ipi(unsigned int intr) | 24 | void gic_send_ipi(unsigned int intr) |
52 | { | 25 | { |
26 | pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__, | ||
27 | read_c0_status()); | ||
53 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); | 28 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); |
54 | } | 29 | } |
55 | 30 | ||
56 | static void gic_eic_irq_dispatch(void) | 31 | /* This is Malta specific and needs to be exported */ |
57 | { | ||
58 | unsigned int cause = read_c0_cause(); | ||
59 | int irq; | ||
60 | |||
61 | irq = (cause & ST0_IM) >> STATUSB_IP2; | ||
62 | if (irq == 0) | ||
63 | irq = -1; | ||
64 | |||
65 | if (irq >= 0) | ||
66 | do_IRQ(gic_irq_base + irq); | ||
67 | else | ||
68 | spurious_interrupt(); | ||
69 | } | ||
70 | |||
71 | static void __init vpe_local_setup(unsigned int numvpes) | 32 | static void __init vpe_local_setup(unsigned int numvpes) |
72 | { | 33 | { |
73 | unsigned long timer_intr = GIC_INT_TMR; | ||
74 | unsigned long perf_intr = GIC_INT_PERFCTR; | ||
75 | unsigned int vpe_ctl; | ||
76 | int i; | 34 | int i; |
77 | 35 | unsigned long timer_interrupt = 5, perf_interrupt = 5; | |
78 | if (cpu_has_veic) { | 36 | unsigned int vpe_ctl; |
79 | /* | ||
80 | * GIC timer interrupt -> CPU HW Int X (vector X+2) -> | ||
81 | * map to pin X+2-1 (since GIC adds 1) | ||
82 | */ | ||
83 | timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); | ||
84 | /* | ||
85 | * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) -> | ||
86 | * map to pin X+2-1 (since GIC adds 1) | ||
87 | */ | ||
88 | perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); | ||
89 | } | ||
90 | 37 | ||
91 | /* | 38 | /* |
92 | * Setup the default performance counter timer interrupts | 39 | * Setup the default performance counter timer interrupts |
@@ -99,20 +46,11 @@ static void __init vpe_local_setup(unsigned int numvpes) | |||
99 | GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl); | 46 | GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl); |
100 | if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) | 47 | if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) |
101 | GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), | 48 | GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), |
102 | GIC_MAP_TO_PIN_MSK | timer_intr); | 49 | GIC_MAP_TO_PIN_MSK | timer_interrupt); |
103 | if (cpu_has_veic) { | ||
104 | set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET, | ||
105 | gic_eic_irq_dispatch); | ||
106 | gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK; | ||
107 | } | ||
108 | 50 | ||
109 | if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) | 51 | if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) |
110 | GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), | 52 | GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), |
111 | GIC_MAP_TO_PIN_MSK | perf_intr); | 53 | GIC_MAP_TO_PIN_MSK | perf_interrupt); |
112 | if (cpu_has_veic) { | ||
113 | set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch); | ||
114 | gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK; | ||
115 | } | ||
116 | } | 54 | } |
117 | } | 55 | } |
118 | 56 | ||
@@ -142,30 +80,51 @@ unsigned int gic_get_int(void) | |||
142 | bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); | 80 | bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS); |
143 | bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); | 81 | bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS); |
144 | 82 | ||
145 | return find_first_bit(pending, GIC_NUM_INTRS); | 83 | i = find_first_bit(pending, GIC_NUM_INTRS); |
84 | |||
85 | pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i); | ||
86 | |||
87 | return i; | ||
88 | } | ||
89 | |||
90 | static void gic_irq_ack(struct irq_data *d) | ||
91 | { | ||
92 | unsigned int irq = d->irq - _irqbase; | ||
93 | |||
94 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
95 | GIC_CLR_INTR_MASK(irq); | ||
96 | |||
97 | if (gic_irq_flags[irq] & GIC_IRQ_FLAG_EDGE) | ||
98 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); | ||
146 | } | 99 | } |
147 | 100 | ||
148 | static void gic_mask_irq(struct irq_data *d) | 101 | static void gic_mask_irq(struct irq_data *d) |
149 | { | 102 | { |
150 | GIC_CLR_INTR_MASK(d->irq - gic_irq_base); | 103 | unsigned int irq = d->irq - _irqbase; |
104 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
105 | GIC_CLR_INTR_MASK(irq); | ||
151 | } | 106 | } |
152 | 107 | ||
153 | static void gic_unmask_irq(struct irq_data *d) | 108 | static void gic_unmask_irq(struct irq_data *d) |
154 | { | 109 | { |
155 | GIC_SET_INTR_MASK(d->irq - gic_irq_base); | 110 | unsigned int irq = d->irq - _irqbase; |
111 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
112 | GIC_SET_INTR_MASK(irq); | ||
156 | } | 113 | } |
157 | 114 | ||
158 | #ifdef CONFIG_SMP | 115 | #ifdef CONFIG_SMP |
116 | |||
159 | static DEFINE_SPINLOCK(gic_lock); | 117 | static DEFINE_SPINLOCK(gic_lock); |
160 | 118 | ||
161 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | 119 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, |
162 | bool force) | 120 | bool force) |
163 | { | 121 | { |
164 | unsigned int irq = (d->irq - gic_irq_base); | 122 | unsigned int irq = d->irq - _irqbase; |
165 | cpumask_t tmp = CPU_MASK_NONE; | 123 | cpumask_t tmp = CPU_MASK_NONE; |
166 | unsigned long flags; | 124 | unsigned long flags; |
167 | int i; | 125 | int i; |
168 | 126 | ||
127 | pr_debug("%s(%d) called\n", __func__, irq); | ||
169 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 128 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
170 | if (cpus_empty(tmp)) | 129 | if (cpus_empty(tmp)) |
171 | return -1; | 130 | return -1; |
@@ -195,7 +154,7 @@ static struct irq_chip gic_irq_controller = { | |||
195 | .irq_mask = gic_mask_irq, | 154 | .irq_mask = gic_mask_irq, |
196 | .irq_mask_ack = gic_mask_irq, | 155 | .irq_mask_ack = gic_mask_irq, |
197 | .irq_unmask = gic_unmask_irq, | 156 | .irq_unmask = gic_unmask_irq, |
198 | .irq_eoi = gic_finish_irq, | 157 | .irq_eoi = gic_unmask_irq, |
199 | #ifdef CONFIG_SMP | 158 | #ifdef CONFIG_SMP |
200 | .irq_set_affinity = gic_set_affinity, | 159 | .irq_set_affinity = gic_set_affinity, |
201 | #endif | 160 | #endif |
@@ -205,8 +164,6 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, | |||
205 | unsigned int pin, unsigned int polarity, unsigned int trigtype, | 164 | unsigned int pin, unsigned int polarity, unsigned int trigtype, |
206 | unsigned int flags) | 165 | unsigned int flags) |
207 | { | 166 | { |
208 | struct gic_shared_intr_map *map_ptr; | ||
209 | |||
210 | /* Setup Intr to Pin mapping */ | 167 | /* Setup Intr to Pin mapping */ |
211 | if (pin & GIC_MAP_TO_NMI_MSK) { | 168 | if (pin & GIC_MAP_TO_NMI_MSK) { |
212 | GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); | 169 | GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); |
@@ -221,14 +178,6 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, | |||
221 | GIC_MAP_TO_PIN_MSK | pin); | 178 | GIC_MAP_TO_PIN_MSK | pin); |
222 | /* Setup Intr to CPU mapping */ | 179 | /* Setup Intr to CPU mapping */ |
223 | GIC_SH_MAP_TO_VPE_SMASK(intr, cpu); | 180 | GIC_SH_MAP_TO_VPE_SMASK(intr, cpu); |
224 | if (cpu_has_veic) { | ||
225 | set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET, | ||
226 | gic_eic_irq_dispatch); | ||
227 | map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET]; | ||
228 | if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR) | ||
229 | BUG(); | ||
230 | map_ptr->intr_list[map_ptr->num_shared_intr++] = intr; | ||
231 | } | ||
232 | } | 181 | } |
233 | 182 | ||
234 | /* Setup Intr Polarity */ | 183 | /* Setup Intr Polarity */ |
@@ -242,39 +191,26 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, | |||
242 | /* Initialise per-cpu Interrupt software masks */ | 191 | /* Initialise per-cpu Interrupt software masks */ |
243 | if (flags & GIC_FLAG_IPI) | 192 | if (flags & GIC_FLAG_IPI) |
244 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); | 193 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); |
245 | if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0)) | 194 | if (flags & GIC_FLAG_TRANSPARENT) |
246 | GIC_SET_INTR_MASK(intr); | 195 | GIC_SET_INTR_MASK(intr); |
247 | if (trigtype == GIC_TRIG_EDGE) | 196 | if (trigtype == GIC_TRIG_EDGE) |
248 | gic_irq_flags[intr] |= GIC_TRIG_EDGE; | 197 | gic_irq_flags[intr] |= GIC_IRQ_FLAG_EDGE; |
249 | } | 198 | } |
250 | 199 | ||
251 | static void __init gic_basic_init(int numintrs, int numvpes, | 200 | static void __init gic_basic_init(int numintrs, int numvpes, |
252 | struct gic_intr_map *intrmap, int mapsize) | 201 | struct gic_intr_map *intrmap, int mapsize) |
253 | { | 202 | { |
254 | unsigned int i, cpu; | 203 | unsigned int i, cpu; |
255 | unsigned int pin_offset = 0; | ||
256 | |||
257 | board_bind_eic_interrupt = &gic_bind_eic_interrupt; | ||
258 | 204 | ||
259 | /* Setup defaults */ | 205 | /* Setup defaults */ |
260 | for (i = 0; i < numintrs; i++) { | 206 | for (i = 0; i < numintrs; i++) { |
261 | GIC_SET_POLARITY(i, GIC_POL_POS); | 207 | GIC_SET_POLARITY(i, GIC_POL_POS); |
262 | GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); | 208 | GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); |
263 | GIC_CLR_INTR_MASK(i); | 209 | GIC_CLR_INTR_MASK(i); |
264 | if (i < GIC_NUM_INTRS) { | 210 | if (i < GIC_NUM_INTRS) |
265 | gic_irq_flags[i] = 0; | 211 | gic_irq_flags[i] = 0; |
266 | gic_shared_intr_map[i].num_shared_intr = 0; | ||
267 | gic_shared_intr_map[i].local_intr_mask = 0; | ||
268 | } | ||
269 | } | 212 | } |
270 | 213 | ||
271 | /* | ||
272 | * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract | ||
273 | * one because the GIC will add one (since 0=no intr). | ||
274 | */ | ||
275 | if (cpu_has_veic) | ||
276 | pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET); | ||
277 | |||
278 | /* Setup specifics */ | 214 | /* Setup specifics */ |
279 | for (i = 0; i < mapsize; i++) { | 215 | for (i = 0; i < mapsize; i++) { |
280 | cpu = intrmap[i].cpunum; | 216 | cpu = intrmap[i].cpunum; |
@@ -284,13 +220,16 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
284 | continue; | 220 | continue; |
285 | gic_setup_intr(i, | 221 | gic_setup_intr(i, |
286 | intrmap[i].cpunum, | 222 | intrmap[i].cpunum, |
287 | intrmap[i].pin + pin_offset, | 223 | intrmap[i].pin, |
288 | intrmap[i].polarity, | 224 | intrmap[i].polarity, |
289 | intrmap[i].trigtype, | 225 | intrmap[i].trigtype, |
290 | intrmap[i].flags); | 226 | intrmap[i].flags); |
291 | } | 227 | } |
292 | 228 | ||
293 | vpe_local_setup(numvpes); | 229 | vpe_local_setup(numvpes); |
230 | |||
231 | for (i = _irqbase; i < (_irqbase + numintrs); i++) | ||
232 | irq_set_chip(i, &gic_irq_controller); | ||
294 | } | 233 | } |
295 | 234 | ||
296 | void __init gic_init(unsigned long gic_base_addr, | 235 | void __init gic_init(unsigned long gic_base_addr, |
@@ -303,7 +242,7 @@ void __init gic_init(unsigned long gic_base_addr, | |||
303 | 242 | ||
304 | _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, | 243 | _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, |
305 | gic_addrspace_size); | 244 | gic_addrspace_size); |
306 | gic_irq_base = irqbase; | 245 | _irqbase = irqbase; |
307 | 246 | ||
308 | GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); | 247 | GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); |
309 | numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> | 248 | numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> |
@@ -312,9 +251,8 @@ void __init gic_init(unsigned long gic_base_addr, | |||
312 | 251 | ||
313 | numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> | 252 | numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> |
314 | GIC_SH_CONFIG_NUMVPES_SHF; | 253 | GIC_SH_CONFIG_NUMVPES_SHF; |
315 | numvpes = numvpes + 1; | ||
316 | 254 | ||
317 | gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); | 255 | pr_debug("%s called\n", __func__); |
318 | 256 | ||
319 | gic_platform_init(numintrs, &gic_irq_controller); | 257 | gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); |
320 | } | 258 | } |
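
gic_get_int() above computes pending & intrmask & pcpu_mask across GIC_NUM_INTRS bits and takes the first set bit; the per-CPU mask is what lets one shared GIC route different interrupts to different VPEs. A reduced sketch with plain words instead of the kernel bitmap API, assuming at most 32 interrupts:

#include <stdint.h>

/* Reduced gic_get_int(): one 32-bit word instead of bitmap_and()
 * over GIC_NUM_INTRS bits. Returns the lowest pending, unmasked
 * interrupt routed to this CPU, or 32 if there is none. */
static int gic_get_int_sketch(uint32_t pending, uint32_t intrmask,
			      uint32_t pcpu_mask)
{
	uint32_t hits = pending & intrmask & pcpu_mask;

	return hits ? __builtin_ctz(hits) : 32;
}
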
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 14ac52c5ae8..0c6afeed89d 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * | 9 | * |
10 | * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org> | 10 | * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org> |
11 | */ | 11 | */ |
12 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index b0662cf97ea..a8a8977d588 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <asm/irq_cpu.h> | 17 | #include <asm/irq_cpu.h> |
18 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
19 | #include <asm/system.h> | ||
19 | 20 | ||
20 | static inline void unmask_rm7k_irq(struct irq_data *d) | 21 | static inline void unmask_rm7k_irq(struct irq_data *d) |
21 | { | 22 | { |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index a5aa43d07c8..b53970d8099 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
16 | #include <linux/module.h> | ||
16 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
17 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
18 | #include <linux/random.h> | 19 | #include <linux/random.h> |
@@ -23,6 +24,7 @@ | |||
23 | #include <linux/ftrace.h> | 24 | #include <linux/ftrace.h> |
24 | 25 | ||
25 | #include <linux/atomic.h> | 26 | #include <linux/atomic.h> |
27 | #include <asm/system.h> | ||
26 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
27 | 29 | ||
28 | #ifdef CONFIG_KGDB | 30 | #ifdef CONFIG_KGDB |
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 972263bcf40..191eb52228c 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/irq_cpu.h> | 35 | #include <asm/irq_cpu.h> |
36 | #include <asm/mipsregs.h> | 36 | #include <asm/mipsregs.h> |
37 | #include <asm/mipsmtregs.h> | 37 | #include <asm/mipsmtregs.h> |
38 | #include <asm/system.h> | ||
38 | 39 | ||
39 | static inline void unmask_mips_irq(struct irq_data *d) | 40 | static inline void unmask_mips_irq(struct irq_data *d) |
40 | { | 41 | { |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 23817a6e32b..f4546e97c60 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
@@ -283,15 +283,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, | |||
283 | struct pt_regs *regs = args->regs; | 283 | struct pt_regs *regs = args->regs; |
284 | int trap = (regs->cp0_cause & 0x7c) >> 2; | 284 | int trap = (regs->cp0_cause & 0x7c) >> 2; |
285 | 285 | ||
286 | #ifdef CONFIG_KPROBES | ||
287 | /* | ||
288 | * Return immediately if the kprobes fault notifier has set | ||
289 | * DIE_PAGE_FAULT. | ||
290 | */ | ||
291 | if (cmd == DIE_PAGE_FAULT) | ||
292 | return NOTIFY_DONE; | ||
293 | #endif /* CONFIG_KPROBES */ | ||
294 | |||
295 | /* Userspace events, ignore. */ | 286 | /* Userspace events, ignore. */ |
296 | if (user_mode(regs)) | 287 | if (user_mode(regs)) |
297 | return NOTIFY_DONE; | 288 | return NOTIFY_DONE; |
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 158467da9bc..ee28683fc2a 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c | |||
@@ -25,12 +25,10 @@ | |||
25 | 25 | ||
26 | #include <linux/kprobes.h> | 26 | #include <linux/kprobes.h> |
27 | #include <linux/preempt.h> | 27 | #include <linux/preempt.h> |
28 | #include <linux/uaccess.h> | ||
29 | #include <linux/kdebug.h> | 28 | #include <linux/kdebug.h> |
30 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
31 | 30 | ||
32 | #include <asm/ptrace.h> | 31 | #include <asm/ptrace.h> |
33 | #include <asm/branch.h> | ||
34 | #include <asm/break.h> | 32 | #include <asm/break.h> |
35 | #include <asm/inst.h> | 33 | #include <asm/inst.h> |
36 | 34 | ||
@@ -114,49 +112,17 @@ insn_ok: | |||
114 | return 0; | 112 | return 0; |
115 | } | 113 | } |
116 | 114 | ||
117 | /* | ||
118 | * insn_has_ll_or_sc() checks whether the instruction is an ll or sc; | ||
119 | * putting a breakpoint on top of an atomic ll/sc pair is a bad idea, | ||
120 | * so we need to prevent it and refuse kprobe insertion for such | ||
121 | * instructions; we cannot do much about a breakpoint in the middle of | ||
122 | * an ll/sc pair; it is up to the user to avoid those places | ||
123 | */ | ||
124 | static int __kprobes insn_has_ll_or_sc(union mips_instruction insn) | ||
125 | { | ||
126 | int ret = 0; | ||
127 | |||
128 | switch (insn.i_format.opcode) { | ||
129 | case ll_op: | ||
130 | case lld_op: | ||
131 | case sc_op: | ||
132 | case scd_op: | ||
133 | ret = 1; | ||
134 | break; | ||
135 | default: | ||
136 | break; | ||
137 | } | ||
138 | return ret; | ||
139 | } | ||
140 | |||
141 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 115 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
142 | { | 116 | { |
143 | union mips_instruction insn; | 117 | union mips_instruction insn; |
144 | union mips_instruction prev_insn; | 118 | union mips_instruction prev_insn; |
145 | int ret = 0; | 119 | int ret = 0; |
146 | 120 | ||
121 | prev_insn = p->addr[-1]; | ||
147 | insn = p->addr[0]; | 122 | insn = p->addr[0]; |
148 | 123 | ||
149 | if (insn_has_ll_or_sc(insn)) { | 124 | if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) { |
150 | pr_notice("Kprobes for ll and sc instructions are not " | 125 | pr_notice("Kprobes for branch and jump instructions are not supported\n");
151 | "supported\n"); | ||
152 | ret = -EINVAL; | ||
153 | goto out; | ||
154 | } | ||
155 | |||
156 | if ((probe_kernel_read(&prev_insn, p->addr - 1, | ||
157 | sizeof(mips_instruction)) == 0) && | ||
158 | insn_has_delayslot(prev_insn)) { | ||
159 | pr_notice("Kprobes for branch delayslot are not supported\n"); | ||
160 | ret = -EINVAL; | 126 | ret = -EINVAL; |
161 | goto out; | 127 | goto out; |
162 | } | 128 | } |
@@ -172,20 +138,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
172 | * In the kprobe->ainsn.insn[] array we store the original | 138 | * In the kprobe->ainsn.insn[] array we store the original |
173 | * instruction at index zero and a break trap instruction at | 139 | * instruction at index zero and a break trap instruction at |
174 | * index one. | 140 | * index one. |
175 | * | ||
176 | * On MIPS arch if the instruction at probed address is a | ||
177 | * branch instruction, we need to execute the instruction at | ||
178 | * Branch Delayslot (BD) at the time of probe hit. As MIPS also | ||
179 | * doesn't have single stepping support, the BD instruction can | ||
180 | * not be executed in-line and it would be executed on SSOL slot | ||
181 | * using a normal breakpoint instruction in the next slot. | ||
182 | * So, read the instruction and save it for later execution. | ||
183 | */ | 141 | */ |
184 | if (insn_has_delayslot(insn)) | ||
185 | memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t)); | ||
186 | else | ||
187 | memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | ||
188 | 142 | ||
143 | memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | ||
189 | p->ainsn.insn[1] = breakpoint2_insn; | 144 | p->ainsn.insn[1] = breakpoint2_insn; |
190 | p->opcode = *p->addr; | 145 | p->opcode = *p->addr; |
191 | 146 | ||
@@ -236,96 +191,16 @@ static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
236 | kcb->kprobe_saved_epc = regs->cp0_epc; | 191 | kcb->kprobe_saved_epc = regs->cp0_epc; |
237 | } | 192 | } |
238 | 193 | ||
239 | /** | 194 | static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
240 | * evaluate_branch_instruction - | ||
241 | * | ||
242 | * Evaluate the branch instruction at the probed address during a probe | ||
243 | * hit. The result of the evaluation is the updated epc. The instruction | ||
244 | * in the delay slot is actually single-stepped using a normal breakpoint | ||
245 | * on the SSOL slot. | ||
246 | * | ||
247 | * The result is also saved in the kprobe control block for later use, | ||
248 | * in case we need to execute the delay-slot instruction. The latter is | ||
249 | * false for a NOP in the delay slot and for a taken branch-likely | ||
250 | * instruction; in those cases we set SKIP_DELAYSLOT in the control block. | ||
251 | */ | ||
252 | static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, | ||
253 | struct kprobe_ctlblk *kcb) | ||
254 | { | 195 | { |
255 | union mips_instruction insn = p->opcode; | ||
256 | long epc; | ||
257 | int ret = 0; | ||
258 | |||
259 | epc = regs->cp0_epc; | ||
260 | if (epc & 3) | ||
261 | goto unaligned; | ||
262 | |||
263 | if (p->ainsn.insn->word == 0) | ||
264 | kcb->flags |= SKIP_DELAYSLOT; | ||
265 | else | ||
266 | kcb->flags &= ~SKIP_DELAYSLOT; | ||
267 | |||
268 | ret = __compute_return_epc_for_insn(regs, insn); | ||
269 | if (ret < 0) | ||
270 | return ret; | ||
271 | |||
272 | if (ret == BRANCH_LIKELY_TAKEN) | ||
273 | kcb->flags |= SKIP_DELAYSLOT; | ||
274 | |||
275 | kcb->target_epc = regs->cp0_epc; | ||
276 | |||
277 | return 0; | ||
278 | |||
279 | unaligned: | ||
280 | pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); | ||
281 | force_sig(SIGBUS, current); | ||
282 | return -EFAULT; | ||
283 | |||
284 | } | ||
285 | |||
286 | static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
287 | struct kprobe_ctlblk *kcb) | ||
288 | { | ||
289 | int ret = 0; | ||
290 | |||
291 | regs->cp0_status &= ~ST0_IE; | 196 | regs->cp0_status &= ~ST0_IE; |
292 | 197 | ||
293 | /* single step inline if the instruction is a break */ | 198 | /* single step inline if the instruction is a break */ |
294 | if (p->opcode.word == breakpoint_insn.word || | 199 | if (p->opcode.word == breakpoint_insn.word || |
295 | p->opcode.word == breakpoint2_insn.word) | 200 | p->opcode.word == breakpoint2_insn.word) |
296 | regs->cp0_epc = (unsigned long)p->addr; | 201 | regs->cp0_epc = (unsigned long)p->addr; |
297 | else if (insn_has_delayslot(p->opcode)) { | 202 | else |
298 | ret = evaluate_branch_instruction(p, regs, kcb); | 203 | regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; |
299 | if (ret < 0) { | ||
300 | pr_notice("Kprobes: Error in evaluating branch\n"); | ||
301 | return; | ||
302 | } | ||
303 | } | ||
304 | regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * Called after single-stepping. p->addr is the address of the | ||
309 | * instruction whose first byte has been replaced by the "break 0" | ||
310 | * instruction. To avoid the SMP problems that can occur when we | ||
311 | * temporarily put back the original opcode to single-step, we | ||
312 | * single-stepped a copy of the instruction. The address of this | ||
313 | * copy is p->ainsn.insn. | ||
314 | * | ||
315 | * This function prepares to return from the post-single-step | ||
316 | * breakpoint trap. In case of branch instructions, the target | ||
317 | * epc to be restored. | ||
318 | */ | ||
319 | static void __kprobes resume_execution(struct kprobe *p, | ||
320 | struct pt_regs *regs, | ||
321 | struct kprobe_ctlblk *kcb) | ||
322 | { | ||
323 | if (insn_has_delayslot(p->opcode)) | ||
324 | regs->cp0_epc = kcb->target_epc; | ||
325 | else { | ||
326 | unsigned long orig_epc = kcb->kprobe_saved_epc; | ||
327 | regs->cp0_epc = orig_epc + 4; | ||
328 | } | ||
329 | } | 204 | } |
330 | 205 | ||
331 | static int __kprobes kprobe_handler(struct pt_regs *regs) | 206 | static int __kprobes kprobe_handler(struct pt_regs *regs) |
@@ -364,13 +239,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
364 | save_previous_kprobe(kcb); | 239 | save_previous_kprobe(kcb); |
365 | set_current_kprobe(p, regs, kcb); | 240 | set_current_kprobe(p, regs, kcb); |
366 | kprobes_inc_nmissed_count(p); | 241 | kprobes_inc_nmissed_count(p); |
367 | prepare_singlestep(p, regs, kcb); | 242 | prepare_singlestep(p, regs); |
368 | kcb->kprobe_status = KPROBE_REENTER; | 243 | kcb->kprobe_status = KPROBE_REENTER; |
369 | if (kcb->flags & SKIP_DELAYSLOT) { | ||
370 | resume_execution(p, regs, kcb); | ||
371 | restore_previous_kprobe(kcb); | ||
372 | preempt_enable_no_resched(); | ||
373 | } | ||
374 | return 1; | 244 | return 1; |
375 | } else { | 245 | } else { |
376 | if (addr->word != breakpoint_insn.word) { | 246 | if (addr->word != breakpoint_insn.word) { |
@@ -414,16 +284,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
414 | } | 284 | } |
415 | 285 | ||
416 | ss_probe: | 286 | ss_probe: |
417 | prepare_singlestep(p, regs, kcb); | 287 | prepare_singlestep(p, regs); |
418 | if (kcb->flags & SKIP_DELAYSLOT) { | 288 | kcb->kprobe_status = KPROBE_HIT_SS; |
419 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
420 | if (p->post_handler) | ||
421 | p->post_handler(p, regs, 0); | ||
422 | resume_execution(p, regs, kcb); | ||
423 | preempt_enable_no_resched(); | ||
424 | } else | ||
425 | kcb->kprobe_status = KPROBE_HIT_SS; | ||
426 | |||
427 | return 1; | 289 | return 1; |
428 | 290 | ||
429 | no_kprobe: | 291 | no_kprobe: |
@@ -432,6 +294,25 @@ no_kprobe: | |||
432 | 294 | ||
433 | } | 295 | } |
434 | 296 | ||
297 | /* | ||
298 | * Called after single-stepping. p->addr is the address of the | ||
299 | * instruction whose first byte has been replaced by the "break 0" | ||
300 | * instruction. To avoid the SMP problems that can occur when we | ||
301 | * temporarily put back the original opcode to single-step, we | ||
302 | * single-stepped a copy of the instruction. The address of this | ||
303 | * copy is p->ainsn.insn. | ||
304 | * | ||
305 | * This function prepares to return from the post-single-step | ||
306 | * breakpoint trap. | ||
307 | */ | ||
308 | static void __kprobes resume_execution(struct kprobe *p, | ||
309 | struct pt_regs *regs, | ||
310 | struct kprobe_ctlblk *kcb) | ||
311 | { | ||
312 | unsigned long orig_epc = kcb->kprobe_saved_epc; | ||
313 | regs->cp0_epc = orig_epc + 4; | ||
314 | } | ||
315 | |||
435 | static inline int post_kprobe_handler(struct pt_regs *regs) | 316 | static inline int post_kprobe_handler(struct pt_regs *regs) |
436 | { | 317 | { |
437 | struct kprobe *cur = kprobe_running(); | 318 | struct kprobe *cur = kprobe_running(); |
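
On the insn_has_ll_or_sc() screen added above: a breakpoint trap taken between an ll and its matching sc clears the link (LLbit), so the store conditional fails on every retry and the loop can never complete; probes on those opcodes are therefore refused outright. The check reduced to a self-contained sketch (the opcode values are the MIPS primary opcodes for ll/lld/sc/scd, quoted here from memory rather than from the kernel headers):

/* Sketch of the ll/sc opcode screen; a probe on any of these four
 * primary opcodes would break the atomic retry loop forever. */
enum { LL_OP = 0x30, LLD_OP = 0x34, SC_OP = 0x38, SCD_OP = 0x3c };

static int is_ll_or_sc(unsigned int opcode)
{
	switch (opcode) {
	case LL_OP:
	case LLD_OP:
	case SC_OP:
	case SCD_OP:
		return 1;	/* refuse the probe */
	default:
		return 0;
	}
}
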
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 7adab86c632..922a554cd10 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2000 Silicon Graphics, Inc. | 4 | * Copyright (C) 2000 Silicon Graphics, Inc. |
5 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) | 5 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) |
6 | * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com) | ||
6 | */ | 7 | */ |
7 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
8 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
@@ -76,6 +77,26 @@ out: | |||
76 | return error; | 77 | return error; |
77 | } | 78 | } |
78 | 79 | ||
80 | /* | ||
81 | * sys_execve() executes a new program. | ||
82 | */ | ||
83 | asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs) | ||
84 | { | ||
85 | int error; | ||
86 | char * filename; | ||
87 | |||
88 | filename = getname(compat_ptr(regs.regs[4])); | ||
89 | error = PTR_ERR(filename); | ||
90 | if (IS_ERR(filename)) | ||
91 | goto out; | ||
92 | error = compat_do_execve(filename, compat_ptr(regs.regs[5]), | ||
93 | compat_ptr(regs.regs[6]), ®s); | ||
94 | putname(filename); | ||
95 | |||
96 | out: | ||
97 | return error; | ||
98 | } | ||
99 | |||
79 | #define RLIM_INFINITY32 0x7fffffff | 100 | #define RLIM_INFINITY32 0x7fffffff |
80 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) | 101 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) |
81 | 102 | ||
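RESOURCE32() saturates a 64-bit rlimit at the largest value a 32-bit userland can represent, rather than truncating it. A standalone check of the macro's behaviour (the macro is copied verbatim from the hunk above):

#include <stdio.h>

#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)

int main(void)
{
	unsigned long long small = 4096, huge = 1ULL << 40;

	printf("%llu -> %llu\n", small, (unsigned long long)RESOURCE32(small));
	printf("%llu -> %llu\n", huge,  (unsigned long long)RESOURCE32(huge));	/* clamps to 0x7fffffff */
	return 0;
}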
@@ -312,7 +333,7 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs) | |||
312 | /* Use __dummy4 instead of getting it off the stack, so that | 333 | /* Use __dummy4 instead of getting it off the stack, so that |
313 | syscall() works. */ | 334 | syscall() works. */ |
314 | child_tidptr = (int __user *) __dummy4; | 335 | child_tidptr = (int __user *) __dummy4; |
315 | return do_fork(clone_flags, newsp, 0, | 336 | return do_fork(clone_flags, newsp, ®s, 0, |
316 | parent_tidptr, child_tidptr); | 337 | parent_tidptr, child_tidptr); |
317 | } | 338 | } |
318 | 339 | ||
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 992e18474da..85beb9b0b2d 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * This source code is licensed under the GNU General Public License, | 5 | * This source code is licensed under the GNU General Public License, |
6 | * Version 2. See the file COPYING for more details. | 6 | * Version 2. See the file COPYING for more details. |
7 | */ | 7 | */ |
8 | #include <linux/compiler.h> | 8 | |
9 | #include <linux/kexec.h> | 9 | #include <linux/kexec.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
@@ -19,19 +19,9 @@ extern const size_t relocate_new_kernel_size; | |||
19 | extern unsigned long kexec_start_address; | 19 | extern unsigned long kexec_start_address; |
20 | extern unsigned long kexec_indirection_page; | 20 | extern unsigned long kexec_indirection_page; |
21 | 21 | ||
22 | int (*_machine_kexec_prepare)(struct kimage *) = NULL; | ||
23 | void (*_machine_kexec_shutdown)(void) = NULL; | ||
24 | void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; | ||
25 | #ifdef CONFIG_SMP | ||
26 | void (*relocated_kexec_smp_wait) (void *); | ||
27 | atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); | ||
28 | #endif | ||
29 | |||
30 | int | 22 | int |
31 | machine_kexec_prepare(struct kimage *kimage) | 23 | machine_kexec_prepare(struct kimage *kimage) |
32 | { | 24 | { |
33 | if (_machine_kexec_prepare) | ||
34 | return _machine_kexec_prepare(kimage); | ||
35 | return 0; | 25 | return 0; |
36 | } | 26 | } |
37 | 27 | ||
@@ -43,20 +33,14 @@ machine_kexec_cleanup(struct kimage *kimage) | |||
43 | void | 33 | void |
44 | machine_shutdown(void) | 34 | machine_shutdown(void) |
45 | { | 35 | { |
46 | if (_machine_kexec_shutdown) | ||
47 | _machine_kexec_shutdown(); | ||
48 | } | 36 | } |
49 | 37 | ||
50 | void | 38 | void |
51 | machine_crash_shutdown(struct pt_regs *regs) | 39 | machine_crash_shutdown(struct pt_regs *regs) |
52 | { | 40 | { |
53 | if (_machine_crash_shutdown) | ||
54 | _machine_crash_shutdown(regs); | ||
55 | else | ||
56 | default_machine_crash_shutdown(regs); | ||
57 | } | 41 | } |
58 | 42 | ||
59 | typedef void (*noretfun_t)(void) __noreturn; | 43 | typedef void (*noretfun_t)(void) __attribute__((noreturn)); |
60 | 44 | ||
61 | void | 45 | void |
62 | machine_kexec(struct kimage *image) | 46 | machine_kexec(struct kimage *image) |
@@ -68,9 +52,7 @@ machine_kexec(struct kimage *image) | |||
68 | reboot_code_buffer = | 52 | reboot_code_buffer = |
69 | (unsigned long)page_address(image->control_code_page); | 53 | (unsigned long)page_address(image->control_code_page); |
70 | 54 | ||
71 | kexec_start_address = | 55 | kexec_start_address = image->start; |
72 | (unsigned long) phys_to_virt(image->start); | ||
73 | |||
74 | kexec_indirection_page = | 56 | kexec_indirection_page = |
75 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); | 57 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); |
76 | 58 | ||
@@ -81,7 +63,7 @@ machine_kexec(struct kimage *image) | |||
81 | * The generic kexec code builds a page list with physical | 63 | * The generic kexec code builds a page list with physical |
82 | * addresses. they are directly accessible through KSEG0 (or | 64 | * addresses. they are directly accessible through KSEG0 (or |
83 | * CKSEG0 or XPHYS if on 64bit system), hence the | 65 | * CKSEG0 or XPHYS if on 64bit system), hence the |
84 | * phys_to_virt() call. | 66 | * phys_to_virt() call. |
85 | */ | 67 | */ |
86 | for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); | 68 | for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); |
87 | ptr = (entry & IND_INDIRECTION) ? | 69 | ptr = (entry & IND_INDIRECTION) ? |
@@ -99,12 +81,5 @@ machine_kexec(struct kimage *image) | |||
99 | printk("Will call new kernel at %08lx\n", image->start); | 81 | printk("Will call new kernel at %08lx\n", image->start); |
100 | printk("Bye ...\n"); | 82 | printk("Bye ...\n"); |
101 | __flush_cache_all(); | 83 | __flush_cache_all(); |
102 | #ifdef CONFIG_SMP | ||
103 | /* All secondary cpus now may jump to kexec_wait cycle */ | ||
104 | relocated_kexec_smp_wait = reboot_code_buffer + | ||
105 | (void *)(kexec_smp_wait - relocate_new_kernel); | ||
106 | smp_wmb(); | ||
107 | atomic_set(&kexec_ready_to_reboot, 1); | ||
108 | #endif | ||
109 | ((noretfun_t) reboot_code_buffer)(); | 84 | ((noretfun_t) reboot_code_buffer)(); |
110 | } | 85 | } |
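The phys_to_virt() calls in this file work because of the fixed KSEG0 window: on a 32-bit MIPS kernel the low 512 MB of physical memory appear, cached and unmapped, at virtual 0x80000000, so the conversion is a constant offset. A userspace illustration of that arithmetic (assuming the conventional 0x80000000 base; 64-bit kernels use the analogous CKSEG0/XKPHYS windows mentioned in the comment):

#include <stdio.h>

#define KSEG0_BASE 0x80000000UL

static unsigned long phys_to_kseg0(unsigned long phys)
{
	return phys + KSEG0_BASE;	/* what phys_to_virt() boils down to here */
}

int main(void)
{
	unsigned long head_phys = 0x01234000;	/* e.g. image->head & PAGE_MASK */

	printf("phys %#lx -> virt %#lx\n", head_phys, phys_to_kseg0(head_phys));
	return 0;
}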
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index fd814e08c94..802e6160f37 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
@@ -50,8 +50,8 @@ static bool check_same_owner(struct task_struct *p) | |||
50 | 50 | ||
51 | rcu_read_lock(); | 51 | rcu_read_lock(); |
52 | pcred = __task_cred(p); | 52 | pcred = __task_cred(p); |
53 | match = (uid_eq(cred->euid, pcred->euid) || | 53 | match = (cred->euid == pcred->euid || |
54 | uid_eq(cred->euid, pcred->uid)); | 54 | cred->euid == pcred->uid); |
55 | rcu_read_unlock(); | 55 | rcu_read_unlock(); |
56 | return match; | 56 | return match; |
57 | } | 57 | } |
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
173 | if (retval) | 173 | if (retval) |
174 | goto out_unlock; | 174 | goto out_unlock; |
175 | 175 | ||
176 | cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); | 176 | cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); |
177 | 177 | ||
178 | out_unlock: | 178 | out_unlock: |
179 | read_unlock(&tasklist_lock); | 179 | read_unlock(&tasklist_lock); |
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 6ded9bd1489..594ca69cb86 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c | |||
@@ -6,13 +6,14 @@ | |||
6 | #include <linux/device.h> | 6 | #include <linux/device.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/export.h> | 9 | #include <linux/module.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/security.h> | 11 | #include <linux/security.h> |
12 | 12 | ||
13 | #include <asm/cpu.h> | 13 | #include <asm/cpu.h> |
14 | #include <asm/processor.h> | 14 | #include <asm/processor.h> |
15 | #include <linux/atomic.h> | 15 | #include <linux/atomic.h> |
16 | #include <asm/system.h> | ||
16 | #include <asm/hardirq.h> | 17 | #include <asm/hardirq.h> |
17 | #include <asm/mmu_context.h> | 18 | #include <asm/mmu_context.h> |
18 | #include <asm/mipsmtregs.h> | 19 | #include <asm/mipsmtregs.h> |
@@ -209,7 +210,7 @@ void mips_mt_set_cpuoptions(void) | |||
209 | unsigned int nconfig7 = oconfig7; | 210 | unsigned int nconfig7 = oconfig7; |
210 | 211 | ||
211 | if (mt_opt_norps) { | 212 | if (mt_opt_norps) { |
212 | printk("\"norps\" option deprecated: use \"rpsctl=\"\n"); | 213 | printk("\"norps\" option deprectated: use \"rpsctl=\"\n"); |
213 | } | 214 | } |
214 | if (mt_opt_rpsctl >= 0) { | 215 | if (mt_opt_rpsctl >= 0) { |
215 | printk("34K return prediction stack override set to %d.\n", | 216 | printk("34K return prediction stack override set to %d.\n", |
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index df1e3e455f9..1d04807874d 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c | |||
@@ -5,13 +5,13 @@ | |||
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 6 | * for more details. |
7 | * | 7 | * |
8 | * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle | 8 | * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle |
9 | * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. |
10 | */ | 10 | */ |
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/export.h> | 12 | #include <linux/module.h> |
13 | #include <asm/checksum.h> | 13 | #include <asm/checksum.h> |
14 | #include <linux/mm.h> | 14 | #include <asm/pgtable.h> |
15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/ftrace.h> | 16 | #include <asm/ftrace.h> |
17 | 17 | ||
@@ -32,11 +32,7 @@ EXPORT_SYMBOL(memset); | |||
32 | EXPORT_SYMBOL(memcpy); | 32 | EXPORT_SYMBOL(memcpy); |
33 | EXPORT_SYMBOL(memmove); | 33 | EXPORT_SYMBOL(memmove); |
34 | 34 | ||
35 | /* | 35 | EXPORT_SYMBOL(kernel_thread); |
36 | * Functions that operate on entire pages. Mostly used by memory management. | ||
37 | */ | ||
38 | EXPORT_SYMBOL(clear_page); | ||
39 | EXPORT_SYMBOL(copy_page); | ||
40 | 36 | ||
41 | /* | 37 | /* |
42 | * Userspace access stuff. | 38 | * Userspace access stuff. |
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c deleted file mode 100644 index 61d60028b88..00000000000 --- a/arch/mips/kernel/module-rela.c +++ /dev/null | |||
@@ -1,145 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) 2001 Rusty Russell. | ||
17 | * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) | ||
18 | * Copyright (C) 2005 Thiemo Seufer | ||
19 | */ | ||
20 | |||
21 | #include <linux/elf.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/moduleloader.h> | ||
25 | |||
26 | extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v); | ||
27 | |||
28 | static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) | ||
29 | { | ||
30 | *location = v; | ||
31 | |||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) | ||
36 | { | ||
37 | if (v % 4) { | ||
38 | pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n", | ||
39 | me->name); | ||
40 | return -ENOEXEC; | ||
41 | } | ||
42 | |||
43 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
44 | printk(KERN_ERR | ||
45 | "module %s: relocation overflow\n", | ||
46 | me->name); | ||
47 | return -ENOEXEC; | ||
48 | } | ||
49 | |||
50 | *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) | ||
56 | { | ||
57 | *location = (*location & 0xffff0000) | | ||
58 | ((((long long) v + 0x8000LL) >> 16) & 0xffff); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v) | ||
64 | { | ||
65 | *location = (*location & 0xffff0000) | (v & 0xffff); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v) | ||
71 | { | ||
72 | *(Elf_Addr *)location = v; | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static int apply_r_mips_higher_rela(struct module *me, u32 *location, | ||
78 | Elf_Addr v) | ||
79 | { | ||
80 | *location = (*location & 0xffff0000) | | ||
81 | ((((long long) v + 0x80008000LL) >> 32) & 0xffff); | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static int apply_r_mips_highest_rela(struct module *me, u32 *location, | ||
87 | Elf_Addr v) | ||
88 | { | ||
89 | *location = (*location & 0xffff0000) | | ||
90 | ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, | ||
96 | Elf_Addr v) = { | ||
97 | [R_MIPS_NONE] = apply_r_mips_none, | ||
98 | [R_MIPS_32] = apply_r_mips_32_rela, | ||
99 | [R_MIPS_26] = apply_r_mips_26_rela, | ||
100 | [R_MIPS_HI16] = apply_r_mips_hi16_rela, | ||
101 | [R_MIPS_LO16] = apply_r_mips_lo16_rela, | ||
102 | [R_MIPS_64] = apply_r_mips_64_rela, | ||
103 | [R_MIPS_HIGHER] = apply_r_mips_higher_rela, | ||
104 | [R_MIPS_HIGHEST] = apply_r_mips_highest_rela | ||
105 | }; | ||
106 | |||
107 | int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, | ||
108 | unsigned int symindex, unsigned int relsec, | ||
109 | struct module *me) | ||
110 | { | ||
111 | Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; | ||
112 | Elf_Sym *sym; | ||
113 | u32 *location; | ||
114 | unsigned int i; | ||
115 | Elf_Addr v; | ||
116 | int res; | ||
117 | |||
118 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
119 | sechdrs[relsec].sh_info); | ||
120 | |||
121 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
122 | /* This is where to make the change */ | ||
123 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
124 | + rel[i].r_offset; | ||
125 | /* This is the symbol it is referring to */ | ||
126 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
127 | + ELF_MIPS_R_SYM(rel[i]); | ||
128 | if (IS_ERR_VALUE(sym->st_value)) { | ||
129 | /* Ignore unresolved weak symbol */ | ||
130 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) | ||
131 | continue; | ||
132 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
133 | me->name, strtab + sym->st_name); | ||
134 | return -ENOENT; | ||
135 | } | ||
136 | |||
137 | v = sym->st_value + rel[i].r_addend; | ||
138 | |||
139 | res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); | ||
140 | if (res) | ||
141 | return res; | ||
142 | } | ||
143 | |||
144 | return 0; | ||
145 | } | ||
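The +0x8000 rounding in apply_r_mips_hi16_rela() exists because the LO16 half is consumed by an addiu, which sign-extends it; biasing the high half by half of 0x10000 makes the pair reassemble the original value exactly. A self-contained round-trip check of the split used above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x1234ffff;	/* address being relocated */

	/* Split exactly as the HI16/LO16 RELA handlers above do. */
	uint16_t hi = (uint16_t)(((int64_t)v + 0x8000LL) >> 16);
	uint16_t lo = (uint16_t)(v & 0xffff);

	/* lui reg, hi; addiu reg, reg, lo -- addiu sign-extends lo. */
	uint32_t back = ((uint32_t)hi << 16) + (int16_t)lo;

	printf("v=%#x hi=%#x lo=%#x reassembled=%#x\n", v, hi, lo, back);
	return back != v;	/* exits 0: the halves round-trip */
}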
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 07ff5812ffa..4b930ac4aff 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | ||
31 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
32 | #include <linux/jump_label.h> | 33 | #include <linux/jump_label.h> |
33 | 34 | ||
@@ -39,6 +40,8 @@ struct mips_hi16 { | |||
39 | Elf_Addr value; | 40 | Elf_Addr value; |
40 | }; | 41 | }; |
41 | 42 | ||
43 | static struct mips_hi16 *mips_hi16_list; | ||
44 | |||
42 | static LIST_HEAD(dbe_list); | 45 | static LIST_HEAD(dbe_list); |
43 | static DEFINE_SPINLOCK(dbe_lock); | 46 | static DEFINE_SPINLOCK(dbe_lock); |
44 | 47 | ||
@@ -51,7 +54,7 @@ void *module_alloc(unsigned long size) | |||
51 | } | 54 | } |
52 | #endif | 55 | #endif |
53 | 56 | ||
54 | int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) | 57 | static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) |
55 | { | 58 | { |
56 | return 0; | 59 | return 0; |
57 | } | 60 | } |
@@ -63,6 +66,13 @@ static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v) | |||
63 | return 0; | 66 | return 0; |
64 | } | 67 | } |
65 | 68 | ||
69 | static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) | ||
70 | { | ||
71 | *location = v; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
66 | static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) | 76 | static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) |
67 | { | 77 | { |
68 | if (v % 4) { | 78 | if (v % 4) { |
@@ -84,6 +94,26 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) | |||
84 | return 0; | 94 | return 0; |
85 | } | 95 | } |
86 | 96 | ||
97 | static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) | ||
98 | { | ||
99 | if (v % 4) { | ||
100 | pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n", | ||
101 | me->name); | ||
102 | return -ENOEXEC; | ||
103 | } | ||
104 | |||
105 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
106 | printk(KERN_ERR | ||
107 | "module %s: relocation overflow\n", | ||
108 | me->name); | ||
109 | return -ENOEXEC; | ||
110 | } | ||
111 | |||
112 | *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
87 | static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) | 117 | static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) |
88 | { | 118 | { |
89 | struct mips_hi16 *n; | 119 | struct mips_hi16 *n; |
@@ -99,34 +129,32 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) | |||
99 | 129 | ||
100 | n->addr = (Elf_Addr *)location; | 130 | n->addr = (Elf_Addr *)location; |
101 | n->value = v; | 131 | n->value = v; |
102 | n->next = me->arch.r_mips_hi16_list; | 132 | n->next = mips_hi16_list; |
103 | me->arch.r_mips_hi16_list = n; | 133 | mips_hi16_list = n; |
104 | 134 | ||
105 | return 0; | 135 | return 0; |
106 | } | 136 | } |
107 | 137 | ||
108 | static void free_relocation_chain(struct mips_hi16 *l) | 138 | static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) |
109 | { | 139 | { |
110 | struct mips_hi16 *next; | 140 | *location = (*location & 0xffff0000) | |
141 | ((((long long) v + 0x8000LL) >> 16) & 0xffff); | ||
111 | 142 | ||
112 | while (l) { | 143 | return 0; |
113 | next = l->next; | ||
114 | kfree(l); | ||
115 | l = next; | ||
116 | } | ||
117 | } | 144 | } |
118 | 145 | ||
119 | static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) | 146 | static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) |
120 | { | 147 | { |
121 | unsigned long insnlo = *location; | 148 | unsigned long insnlo = *location; |
122 | struct mips_hi16 *l; | ||
123 | Elf_Addr val, vallo; | 149 | Elf_Addr val, vallo; |
124 | 150 | ||
125 | /* Sign extend the addend we extract from the lo insn. */ | 151 | /* Sign extend the addend we extract from the lo insn. */ |
126 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | 152 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; |
127 | 153 | ||
128 | if (me->arch.r_mips_hi16_list != NULL) { | 154 | if (mips_hi16_list != NULL) { |
129 | l = me->arch.r_mips_hi16_list; | 155 | struct mips_hi16 *l; |
156 | |||
157 | l = mips_hi16_list; | ||
130 | while (l != NULL) { | 158 | while (l != NULL) { |
131 | struct mips_hi16 *next; | 159 | struct mips_hi16 *next; |
132 | unsigned long insn; | 160 | unsigned long insn; |
@@ -161,7 +189,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) | |||
161 | l = next; | 189 | l = next; |
162 | } | 190 | } |
163 | 191 | ||
164 | me->arch.r_mips_hi16_list = NULL; | 192 | mips_hi16_list = NULL; |
165 | } | 193 | } |
166 | 194 | ||
167 | /* | 195 | /* |
@@ -174,14 +202,43 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) | |||
174 | return 0; | 202 | return 0; |
175 | 203 | ||
176 | out_danger: | 204 | out_danger: |
177 | free_relocation_chain(l); | ||
178 | me->arch.r_mips_hi16_list = NULL; | ||
179 | |||
180 | pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); | 205 | pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); |
181 | 206 | ||
182 | return -ENOEXEC; | 207 | return -ENOEXEC; |
183 | } | 208 | } |
184 | 209 | ||
210 | static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v) | ||
211 | { | ||
212 | *location = (*location & 0xffff0000) | (v & 0xffff); | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v) | ||
218 | { | ||
219 | *(Elf_Addr *)location = v; | ||
220 | |||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static int apply_r_mips_higher_rela(struct module *me, u32 *location, | ||
225 | Elf_Addr v) | ||
226 | { | ||
227 | *location = (*location & 0xffff0000) | | ||
228 | ((((long long) v + 0x80008000LL) >> 32) & 0xffff); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static int apply_r_mips_highest_rela(struct module *me, u32 *location, | ||
234 | Elf_Addr v) | ||
235 | { | ||
236 | *location = (*location & 0xffff0000) | | ||
237 | ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
185 | static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, | 242 | static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, |
186 | Elf_Addr v) = { | 243 | Elf_Addr v) = { |
187 | [R_MIPS_NONE] = apply_r_mips_none, | 244 | [R_MIPS_NONE] = apply_r_mips_none, |
@@ -191,6 +248,18 @@ static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, | |||
191 | [R_MIPS_LO16] = apply_r_mips_lo16_rel | 248 | [R_MIPS_LO16] = apply_r_mips_lo16_rel |
192 | }; | 249 | }; |
193 | 250 | ||
251 | static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, | ||
252 | Elf_Addr v) = { | ||
253 | [R_MIPS_NONE] = apply_r_mips_none, | ||
254 | [R_MIPS_32] = apply_r_mips_32_rela, | ||
255 | [R_MIPS_26] = apply_r_mips_26_rela, | ||
256 | [R_MIPS_HI16] = apply_r_mips_hi16_rela, | ||
257 | [R_MIPS_LO16] = apply_r_mips_lo16_rela, | ||
258 | [R_MIPS_64] = apply_r_mips_64_rela, | ||
259 | [R_MIPS_HIGHER] = apply_r_mips_higher_rela, | ||
260 | [R_MIPS_HIGHEST] = apply_r_mips_highest_rela | ||
261 | }; | ||
262 | |||
194 | int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | 263 | int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, |
195 | unsigned int symindex, unsigned int relsec, | 264 | unsigned int symindex, unsigned int relsec, |
196 | struct module *me) | 265 | struct module *me) |
@@ -205,7 +274,6 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | |||
205 | pr_debug("Applying relocate section %u to %u\n", relsec, | 274 | pr_debug("Applying relocate section %u to %u\n", relsec, |
206 | sechdrs[relsec].sh_info); | 275 | sechdrs[relsec].sh_info); |
207 | 276 | ||
208 | me->arch.r_mips_hi16_list = NULL; | ||
209 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | 277 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { |
210 | /* This is where to make the change */ | 278 | /* This is where to make the change */ |
211 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | 279 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr |
@@ -229,17 +297,44 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | |||
229 | return res; | 297 | return res; |
230 | } | 298 | } |
231 | 299 | ||
232 | /* | 300 | return 0; |
233 | * Normally the hi16 list should be deallocated at this point. A | 301 | } |
234 | * malformed binary however could contain a series of R_MIPS_HI16 | ||
235 | * relocations not followed by a R_MIPS_LO16 relocation. In that | ||
236 | * case, free up the list and return an error. | ||
237 | */ | ||
238 | if (me->arch.r_mips_hi16_list) { | ||
239 | free_relocation_chain(me->arch.r_mips_hi16_list); | ||
240 | me->arch.r_mips_hi16_list = NULL; | ||
241 | 302 | ||
242 | return -ENOEXEC; | 303 | int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, |
304 | unsigned int symindex, unsigned int relsec, | ||
305 | struct module *me) | ||
306 | { | ||
307 | Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; | ||
308 | Elf_Sym *sym; | ||
309 | u32 *location; | ||
310 | unsigned int i; | ||
311 | Elf_Addr v; | ||
312 | int res; | ||
313 | |||
314 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
315 | sechdrs[relsec].sh_info); | ||
316 | |||
317 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
318 | /* This is where to make the change */ | ||
319 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
320 | + rel[i].r_offset; | ||
321 | /* This is the symbol it is referring to */ | ||
322 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
323 | + ELF_MIPS_R_SYM(rel[i]); | ||
324 | if (IS_ERR_VALUE(sym->st_value)) { | ||
325 | /* Ignore unresolved weak symbol */ | ||
326 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) | ||
327 | continue; | ||
328 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
329 | me->name, strtab + sym->st_name); | ||
330 | return -ENOENT; | ||
331 | } | ||
332 | |||
333 | v = sym->st_value + rel[i].r_addend; | ||
334 | |||
335 | res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); | ||
336 | if (res) | ||
337 | return res; | ||
243 | } | 338 | } |
244 | 339 | ||
245 | return 0; | 340 | return 0; |
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 207f1341578..ce89c806170 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/fpregdef.h> | 15 | #include <asm/fpregdef.h> |
16 | #include <asm/mipsregs.h> | 16 | #include <asm/mipsregs.h> |
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <asm/page.h> | ||
18 | #include <asm/pgtable-bits.h> | 19 | #include <asm/pgtable-bits.h> |
19 | #include <asm/regdef.h> | 20 | #include <asm/regdef.h> |
20 | #include <asm/stackframe.h> | 21 | #include <asm/stackframe.h> |
@@ -30,7 +31,7 @@ | |||
30 | 31 | ||
31 | /* | 32 | /* |
32 | * task_struct *resume(task_struct *prev, task_struct *next, | 33 | * task_struct *resume(task_struct *prev, task_struct *next, |
33 | * struct thread_info *next_ti, int usedfpu) | 34 | * struct thread_info *next_ti) |
34 | */ | 35 | */ |
35 | .align 7 | 36 | .align 7 |
36 | LEAF(resume) | 37 | LEAF(resume) |
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index c1cf9c6c3f7..0aee944ac38 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c | |||
@@ -14,16 +14,533 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/cpumask.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/smp.h> | ||
20 | #include <linux/kernel.h> | ||
17 | #include <linux/perf_event.h> | 21 | #include <linux/perf_event.h> |
22 | #include <linux/uaccess.h> | ||
18 | 23 | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/irq_regs.h> | ||
19 | #include <asm/stacktrace.h> | 26 | #include <asm/stacktrace.h> |
27 | #include <asm/time.h> /* For perf_irq */ | ||
28 | |||
29 | /* These are for 32bit counters. For 64bit ones, define them accordingly. */ | ||
30 | #define MAX_PERIOD ((1ULL << 32) - 1) | ||
31 | #define VALID_COUNT 0x7fffffff | ||
32 | #define TOTAL_BITS 32 | ||
33 | #define HIGHEST_BIT 31 | ||
34 | |||
35 | #define MIPS_MAX_HWEVENTS 4 | ||
36 | |||
37 | struct cpu_hw_events { | ||
38 | /* Array of events on this cpu. */ | ||
39 | struct perf_event *events[MIPS_MAX_HWEVENTS]; | ||
40 | |||
41 | /* | ||
42 | * Set the bit (indexed by the counter number) when the counter | ||
43 | * is used for an event. | ||
44 | */ | ||
45 | unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; | ||
46 | |||
47 | /* | ||
48 | * The borrowed MSB for the performance counter. A MIPS performance | ||
49 | * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit | ||
50 | * counters) as a factor of determining whether a counter overflow | ||
51 | * should be signaled. So here we use a separate MSB for each | ||
52 | * counter to make things easy. | ||
53 | */ | ||
54 | unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; | ||
55 | |||
56 | /* | ||
57 | * Software copy of the control register for each performance counter. | ||
58 | * MIPS CPUs vary in performance counters. They use this differently, | ||
59 | * and even may not use it. | ||
60 | */ | ||
61 | unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; | ||
62 | }; | ||
63 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | ||
64 | .saved_ctrl = {0}, | ||
65 | }; | ||
66 | |||
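The "borrowed MSB" in the struct comment means bit 31 of each hardware counter is reserved as the overflow flag, so only 31 bits (VALID_COUNT) of real count live in the register and the true MSB is parked in the msbs bitmap. A standalone sketch of just the split-and-splice (a plain bitmask stands in for the kernel's set_bit/test_bit, and the overflow-interrupt side is ignored):

#include <stdio.h>
#include <stdint.h>

#define VALID_COUNT 0x7fffffff	/* low 31 bits live in the hardware counter */
#define HIGHEST_BIT 31

int main(void)
{
	uint64_t value = 0xc0001234ULL;	/* a count wider than 31 bits */
	unsigned long msbs = 0;		/* software home for bit 31 */
	int idx = 0;

	/* Store: park bit 31 in msbs, the rest in the counter. */
	if (value > VALID_COUNT)
		msbs |= 1UL << idx;
	uint32_t hw = value & VALID_COUNT;

	/* Load: splice the software MSB back in, as mipspmu_event_update() does. */
	uint64_t back = hw | ((uint64_t)((msbs >> idx) & 1) << HIGHEST_BIT);

	printf("hw=%#x msb=%lu -> %#llx\n", hw, (msbs >> idx) & 1,
	       (unsigned long long)back);
	return 0;
}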
67 | /* The description of MIPS performance events. */ | ||
68 | struct mips_perf_event { | ||
69 | unsigned int event_id; | ||
70 | /* | ||
71 | * MIPS performance counters are indexed starting from 0. | ||
72 | * CNTR_EVEN indicates the indexes of the counters to be used are | ||
73 | * even numbers. | ||
74 | */ | ||
75 | unsigned int cntr_mask; | ||
76 | #define CNTR_EVEN 0x55555555 | ||
77 | #define CNTR_ODD 0xaaaaaaaa | ||
78 | #ifdef CONFIG_MIPS_MT_SMP | ||
79 | enum { | ||
80 | T = 0, | ||
81 | V = 1, | ||
82 | P = 2, | ||
83 | } range; | ||
84 | #else | ||
85 | #define T | ||
86 | #define V | ||
87 | #define P | ||
88 | #endif | ||
89 | }; | ||
90 | |||
91 | static struct mips_perf_event raw_event; | ||
92 | static DEFINE_MUTEX(raw_event_mutex); | ||
93 | |||
94 | #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff | ||
95 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
96 | |||
97 | struct mips_pmu { | ||
98 | const char *name; | ||
99 | int irq; | ||
100 | irqreturn_t (*handle_irq)(int irq, void *dev); | ||
101 | int (*handle_shared_irq)(void); | ||
102 | void (*start)(void); | ||
103 | void (*stop)(void); | ||
104 | int (*alloc_counter)(struct cpu_hw_events *cpuc, | ||
105 | struct hw_perf_event *hwc); | ||
106 | u64 (*read_counter)(unsigned int idx); | ||
107 | void (*write_counter)(unsigned int idx, u64 val); | ||
108 | void (*enable_event)(struct hw_perf_event *evt, int idx); | ||
109 | void (*disable_event)(int idx); | ||
110 | const struct mips_perf_event *(*map_raw_event)(u64 config); | ||
111 | const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; | ||
112 | const struct mips_perf_event (*cache_event_map) | ||
113 | [PERF_COUNT_HW_CACHE_MAX] | ||
114 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
115 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
116 | unsigned int num_counters; | ||
117 | }; | ||
118 | |||
119 | static const struct mips_pmu *mipspmu; | ||
120 | |||
121 | static int | ||
122 | mipspmu_event_set_period(struct perf_event *event, | ||
123 | struct hw_perf_event *hwc, | ||
124 | int idx) | ||
125 | { | ||
126 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
127 | s64 left = local64_read(&hwc->period_left); | ||
128 | s64 period = hwc->sample_period; | ||
129 | int ret = 0; | ||
130 | u64 uleft; | ||
131 | unsigned long flags; | ||
132 | |||
133 | if (unlikely(left <= -period)) { | ||
134 | left = period; | ||
135 | local64_set(&hwc->period_left, left); | ||
136 | hwc->last_period = period; | ||
137 | ret = 1; | ||
138 | } | ||
139 | |||
140 | if (unlikely(left <= 0)) { | ||
141 | left += period; | ||
142 | local64_set(&hwc->period_left, left); | ||
143 | hwc->last_period = period; | ||
144 | ret = 1; | ||
145 | } | ||
146 | |||
147 | if (left > (s64)MAX_PERIOD) | ||
148 | left = MAX_PERIOD; | ||
149 | |||
150 | local64_set(&hwc->prev_count, (u64)-left); | ||
151 | |||
152 | local_irq_save(flags); | ||
153 | uleft = (u64)(-left) & MAX_PERIOD; | ||
154 | uleft > VALID_COUNT ? | ||
155 | set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs); | ||
156 | mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT); | ||
157 | local_irq_restore(flags); | ||
158 | |||
159 | perf_event_update_userpage(event); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | |||
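Writing (u64)(-left) programs the counter so that it overflows after exactly left increments, which is how the next sample point is scheduled. A quick arithmetic check of the idiom, simplified to a plain 32-bit counter (the real code above additionally routes the borrowed MSB through cpuc->msbs):

#include <stdio.h>
#include <stdint.h>

#define MAX_PERIOD ((1ULL << 32) - 1)

int main(void)
{
	int64_t left = 100000;	/* events until the next sample */

	uint64_t start = (uint64_t)(-left) & MAX_PERIOD;	/* counter start value */
	uint64_t after = (start + (uint64_t)left) & MAX_PERIOD;	/* 32-bit wrap */

	printf("start=%#llx, after %lld events: %#llx\n",
	       (unsigned long long)start, (long long)left,
	       (unsigned long long)after);
	return after != 0;	/* exits 0: overflow lands exactly on the sample */
}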
164 | static void mipspmu_event_update(struct perf_event *event, | ||
165 | struct hw_perf_event *hwc, | ||
166 | int idx) | ||
167 | { | ||
168 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
169 | unsigned long flags; | ||
170 | int shift = 64 - TOTAL_BITS; | ||
171 | s64 prev_raw_count, new_raw_count; | ||
172 | u64 delta; | ||
173 | |||
174 | again: | ||
175 | prev_raw_count = local64_read(&hwc->prev_count); | ||
176 | local_irq_save(flags); | ||
177 | /* Make the counter value be a "real" one. */ | ||
178 | new_raw_count = mipspmu->read_counter(idx); | ||
179 | if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) { | ||
180 | new_raw_count &= VALID_COUNT; | ||
181 | clear_bit(idx, cpuc->msbs); | ||
182 | } else | ||
183 | new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT); | ||
184 | local_irq_restore(flags); | ||
185 | |||
186 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
187 | new_raw_count) != prev_raw_count) | ||
188 | goto again; | ||
189 | |||
190 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
191 | delta >>= shift; | ||
192 | |||
193 | local64_add(delta, &event->count); | ||
194 | local64_sub(delta, &hwc->period_left); | ||
195 | } | ||
196 | |||
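The shift pair above is the standard wraparound-safe difference for counters narrower than 64 bits: shifting both samples up by 64 - TOTAL_BITS discards any stale high bits, the subtraction wraps modulo 2^64, and shifting back down yields the distance modulo 2^32. A self-contained demonstration across a counter wrap:

#include <stdio.h>
#include <stdint.h>

#define TOTAL_BITS 32

int main(void)
{
	int shift = 64 - TOTAL_BITS;
	uint64_t prev = 0xfffffff0;	/* sample taken just before the wrap */
	uint64_t now  = 0x00000010;	/* sample taken just after the wrap */

	uint64_t delta = (now << shift) - (prev << shift);
	delta >>= shift;

	printf("delta = %llu\n", (unsigned long long)delta);	/* 32, not a huge bogus value */
	return 0;
}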
197 | static void mipspmu_start(struct perf_event *event, int flags) | ||
198 | { | ||
199 | struct hw_perf_event *hwc = &event->hw; | ||
200 | |||
201 | if (!mipspmu) | ||
202 | return; | ||
203 | |||
204 | if (flags & PERF_EF_RELOAD) | ||
205 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
206 | |||
207 | hwc->state = 0; | ||
208 | |||
209 | /* Set the period for the event. */ | ||
210 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
211 | |||
212 | /* Enable the event. */ | ||
213 | mipspmu->enable_event(hwc, hwc->idx); | ||
214 | } | ||
215 | |||
216 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
217 | { | ||
218 | struct hw_perf_event *hwc = &event->hw; | ||
219 | |||
220 | if (!mipspmu) | ||
221 | return; | ||
222 | |||
223 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
224 | /* We are working on a local event. */ | ||
225 | mipspmu->disable_event(hwc->idx); | ||
226 | barrier(); | ||
227 | mipspmu_event_update(event, hwc, hwc->idx); | ||
228 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | static int mipspmu_add(struct perf_event *event, int flags) | ||
233 | { | ||
234 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
235 | struct hw_perf_event *hwc = &event->hw; | ||
236 | int idx; | ||
237 | int err = 0; | ||
238 | |||
239 | perf_pmu_disable(event->pmu); | ||
240 | |||
241 | /* To look for a free counter for this event. */ | ||
242 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
243 | if (idx < 0) { | ||
244 | err = idx; | ||
245 | goto out; | ||
246 | } | ||
247 | |||
248 | /* | ||
249 | * If there is an event in the counter we are going to use then | ||
250 | * make sure it is disabled. | ||
251 | */ | ||
252 | event->hw.idx = idx; | ||
253 | mipspmu->disable_event(idx); | ||
254 | cpuc->events[idx] = event; | ||
255 | |||
256 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
257 | if (flags & PERF_EF_START) | ||
258 | mipspmu_start(event, PERF_EF_RELOAD); | ||
259 | |||
260 | /* Propagate our changes to the userspace mapping. */ | ||
261 | perf_event_update_userpage(event); | ||
262 | |||
263 | out: | ||
264 | perf_pmu_enable(event->pmu); | ||
265 | return err; | ||
266 | } | ||
267 | |||
268 | static void mipspmu_del(struct perf_event *event, int flags) | ||
269 | { | ||
270 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
271 | struct hw_perf_event *hwc = &event->hw; | ||
272 | int idx = hwc->idx; | ||
273 | |||
274 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | ||
275 | |||
276 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
277 | cpuc->events[idx] = NULL; | ||
278 | clear_bit(idx, cpuc->used_mask); | ||
279 | |||
280 | perf_event_update_userpage(event); | ||
281 | } | ||
282 | |||
283 | static void mipspmu_read(struct perf_event *event) | ||
284 | { | ||
285 | struct hw_perf_event *hwc = &event->hw; | ||
286 | |||
287 | /* Don't read disabled counters! */ | ||
288 | if (hwc->idx < 0) | ||
289 | return; | ||
290 | |||
291 | mipspmu_event_update(event, hwc, hwc->idx); | ||
292 | } | ||
293 | |||
294 | static void mipspmu_enable(struct pmu *pmu) | ||
295 | { | ||
296 | if (mipspmu) | ||
297 | mipspmu->start(); | ||
298 | } | ||
299 | |||
300 | static void mipspmu_disable(struct pmu *pmu) | ||
301 | { | ||
302 | if (mipspmu) | ||
303 | mipspmu->stop(); | ||
304 | } | ||
305 | |||
306 | static atomic_t active_events = ATOMIC_INIT(0); | ||
307 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
308 | static int (*save_perf_irq)(void); | ||
309 | |||
310 | static int mipspmu_get_irq(void) | ||
311 | { | ||
312 | int err; | ||
313 | |||
314 | if (mipspmu->irq >= 0) { | ||
315 | /* Request my own irq handler. */ | ||
316 | err = request_irq(mipspmu->irq, mipspmu->handle_irq, | ||
317 | IRQF_DISABLED | IRQF_NOBALANCING, | ||
318 | "mips_perf_pmu", NULL); | ||
319 | if (err) { | ||
320 | pr_warning("Unable to request IRQ%d for MIPS " | ||
321 | "performance counters!\n", mipspmu->irq); | ||
322 | } | ||
323 | } else if (cp0_perfcount_irq < 0) { | ||
324 | /* | ||
325 | * We are sharing the irq number with the timer interrupt. | ||
326 | */ | ||
327 | save_perf_irq = perf_irq; | ||
328 | perf_irq = mipspmu->handle_shared_irq; | ||
329 | err = 0; | ||
330 | } else { | ||
331 | pr_warning("The platform hasn't properly defined its " | ||
332 | "interrupt controller.\n"); | ||
333 | err = -ENOENT; | ||
334 | } | ||
335 | |||
336 | return err; | ||
337 | } | ||
338 | |||
339 | static void mipspmu_free_irq(void) | ||
340 | { | ||
341 | if (mipspmu->irq >= 0) | ||
342 | free_irq(mipspmu->irq, NULL); | ||
343 | else if (cp0_perfcount_irq < 0) | ||
344 | perf_irq = save_perf_irq; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
349 | * specific low-level init routines. | ||
350 | */ | ||
351 | static void reset_counters(void *arg); | ||
352 | static int __hw_perf_event_init(struct perf_event *event); | ||
353 | |||
354 | static void hw_perf_event_destroy(struct perf_event *event) | ||
355 | { | ||
356 | if (atomic_dec_and_mutex_lock(&active_events, | ||
357 | &pmu_reserve_mutex)) { | ||
358 | /* | ||
359 | * We must not call the destroy function with interrupts | ||
360 | * disabled. | ||
361 | */ | ||
362 | on_each_cpu(reset_counters, | ||
363 | (void *)(long)mipspmu->num_counters, 1); | ||
364 | mipspmu_free_irq(); | ||
365 | mutex_unlock(&pmu_reserve_mutex); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | static int mipspmu_event_init(struct perf_event *event) | ||
370 | { | ||
371 | int err = 0; | ||
372 | |||
373 | switch (event->attr.type) { | ||
374 | case PERF_TYPE_RAW: | ||
375 | case PERF_TYPE_HARDWARE: | ||
376 | case PERF_TYPE_HW_CACHE: | ||
377 | break; | ||
378 | |||
379 | default: | ||
380 | return -ENOENT; | ||
381 | } | ||
382 | |||
383 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
384 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
385 | return -ENODEV; | ||
386 | |||
387 | if (!atomic_inc_not_zero(&active_events)) { | ||
388 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
389 | atomic_dec(&active_events); | ||
390 | return -ENOSPC; | ||
391 | } | ||
392 | |||
393 | mutex_lock(&pmu_reserve_mutex); | ||
394 | if (atomic_read(&active_events) == 0) | ||
395 | err = mipspmu_get_irq(); | ||
396 | |||
397 | if (!err) | ||
398 | atomic_inc(&active_events); | ||
399 | mutex_unlock(&pmu_reserve_mutex); | ||
400 | } | ||
401 | |||
402 | if (err) | ||
403 | return err; | ||
404 | |||
405 | err = __hw_perf_event_init(event); | ||
406 | if (err) | ||
407 | hw_perf_event_destroy(event); | ||
408 | |||
409 | return err; | ||
410 | } | ||
411 | |||
412 | static struct pmu pmu = { | ||
413 | .pmu_enable = mipspmu_enable, | ||
414 | .pmu_disable = mipspmu_disable, | ||
415 | .event_init = mipspmu_event_init, | ||
416 | .add = mipspmu_add, | ||
417 | .del = mipspmu_del, | ||
418 | .start = mipspmu_start, | ||
419 | .stop = mipspmu_stop, | ||
420 | .read = mipspmu_read, | ||
421 | }; | ||
422 | |||
423 | static inline unsigned int | ||
424 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) | ||
425 | { | ||
426 | /* | ||
427 | * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for | ||
428 | * event_id. | ||
429 | */ | ||
430 | #ifdef CONFIG_MIPS_MT_SMP | ||
431 | return ((unsigned int)pev->range << 24) | | ||
432 | (pev->cntr_mask & 0xffff00) | | ||
433 | (pev->event_id & 0xff); | ||
434 | #else | ||
435 | return (pev->cntr_mask & 0xffff00) | | ||
436 | (pev->event_id & 0xff); | ||
437 | #endif | ||
438 | } | ||
439 | |||
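The packing the comment describes puts the range in bits 31..24, the cntr_mask in bits 23..8 and the event_id in bits 7..0, which is why cntr_mask is filtered through 0xffff00 rather than shifted. A standalone encode/decode of that layout (the field values passed in are arbitrary examples):

#include <stdio.h>

static unsigned int encode(unsigned int range, unsigned int cntr_mask,
			   unsigned int event_id)
{
	/* Same packing as mipspmu_perf_event_encode() above. */
	return (range << 24) | (cntr_mask & 0xffff00) | (event_id & 0xff);
}

int main(void)
{
	unsigned int config = encode(2 /* P */, 0x55555555 /* CNTR_EVEN */, 0x14);

	printf("config   = %#x\n", config);		/* 0x2555514 */
	printf("range    = %u\n",  config >> 24);
	printf("cntrs    = %#x\n", config & 0xffff00);
	printf("event_id = %#x\n", config & 0xff);
	return 0;
}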
440 | static const struct mips_perf_event * | ||
441 | mipspmu_map_general_event(int idx) | ||
442 | { | ||
443 | const struct mips_perf_event *pev; | ||
444 | |||
445 | pev = ((*mipspmu->general_event_map)[idx].event_id == | ||
446 | UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) : | ||
447 | &(*mipspmu->general_event_map)[idx]); | ||
448 | |||
449 | return pev; | ||
450 | } | ||
451 | |||
452 | static const struct mips_perf_event * | ||
453 | mipspmu_map_cache_event(u64 config) | ||
454 | { | ||
455 | unsigned int cache_type, cache_op, cache_result; | ||
456 | const struct mips_perf_event *pev; | ||
457 | |||
458 | cache_type = (config >> 0) & 0xff; | ||
459 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
460 | return ERR_PTR(-EINVAL); | ||
461 | |||
462 | cache_op = (config >> 8) & 0xff; | ||
463 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
464 | return ERR_PTR(-EINVAL); | ||
465 | |||
466 | cache_result = (config >> 16) & 0xff; | ||
467 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
468 | return ERR_PTR(-EINVAL); | ||
469 | |||
470 | pev = &((*mipspmu->cache_event_map) | ||
471 | [cache_type] | ||
472 | [cache_op] | ||
473 | [cache_result]); | ||
474 | |||
475 | if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) | ||
476 | return ERR_PTR(-EOPNOTSUPP); | ||
477 | |||
478 | return pev; | ||
479 | |||
480 | } | ||
481 | |||
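The config word decoded here follows the generic perf cache-event layout: cache type in bits 7..0, op in bits 15..8, result in bits 23..16. Building the config for an L1-D read miss with the generic enum values (PERF_COUNT_HW_CACHE_L1D = 0, OP_READ = 0, RESULT_MISS = 1):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t config = 0 /* L1D */ | (0 /* OP_READ */ << 8)
			    | (1 /* RESULT_MISS */ << 16);

	printf("config = %#llx\n", (unsigned long long)config);	/* 0x10000 */
	printf("type=%llu op=%llu result=%llu\n",
	       (unsigned long long)(config & 0xff),
	       (unsigned long long)((config >> 8) & 0xff),
	       (unsigned long long)((config >> 16) & 0xff));
	return 0;
}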
482 | static int validate_event(struct cpu_hw_events *cpuc, | ||
483 | struct perf_event *event) | ||
484 | { | ||
485 | struct hw_perf_event fake_hwc = event->hw; | ||
486 | |||
487 | /* Allow mixed event group. So return 1 to pass validation. */ | ||
488 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) | ||
489 | return 1; | ||
490 | |||
491 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; | ||
492 | } | ||
493 | |||
494 | static int validate_group(struct perf_event *event) | ||
495 | { | ||
496 | struct perf_event *sibling, *leader = event->group_leader; | ||
497 | struct cpu_hw_events fake_cpuc; | ||
498 | |||
499 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); | ||
500 | |||
501 | if (!validate_event(&fake_cpuc, leader)) | ||
502 | return -ENOSPC; | ||
503 | |||
504 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | ||
505 | if (!validate_event(&fake_cpuc, sibling)) | ||
506 | return -ENOSPC; | ||
507 | } | ||
508 | |||
509 | if (!validate_event(&fake_cpuc, event)) | ||
510 | return -ENOSPC; | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | /* This is needed by specific irq handlers in perf_event_*.c */ | ||
516 | static void | ||
517 | handle_associated_event(struct cpu_hw_events *cpuc, | ||
518 | int idx, struct perf_sample_data *data, struct pt_regs *regs) | ||
519 | { | ||
520 | struct perf_event *event = cpuc->events[idx]; | ||
521 | struct hw_perf_event *hwc = &event->hw; | ||
522 | |||
523 | mipspmu_event_update(event, hwc, idx); | ||
524 | data->period = event->hw.last_period; | ||
525 | if (!mipspmu_event_set_period(event, hwc, idx)) | ||
526 | return; | ||
527 | |||
528 | if (perf_event_overflow(event, data, regs)) | ||
529 | mipspmu->disable_event(idx); | ||
530 | } | ||
531 | |||
532 | #include "perf_event_mipsxx.c" | ||
20 | 533 | ||
21 | /* Callchain handling code. */ | 534 | /* Callchain handling code. */ |
22 | 535 | ||
23 | /* | 536 | /* |
24 | * Leave userspace callchain empty for now. When we find a way to trace | 537 | * Leave userspace callchain empty for now. When we find a way to trace |
25 | * the user stack callchains, we will add it here. | 538 | * the user stack callchains, we will add it here. |
26 | */ | 539 | */ |
540 | void perf_callchain_user(struct perf_callchain_entry *entry, | ||
541 | struct pt_regs *regs) | ||
542 | { | ||
543 | } | ||
27 | 544 | ||
28 | static void save_raw_perf_callchain(struct perf_callchain_entry *entry, | 545 | static void save_raw_perf_callchain(struct perf_callchain_entry *entry, |
29 | unsigned long reg29) | 546 | unsigned long reg29) |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index d9c81c5a6c9..e5ad09a9baf 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -1,129 +1,22 @@ | |||
1 | /* | 1 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \ |
2 | * Linux performance counter support for MIPS. | 2 | defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1) |
3 | * | ||
4 | * Copyright (C) 2010 MIPS Technologies, Inc. | ||
5 | * Copyright (C) 2011 Cavium Networks, Inc. | ||
6 | * Author: Deng-Cheng Zhu | ||
7 | * | ||
8 | * This code is based on the implementation for ARM, which is in turn | ||
9 | * based on the sparc64 perf event code and the x86 code. Performance | ||
10 | * counter access is based on the MIPS Oprofile code. And the callchain | ||
11 | * support references the code of MIPS stacktrace.c. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License version 2 as | ||
15 | * published by the Free Software Foundation. | ||
16 | */ | ||
17 | |||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/perf_event.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | |||
25 | #include <asm/irq.h> | ||
26 | #include <asm/irq_regs.h> | ||
27 | #include <asm/stacktrace.h> | ||
28 | #include <asm/time.h> /* For perf_irq */ | ||
29 | |||
30 | #define MIPS_MAX_HWEVENTS 4 | ||
31 | #define MIPS_TCS_PER_COUNTER 2 | ||
32 | #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1) | ||
33 | |||
34 | struct cpu_hw_events { | ||
35 | /* Array of events on this cpu. */ | ||
36 | struct perf_event *events[MIPS_MAX_HWEVENTS]; | ||
37 | |||
38 | /* | ||
39 | * Set the bit (indexed by the counter number) when the counter | ||
40 | * is used for an event. | ||
41 | */ | ||
42 | unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; | ||
43 | |||
44 | /* | ||
45 | * Software copy of the control register for each performance counter. | ||
46 | * MIPS CPUs vary in performance counters. They use this differently, | ||
47 | * and even may not use it. | ||
48 | */ | ||
49 | unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; | ||
50 | }; | ||
51 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | ||
52 | .saved_ctrl = {0}, | ||
53 | }; | ||
54 | |||
55 | /* The description of MIPS performance events. */ | ||
56 | struct mips_perf_event { | ||
57 | unsigned int event_id; | ||
58 | /* | ||
59 | * MIPS performance counters are indexed starting from 0. | ||
60 | * CNTR_EVEN indicates the indexes of the counters to be used are | ||
61 | * even numbers. | ||
62 | */ | ||
63 | unsigned int cntr_mask; | ||
64 | #define CNTR_EVEN 0x55555555 | ||
65 | #define CNTR_ODD 0xaaaaaaaa | ||
66 | #define CNTR_ALL 0xffffffff | ||
67 | #ifdef CONFIG_MIPS_MT_SMP | ||
68 | enum { | ||
69 | T = 0, | ||
70 | V = 1, | ||
71 | P = 2, | ||
72 | } range; | ||
73 | #else | ||
74 | #define T | ||
75 | #define V | ||
76 | #define P | ||
77 | #endif | ||
78 | }; | ||
79 | |||
80 | static struct mips_perf_event raw_event; | ||
81 | static DEFINE_MUTEX(raw_event_mutex); | ||
82 | |||
83 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
84 | |||
85 | struct mips_pmu { | ||
86 | u64 max_period; | ||
87 | u64 valid_count; | ||
88 | u64 overflow; | ||
89 | const char *name; | ||
90 | int irq; | ||
91 | u64 (*read_counter)(unsigned int idx); | ||
92 | void (*write_counter)(unsigned int idx, u64 val); | ||
93 | const struct mips_perf_event *(*map_raw_event)(u64 config); | ||
94 | const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; | ||
95 | const struct mips_perf_event (*cache_event_map) | ||
96 | [PERF_COUNT_HW_CACHE_MAX] | ||
97 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
98 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
99 | unsigned int num_counters; | ||
100 | }; | ||
101 | |||
102 | static struct mips_pmu mipspmu; | ||
103 | 3 | ||
104 | #define M_CONFIG1_PC (1 << 4) | 4 | #define M_CONFIG1_PC (1 << 4) |
105 | 5 | ||
106 | #define M_PERFCTL_EXL (1 << 0) | 6 | #define M_PERFCTL_EXL (1UL << 0) |
107 | #define M_PERFCTL_KERNEL (1 << 1) | 7 | #define M_PERFCTL_KERNEL (1UL << 1) |
108 | #define M_PERFCTL_SUPERVISOR (1 << 2) | 8 | #define M_PERFCTL_SUPERVISOR (1UL << 2) |
109 | #define M_PERFCTL_USER (1 << 3) | 9 | #define M_PERFCTL_USER (1UL << 3) |
110 | #define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) | 10 | #define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4) |
111 | #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) | 11 | #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) |
112 | #define M_PERFCTL_VPEID(vpe) ((vpe) << 16) | 12 | #define M_PERFCTL_VPEID(vpe) ((vpe) << 16) |
113 | |||
114 | #ifdef CONFIG_CPU_BMIPS5000 | ||
115 | #define M_PERFCTL_MT_EN(filter) 0 | ||
116 | #else /* !CONFIG_CPU_BMIPS5000 */ | ||
117 | #define M_PERFCTL_MT_EN(filter) ((filter) << 20) | 13 | #define M_PERFCTL_MT_EN(filter) ((filter) << 20) |
118 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
119 | |||
120 | #define M_TC_EN_ALL M_PERFCTL_MT_EN(0) | 14 | #define M_TC_EN_ALL M_PERFCTL_MT_EN(0) |
121 | #define M_TC_EN_VPE M_PERFCTL_MT_EN(1) | 15 | #define M_TC_EN_VPE M_PERFCTL_MT_EN(1) |
122 | #define M_TC_EN_TC M_PERFCTL_MT_EN(2) | 16 | #define M_TC_EN_TC M_PERFCTL_MT_EN(2) |
123 | #define M_PERFCTL_TCID(tcid) ((tcid) << 22) | 17 | #define M_PERFCTL_TCID(tcid) ((tcid) << 22) |
124 | #define M_PERFCTL_WIDE (1 << 30) | 18 | #define M_PERFCTL_WIDE (1UL << 30) |
125 | #define M_PERFCTL_MORE (1 << 31) | 19 | #define M_PERFCTL_MORE (1UL << 31) |
126 | #define M_PERFCTL_TC (1 << 30) | ||
127 | 20 | ||
128 | #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ | 21 | #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ |
129 | M_PERFCTL_KERNEL | \ | 22 | M_PERFCTL_KERNEL | \ |
@@ -138,680 +31,238 @@ static struct mips_pmu mipspmu; | |||
138 | #endif | 31 | #endif |
139 | #define M_PERFCTL_EVENT_MASK 0xfe0 | 32 | #define M_PERFCTL_EVENT_MASK 0xfe0 |
140 | 33 | ||
34 | #define M_COUNTER_OVERFLOW (1UL << 31) | ||
141 | 35 | ||
142 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | 36 | #ifdef CONFIG_MIPS_MT_SMP |
143 | static int cpu_has_mipsmt_pertccounters; | 37 | static int cpu_has_mipsmt_pertccounters; |
144 | 38 | ||
145 | static DEFINE_RWLOCK(pmuint_rwlock); | ||
146 | |||
147 | #if defined(CONFIG_CPU_BMIPS5000) | ||
148 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ | ||
149 | 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK)) | ||
150 | #else | ||
151 | /* | 39 | /* |
152 | * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because | 40 | * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because |
153 | * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. | 41 | * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. |
154 | */ | 42 | */ |
43 | #if defined(CONFIG_HW_PERF_EVENTS) | ||
44 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ | ||
45 | 0 : smp_processor_id()) | ||
46 | #else | ||
155 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ | 47 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ |
156 | 0 : smp_processor_id()) | 48 | 0 : cpu_data[smp_processor_id()].vpe_id) |
157 | #endif | 49 | #endif |
158 | 50 | ||
159 | /* Copied from op_model_mipsxx.c */ | 51 | /* Copied from op_model_mipsxx.c */ |
160 | static unsigned int vpe_shift(void) | 52 | static inline unsigned int vpe_shift(void) |
161 | { | 53 | { |
162 | if (num_possible_cpus() > 1) | 54 | if (num_possible_cpus() > 1) |
163 | return 1; | 55 | return 1; |
164 | 56 | ||
165 | return 0; | 57 | return 0; |
166 | } | 58 | } |
59 | #else /* !CONFIG_MIPS_MT_SMP */ | ||
60 | #define vpe_id() 0 | ||
167 | 61 | ||
168 | static unsigned int counters_total_to_per_cpu(unsigned int counters) | 62 | static inline unsigned int vpe_shift(void) |
169 | { | 63 | { |
170 | return counters >> vpe_shift(); | 64 | return 0; |
171 | } | 65 | } |
66 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
172 | 67 | ||
173 | #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */ | 68 | static inline unsigned int |
174 | #define vpe_id() 0 | 69 | counters_total_to_per_cpu(unsigned int counters) |
175 | 70 | { | |
176 | #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */ | 71 | return counters >> vpe_shift(); |
72 | } | ||
177 | 73 | ||
178 | static void resume_local_counters(void); | 74 | static inline unsigned int |
179 | static void pause_local_counters(void); | 75 | counters_per_cpu_to_total(unsigned int counters) |
180 | static irqreturn_t mipsxx_pmu_handle_irq(int, void *); | 76 | { |
181 | static int mipsxx_pmu_handle_shared_irq(void); | 77 | return counters << vpe_shift(); |
78 | } | ||
182 | 79 | ||
183 | static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx) | 80 | #define __define_perf_accessors(r, n, np) \ |
81 | \ | ||
82 | static inline unsigned int r_c0_ ## r ## n(void) \ | ||
83 | { \ | ||
84 | unsigned int cpu = vpe_id(); \ | ||
85 | \ | ||
86 | switch (cpu) { \ | ||
87 | case 0: \ | ||
88 | return read_c0_ ## r ## n(); \ | ||
89 | case 1: \ | ||
90 | return read_c0_ ## r ## np(); \ | ||
91 | default: \ | ||
92 | BUG(); \ | ||
93 | } \ | ||
94 | return 0; \ | ||
95 | } \ | ||
96 | \ | ||
97 | static inline void w_c0_ ## r ## n(unsigned int value) \ | ||
98 | { \ | ||
99 | unsigned int cpu = vpe_id(); \ | ||
100 | \ | ||
101 | switch (cpu) { \ | ||
102 | case 0: \ | ||
103 | write_c0_ ## r ## n(value); \ | ||
104 | return; \ | ||
105 | case 1: \ | ||
106 | write_c0_ ## r ## np(value); \ | ||
107 | return; \ | ||
108 | default: \ | ||
109 | BUG(); \ | ||
110 | } \ | ||
111 | return; \ | ||
112 | } \ | ||
113 | |||
114 | __define_perf_accessors(perfcntr, 0, 2) | ||
115 | __define_perf_accessors(perfcntr, 1, 3) | ||
116 | __define_perf_accessors(perfcntr, 2, 0) | ||
117 | __define_perf_accessors(perfcntr, 3, 1) | ||
118 | |||
119 | __define_perf_accessors(perfctrl, 0, 2) | ||
120 | __define_perf_accessors(perfctrl, 1, 3) | ||
121 | __define_perf_accessors(perfctrl, 2, 0) | ||
122 | __define_perf_accessors(perfctrl, 3, 1) | ||
123 | |||
124 | static inline int __n_counters(void) | ||
184 | { | 125 | { |
185 | if (vpe_id() == 1) | 126 | if (!(read_c0_config1() & M_CONFIG1_PC)) |
186 | idx = (idx + 2) & 3; | 127 | return 0; |
187 | return idx; | 128 | if (!(read_c0_perfctrl0() & M_PERFCTL_MORE)) |
129 | return 1; | ||
130 | if (!(read_c0_perfctrl1() & M_PERFCTL_MORE)) | ||
131 | return 2; | ||
132 | if (!(read_c0_perfctrl2() & M_PERFCTL_MORE)) | ||
133 | return 3; | ||
134 | |||
135 | return 4; | ||
188 | } | 136 | } |
189 | 137 | ||
190 | static u64 mipsxx_pmu_read_counter(unsigned int idx) | 138 | static inline int n_counters(void) |
191 | { | 139 | { |
192 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | 140 | int counters; |
141 | |||
142 | switch (current_cpu_type()) { | ||
143 | case CPU_R10000: | ||
144 | counters = 2; | ||
145 | break; | ||
146 | |||
147 | case CPU_R12000: | ||
148 | case CPU_R14000: | ||
149 | counters = 4; | ||
150 | break; | ||
193 | 151 | ||
194 | switch (idx) { | ||
195 | case 0: | ||
196 | /* | ||
197 | * The counters are unsigned, we must cast to truncate | ||
198 | * off the high bits. | ||
199 | */ | ||
200 | return (u32)read_c0_perfcntr0(); | ||
201 | case 1: | ||
202 | return (u32)read_c0_perfcntr1(); | ||
203 | case 2: | ||
204 | return (u32)read_c0_perfcntr2(); | ||
205 | case 3: | ||
206 | return (u32)read_c0_perfcntr3(); | ||
207 | default: | 152 | default: |
208 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | 153 | counters = __n_counters(); |
209 | return 0; | ||
210 | } | 154 | } |
155 | |||
156 | return counters; | ||
211 | } | 157 | } |
212 | 158 | ||
213 | static u64 mipsxx_pmu_read_counter_64(unsigned int idx) | 159 | static void reset_counters(void *arg) |
214 | { | 160 | { |
215 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | 161 | int counters = (int)(long)arg; |
216 | 162 | switch (counters) { | |
217 | switch (idx) { | 163 | case 4: |
218 | case 0: | 164 | w_c0_perfctrl3(0); |
219 | return read_c0_perfcntr0_64(); | 165 | w_c0_perfcntr3(0); |
220 | case 1: | ||
221 | return read_c0_perfcntr1_64(); | ||
222 | case 2: | ||
223 | return read_c0_perfcntr2_64(); | ||
224 | case 3: | 166 | case 3: |
225 | return read_c0_perfcntr3_64(); | 167 | w_c0_perfctrl2(0); |
226 | default: | 168 | w_c0_perfcntr2(0); |
227 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | 169 | case 2: |
228 | return 0; | 170 | w_c0_perfctrl1(0); |
171 | w_c0_perfcntr1(0); | ||
172 | case 1: | ||
173 | w_c0_perfctrl0(0); | ||
174 | w_c0_perfcntr0(0); | ||
229 | } | 175 | } |
230 | } | 176 | } |
231 | 177 | ||
232 | static void mipsxx_pmu_write_counter(unsigned int idx, u64 val) | 178 | static inline u64 |
179 | mipsxx_pmu_read_counter(unsigned int idx) | ||
233 | { | 180 | { |
234 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
235 | |||
236 | switch (idx) { | 181 | switch (idx) { |
237 | case 0: | 182 | case 0: |
238 | write_c0_perfcntr0(val); | 183 | return r_c0_perfcntr0(); |
239 | return; | ||
240 | case 1: | 184 | case 1: |
241 | write_c0_perfcntr1(val); | 185 | return r_c0_perfcntr1(); |
242 | return; | ||
243 | case 2: | 186 | case 2: |
244 | write_c0_perfcntr2(val); | 187 | return r_c0_perfcntr2(); |
245 | return; | ||
246 | case 3: | 188 | case 3: |
247 | write_c0_perfcntr3(val); | 189 | return r_c0_perfcntr3(); |
248 | return; | 190 | default: |
191 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | ||
192 | return 0; | ||
249 | } | 193 | } |
250 | } | 194 | } |
251 | 195 | ||
252 | static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val) | 196 | static inline void |
197 | mipsxx_pmu_write_counter(unsigned int idx, u64 val) | ||
253 | { | 198 | { |
254 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
255 | |||
256 | switch (idx) { | 199 | switch (idx) { |
257 | case 0: | 200 | case 0: |
258 | write_c0_perfcntr0_64(val); | 201 | w_c0_perfcntr0(val); |
259 | return; | 202 | return; |
260 | case 1: | 203 | case 1: |
261 | write_c0_perfcntr1_64(val); | 204 | w_c0_perfcntr1(val); |
262 | return; | 205 | return; |
263 | case 2: | 206 | case 2: |
264 | write_c0_perfcntr2_64(val); | 207 | w_c0_perfcntr2(val); |
265 | return; | 208 | return; |
266 | case 3: | 209 | case 3: |
267 | write_c0_perfcntr3_64(val); | 210 | w_c0_perfcntr3(val); |
268 | return; | 211 | return; |
269 | } | 212 | } |
270 | } | 213 | } |
271 | 214 | ||
272 | static unsigned int mipsxx_pmu_read_control(unsigned int idx) | 215 | static inline unsigned int |
216 | mipsxx_pmu_read_control(unsigned int idx) | ||
273 | { | 217 | { |
274 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
275 | |||
276 | switch (idx) { | 218 | switch (idx) { |
277 | case 0: | 219 | case 0: |
278 | return read_c0_perfctrl0(); | 220 | return r_c0_perfctrl0(); |
279 | case 1: | 221 | case 1: |
280 | return read_c0_perfctrl1(); | 222 | return r_c0_perfctrl1(); |
281 | case 2: | 223 | case 2: |
282 | return read_c0_perfctrl2(); | 224 | return r_c0_perfctrl2(); |
283 | case 3: | 225 | case 3: |
284 | return read_c0_perfctrl3(); | 226 | return r_c0_perfctrl3(); |
285 | default: | 227 | default: |
286 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | 228 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); |
287 | return 0; | 229 | return 0; |
288 | } | 230 | } |
289 | } | 231 | } |
290 | 232 | ||
291 | static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val) | 233 | static inline void |
234 | mipsxx_pmu_write_control(unsigned int idx, unsigned int val) | ||
292 | { | 235 | { |
293 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
294 | |||
295 | switch (idx) { | 236 | switch (idx) { |
296 | case 0: | 237 | case 0: |
297 | write_c0_perfctrl0(val); | 238 | w_c0_perfctrl0(val); |
298 | return; | 239 | return; |
299 | case 1: | 240 | case 1: |
300 | write_c0_perfctrl1(val); | 241 | w_c0_perfctrl1(val); |
301 | return; | 242 | return; |
302 | case 2: | 243 | case 2: |
303 | write_c0_perfctrl2(val); | 244 | w_c0_perfctrl2(val); |
304 | return; | 245 | return; |
305 | case 3: | 246 | case 3: |
306 | write_c0_perfctrl3(val); | 247 | w_c0_perfctrl3(val); |
307 | return; | 248 | return; |
308 | } | 249 | } |
309 | } | 250 | } |
310 | 251 | ||
311 | static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, | ||
312 | struct hw_perf_event *hwc) | ||
313 | { | ||
314 | int i; | ||
315 | |||
316 | /* | ||
317 | * We only need to care about the counter mask. The range | ||
318 | * has already been checked. | ||
319 | */ | ||
320 | unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; | ||
321 | |||
322 | for (i = mipspmu.num_counters - 1; i >= 0; i--) { | ||
323 | /* | ||
324 | * Note that some MIPS perf events can be counted by both | ||
325 | * even and odd counters, whereas many others can use only | ||
326 | * even _or_ odd counters. This introduces an issue: when | ||
327 | * the former kind of event takes the counter the latter | ||
328 | * kind of event wants to use, "counter allocation" for the | ||
329 | * latter event will fail. If the two could be swapped | ||
330 | * dynamically, both would be satisfied, but we leave this | ||
331 | * issue alone for now. | ||
332 | */ | ||
333 | if (test_bit(i, &cntr_mask) && | ||
334 | !test_and_set_bit(i, cpuc->used_mask)) | ||
335 | return i; | ||
336 | } | ||
337 | |||
338 | return -EAGAIN; | ||
339 | } | ||
340 | |||
341 | static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | ||
342 | { | ||
343 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
344 | |||
345 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
346 | |||
347 | cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | | ||
348 | (evt->config_base & M_PERFCTL_CONFIG_MASK) | | ||
349 | /* Make sure the interrupt is enabled. */ | ||
350 | M_PERFCTL_INTERRUPT_ENABLE; | ||
351 | if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) | ||
352 | /* enable the counter for the calling thread */ | ||
353 | cpuc->saved_ctrl[idx] |= | ||
354 | (1 << (12 + vpe_id())) | M_PERFCTL_TC; | ||
355 | |||
356 | /* | ||
357 | * We do not actually let the counter run. Leave it until start(). | ||
358 | */ | ||
359 | } | ||
360 | |||
361 | static void mipsxx_pmu_disable_event(int idx) | ||
362 | { | ||
363 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
364 | unsigned long flags; | ||
365 | |||
366 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
367 | |||
368 | local_irq_save(flags); | ||
369 | cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & | ||
370 | ~M_PERFCTL_COUNT_EVENT_WHENEVER; | ||
371 | mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); | ||
372 | local_irq_restore(flags); | ||
373 | } | ||
374 | |||
375 | static int mipspmu_event_set_period(struct perf_event *event, | ||
376 | struct hw_perf_event *hwc, | ||
377 | int idx) | ||
378 | { | ||
379 | u64 left = local64_read(&hwc->period_left); | ||
380 | u64 period = hwc->sample_period; | ||
381 | int ret = 0; | ||
382 | |||
383 | if (unlikely((left + period) & (1ULL << 63))) { | ||
384 | /* left underflowed by more than period. */ | ||
385 | left = period; | ||
386 | local64_set(&hwc->period_left, left); | ||
387 | hwc->last_period = period; | ||
388 | ret = 1; | ||
389 | } else if (unlikely((left + period) <= period)) { | ||
390 | /* left underflowed by less than period. */ | ||
391 | left += period; | ||
392 | local64_set(&hwc->period_left, left); | ||
393 | hwc->last_period = period; | ||
394 | ret = 1; | ||
395 | } | ||
396 | |||
397 | if (left > mipspmu.max_period) { | ||
398 | left = mipspmu.max_period; | ||
399 | local64_set(&hwc->period_left, left); | ||
400 | } | ||
401 | |||
402 | local64_set(&hwc->prev_count, mipspmu.overflow - left); | ||
403 | |||
404 | mipspmu.write_counter(idx, mipspmu.overflow - left); | ||
405 | |||
406 | perf_event_update_userpage(event); | ||
407 | |||
408 | return ret; | ||
409 | } | ||
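The key write is mipspmu.overflow - left: the counter starts close enough to its overflow point that it overflows after exactly `left` more events. A sketch of the arithmetic, assuming for illustration a counter whose bit 31 signals overflow and a period with 10000 events remaining:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative values only: bit 31 is the overflow bit. */
        uint64_t overflow = 1ULL << 31;
        uint64_t left = 10000;
        uint64_t start = overflow - left;

        printf("counter written as 0x%llx\n", (unsigned long long)start);
        /* Prints 0x7fffd8f0; after `left` increments the counter
         * reaches 0x80000000 and the PMU raises its interrupt. */
        return 0;
}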
410 | |||
411 | static void mipspmu_event_update(struct perf_event *event, | ||
412 | struct hw_perf_event *hwc, | ||
413 | int idx) | ||
414 | { | ||
415 | u64 prev_raw_count, new_raw_count; | ||
416 | u64 delta; | ||
417 | |||
418 | again: | ||
419 | prev_raw_count = local64_read(&hwc->prev_count); | ||
420 | new_raw_count = mipspmu.read_counter(idx); | ||
421 | |||
422 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
423 | new_raw_count) != prev_raw_count) | ||
424 | goto again; | ||
425 | |||
426 | delta = new_raw_count - prev_raw_count; | ||
427 | |||
428 | local64_add(delta, &event->count); | ||
429 | local64_sub(delta, &hwc->period_left); | ||
430 | } | ||
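The again-loop is the standard lock-free update: snapshot prev_count, read the hardware counter, and retry if an interrupt updated prev_count in between; unsigned subtraction then yields the delta even across counter wraparound. The same pattern in a self-contained sketch using compiler atomics in place of the kernel's local64 helpers:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the hardware counter read; a real PMU would be
 * counting events behind our back. */
static uint64_t hw_counter = 12345;
static uint64_t read_hw_counter(void) { return hw_counter; }

static uint64_t prev_count;

static uint64_t event_delta(void)
{
        uint64_t prev, cur;

        do {
                prev = __atomic_load_n(&prev_count, __ATOMIC_RELAXED);
                cur = read_hw_counter();
                /* Retry if another context updated prev_count meanwhile. */
        } while (!__atomic_compare_exchange_n(&prev_count, &prev, cur, 0,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));

        return cur - prev;      /* unsigned subtraction survives wraparound */
}

int main(void)
{
        printf("delta = %llu\n", (unsigned long long)event_delta());
        return 0;
}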
431 | |||
432 | static void mipspmu_start(struct perf_event *event, int flags) | ||
433 | { | ||
434 | struct hw_perf_event *hwc = &event->hw; | ||
435 | |||
436 | if (flags & PERF_EF_RELOAD) | ||
437 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
438 | |||
439 | hwc->state = 0; | ||
440 | |||
441 | /* Set the period for the event. */ | ||
442 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
443 | |||
444 | /* Enable the event. */ | ||
445 | mipsxx_pmu_enable_event(hwc, hwc->idx); | ||
446 | } | ||
447 | |||
448 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
449 | { | ||
450 | struct hw_perf_event *hwc = &event->hw; | ||
451 | |||
452 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
453 | /* We are working on a local event. */ | ||
454 | mipsxx_pmu_disable_event(hwc->idx); | ||
455 | barrier(); | ||
456 | mipspmu_event_update(event, hwc, hwc->idx); | ||
457 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | static int mipspmu_add(struct perf_event *event, int flags) | ||
462 | { | ||
463 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
464 | struct hw_perf_event *hwc = &event->hw; | ||
465 | int idx; | ||
466 | int err = 0; | ||
467 | |||
468 | perf_pmu_disable(event->pmu); | ||
469 | |||
470 | /* Look for a free counter for this event. */ | ||
471 | idx = mipsxx_pmu_alloc_counter(cpuc, hwc); | ||
472 | if (idx < 0) { | ||
473 | err = idx; | ||
474 | goto out; | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * If there is an event in the counter we are going to use then | ||
479 | * make sure it is disabled. | ||
480 | */ | ||
481 | event->hw.idx = idx; | ||
482 | mipsxx_pmu_disable_event(idx); | ||
483 | cpuc->events[idx] = event; | ||
484 | |||
485 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
486 | if (flags & PERF_EF_START) | ||
487 | mipspmu_start(event, PERF_EF_RELOAD); | ||
488 | |||
489 | /* Propagate our changes to the userspace mapping. */ | ||
490 | perf_event_update_userpage(event); | ||
491 | |||
492 | out: | ||
493 | perf_pmu_enable(event->pmu); | ||
494 | return err; | ||
495 | } | ||
496 | |||
497 | static void mipspmu_del(struct perf_event *event, int flags) | ||
498 | { | ||
499 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
500 | struct hw_perf_event *hwc = &event->hw; | ||
501 | int idx = hwc->idx; | ||
502 | |||
503 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
504 | |||
505 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
506 | cpuc->events[idx] = NULL; | ||
507 | clear_bit(idx, cpuc->used_mask); | ||
508 | |||
509 | perf_event_update_userpage(event); | ||
510 | } | ||
511 | |||
512 | static void mipspmu_read(struct perf_event *event) | ||
513 | { | ||
514 | struct hw_perf_event *hwc = &event->hw; | ||
515 | |||
516 | /* Don't read disabled counters! */ | ||
517 | if (hwc->idx < 0) | ||
518 | return; | ||
519 | |||
520 | mipspmu_event_update(event, hwc, hwc->idx); | ||
521 | } | ||
522 | |||
523 | static void mipspmu_enable(struct pmu *pmu) | ||
524 | { | ||
525 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
526 | write_unlock(&pmuint_rwlock); | ||
527 | #endif | ||
528 | resume_local_counters(); | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * MIPS performance counters can be per-TC. The control registers | ||
533 | * cannot be directly accessed across CPUs. Hence if we want global | ||
534 | * control, we need cross-CPU calls; on_each_cpu() can help, but we | ||
535 | * cannot make sure this function is called with interrupts enabled. | ||
536 | * So here we pause local counters, then grab a rwlock and leave the | ||
537 | * counters on other CPUs alone. If any counter interrupt is raised | ||
538 | * while we own the write lock, simply pause local counters on that | ||
539 | * CPU and spin in the handler. We also know we won't be switched to | ||
540 | * another CPU between pausing local counters and grabbing the lock. | ||
541 | */ | ||
542 | static void mipspmu_disable(struct pmu *pmu) | ||
543 | { | ||
544 | pause_local_counters(); | ||
545 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
546 | write_lock(&pmuint_rwlock); | ||
547 | #endif | ||
548 | } | ||
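Reading the comment above as two cooperating sides: the disable path pauses its local counters and takes the write lock, while an overflow handler on another CPU pauses its own counters first and then blocks on the read lock until the writer is done. A rough userspace analogue with a pthread rwlock, where pause/resume are empty stubs standing in for the real counter manipulation:

#include <pthread.h>

static pthread_rwlock_t pmuint_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static void pause_local_counters(void) { /* stop the local PMU */ }
static void resume_local_counters(void) { /* restart the local PMU */ }

void pmu_disable_side(void)
{
        pause_local_counters();                /* no PMU IRQs locally now */
        pthread_rwlock_wrlock(&pmuint_rwlock); /* exclude all handlers */
        /* ... reprogram counters globally ... */
}

void pmu_enable_side(void)
{
        pthread_rwlock_unlock(&pmuint_rwlock);
        resume_local_counters();
}

void irq_handler_side(void)
{
        pause_local_counters();                /* quiesce before locking */
        pthread_rwlock_rdlock(&pmuint_rwlock); /* waits out any writer */
        /* ... service overflowed counters ... */
        pthread_rwlock_unlock(&pmuint_rwlock);
        resume_local_counters();
}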
549 | |||
550 | static atomic_t active_events = ATOMIC_INIT(0); | ||
551 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
552 | static int (*save_perf_irq)(void); | ||
553 | |||
554 | static int mipspmu_get_irq(void) | ||
555 | { | ||
556 | int err; | ||
557 | |||
558 | if (mipspmu.irq >= 0) { | ||
559 | /* Request my own irq handler. */ | ||
560 | err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, | ||
561 | IRQF_PERCPU | IRQF_NOBALANCING, | ||
562 | "mips_perf_pmu", NULL); | ||
563 | if (err) { | ||
564 | pr_warning("Unable to request IRQ%d for MIPS " | ||
565 | "performance counters!\n", mipspmu.irq); | ||
566 | } | ||
567 | } else if (cp0_perfcount_irq < 0) { | ||
568 | /* | ||
569 | * We are sharing the irq number with the timer interrupt. | ||
570 | */ | ||
571 | save_perf_irq = perf_irq; | ||
572 | perf_irq = mipsxx_pmu_handle_shared_irq; | ||
573 | err = 0; | ||
574 | } else { | ||
575 | pr_warning("The platform hasn't properly defined its " | ||
576 | "interrupt controller.\n"); | ||
577 | err = -ENOENT; | ||
578 | } | ||
579 | |||
580 | return err; | ||
581 | } | ||
582 | |||
583 | static void mipspmu_free_irq(void) | ||
584 | { | ||
585 | if (mipspmu.irq >= 0) | ||
586 | free_irq(mipspmu.irq, NULL); | ||
587 | else if (cp0_perfcount_irq < 0) | ||
588 | perf_irq = save_perf_irq; | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * mipsxx/rm9000/loongson2 have different performance counters; each | ||
593 | * has its own low-level init routine. | ||
594 | */ | ||
595 | static void reset_counters(void *arg); | ||
596 | static int __hw_perf_event_init(struct perf_event *event); | ||
597 | |||
598 | static void hw_perf_event_destroy(struct perf_event *event) | ||
599 | { | ||
600 | if (atomic_dec_and_mutex_lock(&active_events, | ||
601 | &pmu_reserve_mutex)) { | ||
602 | /* | ||
603 | * We must not call the destroy function with interrupts | ||
604 | * disabled. | ||
605 | */ | ||
606 | on_each_cpu(reset_counters, | ||
607 | (void *)(long)mipspmu.num_counters, 1); | ||
608 | mipspmu_free_irq(); | ||
609 | mutex_unlock(&pmu_reserve_mutex); | ||
610 | } | ||
611 | } | ||
612 | |||
613 | static int mipspmu_event_init(struct perf_event *event) | ||
614 | { | ||
615 | int err = 0; | ||
616 | |||
617 | /* does not support taken branch sampling */ | ||
618 | if (has_branch_stack(event)) | ||
619 | return -EOPNOTSUPP; | ||
620 | |||
621 | switch (event->attr.type) { | ||
622 | case PERF_TYPE_RAW: | ||
623 | case PERF_TYPE_HARDWARE: | ||
624 | case PERF_TYPE_HW_CACHE: | ||
625 | break; | ||
626 | |||
627 | default: | ||
628 | return -ENOENT; | ||
629 | } | ||
630 | |||
631 | if (event->cpu >= nr_cpumask_bits || | ||
632 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
633 | return -ENODEV; | ||
634 | |||
635 | if (!atomic_inc_not_zero(&active_events)) { | ||
636 | mutex_lock(&pmu_reserve_mutex); | ||
637 | if (atomic_read(&active_events) == 0) | ||
638 | err = mipspmu_get_irq(); | ||
639 | |||
640 | if (!err) | ||
641 | atomic_inc(&active_events); | ||
642 | mutex_unlock(&pmu_reserve_mutex); | ||
643 | } | ||
644 | |||
645 | if (err) | ||
646 | return err; | ||
647 | |||
648 | return __hw_perf_event_init(event); | ||
649 | } | ||
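The active_events logic is the usual "increment unless zero" refcount: a nonzero count is bumped locklessly, and only the 0 to 1 transition takes the mutex, so the IRQ is requested exactly once for the first event. A sketch of the idiom with C11 atomics and a pthread mutex; get_irq() is a stub standing in for mipspmu_get_irq():

#include <stdatomic.h>
#include <pthread.h>

static atomic_int active_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the real IRQ reservation; returns 0 on success. */
static int get_irq(void) { return 0; }

/* Bump the count unless it is zero; mirrors atomic_inc_not_zero(). */
static int inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);
        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return 1;
        }
        return 0;
}

int event_init_refcount(void)
{
        int err = 0;

        if (!inc_not_zero(&active_events)) {
                pthread_mutex_lock(&reserve_mutex);
                if (atomic_load(&active_events) == 0)
                        err = get_irq();        /* first user grabs the IRQ */
                if (!err)
                        atomic_fetch_add(&active_events, 1);
                pthread_mutex_unlock(&reserve_mutex);
        }
        return err;
}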
650 | |||
651 | static struct pmu pmu = { | ||
652 | .pmu_enable = mipspmu_enable, | ||
653 | .pmu_disable = mipspmu_disable, | ||
654 | .event_init = mipspmu_event_init, | ||
655 | .add = mipspmu_add, | ||
656 | .del = mipspmu_del, | ||
657 | .start = mipspmu_start, | ||
658 | .stop = mipspmu_stop, | ||
659 | .read = mipspmu_read, | ||
660 | }; | ||
661 | |||
662 | static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) | ||
663 | { | ||
664 | /* | ||
665 | * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for | ||
666 | * event_id. | ||
667 | */ | ||
668 | #ifdef CONFIG_MIPS_MT_SMP | 252 | #ifdef CONFIG_MIPS_MT_SMP |
669 | return ((unsigned int)pev->range << 24) | | 253 | static DEFINE_RWLOCK(pmuint_rwlock); |
670 | (pev->cntr_mask & 0xffff00) | | ||
671 | (pev->event_id & 0xff); | ||
672 | #else | ||
673 | return (pev->cntr_mask & 0xffff00) | | ||
674 | (pev->event_id & 0xff); | ||
675 | #endif | 254 | #endif |
676 | } | ||
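To make the packing concrete: with range 2 in the top byte, a cntr_mask already positioned in bits 8..23 (0x5500 here, chosen arbitrarily), and event 0x27 in the low byte, the encoded word is 0x02005527, and the `>> 8` in the counter allocator recovers the mask. A standalone sketch:

#include <stdio.h>

/* Field values below are purely illustrative. */
struct mini_event {
        unsigned int range;     /* top 8 bits */
        unsigned int cntr_mask; /* pre-shifted into bits 8..23 */
        unsigned int event_id;  /* low 8 bits */
};

static unsigned int encode(const struct mini_event *pev)
{
        return (pev->range << 24) |
               (pev->cntr_mask & 0xffff00) |
               (pev->event_id & 0xff);
}

int main(void)
{
        struct mini_event ev = { .range = 2, .cntr_mask = 0x5500,
                                 .event_id = 0x27 };
        unsigned int base = encode(&ev);

        printf("event_base = 0x%08x\n", base);                  /* 0x02005527 */
        printf("cntr_mask  = 0x%04x\n", (base >> 8) & 0xffff);  /* 0x0055 */
        return 0;
}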
677 | |||
678 | static const struct mips_perf_event *mipspmu_map_general_event(int idx) | ||
679 | { | ||
680 | |||
681 | if ((*mipspmu.general_event_map)[idx].cntr_mask == 0) | ||
682 | return ERR_PTR(-EOPNOTSUPP); | ||
683 | return &(*mipspmu.general_event_map)[idx]; | ||
684 | } | ||
685 | |||
686 | static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) | ||
687 | { | ||
688 | unsigned int cache_type, cache_op, cache_result; | ||
689 | const struct mips_perf_event *pev; | ||
690 | |||
691 | cache_type = (config >> 0) & 0xff; | ||
692 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
693 | return ERR_PTR(-EINVAL); | ||
694 | |||
695 | cache_op = (config >> 8) & 0xff; | ||
696 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
697 | return ERR_PTR(-EINVAL); | ||
698 | |||
699 | cache_result = (config >> 16) & 0xff; | ||
700 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
701 | return ERR_PTR(-EINVAL); | ||
702 | |||
703 | pev = &((*mipspmu.cache_event_map) | ||
704 | [cache_type] | ||
705 | [cache_op] | ||
706 | [cache_result]); | ||
707 | |||
708 | if (pev->cntr_mask == 0) | ||
709 | return ERR_PTR(-EOPNOTSUPP); | ||
710 | |||
711 | return pev; | ||
712 | |||
713 | } | ||
714 | |||
715 | static int validate_group(struct perf_event *event) | ||
716 | { | ||
717 | struct perf_event *sibling, *leader = event->group_leader; | ||
718 | struct cpu_hw_events fake_cpuc; | ||
719 | |||
720 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); | ||
721 | |||
722 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) | ||
723 | return -EINVAL; | ||
724 | |||
725 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | ||
726 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) | ||
727 | return -EINVAL; | ||
728 | } | ||
729 | |||
730 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) | ||
731 | return -EINVAL; | ||
732 | |||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | /* This is needed by specific irq handlers in perf_event_*.c */ | ||
737 | static void handle_associated_event(struct cpu_hw_events *cpuc, | ||
738 | int idx, struct perf_sample_data *data, | ||
739 | struct pt_regs *regs) | ||
740 | { | ||
741 | struct perf_event *event = cpuc->events[idx]; | ||
742 | struct hw_perf_event *hwc = &event->hw; | ||
743 | |||
744 | mipspmu_event_update(event, hwc, idx); | ||
745 | data->period = event->hw.last_period; | ||
746 | if (!mipspmu_event_set_period(event, hwc, idx)) | ||
747 | return; | ||
748 | |||
749 | if (perf_event_overflow(event, data, regs)) | ||
750 | mipsxx_pmu_disable_event(idx); | ||
751 | } | ||
752 | |||
753 | |||
754 | static int __n_counters(void) | ||
755 | { | ||
756 | if (!(read_c0_config1() & M_CONFIG1_PC)) | ||
757 | return 0; | ||
758 | if (!(read_c0_perfctrl0() & M_PERFCTL_MORE)) | ||
759 | return 1; | ||
760 | if (!(read_c0_perfctrl1() & M_PERFCTL_MORE)) | ||
761 | return 2; | ||
762 | if (!(read_c0_perfctrl2() & M_PERFCTL_MORE)) | ||
763 | return 3; | ||
764 | |||
765 | return 4; | ||
766 | } | ||
767 | |||
768 | static int n_counters(void) | ||
769 | { | ||
770 | int counters; | ||
771 | |||
772 | switch (current_cpu_type()) { | ||
773 | case CPU_R10000: | ||
774 | counters = 2; | ||
775 | break; | ||
776 | |||
777 | case CPU_R12000: | ||
778 | case CPU_R14000: | ||
779 | counters = 4; | ||
780 | break; | ||
781 | |||
782 | default: | ||
783 | counters = __n_counters(); | ||
784 | } | ||
785 | |||
786 | return counters; | ||
787 | } | ||
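The probe in __n_counters() works because Config1.PC says whether a PMU is present at all, and each control register's M (M_PERFCTL_MORE) bit says whether another counter follows, so the first clear M bit ends the count. The same walk over a made-up array of control values; the bit positions follow MIPS32 but the data is illustrative:

#include <stdio.h>

#define M_CONFIG1_PC   (1U << 4)
#define M_PERFCTL_MORE (1U << 31)

int main(void)
{
        /* Pretend reads of Config1 and PerfCtl0..3 on a 4-counter core:
         * ctl0..ctl2 have the M bit set, ctl3 does not. */
        unsigned int config1 = M_CONFIG1_PC;
        unsigned int ctl[4] = { M_PERFCTL_MORE, M_PERFCTL_MORE,
                                M_PERFCTL_MORE, 0 };
        int n = 0;

        if (config1 & M_CONFIG1_PC) {
                n = 1;
                while (n < 4 && (ctl[n - 1] & M_PERFCTL_MORE))
                        n++;
        }
        printf("%d counters\n", n);     /* prints 4 */
        return 0;
}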
788 | |||
789 | static void reset_counters(void *arg) | ||
790 | { | ||
791 | int counters = (int)(long)arg; | ||
792 | switch (counters) { | ||
793 | case 4: | ||
794 | mipsxx_pmu_write_control(3, 0); | ||
795 | mipspmu.write_counter(3, 0); | ||
796 | case 3: | ||
797 | mipsxx_pmu_write_control(2, 0); | ||
798 | mipspmu.write_counter(2, 0); | ||
799 | case 2: | ||
800 | mipsxx_pmu_write_control(1, 0); | ||
801 | mipspmu.write_counter(1, 0); | ||
802 | case 1: | ||
803 | mipsxx_pmu_write_control(0, 0); | ||
804 | mipspmu.write_counter(0, 0); | ||
805 | } | ||
806 | } | ||
807 | 255 | ||
808 | /* 24K/34K/1004K cores can share the same event map. */ | 256 | /* 24K/34K/1004K cores can share the same event map. */ |
809 | static const struct mips_perf_event mipsxxcore_event_map | 257 | static const struct mips_perf_event mipsxxcore_event_map |
810 | [PERF_COUNT_HW_MAX] = { | 258 | [PERF_COUNT_HW_MAX] = { |
811 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, | 259 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, |
812 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | 260 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, |
261 | [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
262 | [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
813 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, | 263 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, |
814 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, | 264 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, |
265 | [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
815 | }; | 266 | }; |
816 | 267 | ||
817 | /* 74K core has different branch event code. */ | 268 | /* 74K core has different branch event code. */ |
@@ -819,34 +270,11 @@ static const struct mips_perf_event mipsxx74Kcore_event_map | |||
819 | [PERF_COUNT_HW_MAX] = { | 270 | [PERF_COUNT_HW_MAX] = { |
820 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, | 271 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, |
821 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | 272 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, |
273 | [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
274 | [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
822 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, | 275 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, |
823 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, | 276 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, |
824 | }; | 277 | [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, |
825 | |||
826 | static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { | ||
827 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, | ||
828 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, | ||
829 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL }, | ||
830 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, | ||
831 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL }, | ||
832 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL }, | ||
833 | [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, | ||
834 | }; | ||
835 | |||
836 | static const struct mips_perf_event bmips5000_event_map | ||
837 | [PERF_COUNT_HW_MAX] = { | ||
838 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T }, | ||
839 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | ||
840 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, | ||
841 | }; | ||
842 | |||
843 | static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = { | ||
844 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, | ||
845 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */ | ||
846 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
847 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
848 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */ | ||
849 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ | ||
850 | }; | 278 | }; |
851 | 279 | ||
852 | /* 24K/34K/1004K cores can share the same cache event map. */ | 280 | /* 24K/34K/1004K cores can share the same cache event map. */ |
@@ -869,6 +297,10 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
869 | [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, | 297 | [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, |
870 | [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, | 298 | [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, |
871 | }, | 299 | }, |
300 | [C(OP_PREFETCH)] = { | ||
301 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
302 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
303 | }, | ||
872 | }, | 304 | }, |
873 | [C(L1I)] = { | 305 | [C(L1I)] = { |
874 | [C(OP_READ)] = { | 306 | [C(OP_READ)] = { |
@@ -885,6 +317,7 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
885 | * Note that MIPS has only "hit" events countable for | 317 | * Note that MIPS has only "hit" events countable for |
886 | * the prefetch operation. | 318 | * the prefetch operation. |
887 | */ | 319 | */ |
320 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
888 | }, | 321 | }, |
889 | }, | 322 | }, |
890 | [C(LL)] = { | 323 | [C(LL)] = { |
@@ -896,6 +329,10 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
896 | [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, | 329 | [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, |
897 | [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, | 330 | [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, |
898 | }, | 331 | }, |
332 | [C(OP_PREFETCH)] = { | ||
333 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
334 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
335 | }, | ||
899 | }, | 336 | }, |
900 | [C(DTLB)] = { | 337 | [C(DTLB)] = { |
901 | [C(OP_READ)] = { | 338 | [C(OP_READ)] = { |
@@ -906,6 +343,10 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
906 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, | 343 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, |
907 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, | 344 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, |
908 | }, | 345 | }, |
346 | [C(OP_PREFETCH)] = { | ||
347 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
348 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
349 | }, | ||
909 | }, | 350 | }, |
910 | [C(ITLB)] = { | 351 | [C(ITLB)] = { |
911 | [C(OP_READ)] = { | 352 | [C(OP_READ)] = { |
@@ -916,6 +357,10 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
916 | [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, | 357 | [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, |
917 | [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, | 358 | [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, |
918 | }, | 359 | }, |
360 | [C(OP_PREFETCH)] = { | ||
361 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
362 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
363 | }, | ||
919 | }, | 364 | }, |
920 | [C(BPU)] = { | 365 | [C(BPU)] = { |
921 | /* Using the same code for *HW_BRANCH* */ | 366 | /* Using the same code for *HW_BRANCH* */ |
@@ -927,6 +372,24 @@ static const struct mips_perf_event mipsxxcore_cache_map | |||
927 | [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, | 372 | [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, |
928 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | 373 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, |
929 | }, | 374 | }, |
375 | [C(OP_PREFETCH)] = { | ||
376 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
377 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
378 | }, | ||
379 | }, | ||
380 | [C(NODE)] = { | ||
381 | [C(OP_READ)] = { | ||
382 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
383 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
384 | }, | ||
385 | [C(OP_WRITE)] = { | ||
386 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
387 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
388 | }, | ||
389 | [C(OP_PREFETCH)] = { | ||
390 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
391 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
392 | }, | ||
930 | }, | 393 | }, |
931 | }; | 394 | }; |
932 | 395 | ||
@@ -950,6 +413,10 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map | |||
950 | [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, | 413 | [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, |
951 | [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, | 414 | [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, |
952 | }, | 415 | }, |
416 | [C(OP_PREFETCH)] = { | ||
417 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
418 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
419 | }, | ||
953 | }, | 420 | }, |
954 | [C(L1I)] = { | 421 | [C(L1I)] = { |
955 | [C(OP_READ)] = { | 422 | [C(OP_READ)] = { |
@@ -966,6 +433,7 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map | |||
966 | * Note that MIPS has only "hit" events countable for | 433 | * Note that MIPS has only "hit" events countable for |
967 | * the prefetch operation. | 434 | * the prefetch operation. |
968 | */ | 435 | */ |
436 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
969 | }, | 437 | }, |
970 | }, | 438 | }, |
971 | [C(LL)] = { | 439 | [C(LL)] = { |
@@ -977,6 +445,25 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map | |||
977 | [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, | 445 | [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, |
978 | [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, | 446 | [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, |
979 | }, | 447 | }, |
448 | [C(OP_PREFETCH)] = { | ||
449 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
450 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
451 | }, | ||
452 | }, | ||
453 | [C(DTLB)] = { | ||
454 | /* 74K core does not have specific DTLB events. */ | ||
455 | [C(OP_READ)] = { | ||
456 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
457 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
458 | }, | ||
459 | [C(OP_WRITE)] = { | ||
460 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
461 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
462 | }, | ||
463 | [C(OP_PREFETCH)] = { | ||
464 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
465 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
466 | }, | ||
980 | }, | 467 | }, |
981 | [C(ITLB)] = { | 468 | [C(ITLB)] = { |
982 | [C(OP_READ)] = { | 469 | [C(OP_READ)] = { |
@@ -987,6 +474,10 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map | |||
987 | [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, | 474 | [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, |
988 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, | 475 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, |
989 | }, | 476 | }, |
477 | [C(OP_PREFETCH)] = { | ||
478 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
479 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
480 | }, | ||
990 | }, | 481 | }, |
991 | [C(BPU)] = { | 482 | [C(BPU)] = { |
992 | /* Using the same code for *HW_BRANCH* */ | 483 | /* Using the same code for *HW_BRANCH* */ |
@@ -998,169 +489,31 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map | |||
998 | [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, | 489 | [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, |
999 | [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, | 490 | [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, |
1000 | }, | 491 | }, |
1001 | }, | ||
1002 | }; | ||
1003 | |||
1004 | /* BMIPS5000 */ | ||
1005 | static const struct mips_perf_event bmips5000_cache_map | ||
1006 | [PERF_COUNT_HW_CACHE_MAX] | ||
1007 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1008 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1009 | [C(L1D)] = { | ||
1010 | /* | ||
1011 | * Like some other architectures (e.g. ARM), the performance | ||
1012 | * counters don't differentiate between read and write | ||
1013 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1014 | * best we can do. Writes and reads get combined. | ||
1015 | */ | ||
1016 | [C(OP_READ)] = { | ||
1017 | [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T }, | ||
1018 | [C(RESULT_MISS)] = { 12, CNTR_ODD, T }, | ||
1019 | }, | ||
1020 | [C(OP_WRITE)] = { | ||
1021 | [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T }, | ||
1022 | [C(RESULT_MISS)] = { 12, CNTR_ODD, T }, | ||
1023 | }, | ||
1024 | }, | ||
1025 | [C(L1I)] = { | ||
1026 | [C(OP_READ)] = { | ||
1027 | [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T }, | ||
1028 | [C(RESULT_MISS)] = { 10, CNTR_ODD, T }, | ||
1029 | }, | ||
1030 | [C(OP_WRITE)] = { | ||
1031 | [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T }, | ||
1032 | [C(RESULT_MISS)] = { 10, CNTR_ODD, T }, | ||
1033 | }, | ||
1034 | [C(OP_PREFETCH)] = { | 492 | [C(OP_PREFETCH)] = { |
1035 | [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T }, | 493 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1036 | /* | 494 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1037 | * Note that MIPS has only "hit" events countable for | ||
1038 | * the prefetch operation. | ||
1039 | */ | ||
1040 | }, | ||
1041 | }, | ||
1042 | [C(LL)] = { | ||
1043 | [C(OP_READ)] = { | ||
1044 | [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P }, | ||
1045 | [C(RESULT_MISS)] = { 28, CNTR_ODD, P }, | ||
1046 | }, | ||
1047 | [C(OP_WRITE)] = { | ||
1048 | [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P }, | ||
1049 | [C(RESULT_MISS)] = { 28, CNTR_ODD, P }, | ||
1050 | }, | 495 | }, |
1051 | }, | 496 | }, |
1052 | [C(BPU)] = { | 497 | [C(NODE)] = { |
1053 | /* Using the same code for *HW_BRANCH* */ | ||
1054 | [C(OP_READ)] = { | ||
1055 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1056 | }, | ||
1057 | [C(OP_WRITE)] = { | ||
1058 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1059 | }, | ||
1060 | }, | ||
1061 | }; | ||
1062 | |||
1063 | |||
1064 | static const struct mips_perf_event octeon_cache_map | ||
1065 | [PERF_COUNT_HW_CACHE_MAX] | ||
1066 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1067 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1068 | [C(L1D)] = { | ||
1069 | [C(OP_READ)] = { | 498 | [C(OP_READ)] = { |
1070 | [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL }, | 499 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1071 | [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, | 500 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1072 | }, | 501 | }, |
1073 | [C(OP_WRITE)] = { | 502 | [C(OP_WRITE)] = { |
1074 | [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL }, | 503 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1075 | }, | 504 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1076 | }, | ||
1077 | [C(L1I)] = { | ||
1078 | [C(OP_READ)] = { | ||
1079 | [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL }, | ||
1080 | }, | 505 | }, |
1081 | [C(OP_PREFETCH)] = { | 506 | [C(OP_PREFETCH)] = { |
1082 | [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL }, | 507 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1083 | }, | 508 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, |
1084 | }, | ||
1085 | [C(DTLB)] = { | ||
1086 | /* | ||
1087 | * Only general DTLB misses are counted; use the same event for | ||
1088 | * read and write. | ||
1089 | */ | ||
1090 | [C(OP_READ)] = { | ||
1091 | [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, | ||
1092 | }, | ||
1093 | [C(OP_WRITE)] = { | ||
1094 | [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, | ||
1095 | }, | ||
1096 | }, | ||
1097 | [C(ITLB)] = { | ||
1098 | [C(OP_READ)] = { | ||
1099 | [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, | ||
1100 | }, | ||
1101 | }, | ||
1102 | }; | ||
1103 | |||
1104 | static const struct mips_perf_event xlp_cache_map | ||
1105 | [PERF_COUNT_HW_CACHE_MAX] | ||
1106 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1107 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1108 | [C(L1D)] = { | ||
1109 | [C(OP_READ)] = { | ||
1110 | [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */ | ||
1111 | [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */ | ||
1112 | }, | ||
1113 | [C(OP_WRITE)] = { | ||
1114 | [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */ | ||
1115 | [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */ | ||
1116 | }, | ||
1117 | }, | ||
1118 | [C(L1I)] = { | ||
1119 | [C(OP_READ)] = { | ||
1120 | [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
1121 | [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
1122 | }, | ||
1123 | }, | ||
1124 | [C(LL)] = { | ||
1125 | [C(OP_READ)] = { | ||
1126 | [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */ | ||
1127 | [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */ | ||
1128 | }, | ||
1129 | [C(OP_WRITE)] = { | ||
1130 | [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */ | ||
1131 | [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */ | ||
1132 | }, | ||
1133 | }, | ||
1134 | [C(DTLB)] = { | ||
1135 | /* | ||
1136 | * Only general DTLB misses are counted; use the same event for | ||
1137 | * read and write. | ||
1138 | */ | ||
1139 | [C(OP_READ)] = { | ||
1140 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
1141 | }, | ||
1142 | [C(OP_WRITE)] = { | ||
1143 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
1144 | }, | ||
1145 | }, | ||
1146 | [C(ITLB)] = { | ||
1147 | [C(OP_READ)] = { | ||
1148 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
1149 | }, | ||
1150 | [C(OP_WRITE)] = { | ||
1151 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
1152 | }, | ||
1153 | }, | ||
1154 | [C(BPU)] = { | ||
1155 | [C(OP_READ)] = { | ||
1156 | [C(RESULT_MISS)] = { 0x25, CNTR_ALL }, | ||
1157 | }, | 509 | }, |
1158 | }, | 510 | }, |
1159 | }; | 511 | }; |
1160 | 512 | ||
1161 | #ifdef CONFIG_MIPS_MT_SMP | 513 | #ifdef CONFIG_MIPS_MT_SMP |
1162 | static void check_and_calc_range(struct perf_event *event, | 514 | static void |
1163 | const struct mips_perf_event *pev) | 515 | check_and_calc_range(struct perf_event *event, |
516 | const struct mips_perf_event *pev) | ||
1164 | { | 517 | { |
1165 | struct hw_perf_event *hwc = &event->hw; | 518 | struct hw_perf_event *hwc = &event->hw; |
1166 | 519 | ||
@@ -1183,8 +536,9 @@ static void check_and_calc_range(struct perf_event *event, | |||
1183 | hwc->config_base |= M_TC_EN_ALL; | 536 | hwc->config_base |= M_TC_EN_ALL; |
1184 | } | 537 | } |
1185 | #else | 538 | #else |
1186 | static void check_and_calc_range(struct perf_event *event, | 539 | static void |
1187 | const struct mips_perf_event *pev) | 540 | check_and_calc_range(struct perf_event *event, |
541 | const struct mips_perf_event *pev) | ||
1188 | { | 542 | { |
1189 | } | 543 | } |
1190 | #endif | 544 | #endif |
@@ -1206,7 +560,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1206 | } else if (PERF_TYPE_RAW == event->attr.type) { | 560 | } else if (PERF_TYPE_RAW == event->attr.type) { |
1207 | /* We are working on the global raw event. */ | 561 | /* We are working on the global raw event. */ |
1208 | mutex_lock(&raw_event_mutex); | 562 | mutex_lock(&raw_event_mutex); |
1209 | pev = mipspmu.map_raw_event(event->attr.config); | 563 | pev = mipspmu->map_raw_event(event->attr.config); |
1210 | } else { | 564 | } else { |
1211 | /* The event type is not (yet) supported. */ | 565 | /* The event type is not (yet) supported. */ |
1212 | return -EOPNOTSUPP; | 566 | return -EOPNOTSUPP; |
@@ -1251,61 +605,83 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1251 | hwc->config = 0; | 605 | hwc->config = 0; |
1252 | 606 | ||
1253 | if (!hwc->sample_period) { | 607 | if (!hwc->sample_period) { |
1254 | hwc->sample_period = mipspmu.max_period; | 608 | hwc->sample_period = MAX_PERIOD; |
1255 | hwc->last_period = hwc->sample_period; | 609 | hwc->last_period = hwc->sample_period; |
1256 | local64_set(&hwc->period_left, hwc->sample_period); | 610 | local64_set(&hwc->period_left, hwc->sample_period); |
1257 | } | 611 | } |
1258 | 612 | ||
1259 | err = 0; | 613 | err = 0; |
1260 | if (event->group_leader != event) | 614 | if (event->group_leader != event) { |
1261 | err = validate_group(event); | 615 | err = validate_group(event); |
616 | if (err) | ||
617 | return -EINVAL; | ||
618 | } | ||
1262 | 619 | ||
1263 | event->destroy = hw_perf_event_destroy; | 620 | event->destroy = hw_perf_event_destroy; |
1264 | 621 | ||
1265 | if (err) | ||
1266 | event->destroy(event); | ||
1267 | |||
1268 | return err; | 622 | return err; |
1269 | } | 623 | } |
1270 | 624 | ||
1271 | static void pause_local_counters(void) | 625 | static void pause_local_counters(void) |
1272 | { | 626 | { |
1273 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 627 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1274 | int ctr = mipspmu.num_counters; | 628 | int counters = mipspmu->num_counters; |
1275 | unsigned long flags; | 629 | unsigned long flags; |
1276 | 630 | ||
1277 | local_irq_save(flags); | 631 | local_irq_save(flags); |
1278 | do { | 632 | switch (counters) { |
1279 | ctr--; | 633 | case 4: |
1280 | cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr); | 634 | cpuc->saved_ctrl[3] = r_c0_perfctrl3(); |
1281 | mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & | 635 | w_c0_perfctrl3(cpuc->saved_ctrl[3] & |
1282 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); | 636 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); |
1283 | } while (ctr > 0); | 637 | case 3: |
638 | cpuc->saved_ctrl[2] = r_c0_perfctrl2(); | ||
639 | w_c0_perfctrl2(cpuc->saved_ctrl[2] & | ||
640 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); | ||
641 | case 2: | ||
642 | cpuc->saved_ctrl[1] = r_c0_perfctrl1(); | ||
643 | w_c0_perfctrl1(cpuc->saved_ctrl[1] & | ||
644 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); | ||
645 | case 1: | ||
646 | cpuc->saved_ctrl[0] = r_c0_perfctrl0(); | ||
647 | w_c0_perfctrl0(cpuc->saved_ctrl[0] & | ||
648 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); | ||
649 | } | ||
1284 | local_irq_restore(flags); | 650 | local_irq_restore(flags); |
1285 | } | 651 | } |
1286 | 652 | ||
1287 | static void resume_local_counters(void) | 653 | static void resume_local_counters(void) |
1288 | { | 654 | { |
1289 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 655 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1290 | int ctr = mipspmu.num_counters; | 656 | int counters = mipspmu->num_counters; |
657 | unsigned long flags; | ||
1291 | 658 | ||
1292 | do { | 659 | local_irq_save(flags); |
1293 | ctr--; | 660 | switch (counters) { |
1294 | mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); | 661 | case 4: |
1295 | } while (ctr > 0); | 662 | w_c0_perfctrl3(cpuc->saved_ctrl[3]); |
663 | case 3: | ||
664 | w_c0_perfctrl2(cpuc->saved_ctrl[2]); | ||
665 | case 2: | ||
666 | w_c0_perfctrl1(cpuc->saved_ctrl[1]); | ||
667 | case 1: | ||
668 | w_c0_perfctrl0(cpuc->saved_ctrl[0]); | ||
669 | } | ||
670 | local_irq_restore(flags); | ||
1296 | } | 671 | } |
1297 | 672 | ||
1298 | static int mipsxx_pmu_handle_shared_irq(void) | 673 | static int mipsxx_pmu_handle_shared_irq(void) |
1299 | { | 674 | { |
1300 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 675 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1301 | struct perf_sample_data data; | 676 | struct perf_sample_data data; |
1302 | unsigned int counters = mipspmu.num_counters; | 677 | unsigned int counters = mipspmu->num_counters; |
1303 | u64 counter; | 678 | unsigned int counter; |
1304 | int handled = IRQ_NONE; | 679 | int handled = IRQ_NONE; |
1305 | struct pt_regs *regs; | 680 | struct pt_regs *regs; |
1306 | 681 | ||
1307 | if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI)) | 682 | if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) |
1308 | return handled; | 683 | return handled; |
684 | |||
1309 | /* | 685 | /* |
1310 | * First we pause the local counters, so that when we are locked | 686 | * First we pause the local counters, so that when we are locked |
1311 | * here, the counters are all paused. When it gets locked due to | 687 | * here, the counters are all paused. When it gets locked due to |
@@ -1314,21 +690,25 @@ static int mipsxx_pmu_handle_shared_irq(void) | |||
1314 | * See also mipsxx_pmu_start(). | 690 | * See also mipsxx_pmu_start(). |
1315 | */ | 691 | */ |
1316 | pause_local_counters(); | 692 | pause_local_counters(); |
1317 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | 693 | #ifdef CONFIG_MIPS_MT_SMP |
1318 | read_lock(&pmuint_rwlock); | 694 | read_lock(&pmuint_rwlock); |
1319 | #endif | 695 | #endif |
1320 | 696 | ||
1321 | regs = get_irq_regs(); | 697 | regs = get_irq_regs(); |
1322 | 698 | ||
1323 | perf_sample_data_init(&data, 0, 0); | 699 | perf_sample_data_init(&data, 0); |
1324 | 700 | ||
1325 | switch (counters) { | 701 | switch (counters) { |
1326 | #define HANDLE_COUNTER(n) \ | 702 | #define HANDLE_COUNTER(n) \ |
1327 | case n + 1: \ | 703 | case n + 1: \ |
1328 | if (test_bit(n, cpuc->used_mask)) { \ | 704 | if (test_bit(n, cpuc->used_mask)) { \ |
1329 | counter = mipspmu.read_counter(n); \ | 705 | counter = r_c0_perfcntr ## n(); \ |
1330 | if (counter & mipspmu.overflow) { \ | 706 | if (counter & M_COUNTER_OVERFLOW) { \ |
1331 | handle_associated_event(cpuc, n, &data, regs); \ | 707 | w_c0_perfcntr ## n(counter & \ |
708 | VALID_COUNT); \ | ||
709 | if (test_and_change_bit(n, cpuc->msbs)) \ | ||
710 | handle_associated_event(cpuc, \ | ||
711 | n, &data, regs); \ | ||
1332 | handled = IRQ_HANDLED; \ | 712 | handled = IRQ_HANDLED; \ |
1333 | } \ | 713 | } \ |
1334 | } | 714 | } |
@@ -1346,23 +726,126 @@ static int mipsxx_pmu_handle_shared_irq(void) | |||
1346 | if (handled == IRQ_HANDLED) | 726 | if (handled == IRQ_HANDLED) |
1347 | irq_work_run(); | 727 | irq_work_run(); |
1348 | 728 | ||
1349 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | 729 | #ifdef CONFIG_MIPS_MT_SMP |
1350 | read_unlock(&pmuint_rwlock); | 730 | read_unlock(&pmuint_rwlock); |
1351 | #endif | 731 | #endif |
1352 | resume_local_counters(); | 732 | resume_local_counters(); |
1353 | return handled; | 733 | return handled; |
1354 | } | 734 | } |
1355 | 735 | ||
1356 | static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | 736 | static irqreturn_t |
737 | mipsxx_pmu_handle_irq(int irq, void *dev) | ||
1357 | { | 738 | { |
1358 | return mipsxx_pmu_handle_shared_irq(); | 739 | return mipsxx_pmu_handle_shared_irq(); |
1359 | } | 740 | } |
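HANDLE_COUNTER(n) expands into case labels that deliberately fall through, so `switch (counters)` entering at `case 4:` visits counters 3, 2, 1, 0 in turn with one macro body per index. A minimal demonstration of the same generated-cases trick; the overflow test is a stub chosen just to make the output interesting:

#include <stdio.h>

/* Illustrative stand-in: pretend counter n overflowed iff n is odd. */
static int counter_overflowed(int n) { return n & 1; }

int main(void)
{
        int counters = 4, handled = 0;

        switch (counters) {
#define HANDLE_COUNTER(n)                                       \
        case n + 1:                                             \
                if (counter_overflowed(n)) {                    \
                        printf("servicing counter %d\n", n);    \
                        handled = 1;                            \
                }
        HANDLE_COUNTER(3)       /* case 4: */
        HANDLE_COUNTER(2)       /* falls through to case 3: */
        HANDLE_COUNTER(1)       /* ... case 2: */
        HANDLE_COUNTER(0)       /* ... case 1: */
        }
        printf("handled = %d\n", handled);
        return 0;
}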
1360 | 741 | ||
742 | static void mipsxx_pmu_start(void) | ||
743 | { | ||
744 | #ifdef CONFIG_MIPS_MT_SMP | ||
745 | write_unlock(&pmuint_rwlock); | ||
746 | #endif | ||
747 | resume_local_counters(); | ||
748 | } | ||
749 | |||
750 | /* | ||
751 | * MIPS performance counters can be per-TC. The control registers | ||
752 | * cannot be directly accessed across CPUs. Hence if we want global | ||
753 | * control, we need cross-CPU calls; on_each_cpu() can help, but we | ||
754 | * cannot make sure this function is called with interrupts enabled. | ||
755 | * So here we pause local counters, then grab a rwlock and leave the | ||
756 | * counters on other CPUs alone. If any counter interrupt is raised | ||
757 | * while we own the write lock, simply pause local counters on that | ||
758 | * CPU and spin in the handler. We also know we won't be switched to | ||
759 | * another CPU between pausing local counters and grabbing the lock. | ||
760 | */ | ||
761 | static void mipsxx_pmu_stop(void) | ||
762 | { | ||
763 | pause_local_counters(); | ||
764 | #ifdef CONFIG_MIPS_MT_SMP | ||
765 | write_lock(&pmuint_rwlock); | ||
766 | #endif | ||
767 | } | ||
768 | |||
769 | static int | ||
770 | mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, | ||
771 | struct hw_perf_event *hwc) | ||
772 | { | ||
773 | int i; | ||
774 | |||
775 | /* | ||
776 | * We only need to care about the counter mask. The range | ||
777 | * has already been checked. | ||
778 | */ | ||
779 | unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; | ||
780 | |||
781 | for (i = mipspmu->num_counters - 1; i >= 0; i--) { | ||
782 | /* | ||
783 | * Note that some MIPS perf events can be counted by both | ||
784 | * even and odd counters, whereas many others can use only | ||
785 | * even _or_ odd counters. This introduces an issue: when | ||
786 | * the former kind of event takes the counter the latter | ||
787 | * kind of event wants to use, "counter allocation" for the | ||
788 | * latter event will fail. If the two could be swapped | ||
789 | * dynamically, both would be satisfied, but we leave this | ||
790 | * issue alone for now. | ||
791 | */ | ||
792 | if (test_bit(i, &cntr_mask) && | ||
793 | !test_and_set_bit(i, cpuc->used_mask)) | ||
794 | return i; | ||
795 | } | ||
796 | |||
797 | return -EAGAIN; | ||
798 | } | ||
799 | |||
800 | static void | ||
801 | mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | ||
802 | { | ||
803 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
804 | unsigned long flags; | ||
805 | |||
806 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | ||
807 | |||
808 | local_irq_save(flags); | ||
809 | cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | | ||
810 | (evt->config_base & M_PERFCTL_CONFIG_MASK) | | ||
811 | /* Make sure the interrupt is enabled. */ | ||
812 | M_PERFCTL_INTERRUPT_ENABLE; | ||
813 | /* | ||
814 | * We do not actually let the counter run. Leave it until start(). | ||
815 | */ | ||
816 | local_irq_restore(flags); | ||
817 | } | ||
818 | |||
819 | static void | ||
820 | mipsxx_pmu_disable_event(int idx) | ||
821 | { | ||
822 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
823 | unsigned long flags; | ||
824 | |||
825 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | ||
826 | |||
827 | local_irq_save(flags); | ||
828 | cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & | ||
829 | ~M_PERFCTL_COUNT_EVENT_WHENEVER; | ||
830 | mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); | ||
831 | local_irq_restore(flags); | ||
832 | } | ||
833 | |||
1361 | /* 24K */ | 834 | /* 24K */ |
835 | #define IS_UNSUPPORTED_24K_EVENT(r, b) \ | ||
836 | ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \ | ||
837 | (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \ | ||
838 | (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \ | ||
839 | (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \ | ||
840 | ((b) >= 68 && (b) <= 127)) | ||
1362 | #define IS_BOTH_COUNTERS_24K_EVENT(b) \ | 841 | #define IS_BOTH_COUNTERS_24K_EVENT(b) \ |
1363 | ((b) == 0 || (b) == 1 || (b) == 11) | 842 | ((b) == 0 || (b) == 1 || (b) == 11) |
1364 | 843 | ||
1365 | /* 34K */ | 844 | /* 34K */ |
845 | #define IS_UNSUPPORTED_34K_EVENT(r, b) \ | ||
846 | ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \ | ||
847 | (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \ | ||
848 | ((b) >= 68 && (b) <= 127)) | ||
1366 | #define IS_BOTH_COUNTERS_34K_EVENT(b) \ | 849 | #define IS_BOTH_COUNTERS_34K_EVENT(b) \ |
1367 | ((b) == 0 || (b) == 1 || (b) == 11) | 850 | ((b) == 0 || (b) == 1 || (b) == 11) |
1368 | #ifdef CONFIG_MIPS_MT_SMP | 851 | #ifdef CONFIG_MIPS_MT_SMP |
@@ -1375,10 +858,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | |||
1375 | #endif | 858 | #endif |
1376 | 859 | ||
1377 | /* 74K */ | 860 | /* 74K */ |
861 | #define IS_UNSUPPORTED_74K_EVENT(r, b) \ | ||
862 | ((r) == 5 || ((r) >= 135 && (r) <= 137) || \ | ||
863 | ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \ | ||
864 | (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \ | ||
865 | (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \ | ||
866 | (b) == 61 || (r) == 62 || (r) == 191 || \ | ||
867 | ((b) >= 64 && (b) <= 127)) | ||
1378 | #define IS_BOTH_COUNTERS_74K_EVENT(b) \ | 868 | #define IS_BOTH_COUNTERS_74K_EVENT(b) \ |
1379 | ((b) == 0 || (b) == 1) | 869 | ((b) == 0 || (b) == 1) |
1380 | 870 | ||
1381 | /* 1004K */ | 871 | /* 1004K */ |
872 | #define IS_UNSUPPORTED_1004K_EVENT(r, b) \ | ||
873 | ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \ | ||
874 | (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127)) | ||
1382 | #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ | 875 | #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ |
1383 | ((b) == 0 || (b) == 1 || (b) == 11) | 876 | ((b) == 0 || (b) == 1 || (b) == 11) |
1384 | #ifdef CONFIG_MIPS_MT_SMP | 877 | #ifdef CONFIG_MIPS_MT_SMP |
@@ -1391,11 +884,6 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | |||
1391 | #define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) | 884 | #define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) |
1392 | #endif | 885 | #endif |
1393 | 886 | ||
1394 | /* BMIPS5000 */ | ||
1395 | #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \ | ||
1396 | ((b) == 0 || (b) == 1) | ||
1397 | |||
1398 | |||
1399 | /* | 887 | /* |
1400 | * User can use 0-255 raw events, where 0-127 for the events of even | 888 | * User can use 0-255 raw events, where 0-127 for the events of even |
1401 | * counters, and 128-255 for odd counters. Note that bit 7 is used to | 889 | * counters, and 128-255 for odd counters. Note that bit 7 is used to |
@@ -1404,15 +892,17 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | |||
1404 | * then 128 needs to be added to 15 as the input for the event config, | 892 | * then 128 needs to be added to 15 as the input for the event config, |
1405 | * i.e., 143 (0x8F) to be used. | 893 | * i.e., 143 (0x8F) to be used. |
1406 | */ | 894 | */ |
1407 | static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | 895 | static const struct mips_perf_event * |
896 | mipsxx_pmu_map_raw_event(u64 config) | ||
1408 | { | 897 | { |
1409 | unsigned int raw_id = config & 0xff; | 898 | unsigned int raw_id = config & 0xff; |
1410 | unsigned int base_id = raw_id & 0x7f; | 899 | unsigned int base_id = raw_id & 0x7f; |
1411 | 900 | ||
1412 | raw_event.event_id = base_id; | ||
1413 | |||
1414 | switch (current_cpu_type()) { | 901 | switch (current_cpu_type()) { |
1415 | case CPU_24K: | 902 | case CPU_24K: |
903 | if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id)) | ||
904 | return ERR_PTR(-EOPNOTSUPP); | ||
905 | raw_event.event_id = base_id; | ||
1416 | if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) | 906 | if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) |
1417 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 907 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1418 | else | 908 | else |
@@ -1427,6 +917,9 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1427 | #endif | 917 | #endif |
1428 | break; | 918 | break; |
1429 | case CPU_34K: | 919 | case CPU_34K: |
920 | if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id)) | ||
921 | return ERR_PTR(-EOPNOTSUPP); | ||
922 | raw_event.event_id = base_id; | ||
1430 | if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) | 923 | if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) |
1431 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 924 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1432 | else | 925 | else |
@@ -1442,6 +935,9 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1442 | #endif | 935 | #endif |
1443 | break; | 936 | break; |
1444 | case CPU_74K: | 937 | case CPU_74K: |
938 | if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id)) | ||
939 | return ERR_PTR(-EOPNOTSUPP); | ||
940 | raw_event.event_id = base_id; | ||
1445 | if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) | 941 | if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) |
1446 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 942 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1447 | else | 943 | else |
@@ -1452,6 +948,9 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1452 | #endif | 948 | #endif |
1453 | break; | 949 | break; |
1454 | case CPU_1004K: | 950 | case CPU_1004K: |
951 | if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id)) | ||
952 | return ERR_PTR(-EOPNOTSUPP); | ||
953 | raw_event.event_id = base_id; | ||
1455 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) | 954 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) |
1456 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 955 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1457 | else | 956 | else |
@@ -1466,69 +965,45 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1466 | raw_event.range = T; | 965 | raw_event.range = T; |
1467 | #endif | 966 | #endif |
1468 | break; | 967 | break; |
1469 | case CPU_BMIPS5000: | ||
1470 | if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id)) | ||
1471 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1472 | else | ||
1473 | raw_event.cntr_mask = | ||
1474 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1475 | } | ||
1476 | |||
1477 | return &raw_event; | ||
1478 | } | ||
1479 | |||
1480 | static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) | ||
1481 | { | ||
1482 | unsigned int raw_id = config & 0xff; | ||
1483 | unsigned int base_id = raw_id & 0x7f; | ||
1484 | |||
1485 | |||
1486 | raw_event.cntr_mask = CNTR_ALL; | ||
1487 | raw_event.event_id = base_id; | ||
1488 | |||
1489 | if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { | ||
1490 | if (base_id > 0x42) | ||
1491 | return ERR_PTR(-EOPNOTSUPP); | ||
1492 | } else { | ||
1493 | if (base_id > 0x3a) | ||
1494 | return ERR_PTR(-EOPNOTSUPP); | ||
1495 | } | ||
1496 | |||
1497 | switch (base_id) { | ||
1498 | case 0x00: | ||
1499 | case 0x0f: | ||
1500 | case 0x1e: | ||
1501 | case 0x1f: | ||
1502 | case 0x2f: | ||
1503 | case 0x34: | ||
1504 | case 0x3b ... 0x3f: | ||
1505 | return ERR_PTR(-EOPNOTSUPP); | ||
1506 | default: | ||
1507 | break; | ||
1508 | } | 968 | } |
1509 | 969 | ||
1510 | return &raw_event; | 970 | return &raw_event; |
1511 | } | 971 | } |
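A user-space sketch of the raw-event encoding described in the comment above: bit 7 of the 8-bit config selects the odd-counter bank, so event 15 on an odd counter is requested as 143 (0x8F). This assumes only the standard perf_event_open(2) interface; the helper name is hypothetical.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: open one MIPS raw hardware event (config 0-255). */
static int open_mips_raw_event(unsigned int raw_id)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = raw_id;		/* e.g. 0x8F = event 15, odd counter bank */
	attr.disabled = 1;

	/* pid 0 = calling task, cpu -1 = any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}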
1512 | 972 | ||
1513 | static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config) | 973 | static struct mips_pmu mipsxxcore_pmu = { |
1514 | { | 974 | .handle_irq = mipsxx_pmu_handle_irq, |
1515 | unsigned int raw_id = config & 0xff; | 975 | .handle_shared_irq = mipsxx_pmu_handle_shared_irq, |
1516 | 976 | .start = mipsxx_pmu_start, | |
1517 | /* Only 1-63 are defined */ | 977 | .stop = mipsxx_pmu_stop, |
1518 | if ((raw_id < 0x01) || (raw_id > 0x3f)) | 978 | .alloc_counter = mipsxx_pmu_alloc_counter, |
1519 | return ERR_PTR(-EOPNOTSUPP); | 979 | .read_counter = mipsxx_pmu_read_counter, |
1520 | 980 | .write_counter = mipsxx_pmu_write_counter, | |
1521 | raw_event.cntr_mask = CNTR_ALL; | 981 | .enable_event = mipsxx_pmu_enable_event, |
1522 | raw_event.event_id = raw_id; | 982 | .disable_event = mipsxx_pmu_disable_event, |
983 | .map_raw_event = mipsxx_pmu_map_raw_event, | ||
984 | .general_event_map = &mipsxxcore_event_map, | ||
985 | .cache_event_map = &mipsxxcore_cache_map, | ||
986 | }; | ||
1523 | 987 | ||
1524 | return &raw_event; | 988 | static struct mips_pmu mipsxx74Kcore_pmu = { |
1525 | } | 989 | .handle_irq = mipsxx_pmu_handle_irq, |
990 | .handle_shared_irq = mipsxx_pmu_handle_shared_irq, | ||
991 | .start = mipsxx_pmu_start, | ||
992 | .stop = mipsxx_pmu_stop, | ||
993 | .alloc_counter = mipsxx_pmu_alloc_counter, | ||
994 | .read_counter = mipsxx_pmu_read_counter, | ||
995 | .write_counter = mipsxx_pmu_write_counter, | ||
996 | .enable_event = mipsxx_pmu_enable_event, | ||
997 | .disable_event = mipsxx_pmu_disable_event, | ||
998 | .map_raw_event = mipsxx_pmu_map_raw_event, | ||
999 | .general_event_map = &mipsxx74Kcore_event_map, | ||
1000 | .cache_event_map = &mipsxx74Kcore_cache_map, | ||
1001 | }; | ||
1526 | 1002 | ||
1527 | static int __init | 1003 | static int __init |
1528 | init_hw_perf_events(void) | 1004 | init_hw_perf_events(void) |
1529 | { | 1005 | { |
1530 | int counters, irq; | 1006 | int counters, irq; |
1531 | int counter_bits; | ||
1532 | 1007 | ||
1533 | pr_info("Performance counters: "); | 1008 | pr_info("Performance counters: "); |
1534 | 1009 | ||
@@ -1538,7 +1013,7 @@ init_hw_perf_events(void) | |||
1538 | return -ENODEV; | 1013 | return -ENODEV; |
1539 | } | 1014 | } |
1540 | 1015 | ||
1541 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | 1016 | #ifdef CONFIG_MIPS_MT_SMP |
1542 | cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); | 1017 | cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); |
1543 | if (!cpu_has_mipsmt_pertccounters) | 1018 | if (!cpu_has_mipsmt_pertccounters) |
1544 | counters = counters_total_to_per_cpu(counters); | 1019 | counters = counters_total_to_per_cpu(counters); |
@@ -1552,8 +1027,7 @@ init_hw_perf_events(void) | |||
1552 | irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; | 1027 | irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; |
1553 | } else { | 1028 | } else { |
1554 | #endif | 1029 | #endif |
1555 | if ((cp0_perfcount_irq >= 0) && | 1030 | if (cp0_perfcount_irq >= 0) |
1556 | (cp0_compare_irq != cp0_perfcount_irq)) | ||
1557 | irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; | 1031 | irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; |
1558 | else | 1032 | else |
1559 | irq = -1; | 1033 | irq = -1; |
@@ -1561,52 +1035,32 @@ init_hw_perf_events(void) | |||
1561 | } | 1035 | } |
1562 | #endif | 1036 | #endif |
1563 | 1037 | ||
1564 | mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; | 1038 | on_each_cpu(reset_counters, (void *)(long)counters, 1); |
1565 | 1039 | ||
1566 | switch (current_cpu_type()) { | 1040 | switch (current_cpu_type()) { |
1567 | case CPU_24K: | 1041 | case CPU_24K: |
1568 | mipspmu.name = "mips/24K"; | 1042 | mipsxxcore_pmu.name = "mips/24K"; |
1569 | mipspmu.general_event_map = &mipsxxcore_event_map; | 1043 | mipsxxcore_pmu.num_counters = counters; |
1570 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | 1044 | mipsxxcore_pmu.irq = irq; |
1045 | mipspmu = &mipsxxcore_pmu; | ||
1571 | break; | 1046 | break; |
1572 | case CPU_34K: | 1047 | case CPU_34K: |
1573 | mipspmu.name = "mips/34K"; | 1048 | mipsxxcore_pmu.name = "mips/34K"; |
1574 | mipspmu.general_event_map = &mipsxxcore_event_map; | 1049 | mipsxxcore_pmu.num_counters = counters; |
1575 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | 1050 | mipsxxcore_pmu.irq = irq; |
1051 | mipspmu = &mipsxxcore_pmu; | ||
1576 | break; | 1052 | break; |
1577 | case CPU_74K: | 1053 | case CPU_74K: |
1578 | mipspmu.name = "mips/74K"; | 1054 | mipsxx74Kcore_pmu.name = "mips/74K"; |
1579 | mipspmu.general_event_map = &mipsxx74Kcore_event_map; | 1055 | mipsxx74Kcore_pmu.num_counters = counters; |
1580 | mipspmu.cache_event_map = &mipsxx74Kcore_cache_map; | 1056 | mipsxx74Kcore_pmu.irq = irq; |
1057 | mipspmu = &mipsxx74Kcore_pmu; | ||
1581 | break; | 1058 | break; |
1582 | case CPU_1004K: | 1059 | case CPU_1004K: |
1583 | mipspmu.name = "mips/1004K"; | 1060 | mipsxxcore_pmu.name = "mips/1004K"; |
1584 | mipspmu.general_event_map = &mipsxxcore_event_map; | 1061 | mipsxxcore_pmu.num_counters = counters; |
1585 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | 1062 | mipsxxcore_pmu.irq = irq; |
1586 | break; | 1063 | mipspmu = &mipsxxcore_pmu; |
1587 | case CPU_LOONGSON1: | ||
1588 | mipspmu.name = "mips/loongson1"; | ||
1589 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
1590 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
1591 | break; | ||
1592 | case CPU_CAVIUM_OCTEON: | ||
1593 | case CPU_CAVIUM_OCTEON_PLUS: | ||
1594 | case CPU_CAVIUM_OCTEON2: | ||
1595 | mipspmu.name = "octeon"; | ||
1596 | mipspmu.general_event_map = &octeon_event_map; | ||
1597 | mipspmu.cache_event_map = &octeon_cache_map; | ||
1598 | mipspmu.map_raw_event = octeon_pmu_map_raw_event; | ||
1599 | break; | ||
1600 | case CPU_BMIPS5000: | ||
1601 | mipspmu.name = "BMIPS5000"; | ||
1602 | mipspmu.general_event_map = &bmips5000_event_map; | ||
1603 | mipspmu.cache_event_map = &bmips5000_cache_map; | ||
1604 | break; | ||
1605 | case CPU_XLP: | ||
1606 | mipspmu.name = "xlp"; | ||
1607 | mipspmu.general_event_map = &xlp_event_map; | ||
1608 | mipspmu.cache_event_map = &xlp_cache_map; | ||
1609 | mipspmu.map_raw_event = xlp_pmu_map_raw_event; | ||
1610 | break; | 1064 | break; |
1611 | default: | 1065 | default: |
1612 | pr_cont("Either hardware does not support performance " | 1066 | pr_cont("Either hardware does not support performance " |
@@ -1614,33 +1068,15 @@ init_hw_perf_events(void) | |||
1614 | return -ENODEV; | 1068 | return -ENODEV; |
1615 | } | 1069 | } |
1616 | 1070 | ||
1617 | mipspmu.num_counters = counters; | 1071 | if (mipspmu) |
1618 | mipspmu.irq = irq; | 1072 | pr_cont("%s PMU enabled, %d counters available to each " |
1619 | 1073 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, | |
1620 | if (read_c0_perfctrl0() & M_PERFCTL_WIDE) { | 1074 | irq < 0 ? " (share with timer interrupt)" : ""); |
1621 | mipspmu.max_period = (1ULL << 63) - 1; | ||
1622 | mipspmu.valid_count = (1ULL << 63) - 1; | ||
1623 | mipspmu.overflow = 1ULL << 63; | ||
1624 | mipspmu.read_counter = mipsxx_pmu_read_counter_64; | ||
1625 | mipspmu.write_counter = mipsxx_pmu_write_counter_64; | ||
1626 | counter_bits = 64; | ||
1627 | } else { | ||
1628 | mipspmu.max_period = (1ULL << 31) - 1; | ||
1629 | mipspmu.valid_count = (1ULL << 31) - 1; | ||
1630 | mipspmu.overflow = 1ULL << 31; | ||
1631 | mipspmu.read_counter = mipsxx_pmu_read_counter; | ||
1632 | mipspmu.write_counter = mipsxx_pmu_write_counter; | ||
1633 | counter_bits = 32; | ||
1634 | } | ||
1635 | |||
1636 | on_each_cpu(reset_counters, (void *)(long)counters, 1); | ||
1637 | |||
1638 | pr_cont("%s PMU enabled, %d %d-bit counters available to each " | ||
1639 | "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq, | ||
1640 | irq < 0 ? " (share with timer interrupt)" : ""); | ||
1641 | 1075 | ||
1642 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 1076 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
1643 | 1077 | ||
1644 | return 0; | 1078 | return 0; |
1645 | } | 1079 | } |
1646 | early_initcall(init_hw_perf_events); | 1080 | early_initcall(init_hw_perf_events); |
1081 | |||
1082 | #endif /* defined(CONFIG_CPU_MIPS32)... */ | ||
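The counter-width probe removed above (the M_PERFCTL_WIDE test) picks 32- or 64-bit counter arithmetic; the counter's top bit doubles as the overflow indicator, leaving a (w-1)-bit usable period. A standalone sketch of that arithmetic, mirroring the deleted assignments:

#include <stdint.h>

struct counter_params {
	uint64_t max_period;	/* largest programmable sampling period */
	uint64_t valid_count;	/* mask of valid count bits */
	uint64_t overflow;	/* bit that flags a counter overflow */
};

/* A w-bit counter reserves its sign bit for overflow detection. */
static struct counter_params params_for_width(int wide)
{
	int bits = wide ? 64 : 32;
	struct counter_params p;

	p.max_period  = (1ULL << (bits - 1)) - 1;
	p.valid_count = (1ULL << (bits - 1)) - 1;
	p.overflow    = 1ULL << (bits - 1);
	return p;
}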
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 07dff54f2ce..e309665b6c8 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
25 | int i; | 25 | int i; |
26 | 26 | ||
27 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
28 | if (!cpu_online(n)) | 28 | if (!cpu_isset(n, cpu_online_map)) |
29 | return 0; | 29 | return 0; |
30 | #endif | 30 | #endif |
31 | 31 | ||
@@ -41,48 +41,45 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
41 | 41 | ||
42 | seq_printf(m, "processor\t\t: %ld\n", n); | 42 | seq_printf(m, "processor\t\t: %ld\n", n); |
43 | sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", | 43 | sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", |
44 | cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); | 44 | cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); |
45 | seq_printf(m, fmt, __cpu_name[n], | 45 | seq_printf(m, fmt, __cpu_name[n], |
46 | (version >> 4) & 0x0f, version & 0x0f, | 46 | (version >> 4) & 0x0f, version & 0x0f, |
47 | (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); | 47 | (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); |
48 | seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", | 48 | seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", |
49 | cpu_data[n].udelay_val / (500000/HZ), | 49 | cpu_data[n].udelay_val / (500000/HZ), |
50 | (cpu_data[n].udelay_val / (5000/HZ)) % 100); | 50 | (cpu_data[n].udelay_val / (5000/HZ)) % 100); |
51 | seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); | 51 | seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); |
52 | seq_printf(m, "microsecond timers\t: %s\n", | 52 | seq_printf(m, "microsecond timers\t: %s\n", |
53 | cpu_has_counter ? "yes" : "no"); | 53 | cpu_has_counter ? "yes" : "no"); |
54 | seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); | 54 | seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); |
55 | seq_printf(m, "extra interrupt vector\t: %s\n", | 55 | seq_printf(m, "extra interrupt vector\t: %s\n", |
56 | cpu_has_divec ? "yes" : "no"); | 56 | cpu_has_divec ? "yes" : "no"); |
57 | seq_printf(m, "hardware watchpoint\t: %s", | 57 | seq_printf(m, "hardware watchpoint\t: %s", |
58 | cpu_has_watch ? "yes, " : "no\n"); | 58 | cpu_has_watch ? "yes, " : "no\n"); |
59 | if (cpu_has_watch) { | 59 | if (cpu_has_watch) { |
60 | seq_printf(m, "count: %d, address/irw mask: [", | 60 | seq_printf(m, "count: %d, address/irw mask: [", |
61 | cpu_data[n].watch_reg_count); | 61 | cpu_data[n].watch_reg_count); |
62 | for (i = 0; i < cpu_data[n].watch_reg_count; i++) | 62 | for (i = 0; i < cpu_data[n].watch_reg_count; i++) |
63 | seq_printf(m, "%s0x%04x", i ? ", " : "" , | 63 | seq_printf(m, "%s0x%04x", i ? ", " : "" , |
64 | cpu_data[n].watch_reg_masks[i]); | 64 | cpu_data[n].watch_reg_masks[i]); |
65 | seq_printf(m, "]\n"); | 65 | seq_printf(m, "]\n"); |
66 | } | 66 | } |
67 | 67 | seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", | |
68 | seq_printf(m, "ASEs implemented\t:"); | 68 | cpu_has_mips16 ? " mips16" : "", |
69 | if (cpu_has_mips16) seq_printf(m, "%s", " mips16"); | 69 | cpu_has_mdmx ? " mdmx" : "", |
70 | if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx"); | 70 | cpu_has_mips3d ? " mips3d" : "", |
71 | if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d"); | 71 | cpu_has_smartmips ? " smartmips" : "", |
72 | if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips"); | 72 | cpu_has_dsp ? " dsp" : "", |
73 | if (cpu_has_dsp) seq_printf(m, "%s", " dsp"); | 73 | cpu_has_mipsmt ? " mt" : "" |
74 | if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2"); | 74 | ); |
75 | if (cpu_has_mipsmt) seq_printf(m, "%s", " mt"); | ||
76 | seq_printf(m, "\n"); | ||
77 | |||
78 | seq_printf(m, "shadow register sets\t: %d\n", | 75 | seq_printf(m, "shadow register sets\t: %d\n", |
79 | cpu_data[n].srsets); | 76 | cpu_data[n].srsets); |
80 | seq_printf(m, "kscratch registers\t: %d\n", | 77 | seq_printf(m, "kscratch registers\t: %d\n", |
81 | hweight8(cpu_data[n].kscratch_mask)); | 78 | hweight8(cpu_data[n].kscratch_mask)); |
82 | seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); | 79 | seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); |
83 | 80 | ||
84 | sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", | 81 | sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", |
85 | cpu_has_vce ? "%u" : "not available"); | 82 | cpu_has_vce ? "%u" : "not available"); |
86 | seq_printf(m, fmt, 'D', vced_count); | 83 | seq_printf(m, fmt, 'D', vced_count); |
87 | seq_printf(m, fmt, 'I', vcei_count); | 84 | seq_printf(m, fmt, 'I', vcei_count); |
88 | seq_printf(m, "\n"); | 85 | seq_printf(m, "\n"); |
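For orientation, the seq_printf() calls above produce /proc/cpuinfo entries shaped roughly like this (values illustrative, not taken from real hardware):

processor		: 0
cpu model		: MIPS 24Kc V4.12 FPU V0.0
BogoMIPS		: 249.34
wait instruction	: yes
microsecond timers	: yes
tlb_entries		: 16
extra interrupt vector	: yes
hardware watchpoint	: yes, count: 1, address/irw mask: [0x0ff8]
ASEs implemented	: mips16 dsp
shadow register sets	: 1
kscratch registers	: 0
core			: 0
VCED exceptions		: not available
VCEI exceptions		: not available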
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index a11c6f9fdd5..b30cb2573aa 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -9,13 +9,13 @@ | |||
9 | * Copyright (C) 2004 Thiemo Seufer | 9 | * Copyright (C) 2004 Thiemo Seufer |
10 | */ | 10 | */ |
11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
12 | #include <linux/module.h> | ||
12 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
13 | #include <linux/tick.h> | 14 | #include <linux/tick.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
17 | #include <linux/unistd.h> | 18 | #include <linux/unistd.h> |
18 | #include <linux/export.h> | ||
19 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
20 | #include <linux/mman.h> | 20 | #include <linux/mman.h> |
21 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/dsp.h> | 32 | #include <asm/dsp.h> |
33 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | #include <asm/system.h> | ||
35 | #include <asm/mipsregs.h> | 36 | #include <asm/mipsregs.h> |
36 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
@@ -55,8 +56,7 @@ void __noreturn cpu_idle(void) | |||
55 | 56 | ||
56 | /* endless idle loop with no priority at all */ | 57 | /* endless idle loop with no priority at all */ |
57 | while (1) { | 58 | while (1) { |
58 | tick_nohz_idle_enter(); | 59 | tick_nohz_stop_sched_tick(1); |
59 | rcu_idle_enter(); | ||
60 | while (!need_resched() && cpu_online(cpu)) { | 60 | while (!need_resched() && cpu_online(cpu)) { |
61 | #ifdef CONFIG_MIPS_MT_SMTC | 61 | #ifdef CONFIG_MIPS_MT_SMTC |
62 | extern void smtc_idle_loop_hook(void); | 62 | extern void smtc_idle_loop_hook(void); |
@@ -72,17 +72,19 @@ void __noreturn cpu_idle(void) | |||
72 | } | 72 | } |
73 | } | 73 | } |
74 | #ifdef CONFIG_HOTPLUG_CPU | 74 | #ifdef CONFIG_HOTPLUG_CPU |
75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) | 75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && |
76 | (system_state == SYSTEM_RUNNING || | ||
77 | system_state == SYSTEM_BOOTING)) | ||
76 | play_dead(); | 78 | play_dead(); |
77 | #endif | 79 | #endif |
78 | rcu_idle_exit(); | 80 | tick_nohz_restart_sched_tick(); |
79 | tick_nohz_idle_exit(); | 81 | preempt_enable_no_resched(); |
80 | schedule_preempt_disabled(); | 82 | schedule(); |
83 | preempt_disable(); | ||
81 | } | 84 | } |
82 | } | 85 | } |
83 | 86 | ||
84 | asmlinkage void ret_from_fork(void); | 87 | asmlinkage void ret_from_fork(void); |
85 | asmlinkage void ret_from_kernel_thread(void); | ||
86 | 88 | ||
87 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | 89 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) |
88 | { | 90 | { |
@@ -112,10 +114,10 @@ void flush_thread(void) | |||
112 | } | 114 | } |
113 | 115 | ||
114 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 116 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
115 | unsigned long arg, struct task_struct *p) | 117 | unsigned long unused, struct task_struct *p, struct pt_regs *regs) |
116 | { | 118 | { |
117 | struct thread_info *ti = task_thread_info(p); | 119 | struct thread_info *ti = task_thread_info(p); |
118 | struct pt_regs *childregs, *regs = current_pt_regs(); | 120 | struct pt_regs *childregs; |
119 | unsigned long childksp; | 121 | unsigned long childksp; |
120 | p->set_child_tid = p->clear_child_tid = NULL; | 122 | p->set_child_tid = p->clear_child_tid = NULL; |
121 | 123 | ||
@@ -135,30 +137,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
135 | childregs = (struct pt_regs *) childksp - 1; | 137 | childregs = (struct pt_regs *) childksp - 1; |
136 | /* Put the stack after the struct pt_regs. */ | 138 | /* Put the stack after the struct pt_regs. */ |
137 | childksp = (unsigned long) childregs; | 139 | childksp = (unsigned long) childregs; |
138 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | ||
139 | if (unlikely(p->flags & PF_KTHREAD)) { | ||
140 | unsigned long status = p->thread.cp0_status; | ||
141 | memset(childregs, 0, sizeof(struct pt_regs)); | ||
142 | ti->addr_limit = KERNEL_DS; | ||
143 | p->thread.reg16 = usp; /* fn */ | ||
144 | p->thread.reg17 = arg; | ||
145 | p->thread.reg29 = childksp; | ||
146 | p->thread.reg31 = (unsigned long) ret_from_kernel_thread; | ||
147 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
148 | status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | | ||
149 | ((status & (ST0_KUC | ST0_IEC)) << 2); | ||
150 | #else | ||
151 | status |= ST0_EXL; | ||
152 | #endif | ||
153 | childregs->cp0_status = status; | ||
154 | return 0; | ||
155 | } | ||
156 | *childregs = *regs; | 140 | *childregs = *regs; |
157 | childregs->regs[7] = 0; /* Clear error flag */ | 141 | childregs->regs[7] = 0; /* Clear error flag */ |
142 | |||
158 | childregs->regs[2] = 0; /* Child gets zero as return value */ | 143 | childregs->regs[2] = 0; /* Child gets zero as return value */ |
159 | childregs->regs[29] = usp; | ||
160 | ti->addr_limit = USER_DS; | ||
161 | 144 | ||
145 | if (childregs->cp0_status & ST0_CU0) { | ||
146 | childregs->regs[28] = (unsigned long) ti; | ||
147 | childregs->regs[29] = childksp; | ||
148 | ti->addr_limit = KERNEL_DS; | ||
149 | } else { | ||
150 | childregs->regs[29] = usp; | ||
151 | ti->addr_limit = USER_DS; | ||
152 | } | ||
162 | p->thread.reg29 = (unsigned long) childregs; | 153 | p->thread.reg29 = (unsigned long) childregs; |
163 | p->thread.reg31 = (unsigned long) ret_from_fork; | 154 | p->thread.reg31 = (unsigned long) ret_from_fork; |
164 | 155 | ||
@@ -166,6 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
166 | * New tasks lose permission to use the fpu. This accelerates context | 157 | * New tasks lose permission to use the fpu. This accelerates context |
167 | * switching for most programs since they don't use the fpu. | 158 | * switching for most programs since they don't use the fpu. |
168 | */ | 159 | */ |
160 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | ||
169 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 161 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
170 | 162 | ||
171 | #ifdef CONFIG_MIPS_MT_SMTC | 163 | #ifdef CONFIG_MIPS_MT_SMTC |
@@ -231,6 +223,35 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) | |||
231 | } | 223 | } |
232 | 224 | ||
233 | /* | 225 | /* |
226 | * Create a kernel thread | ||
227 | */ | ||
228 | static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *)) | ||
229 | { | ||
230 | do_exit(fn(arg)); | ||
231 | } | ||
232 | |||
233 | long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | ||
234 | { | ||
235 | struct pt_regs regs; | ||
236 | |||
237 | memset(®s, 0, sizeof(regs)); | ||
238 | |||
239 | regs.regs[4] = (unsigned long) arg; | ||
240 | regs.regs[5] = (unsigned long) fn; | ||
241 | regs.cp0_epc = (unsigned long) kernel_thread_helper; | ||
242 | regs.cp0_status = read_c0_status(); | ||
243 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
244 | regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | | ||
245 | ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2); | ||
246 | #else | ||
247 | regs.cp0_status |= ST0_EXL; | ||
248 | #endif | ||
249 | |||
250 | /* OK, create the new process. */ | ||
251 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); | ||
252 | } | ||
253 | |||
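A usage sketch for the kernel_thread() helper restored above; the worker function is hypothetical, and its return value becomes the thread's exit code via do_exit() in kernel_thread_helper():

/* Hypothetical worker, sharing the kernel's VM via CLONE_VM. */
static int my_worker(void *data)
{
	/* ... perform the deferred work ... */
	return 0;	/* handed to do_exit() by kernel_thread_helper() */
}

static void spawn_worker(void)
{
	long pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		pr_err("kernel_thread failed: %ld\n", pid);
}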
254 | /* | ||
234 | * | 255 | * |
235 | */ | 256 | */ |
236 | struct mips_frame_info { | 257 | struct mips_frame_info { |
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 028f6f837ef..5b7eade41fa 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c | |||
@@ -9,7 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/export.h> | 12 | #include <linux/module.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/bootmem.h> | 15 | #include <linux/bootmem.h> |
@@ -35,6 +35,16 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) | |||
35 | return add_memory_region(base, size, BOOT_MEM_RAM); | 35 | return add_memory_region(base, size, BOOT_MEM_RAM); |
36 | } | 36 | } |
37 | 37 | ||
38 | int __init reserve_mem_mach(unsigned long addr, unsigned long size) | ||
39 | { | ||
40 | return reserve_bootmem(addr, size, BOOTMEM_DEFAULT); | ||
41 | } | ||
42 | |||
43 | void __init free_mem_mach(unsigned long addr, unsigned long size) | ||
44 | { | ||
45 | return free_bootmem(addr, size); | ||
46 | } | ||
47 | |||
38 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | 48 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
39 | { | 49 | { |
40 | return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); | 50 | return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); |
@@ -50,6 +60,20 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, | |||
50 | } | 60 | } |
51 | #endif | 61 | #endif |
52 | 62 | ||
63 | /* | ||
64 | * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq# | ||
65 | * | ||
66 | * Currently the mapping mechanism is trivial; simple flat hwirq numbers are | ||
67 | * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not | ||
68 | * supported. | ||
69 | */ | ||
70 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
71 | const u32 *intspec, unsigned int intsize) | ||
72 | { | ||
73 | return intspec[0]; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
76 | |||
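Since the mapping above is flat, the first cell of a device-tree interrupt specifier is already the Linux irq number. A minimal kernel-context sketch (node and values hypothetical):

static unsigned int probe_device_irq(struct device_node *np)
{
	u32 intspec[1] = { 31 };	/* "interrupts = <31>;" in the DT */

	/* Under the flat mapping above this simply returns 31. */
	return irq_create_of_mapping(np, intspec, 1);
}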
53 | void __init early_init_devtree(void *params) | 77 | void __init early_init_devtree(void *params) |
54 | { | 78 | { |
55 | /* Setup flat device-tree pointer */ | 79 | /* Setup flat device-tree pointer */ |
@@ -67,15 +91,21 @@ void __init early_init_devtree(void *params) | |||
67 | of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); | 91 | of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); |
68 | } | 92 | } |
69 | 93 | ||
70 | void __init __dt_setup_arch(struct boot_param_header *bph) | 94 | void __init device_tree_init(void) |
71 | { | 95 | { |
72 | if (be32_to_cpu(bph->magic) != OF_DT_HEADER) { | 96 | unsigned long base, size; |
73 | pr_err("DTB has bad magic, ignoring builtin OF DTB\n"); | ||
74 | 97 | ||
98 | if (!initial_boot_params) | ||
75 | return; | 99 | return; |
76 | } | ||
77 | 100 | ||
78 | initial_boot_params = bph; | 101 | base = virt_to_phys((void *)initial_boot_params); |
102 | size = be32_to_cpu(initial_boot_params->totalsize); | ||
103 | |||
104 | /* Before we do anything, let's reserve the dt blob */ | ||
105 | reserve_mem_mach(base, size); | ||
106 | |||
107 | unflatten_device_tree(); | ||
79 | 108 | ||
80 | early_init_devtree(initial_boot_params); | 109 | /* free the space reserved for the dt blob */ |
110 | free_mem_mach(base, size); | ||
81 | } | 111 | } |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 4812c6d916e..4e6ea1ffad4 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/mipsmtregs.h> | 34 | #include <asm/mipsmtregs.h> |
35 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
36 | #include <asm/page.h> | 36 | #include <asm/page.h> |
37 | #include <asm/system.h> | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | #include <asm/bootinfo.h> | 39 | #include <asm/bootinfo.h> |
39 | #include <asm/reg.h> | 40 | #include <asm/reg.h> |
@@ -535,7 +536,7 @@ static inline int audit_arch(void) | |||
535 | asmlinkage void syscall_trace_enter(struct pt_regs *regs) | 536 | asmlinkage void syscall_trace_enter(struct pt_regs *regs) |
536 | { | 537 | { |
537 | /* do the secure computing check first */ | 538 | /* do the secure computing check first */ |
538 | secure_computing_strict(regs->regs[2]); | 539 | secure_computing(regs->regs[2]); |
539 | 540 | ||
540 | if (!(current->ptrace & PT_PTRACED)) | 541 | if (!(current->ptrace & PT_PTRACED)) |
541 | goto out; | 542 | goto out; |
@@ -559,9 +560,10 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) | |||
559 | } | 560 | } |
560 | 561 | ||
561 | out: | 562 | out: |
562 | audit_syscall_entry(audit_arch(), regs->regs[2], | 563 | if (unlikely(current->audit_context)) |
563 | regs->regs[4], regs->regs[5], | 564 | audit_syscall_entry(audit_arch(), regs->regs[2], |
564 | regs->regs[6], regs->regs[7]); | 565 | regs->regs[4], regs->regs[5], |
566 | regs->regs[6], regs->regs[7]); | ||
565 | } | 567 | } |
566 | 568 | ||
567 | /* | 569 | /* |
@@ -570,7 +572,9 @@ out: | |||
570 | */ | 572 | */ |
571 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | 573 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) |
572 | { | 574 | { |
573 | audit_syscall_exit(regs); | 575 | if (unlikely(current->audit_context)) |
576 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), | ||
577 | -regs->regs[2]); | ||
574 | 578 | ||
575 | if (!(current->ptrace & PT_PTRACED)) | 579 | if (!(current->ptrace & PT_PTRACED)) |
576 | return; | 580 | return; |
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index a3b017815ef..32644b4a071 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/mipsmtregs.h> | 32 | #include <asm/mipsmtregs.h> |
33 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
34 | #include <asm/page.h> | 34 | #include <asm/page.h> |
35 | #include <asm/system.h> | ||
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include <asm/bootinfo.h> | 37 | #include <asm/bootinfo.h> |
37 | 38 | ||
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 8d32d5a6b46..293898391e6 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/fpregdef.h> | 15 | #include <asm/fpregdef.h> |
16 | #include <asm/mipsregs.h> | 16 | #include <asm/mipsregs.h> |
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <asm/page.h> | ||
18 | #include <asm/regdef.h> | 19 | #include <asm/regdef.h> |
19 | #include <asm/stackframe.h> | 20 | #include <asm/stackframe.h> |
20 | #include <asm/thread_info.h> | 21 | #include <asm/thread_info.h> |
@@ -42,7 +43,7 @@ | |||
42 | 43 | ||
43 | /* | 44 | /* |
44 | * task_struct *resume(task_struct *prev, task_struct *next, | 45 | * task_struct *resume(task_struct *prev, task_struct *next, |
45 | * struct thread_info *next_ti, int usedfpu) | 46 | * struct thread_info *next_ti)
46 | */ | 47 | */ |
47 | LEAF(resume) | 48 | LEAF(resume) |
48 | mfc0 t1, CP0_STATUS | 49 | mfc0 t1, CP0_STATUS |
@@ -50,9 +51,18 @@ LEAF(resume) | |||
50 | cpu_save_nonscratch a0 | 51 | cpu_save_nonscratch a0 |
51 | sw ra, THREAD_REG31(a0) | 52 | sw ra, THREAD_REG31(a0) |
52 | 53 | ||
53 | beqz a3, 1f | 54 | /* |
55 | * check if we need to save FPU registers | ||
56 | */ | ||
57 | lw t3, TASK_THREAD_INFO(a0) | ||
58 | lw t0, TI_FLAGS(t3) | ||
59 | li t1, _TIF_USEDFPU | ||
60 | and t2, t0, t1 | ||
61 | beqz t2, 1f | ||
62 | nor t1, zero, t1 | ||
54 | 63 | ||
55 | PTR_L t3, TASK_THREAD_INFO(a0) | 64 | and t0, t0, t1 |
65 | sw t0, TI_FLAGS(t3) | ||
56 | 66 | ||
57 | /* | 67 | /* |
58 | * clear saved user stack CU1 bit | 68 | * clear saved user stack CU1 bit |
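The assembly sequence above is a test-and-clear of _TIF_USEDFPU; in C terms it amounts to the following sketch (kernel context, not literal source):

struct thread_info *ti = task_thread_info(prev);

if (ti->flags & _TIF_USEDFPU) {
	ti->flags &= ~_TIF_USEDFPU;	/* the nor/and pair in the asm */
	/* fall through: save the FPU registers, clear the saved CU1 bit */
}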
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 8decdfacb44..9414f935446 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/fpregdef.h> | 15 | #include <asm/fpregdef.h> |
16 | #include <asm/mipsregs.h> | 16 | #include <asm/mipsregs.h> |
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <asm/page.h> | ||
18 | #include <asm/pgtable-bits.h> | 19 | #include <asm/pgtable-bits.h> |
19 | #include <asm/regdef.h> | 20 | #include <asm/regdef.h> |
20 | #include <asm/stackframe.h> | 21 | #include <asm/stackframe.h> |
@@ -40,7 +41,7 @@ | |||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * task_struct *resume(task_struct *prev, task_struct *next, | 43 | * task_struct *resume(task_struct *prev, task_struct *next, |
43 | * struct thread_info *next_ti, int usedfpu) | 44 | * struct thread_info *next_ti)
44 | */ | 45 | */ |
45 | .align 5 | 46 | .align 5 |
46 | LEAF(resume) | 47 | LEAF(resume) |
@@ -52,10 +53,16 @@ | |||
52 | /* | 53 | /* |
53 | * check if we need to save FPU registers | 54 | * check if we need to save FPU registers |
54 | */ | 55 | */ |
56 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
57 | LONG_L t0, TI_FLAGS(t3) | ||
58 | li t1, _TIF_USEDFPU | ||
59 | and t2, t0, t1 | ||
60 | beqz t2, 1f | ||
61 | nor t1, zero, t1 | ||
55 | 62 | ||
56 | beqz a3, 1f | 63 | and t0, t0, t1 |
64 | LONG_S t0, TI_FLAGS(t3) | ||
57 | 65 | ||
58 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
59 | /* | 66 | /* |
60 | * clear saved user stack CU1 bit | 67 | * clear saved user stack CU1 bit |
61 | */ | 68 | */ |
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 804ebb2c34a..87481f916a6 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S | |||
@@ -9,16 +9,12 @@ | |||
9 | #include <asm/asm.h> | 9 | #include <asm/asm.h> |
10 | #include <asm/asmmacro.h> | 10 | #include <asm/asmmacro.h> |
11 | #include <asm/regdef.h> | 11 | #include <asm/regdef.h> |
12 | #include <asm/page.h> | ||
12 | #include <asm/mipsregs.h> | 13 | #include <asm/mipsregs.h> |
13 | #include <asm/stackframe.h> | 14 | #include <asm/stackframe.h> |
14 | #include <asm/addrspace.h> | 15 | #include <asm/addrspace.h> |
15 | 16 | ||
16 | LEAF(relocate_new_kernel) | 17 | LEAF(relocate_new_kernel) |
17 | PTR_L a0, arg0 | ||
18 | PTR_L a1, arg1 | ||
19 | PTR_L a2, arg2 | ||
20 | PTR_L a3, arg3 | ||
21 | |||
22 | PTR_L s0, kexec_indirection_page | 18 | PTR_L s0, kexec_indirection_page |
23 | PTR_L s1, kexec_start_address | 19 | PTR_L s1, kexec_start_address |
24 | 20 | ||
@@ -30,6 +26,7 @@ process_entry: | |||
30 | and s3, s2, 0x1 | 26 | and s3, s2, 0x1 |
31 | beq s3, zero, 1f | 27 | beq s3, zero, 1f |
32 | and s4, s2, ~0x1 /* store destination addr in s4 */ | 28 | and s4, s2, ~0x1 /* store destination addr in s4 */ |
29 | move a0, s4 | ||
33 | b process_entry | 30 | b process_entry |
34 | 31 | ||
35 | 1: | 32 | 1: |
@@ -49,7 +46,7 @@ process_entry: | |||
49 | and s3, s2, 0x8 | 46 | and s3, s2, 0x8 |
50 | beq s3, zero, process_entry | 47 | beq s3, zero, process_entry |
51 | and s2, s2, ~0x8 | 48 | and s2, s2, ~0x8 |
52 | li s6, (1 << _PAGE_SHIFT) / SZREG | 49 | li s6, (1 << PAGE_SHIFT) / SZREG |
53 | 50 | ||
54 | copy_word: | 51 | copy_word: |
55 | /* copy page word by word */ | 52 | /* copy page word by word */ |
@@ -63,111 +60,10 @@ copy_word: | |||
63 | b process_entry | 60 | b process_entry |
64 | 61 | ||
65 | done: | 62 | done: |
66 | #ifdef CONFIG_SMP | ||
67 | /* kexec_flag reset is a signal to other CPUs that the kernel | ||
68 | was moved to its location. Note: we need the relocated address | ||
69 | of kexec_flag. */ | ||
70 | |||
71 | bal 1f | ||
72 | 1: move t1,ra; | ||
73 | PTR_LA t2,1b | ||
74 | PTR_LA t0,kexec_flag | ||
75 | PTR_SUB t0,t0,t2; | ||
76 | PTR_ADD t0,t1,t0; | ||
77 | LONG_S zero,(t0) | ||
78 | #endif | ||
79 | |||
80 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
81 | /* We need to flush the I-cache before jumping to the new kernel. | ||
82 | * Unfortunately, this code is cpu-specific. | ||
83 | */ | ||
84 | .set push | ||
85 | .set noreorder | ||
86 | syncw | ||
87 | syncw | ||
88 | synci 0($0) | ||
89 | .set pop | ||
90 | #else | ||
91 | sync | ||
92 | #endif | ||
93 | /* jump to kexec_start_address */ | 63 | /* jump to kexec_start_address */ |
94 | j s1 | 64 | j s1 |
95 | END(relocate_new_kernel) | 65 | END(relocate_new_kernel) |
96 | 66 | ||
97 | #ifdef CONFIG_SMP | ||
98 | /* | ||
99 | * Other CPUs should wait until the code is relocated and | ||
100 | * then start at the entry (?) point. | ||
101 | */ | ||
102 | LEAF(kexec_smp_wait) | ||
103 | PTR_L a0, s_arg0 | ||
104 | PTR_L a1, s_arg1 | ||
105 | PTR_L a2, s_arg2 | ||
106 | PTR_L a3, s_arg3 | ||
107 | PTR_L s1, kexec_start_address | ||
108 | |||
109 | /* Non-relocated addresses work for args and kexec_start_address (the | ||
110 | * old kernel is not overwritten), but we need the relocated address of | ||
111 | * kexec_flag. | ||
112 | */ | ||
113 | |||
114 | bal 1f | ||
115 | 1: move t1,ra; | ||
116 | PTR_LA t2,1b | ||
117 | PTR_LA t0,kexec_flag | ||
118 | PTR_SUB t0,t0,t2; | ||
119 | PTR_ADD t0,t1,t0; | ||
120 | |||
121 | 1: LONG_L s0, (t0) | ||
122 | bne s0, zero,1b | ||
123 | |||
124 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
125 | .set push | ||
126 | .set noreorder | ||
127 | synci 0($0) | ||
128 | .set pop | ||
129 | #else | ||
130 | sync | ||
131 | #endif | ||
132 | j s1 | ||
133 | END(kexec_smp_wait) | ||
134 | #endif | ||
135 | |||
136 | #ifdef __mips64 | ||
137 | /* all PTRs must be aligned to 8 bytes in 64-bit mode */ | ||
138 | .align 3 | ||
139 | #endif | ||
140 | |||
141 | /* All parameters to the new kernel are passed in registers a0-a3. | ||
142 | * kexec_args[0..3] are used to prepare the register values. | ||
143 | */ | ||
144 | |||
145 | kexec_args: | ||
146 | EXPORT(kexec_args) | ||
147 | arg0: PTR 0x0 | ||
148 | arg1: PTR 0x0 | ||
149 | arg2: PTR 0x0 | ||
150 | arg3: PTR 0x0 | ||
151 | .size kexec_args,PTRSIZE*4 | ||
152 | |||
153 | #ifdef CONFIG_SMP | ||
154 | /* | ||
155 | * Secondary CPUs may have different kernel parameters in | ||
156 | * their registers a0-a3. secondary_kexec_args[0..3] are used | ||
157 | * to prepare register values. | ||
158 | */ | ||
159 | secondary_kexec_args: | ||
160 | EXPORT(secondary_kexec_args) | ||
161 | s_arg0: PTR 0x0 | ||
162 | s_arg1: PTR 0x0 | ||
163 | s_arg2: PTR 0x0 | ||
164 | s_arg3: PTR 0x0 | ||
165 | .size secondary_kexec_args,PTRSIZE*4 | ||
166 | kexec_flag: | ||
167 | LONG 0x1 | ||
168 | |||
169 | #endif | ||
170 | |||
171 | kexec_start_address: | 67 | kexec_start_address: |
172 | EXPORT(kexec_start_address) | 68 | EXPORT(kexec_start_address) |
173 | PTR 0x0 | 69 | PTR 0x0 |
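The bal/PTR_LA pairs removed above compute a relocation delta: the run-time address of a local label (ra after bal) minus its link-time address (PTR_LA), which is then applied to kexec_flag. A standalone illustration with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical link-time vs run-time addresses of label "1:". */
	uintptr_t linktime_label = 0x80100000u;	/* PTR_LA t2, 1b */
	uintptr_t runtime_label  = 0x80200000u;	/* ra after "bal 1f" */
	uintptr_t linktime_flag  = 0x80100040u;	/* PTR_LA t0, kexec_flag */

	/* PTR_SUB/PTR_ADD in the asm: the same delta relocates kexec_flag. */
	uintptr_t delta = runtime_label - linktime_label;

	printf("relocated kexec_flag at %#lx\n",
	       (unsigned long)(linktime_flag + delta));
	return 0;
}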
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c index 07fc5244aed..060563a712b 100644 --- a/arch/mips/kernel/reset.c +++ b/arch/mips/kernel/reset.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Copyright (C) 2001 MIPS Technologies, Inc. | 7 | * Copyright (C) 2001 MIPS Technologies, Inc. |
8 | */ | 8 | */ |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/export.h> | 10 | #include <linux/module.h> |
11 | #include <linux/pm.h> | 11 | #include <linux/pm.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/reboot.h> | 13 | #include <linux/reboot.h> |
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index b8c18dcdd2c..7a80b7cda7c 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | ||
22 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
@@ -38,6 +39,7 @@ | |||
38 | #include <linux/atomic.h> | 39 | #include <linux/atomic.h> |
39 | #include <asm/cpu.h> | 40 | #include <asm/cpu.h> |
40 | #include <asm/processor.h> | 41 | #include <asm/processor.h> |
42 | #include <asm/system.h> | ||
41 | #include <asm/vpe.h> | 43 | #include <asm/vpe.h> |
42 | #include <asm/rtlx.h> | 44 | #include <asm/rtlx.h> |
43 | 45 | ||
@@ -472,6 +474,7 @@ static const struct file_operations rtlx_fops = { | |||
472 | 474 | ||
473 | static struct irqaction rtlx_irq = { | 475 | static struct irqaction rtlx_irq = { |
474 | .handler = rtlx_interrupt, | 476 | .handler = rtlx_interrupt, |
477 | .flags = IRQF_DISABLED, | ||
475 | .name = "RTLX", | 478 | .name = "RTLX", |
476 | }; | 479 | }; |
477 | 480 | ||
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index d20a4bc9ed0..865bc7a6f5a 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -69,7 +69,18 @@ stack_done: | |||
69 | 1: sw v0, PT_R2(sp) # result | 69 | 1: sw v0, PT_R2(sp) # result |
70 | 70 | ||
71 | o32_syscall_exit: | 71 | o32_syscall_exit: |
72 | j syscall_exit_partial | 72 | local_irq_disable # make sure need_resched and |
73 | # signals don't change between | ||
74 | # sampling and return | ||
75 | lw a2, TI_FLAGS($28) # current->work | ||
76 | li t0, _TIF_ALLWORK_MASK | ||
77 | and t0, a2 | ||
78 | bnez t0, o32_syscall_exit_work | ||
79 | |||
80 | j restore_partial | ||
81 | |||
82 | o32_syscall_exit_work: | ||
83 | j syscall_exit_work_partial | ||
73 | 84 | ||
74 | /* ------------------------------------------------------------------------ */ | 85 | /* ------------------------------------------------------------------------ */ |
75 | 86 | ||
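The restored exit path above follows the usual pattern: disable interrupts, sample the thread-info flags once, and take the slow path only when a work bit is set. A C-level sketch of the same logic (kernel context):

unsigned long work;

local_irq_disable();			/* flags must not change under us */
work = current_thread_info()->flags;
if (work & _TIF_ALLWORK_MASK) {
	/* slow path: j syscall_exit_work_partial (signals, resched, audit) */
} else {
	/* fast path: j restore_partial, straight back to user space */
}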
@@ -485,7 +496,7 @@ einval: li v0, -ENOSYS | |||
485 | sys sys_lookup_dcookie 4 | 496 | sys sys_lookup_dcookie 4 |
486 | sys sys_epoll_create 1 | 497 | sys sys_epoll_create 1 |
487 | sys sys_epoll_ctl 4 | 498 | sys sys_epoll_ctl 4 |
488 | sys sys_epoll_wait 4 /* 4250 */ | 499 | sys sys_epoll_wait 3 /* 4250 */ |
489 | sys sys_remap_file_pages 5 | 500 | sys sys_remap_file_pages 5 |
490 | sys sys_set_tid_address 1 | 501 | sys sys_set_tid_address 1 |
491 | sys sys_restart_syscall 0 | 502 | sys sys_restart_syscall 0 |
@@ -580,10 +591,6 @@ einval: li v0, -ENOSYS | |||
580 | sys sys_syncfs 1 | 591 | sys sys_syncfs 1 |
581 | sys sys_sendmmsg 4 | 592 | sys sys_sendmmsg 4 |
582 | sys sys_setns 2 | 593 | sys sys_setns 2 |
583 | sys sys_process_vm_readv 6 /* 4345 */ | ||
584 | sys sys_process_vm_writev 6 | ||
585 | sys sys_kcmp 5 | ||
586 | sys sys_finit_module 3 | ||
587 | .endm | 594 | .endm |
588 | 595 | ||
589 | /* We pre-compute the number of _instruction_ bytes needed to | 596 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index b64f642da07..fb7334bea73 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -72,7 +72,18 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
72 | 1: sd v0, PT_R2(sp) # result | 72 | 1: sd v0, PT_R2(sp) # result |
73 | 73 | ||
74 | n64_syscall_exit: | 74 | n64_syscall_exit: |
75 | j syscall_exit_partial | 75 | local_irq_disable # make sure need_resched and |
76 | # signals don't change between | ||
77 | # sampling and return | ||
78 | LONG_L a2, TI_FLAGS($28) # current->work | ||
79 | li t0, _TIF_ALLWORK_MASK | ||
80 | and t0, a2, t0 | ||
81 | bnez t0, n64_syscall_exit_work | ||
82 | |||
83 | j restore_partial | ||
84 | |||
85 | n64_syscall_exit_work: | ||
86 | j syscall_exit_work_partial | ||
76 | 87 | ||
77 | /* ------------------------------------------------------------------------ */ | 88 | /* ------------------------------------------------------------------------ */ |
78 | 89 | ||
@@ -419,8 +430,4 @@ sys_call_table: | |||
419 | PTR sys_syncfs | 430 | PTR sys_syncfs |
420 | PTR sys_sendmmsg | 431 | PTR sys_sendmmsg |
421 | PTR sys_setns | 432 | PTR sys_setns |
422 | PTR sys_process_vm_readv | ||
423 | PTR sys_process_vm_writev /* 5305 */ | ||
424 | PTR sys_kcmp | ||
425 | PTR sys_finit_module | ||
426 | .size sys_call_table,.-sys_call_table | 433 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c29ac197f44..6de1f598346 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -17,6 +17,12 @@ | |||
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/unistd.h> | 18 | #include <asm/unistd.h> |
19 | 19 | ||
20 | /* This duplicates the definition from <linux/sched.h> */ | ||
21 | #define PT_TRACESYS 0x00000002 /* tracing system calls */ | ||
22 | |||
23 | /* This duplicates the definition from <asm/signal.h> */ | ||
24 | #define SIGILL 4 /* Illegal instruction (ANSI). */ | ||
25 | |||
20 | #ifndef CONFIG_MIPS32_O32 | 26 | #ifndef CONFIG_MIPS32_O32 |
21 | /* No O32, so define handle_sys here */ | 27 | /* No O32, so define handle_sys here */ |
22 | #define handle_sysn32 handle_sys | 28 | #define handle_sysn32 handle_sys |
@@ -64,7 +70,18 @@ NESTED(handle_sysn32, PT_SIZE, sp) | |||
64 | sd t1, PT_R0(sp) # save it for syscall restarting | 70 | sd t1, PT_R0(sp) # save it for syscall restarting |
65 | 1: sd v0, PT_R2(sp) # result | 71 | 1: sd v0, PT_R2(sp) # result |
66 | 72 | ||
67 | j syscall_exit_partial | 73 | local_irq_disable # make sure need_resched and |
74 | # signals don't change between | ||
75 | # sampling and return | ||
76 | LONG_L a2, TI_FLAGS($28) # current->work | ||
77 | li t0, _TIF_ALLWORK_MASK | ||
78 | and t0, a2, t0 | ||
79 | bnez t0, n32_syscall_exit_work | ||
80 | |||
81 | j restore_partial | ||
82 | |||
83 | n32_syscall_exit_work: | ||
84 | j syscall_exit_work_partial | ||
68 | 85 | ||
69 | /* ------------------------------------------------------------------------ */ | 86 | /* ------------------------------------------------------------------------ */ |
70 | 87 | ||
@@ -161,7 +178,7 @@ EXPORT(sysn32_call_table) | |||
161 | PTR sys_getsockopt | 178 | PTR sys_getsockopt |
162 | PTR sys_clone /* 6055 */ | 179 | PTR sys_clone /* 6055 */ |
163 | PTR sys_fork | 180 | PTR sys_fork |
164 | PTR compat_sys_execve | 181 | PTR sys32_execve |
165 | PTR sys_exit | 182 | PTR sys_exit |
166 | PTR compat_sys_wait4 | 183 | PTR compat_sys_wait4 |
167 | PTR sys_kill /* 6060 */ | 184 | PTR sys_kill /* 6060 */ |
@@ -391,14 +408,14 @@ EXPORT(sysn32_call_table) | |||
391 | PTR sys_timerfd_create | 408 | PTR sys_timerfd_create |
392 | PTR compat_sys_timerfd_gettime /* 6285 */ | 409 | PTR compat_sys_timerfd_gettime /* 6285 */ |
393 | PTR compat_sys_timerfd_settime | 410 | PTR compat_sys_timerfd_settime |
394 | PTR compat_sys_signalfd4 | 411 | PTR sys_signalfd4 |
395 | PTR sys_eventfd2 | 412 | PTR sys_eventfd2 |
396 | PTR sys_epoll_create1 | 413 | PTR sys_epoll_create1 |
397 | PTR sys_dup3 /* 6290 */ | 414 | PTR sys_dup3 /* 6290 */ |
398 | PTR sys_pipe2 | 415 | PTR sys_pipe2 |
399 | PTR sys_inotify_init1 | 416 | PTR sys_inotify_init1 |
400 | PTR compat_sys_preadv | 417 | PTR sys_preadv |
401 | PTR compat_sys_pwritev | 418 | PTR sys_pwritev |
402 | PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ | 419 | PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ |
403 | PTR sys_perf_event_open | 420 | PTR sys_perf_event_open |
404 | PTR sys_accept4 | 421 | PTR sys_accept4 |
@@ -413,8 +430,4 @@ EXPORT(sysn32_call_table) | |||
413 | PTR sys_syncfs | 430 | PTR sys_syncfs |
414 | PTR compat_sys_sendmmsg | 431 | PTR compat_sys_sendmmsg |
415 | PTR sys_setns | 432 | PTR sys_setns |
416 | PTR compat_sys_process_vm_readv | ||
417 | PTR compat_sys_process_vm_writev /* 6310 */ | ||
418 | PTR sys_kcmp | ||
419 | PTR sys_finit_module | ||
420 | .size sysn32_call_table,.-sysn32_call_table | 433 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index cf3e75e4665..1d813169e45 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -99,7 +99,18 @@ NESTED(handle_sys, PT_SIZE, sp) | |||
99 | 1: sd v0, PT_R2(sp) # result | 99 | 1: sd v0, PT_R2(sp) # result |
100 | 100 | ||
101 | o32_syscall_exit: | 101 | o32_syscall_exit: |
102 | j syscall_exit_partial | 102 | local_irq_disable # make sure need_resched and
103 | # signals don't change between | ||
104 | # sampling and return | ||
105 | LONG_L a2, TI_FLAGS($28) | ||
106 | li t0, _TIF_ALLWORK_MASK | ||
107 | and t0, a2, t0 | ||
108 | bnez t0, o32_syscall_exit_work | ||
109 | |||
110 | j restore_partial | ||
111 | |||
112 | o32_syscall_exit_work: | ||
113 | j syscall_exit_work_partial | ||
103 | 114 | ||
104 | /* ------------------------------------------------------------------------ */ | 115 | /* ------------------------------------------------------------------------ */ |
105 | 116 | ||
@@ -203,7 +214,7 @@ sys_call_table: | |||
203 | PTR sys_creat | 214 | PTR sys_creat |
204 | PTR sys_link | 215 | PTR sys_link |
205 | PTR sys_unlink /* 4010 */ | 216 | PTR sys_unlink /* 4010 */ |
206 | PTR compat_sys_execve | 217 | PTR sys32_execve |
207 | PTR sys_chdir | 218 | PTR sys_chdir |
208 | PTR compat_sys_time | 219 | PTR compat_sys_time |
209 | PTR sys_mknod | 220 | PTR sys_mknod |
@@ -537,8 +548,4 @@ sys_call_table: | |||
537 | PTR sys_syncfs | 548 | PTR sys_syncfs |
538 | PTR compat_sys_sendmmsg | 549 | PTR compat_sys_sendmmsg |
539 | PTR sys_setns | 550 | PTR sys_setns |
540 | PTR compat_sys_process_vm_readv /* 4345 */ | ||
541 | PTR compat_sys_process_vm_writev | ||
542 | PTR sys_kcmp | ||
543 | PTR sys_finit_module | ||
544 | .size sys_call_table,.-sys_call_table | 551 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c41187801c..8ad1d5679f1 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -12,9 +12,8 @@ | |||
12 | */ | 12 | */ |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | #include <linux/export.h> | 15 | #include <linux/module.h> |
16 | #include <linux/screen_info.h> | 16 | #include <linux/screen_info.h> |
17 | #include <linux/memblock.h> | ||
18 | #include <linux/bootmem.h> | 17 | #include <linux/bootmem.h> |
19 | #include <linux/initrd.h> | 18 | #include <linux/initrd.h> |
20 | #include <linux/root_dev.h> | 19 | #include <linux/root_dev.h> |
@@ -22,7 +21,6 @@ | |||
22 | #include <linux/console.h> | 21 | #include <linux/console.h> |
23 | #include <linux/pfn.h> | 22 | #include <linux/pfn.h> |
24 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
25 | #include <linux/kexec.h> | ||
26 | 24 | ||
27 | #include <asm/addrspace.h> | 25 | #include <asm/addrspace.h> |
28 | #include <asm/bootinfo.h> | 26 | #include <asm/bootinfo.h> |
@@ -32,6 +30,7 @@ | |||
32 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
33 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
34 | #include <asm/smp-ops.h> | 32 | #include <asm/smp-ops.h> |
33 | #include <asm/system.h> | ||
35 | #include <asm/prom.h> | 34 | #include <asm/prom.h> |
36 | 35 | ||
37 | struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; | 36 | struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; |
@@ -80,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", }; | |||
80 | void __init add_memory_region(phys_t start, phys_t size, long type) | 79 | void __init add_memory_region(phys_t start, phys_t size, long type) |
81 | { | 80 | { |
82 | int x = boot_mem_map.nr_map; | 81 | int x = boot_mem_map.nr_map; |
83 | int i; | 82 | struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1; |
84 | 83 | ||
85 | /* Sanity check */ | 84 | /* Sanity check */ |
86 | if (start + size < start) { | 85 | if (start + size < start) { |
@@ -89,29 +88,15 @@ void __init add_memory_region(phys_t start, phys_t size, long type) | |||
89 | } | 88 | } |
90 | 89 | ||
91 | /* | 90 | /* |
92 | * Try to merge with existing entry, if any. | 91 | * Try to merge with the previous entry, if any. This is far less than
92 | * perfect but is sufficient for most real-world cases.
93 | */ | 93 | */ |
94 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 94 | if (x && prev->addr + prev->size == start && prev->type == type) { |
95 | struct boot_mem_map_entry *entry = boot_mem_map.map + i; | 95 | prev->size += size; |
96 | unsigned long top; | ||
97 | |||
98 | if (entry->type != type) | ||
99 | continue; | ||
100 | |||
101 | if (start + size < entry->addr) | ||
102 | continue; /* no overlap */ | ||
103 | |||
104 | if (entry->addr + entry->size < start) | ||
105 | continue; /* no overlap */ | ||
106 | |||
107 | top = max(entry->addr + entry->size, start + size); | ||
108 | entry->addr = min(entry->addr, start); | ||
109 | entry->size = top - entry->addr; | ||
110 | |||
111 | return; | 96 | return; |
112 | } | 97 | } |
113 | 98 | ||
114 | if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { | 99 | if (x == BOOT_MEM_MAP_MAX) { |
115 | pr_err("Ooops! Too many entries in the memory map!\n"); | 100 | pr_err("Ooops! Too many entries in the memory map!\n"); |
116 | return; | 101 | return; |
117 | } | 102 | } |
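As a usage sketch: a board's plat_mem_setup() feeds RAM regions through this interface, and adjacent regions of the same type coalesce via the merge logic above (addresses illustrative):

/* Two adjacent BOOT_MEM_RAM regions; the second merges into the first. */
add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM);	/* 256 MB  */
add_memory_region(0x10000000, 0x10000000, BOOT_MEM_RAM);	/* +256 MB */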
@@ -136,9 +121,6 @@ static void __init print_memory_map(void) | |||
136 | case BOOT_MEM_RAM: | 121 | case BOOT_MEM_RAM: |
137 | printk(KERN_CONT "(usable)\n"); | 122 | printk(KERN_CONT "(usable)\n"); |
138 | break; | 123 | break; |
139 | case BOOT_MEM_INIT_RAM: | ||
140 | printk(KERN_CONT "(usable after init)\n"); | ||
141 | break; | ||
142 | case BOOT_MEM_ROM_DATA: | 124 | case BOOT_MEM_ROM_DATA: |
143 | printk(KERN_CONT "(ROM data)\n"); | 125 | printk(KERN_CONT "(ROM data)\n"); |
144 | break; | 126 | break; |
@@ -370,7 +352,7 @@ static void __init bootmem_init(void) | |||
370 | continue; | 352 | continue; |
371 | #endif | 353 | #endif |
372 | 354 | ||
373 | memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); | 355 | add_active_range(0, start, end); |
374 | } | 356 | } |
375 | 357 | ||
376 | /* | 358 | /* |
@@ -379,24 +361,15 @@ static void __init bootmem_init(void) | |||
379 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 361 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
380 | unsigned long start, end, size; | 362 | unsigned long start, end, size; |
381 | 363 | ||
382 | start = PFN_UP(boot_mem_map.map[i].addr); | ||
383 | end = PFN_DOWN(boot_mem_map.map[i].addr | ||
384 | + boot_mem_map.map[i].size); | ||
385 | |||
386 | /* | 364 | /* |
387 | * Reserve usable memory. | 365 | * Reserve usable memory. |
388 | */ | 366 | */ |
389 | switch (boot_mem_map.map[i].type) { | 367 | if (boot_mem_map.map[i].type != BOOT_MEM_RAM) |
390 | case BOOT_MEM_RAM: | ||
391 | break; | ||
392 | case BOOT_MEM_INIT_RAM: | ||
393 | memory_present(0, start, end); | ||
394 | continue; | ||
395 | default: | ||
396 | /* Not usable memory */ | ||
397 | continue; | 368 | continue; |
398 | } | ||
399 | 369 | ||
370 | start = PFN_UP(boot_mem_map.map[i].addr); | ||
371 | end = PFN_DOWN(boot_mem_map.map[i].addr | ||
372 | + boot_mem_map.map[i].size); | ||
400 | /* | 373 | /* |
401 | * We round the start address of usable memory up and the end | 374 |
402 | * of the usable range down. | 375 |
@@ -482,33 +455,11 @@ early_param("mem", early_parse_mem); | |||
482 | 455 | ||
483 | static void __init arch_mem_init(char **cmdline_p) | 456 | static void __init arch_mem_init(char **cmdline_p) |
484 | { | 457 | { |
485 | phys_t init_mem, init_end, init_size; | ||
486 | |||
487 | extern void plat_mem_setup(void); | 458 | extern void plat_mem_setup(void); |
488 | 459 | ||
489 | /* call board setup routine */ | 460 | /* call board setup routine */ |
490 | plat_mem_setup(); | 461 | plat_mem_setup(); |
491 | 462 | ||
492 | init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT; | ||
493 | init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT; | ||
494 | init_size = init_end - init_mem; | ||
495 | if (init_size) { | ||
496 | /* Make sure it is in the boot_mem_map */ | ||
497 | int i, found; | ||
498 | found = 0; | ||
499 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
500 | if (init_mem >= boot_mem_map.map[i].addr && | ||
501 | init_mem < (boot_mem_map.map[i].addr + | ||
502 | boot_mem_map.map[i].size)) { | ||
503 | found = 1; | ||
504 | break; | ||
505 | } | ||
506 | } | ||
507 | if (!found) | ||
508 | add_memory_region(init_mem, init_size, | ||
509 | BOOT_MEM_INIT_RAM); | ||
510 | } | ||
511 | |||
512 | pr_info("Determined physical RAM map:\n"); | 463 | pr_info("Determined physical RAM map:\n"); |
513 | print_memory_map(); | 464 | print_memory_map(); |
514 | 465 | ||
@@ -537,64 +488,12 @@ static void __init arch_mem_init(char **cmdline_p) | |||
537 | } | 488 | } |
538 | 489 | ||
539 | bootmem_init(); | 490 | bootmem_init(); |
540 | #ifdef CONFIG_KEXEC | ||
541 | if (crashk_res.start != crashk_res.end) | ||
542 | reserve_bootmem(crashk_res.start, | ||
543 | crashk_res.end - crashk_res.start + 1, | ||
544 | BOOTMEM_DEFAULT); | ||
545 | #endif | ||
546 | device_tree_init(); | 491 | device_tree_init(); |
547 | sparse_init(); | 492 | sparse_init(); |
548 | plat_swiotlb_setup(); | 493 | plat_swiotlb_setup(); |
549 | paging_init(); | 494 | paging_init(); |
550 | } | 495 | } |
551 | 496 | ||
552 | #ifdef CONFIG_KEXEC | ||
553 | static inline unsigned long long get_total_mem(void) | ||
554 | { | ||
555 | unsigned long long total; | ||
556 | |||
557 | total = max_pfn - min_low_pfn; | ||
558 | return total << PAGE_SHIFT; | ||
559 | } | ||
560 | |||
561 | static void __init mips_parse_crashkernel(void) | ||
562 | { | ||
563 | unsigned long long total_mem; | ||
564 | unsigned long long crash_size, crash_base; | ||
565 | int ret; | ||
566 | |||
567 | total_mem = get_total_mem(); | ||
568 | ret = parse_crashkernel(boot_command_line, total_mem, | ||
569 | &crash_size, &crash_base); | ||
570 | if (ret != 0 || crash_size <= 0) | ||
571 | return; | ||
572 | |||
573 | crashk_res.start = crash_base; | ||
574 | crashk_res.end = crash_base + crash_size - 1; | ||
575 | } | ||
576 | |||
577 | static void __init request_crashkernel(struct resource *res) | ||
578 | { | ||
579 | int ret; | ||
580 | |||
581 | ret = request_resource(res, &crashk_res); | ||
582 | if (!ret) | ||
583 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", | ||
584 | (unsigned long)((crashk_res.end - | ||
585 | crashk_res.start + 1) >> 20), | ||
586 | (unsigned long)(crashk_res.start >> 20)); | ||
587 | } | ||
588 | #else /* !defined(CONFIG_KEXEC) */ | ||
589 | static void __init mips_parse_crashkernel(void) | ||
590 | { | ||
591 | } | ||
592 | |||
593 | static void __init request_crashkernel(struct resource *res) | ||
594 | { | ||
595 | } | ||
596 | #endif /* !defined(CONFIG_KEXEC) */ | ||
597 | |||
598 | static void __init resource_init(void) | 497 | static void __init resource_init(void) |
599 | { | 498 | { |
600 | int i; | 499 | int i; |
@@ -610,8 +509,6 @@ static void __init resource_init(void) | |||
610 | /* | 509 | /* |
611 | * Request address space for all standard RAM. | 510 | * Request address space for all standard RAM. |
612 | */ | 511 | */ |
613 | mips_parse_crashkernel(); | ||
614 | |||
615 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 512 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
616 | struct resource *res; | 513 | struct resource *res; |
617 | unsigned long start, end; | 514 | unsigned long start, end; |
@@ -626,7 +523,6 @@ static void __init resource_init(void) | |||
626 | res = alloc_bootmem(sizeof(struct resource)); | 523 | res = alloc_bootmem(sizeof(struct resource)); |
627 | switch (boot_mem_map.map[i].type) { | 524 | switch (boot_mem_map.map[i].type) { |
628 | case BOOT_MEM_RAM: | 525 | case BOOT_MEM_RAM: |
629 | case BOOT_MEM_INIT_RAM: | ||
630 | case BOOT_MEM_ROM_DATA: | 526 | case BOOT_MEM_ROM_DATA: |
631 | res->name = "System RAM"; | 527 | res->name = "System RAM"; |
632 | break; | 528 | break; |
@@ -648,7 +544,6 @@ static void __init resource_init(void) | |||
648 | */ | 544 | */ |
649 | request_resource(res, &code_resource); | 545 | request_resource(res, &code_resource); |
650 | request_resource(res, &data_resource); | 546 | request_resource(res, &data_resource); |
651 | request_crashkernel(res); | ||
652 | } | 547 | } |
653 | } | 548 | } |
654 | 549 | ||
@@ -675,8 +570,6 @@ void __init setup_arch(char **cmdline_p) | |||
675 | 570 | ||
676 | resource_init(); | 571 | resource_init(); |
677 | plat_smp_setup(); | 572 | plat_smp_setup(); |
678 | |||
679 | cpu_cache_init(); | ||
680 | } | 573 | } |
681 | 574 | ||
682 | unsigned long kernelsp[NR_CPUS]; | 575 | unsigned long kernelsp[NR_CPUS]; |
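Note: the kexec hunks above remove the whole crashkernel path: parsing of the `crashkernel=size@offset` boot argument against total RAM, recording the window in crashk_res, the bootmem reservation in arch_mem_init(), and the request_resource() call that publishes the region under each System RAM resource. As a worked example of the standard parameter semantics (not specific to this patch), booting with `crashkernel=64M@32M` would have left crashk_res spanning 64 MB at physical address 32 MB and printed:

    Reserving 64MB of memory at 32MB for crashkernel

A minimal sketch of the call sequence the removed code performed, condensed from the hunks above (sketch only, not a drop-in replacement):

    unsigned long long total = max_pfn - min_low_pfn;
    unsigned long long crash_size, crash_base;

    total <<= PAGE_SHIFT;                     /* usable RAM in bytes */
    if (!parse_crashkernel(boot_command_line, total,
                           &crash_size, &crash_base) && crash_size > 0) {
            crashk_res.start = crash_base;
            crashk_res.end   = crash_base + crash_size - 1;
    }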
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 9c60d09e62a..10263b40598 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
@@ -19,6 +19,8 @@ | |||
19 | # define DEBUGP(fmt, args...) | 19 | # define DEBUGP(fmt, args...) |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
23 | |||
22 | /* | 24 | /* |
23 | * Determine which stack to use.. | 25 | * Determine which stack to use.. |
24 | */ | 26 | */ |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b6aa7703501..f8524003676 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <asm/cpu-features.h> | 34 | #include <asm/cpu-features.h> |
35 | #include <asm/war.h> | 35 | #include <asm/war.h> |
36 | #include <asm/vdso.h> | 36 | #include <asm/vdso.h> |
37 | #include <asm/dsp.h> | ||
38 | 37 | ||
39 | #include "signal-common.h" | 38 | #include "signal-common.h" |
40 | 39 | ||
@@ -255,7 +254,18 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
255 | uset = (sigset_t __user *) regs.regs[4]; | 254 | uset = (sigset_t __user *) regs.regs[4]; |
256 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) | 255 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) |
257 | return -EFAULT; | 256 | return -EFAULT; |
258 | return sigsuspend(&newset); | 257 | sigdelsetmask(&newset, ~_BLOCKABLE); |
258 | |||
259 | spin_lock_irq(&current->sighand->siglock); | ||
260 | current->saved_sigmask = current->blocked; | ||
261 | current->blocked = newset; | ||
262 | recalc_sigpending(); | ||
263 | spin_unlock_irq(&current->sighand->siglock); | ||
264 | |||
265 | current->state = TASK_INTERRUPTIBLE; | ||
266 | schedule(); | ||
267 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
268 | return -ERESTARTNOHAND; | ||
259 | } | 269 | } |
260 | #endif | 270 | #endif |
261 | 271 | ||
@@ -273,7 +283,18 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
273 | unewset = (sigset_t __user *) regs.regs[4]; | 283 | unewset = (sigset_t __user *) regs.regs[4]; |
274 | if (copy_from_user(&newset, unewset, sizeof(newset))) | 284 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
275 | return -EFAULT; | 285 | return -EFAULT; |
276 | return sigsuspend(&newset); | 286 | sigdelsetmask(&newset, ~_BLOCKABLE); |
287 | |||
288 | spin_lock_irq(&current->sighand->siglock); | ||
289 | current->saved_sigmask = current->blocked; | ||
290 | current->blocked = newset; | ||
291 | recalc_sigpending(); | ||
292 | spin_unlock_irq(&current->sighand->siglock); | ||
293 | |||
294 | current->state = TASK_INTERRUPTIBLE; | ||
295 | schedule(); | ||
296 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
297 | return -ERESTARTNOHAND; | ||
277 | } | 298 | } |
278 | 299 | ||
279 | #ifdef CONFIG_TRAD_SIGNALS | 300 | #ifdef CONFIG_TRAD_SIGNALS |
@@ -339,7 +360,11 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
339 | if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) | 360 | if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) |
340 | goto badframe; | 361 | goto badframe; |
341 | 362 | ||
342 | set_current_blocked(&blocked); | 363 | sigdelsetmask(&blocked, ~_BLOCKABLE); |
364 | spin_lock_irq(&current->sighand->siglock); | ||
365 | current->blocked = blocked; | ||
366 | recalc_sigpending(); | ||
367 | spin_unlock_irq(&current->sighand->siglock); | ||
343 | 368 | ||
344 | sig = restore_sigcontext(®s, &frame->sf_sc); | 369 | sig = restore_sigcontext(®s, &frame->sf_sc); |
345 | if (sig < 0) | 370 | if (sig < 0) |
@@ -374,7 +399,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
374 | if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) | 399 | if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) |
375 | goto badframe; | 400 | goto badframe; |
376 | 401 | ||
377 | set_current_blocked(&set); | 402 | sigdelsetmask(&set, ~_BLOCKABLE); |
403 | spin_lock_irq(&current->sighand->siglock); | ||
404 | current->blocked = set; | ||
405 | recalc_sigpending(); | ||
406 | spin_unlock_irq(&current->sighand->siglock); | ||
378 | 407 | ||
379 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); | 408 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); |
380 | if (sig < 0) | 409 | if (sig < 0) |
@@ -512,10 +541,9 @@ struct mips_abi mips_abi = { | |||
512 | .restart = __NR_restart_syscall | 541 | .restart = __NR_restart_syscall |
513 | }; | 542 | }; |
514 | 543 | ||
515 | static void handle_signal(unsigned long sig, siginfo_t *info, | 544 | static int handle_signal(unsigned long sig, siginfo_t *info, |
516 | struct k_sigaction *ka, struct pt_regs *regs) | 545 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) |
517 | { | 546 | { |
518 | sigset_t *oldset = sigmask_to_save(); | ||
519 | int ret; | 547 | int ret; |
520 | struct mips_abi *abi = current->thread.abi; | 548 | struct mips_abi *abi = current->thread.abi; |
521 | void *vdso = current->mm->context.vdso; | 549 | void *vdso = current->mm->context.vdso; |
@@ -549,39 +577,67 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
549 | ka, regs, sig, oldset); | 577 | ka, regs, sig, oldset); |
550 | 578 | ||
551 | if (ret) | 579 | if (ret) |
552 | return; | 580 | return ret; |
581 | |||
582 | spin_lock_irq(&current->sighand->siglock); | ||
583 | sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); | ||
584 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
585 | sigaddset(&current->blocked, sig); | ||
586 | recalc_sigpending(); | ||
587 | spin_unlock_irq(&current->sighand->siglock); | ||
553 | 588 | ||
554 | signal_delivered(sig, info, ka, regs, 0); | 589 | return ret; |
555 | } | 590 | } |
556 | 591 | ||
557 | static void do_signal(struct pt_regs *regs) | 592 | static void do_signal(struct pt_regs *regs) |
558 | { | 593 | { |
559 | struct k_sigaction ka; | 594 | struct k_sigaction ka; |
595 | sigset_t *oldset; | ||
560 | siginfo_t info; | 596 | siginfo_t info; |
561 | int signr; | 597 | int signr; |
562 | 598 | ||
599 | /* | ||
600 | * We want the common case to go fast, which is why we may in certain | ||
601 | * cases get here from kernel mode. Just return without doing anything | ||
602 | * if so. | ||
603 | */ | ||
604 | if (!user_mode(regs)) | ||
605 | return; | ||
606 | |||
607 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
608 | oldset = &current->saved_sigmask; | ||
609 | else | ||
610 | oldset = &current->blocked; | ||
611 | |||
563 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 612 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
564 | if (signr > 0) { | 613 | if (signr > 0) { |
565 | /* Whee! Actually deliver the signal. */ | 614 | /* Whee! Actually deliver the signal. */ |
566 | handle_signal(signr, &info, &ka, regs); | 615 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { |
616 | /* | ||
617 | * A signal was successfully delivered; the saved | ||
618 | * sigmask will have been stored in the signal frame, | ||
619 | * and will be restored by sigreturn, so we can simply | ||
620 | * clear the TIF_RESTORE_SIGMASK flag. | ||
621 | */ | ||
622 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
623 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
624 | } | ||
625 | |||
567 | return; | 626 | return; |
568 | } | 627 | } |
569 | 628 | ||
570 | if (regs->regs[0]) { | 629 | if (regs->regs[0]) { |
571 | switch (regs->regs[2]) { | 630 | if (regs->regs[2] == ERESTARTNOHAND || |
572 | case ERESTARTNOHAND: | 631 | regs->regs[2] == ERESTARTSYS || |
573 | case ERESTARTSYS: | 632 | regs->regs[2] == ERESTARTNOINTR) { |
574 | case ERESTARTNOINTR: | ||
575 | regs->regs[2] = regs->regs[0]; | 633 | regs->regs[2] = regs->regs[0]; |
576 | regs->regs[7] = regs->regs[26]; | 634 | regs->regs[7] = regs->regs[26]; |
577 | regs->cp0_epc -= 4; | 635 | regs->cp0_epc -= 4; |
578 | break; | 636 | } |
579 | 637 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { | |
580 | case ERESTART_RESTARTBLOCK: | ||
581 | regs->regs[2] = current->thread.abi->restart; | 638 | regs->regs[2] = current->thread.abi->restart; |
582 | regs->regs[7] = regs->regs[26]; | 639 | regs->regs[7] = regs->regs[26]; |
583 | regs->cp0_epc -= 4; | 640 | regs->cp0_epc -= 4; |
584 | break; | ||
585 | } | 641 | } |
586 | regs->regs[0] = 0; /* Don't deal with this again. */ | 642 | regs->regs[0] = 0; /* Don't deal with this again. */ |
587 | } | 643 | } |
@@ -590,7 +646,10 @@ static void do_signal(struct pt_regs *regs) | |||
590 | * If there's no signal to deliver, we just put the saved sigmask | 646 | * If there's no signal to deliver, we just put the saved sigmask |
591 | * back | 647 | * back |
592 | */ | 648 | */ |
593 | restore_saved_sigmask(); | 649 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
650 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
651 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | ||
652 | } | ||
594 | } | 653 | } |
595 | 654 | ||
596 | /* | 655 | /* |
@@ -603,12 +662,14 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
603 | local_irq_enable(); | 662 | local_irq_enable(); |
604 | 663 | ||
605 | /* deal with pending signal delivery */ | 664 | /* deal with pending signal delivery */ |
606 | if (thread_info_flags & _TIF_SIGPENDING) | 665 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
607 | do_signal(regs); | 666 | do_signal(regs); |
608 | 667 | ||
609 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 668 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
610 | clear_thread_flag(TIF_NOTIFY_RESUME); | 669 | clear_thread_flag(TIF_NOTIFY_RESUME); |
611 | tracehook_notify_resume(regs); | 670 | tracehook_notify_resume(regs); |
671 | if (current->replacement_session_keyring) | ||
672 | key_replace_session_keyring(); | ||
612 | } | 673 | } |
613 | } | 674 | } |
614 | 675 | ||
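Note: every sigsuspend variant restored by this patch (sys_sigsuspend and sys_rt_sigsuspend here, plus the compat versions in signal32.c and signal_n32.c below) open-codes the same pre-3.5 idiom instead of calling the later sigsuspend() helper. Condensed into one sketch, with names exactly as in the hunks:

    /* Sketch of the restored idiom; each syscall inlines this. */
    static int classic_sigsuspend(sigset_t *newset)
    {
            sigdelsetmask(newset, ~_BLOCKABLE);        /* keep KILL/STOP deliverable */

            spin_lock_irq(&current->sighand->siglock);
            current->saved_sigmask = current->blocked; /* stash the old mask */
            current->blocked = *newset;                /* install temporary mask */
            recalc_sigpending();
            spin_unlock_irq(&current->sighand->siglock);

            current->state = TASK_INTERRUPTIBLE;
            schedule();                                /* sleep until a signal arrives */

            /* have do_signal()/do_notify_resume() restore saved_sigmask */
            set_thread_flag(TIF_RESTORE_SIGMASK);
            return -ERESTARTNOHAND;
    }

The -ERESTARTNOHAND return cooperates with the restart block in do_signal(): if no handler runs, the syscall is transparently restarted; if one does, userspace sees -EINTR semantics and the saved mask is put back on return from the handler.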
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index da1b56a39ac..aae98661379 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -29,10 +29,10 @@ | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include <asm/sim.h> | 30 | #include <asm/sim.h> |
31 | #include <asm/ucontext.h> | 31 | #include <asm/ucontext.h> |
32 | #include <asm/system.h> | ||
32 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
33 | #include <asm/war.h> | 34 | #include <asm/war.h> |
34 | #include <asm/vdso.h> | 35 | #include <asm/vdso.h> |
35 | #include <asm/dsp.h> | ||
36 | 36 | ||
37 | #include "signal-common.h" | 37 | #include "signal-common.h" |
38 | 38 | ||
@@ -288,7 +288,18 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
288 | uset = (compat_sigset_t __user *) regs.regs[4]; | 288 | uset = (compat_sigset_t __user *) regs.regs[4]; |
289 | if (get_sigset(&newset, uset)) | 289 | if (get_sigset(&newset, uset)) |
290 | return -EFAULT; | 290 | return -EFAULT; |
291 | return sigsuspend(&newset); | 291 | sigdelsetmask(&newset, ~_BLOCKABLE); |
292 | |||
293 | spin_lock_irq(&current->sighand->siglock); | ||
294 | current->saved_sigmask = current->blocked; | ||
295 | current->blocked = newset; | ||
296 | recalc_sigpending(); | ||
297 | spin_unlock_irq(&current->sighand->siglock); | ||
298 | |||
299 | current->state = TASK_INTERRUPTIBLE; | ||
300 | schedule(); | ||
301 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
302 | return -ERESTARTNOHAND; | ||
292 | } | 303 | } |
293 | 304 | ||
294 | asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | 305 | asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
@@ -305,7 +316,18 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
305 | uset = (compat_sigset_t __user *) regs.regs[4]; | 316 | uset = (compat_sigset_t __user *) regs.regs[4]; |
306 | if (get_sigset(&newset, uset)) | 317 | if (get_sigset(&newset, uset)) |
307 | return -EFAULT; | 318 | return -EFAULT; |
308 | return sigsuspend(&newset); | 319 | sigdelsetmask(&newset, ~_BLOCKABLE); |
320 | |||
321 | spin_lock_irq(&current->sighand->siglock); | ||
322 | current->saved_sigmask = current->blocked; | ||
323 | current->blocked = newset; | ||
324 | recalc_sigpending(); | ||
325 | spin_unlock_irq(&current->sighand->siglock); | ||
326 | |||
327 | current->state = TASK_INTERRUPTIBLE; | ||
328 | schedule(); | ||
329 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
330 | return -ERESTARTNOHAND; | ||
309 | } | 331 | } |
310 | 332 | ||
311 | SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act, | 333 | SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act, |
@@ -465,7 +487,11 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
465 | if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) | 487 | if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) |
466 | goto badframe; | 488 | goto badframe; |
467 | 489 | ||
468 | set_current_blocked(&blocked); | 490 | sigdelsetmask(&blocked, ~_BLOCKABLE); |
491 | spin_lock_irq(&current->sighand->siglock); | ||
492 | current->blocked = blocked; | ||
493 | recalc_sigpending(); | ||
494 | spin_unlock_irq(&current->sighand->siglock); | ||
469 | 495 | ||
470 | sig = restore_sigcontext32(®s, &frame->sf_sc); | 496 | sig = restore_sigcontext32(®s, &frame->sf_sc); |
471 | if (sig < 0) | 497 | if (sig < 0) |
@@ -502,7 +528,11 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
502 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) | 528 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) |
503 | goto badframe; | 529 | goto badframe; |
504 | 530 | ||
505 | set_current_blocked(&set); | 531 | sigdelsetmask(&set, ~_BLOCKABLE); |
532 | spin_lock_irq(&current->sighand->siglock); | ||
533 | current->blocked = set; | ||
534 | recalc_sigpending(); | ||
535 | spin_unlock_irq(&current->sighand->siglock); | ||
506 | 536 | ||
507 | sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); | 537 | sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); |
508 | if (sig < 0) | 538 | if (sig < 0) |
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 3574c145511..ee24d814d5b 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/sim.h> | 35 | #include <asm/sim.h> |
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <asm/ucontext.h> | 37 | #include <asm/ucontext.h> |
38 | #include <asm/system.h> | ||
38 | #include <asm/fpu.h> | 39 | #include <asm/fpu.h> |
39 | #include <asm/cpu-features.h> | 40 | #include <asm/cpu-features.h> |
40 | #include <asm/war.h> | 41 | #include <asm/war.h> |
@@ -91,7 +92,18 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
91 | if (copy_from_user(&uset, unewset, sizeof(uset))) | 92 | if (copy_from_user(&uset, unewset, sizeof(uset))) |
92 | return -EFAULT; | 93 | return -EFAULT; |
93 | sigset_from_compat(&newset, &uset); | 94 | sigset_from_compat(&newset, &uset); |
94 | return sigsuspend(&newset); | 95 | sigdelsetmask(&newset, ~_BLOCKABLE); |
96 | |||
97 | spin_lock_irq(&current->sighand->siglock); | ||
98 | current->saved_sigmask = current->blocked; | ||
99 | current->blocked = newset; | ||
100 | recalc_sigpending(); | ||
101 | spin_unlock_irq(&current->sighand->siglock); | ||
102 | |||
103 | current->state = TASK_INTERRUPTIBLE; | ||
104 | schedule(); | ||
105 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
106 | return -ERESTARTNOHAND; | ||
95 | } | 107 | } |
96 | 108 | ||
97 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | 109 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
@@ -109,7 +121,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
109 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) | 121 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) |
110 | goto badframe; | 122 | goto badframe; |
111 | 123 | ||
112 | set_current_blocked(&set); | 124 | sigdelsetmask(&set, ~_BLOCKABLE); |
125 | spin_lock_irq(&current->sighand->siglock); | ||
126 | current->blocked = set; | ||
127 | recalc_sigpending(); | ||
128 | spin_unlock_irq(&current->sighand->siglock); | ||
113 | 129 | ||
114 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); | 130 | sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); |
115 | if (sig < 0) | 131 | if (sig < 0) |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c deleted file mode 100644 index 8e393b8443f..00000000000 --- a/arch/mips/kernel/smp-bmips.c +++ /dev/null | |||
@@ -1,455 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) | ||
7 | * | ||
8 | * SMP support for BMIPS | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/cpu.h> | ||
19 | #include <linux/cpumask.h> | ||
20 | #include <linux/reboot.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/compiler.h> | ||
23 | #include <linux/linkage.h> | ||
24 | #include <linux/bug.h> | ||
25 | #include <linux/kernel.h> | ||
26 | |||
27 | #include <asm/time.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/processor.h> | ||
30 | #include <asm/bootinfo.h> | ||
31 | #include <asm/pmon.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/tlbflush.h> | ||
34 | #include <asm/mipsregs.h> | ||
35 | #include <asm/bmips.h> | ||
36 | #include <asm/traps.h> | ||
37 | #include <asm/barrier.h> | ||
38 | |||
39 | static int __maybe_unused max_cpus = 1; | ||
40 | |||
41 | /* these may be configured by the platform code */ | ||
42 | int bmips_smp_enabled = 1; | ||
43 | int bmips_cpu_offset; | ||
44 | cpumask_t bmips_booted_mask; | ||
45 | |||
46 | #ifdef CONFIG_SMP | ||
47 | |||
48 | /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ | ||
49 | unsigned long bmips_smp_boot_sp; | ||
50 | unsigned long bmips_smp_boot_gp; | ||
51 | |||
52 | static void bmips_send_ipi_single(int cpu, unsigned int action); | ||
53 | static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); | ||
54 | |||
55 | /* SW interrupts 0,1 are used for interprocessor signaling */ | ||
56 | #define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) | ||
57 | #define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1) | ||
58 | |||
59 | #define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift)) | ||
60 | #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
61 | #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
62 | #define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0)) | ||
63 | |||
64 | static void __init bmips_smp_setup(void) | ||
65 | { | ||
66 | int i; | ||
67 | |||
68 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
69 | /* arbitration priority */ | ||
70 | clear_c0_brcm_cmt_ctrl(0x30); | ||
71 | |||
72 | /* NBK and weak order flags */ | ||
73 | set_c0_brcm_config_0(0x30000); | ||
74 | |||
75 | /* | ||
76 | * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread | ||
77 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output | ||
78 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output | ||
79 | */ | ||
80 | change_c0_brcm_cmt_intr(0xf8018000, | ||
81 | (0x02 << 27) | (0x03 << 15)); | ||
82 | |||
83 | /* single core, 2 threads (2 pipelines) */ | ||
84 | max_cpus = 2; | ||
85 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
86 | /* enable raceless SW interrupts */ | ||
87 | set_c0_brcm_config(0x03 << 22); | ||
88 | |||
89 | /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ | ||
90 | change_c0_brcm_mode(0x1f << 27, 0x02 << 27); | ||
91 | |||
92 | /* N cores, 2 threads per core */ | ||
93 | max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; | ||
94 | |||
95 | /* clear any pending SW interrupts */ | ||
96 | for (i = 0; i < max_cpus; i++) { | ||
97 | write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); | ||
98 | write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); | ||
99 | } | ||
100 | #endif | ||
101 | |||
102 | if (!bmips_smp_enabled) | ||
103 | max_cpus = 1; | ||
104 | |||
105 | /* this can be overridden by the BSP */ | ||
106 | if (!board_ebase_setup) | ||
107 | board_ebase_setup = &bmips_ebase_setup; | ||
108 | |||
109 | for (i = 0; i < max_cpus; i++) { | ||
110 | __cpu_number_map[i] = 1; | ||
111 | __cpu_logical_map[i] = 1; | ||
112 | set_cpu_possible(i, 1); | ||
113 | set_cpu_present(i, 1); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * IPI IRQ setup - runs on CPU0 | ||
119 | */ | ||
120 | static void bmips_prepare_cpus(unsigned int max_cpus) | ||
121 | { | ||
122 | if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | ||
123 | "smp_ipi0", NULL)) | ||
124 | panic("Can't request IPI0 interrupt\n"); | ||
125 | if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | ||
126 | "smp_ipi1", NULL)) | ||
127 | panic("Can't request IPI1 interrupt\n"); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Tell the hardware to boot CPUx - runs on CPU0 | ||
132 | */ | ||
133 | static void bmips_boot_secondary(int cpu, struct task_struct *idle) | ||
134 | { | ||
135 | bmips_smp_boot_sp = __KSTK_TOS(idle); | ||
136 | bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); | ||
137 | mb(); | ||
138 | |||
139 | /* | ||
140 | * Initial boot sequence for secondary CPU: | ||
141 | * bmips_reset_nmi_vec @ a000_0000 -> | ||
142 | * bmips_smp_entry -> | ||
143 | * plat_wired_tlb_setup (cached function call; optional) -> | ||
144 | * start_secondary (cached jump) | ||
145 | * | ||
146 | * Warm restart sequence: | ||
147 | * play_dead WAIT loop -> | ||
148 | * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC -> | ||
149 | * eret to play_dead -> | ||
150 | * bmips_secondary_reentry -> | ||
151 | * start_secondary | ||
152 | */ | ||
153 | |||
154 | pr_info("SMP: Booting CPU%d...\n", cpu); | ||
155 | |||
156 | if (cpumask_test_cpu(cpu, &bmips_booted_mask)) | ||
157 | bmips_send_ipi_single(cpu, 0); | ||
158 | else { | ||
159 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
160 | set_c0_brcm_cmt_ctrl(0x01); | ||
161 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
162 | if (cpu & 0x01) | ||
163 | write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); | ||
164 | else { | ||
165 | /* | ||
166 | * core N thread 0 was already booted; just | ||
167 | * pulse the NMI line | ||
168 | */ | ||
169 | bmips_write_zscm_reg(0x210, 0xc0000000); | ||
170 | udelay(10); | ||
171 | bmips_write_zscm_reg(0x210, 0x00); | ||
172 | } | ||
173 | #endif | ||
174 | cpumask_set_cpu(cpu, &bmips_booted_mask); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Early setup - runs on secondary CPU after cache probe | ||
180 | */ | ||
181 | static void bmips_init_secondary(void) | ||
182 | { | ||
183 | /* move NMI vector to kseg0, in case XKS01 is enabled */ | ||
184 | |||
185 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
186 | void __iomem *cbr = BMIPS_GET_CBR(); | ||
187 | unsigned long old_vec; | ||
188 | |||
189 | old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
190 | __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
191 | |||
192 | clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); | ||
193 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
194 | write_c0_brcm_bootvec(read_c0_brcm_bootvec() & | ||
195 | (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); | ||
196 | |||
197 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); | ||
198 | #endif | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Late setup - runs on secondary CPU before entering the idle loop | ||
203 | */ | ||
204 | static void bmips_smp_finish(void) | ||
205 | { | ||
206 | pr_info("SMP: CPU%d is running\n", smp_processor_id()); | ||
207 | |||
208 | /* make sure there won't be a timer interrupt for a little while */ | ||
209 | write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); | ||
210 | |||
211 | irq_enable_hazard(); | ||
212 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); | ||
213 | irq_enable_hazard(); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Runs on CPU0 after all CPUs have been booted | ||
218 | */ | ||
219 | static void bmips_cpus_done(void) | ||
220 | { | ||
221 | } | ||
222 | |||
223 | #if defined(CONFIG_CPU_BMIPS5000) | ||
224 | |||
225 | /* | ||
226 | * BMIPS5000 raceless IPIs | ||
227 | * | ||
228 | * Each CPU has two inbound SW IRQs which are independent of all other CPUs. | ||
229 | * IPI0 is used for SMP_RESCHEDULE_YOURSELF | ||
230 | * IPI1 is used for SMP_CALL_FUNCTION | ||
231 | */ | ||
232 | |||
233 | static void bmips_send_ipi_single(int cpu, unsigned int action) | ||
234 | { | ||
235 | write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); | ||
236 | } | ||
237 | |||
238 | static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) | ||
239 | { | ||
240 | int action = irq - IPI0_IRQ; | ||
241 | |||
242 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action)); | ||
243 | |||
244 | if (action == 0) | ||
245 | scheduler_ipi(); | ||
246 | else | ||
247 | smp_call_function_interrupt(); | ||
248 | |||
249 | return IRQ_HANDLED; | ||
250 | } | ||
251 | |||
252 | #else | ||
253 | |||
254 | /* | ||
255 | * BMIPS43xx racey IPIs | ||
256 | * | ||
257 | * We use one inbound SW IRQ for each CPU. | ||
258 | * | ||
259 | * A spinlock must be held in order to keep CPUx from accidentally clearing | ||
260 | * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The | ||
261 | * same spinlock is used to protect the action masks. | ||
262 | */ | ||
263 | |||
264 | static DEFINE_SPINLOCK(ipi_lock); | ||
265 | static DEFINE_PER_CPU(int, ipi_action_mask); | ||
266 | |||
267 | static void bmips_send_ipi_single(int cpu, unsigned int action) | ||
268 | { | ||
269 | unsigned long flags; | ||
270 | |||
271 | spin_lock_irqsave(&ipi_lock, flags); | ||
272 | set_c0_cause(cpu ? C_SW1 : C_SW0); | ||
273 | per_cpu(ipi_action_mask, cpu) |= action; | ||
274 | irq_enable_hazard(); | ||
275 | spin_unlock_irqrestore(&ipi_lock, flags); | ||
276 | } | ||
277 | |||
278 | static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) | ||
279 | { | ||
280 | unsigned long flags; | ||
281 | int action, cpu = irq - IPI0_IRQ; | ||
282 | |||
283 | spin_lock_irqsave(&ipi_lock, flags); | ||
284 | action = __get_cpu_var(ipi_action_mask); | ||
285 | per_cpu(ipi_action_mask, cpu) = 0; | ||
286 | clear_c0_cause(cpu ? C_SW1 : C_SW0); | ||
287 | spin_unlock_irqrestore(&ipi_lock, flags); | ||
288 | |||
289 | if (action & SMP_RESCHEDULE_YOURSELF) | ||
290 | scheduler_ipi(); | ||
291 | if (action & SMP_CALL_FUNCTION) | ||
292 | smp_call_function_interrupt(); | ||
293 | |||
294 | return IRQ_HANDLED; | ||
295 | } | ||
296 | |||
297 | #endif /* BMIPS type */ | ||
298 | |||
299 | static void bmips_send_ipi_mask(const struct cpumask *mask, | ||
300 | unsigned int action) | ||
301 | { | ||
302 | unsigned int i; | ||
303 | |||
304 | for_each_cpu(i, mask) | ||
305 | bmips_send_ipi_single(i, action); | ||
306 | } | ||
307 | |||
308 | #ifdef CONFIG_HOTPLUG_CPU | ||
309 | |||
310 | static int bmips_cpu_disable(void) | ||
311 | { | ||
312 | unsigned int cpu = smp_processor_id(); | ||
313 | |||
314 | if (cpu == 0) | ||
315 | return -EBUSY; | ||
316 | |||
317 | pr_info("SMP: CPU%d is offline\n", cpu); | ||
318 | |||
319 | set_cpu_online(cpu, false); | ||
320 | cpu_clear(cpu, cpu_callin_map); | ||
321 | |||
322 | local_flush_tlb_all(); | ||
323 | local_flush_icache_range(0, ~0); | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static void bmips_cpu_die(unsigned int cpu) | ||
329 | { | ||
330 | } | ||
331 | |||
332 | void __ref play_dead(void) | ||
333 | { | ||
334 | idle_task_exit(); | ||
335 | |||
336 | /* flush data cache */ | ||
337 | _dma_cache_wback_inv(0, ~0); | ||
338 | |||
339 | /* | ||
340 | * Wakeup is on SW0 or SW1; disable everything else | ||
341 | * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux | ||
342 | * IRQ handlers; this clears ST0_IE and returns immediately. | ||
343 | */ | ||
344 | clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); | ||
345 | change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, | ||
346 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); | ||
347 | irq_disable_hazard(); | ||
348 | |||
349 | /* | ||
350 | * wait for SW interrupt from bmips_boot_secondary(), then jump | ||
351 | * back to start_secondary() | ||
352 | */ | ||
353 | __asm__ __volatile__( | ||
354 | " wait\n" | ||
355 | " j bmips_secondary_reentry\n" | ||
356 | : : : "memory"); | ||
357 | } | ||
358 | |||
359 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
360 | |||
361 | struct plat_smp_ops bmips_smp_ops = { | ||
362 | .smp_setup = bmips_smp_setup, | ||
363 | .prepare_cpus = bmips_prepare_cpus, | ||
364 | .boot_secondary = bmips_boot_secondary, | ||
365 | .smp_finish = bmips_smp_finish, | ||
366 | .init_secondary = bmips_init_secondary, | ||
367 | .cpus_done = bmips_cpus_done, | ||
368 | .send_ipi_single = bmips_send_ipi_single, | ||
369 | .send_ipi_mask = bmips_send_ipi_mask, | ||
370 | #ifdef CONFIG_HOTPLUG_CPU | ||
371 | .cpu_disable = bmips_cpu_disable, | ||
372 | .cpu_die = bmips_cpu_die, | ||
373 | #endif | ||
374 | }; | ||
375 | |||
376 | #endif /* CONFIG_SMP */ | ||
377 | |||
378 | /*********************************************************************** | ||
379 | * BMIPS vector relocation | ||
380 | * This is primarily used for SMP boot, but it is applicable to some | ||
381 | * UP BMIPS systems as well. | ||
382 | ***********************************************************************/ | ||
383 | |||
384 | static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) | ||
385 | { | ||
386 | memcpy((void *)dst, start, end - start); | ||
387 | dma_cache_wback((unsigned long)start, end - start); | ||
388 | local_flush_icache_range(dst, dst + (end - start)); | ||
389 | instruction_hazard(); | ||
390 | } | ||
391 | |||
392 | static inline void __cpuinit bmips_nmi_handler_setup(void) | ||
393 | { | ||
394 | bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, | ||
395 | &bmips_reset_nmi_vec_end); | ||
396 | bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, | ||
397 | &bmips_smp_int_vec_end); | ||
398 | } | ||
399 | |||
400 | void __cpuinit bmips_ebase_setup(void) | ||
401 | { | ||
402 | unsigned long new_ebase = ebase; | ||
403 | void __iomem __maybe_unused *cbr; | ||
404 | |||
405 | BUG_ON(ebase != CKSEG0); | ||
406 | |||
407 | #if defined(CONFIG_CPU_BMIPS4350) | ||
408 | /* | ||
409 | * BMIPS4350 cannot relocate the normal vectors, but it | ||
410 | * can relocate the BEV=1 vectors. So CPU1 starts up at | ||
411 | * the relocated BEV=1, IV=0 general exception vector @ | ||
412 | * 0xa000_0380. | ||
413 | * | ||
414 | * set_uncached_handler() is used here because: | ||
415 | * - CPU1 will run this from uncached space | ||
416 | * - None of the cacheflush functions are set up yet | ||
417 | */ | ||
418 | set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, | ||
419 | &bmips_smp_int_vec, 0x80); | ||
420 | __sync(); | ||
421 | return; | ||
422 | #elif defined(CONFIG_CPU_BMIPS4380) | ||
423 | /* | ||
424 | * 0x8000_0000: reset/NMI (initially in kseg1) | ||
425 | * 0x8000_0400: normal vectors | ||
426 | */ | ||
427 | new_ebase = 0x80000400; | ||
428 | cbr = BMIPS_GET_CBR(); | ||
429 | __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); | ||
430 | __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
431 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
432 | /* | ||
433 | * 0x8000_0000: reset/NMI (initially in kseg1) | ||
434 | * 0x8000_1000: normal vectors | ||
435 | */ | ||
436 | new_ebase = 0x80001000; | ||
437 | write_c0_brcm_bootvec(0xa0088008); | ||
438 | write_c0_ebase(new_ebase); | ||
439 | if (max_cpus > 2) | ||
440 | bmips_write_zscm_reg(0xa0, 0xa008a008); | ||
441 | #else | ||
442 | return; | ||
443 | #endif | ||
444 | board_nmi_handler_setup = &bmips_nmi_handler_setup; | ||
445 | ebase = new_ebase; | ||
446 | } | ||
447 | |||
448 | asmlinkage void __weak plat_wired_tlb_setup(void) | ||
449 | { | ||
450 | /* | ||
451 | * Called when starting/restarting a secondary CPU. | ||
452 | * Kernel stacks and other important data might only be accessible | ||
453 | * once the wired entries are present. | ||
454 | */ | ||
455 | } | ||
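Note: the deleted BMIPS43xx comment above says a spinlock must keep CPUx from clearing an incoming IPI while raising one for CPUy. The underlying hazard is a plain lost update: set_c0_cause()/clear_c0_cause() expand to read-modify-write sequences, and both hardware threads touch shared Cause bits. One plausible interleaving that ipi_lock rules out (illustrative trace, not code from the file):

    /*
     * Without ipi_lock (hypothetical trace):
     *
     *   CPU0: raise IPI for CPU1          CPU1: ack its own IPI
     *   ---------------------------       ---------------------------
     *   c = read_c0_cause();
     *                                     c = read_c0_cause();
     *   write_c0_cause(c | C_SW1);
     *                                     write_c0_cause(c & ~C_SW1);
     *
     * CPU1 wrote back a stale snapshot and wiped the C_SW1 bit CPU0
     * had just set, losing the IPI. The same lock also keeps the
     * per-CPU ipi_action_mask consistent with the Cause bits.
     */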
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 06cd0c610f4..fe309516065 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include <asm/cpu.h> | 30 | #include <asm/cpu.h> |
31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
32 | #include <asm/system.h> | ||
32 | #include <asm/hardirq.h> | 33 | #include <asm/hardirq.h> |
33 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
34 | #include <asm/smp.h> | 35 | #include <asm/smp.h> |
@@ -97,12 +98,12 @@ static void cmp_init_secondary(void) | |||
97 | 98 | ||
98 | /* Enable per-cpu interrupts: platform specific */ | 99 | /* Enable per-cpu interrupts: platform specific */ |
99 | 100 | ||
100 | c->core = (read_c0_ebase() >> 1) & 0x1ff; | 101 | c->core = (read_c0_ebase() >> 1) & 0xff; |
101 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 102 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) |
102 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; | 103 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; |
103 | #endif | 104 | #endif |
104 | #ifdef CONFIG_MIPS_MT_SMTC | 105 | #ifdef CONFIG_MIPS_MT_SMTC |
105 | c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; | 106 | c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; |
106 | #endif | 107 | #endif |
107 | } | 108 | } |
108 | 109 | ||
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 2defa2bbdaa..ce9e286f0a7 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | #include <asm/cpu.h> | 29 | #include <asm/cpu.h> |
30 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
31 | #include <asm/system.h> | ||
31 | #include <asm/hardirq.h> | 32 | #include <asm/hardirq.h> |
32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
33 | #include <asm/time.h> | 34 | #include <asm/time.h> |
@@ -150,7 +151,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) | |||
150 | 151 | ||
151 | static void __cpuinit vsmp_init_secondary(void) | 152 | static void __cpuinit vsmp_init_secondary(void) |
152 | { | 153 | { |
153 | #ifdef CONFIG_IRQ_GIC | ||
154 | extern int gic_present; | 154 | extern int gic_present; |
155 | 155 | ||
156 | /* This is Malta specific: IPI,performance and timer interrupts */ | 156 | /* This is Malta specific: IPI,performance and timer interrupts */ |
@@ -158,7 +158,6 @@ static void __cpuinit vsmp_init_secondary(void) | |||
158 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | | 158 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | |
159 | STATUSF_IP6 | STATUSF_IP7); | 159 | STATUSF_IP6 | STATUSF_IP7); |
160 | else | 160 | else |
161 | #endif | ||
162 | change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | | 161 | change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | |
163 | STATUSF_IP6 | STATUSF_IP7); | 162 | STATUSF_IP6 | STATUSF_IP7); |
164 | } | 163 | } |
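Note: the vsmp_init_secondary() hunk chooses which CP0 Status IM bits a secondary VPE opens: with a GIC present, IPIs, performance and timer interrupts arrive on hardware pins (IP3/IP4/IP6/IP7); without one, software interrupts IP0/IP1 carry the IPIs. change_c0_status(mask, val) is a masked read-modify-write of Status, so only the IM field is rewritten. A small sketch of its effect, assuming the standard mipsregs.h helper semantics:

    /* status = (status & ~ST0_IM) | (bits & ST0_IM); other fields untouched */
    change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
                             STATUSF_IP6 | STATUSF_IP7);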
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 66bf4e22d9b..32c1e954cd3 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -38,9 +38,9 @@ | |||
38 | #include <asm/cpu.h> | 38 | #include <asm/cpu.h> |
39 | #include <asm/processor.h> | 39 | #include <asm/processor.h> |
40 | #include <asm/r4k-timer.h> | 40 | #include <asm/r4k-timer.h> |
41 | #include <asm/system.h> | ||
41 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
42 | #include <asm/time.h> | 43 | #include <asm/time.h> |
43 | #include <asm/setup.h> | ||
44 | 44 | ||
45 | #ifdef CONFIG_MIPS_MT_SMTC | 45 | #ifdef CONFIG_MIPS_MT_SMTC |
46 | #include <asm/mipsmtregs.h> | 46 | #include <asm/mipsmtregs.h> |
@@ -102,13 +102,11 @@ asmlinkage __cpuinit void start_secondary(void) | |||
102 | 102 | ||
103 | #ifdef CONFIG_MIPS_MT_SMTC | 103 | #ifdef CONFIG_MIPS_MT_SMTC |
104 | /* Only do cpu_probe for first TC of CPU */ | 104 | /* Only do cpu_probe for first TC of CPU */ |
105 | if ((read_c0_tcbind() & TCBIND_CURTC) != 0) | 105 | if ((read_c0_tcbind() & TCBIND_CURTC) == 0) |
106 | __cpu_name[smp_processor_id()] = __cpu_name[0]; | ||
107 | else | ||
108 | #endif /* CONFIG_MIPS_MT_SMTC */ | 106 | #endif /* CONFIG_MIPS_MT_SMTC */ |
109 | cpu_probe(); | 107 | cpu_probe(); |
110 | cpu_report(); | 108 | cpu_report(); |
111 | per_cpu_trap_init(false); | 109 | per_cpu_trap_init(); |
112 | mips_clockevent_init(); | 110 | mips_clockevent_init(); |
113 | mp_ops->init_secondary(); | 111 | mp_ops->init_secondary(); |
114 | 112 | ||
@@ -124,20 +122,12 @@ asmlinkage __cpuinit void start_secondary(void) | |||
124 | 122 | ||
125 | notify_cpu_starting(cpu); | 123 | notify_cpu_starting(cpu); |
126 | 124 | ||
127 | set_cpu_online(cpu, true); | 125 | mp_ops->smp_finish(); |
128 | |||
129 | set_cpu_sibling_map(cpu); | 126 | set_cpu_sibling_map(cpu); |
130 | 127 | ||
131 | cpu_set(cpu, cpu_callin_map); | 128 | cpu_set(cpu, cpu_callin_map); |
132 | 129 | ||
133 | synchronise_count_slave(cpu); | 130 | synchronise_count_slave(); |
134 | |||
135 | /* | ||
136 | * irq will be enabled in ->smp_finish(), enabling it too early | ||
137 | * is dangerous. | ||
138 | */ | ||
139 | WARN_ON_ONCE(!irqs_disabled()); | ||
140 | mp_ops->smp_finish(); | ||
141 | 131 | ||
142 | cpu_idle(); | 132 | cpu_idle(); |
143 | } | 133 | } |
@@ -158,7 +148,7 @@ static void stop_this_cpu(void *dummy) | |||
158 | /* | 148 | /* |
159 | * Remove this CPU: | 149 | * Remove this CPU: |
160 | */ | 150 | */ |
161 | set_cpu_online(smp_processor_id(), false); | 151 | cpu_clear(smp_processor_id(), cpu_online_map); |
162 | for (;;) { | 152 | for (;;) { |
163 | if (cpu_wait) | 153 | if (cpu_wait) |
164 | (*cpu_wait)(); /* Wait if available. */ | 154 | (*cpu_wait)(); /* Wait if available. */ |
@@ -173,6 +163,7 @@ void smp_send_stop(void) | |||
173 | void __init smp_cpus_done(unsigned int max_cpus) | 163 | void __init smp_cpus_done(unsigned int max_cpus) |
174 | { | 164 | { |
175 | mp_ops->cpus_done(); | 165 | mp_ops->cpus_done(); |
166 | synchronise_count_master(); | ||
176 | } | 167 | } |
177 | 168 | ||
178 | /* called from main before smp_init() */ | 169 | /* called from main before smp_init() */ |
@@ -183,21 +174,73 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
183 | mp_ops->prepare_cpus(max_cpus); | 174 | mp_ops->prepare_cpus(max_cpus); |
184 | set_cpu_sibling_map(0); | 175 | set_cpu_sibling_map(0); |
185 | #ifndef CONFIG_HOTPLUG_CPU | 176 | #ifndef CONFIG_HOTPLUG_CPU |
186 | init_cpu_present(cpu_possible_mask); | 177 | init_cpu_present(&cpu_possible_map); |
187 | #endif | 178 | #endif |
188 | } | 179 | } |
189 | 180 | ||
190 | /* preload SMP state for boot cpu */ | 181 | /* preload SMP state for boot cpu */ |
191 | void smp_prepare_boot_cpu(void) | 182 | void __devinit smp_prepare_boot_cpu(void) |
192 | { | 183 | { |
193 | set_cpu_possible(0, true); | 184 | set_cpu_possible(0, true); |
194 | set_cpu_online(0, true); | 185 | set_cpu_online(0, true); |
195 | cpu_set(0, cpu_callin_map); | 186 | cpu_set(0, cpu_callin_map); |
196 | } | 187 | } |
197 | 188 | ||
198 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) | 189 | /* |
190 | * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu | ||
191 | * and keep control until "cpu_online(cpu)" is set. Note: cpu is | ||
192 | * physical, not logical. | ||
193 | */ | ||
194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; | ||
195 | |||
196 | struct create_idle { | ||
197 | struct work_struct work; | ||
198 | struct task_struct *idle; | ||
199 | struct completion done; | ||
200 | int cpu; | ||
201 | }; | ||
202 | |||
203 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
204 | { | ||
205 | struct create_idle *c_idle = | ||
206 | container_of(work, struct create_idle, work); | ||
207 | |||
208 | c_idle->idle = fork_idle(c_idle->cpu); | ||
209 | complete(&c_idle->done); | ||
210 | } | ||
211 | |||
212 | int __cpuinit __cpu_up(unsigned int cpu) | ||
199 | { | 213 | { |
200 | mp_ops->boot_secondary(cpu, tidle); | 214 | struct task_struct *idle; |
215 | |||
216 | /* | ||
217 | * Processor goes to start_secondary(), sets online flag | ||
218 | * The following code is purely to make sure | ||
219 | * Linux can schedule processes on this slave. | ||
220 | */ | ||
221 | if (!cpu_idle_thread[cpu]) { | ||
222 | /* | ||
223 | * Schedule work item to avoid forking user task | ||
224 | * Ported from arch/x86/kernel/smpboot.c | ||
225 | */ | ||
226 | struct create_idle c_idle = { | ||
227 | .cpu = cpu, | ||
228 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
229 | }; | ||
230 | |||
231 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
232 | schedule_work(&c_idle.work); | ||
233 | wait_for_completion(&c_idle.done); | ||
234 | idle = cpu_idle_thread[cpu] = c_idle.idle; | ||
235 | |||
236 | if (IS_ERR(idle)) | ||
237 | panic(KERN_ERR "Fork failed for CPU %d", cpu); | ||
238 | } else { | ||
239 | idle = cpu_idle_thread[cpu]; | ||
240 | init_idle(idle, cpu); | ||
241 | } | ||
242 | |||
243 | mp_ops->boot_secondary(cpu, idle); | ||
201 | 244 | ||
202 | /* | 245 | /* |
203 | * Trust is futile. We should really have timeouts ... | 246 | * Trust is futile. We should really have timeouts ... |
@@ -205,7 +248,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
205 | while (!cpu_isset(cpu, cpu_callin_map)) | 248 | while (!cpu_isset(cpu, cpu_callin_map)) |
206 | udelay(100); | 249 | udelay(100); |
207 | 250 | ||
208 | synchronise_count_master(cpu); | 251 | cpu_set(cpu, cpu_online_map); |
252 | |||
209 | return 0; | 253 | return 0; |
210 | } | 254 | } |
211 | 255 | ||
@@ -276,12 +320,13 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
276 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | 320 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { |
277 | smp_on_other_tlbs(flush_tlb_mm_ipi, mm); | 321 | smp_on_other_tlbs(flush_tlb_mm_ipi, mm); |
278 | } else { | 322 | } else { |
323 | cpumask_t mask = cpu_online_map; | ||
279 | unsigned int cpu; | 324 | unsigned int cpu; |
280 | 325 | ||
281 | for_each_online_cpu(cpu) { | 326 | cpu_clear(smp_processor_id(), mask); |
282 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) | 327 | for_each_cpu_mask(cpu, mask) |
328 | if (cpu_context(cpu, mm)) | ||
283 | cpu_context(cpu, mm) = 0; | 329 | cpu_context(cpu, mm) = 0; |
284 | } | ||
285 | } | 330 | } |
286 | local_flush_tlb_mm(mm); | 331 | local_flush_tlb_mm(mm); |
287 | 332 | ||
@@ -315,12 +360,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l | |||
315 | 360 | ||
316 | smp_on_other_tlbs(flush_tlb_range_ipi, &fd); | 361 | smp_on_other_tlbs(flush_tlb_range_ipi, &fd); |
317 | } else { | 362 | } else { |
363 | cpumask_t mask = cpu_online_map; | ||
318 | unsigned int cpu; | 364 | unsigned int cpu; |
319 | 365 | ||
320 | for_each_online_cpu(cpu) { | 366 | cpu_clear(smp_processor_id(), mask); |
321 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) | 367 | for_each_cpu_mask(cpu, mask) |
368 | if (cpu_context(cpu, mm)) | ||
322 | cpu_context(cpu, mm) = 0; | 369 | cpu_context(cpu, mm) = 0; |
323 | } | ||
324 | } | 370 | } |
325 | local_flush_tlb_range(vma, start, end); | 371 | local_flush_tlb_range(vma, start, end); |
326 | preempt_enable(); | 372 | preempt_enable(); |
@@ -361,12 +407,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
361 | 407 | ||
362 | smp_on_other_tlbs(flush_tlb_page_ipi, &fd); | 408 | smp_on_other_tlbs(flush_tlb_page_ipi, &fd); |
363 | } else { | 409 | } else { |
410 | cpumask_t mask = cpu_online_map; | ||
364 | unsigned int cpu; | 411 | unsigned int cpu; |
365 | 412 | ||
366 | for_each_online_cpu(cpu) { | 413 | cpu_clear(smp_processor_id(), mask); |
367 | if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) | 414 | for_each_cpu_mask(cpu, mask) |
415 | if (cpu_context(cpu, vma->vm_mm)) | ||
368 | cpu_context(cpu, vma->vm_mm) = 0; | 416 | cpu_context(cpu, vma->vm_mm) = 0; |
369 | } | ||
370 | } | 417 | } |
371 | local_flush_tlb_page(vma, page); | 418 | local_flush_tlb_page(vma, page); |
372 | preempt_enable(); | 419 | preempt_enable(); |
@@ -386,20 +433,3 @@ void flush_tlb_one(unsigned long vaddr) | |||
386 | 433 | ||
387 | EXPORT_SYMBOL(flush_tlb_page); | 434 | EXPORT_SYMBOL(flush_tlb_page); |
388 | EXPORT_SYMBOL(flush_tlb_one); | 435 | EXPORT_SYMBOL(flush_tlb_one); |
389 | |||
390 | #if defined(CONFIG_KEXEC) | ||
391 | void (*dump_ipi_function_ptr)(void *) = NULL; | ||
392 | void dump_send_ipi(void (*dump_ipi_callback)(void *)) | ||
393 | { | ||
394 | int i; | ||
395 | int cpu = smp_processor_id(); | ||
396 | |||
397 | dump_ipi_function_ptr = dump_ipi_callback; | ||
398 | smp_mb(); | ||
399 | for_each_online_cpu(i) | ||
400 | if (i != cpu) | ||
401 | mp_ops->send_ipi_single(i, SMP_DUMP); | ||
402 | |||
403 | } | ||
404 | EXPORT_SYMBOL(dump_send_ipi); | ||
405 | #endif | ||
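Note: all three flush_tlb_* hunks above share one trick on the non-IPI path: instead of shooting down remote TLBs, they zero cpu_context(cpu, mm) for every other CPU. MIPS TLB entries are tagged with the mm's per-CPU ASID, so a cleared context forces that CPU to allocate a fresh ASID the next time it activates the mm, and the stale entries can never match again. The pattern, as it appears on the newer (left-hand) side of the hunks:

    for_each_online_cpu(cpu) {
            /* invalidate the mm's ASID everywhere but here */
            if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                    cpu_context(cpu, mm) = 0;
    }
    local_flush_tlb_mm(mm);     /* our own TLB still needs a real flush */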
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c index 145771c0ed7..928a5a61e1a 100644 --- a/arch/mips/kernel/smtc-proc.c +++ b/arch/mips/kernel/smtc-proc.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <asm/cpu.h> | 11 | #include <asm/cpu.h> |
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <linux/atomic.h> | 13 | #include <linux/atomic.h> |
14 | #include <asm/system.h> | ||
14 | #include <asm/hardirq.h> | 15 | #include <asm/hardirq.h> |
15 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
16 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 1d47843d3cc..f0895e70e28 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/cpu.h> | 31 | #include <asm/cpu.h> |
32 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
33 | #include <linux/atomic.h> | 33 | #include <linux/atomic.h> |
34 | #include <asm/system.h> | ||
34 | #include <asm/hardirq.h> | 35 | #include <asm/hardirq.h> |
35 | #include <asm/hazards.h> | 36 | #include <asm/hazards.h> |
36 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
@@ -86,13 +87,6 @@ struct smtc_ipi_q IPIQ[NR_CPUS]; | |||
86 | static struct smtc_ipi_q freeIPIq; | 87 | static struct smtc_ipi_q freeIPIq; |
87 | 88 | ||
88 | 89 | ||
89 | /* | ||
90 | * Number of FPU contexts for each VPE | ||
91 | */ | ||
92 | |||
93 | static int smtc_nconf1[MAX_SMTC_VPES]; | ||
94 | |||
95 | |||
96 | /* Forward declarations */ | 90 | /* Forward declarations */ |
97 | 91 | ||
98 | void ipi_decode(struct smtc_ipi *); | 92 | void ipi_decode(struct smtc_ipi *); |
@@ -181,9 +175,9 @@ static int __init tintq(char *str) | |||
181 | 175 | ||
182 | __setup("tintq=", tintq); | 176 | __setup("tintq=", tintq); |
183 | 177 | ||
184 | static int imstuckcount[MAX_SMTC_VPES][8]; | 178 | static int imstuckcount[2][8]; |
185 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ | 179 | /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ |
186 | static int vpemask[MAX_SMTC_VPES][8] = { | 180 | static int vpemask[2][8] = { |
187 | {0, 0, 1, 0, 0, 0, 0, 1}, | 181 | {0, 0, 1, 0, 0, 0, 0, 1}, |
188 | {0, 0, 0, 0, 0, 0, 0, 1} | 182 | {0, 0, 0, 0, 0, 0, 0, 1} |
189 | }; | 183 | }; |
@@ -298,7 +292,7 @@ static void smtc_configure_tlb(void) | |||
298 | * possibly leave some TCs/VPEs as "slave" processors. | 292 | * possibly leave some TCs/VPEs as "slave" processors. |
299 | * | 293 | * |
300 | * Use c0_MVPConf0 to find out how many TCs are available, setting up | 294 | * Use c0_MVPConf0 to find out how many TCs are available, setting up |
301 | * cpu_possible_mask and the logical/physical mappings. | 295 | * cpu_possible_map and the logical/physical mappings. |
302 | */ | 296 | */ |
303 | 297 | ||
304 | int __init smtc_build_cpu_map(int start_cpu_slot) | 298 | int __init smtc_build_cpu_map(int start_cpu_slot) |
@@ -329,7 +323,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot) | |||
329 | 323 | ||
330 | /* | 324 | /* |
331 | * Common setup before any secondaries are started | 325 | * Common setup before any secondaries are started |
332 | * Make sure all CPUs are in a sensible state before we boot any of the | 326 | * Make sure all CPU's are in a sensible state before we boot any of the |
333 | * secondaries. | 327 | * secondaries. |
334 | * | 328 | * |
335 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly | 329 | * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly |
@@ -338,22 +332,6 @@ int __init smtc_build_cpu_map(int start_cpu_slot) | |||
338 | 332 | ||
339 | static void smtc_tc_setup(int vpe, int tc, int cpu) | 333 | static void smtc_tc_setup(int vpe, int tc, int cpu) |
340 | { | 334 | { |
341 | static int cp1contexts[MAX_SMTC_VPES]; | ||
342 | |||
343 | /* | ||
344 | * Make a local copy of the available FPU contexts in order | ||
345 | * to keep track of TCs that can have one. | ||
346 | */ | ||
347 | if (tc == 1) | ||
348 | { | ||
349 | /* | ||
350 | * FIXME: Multi-core SMTC hasn't been tested and the | ||
351 | * maximum number of VPEs may change. | ||
352 | */ | ||
353 | cp1contexts[0] = smtc_nconf1[0] - 1; | ||
354 | cp1contexts[1] = smtc_nconf1[1]; | ||
355 | } | ||
356 | |||
357 | settc(tc); | 335 | settc(tc); |
358 | write_tc_c0_tchalt(TCHALT_H); | 336 | write_tc_c0_tchalt(TCHALT_H); |
359 | mips_ihb(); | 337 | mips_ihb(); |
@@ -363,33 +341,26 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
363 | /* | 341 | /* |
364 | * TCContext gets an offset from the base of the IPIQ array | 342 | * TCContext gets an offset from the base of the IPIQ array |
365 | * to be used in low-level code to detect the presence of | 343 | * to be used in low-level code to detect the presence of |
366 | * an active IPI queue. | 344 | * an active IPI queue |
367 | */ | 345 | */ |
368 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | 346 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); |
369 | 347 | /* Bind tc to vpe */ | |
370 | /* Bind TC to VPE. */ | ||
371 | write_tc_c0_tcbind(vpe); | 348 | write_tc_c0_tcbind(vpe); |
372 | 349 | /* In general, all TCs should have the same cpu_data indications */ | |
373 | /* In general, all TCs should have the same cpu_data indications. */ | ||
374 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); | 350 | memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); |
375 | 351 | /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ | |
376 | /* Check to see if there is a FPU context available for this TC. */ | 352 | if (cpu_data[0].cputype == CPU_34K || |
377 | if (!cp1contexts[vpe]) | 353 | cpu_data[0].cputype == CPU_1004K) |
378 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 354 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
379 | else | ||
380 | cp1contexts[vpe]--; | ||
381 | |||
382 | /* Store the TC and VPE into the cpu_data structure. */ | ||
383 | cpu_data[cpu].vpe_id = vpe; | 355 | cpu_data[cpu].vpe_id = vpe; |
384 | cpu_data[cpu].tc_id = tc; | 356 | cpu_data[cpu].tc_id = tc; |
385 | 357 | /* Multi-core SMTC hasn't been tested, but be prepared */ | |
386 | /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ | ||
387 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | 358 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; |
388 | } | 359 | } |
389 | 360 | ||
390 | /* | 361 | /* |
391 | * Tweak to get Count registers synced as closely as possible. The | 362 | * Tweak to get Count registes in as close a sync as possible. |
392 | * value seems good for 34K-class cores. | 363 | * Value seems good for 34K-class cores. |
393 | */ | 364 | */ |
394 | 365 | ||
395 | #define CP0_SKEW 8 | 366 | #define CP0_SKEW 8 |
@@ -496,24 +467,6 @@ void smtc_prepare_cpus(int cpus) | |||
496 | smtc_configure_tlb(); | 467 | smtc_configure_tlb(); |
497 | 468 | ||
498 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { | 469 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { |
499 | /* Get number of CP1 contexts for each VPE. */ | ||
500 | if (tc == 0) | ||
501 | { | ||
502 | /* | ||
503 | * Do not call settc() for TC0 or the FPU context | ||
504 | * value will be incorrect. Besides, we know that | ||
505 | * we are TC0 anyway. | ||
506 | */ | ||
507 | smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & | ||
508 | VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); | ||
509 | if (nvpe == 2) | ||
510 | { | ||
511 | settc(1); | ||
512 | smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & | ||
513 | VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); | ||
514 | settc(0); | ||
515 | } | ||
516 | } | ||
517 | if (tcpervpe[vpe] == 0) | 470 | if (tcpervpe[vpe] == 0) |
518 | continue; | 471 | continue; |
519 | if (vpe != 0) | 472 | if (vpe != 0) |
@@ -527,18 +480,6 @@ void smtc_prepare_cpus(int cpus) | |||
527 | */ | 480 | */ |
528 | if (tc != 0) { | 481 | if (tc != 0) { |
529 | smtc_tc_setup(vpe, tc, cpu); | 482 | smtc_tc_setup(vpe, tc, cpu); |
530 | if (vpe != 0) { | ||
531 | /* | ||
532 | * Set MVP bit (possibly again). Do it | ||
533 | * here to catch CPUs that have no TCs | ||
534 | * bound to the VPE at reset. In that | ||
535 | * case, a TC must be bound to the VPE | ||
536 | * before we can set VPEControl[MVP] | ||
537 | */ | ||
538 | write_vpe_c0_vpeconf0( | ||
539 | read_vpe_c0_vpeconf0() | | ||
540 | VPECONF0_MVP); | ||
541 | } | ||
542 | cpu++; | 483 | cpu++; |
543 | } | 484 | } |
544 | printk(" %d", tc); | 485 | printk(" %d", tc); |
@@ -618,7 +559,7 @@ void smtc_prepare_cpus(int cpus) | |||
618 | 559 | ||
619 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); | 560 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); |
620 | if (pipi == NULL) | 561 | if (pipi == NULL) |
621 | panic("kmalloc of IPI message buffers failed"); | 562 | panic("kmalloc of IPI message buffers failed\n"); |
622 | else | 563 | else |
623 | printk("IPI buffer pool of %d buffers\n", nipi); | 564 | printk("IPI buffer pool of %d buffers\n", nipi); |
624 | for (i = 0; i < nipi; i++) { | 565 | for (i = 0; i < nipi; i++) { |
@@ -675,6 +616,7 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
675 | 616 | ||
676 | void smtc_init_secondary(void) | 617 | void smtc_init_secondary(void) |
677 | { | 618 | { |
619 | local_irq_enable(); | ||
678 | } | 620 | } |
679 | 621 | ||
680 | void smtc_smp_finish(void) | 622 | void smtc_smp_finish(void) |
@@ -690,8 +632,6 @@ void smtc_smp_finish(void) | |||
690 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | 632 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) |
691 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | 633 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); |
692 | 634 | ||
693 | local_irq_enable(); | ||
694 | |||
695 | printk("TC %d going on-line as CPU %d\n", | 635 | printk("TC %d going on-line as CPU %d\n", |
696 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 636 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
697 | } | 637 | } |
@@ -873,7 +813,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
873 | if (pipi == NULL) { | 813 | if (pipi == NULL) { |
874 | bust_spinlocks(1); | 814 | bust_spinlocks(1); |
875 | mips_mt_regdump(dvpe()); | 815 | mips_mt_regdump(dvpe()); |
876 | panic("IPI Msg. Buffers Depleted"); | 816 | panic("IPI Msg. Buffers Depleted\n"); |
877 | } | 817 | } |
878 | pipi->type = type; | 818 | pipi->type = type; |
879 | pipi->arg = (void *)action; | 819 | pipi->arg = (void *)action; |
@@ -1190,7 +1130,7 @@ static void ipi_irq_dispatch(void) | |||
1190 | 1130 | ||
1191 | static struct irqaction irq_ipi = { | 1131 | static struct irqaction irq_ipi = { |
1192 | .handler = ipi_interrupt, | 1132 | .handler = ipi_interrupt, |
1193 | .flags = IRQF_PERCPU, | 1133 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
1194 | .name = "SMTC_IPI" | 1134 | .name = "SMTC_IPI" |
1195 | }; | 1135 | }; |
1196 | 1136 | ||
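Note on the smtc.c hunks above: the right-hand column drops the per-VPE CP1 context accounting (the cp1contexts[] bookkeeping and the VPEConf1[NCP1] probing) in favor of the fixed rule that on 34K/1004K cores TC0 is sole owner of the single FPU context; it also drops the VPECONF0[MVP] rebinding for TCs bound to non-zero VPEs, moves local_irq_enable() from smtc_smp_finish() into smtc_init_secondary(), and restores IRQF_DISABLED on the IPI irqaction. IRQF_DISABLED ran the handler with interrupts off and later became a no-op upstream, which is why the left-hand column carries IRQF_PERCPU alone. A minimal sketch of how such a per-CPU IPI action is wired up at boot; setup_irq() is the real early-boot interface, while cpu_ipi_irq stands in for an IRQ number not shown in these hunks:

    static struct irqaction irq_ipi = {
            .handler = ipi_interrupt,               /* IPI dispatch entry point */
            .flags   = IRQF_DISABLED | IRQF_PERCPU, /* run with IRQs off, per CPU */
            .name    = "SMTC_IPI",
    };

    setup_irq(cpu_ipi_irq, &irq_ipi);               /* too early for request_irq() */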
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index 39f7ab7b042..da61134dfc5 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include <linux/hrtimer.h> | 3 | #include <linux/hrtimer.h> |
4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
5 | #include <linux/debugfs.h> | 5 | #include <linux/debugfs.h> |
6 | #include <linux/export.h> | 6 | #include <linux/module.h> |
7 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock.h> |
8 | 8 | ||
9 | 9 | ||
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 6af08d896e2..1821d12a641 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/fpu.h> | 16 | #include <asm/fpu.h> |
17 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
18 | #include <asm/system.h> | ||
18 | #include <asm/r4kcache.h> | 19 | #include <asm/r4kcache.h> |
19 | #include <asm/hazards.h> | 20 | #include <asm/hazards.h> |
20 | 21 | ||
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index 1ba775d24d3..d52ff77baf3 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c | |||
@@ -5,7 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/stacktrace.h> | 7 | #include <linux/stacktrace.h> |
8 | #include <linux/export.h> | 8 | #include <linux/module.h> |
9 | #include <asm/stacktrace.h> | 9 | #include <asm/stacktrace.h> |
10 | 10 | ||
11 | /* | 11 | /* |
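The spinlock_test.c and stacktrace.c hunks (time.c below repeats the pattern) are header churn from the <linux/export.h> split: files that only need EXPORT_SYMBOL() were switched to the lightweight export.h, and the right-hand column reverts them to the older, heavier <linux/module.h>, which still compiles but pulls in far more. A minimal sketch of the pattern; my_mips_helper is an illustrative name, not a symbol from this patch:

    #include <linux/export.h>       /* EXPORT_SYMBOL() without all of module.h */

    int my_mips_helper(void)
    {
            return 0;
    }
    EXPORT_SYMBOL(my_mips_helper);  /* make the symbol visible to modules */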
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 7f1eca3858d..99f913c8d7a 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c | |||
@@ -28,11 +28,12 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); | |||
28 | #define COUNTON 100 | 28 | #define COUNTON 100 |
29 | #define NR_LOOPS 5 | 29 | #define NR_LOOPS 5 |
30 | 30 | ||
31 | void __cpuinit synchronise_count_master(int cpu) | 31 | void __cpuinit synchronise_count_master(void) |
32 | { | 32 | { |
33 | int i; | 33 | int i; |
34 | unsigned long flags; | 34 | unsigned long flags; |
35 | unsigned int initcount; | 35 | unsigned int initcount; |
36 | int nslaves; | ||
36 | 37 | ||
37 | #ifdef CONFIG_MIPS_MT_SMTC | 38 | #ifdef CONFIG_MIPS_MT_SMTC |
38 | /* | 39 | /* |
@@ -42,7 +43,8 @@ void __cpuinit synchronise_count_master(int cpu) | |||
42 | return; | 43 | return; |
43 | #endif | 44 | #endif |
44 | 45 | ||
45 | printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); | 46 | printk(KERN_INFO "Synchronize counters across %u CPUs: ", |
47 | num_online_cpus()); | ||
46 | 48 | ||
47 | local_irq_save(flags); | 49 | local_irq_save(flags); |
48 | 50 | ||
@@ -50,7 +52,7 @@ void __cpuinit synchronise_count_master(int cpu) | |||
50 | * Notify the slaves that it's time to start | 52 | * Notify the slaves that it's time to start |
51 | */ | 53 | */ |
52 | atomic_set(&count_reference, read_c0_count()); | 54 | atomic_set(&count_reference, read_c0_count()); |
53 | atomic_set(&count_start_flag, cpu); | 55 | atomic_set(&count_start_flag, 1); |
54 | smp_wmb(); | 56 | smp_wmb(); |
55 | 57 | ||
56 | /* Count will be initialised to current timer for all CPU's */ | 58 | /* Count will be initialised to current timer for all CPU's */ |
@@ -67,9 +69,10 @@ void __cpuinit synchronise_count_master(int cpu) | |||
67 | * two CPUs. | 69 | * two CPUs. |
68 | */ | 70 | */ |
69 | 71 | ||
72 | nslaves = num_online_cpus()-1; | ||
70 | for (i = 0; i < NR_LOOPS; i++) { | 73 | for (i = 0; i < NR_LOOPS; i++) { |
71 | /* slaves loop on '!= 2' */ | 74 | /* slaves loop on '!= ncpus' */ |
72 | while (atomic_read(&count_count_start) != 1) | 75 | while (atomic_read(&count_count_start) != nslaves) |
73 | mb(); | 76 | mb(); |
74 | atomic_set(&count_count_stop, 0); | 77 | atomic_set(&count_count_stop, 0); |
75 | smp_wmb(); | 78 | smp_wmb(); |
@@ -86,7 +89,7 @@ void __cpuinit synchronise_count_master(int cpu) | |||
86 | /* | 89 | /* |
87 | * Wait for all slaves to leave the synchronization point: | 90 | * Wait for all slaves to leave the synchronization point: |
88 | */ | 91 | */ |
89 | while (atomic_read(&count_count_stop) != 1) | 92 | while (atomic_read(&count_count_stop) != nslaves) |
90 | mb(); | 93 | mb(); |
91 | atomic_set(&count_count_start, 0); | 94 | atomic_set(&count_count_start, 0); |
92 | smp_wmb(); | 95 | smp_wmb(); |
@@ -94,7 +97,6 @@ void __cpuinit synchronise_count_master(int cpu) | |||
94 | } | 97 | } |
95 | /* Arrange for an interrupt in a short while */ | 98 | /* Arrange for an interrupt in a short while */ |
96 | write_c0_compare(read_c0_count() + COUNTON); | 99 | write_c0_compare(read_c0_count() + COUNTON); |
97 | atomic_set(&count_start_flag, 0); | ||
98 | 100 | ||
99 | local_irq_restore(flags); | 101 | local_irq_restore(flags); |
100 | 102 | ||
@@ -106,10 +108,12 @@ void __cpuinit synchronise_count_master(int cpu) | |||
106 | printk("done.\n"); | 108 | printk("done.\n"); |
107 | } | 109 | } |
108 | 110 | ||
109 | void __cpuinit synchronise_count_slave(int cpu) | 111 | void __cpuinit synchronise_count_slave(void) |
110 | { | 112 | { |
111 | int i; | 113 | int i; |
114 | unsigned long flags; | ||
112 | unsigned int initcount; | 115 | unsigned int initcount; |
116 | int ncpus; | ||
113 | 117 | ||
114 | #ifdef CONFIG_MIPS_MT_SMTC | 118 | #ifdef CONFIG_MIPS_MT_SMTC |
115 | /* | 119 | /* |
@@ -119,20 +123,23 @@ void __cpuinit synchronise_count_slave(int cpu) | |||
119 | return; | 123 | return; |
120 | #endif | 124 | #endif |
121 | 125 | ||
126 | local_irq_save(flags); | ||
127 | |||
122 | /* | 128 | /* |
123 | * Not every cpu is online at the time this gets called, | 129 | * Not every cpu is online at the time this gets called, |
124 | * so we first wait for the master to say everyone is ready | 130 | * so we first wait for the master to say everyone is ready |
125 | */ | 131 | */ |
126 | 132 | ||
127 | while (atomic_read(&count_start_flag) != cpu) | 133 | while (!atomic_read(&count_start_flag)) |
128 | mb(); | 134 | mb(); |
129 | 135 | ||
130 | /* Count will be initialised to next expire for all CPU's */ | 136 | /* Count will be initialised to next expire for all CPU's */ |
131 | initcount = atomic_read(&count_reference); | 137 | initcount = atomic_read(&count_reference); |
132 | 138 | ||
139 | ncpus = num_online_cpus(); | ||
133 | for (i = 0; i < NR_LOOPS; i++) { | 140 | for (i = 0; i < NR_LOOPS; i++) { |
134 | atomic_inc(&count_count_start); | 141 | atomic_inc(&count_count_start); |
135 | while (atomic_read(&count_count_start) != 2) | 142 | while (atomic_read(&count_count_start) != ncpus) |
136 | mb(); | 143 | mb(); |
137 | 144 | ||
138 | /* | 145 | /* |
@@ -142,10 +149,12 @@ void __cpuinit synchronise_count_slave(int cpu) | |||
142 | write_c0_count(initcount); | 149 | write_c0_count(initcount); |
143 | 150 | ||
144 | atomic_inc(&count_count_stop); | 151 | atomic_inc(&count_count_stop); |
145 | while (atomic_read(&count_count_stop) != 2) | 152 | while (atomic_read(&count_count_stop) != ncpus) |
146 | mb(); | 153 | mb(); |
147 | } | 154 | } |
148 | /* Arrange for an interrupt in a short while */ | 155 | /* Arrange for an interrupt in a short while */ |
149 | write_c0_compare(read_c0_count() + COUNTON); | 156 | write_c0_compare(read_c0_count() + COUNTON); |
157 | |||
158 | local_irq_restore(flags); | ||
150 | } | 159 | } |
151 | #undef NR_LOOPS | 160 | #undef NR_LOOPS |
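The sync-r4k.c hunks move Count synchronization from a per-CPU master/slave pairing (the left-hand column passes a target cpu and rendezvouses two CPUs at a time) back to a single all-CPU rendezvous sized by num_online_cpus(). Both variants implement the same repeated two-phase barrier so that every participant writes CP0 Count inside the same narrow window. A sketch of that barrier generalized to ncpus participants; arrived and departed are illustrative atomic_t counters, and in the real code the master additionally resets them between iterations:

    for (i = 0; i < NR_LOOPS; i++) {
            atomic_inc(&arrived);
            while (atomic_read(&arrived) != ncpus)   /* phase 1: gather everyone */
                    mb();

            write_c0_count(initcount);               /* all CPUs write in lock-step */

            atomic_inc(&departed);
            while (atomic_read(&departed) != ncpus)  /* phase 2: drain before reuse */
                    mb();
    }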
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 201cb76b4df..d02765708dd 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <asm/shmparam.h> | 37 | #include <asm/shmparam.h> |
38 | #include <asm/sysmips.h> | 38 | #include <asm/sysmips.h> |
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <asm/switch_to.h> | ||
41 | 40 | ||
42 | /* | 41 | /* |
43 | * For historic reasons the pipe(2) syscall on MIPS has an unusual calling | 42 | * For historic reasons the pipe(2) syscall on MIPS has an unusual calling |
@@ -92,7 +91,7 @@ save_static_function(sys_fork); | |||
92 | static int __used noinline | 91 | static int __used noinline |
93 | _sys_fork(nabi_no_regargs struct pt_regs regs) | 92 | _sys_fork(nabi_no_regargs struct pt_regs regs) |
94 | { | 93 | { |
95 | return do_fork(SIGCHLD, regs.regs[29], 0, NULL, NULL); | 94 | return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL); |
96 | } | 95 | } |
97 | 96 | ||
98 | save_static_function(sys_clone); | 97 | save_static_function(sys_clone); |
@@ -123,10 +122,32 @@ _sys_clone(nabi_no_regargs struct pt_regs regs) | |||
123 | #else | 122 | #else |
124 | child_tidptr = (int __user *) regs.regs[8]; | 123 | child_tidptr = (int __user *) regs.regs[8]; |
125 | #endif | 124 | #endif |
126 | return do_fork(clone_flags, newsp, 0, | 125 | return do_fork(clone_flags, newsp, &regs, 0, |
127 | parent_tidptr, child_tidptr); | 126 | parent_tidptr, child_tidptr); |
128 | } | 127 | } |
129 | 128 | ||
129 | /* | ||
130 | * sys_execve() executes a new program. | ||
131 | */ | ||
132 | asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs) | ||
133 | { | ||
134 | int error; | ||
135 | char * filename; | ||
136 | |||
137 | filename = getname((const char __user *) (long)regs.regs[4]); | ||
138 | error = PTR_ERR(filename); | ||
139 | if (IS_ERR(filename)) | ||
140 | goto out; | ||
141 | error = do_execve(filename, | ||
142 | (const char __user *const __user *) (long)regs.regs[5], | ||
143 | (const char __user *const __user *) (long)regs.regs[6], | ||
144 | &regs); | ||
145 | putname(filename); | ||
146 | |||
147 | out: | ||
148 | return error; | ||
149 | } | ||
150 | |||
130 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) | 151 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) |
131 | { | 152 | { |
132 | struct thread_info *ti = task_thread_info(current); | 153 | struct thread_info *ti = task_thread_info(current); |
@@ -291,3 +312,34 @@ asmlinkage void bad_stack(void) | |||
291 | { | 312 | { |
292 | do_exit(SIGSEGV); | 313 | do_exit(SIGSEGV); |
293 | } | 314 | } |
315 | |||
316 | /* | ||
317 | * Do a system call from kernel instead of calling sys_execve so we | ||
318 | * end up with proper pt_regs. | ||
319 | */ | ||
320 | int kernel_execve(const char *filename, | ||
321 | const char *const argv[], | ||
322 | const char *const envp[]) | ||
323 | { | ||
324 | register unsigned long __a0 asm("$4") = (unsigned long) filename; | ||
325 | register unsigned long __a1 asm("$5") = (unsigned long) argv; | ||
326 | register unsigned long __a2 asm("$6") = (unsigned long) envp; | ||
327 | register unsigned long __a3 asm("$7"); | ||
328 | unsigned long __v0; | ||
329 | |||
330 | __asm__ volatile (" \n" | ||
331 | " .set noreorder \n" | ||
332 | " li $2, %5 # __NR_execve \n" | ||
333 | " syscall \n" | ||
334 | " move %0, $2 \n" | ||
335 | " .set reorder \n" | ||
336 | : "=&r" (__v0), "=r" (__a3) | ||
337 | : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve) | ||
338 | : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", | ||
339 | "memory"); | ||
340 | |||
341 | if (__a3 == 0) | ||
342 | return __v0; | ||
343 | |||
344 | return -__v0; | ||
345 | } | ||
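The syscall.c hunks give do_fork() back its struct pt_regs argument and reintroduce the MIPS-private sys_execve() and kernel_execve(), which the left-hand kernel had replaced with generic code. kernel_execve() issues a real syscall instruction from kernel space so the new program starts with properly formed pt_regs, and it relies on the MIPS syscall convention: $v0 ($2) carries the syscall number in and the result out, while $a3 ($7) is zero on success and non-zero on failure, in which case $v0 holds the positive errno, hence the final return of -__v0. A hedged sketch of the same convention for a zero-argument syscall; mips_syscall0 is an illustrative helper assuming the o32 ABI, not kernel API:

    static inline long mips_syscall0(long nr)
    {
            register long v0 asm("$2") = nr;   /* syscall number in, result out */
            register long a3 asm("$7");        /* 0 on success, else v0 = errno */

            asm volatile("syscall"
                         : "+r" (v0), "=r" (a3)
                         :
                         : "$8", "$9", "$10", "$11", "$12", "$13",
                           "$14", "$15", "$24", "memory");

            return a3 ? -v0 : v0;              /* negative errno on failure */
    }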
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 99d73b72b00..1083ad4e101 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/export.h> | 24 | #include <linux/module.h> |
25 | 25 | ||
26 | #include <asm/cpu-features.h> | 26 | #include <asm/cpu-features.h> |
27 | #include <asm/div64.h> | 27 | #include <asm/div64.h> |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index cf7ac5483f5..cbea618af0b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -13,11 +13,10 @@ | |||
13 | */ | 13 | */ |
14 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/kexec.h> | ||
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | ||
20 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/module.h> | ||
21 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
22 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
23 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
@@ -47,6 +46,7 @@ | |||
47 | #include <asm/pgtable.h> | 46 | #include <asm/pgtable.h> |
48 | #include <asm/ptrace.h> | 47 | #include <asm/ptrace.h> |
49 | #include <asm/sections.h> | 48 | #include <asm/sections.h> |
49 | #include <asm/system.h> | ||
50 | #include <asm/tlbdebug.h> | 50 | #include <asm/tlbdebug.h> |
51 | #include <asm/traps.h> | 51 | #include <asm/traps.h> |
52 | #include <asm/uaccess.h> | 52 | #include <asm/uaccess.h> |
@@ -92,8 +92,7 @@ int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | |||
92 | void (*board_nmi_handler_setup)(void); | 92 | void (*board_nmi_handler_setup)(void); |
93 | void (*board_ejtag_handler_setup)(void); | 93 | void (*board_ejtag_handler_setup)(void); |
94 | void (*board_bind_eic_interrupt)(int irq, int regset); | 94 | void (*board_bind_eic_interrupt)(int irq, int regset); |
95 | void (*board_ebase_setup)(void); | 95 | |
96 | void __cpuinitdata(*board_cache_error_setup)(void); | ||
97 | 96 | ||
98 | static void show_raw_backtrace(unsigned long reg29) | 97 | static void show_raw_backtrace(unsigned long reg29) |
99 | { | 98 | { |
@@ -133,9 +132,6 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) | |||
133 | unsigned long ra = regs->regs[31]; | 132 | unsigned long ra = regs->regs[31]; |
134 | unsigned long pc = regs->cp0_epc; | 133 | unsigned long pc = regs->cp0_epc; |
135 | 134 | ||
136 | if (!task) | ||
137 | task = current; | ||
138 | |||
139 | if (raw_show_trace || !__kernel_text_address(pc)) { | 135 | if (raw_show_trace || !__kernel_text_address(pc)) { |
140 | show_raw_backtrace(sp); | 136 | show_raw_backtrace(sp); |
141 | return; | 137 | return; |
@@ -405,14 +401,11 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
405 | panic("Fatal exception in interrupt"); | 401 | panic("Fatal exception in interrupt"); |
406 | 402 | ||
407 | if (panic_on_oops) { | 403 | if (panic_on_oops) { |
408 | printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); | 404 | printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); |
409 | ssleep(5); | 405 | ssleep(5); |
410 | panic("Fatal exception"); | 406 | panic("Fatal exception"); |
411 | } | 407 | } |
412 | 408 | ||
413 | if (regs && kexec_should_crash(current)) | ||
414 | crash_kexec(regs); | ||
415 | |||
416 | do_exit(sig); | 409 | do_exit(sig); |
417 | } | 410 | } |
418 | 411 | ||
@@ -1025,24 +1018,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
1025 | 1018 | ||
1026 | return; | 1019 | return; |
1027 | 1020 | ||
1028 | case 3: | ||
1029 | /* | ||
1030 | * Old (MIPS I and MIPS II) processors will set this code | ||
1031 | * for COP1X opcode instructions that replaced the original | ||
1032 | * COP3 space. We don't limit COP1 space instructions in | ||
1033 | * the emulator according to the CPU ISA, so we want to | ||
1034 | * treat COP1X instructions consistently regardless of which | ||
1035 | * code the CPU chose. Therefore we redirect this trap to | ||
1036 | * the FP emulator too. | ||
1037 | * | ||
1038 | * Then some newer FPU-less processors use this code | ||
1039 | * erroneously too, so they are covered by this choice | ||
1040 | * as well. | ||
1041 | */ | ||
1042 | if (raw_cpu_has_fpu) | ||
1043 | break; | ||
1044 | /* Fall through. */ | ||
1045 | |||
1046 | case 1: | 1021 | case 1: |
1047 | if (used_math()) /* Using the FPU again. */ | 1022 | if (used_math()) /* Using the FPU again. */ |
1048 | own_fpu(1); | 1023 | own_fpu(1); |
@@ -1066,6 +1041,9 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
1066 | case 2: | 1041 | case 2: |
1067 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); | 1042 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); |
1068 | return; | 1043 | return; |
1044 | |||
1045 | case 3: | ||
1046 | break; | ||
1069 | } | 1047 | } |
1070 | 1048 | ||
1071 | force_sig(SIGILL, current); | 1049 | force_sig(SIGILL, current); |
@@ -1157,7 +1135,7 @@ asmlinkage void do_mt(struct pt_regs *regs) | |||
1157 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); | 1135 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); |
1158 | break; | 1136 | break; |
1159 | case 5: | 1137 | case 5: |
1160 | printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); | 1138 | printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); |
1161 | break; | 1139 | break; |
1162 | default: | 1140 | default: |
1163 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", | 1141 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", |
@@ -1173,7 +1151,7 @@ asmlinkage void do_mt(struct pt_regs *regs) | |||
1173 | asmlinkage void do_dsp(struct pt_regs *regs) | 1151 | asmlinkage void do_dsp(struct pt_regs *regs) |
1174 | { | 1152 | { |
1175 | if (cpu_has_dsp) | 1153 | if (cpu_has_dsp) |
1176 | panic("Unexpected DSP exception"); | 1154 | panic("Unexpected DSP exception\n"); |
1177 | 1155 | ||
1178 | force_sig(SIGILL, current); | 1156 | force_sig(SIGILL, current); |
1179 | } | 1157 | } |
@@ -1271,8 +1249,6 @@ static inline void parity_protection_init(void) | |||
1271 | break; | 1249 | break; |
1272 | 1250 | ||
1273 | case CPU_5KC: | 1251 | case CPU_5KC: |
1274 | case CPU_5KE: | ||
1275 | case CPU_LOONGSON1: | ||
1276 | write_c0_ecc(0x80000000); | 1252 | write_c0_ecc(0x80000000); |
1277 | back_to_back_c0_hazard(); | 1253 | back_to_back_c0_hazard(); |
1278 | /* Set the PE bit (bit 31) in the c0_errctl register. */ | 1254 | /* Set the PE bit (bit 31) in the c0_errctl register. */ |
@@ -1364,18 +1340,9 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
1364 | 1340 | ||
1365 | /* | 1341 | /* |
1366 | * NMI exception handler. | 1342 | * NMI exception handler. |
1367 | * No lock; only written during early bootup by CPU 0. | ||
1368 | */ | 1343 | */ |
1369 | static RAW_NOTIFIER_HEAD(nmi_chain); | 1344 | NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs) |
1370 | |||
1371 | int register_nmi_notifier(struct notifier_block *nb) | ||
1372 | { | 1345 | { |
1373 | return raw_notifier_chain_register(&nmi_chain, nb); | ||
1374 | } | ||
1375 | |||
1376 | void __noreturn nmi_exception_handler(struct pt_regs *regs) | ||
1377 | { | ||
1378 | raw_notifier_call_chain(&nmi_chain, 0, regs); | ||
1379 | bust_spinlocks(1); | 1346 | bust_spinlocks(1); |
1380 | printk("NMI taken!!!!\n"); | 1347 | printk("NMI taken!!!!\n"); |
1381 | die("NMI", regs); | 1348 | die("NMI", regs); |
@@ -1515,6 +1482,7 @@ void *set_vi_handler(int n, vi_handler_t addr) | |||
1515 | return set_vi_srs_handler(n, addr, 0); | 1482 | return set_vi_srs_handler(n, addr, 0); |
1516 | } | 1483 | } |
1517 | 1484 | ||
1485 | extern void cpu_cache_init(void); | ||
1518 | extern void tlb_init(void); | 1486 | extern void tlb_init(void); |
1519 | extern void flush_tlb_handlers(void); | 1487 | extern void flush_tlb_handlers(void); |
1520 | 1488 | ||
@@ -1522,7 +1490,6 @@ extern void flush_tlb_handlers(void); | |||
1522 | * Timer interrupt | 1490 | * Timer interrupt |
1523 | */ | 1491 | */ |
1524 | int cp0_compare_irq; | 1492 | int cp0_compare_irq; |
1525 | EXPORT_SYMBOL_GPL(cp0_compare_irq); | ||
1526 | int cp0_compare_irq_shift; | 1493 | int cp0_compare_irq_shift; |
1527 | 1494 | ||
1528 | /* | 1495 | /* |
@@ -1542,7 +1509,7 @@ static int __init ulri_disable(char *s) | |||
1542 | } | 1509 | } |
1543 | __setup("noulri", ulri_disable); | 1510 | __setup("noulri", ulri_disable); |
1544 | 1511 | ||
1545 | void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | 1512 | void __cpuinit per_cpu_trap_init(void) |
1546 | { | 1513 | { |
1547 | unsigned int cpu = smp_processor_id(); | 1514 | unsigned int cpu = smp_processor_id(); |
1548 | unsigned int status_set = ST0_CU0; | 1515 | unsigned int status_set = ST0_CU0; |
@@ -1622,7 +1589,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1622 | cp0_perfcount_irq = -1; | 1589 | cp0_perfcount_irq = -1; |
1623 | } else { | 1590 | } else { |
1624 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; | 1591 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; |
1625 | cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; | 1592 | cp0_compare_irq_shift = cp0_compare_irq; |
1626 | cp0_perfcount_irq = -1; | 1593 | cp0_perfcount_irq = -1; |
1627 | } | 1594 | } |
1628 | 1595 | ||
@@ -1630,8 +1597,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1630 | } | 1597 | } |
1631 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1598 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1632 | 1599 | ||
1633 | if (!cpu_data[cpu].asid_cache) | 1600 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
1634 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | ||
1635 | 1601 | ||
1636 | atomic_inc(&init_mm.mm_count); | 1602 | atomic_inc(&init_mm.mm_count); |
1637 | current->active_mm = &init_mm; | 1603 | current->active_mm = &init_mm; |
@@ -1641,9 +1607,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1641 | #ifdef CONFIG_MIPS_MT_SMTC | 1607 | #ifdef CONFIG_MIPS_MT_SMTC |
1642 | if (bootTC) { | 1608 | if (bootTC) { |
1643 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1609 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1644 | /* Boot CPU's cache setup in setup_arch(). */ | 1610 | cpu_cache_init(); |
1645 | if (!is_boot_cpu) | ||
1646 | cpu_cache_init(); | ||
1647 | tlb_init(); | 1611 | tlb_init(); |
1648 | #ifdef CONFIG_MIPS_MT_SMTC | 1612 | #ifdef CONFIG_MIPS_MT_SMTC |
1649 | } else if (!secondaryTC) { | 1613 | } else if (!secondaryTC) { |
@@ -1659,7 +1623,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1659 | } | 1623 | } |
1660 | 1624 | ||
1661 | /* Install CPU exception handler */ | 1625 | /* Install CPU exception handler */ |
1662 | void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) | 1626 | void __init set_handler(unsigned long offset, void *addr, unsigned long size) |
1663 | { | 1627 | { |
1664 | memcpy((void *)(ebase + offset), addr, size); | 1628 | memcpy((void *)(ebase + offset), addr, size); |
1665 | local_flush_icache_range(ebase + offset, ebase + offset + size); | 1629 | local_flush_icache_range(ebase + offset, ebase + offset + size); |
@@ -1718,9 +1682,7 @@ void __init trap_init(void) | |||
1718 | ebase += (read_c0_ebase() & 0x3ffff000); | 1682 | ebase += (read_c0_ebase() & 0x3ffff000); |
1719 | } | 1683 | } |
1720 | 1684 | ||
1721 | if (board_ebase_setup) | 1685 | per_cpu_trap_init(); |
1722 | board_ebase_setup(); | ||
1723 | per_cpu_trap_init(true); | ||
1724 | 1686 | ||
1725 | /* | 1687 | /* |
1726 | * Copy the generic exception handlers to their final destination. | 1688 | * Copy the generic exception handlers to their final destination. |
@@ -1824,9 +1786,6 @@ void __init trap_init(void) | |||
1824 | 1786 | ||
1825 | set_except_vector(26, handle_dsp); | 1787 | set_except_vector(26, handle_dsp); |
1826 | 1788 | ||
1827 | if (board_cache_error_setup) | ||
1828 | board_cache_error_setup(); | ||
1829 | |||
1830 | if (cpu_has_vce) | 1789 | if (cpu_has_vce) |
1831 | /* Special exception: R4[04]00 uses also the divec space. */ | 1790 | /* Special exception: R4[04]00 uses also the divec space. */ |
1832 | memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); | 1791 | memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); |
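In traps.c the right-hand column reverts several later additions at once: die() loses its crash-kexec hook, the NMI path returns to a plain noreturn handler instead of the raw notifier chain, per_cpu_trap_init() loses the is_boot_cpu parameter (so every CPU runs cpu_cache_init() again), set_handler() drops back from __cpuinit to __init, and the coprocessor-unusable handler stops routing COP1X (CE == 3) instructions from FPU-less CPUs into the FP emulator, raising SIGILL instead. For orientation, a sketch of how a do_cpu()-style handler derives which coprocessor faulted from the CE field of the CP0 Cause register; CAUSEB_CE is the real bit offset (28) from asm/mipsregs.h:

    unsigned int cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

    switch (cpid) {
    case 0:         /* CP0: privileged op from user mode, or emulation */
    case 1:         /* CP1: lazily restore or emulate floating point */
    case 2:         /* CP2: vendor coprocessor, cu2_chain notifiers */
    case 3:         /* CP1X/CP3: ISA-dependent, see the hunk above */
            break;
    }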
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 9c58bdf58f2..eb319b58035 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -73,6 +73,7 @@ | |||
73 | * Undo the partial store in this case. | 73 | * Undo the partial store in this case. |
74 | */ | 74 | */ |
75 | #include <linux/mm.h> | 75 | #include <linux/mm.h> |
76 | #include <linux/module.h> | ||
76 | #include <linux/signal.h> | 77 | #include <linux/signal.h> |
77 | #include <linux/smp.h> | 78 | #include <linux/smp.h> |
78 | #include <linux/sched.h> | 79 | #include <linux/sched.h> |
@@ -85,6 +86,7 @@ | |||
85 | #include <asm/cop2.h> | 86 | #include <asm/cop2.h> |
86 | #include <asm/inst.h> | 87 | #include <asm/inst.h> |
87 | #include <asm/uaccess.h> | 88 | #include <asm/uaccess.h> |
89 | #include <asm/system.h> | ||
88 | 90 | ||
89 | #define STR(x) __STR(x) | 91 | #define STR(x) __STR(x) |
90 | #define __STR(x) #x | 92 | #define __STR(x) #x |
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 0f1af58b036..e5cdfd603f8 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c | |||
@@ -88,7 +88,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
88 | 88 | ||
89 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | 89 | ret = install_special_mapping(mm, addr, PAGE_SIZE, |
90 | VM_READ|VM_EXEC| | 90 | VM_READ|VM_EXEC| |
91 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, | 91 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| |
92 | VM_ALWAYSDUMP, | ||
92 | &vdso_page); | 93 | &vdso_page); |
93 | 94 | ||
94 | if (ret) | 95 | if (ret) |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 0a4336b803e..a81176f44c7 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -1,14 +1,5 @@ | |||
1 | #include <asm/asm-offsets.h> | 1 | #include <asm/asm-offsets.h> |
2 | #include <asm/thread_info.h> | 2 | #include <asm/page.h> |
3 | |||
4 | #define PAGE_SIZE _PAGE_SIZE | ||
5 | |||
6 | /* | ||
7 | * Put .bss..swapper_pg_dir as the first thing in .bss. This will | ||
8 | * ensure that it has .bss alignment (64K). | ||
9 | */ | ||
10 | #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) | ||
11 | |||
12 | #include <asm-generic/vmlinux.lds.h> | 3 | #include <asm-generic/vmlinux.lds.h> |
13 | 4 | ||
14 | #undef mips | 5 | #undef mips |
@@ -78,10 +69,11 @@ SECTIONS | |||
78 | RODATA | 69 | RODATA |
79 | 70 | ||
80 | /* writeable */ | 71 | /* writeable */ |
72 | _sdata = .; /* Start of data section */ | ||
81 | .data : { /* Data */ | 73 | .data : { /* Data */ |
82 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ | 74 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ |
83 | 75 | ||
84 | INIT_TASK_DATA(THREAD_SIZE) | 76 | INIT_TASK_DATA(PAGE_SIZE) |
85 | NOSAVE_DATA | 77 | NOSAVE_DATA |
86 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | 78 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) |
87 | READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | 79 | READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) |
@@ -127,21 +119,11 @@ SECTIONS | |||
127 | } | 119 | } |
128 | 120 | ||
129 | PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | 121 | PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) |
130 | /* | 122 | . = ALIGN(PAGE_SIZE); |
131 | * Align to 64K in attempt to eliminate holes before the | ||
132 | * .bss..swapper_pg_dir section at the start of .bss. This | ||
133 | * also satisfies PAGE_SIZE alignment as the largest page size | ||
134 | * allowed is 64K. | ||
135 | */ | ||
136 | . = ALIGN(0x10000); | ||
137 | __init_end = .; | 123 | __init_end = .; |
138 | /* freed after init ends here */ | 124 | /* freed after init ends here */ |
139 | 125 | ||
140 | /* | 126 | BSS_SECTION(0, 0, 0) |
141 | * Force .bss to 64K alignment so that .bss..swapper_pg_dir | ||
142 | * gets that alignment. .sbss should be empty, so there will be | ||
143 | * no holes after __init_end. */ | ||
144 | BSS_SECTION(0, 0x10000, 0) | ||
145 | 127 | ||
146 | _end = . ; | 128 | _end = . ; |
147 | 129 | ||
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index eec690af658..3efcb065f78 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -29,6 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/device.h> | 31 | #include <linux/device.h> |
32 | #include <linux/module.h> | ||
32 | #include <linux/fs.h> | 33 | #include <linux/fs.h> |
33 | #include <linux/init.h> | 34 | #include <linux/init.h> |
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -49,7 +50,9 @@ | |||
49 | #include <asm/cpu.h> | 50 | #include <asm/cpu.h> |
50 | #include <asm/mips_mt.h> | 51 | #include <asm/mips_mt.h> |
51 | #include <asm/processor.h> | 52 | #include <asm/processor.h> |
53 | #include <asm/system.h> | ||
52 | #include <asm/vpe.h> | 54 | #include <asm/vpe.h> |
55 | #include <asm/kspd.h> | ||
53 | 56 | ||
54 | typedef void *vpe_handle; | 57 | typedef void *vpe_handle; |
55 | 58 | ||
@@ -68,6 +71,11 @@ static char module_name[] = "vpe"; | |||
68 | static int major; | 71 | static int major; |
69 | static const int minor = 1; /* fixed for now */ | 72 | static const int minor = 1; /* fixed for now */ |
70 | 73 | ||
74 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
75 | static struct kspd_notifications kspd_events; | ||
76 | static int kspd_events_reqd; | ||
77 | #endif | ||
78 | |||
71 | /* grab the likely amount of memory we will need. */ | 79 | /* grab the likely amount of memory we will need. */ |
72 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | 80 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM |
73 | #define P_SIZE (2 * 1024 * 1024) | 81 | #define P_SIZE (2 * 1024 * 1024) |
@@ -1095,6 +1103,14 @@ static int vpe_open(struct inode *inode, struct file *filp) | |||
1095 | v->uid = filp->f_cred->fsuid; | 1103 | v->uid = filp->f_cred->fsuid; |
1096 | v->gid = filp->f_cred->fsgid; | 1104 | v->gid = filp->f_cred->fsgid; |
1097 | 1105 | ||
1106 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
1107 | /* get kspd to tell us when a syscall_exit happens */ | ||
1108 | if (!kspd_events_reqd) { | ||
1109 | kspd_notify(&kspd_events); | ||
1110 | kspd_events_reqd++; | ||
1111 | } | ||
1112 | #endif | ||
1113 | |||
1098 | v->cwd[0] = 0; | 1114 | v->cwd[0] = 0; |
1099 | ret = getcwd(v->cwd, VPE_PATH_MAX); | 1115 | ret = getcwd(v->cwd, VPE_PATH_MAX); |
1100 | if (ret < 0) | 1116 | if (ret < 0) |
@@ -1327,6 +1343,13 @@ char *vpe_getcwd(int index) | |||
1327 | 1343 | ||
1328 | EXPORT_SYMBOL(vpe_getcwd); | 1344 | EXPORT_SYMBOL(vpe_getcwd); |
1329 | 1345 | ||
1346 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
1347 | static void kspd_sp_exit( int sp_id) | ||
1348 | { | ||
1349 | cleanup_tc(get_tc(sp_id)); | ||
1350 | } | ||
1351 | #endif | ||
1352 | |||
1330 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | 1353 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, |
1331 | const char *buf, size_t len) | 1354 | const char *buf, size_t len) |
1332 | { | 1355 | { |
@@ -1564,6 +1587,9 @@ out_reenable: | |||
1564 | emt(mtflags); | 1587 | emt(mtflags); |
1565 | local_irq_restore(flags); | 1588 | local_irq_restore(flags); |
1566 | 1589 | ||
1590 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
1591 | kspd_events.kspd_sp_exit = kspd_sp_exit; | ||
1592 | #endif | ||
1567 | return 0; | 1593 | return 0; |
1568 | 1594 | ||
1569 | out_class: | 1595 | out_class: |
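The vpe.c hunks reinstate the CONFIG_MIPS_APSP_KSPD glue: on first open of the loader device the driver registers a kspd_notifications block with kspd_notify(), and KSPD calls back at syscall-exit so the TC that ran the remote program can be reclaimed via cleanup_tc(). A sketch of that callback pattern; the names mirror the hunks above, and the struct layout is assumed from context rather than quoted from a header:

    static void my_sp_exit(int sp_id)
    {
            cleanup_tc(get_tc(sp_id));      /* free the TC the program ran on */
    }

    static struct kspd_notifications my_events = {
            .kspd_sp_exit = my_sp_exit,
    };

    static int __init my_vpe_glue_init(void)
    {
            kspd_notify(&my_events);        /* register once for exit events */
            return 0;
    }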