diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-14 16:05:21 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-14 16:05:21 -0500 |
commit | 4964e0664c80680fa6b28ef91381c076a5b25c2c (patch) | |
tree | 62099c5aaeee7274bcc66bcfba35d479affa97cf /arch/mips/kernel | |
parent | 0a80939b3e6af4b0dc93bf88ec02fd7e90a16f1b (diff) | |
parent | 7bf6612e8a9d6a0b3b82e8e2611942be1258b307 (diff) |
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (119 commits)
MIPS: Delete unused function add_temporary_entry.
MIPS: Set default pci cache line size.
MIPS: Flush huge TLB
MIPS: Octeon: Remove SYS_SUPPORTS_HIGHMEM.
MIPS: Octeon: Add support for OCTEON II PCIe
MIPS: Octeon: Update PCI Latency timer and enable more error reporting.
MIPS: Alchemy: Update cpu-feature-overrides
MIPS: Alchemy: db1200: Improve PB1200 detection.
MIPS: Alchemy: merge Au1000 and Au1300-style IRQ controller code.
MIPS: Alchemy: chain IRQ controllers to MIPS IRQ controller
MIPS: Alchemy: irq: register pm at irq init time
MIPS: Alchemy: Touchscreen support on DB1100
MIPS: Alchemy: Hook up IrDA on DB1000/DB1100
net/irda: convert au1k_ir to platform driver.
MIPS: Alchemy: remove unused board headers
MTD: nand: make au1550nd.c a platform_driver
MIPS: Netlogic: Mark Netlogic chips as SMT capable
MIPS: Netlogic: Add support for XLP 3XX cores
MIPS: Netlogic: Merge some of XLR/XLP wakup code
MIPS: Netlogic: Add default XLP config.
...
Fix up trivial conflicts in arch/mips/kernel/{perf_event_mipsxx.c,
traps.c} and drivers/tty/serial/Makefile
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- | arch/mips/kernel/Makefile | 2 | ||||
-rw-r--r-- | arch/mips/kernel/bmips_vec.S | 255 | ||||
-rw-r--r-- | arch/mips/kernel/branch.c | 128 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-bcm1480.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-ds1287.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-gt641xx.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-r4k.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-sb1250.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cevt-txx9.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/cpu-probe.c | 28 | ||||
-rw-r--r-- | arch/mips/kernel/i8253.c | 2 | ||||
-rw-r--r-- | arch/mips/kernel/kprobes.c | 177 | ||||
-rw-r--r-- | arch/mips/kernel/perf_event_mipsxx.c | 72 | ||||
-rw-r--r-- | arch/mips/kernel/rtlx.c | 1 | ||||
-rw-r--r-- | arch/mips/kernel/setup.c | 43 | ||||
-rw-r--r-- | arch/mips/kernel/smp-bmips.c | 458 | ||||
-rw-r--r-- | arch/mips/kernel/smtc.c | 6 | ||||
-rw-r--r-- | arch/mips/kernel/traps.c | 16 |
18 files changed, 1046 insertions, 154 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 1a966183e353..0c6877ea9004 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -55,9 +55,11 @@ obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o | |||
55 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o | 55 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o |
56 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o | 56 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o |
57 | obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o | 57 | obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o |
58 | obj-$(CONFIG_CPU_XLP) += r4k_fpu.o r4k_switch.o | ||
58 | 59 | ||
59 | obj-$(CONFIG_SMP) += smp.o | 60 | obj-$(CONFIG_SMP) += smp.o |
60 | obj-$(CONFIG_SMP_UP) += smp-up.o | 61 | obj-$(CONFIG_SMP_UP) += smp-up.o |
62 | obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o | ||
61 | 63 | ||
62 | obj-$(CONFIG_MIPS_MT) += mips-mt.o | 64 | obj-$(CONFIG_MIPS_MT) += mips-mt.o |
63 | obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o | 65 | obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o |
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S new file mode 100644 index 000000000000..e908e81330b1 --- /dev/null +++ b/arch/mips/kernel/bmips_vec.S | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) | ||
7 | * | ||
8 | * Reset/NMI/re-entry vectors for BMIPS processors | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | |||
13 | #include <asm/asm.h> | ||
14 | #include <asm/asmmacro.h> | ||
15 | #include <asm/cacheops.h> | ||
16 | #include <asm/regdef.h> | ||
17 | #include <asm/mipsregs.h> | ||
18 | #include <asm/stackframe.h> | ||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/hazards.h> | ||
21 | #include <asm/bmips.h> | ||
22 | |||
23 | .macro BARRIER | ||
24 | .set mips32 | ||
25 | _ssnop | ||
26 | _ssnop | ||
27 | _ssnop | ||
28 | .set mips0 | ||
29 | .endm | ||
30 | |||
31 | __CPUINIT | ||
32 | |||
33 | /*********************************************************************** | ||
34 | * Alternate CPU1 startup vector for BMIPS4350 | ||
35 | * | ||
36 | * On some systems the bootloader has already started CPU1 and configured | ||
37 | * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is | ||
38 | * triggered by the SW1 interrupt. If that is the case we try to move | ||
39 | * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380. | ||
40 | ***********************************************************************/ | ||
41 | |||
42 | LEAF(bmips_smp_movevec) | ||
43 | la k0, 1f | ||
44 | li k1, CKSEG1 | ||
45 | or k0, k1 | ||
46 | jr k0 | ||
47 | |||
48 | 1: | ||
49 | /* clear IV, pending IPIs */ | ||
50 | mtc0 zero, CP0_CAUSE | ||
51 | |||
52 | /* re-enable IRQs to wait for SW1 */ | ||
53 | li k0, ST0_IE | ST0_BEV | STATUSF_IP1 | ||
54 | mtc0 k0, CP0_STATUS | ||
55 | |||
56 | /* set up CPU1 CBR; move BASE to 0xa000_0000 */ | ||
57 | li k0, 0xff400000 | ||
58 | mtc0 k0, $22, 6 | ||
59 | li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1 | ||
60 | or k0, k1 | ||
61 | li k1, 0xa0080000 | ||
62 | sw k1, 0(k0) | ||
63 | |||
64 | /* wait here for SW1 interrupt from bmips_boot_secondary() */ | ||
65 | wait | ||
66 | |||
67 | la k0, bmips_reset_nmi_vec | ||
68 | li k1, CKSEG1 | ||
69 | or k0, k1 | ||
70 | jr k0 | ||
71 | END(bmips_smp_movevec) | ||
72 | |||
73 | /*********************************************************************** | ||
74 | * Reset/NMI vector | ||
75 | * For BMIPS processors that can relocate their exception vectors, this | ||
76 | * entire function gets copied to 0x8000_0000. | ||
77 | ***********************************************************************/ | ||
78 | |||
79 | NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) | ||
80 | .set push | ||
81 | .set noat | ||
82 | .align 4 | ||
83 | |||
84 | #ifdef CONFIG_SMP | ||
85 | /* if the NMI bit is clear, assume this is a CPU1 reset instead */ | ||
86 | li k1, (1 << 19) | ||
87 | mfc0 k0, CP0_STATUS | ||
88 | and k0, k1 | ||
89 | beqz k0, bmips_smp_entry | ||
90 | |||
91 | #if defined(CONFIG_CPU_BMIPS5000) | ||
92 | /* if we're not on core 0, this must be the SMP boot signal */ | ||
93 | li k1, (3 << 25) | ||
94 | mfc0 k0, $22 | ||
95 | and k0, k1 | ||
96 | bnez k0, bmips_smp_entry | ||
97 | #endif | ||
98 | #endif /* CONFIG_SMP */ | ||
99 | |||
100 | /* nope, it's just a regular NMI */ | ||
101 | SAVE_ALL | ||
102 | move a0, sp | ||
103 | |||
104 | /* clear EXL, ERL, BEV so that TLB refills still work */ | ||
105 | mfc0 k0, CP0_STATUS | ||
106 | li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE | ||
107 | or k0, k1 | ||
108 | xor k0, k1 | ||
109 | mtc0 k0, CP0_STATUS | ||
110 | BARRIER | ||
111 | |||
112 | /* jump to the NMI handler function */ | ||
113 | la k0, nmi_handler | ||
114 | jr k0 | ||
115 | |||
116 | RESTORE_ALL | ||
117 | .set mips3 | ||
118 | eret | ||
119 | |||
120 | /*********************************************************************** | ||
121 | * CPU1 reset vector (used for the initial boot only) | ||
122 | * This is still part of bmips_reset_nmi_vec(). | ||
123 | ***********************************************************************/ | ||
124 | |||
125 | #ifdef CONFIG_SMP | ||
126 | |||
127 | bmips_smp_entry: | ||
128 | |||
129 | /* set up CP0 STATUS; enable FPU */ | ||
130 | li k0, 0x30000000 | ||
131 | mtc0 k0, CP0_STATUS | ||
132 | BARRIER | ||
133 | |||
134 | /* set local CP0 CONFIG to make kseg0 cacheable, write-back */ | ||
135 | mfc0 k0, CP0_CONFIG | ||
136 | ori k0, 0x07 | ||
137 | xori k0, 0x04 | ||
138 | mtc0 k0, CP0_CONFIG | ||
139 | |||
140 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
141 | /* initialize CPU1's local I-cache */ | ||
142 | li k0, 0x80000000 | ||
143 | li k1, 0x80010000 | ||
144 | mtc0 zero, $28 | ||
145 | mtc0 zero, $28, 1 | ||
146 | BARRIER | ||
147 | |||
148 | 1: cache Index_Store_Tag_I, 0(k0) | ||
149 | addiu k0, 16 | ||
150 | bne k0, k1, 1b | ||
151 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
152 | /* set exception vector base */ | ||
153 | la k0, ebase | ||
154 | lw k0, 0(k0) | ||
155 | mtc0 k0, $15, 1 | ||
156 | BARRIER | ||
157 | #endif | ||
158 | |||
159 | /* jump back to kseg0 in case we need to remap the kseg1 area */ | ||
160 | la k0, 1f | ||
161 | jr k0 | ||
162 | 1: | ||
163 | la k0, bmips_enable_xks01 | ||
164 | jalr k0 | ||
165 | |||
166 | /* use temporary stack to set up upper memory TLB */ | ||
167 | li sp, BMIPS_WARM_RESTART_VEC | ||
168 | la k0, plat_wired_tlb_setup | ||
169 | jalr k0 | ||
170 | |||
171 | /* switch to permanent stack and continue booting */ | ||
172 | |||
173 | .global bmips_secondary_reentry | ||
174 | bmips_secondary_reentry: | ||
175 | la k0, bmips_smp_boot_sp | ||
176 | lw sp, 0(k0) | ||
177 | la k0, bmips_smp_boot_gp | ||
178 | lw gp, 0(k0) | ||
179 | la k0, start_secondary | ||
180 | jr k0 | ||
181 | |||
182 | #endif /* CONFIG_SMP */ | ||
183 | |||
184 | .align 4 | ||
185 | .global bmips_reset_nmi_vec_end | ||
186 | bmips_reset_nmi_vec_end: | ||
187 | |||
188 | END(bmips_reset_nmi_vec) | ||
189 | |||
190 | .set pop | ||
191 | .previous | ||
192 | |||
193 | /*********************************************************************** | ||
194 | * CPU1 warm restart vector (used for second and subsequent boots). | ||
195 | * Also used for S2 standby recovery (PM). | ||
196 | * This entire function gets copied to (BMIPS_WARM_RESTART_VEC) | ||
197 | ***********************************************************************/ | ||
198 | |||
199 | LEAF(bmips_smp_int_vec) | ||
200 | |||
201 | .align 4 | ||
202 | mfc0 k0, CP0_STATUS | ||
203 | ori k0, 0x01 | ||
204 | xori k0, 0x01 | ||
205 | mtc0 k0, CP0_STATUS | ||
206 | eret | ||
207 | |||
208 | .align 4 | ||
209 | .global bmips_smp_int_vec_end | ||
210 | bmips_smp_int_vec_end: | ||
211 | |||
212 | END(bmips_smp_int_vec) | ||
213 | |||
214 | /*********************************************************************** | ||
215 | * XKS01 support | ||
216 | * Certain CPUs support extending kseg0 to 1024MB. | ||
217 | ***********************************************************************/ | ||
218 | |||
219 | __CPUINIT | ||
220 | |||
221 | LEAF(bmips_enable_xks01) | ||
222 | |||
223 | #if defined(CONFIG_XKS01) | ||
224 | |||
225 | #if defined(CONFIG_CPU_BMIPS4380) | ||
226 | mfc0 t0, $22, 3 | ||
227 | li t1, 0x1ff0 | ||
228 | li t2, (1 << 12) | (1 << 9) | ||
229 | or t0, t1 | ||
230 | xor t0, t1 | ||
231 | or t0, t2 | ||
232 | mtc0 t0, $22, 3 | ||
233 | BARRIER | ||
234 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
235 | mfc0 t0, $22, 5 | ||
236 | li t1, 0x01ff | ||
237 | li t2, (1 << 8) | (1 << 5) | ||
238 | or t0, t1 | ||
239 | xor t0, t1 | ||
240 | or t0, t2 | ||
241 | mtc0 t0, $22, 5 | ||
242 | BARRIER | ||
243 | #else | ||
244 | |||
245 | #error Missing XKS01 setup | ||
246 | |||
247 | #endif | ||
248 | |||
249 | #endif /* defined(CONFIG_XKS01) */ | ||
250 | |||
251 | jr ra | ||
252 | |||
253 | END(bmips_enable_xks01) | ||
254 | |||
255 | .previous | ||
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 32103cc2a257..4d735d0e58f5 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/signal.h> | 11 | #include <linux/signal.h> |
12 | #include <linux/module.h> | ||
12 | #include <asm/branch.h> | 13 | #include <asm/branch.h> |
13 | #include <asm/cpu.h> | 14 | #include <asm/cpu.h> |
14 | #include <asm/cpu-features.h> | 15 | #include <asm/cpu-features.h> |
@@ -17,28 +18,22 @@ | |||
17 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
19 | 20 | ||
20 | /* | 21 | /** |
21 | * Compute the return address and do emulate branch simulation, if required. | 22 | * __compute_return_epc_for_insn - Computes the return address and do emulate |
23 | * branch simulation, if required. | ||
24 | * | ||
25 | * @regs: Pointer to pt_regs | ||
26 | * @insn: branch instruction to decode | ||
27 | * @returns: -EFAULT on error and forces SIGBUS, and on success | ||
28 | * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after | ||
29 | * evaluating the branch. | ||
22 | */ | 30 | */ |
23 | int __compute_return_epc(struct pt_regs *regs) | 31 | int __compute_return_epc_for_insn(struct pt_regs *regs, |
32 | union mips_instruction insn) | ||
24 | { | 33 | { |
25 | unsigned int __user *addr; | ||
26 | unsigned int bit, fcr31, dspcontrol; | 34 | unsigned int bit, fcr31, dspcontrol; |
27 | long epc; | 35 | long epc = regs->cp0_epc; |
28 | union mips_instruction insn; | 36 | int ret = 0; |
29 | |||
30 | epc = regs->cp0_epc; | ||
31 | if (epc & 3) | ||
32 | goto unaligned; | ||
33 | |||
34 | /* | ||
35 | * Read the instruction | ||
36 | */ | ||
37 | addr = (unsigned int __user *) epc; | ||
38 | if (__get_user(insn.word, addr)) { | ||
39 | force_sig(SIGSEGV, current); | ||
40 | return -EFAULT; | ||
41 | } | ||
42 | 37 | ||
43 | switch (insn.i_format.opcode) { | 38 | switch (insn.i_format.opcode) { |
44 | /* | 39 | /* |
@@ -64,18 +59,22 @@ int __compute_return_epc(struct pt_regs *regs) | |||
64 | switch (insn.i_format.rt) { | 59 | switch (insn.i_format.rt) { |
65 | case bltz_op: | 60 | case bltz_op: |
66 | case bltzl_op: | 61 | case bltzl_op: |
67 | if ((long)regs->regs[insn.i_format.rs] < 0) | 62 | if ((long)regs->regs[insn.i_format.rs] < 0) { |
68 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 63 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
69 | else | 64 | if (insn.i_format.rt == bltzl_op) |
65 | ret = BRANCH_LIKELY_TAKEN; | ||
66 | } else | ||
70 | epc += 8; | 67 | epc += 8; |
71 | regs->cp0_epc = epc; | 68 | regs->cp0_epc = epc; |
72 | break; | 69 | break; |
73 | 70 | ||
74 | case bgez_op: | 71 | case bgez_op: |
75 | case bgezl_op: | 72 | case bgezl_op: |
76 | if ((long)regs->regs[insn.i_format.rs] >= 0) | 73 | if ((long)regs->regs[insn.i_format.rs] >= 0) { |
77 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 74 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
78 | else | 75 | if (insn.i_format.rt == bgezl_op) |
76 | ret = BRANCH_LIKELY_TAKEN; | ||
77 | } else | ||
79 | epc += 8; | 78 | epc += 8; |
80 | regs->cp0_epc = epc; | 79 | regs->cp0_epc = epc; |
81 | break; | 80 | break; |
@@ -83,9 +82,11 @@ int __compute_return_epc(struct pt_regs *regs) | |||
83 | case bltzal_op: | 82 | case bltzal_op: |
84 | case bltzall_op: | 83 | case bltzall_op: |
85 | regs->regs[31] = epc + 8; | 84 | regs->regs[31] = epc + 8; |
86 | if ((long)regs->regs[insn.i_format.rs] < 0) | 85 | if ((long)regs->regs[insn.i_format.rs] < 0) { |
87 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 86 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
88 | else | 87 | if (insn.i_format.rt == bltzall_op) |
88 | ret = BRANCH_LIKELY_TAKEN; | ||
89 | } else | ||
89 | epc += 8; | 90 | epc += 8; |
90 | regs->cp0_epc = epc; | 91 | regs->cp0_epc = epc; |
91 | break; | 92 | break; |
@@ -93,12 +94,15 @@ int __compute_return_epc(struct pt_regs *regs) | |||
93 | case bgezal_op: | 94 | case bgezal_op: |
94 | case bgezall_op: | 95 | case bgezall_op: |
95 | regs->regs[31] = epc + 8; | 96 | regs->regs[31] = epc + 8; |
96 | if ((long)regs->regs[insn.i_format.rs] >= 0) | 97 | if ((long)regs->regs[insn.i_format.rs] >= 0) { |
97 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 98 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
98 | else | 99 | if (insn.i_format.rt == bgezall_op) |
100 | ret = BRANCH_LIKELY_TAKEN; | ||
101 | } else | ||
99 | epc += 8; | 102 | epc += 8; |
100 | regs->cp0_epc = epc; | 103 | regs->cp0_epc = epc; |
101 | break; | 104 | break; |
105 | |||
102 | case bposge32_op: | 106 | case bposge32_op: |
103 | if (!cpu_has_dsp) | 107 | if (!cpu_has_dsp) |
104 | goto sigill; | 108 | goto sigill; |
@@ -133,9 +137,11 @@ int __compute_return_epc(struct pt_regs *regs) | |||
133 | case beq_op: | 137 | case beq_op: |
134 | case beql_op: | 138 | case beql_op: |
135 | if (regs->regs[insn.i_format.rs] == | 139 | if (regs->regs[insn.i_format.rs] == |
136 | regs->regs[insn.i_format.rt]) | 140 | regs->regs[insn.i_format.rt]) { |
137 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 141 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
138 | else | 142 | if (insn.i_format.rt == beql_op) |
143 | ret = BRANCH_LIKELY_TAKEN; | ||
144 | } else | ||
139 | epc += 8; | 145 | epc += 8; |
140 | regs->cp0_epc = epc; | 146 | regs->cp0_epc = epc; |
141 | break; | 147 | break; |
@@ -143,9 +149,11 @@ int __compute_return_epc(struct pt_regs *regs) | |||
143 | case bne_op: | 149 | case bne_op: |
144 | case bnel_op: | 150 | case bnel_op: |
145 | if (regs->regs[insn.i_format.rs] != | 151 | if (regs->regs[insn.i_format.rs] != |
146 | regs->regs[insn.i_format.rt]) | 152 | regs->regs[insn.i_format.rt]) { |
147 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 153 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
148 | else | 154 | if (insn.i_format.rt == bnel_op) |
155 | ret = BRANCH_LIKELY_TAKEN; | ||
156 | } else | ||
149 | epc += 8; | 157 | epc += 8; |
150 | regs->cp0_epc = epc; | 158 | regs->cp0_epc = epc; |
151 | break; | 159 | break; |
@@ -153,9 +161,11 @@ int __compute_return_epc(struct pt_regs *regs) | |||
153 | case blez_op: /* not really i_format */ | 161 | case blez_op: /* not really i_format */ |
154 | case blezl_op: | 162 | case blezl_op: |
155 | /* rt field assumed to be zero */ | 163 | /* rt field assumed to be zero */ |
156 | if ((long)regs->regs[insn.i_format.rs] <= 0) | 164 | if ((long)regs->regs[insn.i_format.rs] <= 0) { |
157 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 165 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
158 | else | 166 | if (insn.i_format.rt == bnel_op) |
167 | ret = BRANCH_LIKELY_TAKEN; | ||
168 | } else | ||
159 | epc += 8; | 169 | epc += 8; |
160 | regs->cp0_epc = epc; | 170 | regs->cp0_epc = epc; |
161 | break; | 171 | break; |
@@ -163,9 +173,11 @@ int __compute_return_epc(struct pt_regs *regs) | |||
163 | case bgtz_op: | 173 | case bgtz_op: |
164 | case bgtzl_op: | 174 | case bgtzl_op: |
165 | /* rt field assumed to be zero */ | 175 | /* rt field assumed to be zero */ |
166 | if ((long)regs->regs[insn.i_format.rs] > 0) | 176 | if ((long)regs->regs[insn.i_format.rs] > 0) { |
167 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 177 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
168 | else | 178 | if (insn.i_format.rt == bnel_op) |
179 | ret = BRANCH_LIKELY_TAKEN; | ||
180 | } else | ||
169 | epc += 8; | 181 | epc += 8; |
170 | regs->cp0_epc = epc; | 182 | regs->cp0_epc = epc; |
171 | break; | 183 | break; |
@@ -187,18 +199,22 @@ int __compute_return_epc(struct pt_regs *regs) | |||
187 | switch (insn.i_format.rt & 3) { | 199 | switch (insn.i_format.rt & 3) { |
188 | case 0: /* bc1f */ | 200 | case 0: /* bc1f */ |
189 | case 2: /* bc1fl */ | 201 | case 2: /* bc1fl */ |
190 | if (~fcr31 & (1 << bit)) | 202 | if (~fcr31 & (1 << bit)) { |
191 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 203 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
192 | else | 204 | if (insn.i_format.rt == 2) |
205 | ret = BRANCH_LIKELY_TAKEN; | ||
206 | } else | ||
193 | epc += 8; | 207 | epc += 8; |
194 | regs->cp0_epc = epc; | 208 | regs->cp0_epc = epc; |
195 | break; | 209 | break; |
196 | 210 | ||
197 | case 1: /* bc1t */ | 211 | case 1: /* bc1t */ |
198 | case 3: /* bc1tl */ | 212 | case 3: /* bc1tl */ |
199 | if (fcr31 & (1 << bit)) | 213 | if (fcr31 & (1 << bit)) { |
200 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 214 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
201 | else | 215 | if (insn.i_format.rt == 3) |
216 | ret = BRANCH_LIKELY_TAKEN; | ||
217 | } else | ||
202 | epc += 8; | 218 | epc += 8; |
203 | regs->cp0_epc = epc; | 219 | regs->cp0_epc = epc; |
204 | break; | 220 | break; |
@@ -239,15 +255,39 @@ int __compute_return_epc(struct pt_regs *regs) | |||
239 | #endif | 255 | #endif |
240 | } | 256 | } |
241 | 257 | ||
242 | return 0; | 258 | return ret; |
243 | 259 | ||
244 | unaligned: | 260 | sigill: |
245 | printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); | 261 | printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); |
246 | force_sig(SIGBUS, current); | 262 | force_sig(SIGBUS, current); |
247 | return -EFAULT; | 263 | return -EFAULT; |
264 | } | ||
265 | EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); | ||
248 | 266 | ||
249 | sigill: | 267 | int __compute_return_epc(struct pt_regs *regs) |
250 | printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); | 268 | { |
269 | unsigned int __user *addr; | ||
270 | long epc; | ||
271 | union mips_instruction insn; | ||
272 | |||
273 | epc = regs->cp0_epc; | ||
274 | if (epc & 3) | ||
275 | goto unaligned; | ||
276 | |||
277 | /* | ||
278 | * Read the instruction | ||
279 | */ | ||
280 | addr = (unsigned int __user *) epc; | ||
281 | if (__get_user(insn.word, addr)) { | ||
282 | force_sig(SIGSEGV, current); | ||
283 | return -EFAULT; | ||
284 | } | ||
285 | |||
286 | return __compute_return_epc_for_insn(regs, insn); | ||
287 | |||
288 | unaligned: | ||
289 | printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); | ||
251 | force_sig(SIGBUS, current); | 290 | force_sig(SIGBUS, current); |
252 | return -EFAULT; | 291 | return -EFAULT; |
292 | |||
253 | } | 293 | } |
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index 36c3898b76db..69bbfae183bc 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c | |||
@@ -145,7 +145,7 @@ void __cpuinit sb1480_clockevent_init(void) | |||
145 | bcm1480_unmask_irq(cpu, irq); | 145 | bcm1480_unmask_irq(cpu, irq); |
146 | 146 | ||
147 | action->handler = sibyte_counter_handler; | 147 | action->handler = sibyte_counter_handler; |
148 | action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER; | 148 | action->flags = IRQF_PERCPU | IRQF_TIMER; |
149 | action->name = name; | 149 | action->name = name; |
150 | action->dev_id = cd; | 150 | action->dev_id = cd; |
151 | 151 | ||
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 939157e397b9..ed648cb5a69f 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c | |||
@@ -108,7 +108,7 @@ static irqreturn_t ds1287_interrupt(int irq, void *dev_id) | |||
108 | 108 | ||
109 | static struct irqaction ds1287_irqaction = { | 109 | static struct irqaction ds1287_irqaction = { |
110 | .handler = ds1287_interrupt, | 110 | .handler = ds1287_interrupt, |
111 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, | 111 | .flags = IRQF_PERCPU | IRQF_TIMER, |
112 | .name = "ds1287", | 112 | .name = "ds1287", |
113 | }; | 113 | }; |
114 | 114 | ||
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 339f3639b90e..831b47585b7c 100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c | |||
@@ -114,7 +114,7 @@ static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id) | |||
114 | 114 | ||
115 | static struct irqaction gt641xx_timer0_irqaction = { | 115 | static struct irqaction gt641xx_timer0_irqaction = { |
116 | .handler = gt641xx_timer0_interrupt, | 116 | .handler = gt641xx_timer0_interrupt, |
117 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, | 117 | .flags = IRQF_PERCPU | IRQF_TIMER, |
118 | .name = "gt641xx_timer0", | 118 | .name = "gt641xx_timer0", |
119 | }; | 119 | }; |
120 | 120 | ||
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index e2d8e199be32..51095dd9599d 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -84,7 +84,7 @@ out: | |||
84 | 84 | ||
85 | struct irqaction c0_compare_irqaction = { | 85 | struct irqaction c0_compare_irqaction = { |
86 | .handler = c0_compare_interrupt, | 86 | .handler = c0_compare_interrupt, |
87 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, | 87 | .flags = IRQF_PERCPU | IRQF_TIMER, |
88 | .name = "timer", | 88 | .name = "timer", |
89 | }; | 89 | }; |
90 | 90 | ||
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index 590c54f28a81..e73439fd6850 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c | |||
@@ -144,7 +144,7 @@ void __cpuinit sb1250_clockevent_init(void) | |||
144 | sb1250_unmask_irq(cpu, irq); | 144 | sb1250_unmask_irq(cpu, irq); |
145 | 145 | ||
146 | action->handler = sibyte_counter_handler; | 146 | action->handler = sibyte_counter_handler; |
147 | action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER; | 147 | action->flags = IRQF_PERCPU | IRQF_TIMER; |
148 | action->name = name; | 148 | action->name = name; |
149 | action->dev_id = cd; | 149 | action->dev_id = cd; |
150 | 150 | ||
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index f0ab92a1b057..e5c30b1d0860 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c | |||
@@ -146,7 +146,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) | |||
146 | 146 | ||
147 | static struct irqaction txx9tmr_irq = { | 147 | static struct irqaction txx9tmr_irq = { |
148 | .handler = txx9tmr_interrupt, | 148 | .handler = txx9tmr_interrupt, |
149 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, | 149 | .flags = IRQF_PERCPU | IRQF_TIMER, |
150 | .name = "txx9tmr", | 150 | .name = "txx9tmr", |
151 | .dev_id = &txx9_clock_event_device, | 151 | .dev_id = &txx9_clock_event_device, |
152 | }; | 152 | }; |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c7d3cf1ce46e..0bab464b8e33 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -191,6 +191,8 @@ void __init check_wait(void) | |||
191 | case CPU_CAVIUM_OCTEON_PLUS: | 191 | case CPU_CAVIUM_OCTEON_PLUS: |
192 | case CPU_CAVIUM_OCTEON2: | 192 | case CPU_CAVIUM_OCTEON2: |
193 | case CPU_JZRISC: | 193 | case CPU_JZRISC: |
194 | case CPU_XLR: | ||
195 | case CPU_XLP: | ||
194 | cpu_wait = r4k_wait; | 196 | cpu_wait = r4k_wait; |
195 | break; | 197 | break; |
196 | 198 | ||
@@ -1014,6 +1016,13 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1014 | { | 1016 | { |
1015 | decode_configs(c); | 1017 | decode_configs(c); |
1016 | 1018 | ||
1019 | if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { | ||
1020 | c->cputype = CPU_ALCHEMY; | ||
1021 | __cpu_name[cpu] = "Au1300"; | ||
1022 | /* following stuff is not for Alchemy */ | ||
1023 | return; | ||
1024 | } | ||
1025 | |||
1017 | c->options = (MIPS_CPU_TLB | | 1026 | c->options = (MIPS_CPU_TLB | |
1018 | MIPS_CPU_4KEX | | 1027 | MIPS_CPU_4KEX | |
1019 | MIPS_CPU_COUNTER | | 1028 | MIPS_CPU_COUNTER | |
@@ -1023,6 +1032,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1023 | MIPS_CPU_LLSC); | 1032 | MIPS_CPU_LLSC); |
1024 | 1033 | ||
1025 | switch (c->processor_id & 0xff00) { | 1034 | switch (c->processor_id & 0xff00) { |
1035 | case PRID_IMP_NETLOGIC_XLP8XX: | ||
1036 | case PRID_IMP_NETLOGIC_XLP3XX: | ||
1037 | c->cputype = CPU_XLP; | ||
1038 | __cpu_name[cpu] = "Netlogic XLP"; | ||
1039 | break; | ||
1040 | |||
1026 | case PRID_IMP_NETLOGIC_XLR732: | 1041 | case PRID_IMP_NETLOGIC_XLR732: |
1027 | case PRID_IMP_NETLOGIC_XLR716: | 1042 | case PRID_IMP_NETLOGIC_XLR716: |
1028 | case PRID_IMP_NETLOGIC_XLR532: | 1043 | case PRID_IMP_NETLOGIC_XLR532: |
@@ -1053,14 +1068,21 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | |||
1053 | break; | 1068 | break; |
1054 | 1069 | ||
1055 | default: | 1070 | default: |
1056 | printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", | 1071 | pr_info("Unknown Netlogic chip id [%02x]!\n", |
1057 | c->processor_id); | 1072 | c->processor_id); |
1058 | c->cputype = CPU_XLR; | 1073 | c->cputype = CPU_XLR; |
1059 | break; | 1074 | break; |
1060 | } | 1075 | } |
1061 | 1076 | ||
1062 | c->isa_level = MIPS_CPU_ISA_M64R1; | 1077 | if (c->cputype == CPU_XLP) { |
1063 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | 1078 | c->isa_level = MIPS_CPU_ISA_M64R2; |
1079 | c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); | ||
1080 | /* This will be updated again after all threads are woken up */ | ||
1081 | c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; | ||
1082 | } else { | ||
1083 | c->isa_level = MIPS_CPU_ISA_M64R1; | ||
1084 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | ||
1085 | } | ||
1064 | } | 1086 | } |
1065 | 1087 | ||
1066 | #ifdef CONFIG_64BIT | 1088 | #ifdef CONFIG_64BIT |
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 7047bff35ea5..c5bc344fc745 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -19,7 +19,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
19 | 19 | ||
20 | static struct irqaction irq0 = { | 20 | static struct irqaction irq0 = { |
21 | .handler = timer_interrupt, | 21 | .handler = timer_interrupt, |
22 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, | 22 | .flags = IRQF_NOBALANCING | IRQF_TIMER, |
23 | .name = "timer" | 23 | .name = "timer" |
24 | }; | 24 | }; |
25 | 25 | ||
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index ee28683fc2ac..158467da9bc1 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c | |||
@@ -25,10 +25,12 @@ | |||
25 | 25 | ||
26 | #include <linux/kprobes.h> | 26 | #include <linux/kprobes.h> |
27 | #include <linux/preempt.h> | 27 | #include <linux/preempt.h> |
28 | #include <linux/uaccess.h> | ||
28 | #include <linux/kdebug.h> | 29 | #include <linux/kdebug.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | 31 | ||
31 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
33 | #include <asm/branch.h> | ||
32 | #include <asm/break.h> | 34 | #include <asm/break.h> |
33 | #include <asm/inst.h> | 35 | #include <asm/inst.h> |
34 | 36 | ||
@@ -112,17 +114,49 @@ insn_ok: | |||
112 | return 0; | 114 | return 0; |
113 | } | 115 | } |
114 | 116 | ||
117 | /* | ||
118 | * insn_has_ll_or_sc function checks whether instruction is ll or sc | ||
119 | * one; putting breakpoint on top of atomic ll/sc pair is bad idea; | ||
120 | * so we need to prevent it and refuse kprobes insertion for such | ||
121 | * instructions; cannot do much about breakpoint in the middle of | ||
122 | * ll/sc pair; it is upto user to avoid those places | ||
123 | */ | ||
124 | static int __kprobes insn_has_ll_or_sc(union mips_instruction insn) | ||
125 | { | ||
126 | int ret = 0; | ||
127 | |||
128 | switch (insn.i_format.opcode) { | ||
129 | case ll_op: | ||
130 | case lld_op: | ||
131 | case sc_op: | ||
132 | case scd_op: | ||
133 | ret = 1; | ||
134 | break; | ||
135 | default: | ||
136 | break; | ||
137 | } | ||
138 | return ret; | ||
139 | } | ||
140 | |||
115 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 141 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
116 | { | 142 | { |
117 | union mips_instruction insn; | 143 | union mips_instruction insn; |
118 | union mips_instruction prev_insn; | 144 | union mips_instruction prev_insn; |
119 | int ret = 0; | 145 | int ret = 0; |
120 | 146 | ||
121 | prev_insn = p->addr[-1]; | ||
122 | insn = p->addr[0]; | 147 | insn = p->addr[0]; |
123 | 148 | ||
124 | if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) { | 149 | if (insn_has_ll_or_sc(insn)) { |
125 | pr_notice("Kprobes for branch and jump instructions are not supported\n"); | 150 | pr_notice("Kprobes for ll and sc instructions are not" |
151 | "supported\n"); | ||
152 | ret = -EINVAL; | ||
153 | goto out; | ||
154 | } | ||
155 | |||
156 | if ((probe_kernel_read(&prev_insn, p->addr - 1, | ||
157 | sizeof(mips_instruction)) == 0) && | ||
158 | insn_has_delayslot(prev_insn)) { | ||
159 | pr_notice("Kprobes for branch delayslot are not supported\n"); | ||
126 | ret = -EINVAL; | 160 | ret = -EINVAL; |
127 | goto out; | 161 | goto out; |
128 | } | 162 | } |
@@ -138,9 +172,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
138 | * In the kprobe->ainsn.insn[] array we store the original | 172 | * In the kprobe->ainsn.insn[] array we store the original |
139 | * instruction at index zero and a break trap instruction at | 173 | * instruction at index zero and a break trap instruction at |
140 | * index one. | 174 | * index one. |
175 | * | ||
176 | * On MIPS arch if the instruction at probed address is a | ||
177 | * branch instruction, we need to execute the instruction at | ||
178 | * Branch Delayslot (BD) at the time of probe hit. As MIPS also | ||
179 | * doesn't have single stepping support, the BD instruction can | ||
180 | * not be executed in-line and it would be executed on SSOL slot | ||
181 | * using a normal breakpoint instruction in the next slot. | ||
182 | * So, read the instruction and save it for later execution. | ||
141 | */ | 183 | */ |
184 | if (insn_has_delayslot(insn)) | ||
185 | memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t)); | ||
186 | else | ||
187 | memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | ||
142 | 188 | ||
143 | memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | ||
144 | p->ainsn.insn[1] = breakpoint2_insn; | 189 | p->ainsn.insn[1] = breakpoint2_insn; |
145 | p->opcode = *p->addr; | 190 | p->opcode = *p->addr; |
146 | 191 | ||
@@ -191,16 +236,96 @@ static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
191 | kcb->kprobe_saved_epc = regs->cp0_epc; | 236 | kcb->kprobe_saved_epc = regs->cp0_epc; |
192 | } | 237 | } |
193 | 238 | ||
194 | static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 239 | /** |
240 | * evaluate_branch_instrucion - | ||
241 | * | ||
242 | * Evaluate the branch instruction at probed address during probe hit. The | ||
243 | * result of evaluation would be the updated epc. The insturction in delayslot | ||
244 | * would actually be single stepped using a normal breakpoint) on SSOL slot. | ||
245 | * | ||
246 | * The result is also saved in the kprobe control block for later use, | ||
247 | * in case we need to execute the delayslot instruction. The latter will be | ||
248 | * false for NOP instruction in dealyslot and the branch-likely instructions | ||
249 | * when the branch is taken. And for those cases we set a flag as | ||
250 | * SKIP_DELAYSLOT in the kprobe control block | ||
251 | */ | ||
252 | static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, | ||
253 | struct kprobe_ctlblk *kcb) | ||
195 | { | 254 | { |
255 | union mips_instruction insn = p->opcode; | ||
256 | long epc; | ||
257 | int ret = 0; | ||
258 | |||
259 | epc = regs->cp0_epc; | ||
260 | if (epc & 3) | ||
261 | goto unaligned; | ||
262 | |||
263 | if (p->ainsn.insn->word == 0) | ||
264 | kcb->flags |= SKIP_DELAYSLOT; | ||
265 | else | ||
266 | kcb->flags &= ~SKIP_DELAYSLOT; | ||
267 | |||
268 | ret = __compute_return_epc_for_insn(regs, insn); | ||
269 | if (ret < 0) | ||
270 | return ret; | ||
271 | |||
272 | if (ret == BRANCH_LIKELY_TAKEN) | ||
273 | kcb->flags |= SKIP_DELAYSLOT; | ||
274 | |||
275 | kcb->target_epc = regs->cp0_epc; | ||
276 | |||
277 | return 0; | ||
278 | |||
279 | unaligned: | ||
280 | pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); | ||
281 | force_sig(SIGBUS, current); | ||
282 | return -EFAULT; | ||
283 | |||
284 | } | ||
285 | |||
286 | static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
287 | struct kprobe_ctlblk *kcb) | ||
288 | { | ||
289 | int ret = 0; | ||
290 | |||
196 | regs->cp0_status &= ~ST0_IE; | 291 | regs->cp0_status &= ~ST0_IE; |
197 | 292 | ||
198 | /* single step inline if the instruction is a break */ | 293 | /* single step inline if the instruction is a break */ |
199 | if (p->opcode.word == breakpoint_insn.word || | 294 | if (p->opcode.word == breakpoint_insn.word || |
200 | p->opcode.word == breakpoint2_insn.word) | 295 | p->opcode.word == breakpoint2_insn.word) |
201 | regs->cp0_epc = (unsigned long)p->addr; | 296 | regs->cp0_epc = (unsigned long)p->addr; |
202 | else | 297 | else if (insn_has_delayslot(p->opcode)) { |
203 | regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; | 298 | ret = evaluate_branch_instruction(p, regs, kcb); |
299 | if (ret < 0) { | ||
300 | pr_notice("Kprobes: Error in evaluating branch\n"); | ||
301 | return; | ||
302 | } | ||
303 | } | ||
304 | regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * Called after single-stepping. p->addr is the address of the | ||
309 | * instruction whose first byte has been replaced by the "break 0" | ||
310 | * instruction. To avoid the SMP problems that can occur when we | ||
311 | * temporarily put back the original opcode to single-step, we | ||
312 | * single-stepped a copy of the instruction. The address of this | ||
313 | * copy is p->ainsn.insn. | ||
314 | * | ||
315 | * This function prepares to return from the post-single-step | ||
316 | * breakpoint trap. In case of branch instructions, the target | ||
317 | * epc to be restored. | ||
318 | */ | ||
319 | static void __kprobes resume_execution(struct kprobe *p, | ||
320 | struct pt_regs *regs, | ||
321 | struct kprobe_ctlblk *kcb) | ||
322 | { | ||
323 | if (insn_has_delayslot(p->opcode)) | ||
324 | regs->cp0_epc = kcb->target_epc; | ||
325 | else { | ||
326 | unsigned long orig_epc = kcb->kprobe_saved_epc; | ||
327 | regs->cp0_epc = orig_epc + 4; | ||
328 | } | ||
204 | } | 329 | } |
205 | 330 | ||
206 | static int __kprobes kprobe_handler(struct pt_regs *regs) | 331 | static int __kprobes kprobe_handler(struct pt_regs *regs) |
@@ -239,8 +364,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
239 | save_previous_kprobe(kcb); | 364 | save_previous_kprobe(kcb); |
240 | set_current_kprobe(p, regs, kcb); | 365 | set_current_kprobe(p, regs, kcb); |
241 | kprobes_inc_nmissed_count(p); | 366 | kprobes_inc_nmissed_count(p); |
242 | prepare_singlestep(p, regs); | 367 | prepare_singlestep(p, regs, kcb); |
243 | kcb->kprobe_status = KPROBE_REENTER; | 368 | kcb->kprobe_status = KPROBE_REENTER; |
369 | if (kcb->flags & SKIP_DELAYSLOT) { | ||
370 | resume_execution(p, regs, kcb); | ||
371 | restore_previous_kprobe(kcb); | ||
372 | preempt_enable_no_resched(); | ||
373 | } | ||
244 | return 1; | 374 | return 1; |
245 | } else { | 375 | } else { |
246 | if (addr->word != breakpoint_insn.word) { | 376 | if (addr->word != breakpoint_insn.word) { |
@@ -284,8 +414,16 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
284 | } | 414 | } |
285 | 415 | ||
286 | ss_probe: | 416 | ss_probe: |
287 | prepare_singlestep(p, regs); | 417 | prepare_singlestep(p, regs, kcb); |
288 | kcb->kprobe_status = KPROBE_HIT_SS; | 418 | if (kcb->flags & SKIP_DELAYSLOT) { |
419 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
420 | if (p->post_handler) | ||
421 | p->post_handler(p, regs, 0); | ||
422 | resume_execution(p, regs, kcb); | ||
423 | preempt_enable_no_resched(); | ||
424 | } else | ||
425 | kcb->kprobe_status = KPROBE_HIT_SS; | ||
426 | |||
289 | return 1; | 427 | return 1; |
290 | 428 | ||
291 | no_kprobe: | 429 | no_kprobe: |
@@ -294,25 +432,6 @@ no_kprobe: | |||
294 | 432 | ||
295 | } | 433 | } |
296 | 434 | ||
297 | /* | ||
298 | * Called after single-stepping. p->addr is the address of the | ||
299 | * instruction whose first byte has been replaced by the "break 0" | ||
300 | * instruction. To avoid the SMP problems that can occur when we | ||
301 | * temporarily put back the original opcode to single-step, we | ||
302 | * single-stepped a copy of the instruction. The address of this | ||
303 | * copy is p->ainsn.insn. | ||
304 | * | ||
305 | * This function prepares to return from the post-single-step | ||
306 | * breakpoint trap. | ||
307 | */ | ||
308 | static void __kprobes resume_execution(struct kprobe *p, | ||
309 | struct pt_regs *regs, | ||
310 | struct kprobe_ctlblk *kcb) | ||
311 | { | ||
312 | unsigned long orig_epc = kcb->kprobe_saved_epc; | ||
313 | regs->cp0_epc = orig_epc + 4; | ||
314 | } | ||
315 | |||
316 | static inline int post_kprobe_handler(struct pt_regs *regs) | 435 | static inline int post_kprobe_handler(struct pt_regs *regs) |
317 | { | 436 | { |
318 | struct kprobe *cur = kprobe_running(); | 437 | struct kprobe *cur = kprobe_running(); |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 315fc0b250f8..e3b897acfbc0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -621,11 +621,6 @@ static int mipspmu_event_init(struct perf_event *event) | |||
621 | return -ENODEV; | 621 | return -ENODEV; |
622 | 622 | ||
623 | if (!atomic_inc_not_zero(&active_events)) { | 623 | if (!atomic_inc_not_zero(&active_events)) { |
624 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
625 | atomic_dec(&active_events); | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | |||
629 | mutex_lock(&pmu_reserve_mutex); | 624 | mutex_lock(&pmu_reserve_mutex); |
630 | if (atomic_read(&active_events) == 0) | 625 | if (atomic_read(&active_events) == 0) |
631 | err = mipspmu_get_irq(); | 626 | err = mipspmu_get_irq(); |
@@ -638,11 +633,7 @@ static int mipspmu_event_init(struct perf_event *event) | |||
638 | if (err) | 633 | if (err) |
639 | return err; | 634 | return err; |
640 | 635 | ||
641 | err = __hw_perf_event_init(event); | 636 | return __hw_perf_event_init(event); |
642 | if (err) | ||
643 | hw_perf_event_destroy(event); | ||
644 | |||
645 | return err; | ||
646 | } | 637 | } |
647 | 638 | ||
648 | static struct pmu pmu = { | 639 | static struct pmu pmu = { |
@@ -712,18 +703,6 @@ static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) | |||
712 | 703 | ||
713 | } | 704 | } |
714 | 705 | ||
715 | static int validate_event(struct cpu_hw_events *cpuc, | ||
716 | struct perf_event *event) | ||
717 | { | ||
718 | struct hw_perf_event fake_hwc = event->hw; | ||
719 | |||
720 | /* Allow mixed event group. So return 1 to pass validation. */ | ||
721 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) | ||
722 | return 1; | ||
723 | |||
724 | return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0; | ||
725 | } | ||
726 | |||
727 | static int validate_group(struct perf_event *event) | 706 | static int validate_group(struct perf_event *event) |
728 | { | 707 | { |
729 | struct perf_event *sibling, *leader = event->group_leader; | 708 | struct perf_event *sibling, *leader = event->group_leader; |
@@ -731,15 +710,15 @@ static int validate_group(struct perf_event *event) | |||
731 | 710 | ||
732 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); | 711 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); |
733 | 712 | ||
734 | if (!validate_event(&fake_cpuc, leader)) | 713 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) |
735 | return -EINVAL; | 714 | return -EINVAL; |
736 | 715 | ||
737 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | 716 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { |
738 | if (!validate_event(&fake_cpuc, sibling)) | 717 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) |
739 | return -EINVAL; | 718 | return -EINVAL; |
740 | } | 719 | } |
741 | 720 | ||
742 | if (!validate_event(&fake_cpuc, event)) | 721 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) |
743 | return -EINVAL; | 722 | return -EINVAL; |
744 | 723 | ||
745 | return 0; | 724 | return 0; |
@@ -1279,13 +1258,14 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1279 | } | 1258 | } |
1280 | 1259 | ||
1281 | err = 0; | 1260 | err = 0; |
1282 | if (event->group_leader != event) { | 1261 | if (event->group_leader != event) |
1283 | err = validate_group(event); | 1262 | err = validate_group(event); |
1284 | if (err) | ||
1285 | return -EINVAL; | ||
1286 | } | ||
1287 | 1263 | ||
1288 | event->destroy = hw_perf_event_destroy; | 1264 | event->destroy = hw_perf_event_destroy; |
1265 | |||
1266 | if (err) | ||
1267 | event->destroy(event); | ||
1268 | |||
1289 | return err; | 1269 | return err; |
1290 | } | 1270 | } |
1291 | 1271 | ||
@@ -1380,20 +1360,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | |||
1380 | } | 1360 | } |
1381 | 1361 | ||
1382 | /* 24K */ | 1362 | /* 24K */ |
1383 | #define IS_UNSUPPORTED_24K_EVENT(r, b) \ | ||
1384 | ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \ | ||
1385 | (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \ | ||
1386 | (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \ | ||
1387 | (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \ | ||
1388 | ((b) >= 68 && (b) <= 127)) | ||
1389 | #define IS_BOTH_COUNTERS_24K_EVENT(b) \ | 1363 | #define IS_BOTH_COUNTERS_24K_EVENT(b) \ |
1390 | ((b) == 0 || (b) == 1 || (b) == 11) | 1364 | ((b) == 0 || (b) == 1 || (b) == 11) |
1391 | 1365 | ||
1392 | /* 34K */ | 1366 | /* 34K */ |
1393 | #define IS_UNSUPPORTED_34K_EVENT(r, b) \ | ||
1394 | ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \ | ||
1395 | (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \ | ||
1396 | ((b) >= 68 && (b) <= 127)) | ||
1397 | #define IS_BOTH_COUNTERS_34K_EVENT(b) \ | 1367 | #define IS_BOTH_COUNTERS_34K_EVENT(b) \ |
1398 | ((b) == 0 || (b) == 1 || (b) == 11) | 1368 | ((b) == 0 || (b) == 1 || (b) == 11) |
1399 | #ifdef CONFIG_MIPS_MT_SMP | 1369 | #ifdef CONFIG_MIPS_MT_SMP |
@@ -1406,20 +1376,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | |||
1406 | #endif | 1376 | #endif |
1407 | 1377 | ||
1408 | /* 74K */ | 1378 | /* 74K */ |
1409 | #define IS_UNSUPPORTED_74K_EVENT(r, b) \ | ||
1410 | ((r) == 5 || ((r) >= 135 && (r) <= 137) || \ | ||
1411 | ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \ | ||
1412 | (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \ | ||
1413 | (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \ | ||
1414 | (b) == 61 || (r) == 62 || (r) == 191 || \ | ||
1415 | ((b) >= 64 && (b) <= 127)) | ||
1416 | #define IS_BOTH_COUNTERS_74K_EVENT(b) \ | 1379 | #define IS_BOTH_COUNTERS_74K_EVENT(b) \ |
1417 | ((b) == 0 || (b) == 1) | 1380 | ((b) == 0 || (b) == 1) |
1418 | 1381 | ||
1419 | /* 1004K */ | 1382 | /* 1004K */ |
1420 | #define IS_UNSUPPORTED_1004K_EVENT(r, b) \ | ||
1421 | ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \ | ||
1422 | (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127)) | ||
1423 | #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ | 1383 | #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ |
1424 | ((b) == 0 || (b) == 1 || (b) == 11) | 1384 | ((b) == 0 || (b) == 1 || (b) == 11) |
1425 | #ifdef CONFIG_MIPS_MT_SMP | 1385 | #ifdef CONFIG_MIPS_MT_SMP |
@@ -1445,11 +1405,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1445 | unsigned int raw_id = config & 0xff; | 1405 | unsigned int raw_id = config & 0xff; |
1446 | unsigned int base_id = raw_id & 0x7f; | 1406 | unsigned int base_id = raw_id & 0x7f; |
1447 | 1407 | ||
1408 | raw_event.event_id = base_id; | ||
1409 | |||
1448 | switch (current_cpu_type()) { | 1410 | switch (current_cpu_type()) { |
1449 | case CPU_24K: | 1411 | case CPU_24K: |
1450 | if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id)) | ||
1451 | return ERR_PTR(-EOPNOTSUPP); | ||
1452 | raw_event.event_id = base_id; | ||
1453 | if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) | 1412 | if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) |
1454 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 1413 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1455 | else | 1414 | else |
@@ -1464,9 +1423,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1464 | #endif | 1423 | #endif |
1465 | break; | 1424 | break; |
1466 | case CPU_34K: | 1425 | case CPU_34K: |
1467 | if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id)) | ||
1468 | return ERR_PTR(-EOPNOTSUPP); | ||
1469 | raw_event.event_id = base_id; | ||
1470 | if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) | 1426 | if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) |
1471 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 1427 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1472 | else | 1428 | else |
@@ -1482,9 +1438,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1482 | #endif | 1438 | #endif |
1483 | break; | 1439 | break; |
1484 | case CPU_74K: | 1440 | case CPU_74K: |
1485 | if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id)) | ||
1486 | return ERR_PTR(-EOPNOTSUPP); | ||
1487 | raw_event.event_id = base_id; | ||
1488 | if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) | 1441 | if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) |
1489 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 1442 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1490 | else | 1443 | else |
@@ -1495,9 +1448,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1495 | #endif | 1448 | #endif |
1496 | break; | 1449 | break; |
1497 | case CPU_1004K: | 1450 | case CPU_1004K: |
1498 | if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id)) | ||
1499 | return ERR_PTR(-EOPNOTSUPP); | ||
1500 | raw_event.event_id = base_id; | ||
1501 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) | 1451 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) |
1502 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 1452 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
1503 | else | 1453 | else |
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 933166f44a6d..a9d801dec6b0 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c | |||
@@ -473,7 +473,6 @@ static const struct file_operations rtlx_fops = { | |||
473 | 473 | ||
474 | static struct irqaction rtlx_irq = { | 474 | static struct irqaction rtlx_irq = { |
475 | .handler = rtlx_interrupt, | 475 | .handler = rtlx_interrupt, |
476 | .flags = IRQF_DISABLED, | ||
477 | .name = "RTLX", | 476 | .name = "RTLX", |
478 | }; | 477 | }; |
479 | 478 | ||
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index b1cb8f87d7b4..058e964e7303 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -122,6 +122,9 @@ static void __init print_memory_map(void) | |||
122 | case BOOT_MEM_RAM: | 122 | case BOOT_MEM_RAM: |
123 | printk(KERN_CONT "(usable)\n"); | 123 | printk(KERN_CONT "(usable)\n"); |
124 | break; | 124 | break; |
125 | case BOOT_MEM_INIT_RAM: | ||
126 | printk(KERN_CONT "(usable after init)\n"); | ||
127 | break; | ||
125 | case BOOT_MEM_ROM_DATA: | 128 | case BOOT_MEM_ROM_DATA: |
126 | printk(KERN_CONT "(ROM data)\n"); | 129 | printk(KERN_CONT "(ROM data)\n"); |
127 | break; | 130 | break; |
@@ -362,15 +365,24 @@ static void __init bootmem_init(void) | |||
362 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 365 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
363 | unsigned long start, end, size; | 366 | unsigned long start, end, size; |
364 | 367 | ||
368 | start = PFN_UP(boot_mem_map.map[i].addr); | ||
369 | end = PFN_DOWN(boot_mem_map.map[i].addr | ||
370 | + boot_mem_map.map[i].size); | ||
371 | |||
365 | /* | 372 | /* |
366 | * Reserve usable memory. | 373 | * Reserve usable memory. |
367 | */ | 374 | */ |
368 | if (boot_mem_map.map[i].type != BOOT_MEM_RAM) | 375 | switch (boot_mem_map.map[i].type) { |
376 | case BOOT_MEM_RAM: | ||
377 | break; | ||
378 | case BOOT_MEM_INIT_RAM: | ||
379 | memory_present(0, start, end); | ||
369 | continue; | 380 | continue; |
381 | default: | ||
382 | /* Not usable memory */ | ||
383 | continue; | ||
384 | } | ||
370 | 385 | ||
371 | start = PFN_UP(boot_mem_map.map[i].addr); | ||
372 | end = PFN_DOWN(boot_mem_map.map[i].addr | ||
373 | + boot_mem_map.map[i].size); | ||
374 | /* | 386 | /* |
375 | * We are rounding up the start address of usable memory | 387 | * We are rounding up the start address of usable memory |
376 | * and at the end of the usable range downwards. | 388 | * and at the end of the usable range downwards. |
@@ -456,11 +468,33 @@ early_param("mem", early_parse_mem); | |||
456 | 468 | ||
457 | static void __init arch_mem_init(char **cmdline_p) | 469 | static void __init arch_mem_init(char **cmdline_p) |
458 | { | 470 | { |
471 | phys_t init_mem, init_end, init_size; | ||
472 | |||
459 | extern void plat_mem_setup(void); | 473 | extern void plat_mem_setup(void); |
460 | 474 | ||
461 | /* call board setup routine */ | 475 | /* call board setup routine */ |
462 | plat_mem_setup(); | 476 | plat_mem_setup(); |
463 | 477 | ||
478 | init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT; | ||
479 | init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT; | ||
480 | init_size = init_end - init_mem; | ||
481 | if (init_size) { | ||
482 | /* Make sure it is in the boot_mem_map */ | ||
483 | int i, found; | ||
484 | found = 0; | ||
485 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
486 | if (init_mem >= boot_mem_map.map[i].addr && | ||
487 | init_mem < (boot_mem_map.map[i].addr + | ||
488 | boot_mem_map.map[i].size)) { | ||
489 | found = 1; | ||
490 | break; | ||
491 | } | ||
492 | } | ||
493 | if (!found) | ||
494 | add_memory_region(init_mem, init_size, | ||
495 | BOOT_MEM_INIT_RAM); | ||
496 | } | ||
497 | |||
464 | pr_info("Determined physical RAM map:\n"); | 498 | pr_info("Determined physical RAM map:\n"); |
465 | print_memory_map(); | 499 | print_memory_map(); |
466 | 500 | ||
@@ -524,6 +558,7 @@ static void __init resource_init(void) | |||
524 | res = alloc_bootmem(sizeof(struct resource)); | 558 | res = alloc_bootmem(sizeof(struct resource)); |
525 | switch (boot_mem_map.map[i].type) { | 559 | switch (boot_mem_map.map[i].type) { |
526 | case BOOT_MEM_RAM: | 560 | case BOOT_MEM_RAM: |
561 | case BOOT_MEM_INIT_RAM: | ||
527 | case BOOT_MEM_ROM_DATA: | 562 | case BOOT_MEM_ROM_DATA: |
528 | res->name = "System RAM"; | 563 | res->name = "System RAM"; |
529 | break; | 564 | break; |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c new file mode 100644 index 000000000000..58fe71afd879 --- /dev/null +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -0,0 +1,458 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) | ||
7 | * | ||
8 | * SMP support for BMIPS | ||
9 | */ | ||
10 | |||
11 | #include <linux/version.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/reboot.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/compiler.h> | ||
25 | #include <linux/linkage.h> | ||
26 | #include <linux/bug.h> | ||
27 | #include <linux/kernel.h> | ||
28 | |||
29 | #include <asm/time.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/bootinfo.h> | ||
34 | #include <asm/pmon.h> | ||
35 | #include <asm/cacheflush.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/mipsregs.h> | ||
38 | #include <asm/bmips.h> | ||
39 | #include <asm/traps.h> | ||
40 | #include <asm/barrier.h> | ||
41 | |||
42 | static int __maybe_unused max_cpus = 1; | ||
43 | |||
44 | /* these may be configured by the platform code */ | ||
45 | int bmips_smp_enabled = 1; | ||
46 | int bmips_cpu_offset; | ||
47 | cpumask_t bmips_booted_mask; | ||
48 | |||
49 | #ifdef CONFIG_SMP | ||
50 | |||
51 | /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ | ||
52 | unsigned long bmips_smp_boot_sp; | ||
53 | unsigned long bmips_smp_boot_gp; | ||
54 | |||
55 | static void bmips_send_ipi_single(int cpu, unsigned int action); | ||
56 | static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); | ||
57 | |||
58 | /* SW interrupts 0,1 are used for interprocessor signaling */ | ||
59 | #define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) | ||
60 | #define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1) | ||
61 | |||
62 | #define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift)) | ||
63 | #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
64 | #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
65 | #define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0)) | ||
66 | |||
67 | static void __init bmips_smp_setup(void) | ||
68 | { | ||
69 | int i; | ||
70 | |||
71 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
72 | /* arbitration priority */ | ||
73 | clear_c0_brcm_cmt_ctrl(0x30); | ||
74 | |||
75 | /* NBK and weak order flags */ | ||
76 | set_c0_brcm_config_0(0x30000); | ||
77 | |||
78 | /* | ||
79 | * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread | ||
80 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output | ||
81 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output | ||
82 | */ | ||
83 | change_c0_brcm_cmt_intr(0xf8018000, | ||
84 | (0x02 << 27) | (0x03 << 15)); | ||
85 | |||
86 | /* single core, 2 threads (2 pipelines) */ | ||
87 | max_cpus = 2; | ||
88 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
89 | /* enable raceless SW interrupts */ | ||
90 | set_c0_brcm_config(0x03 << 22); | ||
91 | |||
92 | /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ | ||
93 | change_c0_brcm_mode(0x1f << 27, 0x02 << 27); | ||
94 | |||
95 | /* N cores, 2 threads per core */ | ||
96 | max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; | ||
97 | |||
98 | /* clear any pending SW interrupts */ | ||
99 | for (i = 0; i < max_cpus; i++) { | ||
100 | write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); | ||
101 | write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); | ||
102 | } | ||
103 | #endif | ||
104 | |||
105 | if (!bmips_smp_enabled) | ||
106 | max_cpus = 1; | ||
107 | |||
108 | /* this can be overridden by the BSP */ | ||
109 | if (!board_ebase_setup) | ||
110 | board_ebase_setup = &bmips_ebase_setup; | ||
111 | |||
112 | for (i = 0; i < max_cpus; i++) { | ||
113 | __cpu_number_map[i] = 1; | ||
114 | __cpu_logical_map[i] = 1; | ||
115 | set_cpu_possible(i, 1); | ||
116 | set_cpu_present(i, 1); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * IPI IRQ setup - runs on CPU0 | ||
122 | */ | ||
123 | static void bmips_prepare_cpus(unsigned int max_cpus) | ||
124 | { | ||
125 | if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | ||
126 | "smp_ipi0", NULL)) | ||
127 | panic("Can't request IPI0 interrupt\n"); | ||
128 | if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, | ||
129 | "smp_ipi1", NULL)) | ||
130 | panic("Can't request IPI1 interrupt\n"); | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * Tell the hardware to boot CPUx - runs on CPU0 | ||
135 | */ | ||
136 | static void bmips_boot_secondary(int cpu, struct task_struct *idle) | ||
137 | { | ||
138 | bmips_smp_boot_sp = __KSTK_TOS(idle); | ||
139 | bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); | ||
140 | mb(); | ||
141 | |||
142 | /* | ||
143 | * Initial boot sequence for secondary CPU: | ||
144 | * bmips_reset_nmi_vec @ a000_0000 -> | ||
145 | * bmips_smp_entry -> | ||
146 | * plat_wired_tlb_setup (cached function call; optional) -> | ||
147 | * start_secondary (cached jump) | ||
148 | * | ||
149 | * Warm restart sequence: | ||
150 | * play_dead WAIT loop -> | ||
151 | * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC -> | ||
152 | * eret to play_dead -> | ||
153 | * bmips_secondary_reentry -> | ||
154 | * start_secondary | ||
155 | */ | ||
156 | |||
157 | pr_info("SMP: Booting CPU%d...\n", cpu); | ||
158 | |||
159 | if (cpumask_test_cpu(cpu, &bmips_booted_mask)) | ||
160 | bmips_send_ipi_single(cpu, 0); | ||
161 | else { | ||
162 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
163 | set_c0_brcm_cmt_ctrl(0x01); | ||
164 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
165 | if (cpu & 0x01) | ||
166 | write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); | ||
167 | else { | ||
168 | /* | ||
169 | * core N thread 0 was already booted; just | ||
170 | * pulse the NMI line | ||
171 | */ | ||
172 | bmips_write_zscm_reg(0x210, 0xc0000000); | ||
173 | udelay(10); | ||
174 | bmips_write_zscm_reg(0x210, 0x00); | ||
175 | } | ||
176 | #endif | ||
177 | cpumask_set_cpu(cpu, &bmips_booted_mask); | ||
178 | } | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * Early setup - runs on secondary CPU after cache probe | ||
183 | */ | ||
184 | static void bmips_init_secondary(void) | ||
185 | { | ||
186 | /* move NMI vector to kseg0, in case XKS01 is enabled */ | ||
187 | |||
188 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
189 | void __iomem *cbr = BMIPS_GET_CBR(); | ||
190 | unsigned long old_vec; | ||
191 | |||
192 | old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
193 | __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
194 | |||
195 | clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); | ||
196 | #elif defined(CONFIG_CPU_BMIPS5000) | ||
197 | write_c0_brcm_bootvec(read_c0_brcm_bootvec() & | ||
198 | (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); | ||
199 | |||
200 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); | ||
201 | #endif | ||
202 | |||
203 | /* make sure there won't be a timer interrupt for a little while */ | ||
204 | write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); | ||
205 | |||
206 | irq_enable_hazard(); | ||
207 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); | ||
208 | irq_enable_hazard(); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Late setup - runs on secondary CPU before entering the idle loop | ||
213 | */ | ||
214 | static void bmips_smp_finish(void) | ||
215 | { | ||
216 | pr_info("SMP: CPU%d is running\n", smp_processor_id()); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Runs on CPU0 after all CPUs have been booted | ||
221 | */ | ||
222 | static void bmips_cpus_done(void) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | #if defined(CONFIG_CPU_BMIPS5000) | ||
227 | |||
228 | /* | ||
229 | * BMIPS5000 raceless IPIs | ||
230 | * | ||
231 | * Each CPU has two inbound SW IRQs which are independent of all other CPUs. | ||
232 | * IPI0 is used for SMP_RESCHEDULE_YOURSELF | ||
233 | * IPI1 is used for SMP_CALL_FUNCTION | ||
234 | */ | ||
235 | |||
236 | static void bmips_send_ipi_single(int cpu, unsigned int action) | ||
237 | { | ||
238 | write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); | ||
239 | } | ||
240 | |||
241 | static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) | ||
242 | { | ||
243 | int action = irq - IPI0_IRQ; | ||
244 | |||
245 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action)); | ||
246 | |||
247 | if (action == 0) | ||
248 | scheduler_ipi(); | ||
249 | else | ||
250 | smp_call_function_interrupt(); | ||
251 | |||
252 | return IRQ_HANDLED; | ||
253 | } | ||
254 | |||
255 | #else | ||
256 | |||
257 | /* | ||
258 | * BMIPS43xx racey IPIs | ||
259 | * | ||
260 | * We use one inbound SW IRQ for each CPU. | ||
261 | * | ||
262 | * A spinlock must be held in order to keep CPUx from accidentally clearing | ||
263 | * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The | ||
264 | * same spinlock is used to protect the action masks. | ||
265 | */ | ||
266 | |||
267 | static DEFINE_SPINLOCK(ipi_lock); | ||
268 | static DEFINE_PER_CPU(int, ipi_action_mask); | ||
269 | |||
270 | static void bmips_send_ipi_single(int cpu, unsigned int action) | ||
271 | { | ||
272 | unsigned long flags; | ||
273 | |||
274 | spin_lock_irqsave(&ipi_lock, flags); | ||
275 | set_c0_cause(cpu ? C_SW1 : C_SW0); | ||
276 | per_cpu(ipi_action_mask, cpu) |= action; | ||
277 | irq_enable_hazard(); | ||
278 | spin_unlock_irqrestore(&ipi_lock, flags); | ||
279 | } | ||
280 | |||
/*
 * IPI handler (BMIPS43xx racey scheme).
 *
 * Under ipi_lock: snapshot and clear this CPU's pending action mask and
 * ack the SW cause bit, then dispatch each pending action outside the lock.
 *
 * NOTE(review): irq - IPI0_IRQ is assumed to equal the local CPU id (one
 * inbound SW IRQ per CPU), so __get_cpu_var() and per_cpu(..., cpu) refer
 * to the same slot — confirm against the platform IRQ mapping.
 */
static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int action, cpu = irq - IPI0_IRQ;

	spin_lock_irqsave(&ipi_lock, flags);
	action = __get_cpu_var(ipi_action_mask);
	per_cpu(ipi_action_mask, cpu) = 0;
	clear_c0_cause(cpu ? C_SW1 : C_SW0);
	spin_unlock_irqrestore(&ipi_lock, flags);

	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}
299 | |||
300 | #endif /* BMIPS type */ | ||
301 | |||
302 | static void bmips_send_ipi_mask(const struct cpumask *mask, | ||
303 | unsigned int action) | ||
304 | { | ||
305 | unsigned int i; | ||
306 | |||
307 | for_each_cpu(i, mask) | ||
308 | bmips_send_ipi_single(i, action); | ||
309 | } | ||
310 | |||
311 | #ifdef CONFIG_HOTPLUG_CPU | ||
312 | |||
/*
 * Hotplug: take the current CPU offline.
 *
 * Returns 0 on success, or -EBUSY for CPU0 (the boot CPU cannot be
 * removed).
 */
static int bmips_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	pr_info("SMP: CPU%d is offline\n", cpu);

	/* drop out of the online/callin maps so no new work lands here */
	cpu_clear(cpu, cpu_online_map);
	cpu_clear(cpu, cpu_callin_map);

	local_flush_tlb_all();
	local_flush_icache_range(0, ~0);

	return 0;
}
330 | |||
/*
 * Hotplug: nothing for the surviving CPU to do here — the dying CPU
 * parks itself in play_dead() below.
 */
static void bmips_cpu_die(unsigned int cpu)
{
}
334 | |||
/*
 * Last code run on a dying CPU: flush the data cache, mask everything
 * except the SW0/SW1 wakeup interrupts, then sit in "wait" until
 * bmips_boot_secondary() re-awakens us via bmips_secondary_reentry.
 */
void __ref play_dead(void)
{
	idle_task_exit();

	/* flush data cache */
	_dma_cache_wback_inv(0, ~0);

	/*
	 * Wakeup is on SW0 or SW1; disable everything else
	 * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
	 * IRQ handlers; this clears ST0_IE and returns immediately.
	 */
	clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
	change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
		IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
	irq_disable_hazard();

	/*
	 * wait for SW interrupt from bmips_boot_secondary(), then jump
	 * back to start_secondary()
	 */
	__asm__ __volatile__(
	"	wait\n"
	"	j	bmips_secondary_reentry\n"
	: : : "memory");
}
361 | |||
362 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
363 | |||
/* MIPS platform SMP hooks for Broadcom BMIPS43xx / BMIPS5000 SoCs. */
struct plat_smp_ops bmips_smp_ops = {
	.smp_setup		= bmips_smp_setup,
	.prepare_cpus		= bmips_prepare_cpus,
	.boot_secondary		= bmips_boot_secondary,
	.smp_finish		= bmips_smp_finish,
	.init_secondary		= bmips_init_secondary,
	.cpus_done		= bmips_cpus_done,
	.send_ipi_single	= bmips_send_ipi_single,
	.send_ipi_mask		= bmips_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= bmips_cpu_disable,
	.cpu_die		= bmips_cpu_die,
#endif
};
378 | |||
379 | #endif /* CONFIG_SMP */ | ||
380 | |||
381 | /*********************************************************************** | ||
382 | * BMIPS vector relocation | ||
383 | * This is primarily used for SMP boot, but it is applicable to some | ||
384 | * UP BMIPS systems as well. | ||
385 | ***********************************************************************/ | ||
386 | |||
387 | static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) | ||
388 | { | ||
389 | memcpy((void *)dst, start, end - start); | ||
390 | dma_cache_wback((unsigned long)start, end - start); | ||
391 | local_flush_icache_range(dst, dst + (end - start)); | ||
392 | instruction_hazard(); | ||
393 | } | ||
394 | |||
/*
 * Install the reset/NMI stub and the warm-restart stub at their fixed
 * vector addresses; registered as board_nmi_handler_setup from
 * bmips_ebase_setup() below.
 */
static inline void __cpuinit bmips_nmi_handler_setup(void)
{
	bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
		&bmips_reset_nmi_vec_end);
	bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
		&bmips_smp_int_vec_end);
}
402 | |||
/*
 * Relocate the exception base per CPU variant so the reset/NMI stub at
 * 0x8000_0000 does not collide with the normal exception vectors.
 * Called via the board_ebase_setup hook from trap_init(), before the
 * regular handlers are copied into place.
 */
void __cpuinit bmips_ebase_setup(void)
{
	unsigned long new_ebase = ebase;
	void __iomem __maybe_unused *cbr;

	/* this code assumes the kernel's default ebase of CKSEG0 */
	BUG_ON(ebase != CKSEG0);

#if defined(CONFIG_CPU_BMIPS4350)
	/*
	 * BMIPS4350 cannot relocate the normal vectors, but it
	 * can relocate the BEV=1 vectors. So CPU1 starts up at
	 * the relocated BEV=1, IV=0 general exception vector @
	 * 0xa000_0380.
	 *
	 * set_uncached_handler() is used here because:
	 * - CPU1 will run this from uncached space
	 * - None of the cacheflush functions are set up yet
	 */
	set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
		&bmips_smp_int_vec, 0x80);
	__sync();
	return;
#elif defined(CONFIG_CPU_BMIPS4380)
	/*
	 * 0x8000_0000: reset/NMI (initially in kseg1)
	 * 0x8000_0400: normal vectors
	 */
	new_ebase = 0x80000400;
	cbr = BMIPS_GET_CBR();
	__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
	__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
#elif defined(CONFIG_CPU_BMIPS5000)
	/*
	 * 0x8000_0000: reset/NMI (initially in kseg1)
	 * 0x8000_1000: normal vectors
	 */
	new_ebase = 0x80001000;
	write_c0_brcm_bootvec(0xa0088008);
	write_c0_ebase(new_ebase);
	if (max_cpus > 2)
		bmips_write_zscm_reg(0xa0, 0xa008a008);
#else
	return;
#endif
	/* hook in the NMI/warm-restart stub installer for later CPUs */
	board_nmi_handler_setup = &bmips_nmi_handler_setup;
	ebase = new_ebase;
}
450 | |||
/*
 * Weak default: boards that need wired TLB entries for secondary CPUs
 * override this.
 */
asmlinkage void __weak plat_wired_tlb_setup(void)
{
	/*
	 * Called when starting/restarting a secondary CPU.
	 * Kernel stacks and other important data might only be accessible
	 * once the wired entries are present.
	 */
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index f0895e70e283..0a42ff3ff6a1 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -559,7 +559,7 @@ void smtc_prepare_cpus(int cpus) | |||
559 | 559 | ||
560 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); | 560 | pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); |
561 | if (pipi == NULL) | 561 | if (pipi == NULL) |
562 | panic("kmalloc of IPI message buffers failed\n"); | 562 | panic("kmalloc of IPI message buffers failed"); |
563 | else | 563 | else |
564 | printk("IPI buffer pool of %d buffers\n", nipi); | 564 | printk("IPI buffer pool of %d buffers\n", nipi); |
565 | for (i = 0; i < nipi; i++) { | 565 | for (i = 0; i < nipi; i++) { |
@@ -813,7 +813,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
813 | if (pipi == NULL) { | 813 | if (pipi == NULL) { |
814 | bust_spinlocks(1); | 814 | bust_spinlocks(1); |
815 | mips_mt_regdump(dvpe()); | 815 | mips_mt_regdump(dvpe()); |
816 | panic("IPI Msg. Buffers Depleted\n"); | 816 | panic("IPI Msg. Buffers Depleted"); |
817 | } | 817 | } |
818 | pipi->type = type; | 818 | pipi->type = type; |
819 | pipi->arg = (void *)action; | 819 | pipi->arg = (void *)action; |
@@ -1130,7 +1130,7 @@ static void ipi_irq_dispatch(void) | |||
1130 | 1130 | ||
1131 | static struct irqaction irq_ipi = { | 1131 | static struct irqaction irq_ipi = { |
1132 | .handler = ipi_interrupt, | 1132 | .handler = ipi_interrupt, |
1133 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 1133 | .flags = IRQF_PERCPU, |
1134 | .name = "SMTC_IPI" | 1134 | .name = "SMTC_IPI" |
1135 | }; | 1135 | }; |
1136 | 1136 | ||
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bbddb86c1fa1..cc4a3f120f54 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -91,6 +91,7 @@ int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | |||
91 | void (*board_nmi_handler_setup)(void); | 91 | void (*board_nmi_handler_setup)(void); |
92 | void (*board_ejtag_handler_setup)(void); | 92 | void (*board_ejtag_handler_setup)(void); |
93 | void (*board_bind_eic_interrupt)(int irq, int regset); | 93 | void (*board_bind_eic_interrupt)(int irq, int regset); |
94 | void (*board_ebase_setup)(void); | ||
94 | 95 | ||
95 | 96 | ||
96 | static void show_raw_backtrace(unsigned long reg29) | 97 | static void show_raw_backtrace(unsigned long reg29) |
@@ -400,7 +401,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
400 | panic("Fatal exception in interrupt"); | 401 | panic("Fatal exception in interrupt"); |
401 | 402 | ||
402 | if (panic_on_oops) { | 403 | if (panic_on_oops) { |
403 | printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); | 404 | printk(KERN_EMERG "Fatal exception: panic in 5 seconds"); |
404 | ssleep(5); | 405 | ssleep(5); |
405 | panic("Fatal exception"); | 406 | panic("Fatal exception"); |
406 | } | 407 | } |
@@ -1150,7 +1151,7 @@ asmlinkage void do_mt(struct pt_regs *regs) | |||
1150 | asmlinkage void do_dsp(struct pt_regs *regs) | 1151 | asmlinkage void do_dsp(struct pt_regs *regs) |
1151 | { | 1152 | { |
1152 | if (cpu_has_dsp) | 1153 | if (cpu_has_dsp) |
1153 | panic("Unexpected DSP exception\n"); | 1154 | panic("Unexpected DSP exception"); |
1154 | 1155 | ||
1155 | force_sig(SIGILL, current); | 1156 | force_sig(SIGILL, current); |
1156 | } | 1157 | } |
@@ -1339,9 +1340,18 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
1339 | 1340 | ||
1340 | /* | 1341 | /* |
1341 | * NMI exception handler. | 1342 | * NMI exception handler. |
1343 | * No lock; only written during early bootup by CPU 0. | ||
1342 | */ | 1344 | */ |
1345 | static RAW_NOTIFIER_HEAD(nmi_chain); | ||
1346 | |||
1347 | int register_nmi_notifier(struct notifier_block *nb) | ||
1348 | { | ||
1349 | return raw_notifier_chain_register(&nmi_chain, nb); | ||
1350 | } | ||
1351 | |||
1343 | void __noreturn nmi_exception_handler(struct pt_regs *regs) | 1352 | void __noreturn nmi_exception_handler(struct pt_regs *regs) |
1344 | { | 1353 | { |
1354 | raw_notifier_call_chain(&nmi_chain, 0, regs); | ||
1345 | bust_spinlocks(1); | 1355 | bust_spinlocks(1); |
1346 | printk("NMI taken!!!!\n"); | 1356 | printk("NMI taken!!!!\n"); |
1347 | die("NMI", regs); | 1357 | die("NMI", regs); |
@@ -1682,6 +1692,8 @@ void __init trap_init(void) | |||
1682 | ebase += (read_c0_ebase() & 0x3ffff000); | 1692 | ebase += (read_c0_ebase() & 0x3ffff000); |
1683 | } | 1693 | } |
1684 | 1694 | ||
1695 | if (board_ebase_setup) | ||
1696 | board_ebase_setup(); | ||
1685 | per_cpu_trap_init(); | 1697 | per_cpu_trap_init(); |
1686 | 1698 | ||
1687 | /* | 1699 | /* |