author | Paul Mackerras <paulus@samba.org> | 2005-10-30 21:37:12 -0500
---|---|---
committer | Paul Mackerras <paulus@samba.org> | 2005-10-30 21:37:12 -0500
commit | 23fd07750a789a66fe88cf173d52a18f1a387da4 (patch) |
tree | 06fdd6df35fdb835abdaa9b754d62f6b84b97250 /arch/mips/kernel |
parent | bd787d438a59266af3c9f6351644c85ef1dd21fe (diff) |
parent | ed28f96ac1960f30f818374d65be71d2fdf811b0 (diff) |
Merge ../linux-2.6 by hand
Diffstat (limited to 'arch/mips/kernel')
53 files changed, 4968 insertions(+), 2069 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d3303584fbd1..72f2126ad19d 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -11,11 +11,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | |||
11 | binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \ | 11 | binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \ |
12 | irix5sys.o sysirix.o | 12 | irix5sys.o sysirix.o |
13 | 13 | ||
14 | ifdef CONFIG_MODULES | 14 | obj-$(CONFIG_MODULES) += mips_ksyms.o module.o |
15 | obj-y += mips_ksyms.o module.o | ||
16 | obj-$(CONFIG_32BIT) += module-elf32.o | ||
17 | obj-$(CONFIG_64BIT) += module-elf64.o | ||
18 | endif | ||
19 | 15 | ||
20 | obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o | 16 | obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o |
21 | obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o | 17 | obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o |
@@ -38,12 +34,18 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o | |||
38 | 34 | ||
39 | obj-$(CONFIG_SMP) += smp.o | 35 | obj-$(CONFIG_SMP) += smp.o |
40 | 36 | ||
37 | obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o | ||
38 | |||
39 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o | ||
40 | obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o | ||
41 | |||
41 | obj-$(CONFIG_NO_ISA) += dma-no-isa.o | 42 | obj-$(CONFIG_NO_ISA) += dma-no-isa.o |
42 | obj-$(CONFIG_I8259) += i8259.o | 43 | obj-$(CONFIG_I8259) += i8259.o |
43 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o | 44 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o |
44 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o | 45 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o |
45 | obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o | 46 | obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o |
46 | obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o | 47 | obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o |
48 | obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o | ||
47 | 49 | ||
48 | obj-$(CONFIG_32BIT) += scall32-o32.o | 50 | obj-$(CONFIG_32BIT) += scall32-o32.o |
49 | obj-$(CONFIG_64BIT) += scall64-64.o | 51 | obj-$(CONFIG_64BIT) += scall64-64.o |
@@ -57,8 +59,6 @@ obj-$(CONFIG_PROC_FS) += proc.o | |||
57 | 59 | ||
58 | obj-$(CONFIG_64BIT) += cpu-bugs64.o | 60 | obj-$(CONFIG_64BIT) += cpu-bugs64.o |
59 | 61 | ||
60 | obj-$(CONFIG_GEN_RTC) += genrtc.o | ||
61 | |||
62 | CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) | 62 | CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) |
63 | CFLAGS_ioctl32.o += -Ifs/ | 63 | CFLAGS_ioctl32.o += -Ifs/ |
64 | 64 | ||
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 2c11abb5a406..ca6b03c773be 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -95,6 +95,7 @@ void output_thread_info_defines(void) | |||
95 | offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count); | 95 | offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count); |
96 | offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit); | 96 | offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit); |
97 | offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block); | 97 | offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block); |
98 | offset("#define TI_TP_VALUE ", struct thread_info, tp_value); | ||
98 | constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER); | 99 | constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER); |
99 | constant("#define _THREAD_SIZE ", THREAD_SIZE); | 100 | constant("#define _THREAD_SIZE ", THREAD_SIZE); |
100 | constant("#define _THREAD_MASK ", THREAD_MASK); | 101 | constant("#define _THREAD_MASK ", THREAD_MASK); |
@@ -240,6 +241,7 @@ void output_mm_defines(void) | |||
240 | linefeed; | 241 | linefeed; |
241 | } | 242 | } |
242 | 243 | ||
244 | #ifdef CONFIG_32BIT | ||
243 | void output_sc_defines(void) | 245 | void output_sc_defines(void) |
244 | { | 246 | { |
245 | text("/* Linux sigcontext offsets. */"); | 247 | text("/* Linux sigcontext offsets. */"); |
@@ -251,10 +253,29 @@ void output_sc_defines(void) | |||
251 | offset("#define SC_STATUS ", struct sigcontext, sc_status); | 253 | offset("#define SC_STATUS ", struct sigcontext, sc_status); |
252 | offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); | 254 | offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); |
253 | offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir); | 255 | offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir); |
254 | offset("#define SC_CAUSE ", struct sigcontext, sc_cause); | 256 | offset("#define SC_HI1 ", struct sigcontext, sc_hi1); |
255 | offset("#define SC_BADVADDR ", struct sigcontext, sc_badvaddr); | 257 | offset("#define SC_LO1 ", struct sigcontext, sc_lo1); |
258 | offset("#define SC_HI2 ", struct sigcontext, sc_hi2); | ||
259 | offset("#define SC_LO2 ", struct sigcontext, sc_lo2); | ||
260 | offset("#define SC_HI3 ", struct sigcontext, sc_hi3); | ||
261 | offset("#define SC_LO3 ", struct sigcontext, sc_lo3); | ||
256 | linefeed; | 262 | linefeed; |
257 | } | 263 | } |
264 | #endif | ||
265 | |||
266 | #ifdef CONFIG_64BIT | ||
267 | void output_sc_defines(void) | ||
268 | { | ||
269 | text("/* Linux sigcontext offsets. */"); | ||
270 | offset("#define SC_REGS ", struct sigcontext, sc_regs); | ||
271 | offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs); | ||
272 | offset("#define SC_MDHI ", struct sigcontext, sc_hi); | ||
273 | offset("#define SC_MDLO ", struct sigcontext, sc_lo); | ||
274 | offset("#define SC_PC ", struct sigcontext, sc_pc); | ||
275 | offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); | ||
276 | linefeed; | ||
277 | } | ||
278 | #endif | ||
258 | 279 | ||
259 | #ifdef CONFIG_MIPS32_COMPAT | 280 | #ifdef CONFIG_MIPS32_COMPAT |
260 | void output_sc32_defines(void) | 281 | void output_sc32_defines(void) |
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 6b645fbb1ddc..d8e2674a1543 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -52,7 +52,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |||
52 | 52 | ||
53 | #include <asm/processor.h> | 53 | #include <asm/processor.h> |
54 | #include <linux/module.h> | 54 | #include <linux/module.h> |
55 | #include <linux/config.h> | ||
56 | #include <linux/elfcore.h> | 55 | #include <linux/elfcore.h> |
57 | #include <linux/compat.h> | 56 | #include <linux/compat.h> |
58 | 57 | ||
@@ -116,4 +115,7 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); | |||
116 | #undef MODULE_DESCRIPTION | 115 | #undef MODULE_DESCRIPTION |
117 | #undef MODULE_AUTHOR | 116 | #undef MODULE_AUTHOR |
118 | 117 | ||
118 | #undef TASK_SIZE | ||
119 | #define TASK_SIZE TASK_SIZE32 | ||
120 | |||
119 | #include "../../../fs/binfmt_elf.c" | 121 | #include "../../../fs/binfmt_elf.c" |
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index b4075e99c452..cec5f327e360 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -54,7 +54,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |||
54 | 54 | ||
55 | #include <asm/processor.h> | 55 | #include <asm/processor.h> |
56 | #include <linux/module.h> | 56 | #include <linux/module.h> |
57 | #include <linux/config.h> | ||
58 | #include <linux/elfcore.h> | 57 | #include <linux/elfcore.h> |
59 | #include <linux/compat.h> | 58 | #include <linux/compat.h> |
60 | 59 | ||
@@ -98,7 +97,7 @@ struct elf_prpsinfo32 | |||
98 | #define init_elf_binfmt init_elf32_binfmt | 97 | #define init_elf_binfmt init_elf32_binfmt |
99 | 98 | ||
100 | #define jiffies_to_timeval jiffies_to_compat_timeval | 99 | #define jiffies_to_timeval jiffies_to_compat_timeval |
101 | static __inline__ void | 100 | static inline void |
102 | jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) | 101 | jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) |
103 | { | 102 | { |
104 | /* | 103 | /* |
@@ -113,21 +112,26 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) | |||
113 | #undef ELF_CORE_COPY_REGS | 112 | #undef ELF_CORE_COPY_REGS |
114 | #define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs); | 113 | #define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs); |
115 | 114 | ||
116 | void elf32_core_copy_regs(elf_gregset_t _dest, struct pt_regs *_regs) | 115 | void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs) |
117 | { | 116 | { |
118 | int i; | 117 | int i; |
119 | 118 | ||
120 | memset(_dest, 0, sizeof(elf_gregset_t)); | 119 | for (i = 0; i < EF_R0; i++) |
121 | 120 | grp[i] = 0; | |
122 | /* XXXKW the 6 is from EF_REG0 in gdb/gdb/mips-linux-tdep.c, include/asm-mips/reg.h */ | 121 | grp[EF_R0] = 0; |
123 | for (i=6; i<38; i++) | 122 | for (i = 1; i <= 31; i++) |
124 | _dest[i] = (elf_greg_t) _regs->regs[i-6]; | 123 | grp[EF_R0 + i] = (elf_greg_t) regs->regs[i]; |
125 | _dest[i++] = (elf_greg_t) _regs->lo; | 124 | grp[EF_R26] = 0; |
126 | _dest[i++] = (elf_greg_t) _regs->hi; | 125 | grp[EF_R27] = 0; |
127 | _dest[i++] = (elf_greg_t) _regs->cp0_epc; | 126 | grp[EF_LO] = (elf_greg_t) regs->lo; |
128 | _dest[i++] = (elf_greg_t) _regs->cp0_badvaddr; | 127 | grp[EF_HI] = (elf_greg_t) regs->hi; |
129 | _dest[i++] = (elf_greg_t) _regs->cp0_status; | 128 | grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; |
130 | _dest[i++] = (elf_greg_t) _regs->cp0_cause; | 129 | grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; |
130 | grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; | ||
131 | grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; | ||
132 | #ifdef EF_UNUSED0 | ||
133 | grp[EF_UNUSED0] = 0; | ||
134 | #endif | ||
131 | } | 135 | } |
132 | 136 | ||
133 | MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries"); | 137 | MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries"); |
@@ -136,4 +140,7 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); | |||
136 | #undef MODULE_DESCRIPTION | 140 | #undef MODULE_DESCRIPTION |
137 | #undef MODULE_AUTHOR | 141 | #undef MODULE_AUTHOR |
138 | 142 | ||
143 | #undef TASK_SIZE | ||
144 | #define TASK_SIZE TASK_SIZE32 | ||
145 | |||
139 | #include "../../../fs/binfmt_elf.c" | 146 | #include "../../../fs/binfmt_elf.c" |
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 01117e977a7f..374de839558d 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/branch.h> | 12 | #include <asm/branch.h> |
13 | #include <asm/cpu.h> | 13 | #include <asm/cpu.h> |
14 | #include <asm/cpu-features.h> | 14 | #include <asm/cpu-features.h> |
15 | #include <asm/fpu.h> | ||
15 | #include <asm/inst.h> | 16 | #include <asm/inst.h> |
16 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
@@ -21,7 +22,7 @@ | |||
21 | */ | 22 | */ |
22 | int __compute_return_epc(struct pt_regs *regs) | 23 | int __compute_return_epc(struct pt_regs *regs) |
23 | { | 24 | { |
24 | unsigned int *addr, bit, fcr31; | 25 | unsigned int *addr, bit, fcr31, dspcontrol; |
25 | long epc; | 26 | long epc; |
26 | union mips_instruction insn; | 27 | union mips_instruction insn; |
27 | 28 | ||
@@ -98,6 +99,18 @@ int __compute_return_epc(struct pt_regs *regs) | |||
98 | epc += 8; | 99 | epc += 8; |
99 | regs->cp0_epc = epc; | 100 | regs->cp0_epc = epc; |
100 | break; | 101 | break; |
102 | case bposge32_op: | ||
103 | if (!cpu_has_dsp) | ||
104 | goto sigill; | ||
105 | |||
106 | dspcontrol = rddsp(0x01); | ||
107 | |||
108 | if (dspcontrol >= 32) { | ||
109 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
110 | } else | ||
111 | epc += 8; | ||
112 | regs->cp0_epc = epc; | ||
113 | break; | ||
101 | } | 114 | } |
102 | break; | 115 | break; |
103 | 116 | ||
@@ -161,10 +174,13 @@ int __compute_return_epc(struct pt_regs *regs) | |||
161 | * And now the FPA/cp1 branch instructions. | 174 | * And now the FPA/cp1 branch instructions. |
162 | */ | 175 | */ |
163 | case cop1_op: | 176 | case cop1_op: |
164 | if (!cpu_has_fpu) | 177 | preempt_disable(); |
165 | fcr31 = current->thread.fpu.soft.fcr31; | 178 | if (is_fpu_owner()) |
166 | else | ||
167 | asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); | 179 | asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); |
180 | else | ||
181 | fcr31 = current->thread.fpu.hard.fcr31; | ||
182 | preempt_enable(); | ||
183 | |||
168 | bit = (insn.i_format.rt >> 2); | 184 | bit = (insn.i_format.rt >> 2); |
169 | bit += (bit != 0); | 185 | bit += (bit != 0); |
170 | bit += 23; | 186 | bit += 23; |
@@ -196,4 +212,9 @@ unaligned: | |||
196 | printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); | 212 | printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); |
197 | force_sig(SIGBUS, current); | 213 | force_sig(SIGBUS, current); |
198 | return -EFAULT; | 214 | return -EFAULT; |
215 | |||
216 | sigill: | ||
217 | printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); | ||
218 | force_sig(SIGBUS, current); | ||
219 | return -EFAULT; | ||
199 | } | 220 | } |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 7685f8baf3f0..a263fb7a3971 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -2,9 +2,9 @@ | |||
2 | * Processor capabilities determination functions. | 2 | * Processor capabilities determination functions. |
3 | * | 3 | * |
4 | * Copyright (C) xxxx the Anonymous | 4 | * Copyright (C) xxxx the Anonymous |
5 | * Copyright (C) 2003 Maciej W. Rozycki | 5 | * Copyright (C) 2003, 2004 Maciej W. Rozycki |
6 | * Copyright (C) 1994 - 2003 Ralf Baechle | 6 | * Copyright (C) 1994 - 2003 Ralf Baechle |
7 | * Copyright (C) 2001 MIPS Inc. | 7 | * Copyright (C) 2001, 2004 MIPS Inc. |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | 19 | ||
20 | #include <asm/bugs.h> | ||
21 | #include <asm/cpu.h> | 20 | #include <asm/cpu.h> |
22 | #include <asm/fpu.h> | 21 | #include <asm/fpu.h> |
23 | #include <asm/mipsregs.h> | 22 | #include <asm/mipsregs.h> |
@@ -51,36 +50,48 @@ static void r4k_wait(void) | |||
51 | ".set\tmips0"); | 50 | ".set\tmips0"); |
52 | } | 51 | } |
53 | 52 | ||
54 | /* | 53 | /* The Au1xxx wait is available only if using 32khz counter or |
55 | * The Au1xxx wait is available only if we run CONFIG_PM and | 54 | * external timer source, but specifically not CP0 Counter. */ |
56 | * the timer setup found we had a 32KHz counter available. | 55 | int allow_au1k_wait; |
57 | * There are still problems with functions that may call au1k_wait | ||
58 | * directly, but that will be discovered pretty quickly. | ||
59 | */ | ||
60 | extern void (*au1k_wait_ptr)(void); | ||
61 | 56 | ||
62 | void au1k_wait(void) | 57 | static void au1k_wait(void) |
63 | { | 58 | { |
64 | #ifdef CONFIG_PM | ||
65 | /* using the wait instruction makes CP0 counter unusable */ | 59 | /* using the wait instruction makes CP0 counter unusable */ |
66 | __asm__(".set\tmips3\n\t" | 60 | __asm__(".set mips3\n\t" |
61 | "cache 0x14, 0(%0)\n\t" | ||
62 | "cache 0x14, 32(%0)\n\t" | ||
63 | "sync\n\t" | ||
64 | "nop\n\t" | ||
67 | "wait\n\t" | 65 | "wait\n\t" |
68 | "nop\n\t" | 66 | "nop\n\t" |
69 | "nop\n\t" | 67 | "nop\n\t" |
70 | "nop\n\t" | 68 | "nop\n\t" |
71 | "nop\n\t" | 69 | "nop\n\t" |
72 | ".set\tmips0"); | 70 | ".set mips0\n\t" |
73 | #else | 71 | : : "r" (au1k_wait)); |
74 | __asm__("nop\n\t" | ||
75 | "nop"); | ||
76 | #endif | ||
77 | } | 72 | } |
78 | 73 | ||
74 | static int __initdata nowait = 0; | ||
75 | |||
76 | int __init wait_disable(char *s) | ||
77 | { | ||
78 | nowait = 1; | ||
79 | |||
80 | return 1; | ||
81 | } | ||
82 | |||
83 | __setup("nowait", wait_disable); | ||
84 | |||
79 | static inline void check_wait(void) | 85 | static inline void check_wait(void) |
80 | { | 86 | { |
81 | struct cpuinfo_mips *c = ¤t_cpu_data; | 87 | struct cpuinfo_mips *c = ¤t_cpu_data; |
82 | 88 | ||
83 | printk("Checking for 'wait' instruction... "); | 89 | printk("Checking for 'wait' instruction... "); |
90 | if (nowait) { | ||
91 | printk (" disabled.\n"); | ||
92 | return; | ||
93 | } | ||
94 | |||
84 | switch (c->cputype) { | 95 | switch (c->cputype) { |
85 | case CPU_R3081: | 96 | case CPU_R3081: |
86 | case CPU_R3081E: | 97 | case CPU_R3081E: |
@@ -109,22 +120,22 @@ static inline void check_wait(void) | |||
109 | /* case CPU_20KC:*/ | 120 | /* case CPU_20KC:*/ |
110 | case CPU_24K: | 121 | case CPU_24K: |
111 | case CPU_25KF: | 122 | case CPU_25KF: |
123 | case CPU_34K: | ||
124 | case CPU_PR4450: | ||
112 | cpu_wait = r4k_wait; | 125 | cpu_wait = r4k_wait; |
113 | printk(" available.\n"); | 126 | printk(" available.\n"); |
114 | break; | 127 | break; |
115 | #ifdef CONFIG_PM | ||
116 | case CPU_AU1000: | 128 | case CPU_AU1000: |
117 | case CPU_AU1100: | 129 | case CPU_AU1100: |
118 | case CPU_AU1500: | 130 | case CPU_AU1500: |
119 | if (au1k_wait_ptr != NULL) { | 131 | case CPU_AU1550: |
120 | cpu_wait = au1k_wait_ptr; | 132 | case CPU_AU1200: |
133 | if (allow_au1k_wait) { | ||
134 | cpu_wait = au1k_wait; | ||
121 | printk(" available.\n"); | 135 | printk(" available.\n"); |
122 | } | 136 | } else |
123 | else { | ||
124 | printk(" unavailable.\n"); | 137 | printk(" unavailable.\n"); |
125 | } | ||
126 | break; | 138 | break; |
127 | #endif | ||
128 | default: | 139 | default: |
129 | printk(" unavailable.\n"); | 140 | printk(" unavailable.\n"); |
130 | break; | 141 | break; |
@@ -180,7 +191,7 @@ static inline int __cpu_has_fpu(void) | |||
180 | return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); | 191 | return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); |
181 | } | 192 | } |
182 | 193 | ||
183 | #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4KTLB \ | 194 | #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ |
184 | | MIPS_CPU_COUNTER) | 195 | | MIPS_CPU_COUNTER) |
185 | 196 | ||
186 | static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | 197 | static inline void cpu_probe_legacy(struct cpuinfo_mips *c) |
@@ -189,7 +200,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | |||
189 | case PRID_IMP_R2000: | 200 | case PRID_IMP_R2000: |
190 | c->cputype = CPU_R2000; | 201 | c->cputype = CPU_R2000; |
191 | c->isa_level = MIPS_CPU_ISA_I; | 202 | c->isa_level = MIPS_CPU_ISA_I; |
192 | c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX; | 203 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | |
204 | MIPS_CPU_NOFPUEX; | ||
193 | if (__cpu_has_fpu()) | 205 | if (__cpu_has_fpu()) |
194 | c->options |= MIPS_CPU_FPU; | 206 | c->options |= MIPS_CPU_FPU; |
195 | c->tlbsize = 64; | 207 | c->tlbsize = 64; |
@@ -203,7 +215,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | |||
203 | else | 215 | else |
204 | c->cputype = CPU_R3000; | 216 | c->cputype = CPU_R3000; |
205 | c->isa_level = MIPS_CPU_ISA_I; | 217 | c->isa_level = MIPS_CPU_ISA_I; |
206 | c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX; | 218 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | |
219 | MIPS_CPU_NOFPUEX; | ||
207 | if (__cpu_has_fpu()) | 220 | if (__cpu_has_fpu()) |
208 | c->options |= MIPS_CPU_FPU; | 221 | c->options |= MIPS_CPU_FPU; |
209 | c->tlbsize = 64; | 222 | c->tlbsize = 64; |
@@ -266,7 +279,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | |||
266 | case PRID_IMP_R4600: | 279 | case PRID_IMP_R4600: |
267 | c->cputype = CPU_R4600; | 280 | c->cputype = CPU_R4600; |
268 | c->isa_level = MIPS_CPU_ISA_III; | 281 | c->isa_level = MIPS_CPU_ISA_III; |
269 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; | 282 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | |
283 | MIPS_CPU_LLSC; | ||
270 | c->tlbsize = 48; | 284 | c->tlbsize = 48; |
271 | break; | 285 | break; |
272 | #if 0 | 286 | #if 0 |
@@ -285,7 +299,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | |||
285 | #endif | 299 | #endif |
286 | case PRID_IMP_TX39: | 300 | case PRID_IMP_TX39: |
287 | c->isa_level = MIPS_CPU_ISA_I; | 301 | c->isa_level = MIPS_CPU_ISA_I; |
288 | c->options = MIPS_CPU_TLB; | 302 | c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; |
289 | 303 | ||
290 | if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { | 304 | if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { |
291 | c->cputype = CPU_TX3927; | 305 | c->cputype = CPU_TX3927; |
@@ -421,74 +435,147 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c) | |||
421 | } | 435 | } |
422 | } | 436 | } |
423 | 437 | ||
424 | static inline void decode_config1(struct cpuinfo_mips *c) | 438 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) |
425 | { | 439 | { |
426 | unsigned long config0 = read_c0_config(); | 440 | unsigned int config0; |
427 | unsigned long config1; | 441 | int isa; |
442 | |||
443 | config0 = read_c0_config(); | ||
428 | 444 | ||
429 | if ((config0 & (1 << 31)) == 0) | 445 | if (((config0 & MIPS_CONF_MT) >> 7) == 1) |
430 | return; /* actually wort a panic() */ | 446 | c->options |= MIPS_CPU_TLB; |
447 | isa = (config0 & MIPS_CONF_AT) >> 13; | ||
448 | switch (isa) { | ||
449 | case 0: | ||
450 | c->isa_level = MIPS_CPU_ISA_M32; | ||
451 | break; | ||
452 | case 2: | ||
453 | c->isa_level = MIPS_CPU_ISA_M64; | ||
454 | break; | ||
455 | default: | ||
456 | panic("Unsupported ISA type, cp0.config0.at: %d.", isa); | ||
457 | } | ||
458 | |||
459 | return config0 & MIPS_CONF_M; | ||
460 | } | ||
461 | |||
462 | static inline unsigned int decode_config1(struct cpuinfo_mips *c) | ||
463 | { | ||
464 | unsigned int config1; | ||
431 | 465 | ||
432 | /* MIPS32 or MIPS64 compliant CPU. Read Config 1 register. */ | ||
433 | c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | | ||
434 | MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | | ||
435 | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; | ||
436 | config1 = read_c0_config1(); | 466 | config1 = read_c0_config1(); |
437 | if (config1 & (1 << 3)) | 467 | |
468 | if (config1 & MIPS_CONF1_MD) | ||
469 | c->ases |= MIPS_ASE_MDMX; | ||
470 | if (config1 & MIPS_CONF1_WR) | ||
438 | c->options |= MIPS_CPU_WATCH; | 471 | c->options |= MIPS_CPU_WATCH; |
439 | if (config1 & (1 << 2)) | 472 | if (config1 & MIPS_CONF1_CA) |
440 | c->options |= MIPS_CPU_MIPS16; | 473 | c->ases |= MIPS_ASE_MIPS16; |
441 | if (config1 & (1 << 1)) | 474 | if (config1 & MIPS_CONF1_EP) |
442 | c->options |= MIPS_CPU_EJTAG; | 475 | c->options |= MIPS_CPU_EJTAG; |
443 | if (config1 & 1) { | 476 | if (config1 & MIPS_CONF1_FP) { |
444 | c->options |= MIPS_CPU_FPU; | 477 | c->options |= MIPS_CPU_FPU; |
445 | c->options |= MIPS_CPU_32FPR; | 478 | c->options |= MIPS_CPU_32FPR; |
446 | } | 479 | } |
480 | if (cpu_has_tlb) | ||
481 | c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; | ||
482 | |||
483 | return config1 & MIPS_CONF_M; | ||
484 | } | ||
485 | |||
486 | static inline unsigned int decode_config2(struct cpuinfo_mips *c) | ||
487 | { | ||
488 | unsigned int config2; | ||
489 | |||
490 | config2 = read_c0_config2(); | ||
491 | |||
492 | if (config2 & MIPS_CONF2_SL) | ||
493 | c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; | ||
494 | |||
495 | return config2 & MIPS_CONF_M; | ||
496 | } | ||
497 | |||
498 | static inline unsigned int decode_config3(struct cpuinfo_mips *c) | ||
499 | { | ||
500 | unsigned int config3; | ||
501 | |||
502 | config3 = read_c0_config3(); | ||
503 | |||
504 | if (config3 & MIPS_CONF3_SM) | ||
505 | c->ases |= MIPS_ASE_SMARTMIPS; | ||
506 | if (config3 & MIPS_CONF3_DSP) | ||
507 | c->ases |= MIPS_ASE_DSP; | ||
508 | if (config3 & MIPS_CONF3_VINT) | ||
509 | c->options |= MIPS_CPU_VINT; | ||
510 | if (config3 & MIPS_CONF3_VEIC) | ||
511 | c->options |= MIPS_CPU_VEIC; | ||
512 | if (config3 & MIPS_CONF3_MT) | ||
513 | c->ases |= MIPS_ASE_MIPSMT; | ||
514 | |||
515 | return config3 & MIPS_CONF_M; | ||
516 | } | ||
517 | |||
518 | static inline void decode_configs(struct cpuinfo_mips *c) | ||
519 | { | ||
520 | /* MIPS32 or MIPS64 compliant CPU. */ | ||
521 | c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | | ||
522 | MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; | ||
523 | |||
447 | c->scache.flags = MIPS_CACHE_NOT_PRESENT; | 524 | c->scache.flags = MIPS_CACHE_NOT_PRESENT; |
448 | 525 | ||
449 | c->tlbsize = ((config1 >> 25) & 0x3f) + 1; | 526 | /* Read Config registers. */ |
527 | if (!decode_config0(c)) | ||
528 | return; /* actually worth a panic() */ | ||
529 | if (!decode_config1(c)) | ||
530 | return; | ||
531 | if (!decode_config2(c)) | ||
532 | return; | ||
533 | if (!decode_config3(c)) | ||
534 | return; | ||
450 | } | 535 | } |
451 | 536 | ||
452 | static inline void cpu_probe_mips(struct cpuinfo_mips *c) | 537 | static inline void cpu_probe_mips(struct cpuinfo_mips *c) |
453 | { | 538 | { |
454 | decode_config1(c); | 539 | decode_configs(c); |
455 | switch (c->processor_id & 0xff00) { | 540 | switch (c->processor_id & 0xff00) { |
456 | case PRID_IMP_4KC: | 541 | case PRID_IMP_4KC: |
457 | c->cputype = CPU_4KC; | 542 | c->cputype = CPU_4KC; |
458 | c->isa_level = MIPS_CPU_ISA_M32; | ||
459 | break; | 543 | break; |
460 | case PRID_IMP_4KEC: | 544 | case PRID_IMP_4KEC: |
461 | c->cputype = CPU_4KEC; | 545 | c->cputype = CPU_4KEC; |
462 | c->isa_level = MIPS_CPU_ISA_M32; | 546 | break; |
547 | case PRID_IMP_4KECR2: | ||
548 | c->cputype = CPU_4KEC; | ||
463 | break; | 549 | break; |
464 | case PRID_IMP_4KSC: | 550 | case PRID_IMP_4KSC: |
551 | case PRID_IMP_4KSD: | ||
465 | c->cputype = CPU_4KSC; | 552 | c->cputype = CPU_4KSC; |
466 | c->isa_level = MIPS_CPU_ISA_M32; | ||
467 | break; | 553 | break; |
468 | case PRID_IMP_5KC: | 554 | case PRID_IMP_5KC: |
469 | c->cputype = CPU_5KC; | 555 | c->cputype = CPU_5KC; |
470 | c->isa_level = MIPS_CPU_ISA_M64; | ||
471 | break; | 556 | break; |
472 | case PRID_IMP_20KC: | 557 | case PRID_IMP_20KC: |
473 | c->cputype = CPU_20KC; | 558 | c->cputype = CPU_20KC; |
474 | c->isa_level = MIPS_CPU_ISA_M64; | ||
475 | break; | 559 | break; |
476 | case PRID_IMP_24K: | 560 | case PRID_IMP_24K: |
561 | case PRID_IMP_24KE: | ||
477 | c->cputype = CPU_24K; | 562 | c->cputype = CPU_24K; |
478 | c->isa_level = MIPS_CPU_ISA_M32; | ||
479 | break; | 563 | break; |
480 | case PRID_IMP_25KF: | 564 | case PRID_IMP_25KF: |
481 | c->cputype = CPU_25KF; | 565 | c->cputype = CPU_25KF; |
482 | c->isa_level = MIPS_CPU_ISA_M64; | ||
483 | /* Probe for L2 cache */ | 566 | /* Probe for L2 cache */ |
484 | c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; | 567 | c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; |
485 | break; | 568 | break; |
569 | case PRID_IMP_34K: | ||
570 | c->cputype = CPU_34K; | ||
571 | c->isa_level = MIPS_CPU_ISA_M32; | ||
572 | break; | ||
486 | } | 573 | } |
487 | } | 574 | } |
488 | 575 | ||
489 | static inline void cpu_probe_alchemy(struct cpuinfo_mips *c) | 576 | static inline void cpu_probe_alchemy(struct cpuinfo_mips *c) |
490 | { | 577 | { |
491 | decode_config1(c); | 578 | decode_configs(c); |
492 | switch (c->processor_id & 0xff00) { | 579 | switch (c->processor_id & 0xff00) { |
493 | case PRID_IMP_AU1_REV1: | 580 | case PRID_IMP_AU1_REV1: |
494 | case PRID_IMP_AU1_REV2: | 581 | case PRID_IMP_AU1_REV2: |
@@ -505,50 +592,70 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c) | |||
505 | case 3: | 592 | case 3: |
506 | c->cputype = CPU_AU1550; | 593 | c->cputype = CPU_AU1550; |
507 | break; | 594 | break; |
595 | case 4: | ||
596 | c->cputype = CPU_AU1200; | ||
597 | break; | ||
508 | default: | 598 | default: |
509 | panic("Unknown Au Core!"); | 599 | panic("Unknown Au Core!"); |
510 | break; | 600 | break; |
511 | } | 601 | } |
512 | c->isa_level = MIPS_CPU_ISA_M32; | ||
513 | break; | 602 | break; |
514 | } | 603 | } |
515 | } | 604 | } |
516 | 605 | ||
517 | static inline void cpu_probe_sibyte(struct cpuinfo_mips *c) | 606 | static inline void cpu_probe_sibyte(struct cpuinfo_mips *c) |
518 | { | 607 | { |
519 | decode_config1(c); | 608 | decode_configs(c); |
609 | |||
610 | /* | ||
611 | * For historical reasons the SB1 comes with it's own variant of | ||
612 | * cache code which eventually will be folded into c-r4k.c. Until | ||
613 | * then we pretend it's got it's own cache architecture. | ||
614 | */ | ||
615 | c->options &= ~MIPS_CPU_4K_CACHE; | ||
616 | c->options |= MIPS_CPU_SB1_CACHE; | ||
617 | |||
520 | switch (c->processor_id & 0xff00) { | 618 | switch (c->processor_id & 0xff00) { |
521 | case PRID_IMP_SB1: | 619 | case PRID_IMP_SB1: |
522 | c->cputype = CPU_SB1; | 620 | c->cputype = CPU_SB1; |
523 | c->isa_level = MIPS_CPU_ISA_M64; | 621 | #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS |
524 | c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | | ||
525 | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC | | ||
526 | MIPS_CPU_MCHECK | MIPS_CPU_EJTAG | | ||
527 | MIPS_CPU_WATCH | MIPS_CPU_LLSC; | ||
528 | #ifndef CONFIG_SB1_PASS_1_WORKAROUNDS | ||
529 | /* FPU in pass1 is known to have issues. */ | 622 | /* FPU in pass1 is known to have issues. */ |
530 | c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; | 623 | c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); |
531 | #endif | 624 | #endif |
532 | break; | 625 | break; |
626 | case PRID_IMP_SB1A: | ||
627 | c->cputype = CPU_SB1A; | ||
628 | break; | ||
533 | } | 629 | } |
534 | } | 630 | } |
535 | 631 | ||
536 | static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c) | 632 | static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c) |
537 | { | 633 | { |
538 | decode_config1(c); | 634 | decode_configs(c); |
539 | switch (c->processor_id & 0xff00) { | 635 | switch (c->processor_id & 0xff00) { |
540 | case PRID_IMP_SR71000: | 636 | case PRID_IMP_SR71000: |
541 | c->cputype = CPU_SR71000; | 637 | c->cputype = CPU_SR71000; |
542 | c->isa_level = MIPS_CPU_ISA_M64; | ||
543 | c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | | ||
544 | MIPS_CPU_4KTLB | MIPS_CPU_FPU | | ||
545 | MIPS_CPU_COUNTER | MIPS_CPU_MCHECK; | ||
546 | c->scache.ways = 8; | 638 | c->scache.ways = 8; |
547 | c->tlbsize = 64; | 639 | c->tlbsize = 64; |
548 | break; | 640 | break; |
549 | } | 641 | } |
550 | } | 642 | } |
551 | 643 | ||
644 | static inline void cpu_probe_philips(struct cpuinfo_mips *c) | ||
645 | { | ||
646 | decode_configs(c); | ||
647 | switch (c->processor_id & 0xff00) { | ||
648 | case PRID_IMP_PR4450: | ||
649 | c->cputype = CPU_PR4450; | ||
650 | c->isa_level = MIPS_CPU_ISA_M32; | ||
651 | break; | ||
652 | default: | ||
653 | panic("Unknown Philips Core!"); /* REVISIT: die? */ | ||
654 | break; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | |||
552 | __init void cpu_probe(void) | 659 | __init void cpu_probe(void) |
553 | { | 660 | { |
554 | struct cpuinfo_mips *c = ¤t_cpu_data; | 661 | struct cpuinfo_mips *c = ¤t_cpu_data; |
@@ -571,15 +678,24 @@ __init void cpu_probe(void) | |||
571 | case PRID_COMP_SIBYTE: | 678 | case PRID_COMP_SIBYTE: |
572 | cpu_probe_sibyte(c); | 679 | cpu_probe_sibyte(c); |
573 | break; | 680 | break; |
574 | |||
575 | case PRID_COMP_SANDCRAFT: | 681 | case PRID_COMP_SANDCRAFT: |
576 | cpu_probe_sandcraft(c); | 682 | cpu_probe_sandcraft(c); |
577 | break; | 683 | break; |
684 | case PRID_COMP_PHILIPS: | ||
685 | cpu_probe_philips(c); | ||
686 | break; | ||
578 | default: | 687 | default: |
579 | c->cputype = CPU_UNKNOWN; | 688 | c->cputype = CPU_UNKNOWN; |
580 | } | 689 | } |
581 | if (c->options & MIPS_CPU_FPU) | 690 | if (c->options & MIPS_CPU_FPU) { |
582 | c->fpu_id = cpu_get_fpu_id(); | 691 | c->fpu_id = cpu_get_fpu_id(); |
692 | |||
693 | if (c->isa_level == MIPS_CPU_ISA_M32 || | ||
694 | c->isa_level == MIPS_CPU_ISA_M64) { | ||
695 | if (c->fpu_id & MIPS_FPIR_3D) | ||
696 | c->ases |= MIPS_ASE_MIPS3D; | ||
697 | } | ||
698 | } | ||
583 | } | 699 | } |
584 | 700 | ||
585 | __init void cpu_report(void) | 701 | __init void cpu_report(void) |
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
new file mode 100644
index 000000000000..6df8b07741e3
--- /dev/null
+++ b/arch/mips/kernel/dma-no-isa.c
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004 by Ralf Baechle | ||
7 | * | ||
8 | * Dummy ISA DMA functions for systems that don't have ISA but share drivers | ||
9 | * with ISA such as legacy free PCI. | ||
10 | */ | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | |||
15 | DEFINE_SPINLOCK(dma_spin_lock); | ||
16 | |||
17 | int request_dma(unsigned int dmanr, const char * device_id) | ||
18 | { | ||
19 | return -EINVAL; | ||
20 | } | ||
21 | |||
22 | void free_dma(unsigned int dmanr) | ||
23 | { | ||
24 | } | ||
25 | |||
26 | EXPORT_SYMBOL(dma_spin_lock); | ||
27 | EXPORT_SYMBOL(request_dma); | ||
28 | EXPORT_SYMBOL(free_dma); | ||
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 5eb429137e06..83c87fe4ee4f 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -19,11 +19,11 @@ | |||
19 | #include <asm/war.h> | 19 | #include <asm/war.h> |
20 | 20 | ||
21 | #ifdef CONFIG_PREEMPT | 21 | #ifdef CONFIG_PREEMPT |
22 | .macro preempt_stop reg=t0 | 22 | .macro preempt_stop |
23 | .endm | 23 | .endm |
24 | #else | 24 | #else |
25 | .macro preempt_stop reg=t0 | 25 | .macro preempt_stop |
26 | local_irq_disable \reg | 26 | local_irq_disable |
27 | .endm | 27 | .endm |
28 | #define resume_kernel restore_all | 28 | #define resume_kernel restore_all |
29 | #endif | 29 | #endif |
@@ -37,17 +37,18 @@ FEXPORT(ret_from_irq) | |||
37 | andi t0, t0, KU_USER | 37 | andi t0, t0, KU_USER |
38 | beqz t0, resume_kernel | 38 | beqz t0, resume_kernel |
39 | 39 | ||
40 | FEXPORT(resume_userspace) | 40 | resume_userspace: |
41 | local_irq_disable t0 # make sure we dont miss an | 41 | local_irq_disable # make sure we dont miss an |
42 | # interrupt setting need_resched | 42 | # interrupt setting need_resched |
43 | # between sampling and return | 43 | # between sampling and return |
44 | LONG_L a2, TI_FLAGS($28) # current->work | 44 | LONG_L a2, TI_FLAGS($28) # current->work |
45 | andi a2, _TIF_WORK_MASK # (ignoring syscall_trace) | 45 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) |
46 | bnez a2, work_pending | 46 | bnez t0, work_pending |
47 | j restore_all | 47 | j restore_all |
48 | 48 | ||
49 | #ifdef CONFIG_PREEMPT | 49 | #ifdef CONFIG_PREEMPT |
50 | ENTRY(resume_kernel) | 50 | resume_kernel: |
51 | local_irq_disable | ||
51 | lw t0, TI_PRE_COUNT($28) | 52 | lw t0, TI_PRE_COUNT($28) |
52 | bnez t0, restore_all | 53 | bnez t0, restore_all |
53 | need_resched: | 54 | need_resched: |
@@ -57,12 +58,7 @@ need_resched: | |||
57 | LONG_L t0, PT_STATUS(sp) # Interrupts off? | 58 | LONG_L t0, PT_STATUS(sp) # Interrupts off? |
58 | andi t0, 1 | 59 | andi t0, 1 |
59 | beqz t0, restore_all | 60 | beqz t0, restore_all |
60 | li t0, PREEMPT_ACTIVE | 61 | jal preempt_schedule_irq |
61 | sw t0, TI_PRE_COUNT($28) | ||
62 | local_irq_enable t0 | ||
63 | jal schedule | ||
64 | sw zero, TI_PRE_COUNT($28) | ||
65 | local_irq_disable t0 | ||
66 | b need_resched | 62 | b need_resched |
67 | #endif | 63 | #endif |
68 | 64 | ||
@@ -88,13 +84,13 @@ FEXPORT(restore_partial) # restore partial frame | |||
88 | RESTORE_SP_AND_RET | 84 | RESTORE_SP_AND_RET |
89 | .set at | 85 | .set at |
90 | 86 | ||
91 | FEXPORT(work_pending) | 87 | work_pending: |
92 | andi t0, a2, _TIF_NEED_RESCHED | 88 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS |
93 | beqz t0, work_notifysig | 89 | beqz t0, work_notifysig |
94 | work_resched: | 90 | work_resched: |
95 | jal schedule | 91 | jal schedule |
96 | 92 | ||
97 | local_irq_disable t0 # make sure need_resched and | 93 | local_irq_disable # make sure need_resched and |
98 | # signals dont change between | 94 | # signals dont change between |
99 | # sampling and return | 95 | # sampling and return |
100 | LONG_L a2, TI_FLAGS($28) | 96 | LONG_L a2, TI_FLAGS($28) |
@@ -109,15 +105,14 @@ work_notifysig: # deal with pending signals and | |||
109 | move a0, sp | 105 | move a0, sp |
110 | li a1, 0 | 106 | li a1, 0 |
111 | jal do_notify_resume # a2 already loaded | 107 | jal do_notify_resume # a2 already loaded |
112 | j restore_all | 108 | j resume_userspace |
113 | 109 | ||
114 | FEXPORT(syscall_exit_work_partial) | 110 | FEXPORT(syscall_exit_work_partial) |
115 | SAVE_STATIC | 111 | SAVE_STATIC |
116 | FEXPORT(syscall_exit_work) | 112 | syscall_exit_work: |
117 | LONG_L t0, TI_FLAGS($28) | 113 | li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
118 | li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 114 | and t0, a2 # a2 is preloaded with TI_FLAGS |
119 | and t0, t1 | 115 | beqz t0, work_pending # trace bit set? |
120 | beqz t0, work_pending # trace bit is set | ||
121 | local_irq_enable # could let do_syscall_trace() | 116 | local_irq_enable # could let do_syscall_trace() |
122 | # call schedule() instead | 117 | # call schedule() instead |
123 | move a0, sp | 118 | move a0, sp |
@@ -128,28 +123,25 @@ FEXPORT(syscall_exit_work) | |||
128 | /* | 123 | /* |
129 | * Common spurious interrupt handler. | 124 | * Common spurious interrupt handler. |
130 | */ | 125 | */ |
131 | .text | ||
132 | .align 5 | ||
133 | LEAF(spurious_interrupt) | 126 | LEAF(spurious_interrupt) |
134 | /* | 127 | /* |
135 | * Someone tried to fool us by sending an interrupt but we | 128 | * Someone tried to fool us by sending an interrupt but we |
136 | * couldn't find a cause for it. | 129 | * couldn't find a cause for it. |
137 | */ | 130 | */ |
131 | PTR_LA t1, irq_err_count | ||
138 | #ifdef CONFIG_SMP | 132 | #ifdef CONFIG_SMP |
139 | lui t1, %hi(irq_err_count) | 133 | 1: ll t0, (t1) |
140 | 1: ll t0, %lo(irq_err_count)(t1) | ||
141 | addiu t0, 1 | 134 | addiu t0, 1 |
142 | sc t0, %lo(irq_err_count)(t1) | 135 | sc t0, (t1) |
143 | #if R10000_LLSC_WAR | 136 | #if R10000_LLSC_WAR |
144 | beqzl t0, 1b | 137 | beqzl t0, 1b |
145 | #else | 138 | #else |
146 | beqz t0, 1b | 139 | beqz t0, 1b |
147 | #endif | 140 | #endif |
148 | #else | 141 | #else |
149 | lui t1, %hi(irq_err_count) | 142 | lw t0, (t1) |
150 | lw t0, %lo(irq_err_count)(t1) | ||
151 | addiu t0, 1 | 143 | addiu t0, 1 |
152 | sw t0, %lo(irq_err_count)(t1) | 144 | sw t0, (t1) |
153 | #endif | 145 | #endif |
154 | j ret_from_irq | 146 | j ret_from_irq |
155 | END(spurious_interrupt) | 147 | END(spurious_interrupt) |
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 512bedbfa7b9..83b8986f9401 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -52,16 +52,15 @@ | |||
52 | /* | 52 | /* |
53 | * Called from user mode, go somewhere else. | 53 | * Called from user mode, go somewhere else. |
54 | */ | 54 | */ |
55 | lui k1, %hi(saved_vectors) | ||
56 | mfc0 k0, CP0_CAUSE | 55 | mfc0 k0, CP0_CAUSE |
57 | andi k0, k0, 0x7c | 56 | andi k0, k0, 0x7c |
58 | add k1, k1, k0 | 57 | add k1, k1, k0 |
59 | lw k0, %lo(saved_vectors)(k1) | 58 | PTR_L k0, saved_vectors(k1) |
60 | jr k0 | 59 | jr k0 |
61 | nop | 60 | nop |
62 | 1: | 61 | 1: |
63 | move k0, sp | 62 | move k0, sp |
64 | subu sp, k1, GDB_FR_SIZE*2 # see comment above | 63 | PTR_SUBU sp, k1, GDB_FR_SIZE*2 # see comment above |
65 | LONG_S k0, GDB_FR_REG29(sp) | 64 | LONG_S k0, GDB_FR_REG29(sp) |
66 | LONG_S $2, GDB_FR_REG2(sp) | 65 | LONG_S $2, GDB_FR_REG2(sp) |
67 | 66 | ||
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d3fd1ab14274..96d18c43dca0 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -176,8 +176,10 @@ int kgdb_enabled; | |||
176 | /* | 176 | /* |
177 | * spin locks for smp case | 177 | * spin locks for smp case |
178 | */ | 178 | */ |
179 | static spinlock_t kgdb_lock = SPIN_LOCK_UNLOCKED; | 179 | static DEFINE_SPINLOCK(kgdb_lock); |
180 | static spinlock_t kgdb_cpulock[NR_CPUS] = { [0 ... NR_CPUS-1] = SPIN_LOCK_UNLOCKED}; | 180 | static raw_spinlock_t kgdb_cpulock[NR_CPUS] = { |
181 | [0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED; | ||
182 | }; | ||
181 | 183 | ||
182 | /* | 184 | /* |
183 | * BUFMAX defines the maximum number of characters in inbound/outbound buffers | 185 | * BUFMAX defines the maximum number of characters in inbound/outbound buffers |
@@ -637,29 +639,32 @@ static struct gdb_bp_save async_bp; | |||
637 | * and only one can be active at a time. | 639 | * and only one can be active at a time. |
638 | */ | 640 | */ |
639 | extern spinlock_t smp_call_lock; | 641 | extern spinlock_t smp_call_lock; |
642 | |||
640 | void set_async_breakpoint(unsigned long *epc) | 643 | void set_async_breakpoint(unsigned long *epc) |
641 | { | 644 | { |
642 | /* skip breaking into userland */ | 645 | /* skip breaking into userland */ |
643 | if ((*epc & 0x80000000) == 0) | 646 | if ((*epc & 0x80000000) == 0) |
644 | return; | 647 | return; |
645 | 648 | ||
649 | #ifdef CONFIG_SMP | ||
646 | /* avoid deadlock if someone is make IPC */ | 650 | /* avoid deadlock if someone is make IPC */ |
647 | if (spin_is_locked(&smp_call_lock)) | 651 | if (spin_is_locked(&smp_call_lock)) |
648 | return; | 652 | return; |
653 | #endif | ||
649 | 654 | ||
650 | async_bp.addr = *epc; | 655 | async_bp.addr = *epc; |
651 | *epc = (unsigned long)async_breakpoint; | 656 | *epc = (unsigned long)async_breakpoint; |
652 | } | 657 | } |
653 | 658 | ||
654 | void kgdb_wait(void *arg) | 659 | static void kgdb_wait(void *arg) |
655 | { | 660 | { |
656 | unsigned flags; | 661 | unsigned flags; |
657 | int cpu = smp_processor_id(); | 662 | int cpu = smp_processor_id(); |
658 | 663 | ||
659 | local_irq_save(flags); | 664 | local_irq_save(flags); |
660 | 665 | ||
661 | spin_lock(&kgdb_cpulock[cpu]); | 666 | __raw_spin_lock(&kgdb_cpulock[cpu]); |
662 | spin_unlock(&kgdb_cpulock[cpu]); | 667 | __raw_spin_unlock(&kgdb_cpulock[cpu]); |
663 | 668 | ||
664 | local_irq_restore(flags); | 669 | local_irq_restore(flags); |
665 | } | 670 | } |
@@ -707,7 +712,7 @@ void handle_exception (struct gdb_regs *regs) | |||
707 | * acquire the CPU spinlocks | 712 | * acquire the CPU spinlocks |
708 | */ | 713 | */ |
709 | for (i = num_online_cpus()-1; i >= 0; i--) | 714 | for (i = num_online_cpus()-1; i >= 0; i--) |
710 | if (spin_trylock(&kgdb_cpulock[i]) == 0) | 715 | if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0) |
711 | panic("kgdb: couldn't get cpulock %d\n", i); | 716 | panic("kgdb: couldn't get cpulock %d\n", i); |
712 | 717 | ||
713 | /* | 718 | /* |
@@ -982,7 +987,7 @@ finish_kgdb: | |||
982 | exit_kgdb_exception: | 987 | exit_kgdb_exception: |
983 | /* release locks so other CPUs can go */ | 988 | /* release locks so other CPUs can go */ |
984 | for (i = num_online_cpus()-1; i >= 0; i--) | 989 | for (i = num_online_cpus()-1; i >= 0; i--) |
985 | spin_unlock(&kgdb_cpulock[i]); | 990 | __raw_spin_unlock(&kgdb_cpulock[i]); |
986 | spin_unlock(&kgdb_lock); | 991 | spin_unlock(&kgdb_lock); |
987 | 992 | ||
988 | __flush_cache_all(); | 993 | __flush_cache_all(); |
@@ -1036,12 +1041,12 @@ void adel(void) | |||
1036 | * malloc is needed by gdb client in "call func()", even a private one | 1041 | * malloc is needed by gdb client in "call func()", even a private one |
1037 | * will make gdb happy | 1042 | * will make gdb happy |
1038 | */ | 1043 | */ |
1039 | static void *malloc(size_t size) | 1044 | static void * __attribute_used__ malloc(size_t size) |
1040 | { | 1045 | { |
1041 | return kmalloc(size, GFP_ATOMIC); | 1046 | return kmalloc(size, GFP_ATOMIC); |
1042 | } | 1047 | } |
1043 | 1048 | ||
1044 | static void free(void *where) | 1049 | static void __attribute_used__ free (void *where) |
1045 | { | 1050 | { |
1046 | kfree(where); | 1051 | kfree(where); |
1047 | } | 1052 | } |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index e7f6c1b90806..aa18a8b7b380 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -82,7 +82,7 @@ NESTED(except_vec3_r4000, 0, sp) | |||
82 | li k0, 14<<2 | 82 | li k0, 14<<2 |
83 | beq k1, k0, handle_vcei | 83 | beq k1, k0, handle_vcei |
84 | #ifdef CONFIG_64BIT | 84 | #ifdef CONFIG_64BIT |
85 | dsll k1, k1, 1 | 85 | dsll k1, k1, 1 |
86 | #endif | 86 | #endif |
87 | .set pop | 87 | .set pop |
88 | PTR_L k0, exception_handlers(k1) | 88 | PTR_L k0, exception_handlers(k1) |
@@ -90,17 +90,17 @@ NESTED(except_vec3_r4000, 0, sp) | |||
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Big shit, we now may have two dirty primary cache lines for the same | 92 | * Big shit, we now may have two dirty primary cache lines for the same |
93 | * physical address. We can savely invalidate the line pointed to by | 93 | * physical address. We can safely invalidate the line pointed to by |
94 | * c0_badvaddr because after return from this exception handler the | 94 | * c0_badvaddr because after return from this exception handler the |
95 | * load / store will be re-executed. | 95 | * load / store will be re-executed. |
96 | */ | 96 | */ |
97 | handle_vced: | 97 | handle_vced: |
98 | DMFC0 k0, CP0_BADVADDR | 98 | MFC0 k0, CP0_BADVADDR |
99 | li k1, -4 # Is this ... | 99 | li k1, -4 # Is this ... |
100 | and k0, k1 # ... really needed? | 100 | and k0, k1 # ... really needed? |
101 | mtc0 zero, CP0_TAGLO | 101 | mtc0 zero, CP0_TAGLO |
102 | cache Index_Store_Tag_D,(k0) | 102 | cache Index_Store_Tag_D, (k0) |
103 | cache Hit_Writeback_Inv_SD,(k0) | 103 | cache Hit_Writeback_Inv_SD, (k0) |
104 | #ifdef CONFIG_PROC_FS | 104 | #ifdef CONFIG_PROC_FS |
105 | PTR_LA k0, vced_count | 105 | PTR_LA k0, vced_count |
106 | lw k1, (k0) | 106 | lw k1, (k0) |
@@ -148,6 +148,38 @@ NESTED(except_vec_ejtag_debug, 0, sp) | |||
148 | __FINIT | 148 | __FINIT |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * Vectored interrupt handler. | ||
152 | * This prototype is copied to ebase + n*IntCtl.VS and patched | ||
153 | * to invoke the handler | ||
154 | */ | ||
155 | NESTED(except_vec_vi, 0, sp) | ||
156 | SAVE_SOME | ||
157 | SAVE_AT | ||
158 | .set push | ||
159 | .set noreorder | ||
160 | EXPORT(except_vec_vi_lui) | ||
161 | lui v0, 0 /* Patched */ | ||
162 | j except_vec_vi_handler | ||
163 | EXPORT(except_vec_vi_ori) | ||
164 | ori v0, 0 /* Patched */ | ||
165 | .set pop | ||
166 | END(except_vec_vi) | ||
167 | EXPORT(except_vec_vi_end) | ||
168 | |||
169 | /* | ||
170 | * Common Vectored Interrupt code | ||
171 | * Complete the register saves and invoke the handler which is passed in $v0 | ||
172 | */ | ||
173 | NESTED(except_vec_vi_handler, 0, sp) | ||
174 | SAVE_TEMP | ||
175 | SAVE_STATIC | ||
176 | CLI | ||
177 | move a0, sp | ||
178 | jalr v0 | ||
179 | j ret_from_irq | ||
180 | END(except_vec_vi_handler) | ||
181 | |||
182 | /* | ||
151 | * EJTAG debug exception handler. | 183 | * EJTAG debug exception handler. |
152 | */ | 184 | */ |
153 | NESTED(ejtag_debug_handler, PT_SIZE, sp) | 185 | NESTED(ejtag_debug_handler, PT_SIZE, sp) |
@@ -291,6 +323,8 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
291 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ | 323 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ |
292 | BUILD_HANDLER watch watch sti verbose /* #23 */ | 324 | BUILD_HANDLER watch watch sti verbose /* #23 */ |
293 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ | 325 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ |
326 | BUILD_HANDLER mt mt sti verbose /* #25 */ | ||
327 | BUILD_HANDLER dsp dsp sti silent /* #26 */ | ||
294 | BUILD_HANDLER reserved reserved sti verbose /* others */ | 328 | BUILD_HANDLER reserved reserved sti verbose /* others */ |
295 | 329 | ||
296 | #ifdef CONFIG_64BIT | 330 | #ifdef CONFIG_64BIT |
diff --git a/arch/mips/kernel/genrtc.c b/arch/mips/kernel/genrtc.c
deleted file mode 100644
index 71416e7bbbaa..000000000000
--- a/arch/mips/kernel/genrtc.c
+++ /dev/null
@@ -1,64 +0,0 @@ | |||
1 | /* | ||
2 | * A glue layer that provides RTC read/write to drivers/char/genrtc.c driver | ||
3 | * based on MIPS internal RTC routines. It does take care locking | ||
4 | * issues so that we are SMP/Preemption safe. | ||
5 | * | ||
6 | * Copyright (C) 2004 MontaVista Software Inc. | ||
7 | * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net | ||
8 | * | ||
9 | * Please read the COPYING file for all license details. | ||
10 | */ | ||
11 | |||
12 | #include <linux/spinlock.h> | ||
13 | |||
14 | #include <asm/rtc.h> | ||
15 | #include <asm/time.h> | ||
16 | |||
17 | static DEFINE_SPINLOCK(mips_rtc_lock); | ||
18 | |||
19 | unsigned int get_rtc_time(struct rtc_time *time) | ||
20 | { | ||
21 | unsigned long nowtime; | ||
22 | |||
23 | spin_lock(&mips_rtc_lock); | ||
24 | nowtime = rtc_get_time(); | ||
25 | to_tm(nowtime, time); | ||
26 | time->tm_year -= 1900; | ||
27 | spin_unlock(&mips_rtc_lock); | ||
28 | |||
29 | return RTC_24H; | ||
30 | } | ||
31 | |||
32 | int set_rtc_time(struct rtc_time *time) | ||
33 | { | ||
34 | unsigned long nowtime; | ||
35 | int ret; | ||
36 | |||
37 | spin_lock(&mips_rtc_lock); | ||
38 | nowtime = mktime(time->tm_year+1900, time->tm_mon+1, | ||
39 | time->tm_mday, time->tm_hour, time->tm_min, | ||
40 | time->tm_sec); | ||
41 | ret = rtc_set_time(nowtime); | ||
42 | spin_unlock(&mips_rtc_lock); | ||
43 | |||
44 | return ret; | ||
45 | } | ||
46 | |||
47 | unsigned int get_rtc_ss(void) | ||
48 | { | ||
49 | struct rtc_time h; | ||
50 | |||
51 | get_rtc_time(&h); | ||
52 | return h.tm_sec; | ||
53 | } | ||
54 | |||
55 | int get_rtc_pll(struct rtc_pll_info *pll) | ||
56 | { | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | int set_rtc_pll(struct rtc_pll_info *pll) | ||
61 | { | ||
62 | return -EINVAL; | ||
63 | } | ||
64 | |||
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2a1b45d66f04..2e9122a4213a 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,11 +22,8 @@ | |||
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | #include <asm/mipsregs.h> | 23 | #include <asm/mipsregs.h> |
24 | #include <asm/stackframe.h> | 24 | #include <asm/stackframe.h> |
25 | #ifdef CONFIG_SGI_IP27 | 25 | |
26 | #include <asm/sn/addrs.h> | 26 | #include <kernel-entry-init.h> |
27 | #include <asm/sn/sn0/hubni.h> | ||
28 | #include <asm/sn/klkernvars.h> | ||
29 | #endif | ||
30 | 27 | ||
31 | .macro ARC64_TWIDDLE_PC | 28 | .macro ARC64_TWIDDLE_PC |
32 | #if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL) | 29 | #if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL) |
@@ -38,18 +35,6 @@ | |||
38 | #endif | 35 | #endif |
39 | .endm | 36 | .endm |
40 | 37 | ||
41 | #ifdef CONFIG_SGI_IP27 | ||
42 | /* | ||
43 | * outputs the local nasid into res. IP27 stuff. | ||
44 | */ | ||
45 | .macro GET_NASID_ASM res | ||
46 | dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID) | ||
47 | ld \res, (\res) | ||
48 | and \res, NSRI_NODEID_MASK | ||
49 | dsrl \res, NSRI_NODEID_SHFT | ||
50 | .endm | ||
51 | #endif /* CONFIG_SGI_IP27 */ | ||
52 | |||
53 | /* | 38 | /* |
54 | * inputs are the text nasid in t1, data nasid in t2. | 39 | * inputs are the text nasid in t1, data nasid in t2. |
55 | */ | 40 | */ |
@@ -131,16 +116,21 @@ | |||
131 | EXPORT(stext) # used for profiling | 116 | EXPORT(stext) # used for profiling |
132 | EXPORT(_stext) | 117 | EXPORT(_stext) |
133 | 118 | ||
119 | #if defined(CONFIG_QEMU) || defined(CONFIG_MIPS_SIM) | ||
120 | /* | ||
121 | * Give us a fighting chance of running if execution beings at the | ||
122 | * kernel load address. This is needed because this platform does | ||
123 | * not have a ELF loader yet. | ||
124 | */ | ||
125 | j kernel_entry | ||
126 | #endif | ||
134 | __INIT | 127 | __INIT |
135 | 128 | ||
136 | NESTED(kernel_entry, 16, sp) # kernel entry point | 129 | NESTED(kernel_entry, 16, sp) # kernel entry point |
137 | setup_c0_status_pri | ||
138 | 130 | ||
139 | #ifdef CONFIG_SGI_IP27 | 131 | kernel_entry_setup # cpu specific setup |
140 | GET_NASID_ASM t1 | 132 | |
141 | move t2, t1 # text and data are here | 133 | setup_c0_status_pri |
142 | MAPPED_KERNEL_SETUP_TLB | ||
143 | #endif /* IP27 */ | ||
144 | 134 | ||
145 | ARC64_TWIDDLE_PC | 135 | ARC64_TWIDDLE_PC |
146 | 136 | ||
@@ -157,6 +147,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
157 | LONG_S a2, fw_arg2 | 147 | LONG_S a2, fw_arg2 |
158 | LONG_S a3, fw_arg3 | 148 | LONG_S a3, fw_arg3 |
159 | 149 | ||
150 | MTC0 zero, CP0_CONTEXT # clear context register | ||
160 | PTR_LA $28, init_thread_union | 151 | PTR_LA $28, init_thread_union |
161 | PTR_ADDIU sp, $28, _THREAD_SIZE - 32 | 152 | PTR_ADDIU sp, $28, _THREAD_SIZE - 32 |
162 | set_saved_sp sp, t0, t1 | 153 | set_saved_sp sp, t0, t1 |
@@ -165,6 +156,10 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
165 | j start_kernel | 156 | j start_kernel |
166 | END(kernel_entry) | 157 | END(kernel_entry) |
167 | 158 | ||
159 | #ifdef CONFIG_QEMU | ||
160 | __INIT | ||
161 | #endif | ||
162 | |||
168 | #ifdef CONFIG_SMP | 163 | #ifdef CONFIG_SMP |
169 | /* | 164 | /* |
170 | * SMP slave cpus entry point. Board specific code for bootstrap calls this | 165 | * SMP slave cpus entry point. Board specific code for bootstrap calls this |
@@ -172,20 +167,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
172 | */ | 167 | */ |
173 | NESTED(smp_bootstrap, 16, sp) | 168 | NESTED(smp_bootstrap, 16, sp) |
174 | setup_c0_status_sec | 169 | setup_c0_status_sec |
175 | 170 | smp_slave_setup | |
176 | #ifdef CONFIG_SGI_IP27 | ||
177 | GET_NASID_ASM t1 | ||
178 | dli t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + \ | ||
179 | KLDIR_OFF_POINTER + CAC_BASE | ||
180 | dsll t1, NASID_SHFT | ||
181 | or t0, t0, t1 | ||
182 | ld t0, 0(t0) # t0 points to kern_vars struct | ||
183 | lh t1, KV_RO_NASID_OFFSET(t0) | ||
184 | lh t2, KV_RW_NASID_OFFSET(t0) | ||
185 | MAPPED_KERNEL_SETUP_TLB | ||
186 | ARC64_TWIDDLE_PC | ||
187 | #endif /* CONFIG_SGI_IP27 */ | ||
188 | |||
189 | j start_secondary | 171 | j start_secondary |
190 | END(smp_bootstrap) | 172 | END(smp_bootstrap) |
191 | #endif /* CONFIG_SMP */ | 173 | #endif /* CONFIG_SMP */ |
@@ -200,19 +182,13 @@ NESTED(smp_bootstrap, 16, sp) | |||
200 | .comm fw_arg2, SZREG, SZREG | 182 | .comm fw_arg2, SZREG, SZREG |
201 | .comm fw_arg3, SZREG, SZREG | 183 | .comm fw_arg3, SZREG, SZREG |
202 | 184 | ||
203 | .macro page name, order=0 | 185 | .macro page name, order |
204 | .globl \name | 186 | .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order) |
205 | \name: .size \name, (_PAGE_SIZE << \order) | ||
206 | .org . + (_PAGE_SIZE << \order) | ||
207 | .type \name, @object | ||
208 | .endm | 187 | .endm |
209 | 188 | ||
210 | .data | ||
211 | .align PAGE_SHIFT | ||
212 | |||
213 | /* | 189 | /* |
214 | * ... but on 64-bit we've got three-level pagetables with a | 190 | * On 64-bit we've got three-level pagetables with a slightly |
215 | * slightly different layout ... | 191 | * different layout ... |
216 | */ | 192 | */ |
217 | page swapper_pg_dir, _PGD_ORDER | 193 | page swapper_pg_dir, _PGD_ORDER |
218 | #ifdef CONFIG_64BIT | 194 | #ifdef CONFIG_64BIT |
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 447759201d1d..b974ac9057f6 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -31,7 +31,7 @@ void disable_8259A_irq(unsigned int irq); | |||
31 | * moves to arch independent land | 31 | * moves to arch independent land |
32 | */ | 32 | */ |
33 | 33 | ||
34 | spinlock_t DEFINE_SPINLOCK(i8259A_lock); | 34 | DEFINE_SPINLOCK(i8259A_lock); |
35 | 35 | ||
36 | static void end_8259A_irq (unsigned int irq) | 36 | static void end_8259A_irq (unsigned int irq) |
37 | { | 37 | { |
@@ -52,14 +52,13 @@ static unsigned int startup_8259A_irq(unsigned int irq) | |||
52 | } | 52 | } |
53 | 53 | ||
54 | static struct hw_interrupt_type i8259A_irq_type = { | 54 | static struct hw_interrupt_type i8259A_irq_type = { |
55 | "XT-PIC", | 55 | .typename = "XT-PIC", |
56 | startup_8259A_irq, | 56 | .startup = startup_8259A_irq, |
57 | shutdown_8259A_irq, | 57 | .shutdown = shutdown_8259A_irq, |
58 | enable_8259A_irq, | 58 | .enable = enable_8259A_irq, |
59 | disable_8259A_irq, | 59 | .disable = disable_8259A_irq, |
60 | mask_and_ack_8259A, | 60 | .ack = mask_and_ack_8259A, |
61 | end_8259A_irq, | 61 | .end = end_8259A_irq, |
62 | NULL | ||
63 | }; | 62 | }; |
64 | 63 | ||
65 | /* | 64 | /* |
@@ -308,7 +307,7 @@ static struct resource pic2_io_resource = { | |||
308 | 307 | ||
309 | /* | 308 | /* |
310 | * On systems with i8259-style interrupt controllers we assume for | 309 | * On systems with i8259-style interrupt controllers we assume for |
311 | * driver compatibility reasons interrupts 0 - 15 to be the i8295 | 310 | * driver compatibility reasons interrupts 0 - 15 to be the i8259 |
312 | * interrupts even if the hardware uses a different interrupt numbering. | 311 | * interrupts even if the hardware uses a different interrupt numbering. |
313 | */ | 312 | */ |
314 | void __init init_i8259_irqs (void) | 313 | void __init init_i8259_irqs (void) |
@@ -322,7 +321,7 @@ void __init init_i8259_irqs (void) | |||
322 | 321 | ||
323 | for (i = 0; i < 16; i++) { | 322 | for (i = 0; i < 16; i++) { |
324 | irq_desc[i].status = IRQ_DISABLED; | 323 | irq_desc[i].status = IRQ_DISABLED; |
325 | irq_desc[i].action = 0; | 324 | irq_desc[i].action = NULL; |
326 | irq_desc[i].depth = 1; | 325 | irq_desc[i].depth = 1; |
327 | irq_desc[i].handler = &i8259A_irq_type; | 326 | irq_desc[i].handler = &i8259A_irq_type; |
328 | } | 327 | } |
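The i8259.c change above replaces a positional initializer for the hw_interrupt_type structure with C99 designated initializers, which is also why the trailing explicit NULL member can be dropped: members that are not named default to zero/NULL. A minimal stand-alone sketch of the same pattern, using an illustrative struct rather than the kernel's own type:

	struct irq_ops {				/* illustrative, not hw_interrupt_type */
		const char *typename;
		unsigned int (*startup)(unsigned int irq);
		void (*shutdown)(unsigned int irq);
		void (*end)(unsigned int irq);		/* not named below, so it stays NULL */
	};

	static unsigned int demo_startup(unsigned int irq) { return 0; }
	static void demo_shutdown(unsigned int irq) { }

	/* Fields are named, so their order no longer has to match the struct
	 * declaration and any omitted field is implicitly zero-initialized. */
	static struct irq_ops demo_ops = {
		.typename = "demo-PIC",
		.startup  = demo_startup,
		.shutdown = demo_shutdown,
	};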
diff --git a/arch/mips/kernel/ioctl32.c b/arch/mips/kernel/ioctl32.c index c069719ff0d8..ed9b2da510be 100644 --- a/arch/mips/kernel/ioctl32.c +++ b/arch/mips/kernel/ioctl32.c | |||
@@ -41,12 +41,6 @@ IOCTL_TABLE_START | |||
41 | #define DECLARES | 41 | #define DECLARES |
42 | #include "compat_ioctl.c" | 42 | #include "compat_ioctl.c" |
43 | 43 | ||
44 | #ifdef CONFIG_SIBYTE_TBPROF | ||
45 | COMPATIBLE_IOCTL(SBPROF_ZBSTART) | ||
46 | COMPATIBLE_IOCTL(SBPROF_ZBSTOP) | ||
47 | COMPATIBLE_IOCTL(SBPROF_ZBWAITFULL) | ||
48 | #endif /* CONFIG_SIBYTE_TBPROF */ | ||
49 | |||
50 | /*HANDLE_IOCTL(RTC_IRQP_READ, w_long) | 44 | /*HANDLE_IOCTL(RTC_IRQP_READ, w_long) |
51 | COMPATIBLE_IOCTL(RTC_IRQP_SET) | 45 | COMPATIBLE_IOCTL(RTC_IRQP_SET) |
52 | HANDLE_IOCTL(RTC_EPOCH_READ, w_long) | 46 | HANDLE_IOCTL(RTC_EPOCH_READ, w_long) |
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c index 4af20cd91f9f..10d3644e3608 100644 --- a/arch/mips/kernel/irixelf.c +++ b/arch/mips/kernel/irixelf.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com> | 9 | * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com> |
10 | * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> | 10 | * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> |
11 | * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com> | 11 | * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> |
12 | */ | 12 | */ |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
@@ -31,15 +31,16 @@ | |||
31 | #include <linux/elfcore.h> | 31 | #include <linux/elfcore.h> |
32 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
33 | 33 | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/mipsregs.h> | 34 | #include <asm/mipsregs.h> |
35 | #include <asm/namei.h> | ||
36 | #include <asm/prctl.h> | 36 | #include <asm/prctl.h> |
37 | #include <asm/uaccess.h> | ||
37 | 38 | ||
38 | #define DLINFO_ITEMS 12 | 39 | #define DLINFO_ITEMS 12 |
39 | 40 | ||
40 | #include <linux/elf.h> | 41 | #include <linux/elf.h> |
41 | 42 | ||
42 | #undef DEBUG_ELF | 43 | #undef DEBUG |
43 | 44 | ||
44 | static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); | 45 | static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); |
45 | static int load_irix_library(struct file *); | 46 | static int load_irix_library(struct file *); |
@@ -55,7 +56,7 @@ static struct linux_binfmt irix_format = { | |||
55 | #define elf_addr_t unsigned long | 56 | #define elf_addr_t unsigned long |
56 | #endif | 57 | #endif |
57 | 58 | ||
58 | #ifdef DEBUG_ELF | 59 | #ifdef DEBUG |
59 | /* Debugging routines. */ | 60 | /* Debugging routines. */ |
60 | static char *get_elf_p_type(Elf32_Word p_type) | 61 | static char *get_elf_p_type(Elf32_Word p_type) |
61 | { | 62 | { |
@@ -120,7 +121,7 @@ static void dump_phdrs(struct elf_phdr *ep, int pnum) | |||
120 | print_phdr(i, ep); | 121 | print_phdr(i, ep); |
121 | } | 122 | } |
122 | } | 123 | } |
123 | #endif /* (DEBUG_ELF) */ | 124 | #endif /* DEBUG */ |
124 | 125 | ||
125 | static void set_brk(unsigned long start, unsigned long end) | 126 | static void set_brk(unsigned long start, unsigned long end) |
126 | { | 127 | { |
@@ -146,20 +147,20 @@ static void padzero(unsigned long elf_bss) | |||
146 | nbyte = elf_bss & (PAGE_SIZE-1); | 147 | nbyte = elf_bss & (PAGE_SIZE-1); |
147 | if (nbyte) { | 148 | if (nbyte) { |
148 | nbyte = PAGE_SIZE - nbyte; | 149 | nbyte = PAGE_SIZE - nbyte; |
149 | clear_user((void *) elf_bss, nbyte); | 150 | clear_user((void __user *) elf_bss, nbyte); |
150 | } | 151 | } |
151 | } | 152 | } |
152 | 153 | ||
153 | unsigned long * create_irix_tables(char * p, int argc, int envc, | 154 | static unsigned long * create_irix_tables(char * p, int argc, int envc, |
154 | struct elfhdr * exec, unsigned int load_addr, | 155 | struct elfhdr * exec, unsigned int load_addr, |
155 | unsigned int interp_load_addr, | 156 | unsigned int interp_load_addr, struct pt_regs *regs, |
156 | struct pt_regs *regs, struct elf_phdr *ephdr) | 157 | struct elf_phdr *ephdr) |
157 | { | 158 | { |
158 | elf_addr_t *argv; | 159 | elf_addr_t *argv; |
159 | elf_addr_t *envp; | 160 | elf_addr_t *envp; |
160 | elf_addr_t *sp, *csp; | 161 | elf_addr_t *sp, *csp; |
161 | 162 | ||
162 | #ifdef DEBUG_ELF | 163 | #ifdef DEBUG |
163 | printk("create_irix_tables: p[%p] argc[%d] envc[%d] " | 164 | printk("create_irix_tables: p[%p] argc[%d] envc[%d] " |
164 | "load_addr[%08x] interp_load_addr[%08x]\n", | 165 | "load_addr[%08x] interp_load_addr[%08x]\n", |
165 | p, argc, envc, load_addr, interp_load_addr); | 166 | p, argc, envc, load_addr, interp_load_addr); |
@@ -248,14 +249,13 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
248 | last_bss = 0; | 249 | last_bss = 0; |
249 | error = load_addr = 0; | 250 | error = load_addr = 0; |
250 | 251 | ||
251 | #ifdef DEBUG_ELF | 252 | #ifdef DEBUG |
252 | print_elfhdr(interp_elf_ex); | 253 | print_elfhdr(interp_elf_ex); |
253 | #endif | 254 | #endif |
254 | 255 | ||
255 | /* First of all, some simple consistency checks */ | 256 | /* First of all, some simple consistency checks */ |
256 | if ((interp_elf_ex->e_type != ET_EXEC && | 257 | if ((interp_elf_ex->e_type != ET_EXEC && |
257 | interp_elf_ex->e_type != ET_DYN) || | 258 | interp_elf_ex->e_type != ET_DYN) || |
258 | !irix_elf_check_arch(interp_elf_ex) || | ||
259 | !interpreter->f_op->mmap) { | 259 | !interpreter->f_op->mmap) { |
260 | printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type); | 260 | printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type); |
261 | return 0xffffffff; | 261 | return 0xffffffff; |
@@ -290,7 +290,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
290 | (char *) elf_phdata, | 290 | (char *) elf_phdata, |
291 | sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); | 291 | sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); |
292 | 292 | ||
293 | #ifdef DEBUG_ELF | 293 | #ifdef DEBUG |
294 | dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); | 294 | dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); |
295 | #endif | 295 | #endif |
296 | 296 | ||
@@ -306,13 +306,11 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
306 | elf_type |= MAP_FIXED; | 306 | elf_type |= MAP_FIXED; |
307 | vaddr = eppnt->p_vaddr; | 307 | vaddr = eppnt->p_vaddr; |
308 | 308 | ||
309 | #ifdef DEBUG_ELF | 309 | pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", |
310 | printk("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", | ||
311 | interpreter, vaddr, | 310 | interpreter, vaddr, |
312 | (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), | 311 | (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), |
313 | (unsigned long) elf_prot, (unsigned long) elf_type, | 312 | (unsigned long) elf_prot, (unsigned long) elf_type, |
314 | (unsigned long) (eppnt->p_offset & 0xfffff000)); | 313 | (unsigned long) (eppnt->p_offset & 0xfffff000)); |
315 | #endif | ||
316 | down_write(&current->mm->mmap_sem); | 314 |
317 | error = do_mmap(interpreter, vaddr, | 315 | error = do_mmap(interpreter, vaddr, |
318 | eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), | 316 | eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), |
@@ -324,14 +322,10 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
324 | printk("Aieee IRIX interp mmap error=%d\n", error); | 322 | printk("Aieee IRIX interp mmap error=%d\n", error); |
325 | break; /* Real error */ | 323 | break; /* Real error */ |
326 | } | 324 | } |
327 | #ifdef DEBUG_ELF | 325 | pr_debug("error=%08lx ", (unsigned long) error); |
328 | printk("error=%08lx ", (unsigned long) error); | ||
329 | #endif | ||
330 | if(!load_addr && interp_elf_ex->e_type == ET_DYN) { | 326 | if(!load_addr && interp_elf_ex->e_type == ET_DYN) { |
331 | load_addr = error; | 327 | load_addr = error; |
332 | #ifdef DEBUG_ELF | 328 | pr_debug("load_addr = error "); |
333 | printk("load_addr = error "); | ||
334 | #endif | ||
335 | } | 329 | } |
336 | 330 | ||
337 | /* Find the end of the file mapping for this phdr, and keep | 331 | /* Find the end of the file mapping for this phdr, and keep |
@@ -345,17 +339,13 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
345 | */ | 339 | */ |
346 | k = eppnt->p_memsz + eppnt->p_vaddr; | 340 | k = eppnt->p_memsz + eppnt->p_vaddr; |
347 | if(k > last_bss) last_bss = k; | 341 | if(k > last_bss) last_bss = k; |
348 | #ifdef DEBUG_ELF | 342 | pr_debug("\n"); |
349 | printk("\n"); | ||
350 | #endif | ||
351 | } | 343 | } |
352 | } | 344 | } |
353 | 345 | ||
354 | /* Now use mmap to map the library into memory. */ | 346 | /* Now use mmap to map the library into memory. */ |
355 | if(error < 0 && error > -1024) { | 347 | if(error < 0 && error > -1024) { |
356 | #ifdef DEBUG_ELF | 348 | pr_debug("got error %d\n", error); |
357 | printk("got error %d\n", error); | ||
358 | #endif | ||
359 | kfree(elf_phdata); | 349 | kfree(elf_phdata); |
360 | return 0xffffffff; | 350 | return 0xffffffff; |
361 | } | 351 | } |
@@ -365,16 +355,12 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex, | |||
365 | * that there are zero-mapped pages up to and including the | 355 | * that there are zero-mapped pages up to and including the |
366 | * last bss page. | 356 | * last bss page. |
367 | */ | 357 | */ |
368 | #ifdef DEBUG_ELF | 358 | pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss)); |
369 | printk("padzero(%08lx) ", (unsigned long) (elf_bss)); | ||
370 | #endif | ||
371 | padzero(elf_bss); | 359 | padzero(elf_bss); |
372 | len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */ | 360 | len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */ |
373 | 361 | ||
374 | #ifdef DEBUG_ELF | 362 | pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss, |
375 | printk("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss, | 363 | (unsigned long) len); |
376 | (unsigned long) len); | ||
377 | #endif | ||
378 | 364 | ||
379 | /* Map the last of the bss segment */ | 365 | /* Map the last of the bss segment */ |
380 | if (last_bss > len) { | 366 | if (last_bss > len) { |
@@ -396,12 +382,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm) | |||
396 | 382 | ||
397 | /* First of all, some simple consistency checks */ | 383 | /* First of all, some simple consistency checks */ |
398 | if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || | 384 | if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || |
399 | !irix_elf_check_arch(ehp) || !bprm->file->f_op->mmap) { | 385 | !bprm->file->f_op->mmap) { |
400 | return -ENOEXEC; | ||
401 | } | ||
402 | |||
403 | /* Only support MIPS ARCH2 or greater IRIX binaries for now. */ | ||
404 | if(!(ehp->e_flags & EF_MIPS_ARCH) && !(ehp->e_flags & 0x04)) { | ||
405 | return -ENOEXEC; | 386 | return -ENOEXEC; |
406 | } | 387 | } |
407 | 388 | ||
@@ -411,16 +392,17 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm) | |||
411 | * XXX all registers as 64bits on cpu's capable of this at | 392 | * XXX all registers as 64bits on cpu's capable of this at |
412 | * XXX exception time plus frob the XTLB exception vector. | 393 | * XXX exception time plus frob the XTLB exception vector. |
413 | */ | 394 | */ |
414 | if((ehp->e_flags & 0x20)) { | 395 | if((ehp->e_flags & EF_MIPS_ABI2)) |
415 | return -ENOEXEC; | 396 | return -ENOEXEC; |
416 | } | ||
417 | 397 | ||
418 | return 0; /* It's ok. */ | 398 | return 0; |
419 | } | 399 | } |
420 | 400 | ||
421 | #define IRIX_INTERP_PREFIX "/usr/gnemul/irix" | 401 | /* |
422 | 402 | * This is where the detailed check is performed. Irix binaries | |
423 | /* Look for an IRIX ELF interpreter. */ | 403 | * use interpreters with 'libc.so' in the name, so this function |
404 | * can differentiate between Linux and Irix binaries. | ||
405 | */ | ||
424 | static inline int look_for_irix_interpreter(char **name, | 406 | static inline int look_for_irix_interpreter(char **name, |
425 | struct file **interpreter, | 407 | struct file **interpreter, |
426 | struct elfhdr *interp_elf_ex, | 408 | struct elfhdr *interp_elf_ex, |
@@ -440,12 +422,11 @@ static inline int look_for_irix_interpreter(char **name, | |||
440 | if (*name != NULL) | 422 | if (*name != NULL) |
441 | goto out; | 423 | goto out; |
442 | 424 | ||
443 | *name = kmalloc((epp->p_filesz + strlen(IRIX_INTERP_PREFIX)), | 425 | *name = kmalloc(epp->p_filesz + strlen(IRIX_EMUL), GFP_KERNEL); |
444 | GFP_KERNEL); | ||
445 | if (!*name) | 426 | if (!*name) |
446 | return -ENOMEM; | 427 | return -ENOMEM; |
447 | 428 | ||
448 | strcpy(*name, IRIX_INTERP_PREFIX); | 429 | strcpy(*name, IRIX_EMUL); |
449 | retval = kernel_read(bprm->file, epp->p_offset, (*name + 16), | 430 | retval = kernel_read(bprm->file, epp->p_offset, (*name + 16), |
450 | epp->p_filesz); | 431 | epp->p_filesz); |
451 | if (retval < 0) | 432 | if (retval < 0) |
@@ -562,7 +543,7 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp, | |||
562 | * process and the system, here we map the page and fill the | 543 | * process and the system, here we map the page and fill the |
563 | * structure | 544 | * structure |
564 | */ | 545 | */ |
565 | void irix_map_prda_page (void) | 546 | static void irix_map_prda_page(void) |
566 | { | 547 | { |
567 | unsigned long v; | 548 | unsigned long v; |
568 | struct prda *pp; | 549 | struct prda *pp; |
@@ -601,14 +582,33 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
601 | 582 | ||
602 | load_addr = 0; | 583 | load_addr = 0; |
603 | has_interp = has_ephdr = 0; | 584 | has_interp = has_ephdr = 0; |
604 | elf_ihdr = elf_ephdr = 0; | 585 | elf_ihdr = elf_ephdr = NULL; |
605 | elf_ex = *((struct elfhdr *) bprm->buf); | 586 | elf_ex = *((struct elfhdr *) bprm->buf); |
606 | retval = -ENOEXEC; | 587 | retval = -ENOEXEC; |
607 | 588 | ||
608 | if (verify_binary(&elf_ex, bprm)) | 589 | if (verify_binary(&elf_ex, bprm)) |
609 | goto out; | 590 | goto out; |
610 | 591 | ||
611 | #ifdef DEBUG_ELF | 592 | /* |
593 | * Telling -o32 static binaries from Linux and Irix apart from each | ||
594 | * other is difficult. There are 2 differences to be noted for static | ||
595 | * binaries from the 2 operating systems: | ||
596 | * | ||
597 | * 1) Irix binaries have their .text section before their .init | ||
598 | * section. Linux binaries are just the opposite. | ||
599 | * | ||
600 | * 2) Irix binaries usually have <= 12 sections and Linux | ||
601 | * binaries have > 20. | ||
602 | * | ||
603 | * We will use Method #2 since Method #1 would require us to read in | ||
604 | * the section headers which is way too much overhead. This appears | ||
605 | * to work for everything we have ran into so far. If anyone has a | ||
606 | * better method to tell the binaries apart, I'm listening. | ||
607 | */ | ||
608 | if (elf_ex.e_shnum > 20) | ||
609 | goto out; | ||
610 | |||
611 | #ifdef DEBUG | ||
612 | print_elfhdr(&elf_ex); | 612 | print_elfhdr(&elf_ex); |
613 | #endif | 613 | #endif |
614 | 614 | ||
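The new comment block above spells out the heuristic the loader now applies: an o32 static binary with more than 20 section headers is assumed to be a Linux binary and is rejected so the regular ELF handler can claim it. A small user-space sketch of the same e_shnum check using standard <elf.h> types (the threshold mirrors the comment; nothing below is kernel code):

	#include <elf.h>
	#include <stdio.h>

	/* Apply the section-count heuristic described in the comment above. */
	static int looks_like_irix(const Elf32_Ehdr *ehdr)
	{
		return ehdr->e_shnum <= 20;
	}

	int main(int argc, char **argv)
	{
		Elf32_Ehdr ehdr;
		FILE *f;

		if (argc < 2 || !(f = fopen(argv[1], "rb")))
			return 1;
		if (fread(&ehdr, sizeof(ehdr), 1, f) == 1)
			printf("%s: %u sections -> %s-like\n", argv[1],
			       (unsigned) ehdr.e_shnum,
			       looks_like_irix(&ehdr) ? "IRIX" : "Linux");
		fclose(f);
		return 0;
	}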
@@ -623,11 +623,10 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
623 | } | 623 | } |
624 | 624 | ||
625 | retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size); | 625 | retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size); |
626 | |||
627 | if (retval < 0) | 626 | if (retval < 0) |
628 | goto out_free_ph; | 627 | goto out_free_ph; |
629 | 628 | ||
630 | #ifdef DEBUG_ELF | 629 | #ifdef DEBUG |
631 | dump_phdrs(elf_phdata, elf_ex.e_phnum); | 630 | dump_phdrs(elf_phdata, elf_ex.e_phnum); |
632 | #endif | 631 | #endif |
633 | 632 | ||
@@ -644,9 +643,8 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
644 | break; | 643 | break; |
645 | }; | 644 | }; |
646 | } | 645 | } |
647 | #ifdef DEBUG_ELF | 646 | |
648 | printk("\n"); | 647 | pr_debug("\n"); |
649 | #endif | ||
650 | 648 | ||
651 | elf_bss = 0; | 649 | elf_bss = 0; |
652 | elf_brk = 0; | 650 | elf_brk = 0; |
@@ -657,12 +655,19 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
657 | end_code = 0; | 655 | end_code = 0; |
658 | end_data = 0; | 656 | end_data = 0; |
659 | 657 | ||
660 | retval = look_for_irix_interpreter(&elf_interpreter, | 658 | /* |
661 | &interpreter, | 659 | * If we get a return value, we change the value to be ENOEXEC |
660 | * so that we can exit gracefully and the main binary format | ||
661 | * search loop in 'fs/exec.c' will move onto the next handler | ||
662 | * which should be the normal ELF binary handler. | ||
663 | */ | ||
664 | retval = look_for_irix_interpreter(&elf_interpreter, &interpreter, | ||
662 | &interp_elf_ex, elf_phdata, bprm, | 665 | &interp_elf_ex, elf_phdata, bprm, |
663 | elf_ex.e_phnum); | 666 | elf_ex.e_phnum); |
664 | if (retval) | 667 | if (retval) { |
668 | retval = -ENOEXEC; | ||
665 | goto out_free_file; | 669 | goto out_free_file; |
670 | } | ||
666 | 671 | ||
667 | if (elf_interpreter) { | 672 | if (elf_interpreter) { |
668 | retval = verify_irix_interpreter(&interp_elf_ex); | 673 | retval = verify_irix_interpreter(&interp_elf_ex); |
@@ -692,7 +697,6 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
692 | /* Do this so that we can load the interpreter, if need be. We will | 697 | /* Do this so that we can load the interpreter, if need be. We will |
693 | * change some of these later. | 698 | * change some of these later. |
694 | */ | 699 | */ |
695 | set_mm_counter(current->mm, rss, 0); | ||
696 | setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); | 700 | setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); |
697 | current->mm->start_stack = bprm->p; | 701 | current->mm->start_stack = bprm->p; |
698 | 702 | ||
@@ -746,18 +750,16 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
746 | * IRIX maps a page at 0x200000 which holds some system | 750 | * IRIX maps a page at 0x200000 which holds some system |
747 | * information. Programs depend on this. | 751 | * information. Programs depend on this. |
748 | */ | 752 | */ |
749 | irix_map_prda_page (); | 753 | irix_map_prda_page(); |
750 | 754 | ||
751 | padzero(elf_bss); | 755 | padzero(elf_bss); |
752 | 756 | ||
753 | #ifdef DEBUG_ELF | 757 | pr_debug("(start_brk) %lx\n" , (long) current->mm->start_brk); |
754 | printk("(start_brk) %lx\n" , (long) current->mm->start_brk); | 758 | pr_debug("(end_code) %lx\n" , (long) current->mm->end_code); |
755 | printk("(end_code) %lx\n" , (long) current->mm->end_code); | 759 | pr_debug("(start_code) %lx\n" , (long) current->mm->start_code); |
756 | printk("(start_code) %lx\n" , (long) current->mm->start_code); | 760 | pr_debug("(end_data) %lx\n" , (long) current->mm->end_data); |
757 | printk("(end_data) %lx\n" , (long) current->mm->end_data); | 761 | pr_debug("(start_stack) %lx\n" , (long) current->mm->start_stack); |
758 | printk("(start_stack) %lx\n" , (long) current->mm->start_stack); | 762 | pr_debug("(brk) %lx\n" , (long) current->mm->brk); |
759 | printk("(brk) %lx\n" , (long) current->mm->brk); | ||
760 | #endif | ||
761 | 763 | ||
762 | #if 0 /* XXX No fucking way dude... */ | 764 | #if 0 /* XXX No fucking way dude... */ |
763 | /* Why this, you ask??? Well SVr4 maps page 0 as read-only, | 765 | /* Why this, you ask??? Well SVr4 maps page 0 as read-only, |
@@ -782,8 +784,7 @@ out_free_dentry: | |||
782 | allow_write_access(interpreter); | 784 | allow_write_access(interpreter); |
783 | fput(interpreter); | 785 | fput(interpreter); |
784 | out_free_interp: | 786 | out_free_interp: |
785 | if (elf_interpreter) | 787 | kfree(elf_interpreter); |
786 | kfree(elf_interpreter); | ||
787 | out_free_file: | 788 | out_free_file: |
788 | out_free_ph: | 789 | out_free_ph: |
789 | kfree (elf_phdata); | 790 | kfree (elf_phdata); |
@@ -813,7 +814,7 @@ static int load_irix_library(struct file *file) | |||
813 | 814 | ||
814 | /* First of all, some simple consistency checks. */ | 815 | /* First of all, some simple consistency checks. */ |
815 | if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || | 816 | if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || |
816 | !irix_elf_check_arch(&elf_ex) || !file->f_op->mmap) | 817 | !file->f_op->mmap) |
817 | return -ENOEXEC; | 818 | return -ENOEXEC; |
818 | 819 | ||
819 | /* Now read in all of the header information. */ | 820 | /* Now read in all of the header information. */ |
@@ -874,35 +875,36 @@ static int load_irix_library(struct file *file) | |||
874 | * phdrs there are in the USER_PHDRP array. We return the vaddr the | 875 | * phdrs there are in the USER_PHDRP array. We return the vaddr the |
875 | * first phdr was successfully mapped to. | 876 | * first phdr was successfully mapped to. |
876 | */ | 877 | */ |
877 | unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt) | 878 | unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt) |
878 | { | 879 | { |
879 | struct elf_phdr *hp; | 880 | unsigned long type, vaddr, filesz, offset, flags; |
881 | struct elf_phdr __user *hp; | ||
880 | struct file *filp; | 882 | struct file *filp; |
881 | int i, retval; | 883 | int i, retval; |
882 | 884 | ||
883 | #ifdef DEBUG_ELF | 885 | pr_debug("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n", |
884 | printk("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n", | 886 | fd, user_phdrp, cnt); |
885 | fd, user_phdrp, cnt); | ||
886 | #endif | ||
887 | 887 | ||
888 | /* First get the verification out of the way. */ | 888 | /* First get the verification out of the way. */ |
889 | hp = user_phdrp; | 889 | hp = user_phdrp; |
890 | if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) { | 890 | if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) { |
891 | #ifdef DEBUG_ELF | 891 | pr_debug("irix_mapelf: bad pointer to ELF PHDR!\n"); |
892 | printk("irix_mapelf: access_ok fails!\n"); | 892 | |
893 | #endif | ||
894 | return -EFAULT; | 893 | return -EFAULT; |
895 | } | 894 | } |
896 | 895 | ||
897 | #ifdef DEBUG_ELF | 896 | #ifdef DEBUG |
898 | dump_phdrs(user_phdrp, cnt); | 897 | dump_phdrs(user_phdrp, cnt); |
899 | #endif | 898 | #endif |
900 | 899 | ||
901 | for(i = 0; i < cnt; i++, hp++) | 900 | for (i = 0; i < cnt; i++, hp++) { |
902 | if(hp->p_type != PT_LOAD) { | 901 | if (__get_user(type, &hp->p_type)) |
902 | return -EFAULT; | ||
903 | if (type != PT_LOAD) { | ||
903 | printk("irix_mapelf: One section is not PT_LOAD!\n"); | 904 | printk("irix_mapelf: One section is not PT_LOAD!\n"); |
904 | return -ENOEXEC; | 905 | return -ENOEXEC; |
905 | } | 906 | } |
907 | } | ||
906 | 908 | ||
907 | filp = fget(fd); | 909 | filp = fget(fd); |
908 | if (!filp) | 910 | if (!filp) |
@@ -917,29 +919,40 @@ unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt) | |||
917 | for(i = 0; i < cnt; i++, hp++) { | 919 | for(i = 0; i < cnt; i++, hp++) { |
918 | int prot; | 920 | int prot; |
919 | 921 | ||
920 | prot = (hp->p_flags & PF_R) ? PROT_READ : 0; | 922 | retval = __get_user(vaddr, &hp->p_vaddr); |
921 | prot |= (hp->p_flags & PF_W) ? PROT_WRITE : 0; | 923 | retval |= __get_user(filesz, &hp->p_filesz); |
922 | prot |= (hp->p_flags & PF_X) ? PROT_EXEC : 0; | 924 | retval |= __get_user(offset, &hp->p_offset); |
925 | retval |= __get_user(flags, &hp->p_flags); | ||
926 | if (retval) | ||
927 | return retval; | ||
928 | |||
929 | prot = (flags & PF_R) ? PROT_READ : 0; | ||
930 | prot |= (flags & PF_W) ? PROT_WRITE : 0; | ||
931 | prot |= (flags & PF_X) ? PROT_EXEC : 0; | ||
932 | |||
923 | down_write(&current->mm->mmap_sem); | 933 |
924 | retval = do_mmap(filp, (hp->p_vaddr & 0xfffff000), | 934 | retval = do_mmap(filp, (vaddr & 0xfffff000), |
925 | (hp->p_filesz + (hp->p_vaddr & 0xfff)), | 935 | (filesz + (vaddr & 0xfff)), |
926 | prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), | 936 | prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), |
927 | (hp->p_offset & 0xfffff000)); | 937 | (offset & 0xfffff000)); |
928 | up_write(&current->mm->mmap_sem); | 938 |
929 | 939 | ||
930 | if(retval != (hp->p_vaddr & 0xfffff000)) { | 940 | if (retval != (vaddr & 0xfffff000)) { |
931 | printk("irix_mapelf: do_mmap fails with %d!\n", retval); | 941 | printk("irix_mapelf: do_mmap fails with %d!\n", retval); |
932 | fput(filp); | 942 | fput(filp); |
933 | return retval; | 943 | return retval; |
934 | } | 944 | } |
935 | } | 945 | } |
936 | 946 | ||
937 | #ifdef DEBUG_ELF | 947 | pr_debug("irix_mapelf: Success, returning %08lx\n", |
938 | printk("irix_mapelf: Success, returning %08lx\n", | 948 | (unsigned long) user_phdrp->p_vaddr); |
939 | (unsigned long) user_phdrp->p_vaddr); | 949 | |
940 | #endif | ||
941 | fput(filp); | 950 | fput(filp); |
942 | return user_phdrp->p_vaddr; | 951 | |
952 | if (__get_user(vaddr, &user_phdrp->p_vaddr)) | ||
953 | return -EFAULT; | ||
954 | |||
955 | return vaddr; | ||
943 | } | 956 | } |
944 | 957 | ||
945 | /* | 958 | /* |
@@ -952,9 +965,9 @@ unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt) | |||
952 | /* These are the only things you should do on a core-file: use only these | 965 | /* These are the only things you should do on a core-file: use only these |
953 | * functions to write out all the necessary info. | 966 | * functions to write out all the necessary info. |
954 | */ | 967 | */ |
955 | static int dump_write(struct file *file, const void *addr, int nr) | 968 | static int dump_write(struct file *file, const void __user *addr, int nr) |
956 | { | 969 | { |
957 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | 970 | return file->f_op->write(file, (const char __user *) addr, nr, &file->f_pos) == nr; |
958 | } | 971 | } |
959 | 972 | ||
960 | static int dump_seek(struct file *file, off_t off) | 973 | static int dump_seek(struct file *file, off_t off) |
@@ -1064,8 +1077,8 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1064 | struct elfhdr elf; | 1077 | struct elfhdr elf; |
1065 | off_t offset = 0, dataoff; | 1078 | off_t offset = 0, dataoff; |
1066 | int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; | 1079 | int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; |
1067 | int numnote = 4; | 1080 | int numnote = 3; |
1068 | struct memelfnote notes[4]; | 1081 | struct memelfnote notes[3]; |
1069 | struct elf_prstatus prstatus; /* NT_PRSTATUS */ | 1082 | struct elf_prstatus prstatus; /* NT_PRSTATUS */ |
1070 | elf_fpregset_t fpu; /* NT_PRFPREG */ | 1083 | elf_fpregset_t fpu; /* NT_PRFPREG */ |
1071 | struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ | 1084 | struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ |
@@ -1073,7 +1086,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1073 | /* Count what's needed to dump, up to the limit of coredump size. */ | 1086 | /* Count what's needed to dump, up to the limit of coredump size. */ |
1074 | segs = 0; | 1087 | segs = 0; |
1075 | size = 0; | 1088 | size = 0; |
1076 | for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { | 1089 | for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { |
1077 | if (maydump(vma)) | 1090 | if (maydump(vma)) |
1078 | { | 1091 | { |
1079 | int sz = vma->vm_end-vma->vm_start; | 1092 | int sz = vma->vm_end-vma->vm_start; |
@@ -1187,9 +1200,9 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1187 | 1200 | ||
1188 | len = current->mm->arg_end - current->mm->arg_start; | 1201 | len = current->mm->arg_end - current->mm->arg_start; |
1189 | len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len; | 1202 | len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len; |
1190 | copy_from_user(&psinfo.pr_psargs, | 1203 | (void *) copy_from_user(&psinfo.pr_psargs, |
1191 | (const char *)current->mm->arg_start, len); | 1204 | (const char __user *)current->mm->arg_start, len); |
1192 | for(i = 0; i < len; i++) | 1205 | for (i = 0; i < len; i++) |
1193 | if (psinfo.pr_psargs[i] == 0) | 1206 | if (psinfo.pr_psargs[i] == 0) |
1194 | psinfo.pr_psargs[i] = ' '; | 1207 | psinfo.pr_psargs[i] = ' '; |
1195 | psinfo.pr_psargs[len] = 0; | 1208 | psinfo.pr_psargs[len] = 0; |
@@ -1198,20 +1211,15 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1198 | } | 1211 | } |
1199 | strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); | 1212 | strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); |
1200 | 1213 | ||
1201 | notes[2].name = "CORE"; | ||
1202 | notes[2].type = NT_TASKSTRUCT; | ||
1203 | notes[2].datasz = sizeof(*current); | ||
1204 | notes[2].data = current; | ||
1205 | |||
1206 | /* Try to dump the FPU. */ | 1214 | /* Try to dump the FPU. */ |
1207 | prstatus.pr_fpvalid = dump_fpu (regs, &fpu); | 1215 | prstatus.pr_fpvalid = dump_fpu (regs, &fpu); |
1208 | if (!prstatus.pr_fpvalid) { | 1216 | if (!prstatus.pr_fpvalid) { |
1209 | numnote--; | 1217 | numnote--; |
1210 | } else { | 1218 | } else { |
1211 | notes[3].name = "CORE"; | 1219 | notes[2].name = "CORE"; |
1212 | notes[3].type = NT_PRFPREG; | 1220 | notes[2].type = NT_PRFPREG; |
1213 | notes[3].datasz = sizeof(fpu); | 1221 | notes[2].datasz = sizeof(fpu); |
1214 | notes[3].data = &fpu; | 1222 | notes[2].data = &fpu; |
1215 | } | 1223 | } |
1216 | 1224 | ||
1217 | /* Write notes phdr entry. */ | 1225 | /* Write notes phdr entry. */ |
@@ -1256,8 +1264,10 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1256 | phdr.p_memsz = sz; | 1264 | phdr.p_memsz = sz; |
1257 | offset += phdr.p_filesz; | 1265 | offset += phdr.p_filesz; |
1258 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; | 1266 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; |
1259 | if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W; | 1267 | if (vma->vm_flags & VM_WRITE) |
1260 | if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X; | 1268 | phdr.p_flags |= PF_W; |
1269 | if (vma->vm_flags & VM_EXEC) | ||
1270 | phdr.p_flags |= PF_X; | ||
1261 | phdr.p_align = PAGE_SIZE; | 1271 | phdr.p_align = PAGE_SIZE; |
1262 | 1272 | ||
1263 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1273 | DUMP_WRITE(&phdr, sizeof(phdr)); |
@@ -1283,7 +1293,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1283 | #ifdef DEBUG | 1293 | #ifdef DEBUG |
1284 | printk("elf_core_dump: writing %08lx %lx\n", addr, len); | 1294 | printk("elf_core_dump: writing %08lx %lx\n", addr, len); |
1285 | #endif | 1295 | #endif |
1286 | DUMP_WRITE((void *)addr, len); | 1296 | DUMP_WRITE((void __user *)addr, len); |
1287 | } | 1297 | } |
1288 | 1298 | ||
1289 | if ((off_t) file->f_pos != offset) { | 1299 | if ((off_t) file->f_pos != offset) { |
@@ -1299,7 +1309,7 @@ end_coredump: | |||
1299 | 1309 | ||
1300 | static int __init init_irix_binfmt(void) | 1310 | static int __init init_irix_binfmt(void) |
1301 | { | 1311 | { |
1302 | int init_inventory(void); | 1312 | extern int init_inventory(void); |
1303 | extern asmlinkage unsigned long sys_call_table; | 1313 | extern asmlinkage unsigned long sys_call_table; |
1304 | extern asmlinkage unsigned long sys_call_table_irix5; | 1314 | extern asmlinkage unsigned long sys_call_table_irix5; |
1305 | 1315 | ||
@@ -1318,7 +1328,9 @@ static int __init init_irix_binfmt(void) | |||
1318 | 1328 | ||
1319 | static void __exit exit_irix_binfmt(void) | 1329 | static void __exit exit_irix_binfmt(void) |
1320 | { | 1330 | { |
1321 | /* Remove the IRIX ELF loaders. */ | 1331 | /* |
1332 | * Remove the Irix ELF loader. | ||
1333 | */ | ||
1322 | unregister_binfmt(&irix_format); | 1334 | unregister_binfmt(&irix_format); |
1323 | } | 1335 | } |
1324 | 1336 | ||
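Most of the irixelf.c noise removed above came from `#ifdef DEBUG_ELF ... printk() ... #endif` wrappers; the file now uses plain `DEBUG` and `pr_debug()`, which only produces output when DEBUG is defined. A user-space stand-in for the same idea (the macro below imitates, and is not, the kernel's pr_debug):

	#include <stdio.h>

	/* One central macro decides whether debug output exists at all,
	 * so individual call sites need no #ifdef wrappers of their own. */
	#ifdef DEBUG
	#define pr_debug(fmt, ...) printf(fmt, ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) do { } while (0)
	#endif

	int main(void)
	{
		unsigned long vaddr = 0x400000UL, len = 4096UL;

		pr_debug("mapped %lu bytes at %08lx\n", len, vaddr);	/* compiled out unless -DDEBUG */
		return 0;
	}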
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c index 60aa98cd1791..de8584f62311 100644 --- a/arch/mips/kernel/irixinv.c +++ b/arch/mips/kernel/irixinv.c | |||
@@ -30,10 +30,10 @@ void add_to_inventory (int class, int type, int controller, int unit, int state) | |||
30 | inventory_items++; | 30 | inventory_items++; |
31 | } | 31 | } |
32 | 32 | ||
33 | int dump_inventory_to_user (void *userbuf, int size) | 33 | int dump_inventory_to_user (void __user *userbuf, int size) |
34 | { | 34 | { |
35 | inventory_t *inv = &inventory [0]; | 35 | inventory_t *inv = &inventory [0]; |
36 | inventory_t *user = userbuf; | 36 | inventory_t __user *user = userbuf; |
37 | int v; | 37 | int v; |
38 | 38 | ||
39 | if (!access_ok(VERIFY_WRITE, userbuf, size)) | 39 | if (!access_ok(VERIFY_WRITE, userbuf, size)) |
@@ -41,7 +41,8 @@ int dump_inventory_to_user (void *userbuf, int size) | |||
41 | 41 | ||
42 | for (v = 0; v < inventory_items; v++){ | 42 | for (v = 0; v < inventory_items; v++){ |
43 | inv = &inventory [v]; | 43 | inv = &inventory [v]; |
44 | copy_to_user (user, inv, sizeof (inventory_t)); | 44 | if (copy_to_user (user, inv, sizeof (inventory_t))) |
45 | return -EFAULT; | ||
45 | user++; | 46 | user++; |
46 | } | 47 | } |
47 | return inventory_items * sizeof (inventory_t); | 48 | return inventory_items * sizeof (inventory_t); |
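The irixinv.c hunk is a small instance of a pattern applied throughout this commit: copy_to_user() returns the number of bytes it could not copy, so the call has to be checked and turned into -EFAULT rather than ignored. A hedged sketch of that convention (the function and its arguments are illustrative, not taken from this file):

	#include <asm/uaccess.h>
	#include <linux/errno.h>

	static int demo_copy_out(void __user *ubuf, const void *kbuf, int len)
	{
		/* A non-zero return means part of the buffer faulted; report
		 * -EFAULT rather than pretending the copy succeeded. */
		if (copy_to_user(ubuf, kbuf, len))
			return -EFAULT;

		return len;
	}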
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c index 3cdc22346f4c..e2863821a3dd 100644 --- a/arch/mips/kernel/irixioctl.c +++ b/arch/mips/kernel/irixioctl.c | |||
@@ -59,7 +59,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
59 | { | 59 | { |
60 | struct tty_struct *tp, *rtp; | 60 | struct tty_struct *tp, *rtp; |
61 | mm_segment_t old_fs; | 61 | mm_segment_t old_fs; |
62 | int error = 0; | 62 | int i, error = 0; |
63 | 63 | ||
64 | #ifdef DEBUG_IOCTLS | 64 | #ifdef DEBUG_IOCTLS |
65 | printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd); | 65 | printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd); |
@@ -74,12 +74,13 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
74 | 74 | ||
75 | case 0x0000540d: { | 75 | case 0x0000540d: { |
76 | struct termios kt; | 76 | struct termios kt; |
77 | struct irix_termios *it = (struct irix_termios *) arg; | 77 | struct irix_termios __user *it = |
78 | (struct irix_termios __user *) arg; | ||
78 | 79 | ||
79 | #ifdef DEBUG_IOCTLS | 80 | #ifdef DEBUG_IOCTLS |
80 | printk("TCGETS, %08lx) ", arg); | 81 | printk("TCGETS, %08lx) ", arg); |
81 | #endif | 82 | #endif |
82 | if(!access_ok(VERIFY_WRITE, it, sizeof(*it))) { | 83 | if (!access_ok(VERIFY_WRITE, it, sizeof(*it))) { |
83 | error = -EFAULT; | 84 | error = -EFAULT; |
84 | break; | 85 | break; |
85 | } | 86 | } |
@@ -88,13 +89,14 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
88 | set_fs(old_fs); | 89 | set_fs(old_fs); |
89 | if (error) | 90 | if (error) |
90 | break; | 91 | break; |
91 | __put_user(kt.c_iflag, &it->c_iflag); | 92 | |
92 | __put_user(kt.c_oflag, &it->c_oflag); | 93 | error = __put_user(kt.c_iflag, &it->c_iflag); |
93 | __put_user(kt.c_cflag, &it->c_cflag); | 94 | error |= __put_user(kt.c_oflag, &it->c_oflag); |
94 | __put_user(kt.c_lflag, &it->c_lflag); | 95 | error |= __put_user(kt.c_cflag, &it->c_cflag); |
95 | for(error = 0; error < NCCS; error++) | 96 | error |= __put_user(kt.c_lflag, &it->c_lflag); |
96 | __put_user(kt.c_cc[error], &it->c_cc[error]); | 97 | |
97 | error = 0; | 98 | for (i = 0; i < NCCS; i++) |
99 | error |= __put_user(kt.c_cc[i], &it->c_cc[i]); | ||
98 | break; | 100 | break; |
99 | } | 101 | } |
100 | 102 | ||
@@ -112,14 +114,19 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
112 | old_fs = get_fs(); set_fs(get_ds()); | 114 | old_fs = get_fs(); set_fs(get_ds()); |
113 | error = sys_ioctl(fd, TCGETS, (unsigned long) &kt); | 115 | error = sys_ioctl(fd, TCGETS, (unsigned long) &kt); |
114 | set_fs(old_fs); | 116 | set_fs(old_fs); |
115 | if(error) | 117 | if (error) |
118 | break; | ||
119 | |||
120 | error = __get_user(kt.c_iflag, &it->c_iflag); | ||
121 | error |= __get_user(kt.c_oflag, &it->c_oflag); | ||
122 | error |= __get_user(kt.c_cflag, &it->c_cflag); | ||
123 | error |= __get_user(kt.c_lflag, &it->c_lflag); | ||
124 | |||
125 | for (i = 0; i < NCCS; i++) | ||
126 | error |= __get_user(kt.c_cc[i], &it->c_cc[i]); | ||
127 | |||
128 | if (error) | ||
116 | break; | 129 | break; |
117 | __get_user(kt.c_iflag, &it->c_iflag); | ||
118 | __get_user(kt.c_oflag, &it->c_oflag); | ||
119 | __get_user(kt.c_cflag, &it->c_cflag); | ||
120 | __get_user(kt.c_lflag, &it->c_lflag); | ||
121 | for(error = 0; error < NCCS; error++) | ||
122 | __get_user(kt.c_cc[error], &it->c_cc[error]); | ||
123 | old_fs = get_fs(); set_fs(get_ds()); | 130 | old_fs = get_fs(); set_fs(get_ds()); |
124 | error = sys_ioctl(fd, TCSETS, (unsigned long) &kt); | 131 | error = sys_ioctl(fd, TCSETS, (unsigned long) &kt); |
125 | set_fs(old_fs); | 132 | set_fs(old_fs); |
@@ -153,7 +160,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
153 | #ifdef DEBUG_IOCTLS | 160 | #ifdef DEBUG_IOCTLS |
154 | printk("rtp->session=%d ", rtp->session); | 161 | printk("rtp->session=%d ", rtp->session); |
155 | #endif | 162 | #endif |
156 | error = put_user(rtp->session, (unsigned long *) arg); | 163 | error = put_user(rtp->session, (unsigned long __user *) arg); |
157 | break; | 164 | break; |
158 | 165 | ||
159 | case 0x746e: | 166 | case 0x746e: |
@@ -195,50 +202,32 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg) | |||
195 | break; | 202 | break; |
196 | 203 | ||
197 | case 0x8004667e: | 204 | case 0x8004667e: |
198 | #ifdef DEBUG_IOCTLS | ||
199 | printk("FIONBIO, %08lx) arg=%d ", arg, *(int *)arg); | ||
200 | #endif | ||
201 | error = sys_ioctl(fd, FIONBIO, arg); | 205 | error = sys_ioctl(fd, FIONBIO, arg); |
202 | break; | 206 | break; |
203 | 207 | ||
204 | case 0x80047476: | 208 | case 0x80047476: |
205 | #ifdef DEBUG_IOCTLS | ||
206 | printk("TIOCSPGRP, %08lx) arg=%d ", arg, *(int *)arg); | ||
207 | #endif | ||
208 | error = sys_ioctl(fd, TIOCSPGRP, arg); | 209 | error = sys_ioctl(fd, TIOCSPGRP, arg); |
209 | break; | 210 | break; |
210 | 211 | ||
211 | case 0x8020690c: | 212 | case 0x8020690c: |
212 | #ifdef DEBUG_IOCTLS | ||
213 | printk("SIOCSIFADDR, %08lx) arg=%d ", arg, *(int *)arg); | ||
214 | #endif | ||
215 | error = sys_ioctl(fd, SIOCSIFADDR, arg); | 213 | error = sys_ioctl(fd, SIOCSIFADDR, arg); |
216 | break; | 214 | break; |
217 | 215 | ||
218 | case 0x80206910: | 216 | case 0x80206910: |
219 | #ifdef DEBUG_IOCTLS | ||
220 | printk("SIOCSIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg); | ||
221 | #endif | ||
222 | error = sys_ioctl(fd, SIOCSIFFLAGS, arg); | 217 | error = sys_ioctl(fd, SIOCSIFFLAGS, arg); |
223 | break; | 218 | break; |
224 | 219 | ||
225 | case 0xc0206911: | 220 | case 0xc0206911: |
226 | #ifdef DEBUG_IOCTLS | ||
227 | printk("SIOCGIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg); | ||
228 | #endif | ||
229 | error = sys_ioctl(fd, SIOCGIFFLAGS, arg); | 221 | error = sys_ioctl(fd, SIOCGIFFLAGS, arg); |
230 | break; | 222 | break; |
231 | 223 | ||
232 | case 0xc020691b: | 224 | case 0xc020691b: |
233 | #ifdef DEBUG_IOCTLS | ||
234 | printk("SIOCGIFMETRIC, %08lx) arg=%d ", arg, *(int *)arg); | ||
235 | #endif | ||
236 | error = sys_ioctl(fd, SIOCGIFMETRIC, arg); | 225 | error = sys_ioctl(fd, SIOCGIFMETRIC, arg); |
237 | break; | 226 | break; |
238 | 227 | ||
239 | default: { | 228 | default: { |
240 | #ifdef DEBUG_MISSING_IOCTL | 229 | #ifdef DEBUG_MISSING_IOCTL |
241 | char *msg = "Unimplemented IOCTL cmd tell linux@engr.sgi.com\n"; | 230 | char *msg = "Unimplemented IOCTL cmd tell linux-mips@linux-mips.org\n"; |
242 | 231 | ||
243 | #ifdef DEBUG_IOCTLS | 232 | #ifdef DEBUG_IOCTLS |
244 | printk("UNIMP_IOCTL, %08lx)\n", arg); | 233 | printk("UNIMP_IOCTL, %08lx)\n", arg); |
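The TCGETS/TCSETS hunks above stop reusing `error` as a loop counter and instead OR together the result of every __get_user()/__put_user() call, checking once at the end. A minimal sketch of that accumulation style over an illustrative structure (not the real irix_termios layout):

	#include <asm/uaccess.h>
	#include <linux/errno.h>

	struct demo_termios {			/* illustrative layout only */
		unsigned int c_iflag, c_oflag;
		unsigned char c_cc[8];
	};

	static int demo_copy_flags_out(struct demo_termios __user *it,
				       const struct demo_termios *kt)
	{
		int error, i;

		if (!access_ok(VERIFY_WRITE, it, sizeof(*it)))
			return -EFAULT;

		/* Each __put_user() returns 0 or -EFAULT; OR-ing the results
		 * keeps any failure visible for the single check at the end. */
		error = __put_user(kt->c_iflag, &it->c_iflag);
		error |= __put_user(kt->c_oflag, &it->c_oflag);
		for (i = 0; i < 8; i++)
			error |= __put_user(kt->c_cc[i], &it->c_cc[i]);

		return error ? -EFAULT : 0;
	}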
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c index eff89322ba50..908e63684208 100644 --- a/arch/mips/kernel/irixsig.c +++ b/arch/mips/kernel/irixsig.c | |||
@@ -76,36 +76,39 @@ static inline void dump_irix5_sigctx(struct sigctx_irix5 *c) | |||
76 | } | 76 | } |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs, | 79 | static int setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs, |
80 | int signr, sigset_t *oldmask) | 80 | int signr, sigset_t *oldmask) |
81 | { | 81 | { |
82 | struct sigctx_irix5 __user *ctx; | ||
82 | unsigned long sp; | 83 | unsigned long sp; |
83 | struct sigctx_irix5 *ctx; | 84 | int error, i; |
84 | int i; | ||
85 | 85 | ||
86 | sp = regs->regs[29]; | 86 | sp = regs->regs[29]; |
87 | sp -= sizeof(struct sigctx_irix5); | 87 | sp -= sizeof(struct sigctx_irix5); |
88 | sp &= ~(0xf); | 88 | sp &= ~(0xf); |
89 | ctx = (struct sigctx_irix5 *) sp; | 89 | ctx = (struct sigctx_irix5 __user *) sp; |
90 | if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx))) | 90 | if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx))) |
91 | goto segv_and_exit; | 91 | goto segv_and_exit; |
92 | 92 | ||
93 | __put_user(0, &ctx->weird_fpu_thing); | 93 | error = __put_user(0, &ctx->weird_fpu_thing); |
94 | __put_user(~(0x00000001), &ctx->rmask); | 94 | error |= __put_user(~(0x00000001), &ctx->rmask); |
95 | __put_user(0, &ctx->regs[0]); | 95 | error |= __put_user(0, &ctx->regs[0]); |
96 | for(i = 1; i < 32; i++) | 96 | for(i = 1; i < 32; i++) |
97 | __put_user((u64) regs->regs[i], &ctx->regs[i]); | 97 | error |= __put_user((u64) regs->regs[i], &ctx->regs[i]); |
98 | |||
99 | error |= __put_user((u64) regs->hi, &ctx->hi); | ||
100 | error |= __put_user((u64) regs->lo, &ctx->lo); | ||
101 | error |= __put_user((u64) regs->cp0_epc, &ctx->pc); | ||
102 | error |= __put_user(!!used_math(), &ctx->usedfp); | ||
103 | error |= __put_user((u64) regs->cp0_cause, &ctx->cp0_cause); | ||
104 | error |= __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr); | ||
98 | 105 | ||
99 | __put_user((u64) regs->hi, &ctx->hi); | 106 | error |= __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */ |
100 | __put_user((u64) regs->lo, &ctx->lo); | ||
101 | __put_user((u64) regs->cp0_epc, &ctx->pc); | ||
102 | __put_user(!!used_math(), &ctx->usedfp); | ||
103 | __put_user((u64) regs->cp0_cause, &ctx->cp0_cause); | ||
104 | __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr); | ||
105 | 107 | ||
106 | __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */ | 108 | error |= __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t)) ? -EFAULT : 0; |
107 | 109 | ||
108 | __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t)); | 110 | if (error) |
111 | goto segv_and_exit; | ||
109 | 112 | ||
110 | #ifdef DEBUG_SIG | 113 | #ifdef DEBUG_SIG |
111 | dump_irix5_sigctx(ctx); | 114 | dump_irix5_sigctx(ctx); |
@@ -117,13 +120,14 @@ static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs, | |||
117 | regs->regs[7] = (unsigned long) ka->sa.sa_handler; | 120 | regs->regs[7] = (unsigned long) ka->sa.sa_handler; |
118 | regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer; | 121 | regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer; |
119 | 122 | ||
120 | return; | 123 | return 1; |
121 | 124 | ||
122 | segv_and_exit: | 125 | segv_and_exit: |
123 | force_sigsegv(signr, current); | 126 | force_sigsegv(signr, current); |
127 | return 0; | ||
124 | } | 128 | } |
125 | 129 | ||
126 | static void inline | 130 | static int inline |
127 | setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | 131 | setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, |
128 | int signr, sigset_t *oldmask, siginfo_t *info) | 132 | int signr, sigset_t *oldmask, siginfo_t *info) |
129 | { | 133 | { |
@@ -131,9 +135,11 @@ setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
131 | do_exit(SIGSEGV); | 135 | do_exit(SIGSEGV); |
132 | } | 136 | } |
133 | 137 | ||
134 | static inline void handle_signal(unsigned long sig, siginfo_t *info, | 138 | static inline int handle_signal(unsigned long sig, siginfo_t *info, |
135 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) | 139 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) |
136 | { | 140 | { |
141 | int ret; | ||
142 | |||
137 | switch(regs->regs[0]) { | 143 | switch(regs->regs[0]) { |
138 | case ERESTARTNOHAND: | 144 | case ERESTARTNOHAND: |
139 | regs->regs[2] = EINTR; | 145 | regs->regs[2] = EINTR; |
@@ -151,9 +157,9 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
151 | regs->regs[0] = 0; /* Don't deal with this again. */ | 157 | regs->regs[0] = 0; /* Don't deal with this again. */ |
152 | 158 | ||
153 | if (ka->sa.sa_flags & SA_SIGINFO) | 159 | if (ka->sa.sa_flags & SA_SIGINFO) |
154 | setup_irix_rt_frame(ka, regs, sig, oldset, info); | 160 | ret = setup_irix_rt_frame(ka, regs, sig, oldset, info); |
155 | else | 161 | else |
156 | setup_irix_frame(ka, regs, sig, oldset); | 162 | ret = setup_irix_frame(ka, regs, sig, oldset); |
157 | 163 | ||
158 | spin_lock_irq(&current->sighand->siglock); | 164 |
159 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 165 |
@@ -161,6 +167,8 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
161 | sigaddset(&current->blocked,sig); | 167 |
162 | recalc_sigpending(); | 168 | recalc_sigpending(); |
163 | spin_unlock_irq(&current->sighand->siglock); | 169 |
170 | |||
171 | return ret; | ||
164 | } | 172 | } |
165 | 173 | ||
166 | asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs) | 174 | asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs) |
@@ -184,10 +192,8 @@ asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs) | |||
184 | oldset = &current->blocked; | 192 |
185 | 193 | ||
186 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 194 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
187 | if (signr > 0) { | 195 | if (signr > 0) |
188 | handle_signal(signr, &info, &ka, oldset, regs); | 196 | return handle_signal(signr, &info, &ka, oldset, regs); |
189 | return 1; | ||
190 | } | ||
191 | 197 | ||
192 | no_signal: | 198 | no_signal: |
193 | /* | 199 | /* |
@@ -208,10 +214,11 @@ no_signal: | |||
208 | asmlinkage void | 214 | asmlinkage void |
209 | irix_sigreturn(struct pt_regs *regs) | 215 | irix_sigreturn(struct pt_regs *regs) |
210 | { | 216 | { |
211 | struct sigctx_irix5 *context, *magic; | 217 | struct sigctx_irix5 __user *context, *magic; |
212 | unsigned long umask, mask; | 218 | unsigned long umask, mask; |
213 | u64 *fregs; | 219 | u64 *fregs; |
214 | int sig, i, base = 0; | 220 | u32 usedfp; |
221 | int error, sig, i, base = 0; | ||
215 | sigset_t blocked; | 222 | sigset_t blocked; |
216 | 223 | ||
217 | /* Always make any pending restarted system calls return -EINTR */ | 224 | /* Always make any pending restarted system calls return -EINTR */ |
@@ -220,8 +227,8 @@ irix_sigreturn(struct pt_regs *regs) | |||
220 | if (regs->regs[2] == 1000) | 227 | if (regs->regs[2] == 1000) |
221 | base = 1; | 228 | base = 1; |
222 | 229 | ||
223 | context = (struct sigctx_irix5 *) regs->regs[base + 4]; | 230 | context = (struct sigctx_irix5 __user *) regs->regs[base + 4]; |
224 | magic = (struct sigctx_irix5 *) regs->regs[base + 5]; | 231 | magic = (struct sigctx_irix5 __user *) regs->regs[base + 5]; |
225 | sig = (int) regs->regs[base + 6]; | 232 | sig = (int) regs->regs[base + 6]; |
226 | #ifdef DEBUG_SIG | 233 | #ifdef DEBUG_SIG |
227 | printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n", | 234 | printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n", |
@@ -236,25 +243,31 @@ irix_sigreturn(struct pt_regs *regs) | |||
236 | dump_irix5_sigctx(context); | 243 | dump_irix5_sigctx(context); |
237 | #endif | 244 | #endif |
238 | 245 | ||
239 | __get_user(regs->cp0_epc, &context->pc); | 246 | error = __get_user(regs->cp0_epc, &context->pc); |
240 | umask = context->rmask; mask = 2; | 247 | error |= __get_user(umask, &context->rmask); |
248 | |||
249 | mask = 2; | ||
241 | for (i = 1; i < 32; i++, mask <<= 1) { | 250 | for (i = 1; i < 32; i++, mask <<= 1) { |
242 | if(umask & mask) | 251 | if (umask & mask) |
243 | __get_user(regs->regs[i], &context->regs[i]); | 252 | error |= __get_user(regs->regs[i], &context->regs[i]); |
244 | } | 253 | } |
245 | __get_user(regs->hi, &context->hi); | 254 | error |= __get_user(regs->hi, &context->hi); |
246 | __get_user(regs->lo, &context->lo); | 255 | error |= __get_user(regs->lo, &context->lo); |
247 | 256 | ||
248 | if ((umask & 1) && context->usedfp) { | 257 | error |= __get_user(usedfp, &context->usedfp); |
258 | if ((umask & 1) && usedfp) { | ||
249 | fregs = (u64 *) &current->thread.fpu; | 259 |
260 | |||
250 | for(i = 0; i < 32; i++) | 261 | for(i = 0; i < 32; i++) |
251 | fregs[i] = (u64) context->fpregs[i]; | 262 | error |= __get_user(fregs[i], &context->fpregs[i]); |
252 | __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr); | 263 | error |= __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr); |
253 | } | 264 | } |
254 | 265 | ||
255 | /* XXX do sigstack crapola here... XXX */ | 266 | /* XXX do sigstack crapola here... XXX */ |
256 | 267 | ||
257 | if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked))) | 268 | error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0; |
269 | |||
270 | if (error) | ||
258 | goto badframe; | 271 | goto badframe; |
259 | 272 | ||
260 | sigdelsetmask(&blocked, ~_BLOCKABLE); | 273 | sigdelsetmask(&blocked, ~_BLOCKABLE); |
@@ -296,8 +309,8 @@ static inline void dump_sigact_irix5(struct sigact_irix5 *p) | |||
296 | #endif | 309 | #endif |
297 | 310 | ||
298 | asmlinkage int | 311 | asmlinkage int |
299 | irix_sigaction(int sig, const struct sigaction *act, | 312 | irix_sigaction(int sig, const struct sigaction __user *act, |
300 | struct sigaction *oact, void *trampoline) | 313 | struct sigaction __user *oact, void __user *trampoline) |
301 | { | 314 | { |
302 | struct k_sigaction new_ka, old_ka; | 315 | struct k_sigaction new_ka, old_ka; |
303 | int ret; | 316 | int ret; |
@@ -311,12 +324,16 @@ irix_sigaction(int sig, const struct sigaction *act, | |||
311 | #endif | 324 | #endif |
312 | if (act) { | 325 | if (act) { |
313 | sigset_t mask; | 326 | sigset_t mask; |
314 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 327 | int err; |
315 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 328 | |
316 | __get_user(new_ka.sa.sa_flags, &act->sa_flags)) | 329 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) |
317 | return -EFAULT; | 330 | return -EFAULT; |
331 | err = __get_user(new_ka.sa.sa_handler, &act->sa_handler); | ||
332 | err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
318 | 333 | ||
319 | __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t)); | 334 | err |= __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t)) ? -EFAULT : 0; |
335 | if (err) | ||
336 | return err; | ||
320 | 337 | ||
321 | /* | 338 | /* |
322 | * Hmmm... methinks IRIX libc always passes a valid trampoline | 339 | * Hmmm... methinks IRIX libc always passes a valid trampoline |
@@ -330,30 +347,37 @@ irix_sigaction(int sig, const struct sigaction *act, | |||
330 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 347 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
331 | 348 | ||
332 | if (!ret && oact) { | 349 | if (!ret && oact) { |
333 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 350 | int err; |
334 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 351 | |
335 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) | 352 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) |
353 | return -EFAULT; | ||
354 | |||
355 | err = __put_user(old_ka.sa.sa_handler, &oact->sa_handler); | ||
356 | err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
357 | err |= __copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, | ||
358 | sizeof(sigset_t)) ? -EFAULT : 0; | ||
359 | if (err) | ||
336 | return -EFAULT; | 360 | return -EFAULT; |
337 | __copy_to_user(&old_ka.sa.sa_mask, &oact->sa_mask, | ||
338 | sizeof(sigset_t)); | ||
339 | } | 361 | } |
340 | 362 | ||
341 | return ret; | 363 | return ret; |
342 | } | 364 | } |
343 | 365 | ||
344 | asmlinkage int irix_sigpending(irix_sigset_t *set) | 366 | asmlinkage int irix_sigpending(irix_sigset_t __user *set) |
345 | { | 367 | { |
346 | return do_sigpending(set, sizeof(*set)); | 368 | return do_sigpending(set, sizeof(*set)); |
347 | } | 369 | } |
348 | 370 | ||
349 | asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old) | 371 | asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new, |
372 | irix_sigset_t __user *old) | ||
350 | { | 373 | { |
351 | sigset_t oldbits, newbits; | 374 | sigset_t oldbits, newbits; |
352 | 375 | ||
353 | if (new) { | 376 | if (new) { |
354 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) | 377 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) |
355 | return -EFAULT; | 378 | return -EFAULT; |
356 | __copy_from_user(&newbits, new, sizeof(unsigned long)*4); | 379 | if (__copy_from_user(&newbits, new, sizeof(unsigned long)*4)) |
380 | return -EFAULT; | ||
357 | sigdelsetmask(&newbits, ~_BLOCKABLE); | 381 | sigdelsetmask(&newbits, ~_BLOCKABLE); |
358 | 382 | ||
359 | spin_lock_irq(&current->sighand->siglock); | 383 |
@@ -381,20 +405,19 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old) | |||
381 | recalc_sigpending(); | 405 | recalc_sigpending(); |
382 | spin_unlock_irq(&current->sighand->siglock); | 406 |
383 | } | 407 | } |
384 | if(old) { | 408 | if (old) |
385 | if (!access_ok(VERIFY_WRITE, old, sizeof(*old))) | 409 | return copy_to_user(old, &current->blocked,
386 | return -EFAULT; | 410 | sizeof(unsigned long)*4) ? -EFAULT : 0; |
387 | __copy_to_user(old, &current->blocked, sizeof(unsigned long)*4); | ||
388 | } | ||
389 | 411 | ||
390 | return 0; | 412 | return 0; |
391 | } | 413 | } |
392 | 414 | ||
393 | asmlinkage int irix_sigsuspend(struct pt_regs *regs) | 415 | asmlinkage int irix_sigsuspend(struct pt_regs *regs) |
394 | { | 416 | { |
395 | sigset_t *uset, saveset, newset; | 417 | sigset_t saveset, newset; |
418 | sigset_t __user *uset; | ||
396 | 419 | ||
397 | uset = (sigset_t *) regs->regs[4]; | 420 | uset = (sigset_t __user *) regs->regs[4]; |
398 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) | 421 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) |
399 | return -EFAULT; | 422 | return -EFAULT; |
400 | sigdelsetmask(&newset, ~_BLOCKABLE); | 423 | sigdelsetmask(&newset, ~_BLOCKABLE); |
@@ -440,12 +463,13 @@ struct irix5_siginfo { | |||
440 | } stuff; | 463 | } stuff; |
441 | }; | 464 | }; |
442 | 465 | ||
443 | asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | 466 | asmlinkage int irix_sigpoll_sys(unsigned long __user *set, |
444 | struct timespec *tp) | 467 | struct irix5_siginfo __user *info, struct timespec __user *tp) |
445 | { | 468 | { |
446 | long expire = MAX_SCHEDULE_TIMEOUT; | 469 | long expire = MAX_SCHEDULE_TIMEOUT; |
447 | sigset_t kset; | 470 | sigset_t kset; |
448 | int i, sig, error, timeo = 0; | 471 | int i, sig, error, timeo = 0; |
472 | struct timespec ktp; | ||
449 | 473 | ||
450 | #ifdef DEBUG_SIG | 474 | #ifdef DEBUG_SIG |
451 | printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n", | 475 | printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n", |
@@ -456,14 +480,8 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | |||
456 | if (!set) | 480 | if (!set) |
457 | return -EINVAL; | 481 | return -EINVAL; |
458 | 482 | ||
459 | if (!access_ok(VERIFY_READ, set, sizeof(kset))) { | 483 | if (copy_from_user(&kset, set, sizeof(kset))) |
460 | error = -EFAULT; | 484 | return -EFAULT; |
461 | goto out; | ||
462 | } | ||
463 | |||
464 | __copy_from_user(&kset, set, sizeof(set)); | ||
465 | if (error) | ||
466 | goto out; | ||
467 | 485 | ||
468 | if (info && clear_user(info, sizeof(*info))) { | 486 | if (info && clear_user(info, sizeof(*info))) { |
469 | error = -EFAULT; | 487 | error = -EFAULT; |
@@ -471,19 +489,21 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | |||
471 | } | 489 | } |
472 | 490 | ||
473 | if (tp) { | 491 | if (tp) { |
474 | if (!access_ok(VERIFY_READ, tp, sizeof(*tp))) | 492 | if (copy_from_user(&ktp, tp, sizeof(*tp))) |
475 | return -EFAULT; | 493 | return -EFAULT; |
476 | if (!tp->tv_sec && !tp->tv_nsec) { | 494 | |
477 | error = -EINVAL; | 495 | if (!ktp.tv_sec && !ktp.tv_nsec) |
478 | goto out; | 496 | return -EINVAL; |
479 | } | 497 | |
480 | expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec); | 498 | expire = timespec_to_jiffies(&ktp) + |
499 | (ktp.tv_sec || ktp.tv_nsec); | ||
481 | } | 500 | } |
482 | 501 | ||
483 | while(1) { | 502 | while(1) { |
484 | long tmp = 0; | 503 | long tmp = 0; |
485 | 504 | ||
486 | expire = schedule_timeout_interruptible(expire); | 505 | current->state = TASK_INTERRUPTIBLE; |
506 | expire = schedule_timeout(expire); | ||
487 | 507 | ||
488 | for (i=0; i<=4; i++) | 508 | for (i=0; i<=4; i++) |
489 | tmp |= (current->pending.signal.sig[i] & kset.sig[i]); | 509 | tmp |= (current->pending.signal.sig[i] & kset.sig[i]); |
@@ -500,15 +520,14 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info, | |||
500 | if (timeo) | 520 | if (timeo) |
501 | return -EAGAIN; | 521 | return -EAGAIN; |
502 | 522 | ||
503 | for(sig = 1; i <= 65 /* IRIX_NSIG */; sig++) { | 523 | for (sig = 1; i <= 65 /* IRIX_NSIG */; sig++) { |
504 | if (sigismember (&kset, sig)) | 524 | if (sigismember (&kset, sig)) |
505 | continue; | 525 | continue; |
506 | if (sigismember (¤t->pending.signal, sig)) { | 526 | if (sigismember (¤t->pending.signal, sig)) { |
507 | /* XXX need more than this... */ | 527 | /* XXX need more than this... */ |
508 | if (info) | 528 | if (info) |
509 | info->sig = sig; | 529 | return copy_to_user(&info->sig, &sig, sizeof(sig)) ? -EFAULT : 0; |
510 | error = 0; | 530 | return 0; |
511 | goto out; | ||
512 | } | 531 | } |
513 | } | 532 | } |
514 | 533 | ||
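In the irix_sigpoll_sys() hunk above, the timeout is now copied into the kernel-side ktp before use instead of dereferencing the user pointer, and one extra tick is added whenever the request is nonzero so that truncation to jiffies cannot produce an immediate timeout. The sketch below is a rough standalone illustration of that rounding; HZ and the conversion function are simplified stand-ins, not the kernel's timespec_to_jiffies().

/* Illustrative only: why the timeout conversion adds
 * (ktp.tv_sec || ktp.tv_nsec).  Conversion to jiffies truncates, so a
 * small nonzero timeout could otherwise become zero jiffies; the extra
 * tick keeps a nonzero request strictly in the future. */
#include <stdio.h>

#define HZ 100                          /* assumed tick rate for the demo */

struct timespec_demo { long tv_sec; long tv_nsec; };

static long demo_timespec_to_jiffies(const struct timespec_demo *ts)
{
        return ts->tv_sec * HZ + ts->tv_nsec / (1000000000L / HZ);
}

int main(void)
{
        struct timespec_demo ktp = { 0, 3000000 };      /* 3 ms */
        long expire;

        expire = demo_timespec_to_jiffies(&ktp) + (ktp.tv_sec || ktp.tv_nsec);
        printf("3ms at HZ=%d -> %ld jiffie(s)\n", HZ, expire);
        return 0;
}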
@@ -534,8 +553,9 @@ extern int getrusage(struct task_struct *, int, struct rusage __user *); | |||
534 | 553 | ||
535 | #define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG) | 554 | #define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG) |
536 | 555 | ||
537 | asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info, | 556 | asmlinkage int irix_waitsys(int type, int pid, |
538 | int options, struct rusage *ru) | 557 | struct irix5_siginfo __user *info, int options, |
558 | struct rusage __user *ru) | ||
539 | { | 559 | { |
540 | int flag, retval; | 560 | int flag, retval; |
541 | DECLARE_WAITQUEUE(wait, current); | 561 | DECLARE_WAITQUEUE(wait, current); |
@@ -543,28 +563,22 @@ asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info, | |||
543 | struct task_struct *p; | 563 | struct task_struct *p; |
544 | struct list_head *_p; | 564 | struct list_head *_p; |
545 | 565 | ||
546 | if (!info) { | 566 | if (!info) |
547 | retval = -EINVAL; | 567 | return -EINVAL; |
548 | goto out; | 568 | |
549 | } | 569 | if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) |
550 | if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) { | 570 | return -EFAULT; |
551 | retval = -EFAULT; | 571 | |
552 | goto out; | 572 | if (ru) |
553 | } | 573 | if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) |
554 | if (ru) { | 574 | return -EFAULT; |
555 | if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) { | 575 | |
556 | retval = -EFAULT; | 576 | if (options & ~W_MASK) |
557 | goto out; | 577 | return -EINVAL; |
558 | } | 578 | |
559 | } | 579 | if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL) |
560 | if (options & ~(W_MASK)) { | 580 | return -EINVAL; |
561 | retval = -EINVAL; | 581 | |
562 | goto out; | ||
563 | } | ||
564 | if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL) { | ||
565 | retval = -EINVAL; | ||
566 | goto out; | ||
567 | } | ||
568 | add_wait_queue(¤t->signal->wait_chldexit, &wait); | 582 | add_wait_queue(¤t->signal->wait_chldexit, &wait); |
569 | repeat: | 583 | repeat: |
570 | flag = 0; | 584 | flag = 0; |
@@ -595,18 +609,20 @@ repeat: | |||
595 | add_parent(p, p->parent); | 609 | add_parent(p, p->parent); |
596 | write_unlock_irq(&tasklist_lock); | 610 | write_unlock_irq(&tasklist_lock); |
597 | retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; | 611 | retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; |
598 | if (!retval && ru) { | 612 | if (retval) |
599 | retval |= __put_user(SIGCHLD, &info->sig); | 613 | goto end_waitsys; |
600 | retval |= __put_user(0, &info->code); | 614 | |
601 | retval |= __put_user(p->pid, &info->stuff.procinfo.pid); | 615 | retval = __put_user(SIGCHLD, &info->sig); |
602 | retval |= __put_user((p->exit_code >> 8) & 0xff, | 616 | retval |= __put_user(0, &info->code); |
603 | &info->stuff.procinfo.procdata.child.status); | 617 | retval |= __put_user(p->pid, &info->stuff.procinfo.pid); |
604 | retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime); | 618 | retval |= __put_user((p->exit_code >> 8) & 0xff, |
605 | retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime); | 619 | &info->stuff.procinfo.procdata.child.status); |
606 | } | 620 | retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime); |
607 | if (!retval) { | 621 | retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime); |
608 | p->exit_code = 0; | 622 | if (retval) |
609 | } | 623 | goto end_waitsys; |
624 | |||
625 | p->exit_code = 0; | ||
610 | goto end_waitsys; | 626 | goto end_waitsys; |
611 | 627 | ||
612 | case EXIT_ZOMBIE: | 628 | case EXIT_ZOMBIE: |
@@ -614,16 +630,18 @@ repeat: | |||
614 | current->signal->cstime += p->stime + p->signal->cstime; | 630 | current->signal->cstime += p->stime + p->signal->cstime; |
615 | if (ru != NULL) | 631 | if (ru != NULL) |
616 | getrusage(p, RUSAGE_BOTH, ru); | 632 | getrusage(p, RUSAGE_BOTH, ru); |
617 | __put_user(SIGCHLD, &info->sig); | 633 | retval = __put_user(SIGCHLD, &info->sig); |
618 | __put_user(1, &info->code); /* CLD_EXITED */ | 634 | retval |= __put_user(1, &info->code); /* CLD_EXITED */ |
619 | __put_user(p->pid, &info->stuff.procinfo.pid); | 635 | retval |= __put_user(p->pid, &info->stuff.procinfo.pid); |
620 | __put_user((p->exit_code >> 8) & 0xff, | 636 | retval |= __put_user((p->exit_code >> 8) & 0xff, |
621 | &info->stuff.procinfo.procdata.child.status); | 637 | &info->stuff.procinfo.procdata.child.status); |
622 | __put_user(p->utime, | 638 | retval |= __put_user(p->utime, |
623 | &info->stuff.procinfo.procdata.child.utime); | 639 | &info->stuff.procinfo.procdata.child.utime); |
624 | __put_user(p->stime, | 640 | retval |= __put_user(p->stime, |
625 | &info->stuff.procinfo.procdata.child.stime); | 641 | &info->stuff.procinfo.procdata.child.stime); |
626 | retval = 0; | 642 | if (retval) |
643 | return retval; | ||
644 | |||
627 | if (p->real_parent != p->parent) { | 645 | if (p->real_parent != p->parent) { |
628 | write_lock_irq(&tasklist_lock); | 646 | write_lock_irq(&tasklist_lock); |
629 | remove_parent(p); | 647 | remove_parent(p); |
@@ -656,7 +674,6 @@ end_waitsys: | |||
656 | current->state = TASK_RUNNING; | 674 | current->state = TASK_RUNNING; |
657 | remove_wait_queue(¤t->signal->wait_chldexit, &wait); | 675 | remove_wait_queue(¤t->signal->wait_chldexit, &wait); |
658 | 676 | ||
659 | out: | ||
660 | return retval; | 677 | return retval; |
661 | } | 678 | } |
662 | 679 | ||
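The irix_waitsys() changes above replace unchecked __put_user() calls with OR-accumulated return values that are tested once at the end. A small userspace sketch of the idiom follows, using a hypothetical demo_put_user() macro in place of the real __put_user().

/* Illustrative only: each store reports 0 or -EFAULT; OR-ing the results
 * means one check at the end catches a fault in any of the stores. */
#include <stdio.h>

#define EFAULT 14

/* Mock: writing through a NULL pointer "faults", anything else succeeds. */
#define demo_put_user(val, ptr) \
        ((ptr) ? (*(ptr) = (val), 0) : -EFAULT)

int main(void)
{
        int status, code, *bad = NULL;
        int retval;

        retval  = demo_put_user(9, &status);    /* SIGCHLD-style field */
        retval |= demo_put_user(1, &code);      /* CLD_EXITED-style field */
        printf("all stores ok -> %d\n", retval);

        retval |= demo_put_user(0, bad);        /* one faulting store */
        printf("after a fault -> %d\n", retval);
        return 0;
}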
@@ -675,39 +692,39 @@ struct irix5_context { | |||
675 | 692 | ||
676 | asmlinkage int irix_getcontext(struct pt_regs *regs) | 693 | asmlinkage int irix_getcontext(struct pt_regs *regs) |
677 | { | 694 | { |
678 | int i, base = 0; | 695 | int error, i, base = 0; |
679 | struct irix5_context *ctx; | 696 | struct irix5_context __user *ctx; |
680 | unsigned long flags; | 697 | unsigned long flags; |
681 | 698 | ||
682 | if (regs->regs[2] == 1000) | 699 | if (regs->regs[2] == 1000) |
683 | base = 1; | 700 | base = 1; |
684 | ctx = (struct irix5_context *) regs->regs[base + 4]; | 701 | ctx = (struct irix5_context __user *) regs->regs[base + 4]; |
685 | 702 | ||
686 | #ifdef DEBUG_SIG | 703 | #ifdef DEBUG_SIG |
687 | printk("[%s:%d] irix_getcontext(%p)\n", | 704 | printk("[%s:%d] irix_getcontext(%p)\n", |
688 | current->comm, current->pid, ctx); | 705 | current->comm, current->pid, ctx); |
689 | #endif | 706 | #endif |
690 | 707 | ||
691 | if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx))) | 708 | if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx))) |
692 | return -EFAULT; | 709 | return -EFAULT; |
693 | 710 | ||
694 | __put_user(current->thread.irix_oldctx, &ctx->link); | 711 | error = __put_user(current->thread.irix_oldctx, &ctx->link); |
695 | 712 | ||
696 | __copy_to_user(&ctx->sigmask, ¤t->blocked, sizeof(irix_sigset_t)); | 713 | error |= __copy_to_user(&ctx->sigmask, ¤t->blocked, sizeof(irix_sigset_t)) ? -EFAULT : 0; |
697 | 714 | ||
698 | /* XXX Do sigstack stuff someday... */ | 715 | /* XXX Do sigstack stuff someday... */ |
699 | __put_user(0, &ctx->stack.sp); | 716 | error |= __put_user(0, &ctx->stack.sp); |
700 | __put_user(0, &ctx->stack.size); | 717 | error |= __put_user(0, &ctx->stack.size); |
701 | __put_user(0, &ctx->stack.flags); | 718 | error |= __put_user(0, &ctx->stack.flags); |
702 | 719 | ||
703 | __put_user(0, &ctx->weird_graphics_thing); | 720 | error |= __put_user(0, &ctx->weird_graphics_thing); |
704 | __put_user(0, &ctx->regs[0]); | 721 | error |= __put_user(0, &ctx->regs[0]); |
705 | for (i = 1; i < 32; i++) | 722 | for (i = 1; i < 32; i++) |
706 | __put_user(regs->regs[i], &ctx->regs[i]); | 723 | error |= __put_user(regs->regs[i], &ctx->regs[i]); |
707 | __put_user(regs->lo, &ctx->regs[32]); | 724 | error |= __put_user(regs->lo, &ctx->regs[32]); |
708 | __put_user(regs->hi, &ctx->regs[33]); | 725 | error |= __put_user(regs->hi, &ctx->regs[33]); |
709 | __put_user(regs->cp0_cause, &ctx->regs[34]); | 726 | error |= __put_user(regs->cp0_cause, &ctx->regs[34]); |
710 | __put_user(regs->cp0_epc, &ctx->regs[35]); | 727 | error |= __put_user(regs->cp0_epc, &ctx->regs[35]); |
711 | 728 | ||
712 | flags = 0x0f; | 729 | flags = 0x0f; |
713 | if (!used_math()) { | 730 | if (!used_math()) { |
@@ -716,119 +733,124 @@ asmlinkage int irix_getcontext(struct pt_regs *regs) | |||
716 | /* XXX wheee... */ | 733 | /* XXX wheee... */ |
717 | printk("Wheee, no code for saving IRIX FPU context yet.\n"); | 734 | printk("Wheee, no code for saving IRIX FPU context yet.\n"); |
718 | } | 735 | } |
719 | __put_user(flags, &ctx->flags); | 736 | error |= __put_user(flags, &ctx->flags); |
720 | 737 | ||
721 | return 0; | 738 | return error; |
722 | } | 739 | } |
723 | 740 | ||
724 | asmlinkage unsigned long irix_setcontext(struct pt_regs *regs) | 741 | asmlinkage void irix_setcontext(struct pt_regs *regs) |
725 | { | 742 | { |
726 | int error, base = 0; | 743 | struct irix5_context __user *ctx; |
727 | struct irix5_context *ctx; | 744 | int err, base = 0; |
745 | u32 flags; | ||
728 | 746 | ||
729 | if(regs->regs[2] == 1000) | 747 | if (regs->regs[2] == 1000) |
730 | base = 1; | 748 | base = 1; |
731 | ctx = (struct irix5_context *) regs->regs[base + 4]; | 749 | ctx = (struct irix5_context __user *) regs->regs[base + 4]; |
732 | 750 | ||
733 | #ifdef DEBUG_SIG | 751 | #ifdef DEBUG_SIG |
734 | printk("[%s:%d] irix_setcontext(%p)\n", | 752 | printk("[%s:%d] irix_setcontext(%p)\n", |
735 | current->comm, current->pid, ctx); | 753 | current->comm, current->pid, ctx); |
736 | #endif | 754 | #endif |
737 | 755 | ||
738 | if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))) { | 756 | if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))) |
739 | error = -EFAULT; | 757 | goto segv_and_exit; |
740 | goto out; | ||
741 | } | ||
742 | 758 | ||
743 | if (ctx->flags & 0x02) { | 759 | err = __get_user(flags, &ctx->flags); |
760 | if (flags & 0x02) { | ||
744 | /* XXX sigstack garbage, todo... */ | 761 | /* XXX sigstack garbage, todo... */ |
745 | printk("Wheee, cannot do sigstack stuff in setcontext\n"); | 762 | printk("Wheee, cannot do sigstack stuff in setcontext\n"); |
746 | } | 763 | } |
747 | 764 | ||
748 | if (ctx->flags & 0x04) { | 765 | if (flags & 0x04) { |
749 | int i; | 766 | int i; |
750 | 767 | ||
751 | /* XXX extra control block stuff... todo... */ | 768 | /* XXX extra control block stuff... todo... */ |
752 | for(i = 1; i < 32; i++) | 769 | for (i = 1; i < 32; i++) |
753 | regs->regs[i] = ctx->regs[i]; | 770 | err |= __get_user(regs->regs[i], &ctx->regs[i]); |
754 | regs->lo = ctx->regs[32]; | 771 | err |= __get_user(regs->lo, &ctx->regs[32]); |
755 | regs->hi = ctx->regs[33]; | 772 | err |= __get_user(regs->hi, &ctx->regs[33]); |
756 | regs->cp0_epc = ctx->regs[35]; | 773 | err |= __get_user(regs->cp0_epc, &ctx->regs[35]); |
757 | } | 774 | } |
758 | 775 | ||
759 | if (ctx->flags & 0x08) { | 776 | if (flags & 0x08) |
760 | /* XXX fpu context, blah... */ | 777 | /* XXX fpu context, blah... */ |
761 | printk("Wheee, cannot restore FPU context yet...\n"); | 778 | printk(KERN_ERR "Wheee, cannot restore FPU context yet...\n"); |
762 | } | ||
763 | current->thread.irix_oldctx = ctx->link; | ||
764 | error = regs->regs[2]; | ||
765 | 779 | ||
766 | out: | 780 | err |= __get_user(current->thread.irix_oldctx, &ctx->link); |
767 | return error; | 781 | if (err) |
782 | goto segv_and_exit; | ||
783 | |||
784 | /* | ||
785 | * Don't let your children do this ... | ||
786 | */ | ||
787 | if (current_thread_info()->flags & TIF_SYSCALL_TRACE) | ||
788 | do_syscall_trace(regs, 1); | ||
789 | __asm__ __volatile__( | ||
790 | "move\t$29,%0\n\t" | ||
791 | "j\tsyscall_exit" | ||
792 | :/* no outputs */ | ||
793 | :"r" (®s)); | ||
794 | /* Unreached */ | ||
795 | |||
796 | segv_and_exit: | ||
797 | force_sigsegv(SIGSEGV, current); | ||
768 | } | 798 | } |
769 | 799 | ||
770 | struct irix_sigstack { unsigned long sp; int status; }; | 800 | struct irix_sigstack { |
801 | unsigned long sp; | ||
802 | int status; | ||
803 | }; | ||
771 | 804 | ||
772 | asmlinkage int irix_sigstack(struct irix_sigstack *new, struct irix_sigstack *old) | 805 | asmlinkage int irix_sigstack(struct irix_sigstack __user *new, |
806 | struct irix_sigstack __user *old) | ||
773 | { | 807 | { |
774 | int error = -EFAULT; | ||
775 | |||
776 | #ifdef DEBUG_SIG | 808 | #ifdef DEBUG_SIG |
777 | printk("[%s:%d] irix_sigstack(%p,%p)\n", | 809 | printk("[%s:%d] irix_sigstack(%p,%p)\n", |
778 | current->comm, current->pid, new, old); | 810 | current->comm, current->pid, new, old); |
779 | #endif | 811 | #endif |
780 | if(new) { | 812 | if (new) { |
781 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) | 813 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) |
782 | goto out; | 814 | return -EFAULT; |
783 | } | 815 | } |
784 | 816 | ||
785 | if(old) { | 817 | if (old) { |
786 | if (!access_ok(VERIFY_WRITE, old, sizeof(*old))) | 818 | if (!access_ok(VERIFY_WRITE, old, sizeof(*old))) |
787 | goto out; | 819 | return -EFAULT; |
788 | } | 820 | } |
789 | error = 0; | ||
790 | 821 | ||
791 | out: | 822 | return 0; |
792 | return error; | ||
793 | } | 823 | } |
794 | 824 | ||
795 | struct irix_sigaltstack { unsigned long sp; int size; int status; }; | 825 | struct irix_sigaltstack { unsigned long sp; int size; int status; }; |
796 | 826 | ||
797 | asmlinkage int irix_sigaltstack(struct irix_sigaltstack *new, | 827 | asmlinkage int irix_sigaltstack(struct irix_sigaltstack __user *new, |
798 | struct irix_sigaltstack *old) | 828 | struct irix_sigaltstack __user *old) |
799 | { | 829 | { |
800 | int error = -EFAULT; | ||
801 | |||
802 | #ifdef DEBUG_SIG | 830 | #ifdef DEBUG_SIG |
803 | printk("[%s:%d] irix_sigaltstack(%p,%p)\n", | 831 | printk("[%s:%d] irix_sigaltstack(%p,%p)\n", |
804 | current->comm, current->pid, new, old); | 832 | current->comm, current->pid, new, old); |
805 | #endif | 833 | #endif |
806 | if (new) { | 834 | if (new) |
807 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) | 835 | if (!access_ok(VERIFY_READ, new, sizeof(*new))) |
808 | goto out; | 836 | return -EFAULT; |
809 | } | ||
810 | 837 | ||
811 | if (old) { | 838 | if (old) { |
812 | if (!access_ok(VERIFY_WRITE, old, sizeof(*old))) | 839 | if (!access_ok(VERIFY_WRITE, old, sizeof(*old))) |
813 | goto out; | 840 | return -EFAULT; |
814 | } | 841 | } |
815 | error = 0; | ||
816 | |||
817 | out: | ||
818 | error = 0; | ||
819 | 842 | ||
820 | return error; | 843 | return 0; |
821 | } | 844 | } |
822 | 845 | ||
823 | struct irix_procset { | 846 | struct irix_procset { |
824 | int cmd, ltype, lid, rtype, rid; | 847 | int cmd, ltype, lid, rtype, rid; |
825 | }; | 848 | }; |
826 | 849 | ||
827 | asmlinkage int irix_sigsendset(struct irix_procset *pset, int sig) | 850 | asmlinkage int irix_sigsendset(struct irix_procset __user *pset, int sig) |
828 | { | 851 | { |
829 | if (!access_ok(VERIFY_READ, pset, sizeof(*pset))) | 852 | if (!access_ok(VERIFY_READ, pset, sizeof(*pset))) |
830 | return -EFAULT; | 853 | return -EFAULT; |
831 | |||
832 | #ifdef DEBUG_SIG | 854 | #ifdef DEBUG_SIG |
833 | printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n", | 855 | printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n", |
834 | current->comm, current->pid, | 856 | current->comm, current->pid, |
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 43c00ac0b88d..3f653c7cfbf3 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -74,7 +74,7 @@ static void disable_msc_irq(unsigned int irq) | |||
74 | static void level_mask_and_ack_msc_irq(unsigned int irq) | 74 | static void level_mask_and_ack_msc_irq(unsigned int irq) |
75 | { | 75 | { |
76 | mask_msc_irq(irq); | 76 | mask_msc_irq(irq); |
77 | if (!cpu_has_ei) | 77 | if (!cpu_has_veic) |
78 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 78 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
79 | } | 79 | } |
80 | 80 | ||
@@ -84,7 +84,7 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) | |||
84 | static void edge_mask_and_ack_msc_irq(unsigned int irq) | 84 | static void edge_mask_and_ack_msc_irq(unsigned int irq) |
85 | { | 85 | { |
86 | mask_msc_irq(irq); | 86 | mask_msc_irq(irq); |
87 | if (!cpu_has_ei) | 87 | if (!cpu_has_veic) |
88 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 88 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
89 | else { | 89 | else { |
90 | u32 r; | 90 | u32 r; |
@@ -129,25 +129,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set) | |||
129 | #define shutdown_msc_irq disable_msc_irq | 129 | #define shutdown_msc_irq disable_msc_irq |
130 | 130 | ||
131 | struct hw_interrupt_type msc_levelirq_type = { | 131 | struct hw_interrupt_type msc_levelirq_type = { |
132 | "SOC-it-Level", | 132 | .typename = "SOC-it-Level", |
133 | startup_msc_irq, | 133 | .startup = startup_msc_irq, |
134 | shutdown_msc_irq, | 134 | .shutdown = shutdown_msc_irq, |
135 | enable_msc_irq, | 135 | .enable = enable_msc_irq, |
136 | disable_msc_irq, | 136 | .disable = disable_msc_irq, |
137 | level_mask_and_ack_msc_irq, | 137 | .ack = level_mask_and_ack_msc_irq, |
138 | end_msc_irq, | 138 | .end = end_msc_irq, |
139 | NULL | ||
140 | }; | 139 | }; |
141 | 140 | ||
142 | struct hw_interrupt_type msc_edgeirq_type = { | 141 | struct hw_interrupt_type msc_edgeirq_type = { |
143 | "SOC-it-Edge", | 142 | .typename = "SOC-it-Edge", |
144 | startup_msc_irq, | 143 | .startup = startup_msc_irq, |
145 | shutdown_msc_irq, | 144 | .shutdown = shutdown_msc_irq, |
146 | enable_msc_irq, | 145 | .enable = enable_msc_irq, |
147 | disable_msc_irq, | 146 | .disable = disable_msc_irq, |
148 | edge_mask_and_ack_msc_irq, | 147 | .ack = edge_mask_and_ack_msc_irq, |
149 | end_msc_irq, | 148 | .end = end_msc_irq, |
150 | NULL | ||
151 | }; | 149 | }; |
152 | 150 | ||
153 | 151 | ||
@@ -168,14 +166,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq) | |||
168 | switch (imp->im_type) { | 166 | switch (imp->im_type) { |
169 | case MSC01_IRQ_EDGE: | 167 | case MSC01_IRQ_EDGE: |
170 | irq_desc[base+n].handler = &msc_edgeirq_type; | 168 | irq_desc[base+n].handler = &msc_edgeirq_type; |
171 | if (cpu_has_ei) | 169 | if (cpu_has_veic) |
172 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); | 170 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); |
173 | else | 171 | else |
174 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); | 172 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); |
175 | break; | 173 | break; |
176 | case MSC01_IRQ_LEVEL: | 174 | case MSC01_IRQ_LEVEL: |
177 | irq_desc[base+n].handler = &msc_levelirq_type; | 175 | irq_desc[base+n].handler = &msc_levelirq_type; |
178 | if (cpu_has_ei) | 176 | if (cpu_has_veic) |
179 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); | 177 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); |
180 | else | 178 | else |
181 | MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); | 179 | MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); |
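The irq-msc01.c hunks above (and the irq-mv6434x.c, irq-rm7000.c, irq-rm9000.c and irq_cpu.c hunks that follow) convert the hw_interrupt_type initializers from positional form to C99 designated initializers. The standalone sketch below shows the same change on a stand-in struct: the designated form is robust to member reordering, and the dropped trailing NULL member is zero-initialized automatically.

/* Illustrative only: positional vs designated initializers on a small
 * stand-in for struct hw_interrupt_type. */
#include <stdio.h>

struct demo_irq_type {
        const char *typename;
        unsigned int (*startup)(unsigned int irq);
        void (*shutdown)(unsigned int irq);
        void (*ack)(unsigned int irq);
        void *affinity;                 /* stays NULL without being listed */
};

static unsigned int demo_startup(unsigned int irq) { (void)irq; return 0; }
static void demo_shutdown(unsigned int irq) { (void)irq; }
static void demo_ack(unsigned int irq) { (void)irq; }

/* Old style: order-dependent, needs the explicit trailing NULL. */
static struct demo_irq_type positional = {
        "SOC-it-Level", demo_startup, demo_shutdown, demo_ack, NULL
};

/* New style: order-independent, unmentioned members default to 0/NULL. */
static struct demo_irq_type designated = {
        .typename = "SOC-it-Level",
        .startup  = demo_startup,
        .shutdown = demo_shutdown,
        .ack      = demo_ack,
};

int main(void)
{
        printf("%s / %s, affinity %p vs %p\n",
               positional.typename, designated.typename,
               positional.affinity, designated.affinity);
        return 0;
}

Both objects end up identical; the designated version simply no longer breaks silently if a field is inserted in the middle of the struct.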
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c index 088bbbc869e6..0ac067f45cf5 100644 --- a/arch/mips/kernel/irq-mv6434x.c +++ b/arch/mips/kernel/irq-mv6434x.c | |||
@@ -135,14 +135,13 @@ void ll_mv64340_irq(struct pt_regs *regs) | |||
135 | #define shutdown_mv64340_irq disable_mv64340_irq | 135 | #define shutdown_mv64340_irq disable_mv64340_irq |
136 | 136 | ||
137 | struct hw_interrupt_type mv64340_irq_type = { | 137 | struct hw_interrupt_type mv64340_irq_type = { |
138 | "MV-64340", | 138 | .typename = "MV-64340", |
139 | startup_mv64340_irq, | 139 | .startup = startup_mv64340_irq, |
140 | shutdown_mv64340_irq, | 140 | .shutdown = shutdown_mv64340_irq, |
141 | enable_mv64340_irq, | 141 | .enable = enable_mv64340_irq, |
142 | disable_mv64340_irq, | 142 | .disable = disable_mv64340_irq, |
143 | mask_and_ack_mv64340_irq, | 143 | .ack = mask_and_ack_mv64340_irq, |
144 | end_mv64340_irq, | 144 | .end = end_mv64340_irq, |
145 | NULL | ||
146 | }; | 145 | }; |
147 | 146 | ||
148 | void __init mv64340_irq_init(unsigned int base) | 147 | void __init mv64340_irq_init(unsigned int base) |
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index f5d779fd0355..0b130c5ac5d9 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -72,13 +72,13 @@ static void rm7k_cpu_irq_end(unsigned int irq) | |||
72 | } | 72 | } |
73 | 73 | ||
74 | static hw_irq_controller rm7k_irq_controller = { | 74 | static hw_irq_controller rm7k_irq_controller = { |
75 | "RM7000", | 75 | .typename = "RM7000", |
76 | rm7k_cpu_irq_startup, | 76 | .startup = rm7k_cpu_irq_startup, |
77 | rm7k_cpu_irq_shutdown, | 77 | .shutdown = rm7k_cpu_irq_shutdown, |
78 | rm7k_cpu_irq_enable, | 78 | .enable = rm7k_cpu_irq_enable, |
79 | rm7k_cpu_irq_disable, | 79 | .disable = rm7k_cpu_irq_disable, |
80 | rm7k_cpu_irq_ack, | 80 | .ack = rm7k_cpu_irq_ack, |
81 | rm7k_cpu_irq_end, | 81 | .end = rm7k_cpu_irq_end, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | void __init rm7k_cpu_irq_init(int base) | 84 | void __init rm7k_cpu_irq_init(int base) |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index bdd130296256..9b5f20c32acb 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -106,23 +106,23 @@ static void rm9k_cpu_irq_end(unsigned int irq) | |||
106 | } | 106 | } |
107 | 107 | ||
108 | static hw_irq_controller rm9k_irq_controller = { | 108 | static hw_irq_controller rm9k_irq_controller = { |
109 | "RM9000", | 109 | .typename = "RM9000", |
110 | rm9k_cpu_irq_startup, | 110 | .startup = rm9k_cpu_irq_startup, |
111 | rm9k_cpu_irq_shutdown, | 111 | .shutdown = rm9k_cpu_irq_shutdown, |
112 | rm9k_cpu_irq_enable, | 112 | .enable = rm9k_cpu_irq_enable, |
113 | rm9k_cpu_irq_disable, | 113 | .disable = rm9k_cpu_irq_disable, |
114 | rm9k_cpu_irq_ack, | 114 | .ack = rm9k_cpu_irq_ack, |
115 | rm9k_cpu_irq_end, | 115 | .end = rm9k_cpu_irq_end, |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static hw_irq_controller rm9k_perfcounter_irq = { | 118 | static hw_irq_controller rm9k_perfcounter_irq = { |
119 | "RM9000", | 119 | .typename = "RM9000", |
120 | rm9k_perfcounter_irq_startup, | 120 | .startup = rm9k_perfcounter_irq_startup, |
121 | rm9k_perfcounter_irq_shutdown, | 121 | .shutdown = rm9k_perfcounter_irq_shutdown, |
122 | rm9k_cpu_irq_enable, | 122 | .enable = rm9k_cpu_irq_enable, |
123 | rm9k_cpu_irq_disable, | 123 | .disable = rm9k_cpu_irq_disable, |
124 | rm9k_cpu_irq_ack, | 124 | .ack = rm9k_cpu_irq_ack, |
125 | rm9k_cpu_irq_end, | 125 | .end = rm9k_cpu_irq_end, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | unsigned int rm9000_perfcount_irq; | 128 | unsigned int rm9000_perfcount_irq; |
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 2b936cf1ef70..5db67e31ec1a 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -3,6 +3,8 @@ | |||
3 | * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net | 3 | * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net |
4 | * | 4 | * |
5 | * Copyright (C) 2001 Ralf Baechle | 5 | * Copyright (C) 2001 Ralf Baechle |
6 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Author: Maciej W. Rozycki <macro@mips.com> | ||
6 | * | 8 | * |
7 | * This file defines the irq handler for MIPS CPU interrupts. | 9 | * This file defines the irq handler for MIPS CPU interrupts. |
8 | * | 10 | * |
@@ -31,19 +33,21 @@ | |||
31 | 33 | ||
32 | #include <asm/irq_cpu.h> | 34 | #include <asm/irq_cpu.h> |
33 | #include <asm/mipsregs.h> | 35 | #include <asm/mipsregs.h> |
36 | #include <asm/mipsmtregs.h> | ||
34 | #include <asm/system.h> | 37 | #include <asm/system.h> |
35 | 38 | ||
36 | static int mips_cpu_irq_base; | 39 | static int mips_cpu_irq_base; |
37 | 40 | ||
38 | static inline void unmask_mips_irq(unsigned int irq) | 41 | static inline void unmask_mips_irq(unsigned int irq) |
39 | { | 42 | { |
40 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | ||
41 | set_c0_status(0x100 << (irq - mips_cpu_irq_base)); | 43 | set_c0_status(0x100 << (irq - mips_cpu_irq_base)); |
44 | irq_enable_hazard(); | ||
42 | } | 45 | } |
43 | 46 | ||
44 | static inline void mask_mips_irq(unsigned int irq) | 47 | static inline void mask_mips_irq(unsigned int irq) |
45 | { | 48 | { |
46 | clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); | 49 | clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); |
50 | irq_disable_hazard(); | ||
47 | } | 51 | } |
48 | 52 | ||
49 | static inline void mips_cpu_irq_enable(unsigned int irq) | 53 | static inline void mips_cpu_irq_enable(unsigned int irq) |
@@ -52,6 +56,7 @@ static inline void mips_cpu_irq_enable(unsigned int irq) | |||
52 | 56 | ||
53 | local_irq_save(flags); | 57 | local_irq_save(flags); |
54 | unmask_mips_irq(irq); | 58 | unmask_mips_irq(irq); |
59 | back_to_back_c0_hazard(); | ||
55 | local_irq_restore(flags); | 60 | local_irq_restore(flags); |
56 | } | 61 | } |
57 | 62 | ||
@@ -61,6 +66,7 @@ static void mips_cpu_irq_disable(unsigned int irq) | |||
61 | 66 | ||
62 | local_irq_save(flags); | 67 | local_irq_save(flags); |
63 | mask_mips_irq(irq); | 68 | mask_mips_irq(irq); |
69 | back_to_back_c0_hazard(); | ||
64 | local_irq_restore(flags); | 70 | local_irq_restore(flags); |
65 | } | 71 | } |
66 | 72 | ||
@@ -71,7 +77,7 @@ static unsigned int mips_cpu_irq_startup(unsigned int irq) | |||
71 | return 0; | 77 | return 0; |
72 | } | 78 | } |
73 | 79 | ||
74 | #define mips_cpu_irq_shutdown mips_cpu_irq_disable | 80 | #define mips_cpu_irq_shutdown mips_cpu_irq_disable |
75 | 81 | ||
76 | /* | 82 | /* |
77 | * While we ack the interrupt interrupts are disabled and thus we don't need | 83 | * While we ack the interrupt interrupts are disabled and thus we don't need |
@@ -79,9 +85,6 @@ static unsigned int mips_cpu_irq_startup(unsigned int irq) | |||
79 | */ | 85 | */ |
80 | static void mips_cpu_irq_ack(unsigned int irq) | 86 | static void mips_cpu_irq_ack(unsigned int irq) |
81 | { | 87 | { |
82 | /* Only necessary for soft interrupts */ | ||
83 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | ||
84 | |||
85 | mask_mips_irq(irq); | 88 | mask_mips_irq(irq); |
86 | } | 89 | } |
87 | 90 | ||
@@ -92,22 +95,82 @@ static void mips_cpu_irq_end(unsigned int irq) | |||
92 | } | 95 | } |
93 | 96 | ||
94 | static hw_irq_controller mips_cpu_irq_controller = { | 97 | static hw_irq_controller mips_cpu_irq_controller = { |
95 | "MIPS", | 98 | .typename = "MIPS", |
96 | mips_cpu_irq_startup, | 99 | .startup = mips_cpu_irq_startup, |
97 | mips_cpu_irq_shutdown, | 100 | .shutdown = mips_cpu_irq_shutdown, |
98 | mips_cpu_irq_enable, | 101 | .enable = mips_cpu_irq_enable, |
99 | mips_cpu_irq_disable, | 102 | .disable = mips_cpu_irq_disable, |
100 | mips_cpu_irq_ack, | 103 | .ack = mips_cpu_irq_ack, |
101 | mips_cpu_irq_end, | 104 | .end = mips_cpu_irq_end, |
102 | NULL /* no affinity stuff for UP */ | ||
103 | }; | 105 | }; |
104 | 106 | ||
107 | /* | ||
108 | * Basically the same as above but taking care of all the MT stuff | ||
109 | */ | ||
110 | |||
111 | #define unmask_mips_mt_irq unmask_mips_irq | ||
112 | #define mask_mips_mt_irq mask_mips_irq | ||
113 | #define mips_mt_cpu_irq_enable mips_cpu_irq_enable | ||
114 | #define mips_mt_cpu_irq_disable mips_cpu_irq_disable | ||
115 | |||
116 | static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) | ||
117 | { | ||
118 | unsigned int vpflags = dvpe(); | ||
119 | |||
120 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | ||
121 | evpe(vpflags); | ||
122 | mips_mt_cpu_irq_enable(irq); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | #define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable | ||
128 | |||
129 | /* | ||
130 | * While we ack the interrupt interrupts are disabled and thus we don't need | ||
131 | * to deal with concurrency issues. Same for mips_cpu_irq_end. | ||
132 | */ | ||
133 | static void mips_mt_cpu_irq_ack(unsigned int irq) | ||
134 | { | ||
135 | unsigned int vpflags = dvpe(); | ||
136 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | ||
137 | evpe(vpflags); | ||
138 | mask_mips_mt_irq(irq); | ||
139 | } | ||
140 | |||
141 | #define mips_mt_cpu_irq_end mips_cpu_irq_end | ||
142 | |||
143 | static hw_irq_controller mips_mt_cpu_irq_controller = { | ||
144 | .typename = "MIPS", | ||
145 | .startup = mips_mt_cpu_irq_startup, | ||
146 | .shutdown = mips_mt_cpu_irq_shutdown, | ||
147 | .enable = mips_mt_cpu_irq_enable, | ||
148 | .disable = mips_mt_cpu_irq_disable, | ||
149 | .ack = mips_mt_cpu_irq_ack, | ||
150 | .end = mips_mt_cpu_irq_end, | ||
151 | }; | ||
105 | 152 | ||
106 | void __init mips_cpu_irq_init(int irq_base) | 153 | void __init mips_cpu_irq_init(int irq_base) |
107 | { | 154 | { |
108 | int i; | 155 | int i; |
109 | 156 | ||
110 | for (i = irq_base; i < irq_base + 8; i++) { | 157 | /* Mask interrupts. */ |
158 | clear_c0_status(ST0_IM); | ||
159 | clear_c0_cause(CAUSEF_IP); | ||
160 | |||
161 | /* | ||
162 | * Only MT is using the software interrupts currently, so we just | ||
163 | * leave them uninitialized for other processors. | ||
164 | */ | ||
165 | if (cpu_has_mipsmt) | ||
166 | for (i = irq_base; i < irq_base + 2; i++) { | ||
167 | irq_desc[i].status = IRQ_DISABLED; | ||
168 | irq_desc[i].action = NULL; | ||
169 | irq_desc[i].depth = 1; | ||
170 | irq_desc[i].handler = &mips_mt_cpu_irq_controller; | ||
171 | } | ||
172 | |||
173 | for (i = irq_base + 2; i < irq_base + 8; i++) { | ||
111 | irq_desc[i].status = IRQ_DISABLED; | 174 | irq_desc[i].status = IRQ_DISABLED; |
112 | irq_desc[i].action = NULL; | 175 | irq_desc[i].action = NULL; |
113 | irq_desc[i].depth = 1; | 176 | irq_desc[i].depth = 1; |
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index ece4564919d8..330cf84d21fe 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -215,81 +215,32 @@ sys32_readdir(unsigned int fd, void * dirent32, unsigned int count) | |||
215 | return(n); | 215 | return(n); |
216 | } | 216 | } |
217 | 217 | ||
218 | struct rusage32 { | 218 | asmlinkage int |
219 | struct compat_timeval ru_utime; | 219 | sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options) |
220 | struct compat_timeval ru_stime; | ||
221 | int ru_maxrss; | ||
222 | int ru_ixrss; | ||
223 | int ru_idrss; | ||
224 | int ru_isrss; | ||
225 | int ru_minflt; | ||
226 | int ru_majflt; | ||
227 | int ru_nswap; | ||
228 | int ru_inblock; | ||
229 | int ru_oublock; | ||
230 | int ru_msgsnd; | ||
231 | int ru_msgrcv; | ||
232 | int ru_nsignals; | ||
233 | int ru_nvcsw; | ||
234 | int ru_nivcsw; | ||
235 | }; | ||
236 | |||
237 | static int | ||
238 | put_rusage (struct rusage32 *ru, struct rusage *r) | ||
239 | { | 220 | { |
240 | int err; | 221 | return compat_sys_wait4(pid, stat_addr, options, NULL); |
241 | |||
242 | if (!access_ok(VERIFY_WRITE, ru, sizeof *ru)) | ||
243 | return -EFAULT; | ||
244 | |||
245 | err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec); | ||
246 | err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec); | ||
247 | err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec); | ||
248 | err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec); | ||
249 | err |= __put_user (r->ru_maxrss, &ru->ru_maxrss); | ||
250 | err |= __put_user (r->ru_ixrss, &ru->ru_ixrss); | ||
251 | err |= __put_user (r->ru_idrss, &ru->ru_idrss); | ||
252 | err |= __put_user (r->ru_isrss, &ru->ru_isrss); | ||
253 | err |= __put_user (r->ru_minflt, &ru->ru_minflt); | ||
254 | err |= __put_user (r->ru_majflt, &ru->ru_majflt); | ||
255 | err |= __put_user (r->ru_nswap, &ru->ru_nswap); | ||
256 | err |= __put_user (r->ru_inblock, &ru->ru_inblock); | ||
257 | err |= __put_user (r->ru_oublock, &ru->ru_oublock); | ||
258 | err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd); | ||
259 | err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv); | ||
260 | err |= __put_user (r->ru_nsignals, &ru->ru_nsignals); | ||
261 | err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw); | ||
262 | err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw); | ||
263 | |||
264 | return err; | ||
265 | } | 222 | } |
266 | 223 | ||
267 | asmlinkage int | 224 | asmlinkage long |
268 | sys32_wait4(compat_pid_t pid, unsigned int * stat_addr, int options, | 225 | sysn32_waitid(int which, compat_pid_t pid, |
269 | struct rusage32 * ru) | 226 | siginfo_t __user *uinfo, int options, |
227 | struct compat_rusage __user *uru) | ||
270 | { | 228 | { |
271 | if (!ru) | 229 | struct rusage ru; |
272 | return sys_wait4(pid, stat_addr, options, NULL); | 230 | long ret; |
273 | else { | 231 | mm_segment_t old_fs = get_fs(); |
274 | struct rusage r; | ||
275 | int ret; | ||
276 | unsigned int status; | ||
277 | mm_segment_t old_fs = get_fs(); | ||
278 | 232 | ||
279 | set_fs(KERNEL_DS); | 233 | set_fs (KERNEL_DS); |
280 | ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r); | 234 | ret = sys_waitid(which, pid, uinfo, options, |
281 | set_fs(old_fs); | 235 | uru ? (struct rusage __user *) &ru : NULL); |
282 | if (put_rusage (ru, &r)) return -EFAULT; | 236 | set_fs (old_fs); |
283 | if (stat_addr && put_user (status, stat_addr)) | 237 | |
284 | return -EFAULT; | 238 | if (ret < 0 || uinfo->si_signo == 0) |
285 | return ret; | 239 | return ret; |
286 | } | ||
287 | } | ||
288 | 240 | ||
289 | asmlinkage int | 241 | if (uru) |
290 | sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options) | 242 | ret = put_compat_rusage(&ru, uru); |
291 | { | 243 | return ret; |
292 | return sys32_wait4(pid, stat_addr, options, NULL); | ||
293 | } | 244 | } |
294 | 245 | ||
295 | struct sysinfo32 { | 246 | struct sysinfo32 { |
@@ -1467,3 +1418,80 @@ asmlinkage long sys32_socketcall(int call, unsigned int *args32) | |||
1467 | } | 1418 | } |
1468 | return err; | 1419 | return err; |
1469 | } | 1420 | } |
1421 | |||
1422 | struct sigevent32 { | ||
1423 | u32 sigev_value; | ||
1424 | u32 sigev_signo; | ||
1425 | u32 sigev_notify; | ||
1426 | u32 payload[(64 / 4) - 3]; | ||
1427 | }; | ||
1428 | |||
1429 | extern asmlinkage long | ||
1430 | sys_timer_create(clockid_t which_clock, | ||
1431 | struct sigevent __user *timer_event_spec, | ||
1432 | timer_t __user * created_timer_id); | ||
1433 | |||
1434 | long | ||
1435 | sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id) | ||
1436 | { | ||
1437 | struct sigevent __user *p = NULL; | ||
1438 | if (se32) { | ||
1439 | struct sigevent se; | ||
1440 | p = compat_alloc_user_space(sizeof(struct sigevent)); | ||
1441 | memset(&se, 0, sizeof(struct sigevent)); | ||
1442 | if (get_user(se.sigev_value.sival_int, &se32->sigev_value) || | ||
1443 | __get_user(se.sigev_signo, &se32->sigev_signo) || | ||
1444 | __get_user(se.sigev_notify, &se32->sigev_notify) || | ||
1445 | __copy_from_user(&se._sigev_un._pad, &se32->payload, | ||
1446 | sizeof(se32->payload)) || | ||
1447 | copy_to_user(p, &se, sizeof(se))) | ||
1448 | return -EFAULT; | ||
1449 | } | ||
1450 | return sys_timer_create(clock, p, timer_id); | ||
1451 | } | ||
1452 | |||
1453 | asmlinkage long | ||
1454 | sysn32_rt_sigtimedwait(const sigset_t __user *uthese, | ||
1455 | siginfo_t __user *uinfo, | ||
1456 | const struct compat_timespec __user *uts32, | ||
1457 | size_t sigsetsize) | ||
1458 | { | ||
1459 | struct timespec __user *uts = NULL; | ||
1460 | |||
1461 | if (uts32) { | ||
1462 | struct timespec ts; | ||
1463 | uts = compat_alloc_user_space(sizeof(struct timespec)); | ||
1464 | if (get_user(ts.tv_sec, &uts32->tv_sec) || | ||
1465 | get_user(ts.tv_nsec, &uts32->tv_nsec) || | ||
1466 | copy_to_user (uts, &ts, sizeof (ts))) | ||
1467 | return -EFAULT; | ||
1468 | } | ||
1469 | return sys_rt_sigtimedwait(uthese, uinfo, uts, sigsetsize); | ||
1470 | } | ||
1471 | |||
1472 | save_static_function(sys32_clone); | ||
1473 | __attribute_used__ noinline static int | ||
1474 | _sys32_clone(nabi_no_regargs struct pt_regs regs) | ||
1475 | { | ||
1476 | unsigned long clone_flags; | ||
1477 | unsigned long newsp; | ||
1478 | int __user *parent_tidptr, *child_tidptr; | ||
1479 | |||
1480 | clone_flags = regs.regs[4]; | ||
1481 | newsp = regs.regs[5]; | ||
1482 | if (!newsp) | ||
1483 | newsp = regs.regs[29]; | ||
1484 | parent_tidptr = (int *) regs.regs[6]; | ||
1485 | |||
1486 | /* Use __dummy4 instead of getting it off the stack, so that | ||
1487 | syscall() works. */ | ||
1488 | child_tidptr = (int __user *) __dummy4; | ||
1489 | return do_fork(clone_flags, newsp, ®s, 0, | ||
1490 | parent_tidptr, child_tidptr); | ||
1491 | } | ||
1492 | |||
1493 | extern asmlinkage void sys_set_thread_area(u32 addr); | ||
1494 | asmlinkage void sys32_set_thread_area(u32 addr) | ||
1495 | { | ||
1496 | sys_set_thread_area(AA(addr)); | ||
1497 | } | ||
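The linux32.c changes above lean on the generic compat helpers (compat_sys_wait4(), put_compat_rusage(), compat_alloc_user_space()) instead of the removed private rusage32 conversion. The essential job of such helpers is field-by-field widening or narrowing between a 32-bit and a native struct layout; the sketch below illustrates that on a simplified "compat timeval" whose layout is an assumption for the demo, not the kernel's definition.

/* Illustrative only: why compat conversion is done member by member. */
#include <stdio.h>
#include <stdint.h>

struct native_timeval { int64_t tv_sec; int64_t tv_usec; };
struct compat_timeval_demo { int32_t tv_sec; int32_t tv_usec; };

static void put_compat_timeval_demo(struct compat_timeval_demo *dst,
                                    const struct native_timeval *src)
{
        /* Sizes and offsets of the two structs differ, so a flat memcpy
         * would be wrong; each member is narrowed explicitly. */
        dst->tv_sec  = (int32_t)src->tv_sec;
        dst->tv_usec = (int32_t)src->tv_usec;
}

int main(void)
{
        struct native_timeval n = { 1130713032, 250000 };
        struct compat_timeval_demo c;

        put_compat_timeval_demo(&c, &n);
        printf("native %zu bytes -> compat %zu bytes: %d.%06d\n",
               sizeof(n), sizeof(c), (int)c.tv_sec, (int)c.tv_usec);
        return 0;
}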
diff --git a/arch/mips/kernel/module-elf32.c b/arch/mips/kernel/module-elf32.c deleted file mode 100644 index ffd216d6d6dc..000000000000 --- a/arch/mips/kernel/module-elf32.c +++ /dev/null | |||
@@ -1,250 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) 2001 Rusty Russell. | ||
17 | * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) | ||
18 | */ | ||
19 | |||
20 | #undef DEBUG | ||
21 | |||
22 | #include <linux/moduleloader.h> | ||
23 | #include <linux/elf.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/kernel.h> | ||
29 | |||
30 | struct mips_hi16 { | ||
31 | struct mips_hi16 *next; | ||
32 | Elf32_Addr *addr; | ||
33 | Elf32_Addr value; | ||
34 | }; | ||
35 | |||
36 | static struct mips_hi16 *mips_hi16_list; | ||
37 | |||
38 | void *module_alloc(unsigned long size) | ||
39 | { | ||
40 | if (size == 0) | ||
41 | return NULL; | ||
42 | return vmalloc(size); | ||
43 | } | ||
44 | |||
45 | |||
46 | /* Free memory returned from module_alloc */ | ||
47 | void module_free(struct module *mod, void *module_region) | ||
48 | { | ||
49 | vfree(module_region); | ||
50 | /* FIXME: If module_region == mod->init_region, trim exception | ||
51 | table entries. */ | ||
52 | } | ||
53 | |||
54 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
55 | Elf_Shdr *sechdrs, | ||
56 | char *secstrings, | ||
57 | struct module *mod) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static int apply_r_mips_none(struct module *me, uint32_t *location, | ||
63 | Elf32_Addr v) | ||
64 | { | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int apply_r_mips_32(struct module *me, uint32_t *location, | ||
69 | Elf32_Addr v) | ||
70 | { | ||
71 | *location += v; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int apply_r_mips_26(struct module *me, uint32_t *location, | ||
77 | Elf32_Addr v) | ||
78 | { | ||
79 | if (v % 4) { | ||
80 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | ||
81 | return -ENOEXEC; | ||
82 | } | ||
83 | |||
84 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
85 | printk(KERN_ERR | ||
86 | "module %s: relocation overflow\n", | ||
87 | me->name); | ||
88 | return -ENOEXEC; | ||
89 | } | ||
90 | |||
91 | *location = (*location & ~0x03ffffff) | | ||
92 | ((*location + (v >> 2)) & 0x03ffffff); | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int apply_r_mips_hi16(struct module *me, uint32_t *location, | ||
98 | Elf32_Addr v) | ||
99 | { | ||
100 | struct mips_hi16 *n; | ||
101 | |||
102 | /* | ||
103 | * We cannot relocate this one now because we don't know the value of | ||
104 | * the carry we need to add. Save the information, and let LO16 do the | ||
105 | * actual relocation. | ||
106 | */ | ||
107 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
108 | if (!n) | ||
109 | return -ENOMEM; | ||
110 | |||
111 | n->addr = location; | ||
112 | n->value = v; | ||
113 | n->next = mips_hi16_list; | ||
114 | mips_hi16_list = n; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int apply_r_mips_lo16(struct module *me, uint32_t *location, | ||
120 | Elf32_Addr v) | ||
121 | { | ||
122 | unsigned long insnlo = *location; | ||
123 | Elf32_Addr val, vallo; | ||
124 | |||
125 | /* Sign extend the addend we extract from the lo insn. */ | ||
126 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
127 | |||
128 | if (mips_hi16_list != NULL) { | ||
129 | struct mips_hi16 *l; | ||
130 | |||
131 | l = mips_hi16_list; | ||
132 | while (l != NULL) { | ||
133 | struct mips_hi16 *next; | ||
134 | unsigned long insn; | ||
135 | |||
136 | /* | ||
137 | * The value for the HI16 had best be the same. | ||
138 | */ | ||
139 | if (v != l->value) | ||
140 | goto out_danger; | ||
141 | |||
142 | /* | ||
143 | * Do the HI16 relocation. Note that we actually don't | ||
144 | * need to know anything about the LO16 itself, except | ||
145 | * where to find the low 16 bits of the addend needed | ||
146 | * by the LO16. | ||
147 | */ | ||
148 | insn = *l->addr; | ||
149 | val = ((insn & 0xffff) << 16) + vallo; | ||
150 | val += v; | ||
151 | |||
152 | /* | ||
153 | * Account for the sign extension that will happen in | ||
154 | * the low bits. | ||
155 | */ | ||
156 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
157 | |||
158 | insn = (insn & ~0xffff) | val; | ||
159 | *l->addr = insn; | ||
160 | |||
161 | next = l->next; | ||
162 | kfree(l); | ||
163 | l = next; | ||
164 | } | ||
165 | |||
166 | mips_hi16_list = NULL; | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
171 | */ | ||
172 | val = v + vallo; | ||
173 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
174 | *location = insnlo; | ||
175 | |||
176 | return 0; | ||
177 | |||
178 | out_danger: | ||
179 | printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name); | ||
180 | |||
181 | return -ENOEXEC; | ||
182 | } | ||
183 | |||
184 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | ||
185 | Elf32_Addr v) = { | ||
186 | [R_MIPS_NONE] = apply_r_mips_none, | ||
187 | [R_MIPS_32] = apply_r_mips_32, | ||
188 | [R_MIPS_26] = apply_r_mips_26, | ||
189 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
190 | [R_MIPS_LO16] = apply_r_mips_lo16 | ||
191 | }; | ||
192 | |||
193 | int apply_relocate(Elf32_Shdr *sechdrs, | ||
194 | const char *strtab, | ||
195 | unsigned int symindex, | ||
196 | unsigned int relsec, | ||
197 | struct module *me) | ||
198 | { | ||
199 | Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; | ||
200 | Elf32_Sym *sym; | ||
201 | uint32_t *location; | ||
202 | unsigned int i; | ||
203 | Elf32_Addr v; | ||
204 | int res; | ||
205 | |||
206 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
207 | sechdrs[relsec].sh_info); | ||
208 | |||
209 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
210 | Elf32_Word r_info = rel[i].r_info; | ||
211 | |||
212 | /* This is where to make the change */ | ||
213 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
214 | + rel[i].r_offset; | ||
215 | /* This is the symbol it is referring to */ | ||
216 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
217 | + ELF32_R_SYM(r_info); | ||
218 | if (!sym->st_value) { | ||
219 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
220 | me->name, strtab + sym->st_name); | ||
221 | return -ENOENT; | ||
222 | } | ||
223 | |||
224 | v = sym->st_value; | ||
225 | |||
226 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); | ||
227 | if (res) | ||
228 | return res; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | int apply_relocate_add(Elf32_Shdr *sechdrs, | ||
235 | const char *strtab, | ||
236 | unsigned int symindex, | ||
237 | unsigned int relsec, | ||
238 | struct module *me) | ||
239 | { | ||
240 | /* | ||
241 | * Current binutils always generate .rela relocations. Keep smiling | ||
242 | * if it's empty, abort otherwise. | ||
243 | */ | ||
244 | if (!sechdrs[relsec].sh_size) | ||
245 | return 0; | ||
246 | |||
247 | printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", | ||
248 | me->name); | ||
249 | return -ENOEXEC; | ||
250 | } | ||
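The HI16/LO16 handlers in the file deleted above are paired because the low half is later sign-extended by the CPU: whenever bit 15 of %lo is set, the %hi half must be rounded up by one, or the reassembled address comes out 0x10000 too small. The standalone check below reuses the same expressions that appear in apply_r_mips_hi16()/apply_r_mips_lo16().

/* Illustrative only: the %hi/%lo carry arithmetic from the deleted loader. */
#include <stdio.h>
#include <stdint.h>

static uint32_t split_hi(uint32_t addr)
{
        /* Same rounding as: ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff */
        return ((addr >> 16) + ((addr & 0x8000) != 0)) & 0xffff;
}

static int32_t sign_extend_lo(uint32_t addr)
{
        /* Same trick as: ((insnlo & 0xffff) ^ 0x8000) - 0x8000 */
        return (int32_t)(((addr & 0xffff) ^ 0x8000) - 0x8000);
}

int main(void)
{
        uint32_t addr = 0x1234abcd;     /* low half has bit 15 set */
        uint32_t hi = split_hi(addr);
        int32_t lo = sign_extend_lo(addr);

        printf("hi=0x%04x lo=%d reassembled=0x%08x\n",
               hi, lo, (uint32_t)((hi << 16) + lo));
        return 0;
}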
diff --git a/arch/mips/kernel/module-elf64.c b/arch/mips/kernel/module-elf64.c deleted file mode 100644 index e804792ee1ee..000000000000 --- a/arch/mips/kernel/module-elf64.c +++ /dev/null | |||
@@ -1,274 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) 2001 Rusty Russell. | ||
17 | * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) | ||
18 | */ | ||
19 | |||
20 | #undef DEBUG | ||
21 | |||
22 | #include <linux/moduleloader.h> | ||
23 | #include <linux/elf.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/kernel.h> | ||
29 | |||
30 | struct mips_hi16 { | ||
31 | struct mips_hi16 *next; | ||
32 | Elf32_Addr *addr; | ||
33 | Elf64_Addr value; | ||
34 | }; | ||
35 | |||
36 | static struct mips_hi16 *mips_hi16_list; | ||
37 | |||
38 | void *module_alloc(unsigned long size) | ||
39 | { | ||
40 | if (size == 0) | ||
41 | return NULL; | ||
42 | return vmalloc(size); | ||
43 | } | ||
44 | |||
45 | |||
46 | /* Free memory returned from module_alloc */ | ||
47 | void module_free(struct module *mod, void *module_region) | ||
48 | { | ||
49 | vfree(module_region); | ||
50 | /* FIXME: If module_region == mod->init_region, trim exception | ||
51 | table entries. */ | ||
52 | } | ||
53 | |||
54 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
55 | Elf_Shdr *sechdrs, | ||
56 | char *secstrings, | ||
57 | struct module *mod) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | int apply_relocate(Elf64_Shdr *sechdrs, | ||
63 | const char *strtab, | ||
64 | unsigned int symindex, | ||
65 | unsigned int relsec, | ||
66 | struct module *me) | ||
67 | { | ||
68 | /* | ||
69 | * We don't want to deal with REL relocations - RELA is so much saner. | ||
70 | */ | ||
71 | if (!sechdrs[relsec].sh_size) | ||
72 | return 0; | ||
73 | |||
74 | printk(KERN_ERR "module %s: REL relocation unsupported\n", | ||
75 | me->name); | ||
76 | return -ENOEXEC; | ||
77 | } | ||
78 | |||
79 | static int apply_r_mips_none(struct module *me, uint32_t *location, | ||
80 | Elf64_Addr v) | ||
81 | { | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static int apply_r_mips_32(struct module *me, uint32_t *location, | ||
86 | Elf64_Addr v) | ||
87 | { | ||
88 | *location = v; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static int apply_r_mips_26(struct module *me, uint32_t *location, | ||
94 | Elf64_Addr v) | ||
95 | { | ||
96 | if (v % 4) { | ||
97 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | ||
98 | return -ENOEXEC; | ||
99 | } | ||
100 | |||
101 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
102 | printk(KERN_ERR | ||
103 | "module %s: relocation overflow\n", | ||
104 | me->name); | ||
105 | return -ENOEXEC; | ||
106 | } | ||
107 | |||
108 | *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int apply_r_mips_hi16(struct module *me, uint32_t *location, | ||
114 | Elf64_Addr v) | ||
115 | { | ||
116 | struct mips_hi16 *n; | ||
117 | |||
118 | /* | ||
119 | * We cannot relocate this one now because we don't know the value of | ||
120 | * the carry we need to add. Save the information, and let LO16 do the | ||
121 | * actual relocation. | ||
122 | */ | ||
123 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
124 | if (!n) | ||
125 | return -ENOMEM; | ||
126 | |||
127 | n->addr = location; | ||
128 | n->value = v; | ||
129 | n->next = mips_hi16_list; | ||
130 | mips_hi16_list = n; | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static int apply_r_mips_lo16(struct module *me, uint32_t *location, | ||
136 | Elf64_Addr v) | ||
137 | { | ||
138 | unsigned long insnlo = *location; | ||
139 | Elf32_Addr val, vallo; | ||
140 | |||
141 | /* Sign extend the addend we extract from the lo insn. */ | ||
142 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
143 | |||
144 | if (mips_hi16_list != NULL) { | ||
145 | struct mips_hi16 *l; | ||
146 | |||
147 | l = mips_hi16_list; | ||
148 | while (l != NULL) { | ||
149 | struct mips_hi16 *next; | ||
150 | unsigned long insn; | ||
151 | |||
152 | /* | ||
153 | * The value for the HI16 had best be the same. | ||
154 | */ | ||
155 | if (v != l->value) | ||
156 | goto out_danger; | ||
157 | |||
158 | /* | ||
159 | * Do the HI16 relocation. Note that we actually don't | ||
160 | * need to know anything about the LO16 itself, except | ||
161 | * where to find the low 16 bits of the addend needed | ||
162 | * by the LO16. | ||
163 | */ | ||
164 | insn = *l->addr; | ||
165 | val = ((insn & 0xffff) << 16) + vallo; | ||
166 | val += v; | ||
167 | |||
168 | /* | ||
169 | * Account for the sign extension that will happen in | ||
170 | * the low bits. | ||
171 | */ | ||
172 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
173 | |||
174 | insn = (insn & ~0xffff) | val; | ||
175 | *l->addr = insn; | ||
176 | |||
177 | next = l->next; | ||
178 | kfree(l); | ||
179 | l = next; | ||
180 | } | ||
181 | |||
182 | mips_hi16_list = NULL; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
187 | */ | ||
188 | insnlo = (insnlo & ~0xffff) | (v & 0xffff); | ||
189 | *location = insnlo; | ||
190 | |||
191 | return 0; | ||
192 | |||
193 | out_danger: | ||
194 | printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name); | ||
195 | |||
196 | return -ENOEXEC; | ||
197 | } | ||
198 | |||
199 | static int apply_r_mips_64(struct module *me, uint32_t *location, | ||
200 | Elf64_Addr v) | ||
201 | { | ||
202 | *(uint64_t *) location = v; | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | |||
208 | static int apply_r_mips_higher(struct module *me, uint32_t *location, | ||
209 | Elf64_Addr v) | ||
210 | { | ||
211 | *location = (*location & 0xffff0000) | | ||
212 | ((((long long) v + 0x80008000LL) >> 32) & 0xffff); | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static int apply_r_mips_highest(struct module *me, uint32_t *location, | ||
218 | Elf64_Addr v) | ||
219 | { | ||
220 | *location = (*location & 0xffff0000) | | ||
221 | ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | ||
227 | Elf64_Addr v) = { | ||
228 | [R_MIPS_NONE] = apply_r_mips_none, | ||
229 | [R_MIPS_32] = apply_r_mips_32, | ||
230 | [R_MIPS_26] = apply_r_mips_26, | ||
231 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
232 | [R_MIPS_LO16] = apply_r_mips_lo16, | ||
233 | [R_MIPS_64] = apply_r_mips_64, | ||
234 | [R_MIPS_HIGHER] = apply_r_mips_higher, | ||
235 | [R_MIPS_HIGHEST] = apply_r_mips_highest | ||
236 | }; | ||
237 | |||
238 | int apply_relocate_add(Elf64_Shdr *sechdrs, | ||
239 | const char *strtab, | ||
240 | unsigned int symindex, | ||
241 | unsigned int relsec, | ||
242 | struct module *me) | ||
243 | { | ||
244 | Elf64_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; | ||
245 | Elf64_Sym *sym; | ||
246 | uint32_t *location; | ||
247 | unsigned int i; | ||
248 | Elf64_Addr v; | ||
249 | int res; | ||
250 | |||
251 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
252 | sechdrs[relsec].sh_info); | ||
253 | |||
254 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
255 | /* This is where to make the change */ | ||
256 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
257 | + rel[i].r_offset; | ||
258 | /* This is the symbol it is referring to */ | ||
259 | sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + rel[i].r_sym; | ||
260 | if (!sym->st_value) { | ||
261 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
262 | me->name, strtab + sym->st_name); | ||
263 | return -ENOENT; | ||
264 | } | ||
265 | |||
266 | v = sym->st_value; | ||
267 | |||
268 | res = reloc_handlers[rel[i].r_type](me, location, v); | ||
269 | if (res) | ||
270 | return res; | ||
271 | } | ||
272 | |||
273 | return 0; | ||
274 | } | ||
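apply_r_mips_higher()/apply_r_mips_highest() in the deleted 64-bit loader above pre-bias the value with 0x80008000 and 0x800080008000 before shifting, which folds in the carries that the sign-extended lower halves will generate at link time. The standalone sketch below reassembles a 64-bit value from the four 16-bit pieces to show the identity holds; it is an illustration of the arithmetic, not kernel code.

/* Illustrative only: %lo/%hi/%higher/%highest splitting and reassembly. */
#include <stdio.h>
#include <stdint.h>

static uint16_t r_lo(uint64_t v)      { return v & 0xffff; }
static uint16_t r_hi(uint64_t v)      { return ((v >> 16) + ((v & 0x8000) != 0)) & 0xffff; }
static uint16_t r_higher(uint64_t v)  { return ((v + 0x80008000ULL) >> 32) & 0xffff; }
static uint16_t r_highest(uint64_t v) { return ((v + 0x800080008000ULL) >> 48) & 0xffff; }

/* Sign-extend a 16-bit piece to 64 bits, as the CPU's add immediates do. */
static uint64_t sext16(uint16_t x)
{
        return (uint64_t)(int64_t)(int16_t)x;
}

int main(void)
{
        uint64_t v = 0xa800ffffbeefcafeULL;
        uint64_t r = ((uint64_t)r_highest(v) << 48)
                   + (sext16(r_higher(v)) << 32)
                   + (sext16(r_hi(v)) << 16)
                   +  sext16(r_lo(v));

        printf("pieces: %04x %04x %04x %04x -> %s\n",
               r_highest(v), r_higher(v), r_hi(v), r_lo(v),
               r == v ? "matches" : "MISMATCH");
        return 0;
}

Because each lower piece can be negative after sign extension, the upper pieces are computed from a value biased by 0x8000 at each boundary, so the borrows cancel exactly when the pieces are added back together.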
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 458af3c7a639..e54a7f442f8a 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c | |||
@@ -1,9 +1,345 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) 2001 Rusty Russell. | ||
17 | * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) | ||
18 | * Copyright (C) 2005 Thiemo Seufer | ||
19 | */ | ||
20 | |||
21 | #undef DEBUG | ||
22 | |||
23 | #include <linux/moduleloader.h> | ||
24 | #include <linux/elf.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/kernel.h> | ||
1 | #include <linux/module.h> | 30 | #include <linux/module.h> |
2 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
3 | 32 | ||
33 | struct mips_hi16 { | ||
34 | struct mips_hi16 *next; | ||
35 | Elf_Addr *addr; | ||
36 | Elf_Addr value; | ||
37 | }; | ||
38 | |||
39 | static struct mips_hi16 *mips_hi16_list; | ||
40 | |||
4 | static LIST_HEAD(dbe_list); | 41 | static LIST_HEAD(dbe_list); |
5 | static DEFINE_SPINLOCK(dbe_lock); | 42 | static DEFINE_SPINLOCK(dbe_lock); |
6 | 43 | ||
44 | void *module_alloc(unsigned long size) | ||
45 | { | ||
46 | if (size == 0) | ||
47 | return NULL; | ||
48 | return vmalloc(size); | ||
49 | } | ||
50 | |||
51 | /* Free memory returned from module_alloc */ | ||
52 | void module_free(struct module *mod, void *module_region) | ||
53 | { | ||
54 | vfree(module_region); | ||
55 | /* FIXME: If module_region == mod->init_region, trim exception | ||
56 | table entries. */ | ||
57 | } | ||
58 | |||
59 | int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, | ||
60 | char *secstrings, struct module *mod) | ||
61 | { | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v) | ||
71 | { | ||
72 | *location += v; | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) | ||
78 | { | ||
79 | *location = v; | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) | ||
85 | { | ||
86 | if (v % 4) { | ||
87 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | ||
88 | return -ENOEXEC; | ||
89 | } | ||
90 | |||
91 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
92 | printk(KERN_ERR | ||
93 | "module %s: relocation overflow\n", | ||
94 | me->name); | ||
95 | return -ENOEXEC; | ||
96 | } | ||
97 | |||
98 | *location = (*location & ~0x03ffffff) | | ||
99 | ((*location + (v >> 2)) & 0x03ffffff); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) | ||
105 | { | ||
106 | if (v % 4) { | ||
107 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | ||
108 | return -ENOEXEC; | ||
109 | } | ||
110 | |||
111 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
112 | printk(KERN_ERR | ||
113 | "module %s: relocation overflow\n", | ||
114 | me->name); | ||
115 | return -ENOEXEC; | ||
116 | } | ||
117 | |||
118 | *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff); | ||
119 | |||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) | ||
124 | { | ||
125 | struct mips_hi16 *n; | ||
126 | |||
127 | /* | ||
128 | * We cannot relocate this one now because we don't know the value of | ||
129 | * the carry we need to add. Save the information, and let LO16 do the | ||
130 | * actual relocation. | ||
131 | */ | ||
132 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
133 | if (!n) | ||
134 | return -ENOMEM; | ||
135 | |||
136 | n->addr = (Elf_Addr *)location; | ||
137 | n->value = v; | ||
138 | n->next = mips_hi16_list; | ||
139 | mips_hi16_list = n; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) | ||
145 | { | ||
146 | *location = (*location & 0xffff0000) | | ||
147 | ((((long long) v + 0x8000LL) >> 16) & 0xffff); | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) | ||
153 | { | ||
154 | unsigned long insnlo = *location; | ||
155 | Elf_Addr val, vallo; | ||
156 | |||
157 | /* Sign extend the addend we extract from the lo insn. */ | ||
158 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
159 | |||
160 | if (mips_hi16_list != NULL) { | ||
161 | struct mips_hi16 *l; | ||
162 | |||
163 | l = mips_hi16_list; | ||
164 | while (l != NULL) { | ||
165 | struct mips_hi16 *next; | ||
166 | unsigned long insn; | ||
167 | |||
168 | /* | ||
169 | * The value for the HI16 had best be the same. | ||
170 | */ | ||
171 | if (v != l->value) | ||
172 | goto out_danger; | ||
173 | |||
174 | /* | ||
175 | * Do the HI16 relocation. Note that we actually don't | ||
176 | * need to know anything about the LO16 itself, except | ||
177 | * where to find the low 16 bits of the addend needed | ||
178 | * by the LO16. | ||
179 | */ | ||
180 | insn = *l->addr; | ||
181 | val = ((insn & 0xffff) << 16) + vallo; | ||
182 | val += v; | ||
183 | |||
184 | /* | ||
185 | * Account for the sign extension that will happen in | ||
186 | * the low bits. | ||
187 | */ | ||
188 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
189 | |||
190 | insn = (insn & ~0xffff) | val; | ||
191 | *l->addr = insn; | ||
192 | |||
193 | next = l->next; | ||
194 | kfree(l); | ||
195 | l = next; | ||
196 | } | ||
197 | |||
198 | mips_hi16_list = NULL; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
203 | */ | ||
204 | val = v + vallo; | ||
205 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
206 | *location = insnlo; | ||
207 | |||
208 | return 0; | ||
209 | |||
210 | out_danger: | ||
211 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | ||
212 | |||
213 | return -ENOEXEC; | ||
214 | } | ||
215 | |||
216 | static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v) | ||
217 | { | ||
218 | *location = (*location & 0xffff0000) | (v & 0xffff); | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v) | ||
224 | { | ||
225 | *(Elf_Addr *)location = v; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int apply_r_mips_higher_rela(struct module *me, u32 *location, | ||
231 | Elf_Addr v) | ||
232 | { | ||
233 | *location = (*location & 0xffff0000) | | ||
234 | ((((long long) v + 0x80008000LL) >> 32) & 0xffff); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static int apply_r_mips_highest_rela(struct module *me, u32 *location, | ||
240 | Elf_Addr v) | ||
241 | { | ||
242 | *location = (*location & 0xffff0000) | | ||
243 | ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, | ||
249 | Elf_Addr v) = { | ||
250 | [R_MIPS_NONE] = apply_r_mips_none, | ||
251 | [R_MIPS_32] = apply_r_mips_32_rel, | ||
252 | [R_MIPS_26] = apply_r_mips_26_rel, | ||
253 | [R_MIPS_HI16] = apply_r_mips_hi16_rel, | ||
254 | [R_MIPS_LO16] = apply_r_mips_lo16_rel | ||
255 | }; | ||
256 | |||
257 | static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, | ||
258 | Elf_Addr v) = { | ||
259 | [R_MIPS_NONE] = apply_r_mips_none, | ||
260 | [R_MIPS_32] = apply_r_mips_32_rela, | ||
261 | [R_MIPS_26] = apply_r_mips_26_rela, | ||
262 | [R_MIPS_HI16] = apply_r_mips_hi16_rela, | ||
263 | [R_MIPS_LO16] = apply_r_mips_lo16_rela, | ||
264 | [R_MIPS_64] = apply_r_mips_64_rela, | ||
265 | [R_MIPS_HIGHER] = apply_r_mips_higher_rela, | ||
266 | [R_MIPS_HIGHEST] = apply_r_mips_highest_rela | ||
267 | }; | ||
268 | |||
269 | int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | ||
270 | unsigned int symindex, unsigned int relsec, | ||
271 | struct module *me) | ||
272 | { | ||
273 | Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; | ||
274 | Elf_Sym *sym; | ||
275 | u32 *location; | ||
276 | unsigned int i; | ||
277 | Elf_Addr v; | ||
278 | int res; | ||
279 | |||
280 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
281 | sechdrs[relsec].sh_info); | ||
282 | |||
283 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
284 | /* This is where to make the change */ | ||
285 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
286 | + rel[i].r_offset; | ||
287 | /* This is the symbol it is referring to */ | ||
288 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
289 | + ELF_MIPS_R_SYM(rel[i]); | ||
290 | if (!sym->st_value) { | ||
291 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
292 | me->name, strtab + sym->st_name); | ||
293 | return -ENOENT; | ||
294 | } | ||
295 | |||
296 | v = sym->st_value; | ||
297 | |||
298 | res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); | ||
299 | if (res) | ||
300 | return res; | ||
301 | } | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, | ||
307 | unsigned int symindex, unsigned int relsec, | ||
308 | struct module *me) | ||
309 | { | ||
310 | Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; | ||
311 | Elf_Sym *sym; | ||
312 | u32 *location; | ||
313 | unsigned int i; | ||
314 | Elf_Addr v; | ||
315 | int res; | ||
316 | |||
317 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
318 | sechdrs[relsec].sh_info); | ||
319 | |||
320 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
321 | /* This is where to make the change */ | ||
322 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
323 | + rel[i].r_offset; | ||
324 | /* This is the symbol it is referring to */ | ||
325 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
326 | + ELF_MIPS_R_SYM(rel[i]); | ||
327 | if (!sym->st_value) { | ||
328 | printk(KERN_WARNING "%s: Unknown symbol %s\n", | ||
329 | me->name, strtab + sym->st_name); | ||
330 | return -ENOENT; | ||
331 | } | ||
332 | |||
333 | v = sym->st_value + rel[i].r_addend; | ||
334 | |||
335 | res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); | ||
336 | if (res) | ||
337 | return res; | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
7 | /* Given an address, look for it in the module exception tables. */ | 343 | /* Given an address, look for it in the module exception tables. */ |
8 | const struct exception_table_entry *search_module_dbetables(unsigned long addr) | 344 | const struct exception_table_entry *search_module_dbetables(unsigned long addr) |
9 | { | 345 | { |
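
The 0xf0000000 comparison in apply_r_mips_26_rel/rela reflects how J and JAL encode their target: the 26-bit field carries bits 27..2 of the destination and the top four bits come from the address of the delay slot, so the destination must lie in the same 256 MB segment. A small sketch (my own helper names, not from the patch) of that encoding and the matching decode:

#include <stdio.h>
#include <stdint.h>

/* Insert a jump target into the 26-bit field of a J/JAL instruction. */
static uint32_t encode_j(uint32_t insn, uint32_t target)
{
        return (insn & ~0x03ffffffu) | ((target >> 2) & 0x03ffffffu);
}

/* Recover the destination; the delay slot's PC supplies the top 4 bits. */
static uint32_t decode_j_target(uint32_t insn, uint32_t pc)
{
        return ((pc + 4) & 0xf0000000u) | ((insn & 0x03ffffffu) << 2);
}

int main(void)
{
        uint32_t pc = 0x80123450, target = 0x80234568;  /* same 256 MB segment */
        uint32_t insn = encode_j(0x0c000000u, target);  /* JAL opcode */

        printf("insn=%#x decoded=%#x\n", insn, decode_j_target(insn, pc));
        return 0;
}
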
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 0f159f30e894..86fe15b273cd 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -2,7 +2,8 @@ | |||
2 | * linux/arch/mips/kernel/proc.c | 2 | * linux/arch/mips/kernel/proc.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995, 1996, 2001 Ralf Baechle | 4 | * Copyright (C) 1995, 1996, 2001 Ralf Baechle |
5 | * Copyright (C) 2001 MIPS Technologies, Inc. | 5 | * Copyright (C) 2001, 2004 MIPS Technologies, Inc. |
6 | * Copyright (C) 2004 Maciej W. Rozycki | ||
6 | */ | 7 | */ |
7 | #include <linux/config.h> | 8 | #include <linux/config.h> |
8 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
@@ -19,63 +20,69 @@ | |||
19 | unsigned int vced_count, vcei_count; | 20 | unsigned int vced_count, vcei_count; |
20 | 21 | ||
21 | static const char *cpu_name[] = { | 22 | static const char *cpu_name[] = { |
22 | [CPU_UNKNOWN] "unknown", | 23 | [CPU_UNKNOWN] = "unknown", |
23 | [CPU_R2000] "R2000", | 24 | [CPU_R2000] = "R2000", |
24 | [CPU_R3000] "R3000", | 25 | [CPU_R3000] = "R3000", |
25 | [CPU_R3000A] "R3000A", | 26 | [CPU_R3000A] = "R3000A", |
26 | [CPU_R3041] "R3041", | 27 | [CPU_R3041] = "R3041", |
27 | [CPU_R3051] "R3051", | 28 | [CPU_R3051] = "R3051", |
28 | [CPU_R3052] "R3052", | 29 | [CPU_R3052] = "R3052", |
29 | [CPU_R3081] "R3081", | 30 | [CPU_R3081] = "R3081", |
30 | [CPU_R3081E] "R3081E", | 31 | [CPU_R3081E] = "R3081E", |
31 | [CPU_R4000PC] "R4000PC", | 32 | [CPU_R4000PC] = "R4000PC", |
32 | [CPU_R4000SC] "R4000SC", | 33 | [CPU_R4000SC] = "R4000SC", |
33 | [CPU_R4000MC] "R4000MC", | 34 | [CPU_R4000MC] = "R4000MC", |
34 | [CPU_R4200] "R4200", | 35 | [CPU_R4200] = "R4200", |
35 | [CPU_R4400PC] "R4400PC", | 36 | [CPU_R4400PC] = "R4400PC", |
36 | [CPU_R4400SC] "R4400SC", | 37 | [CPU_R4400SC] = "R4400SC", |
37 | [CPU_R4400MC] "R4400MC", | 38 | [CPU_R4400MC] = "R4400MC", |
38 | [CPU_R4600] "R4600", | 39 | [CPU_R4600] = "R4600", |
39 | [CPU_R6000] "R6000", | 40 | [CPU_R6000] = "R6000", |
40 | [CPU_R6000A] "R6000A", | 41 | [CPU_R6000A] = "R6000A", |
41 | [CPU_R8000] "R8000", | 42 | [CPU_R8000] = "R8000", |
42 | [CPU_R10000] "R10000", | 43 | [CPU_R10000] = "R10000", |
43 | [CPU_R12000] "R12000", | 44 | [CPU_R12000] = "R12000", |
44 | [CPU_R4300] "R4300", | 45 | [CPU_R4300] = "R4300", |
45 | [CPU_R4650] "R4650", | 46 | [CPU_R4650] = "R4650", |
46 | [CPU_R4700] "R4700", | 47 | [CPU_R4700] = "R4700", |
47 | [CPU_R5000] "R5000", | 48 | [CPU_R5000] = "R5000", |
48 | [CPU_R5000A] "R5000A", | 49 | [CPU_R5000A] = "R5000A", |
49 | [CPU_R4640] "R4640", | 50 | [CPU_R4640] = "R4640", |
50 | [CPU_NEVADA] "Nevada", | 51 | [CPU_NEVADA] = "Nevada", |
51 | [CPU_RM7000] "RM7000", | 52 | [CPU_RM7000] = "RM7000", |
52 | [CPU_RM9000] "RM9000", | 53 | [CPU_RM9000] = "RM9000", |
53 | [CPU_R5432] "R5432", | 54 | [CPU_R5432] = "R5432", |
54 | [CPU_4KC] "MIPS 4Kc", | 55 | [CPU_4KC] = "MIPS 4Kc", |
55 | [CPU_5KC] "MIPS 5Kc", | 56 | [CPU_5KC] = "MIPS 5Kc", |
56 | [CPU_R4310] "R4310", | 57 | [CPU_R4310] = "R4310", |
57 | [CPU_SB1] "SiByte SB1", | 58 | [CPU_SB1] = "SiByte SB1", |
58 | [CPU_TX3912] "TX3912", | 59 | [CPU_SB1A] = "SiByte SB1A", |
59 | [CPU_TX3922] "TX3922", | 60 | [CPU_TX3912] = "TX3912", |
60 | [CPU_TX3927] "TX3927", | 61 | [CPU_TX3922] = "TX3922", |
61 | [CPU_AU1000] "Au1000", | 62 | [CPU_TX3927] = "TX3927", |
62 | [CPU_AU1500] "Au1500", | 63 | [CPU_AU1000] = "Au1000", |
63 | [CPU_4KEC] "MIPS 4KEc", | 64 | [CPU_AU1500] = "Au1500", |
64 | [CPU_4KSC] "MIPS 4KSc", | 65 | [CPU_AU1100] = "Au1100", |
65 | [CPU_VR41XX] "NEC Vr41xx", | 66 | [CPU_AU1550] = "Au1550", |
66 | [CPU_R5500] "R5500", | 67 | [CPU_AU1200] = "Au1200", |
67 | [CPU_TX49XX] "TX49xx", | 68 | [CPU_4KEC] = "MIPS 4KEc", |
68 | [CPU_20KC] "MIPS 20Kc", | 69 | [CPU_4KSC] = "MIPS 4KSc", |
69 | [CPU_24K] "MIPS 24K", | 70 | [CPU_VR41XX] = "NEC Vr41xx", |
70 | [CPU_25KF] "MIPS 25Kf", | 71 | [CPU_R5500] = "R5500", |
71 | [CPU_VR4111] "NEC VR4111", | 72 | [CPU_TX49XX] = "TX49xx", |
72 | [CPU_VR4121] "NEC VR4121", | 73 | [CPU_20KC] = "MIPS 20Kc", |
73 | [CPU_VR4122] "NEC VR4122", | 74 | [CPU_24K] = "MIPS 24K", |
74 | [CPU_VR4131] "NEC VR4131", | 75 | [CPU_25KF] = "MIPS 25Kf", |
75 | [CPU_VR4133] "NEC VR4133", | 76 | [CPU_34K] = "MIPS 34K", |
76 | [CPU_VR4181] "NEC VR4181", | 77 | [CPU_VR4111] = "NEC VR4111", |
77 | [CPU_VR4181A] "NEC VR4181A", | 78 | [CPU_VR4121] = "NEC VR4121", |
78 | [CPU_SR71000] "Sandcraft SR71000" | 79 | [CPU_VR4122] = "NEC VR4122", |
80 | [CPU_VR4131] = "NEC VR4131", | ||
81 | [CPU_VR4133] = "NEC VR4133", | ||
82 | [CPU_VR4181] = "NEC VR4181", | ||
83 | [CPU_VR4181A] = "NEC VR4181A", | ||
84 | [CPU_SR71000] = "Sandcraft SR71000", | ||
85 | [CPU_PR4450] = "Philips PR4450", | ||
79 | }; | 86 | }; |
80 | 87 | ||
81 | 88 | ||
@@ -105,8 +112,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
105 | (version >> 4) & 0x0f, version & 0x0f, | 112 | (version >> 4) & 0x0f, version & 0x0f, |
106 | (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); | 113 | (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); |
107 | seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", | 114 | seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", |
108 | loops_per_jiffy / (500000/HZ), | 115 | cpu_data[n].udelay_val / (500000/HZ), |
109 | (loops_per_jiffy / (5000/HZ)) % 100); | 116 | (cpu_data[n].udelay_val / (5000/HZ)) % 100); |
110 | seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); | 117 | seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); |
111 | seq_printf(m, "microsecond timers\t: %s\n", | 118 | seq_printf(m, "microsecond timers\t: %s\n", |
112 | cpu_has_counter ? "yes" : "no"); | 119 | cpu_has_counter ? "yes" : "no"); |
@@ -115,6 +122,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
115 | cpu_has_divec ? "yes" : "no"); | 122 | cpu_has_divec ? "yes" : "no"); |
116 | seq_printf(m, "hardware watchpoint\t: %s\n", | 123 | seq_printf(m, "hardware watchpoint\t: %s\n", |
117 | cpu_has_watch ? "yes" : "no"); | 124 | cpu_has_watch ? "yes" : "no"); |
125 | seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", | ||
126 | cpu_has_mips16 ? " mips16" : "", | ||
127 | cpu_has_mdmx ? " mdmx" : "", | ||
128 | cpu_has_mips3d ? " mips3d" : "", | ||
129 | cpu_has_smartmips ? " smartmips" : "", | ||
130 | cpu_has_dsp ? " dsp" : "", | ||
131 | cpu_has_mipsmt ? " mt" : "" | ||
132 | ); | ||
118 | 133 | ||
119 | sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", | 134 | sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", |
120 | cpu_has_vce ? "%u" : "not available"); | 135 | cpu_has_vce ? "%u" : "not available"); |
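
The BogoMIPS line added above is plain integer arithmetic on the per-CPU loops-per-jiffy value; a userspace sketch with made-up HZ and udelay_val numbers shows the same X.YY formatting without floating point:

#include <stdio.h>

#define HZ 100                          /* hypothetical tick rate */

int main(void)
{
        unsigned long udelay_val = 249856;      /* hypothetical loops per jiffy */

        /* BogoMIPS = loops_per_jiffy * HZ / 500000, printed as X.YY */
        printf("BogoMIPS\t\t: %lu.%02lu\n",
               udelay_val / (500000 / HZ),
               (udelay_val / (5000 / HZ)) % 100);
        return 0;
}

With these numbers it prints 49.97. The switch from the global loops_per_jiffy to cpu_data[n].udelay_val simply makes the figure per-CPU.
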
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index e4f2f8011387..4fe3d5715c41 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -25,8 +25,10 @@ | |||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/completion.h> | 26 | #include <linux/completion.h> |
27 | 27 | ||
28 | #include <asm/abi.h> | ||
28 | #include <asm/bootinfo.h> | 29 | #include <asm/bootinfo.h> |
29 | #include <asm/cpu.h> | 30 | #include <asm/cpu.h> |
31 | #include <asm/dsp.h> | ||
30 | #include <asm/fpu.h> | 32 | #include <asm/fpu.h> |
31 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
32 | #include <asm/system.h> | 34 | #include <asm/system.h> |
@@ -39,14 +41,6 @@ | |||
39 | #include <asm/inst.h> | 41 | #include <asm/inst.h> |
40 | 42 | ||
41 | /* | 43 | /* |
42 | * We use this if we don't have any better idle routine.. | ||
43 | * (This to kill: kernel/platform.c. | ||
44 | */ | ||
45 | void default_idle (void) | ||
46 | { | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * The idle thread. There's no useful work to be done, so just try to conserve | 44 | * The idle thread. There's no useful work to be done, so just try to conserve |
51 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | 45 | * power and have a low exit latency (ie sit in a loop waiting for somebody to |
52 | * say that they'd like to reschedule) | 46 | * say that they'd like to reschedule) |
@@ -62,6 +56,54 @@ ATTRIB_NORET void cpu_idle(void) | |||
62 | } | 56 | } |
63 | } | 57 | } |
64 | 58 | ||
59 | extern int do_signal(sigset_t *oldset, struct pt_regs *regs); | ||
60 | extern int do_signal32(sigset_t *oldset, struct pt_regs *regs); | ||
61 | |||
62 | /* | ||
63 | * Native o32 and N64 ABI without DSP ASE | ||
64 | */ | ||
65 | extern int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | ||
66 | int signr, sigset_t *set); | ||
67 | extern int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | ||
68 | int signr, sigset_t *set, siginfo_t *info); | ||
69 | |||
70 | struct mips_abi mips_abi = { | ||
71 | .do_signal = do_signal, | ||
72 | #ifdef CONFIG_TRAD_SIGNALS | ||
73 | .setup_frame = setup_frame, | ||
74 | #endif | ||
75 | .setup_rt_frame = setup_rt_frame | ||
76 | }; | ||
77 | |||
78 | #ifdef CONFIG_MIPS32_O32 | ||
79 | /* | ||
80 | * o32 compatibility on 64-bit kernels, without DSP ASE | ||
81 | */ | ||
82 | extern int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | ||
83 | int signr, sigset_t *set); | ||
84 | extern int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | ||
85 | int signr, sigset_t *set, siginfo_t *info); | ||
86 | |||
87 | struct mips_abi mips_abi_32 = { | ||
88 | .do_signal = do_signal32, | ||
89 | .setup_frame = setup_frame_32, | ||
90 | .setup_rt_frame = setup_rt_frame_32 | ||
91 | }; | ||
92 | #endif /* CONFIG_MIPS32_O32 */ | ||
93 | |||
94 | #ifdef CONFIG_MIPS32_N32 | ||
95 | /* | ||
96 | * N32 on 64-bit kernels, without DSP ASE | ||
97 | */ | ||
98 | extern int setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs, | ||
99 | int signr, sigset_t *set, siginfo_t *info); | ||
100 | |||
101 | struct mips_abi mips_abi_n32 = { | ||
102 | .do_signal = do_signal, | ||
103 | .setup_rt_frame = setup_rt_frame_n32 | ||
104 | }; | ||
105 | #endif /* CONFIG_MIPS32_N32 */ | ||
106 | |||
65 | asmlinkage void ret_from_fork(void); | 107 | asmlinkage void ret_from_fork(void); |
66 | 108 | ||
67 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | 109 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) |
@@ -78,6 +120,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | |||
78 | regs->cp0_status = status; | 120 | regs->cp0_status = status; |
79 | clear_used_math(); | 121 | clear_used_math(); |
80 | lose_fpu(); | 122 | lose_fpu(); |
123 | if (cpu_has_dsp) | ||
124 | __init_dsp(); | ||
81 | regs->cp0_epc = pc; | 125 | regs->cp0_epc = pc; |
82 | regs->regs[29] = sp; | 126 | regs->regs[29] = sp; |
83 | current_thread_info()->addr_limit = USER_DS; | 127 | current_thread_info()->addr_limit = USER_DS; |
@@ -97,14 +141,17 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
97 | struct thread_info *ti = p->thread_info; | 141 | struct thread_info *ti = p->thread_info; |
98 | struct pt_regs *childregs; | 142 | struct pt_regs *childregs; |
99 | long childksp; | 143 | long childksp; |
144 | p->set_child_tid = p->clear_child_tid = NULL; | ||
100 | 145 | ||
101 | childksp = (unsigned long)ti + THREAD_SIZE - 32; | 146 | childksp = (unsigned long)ti + THREAD_SIZE - 32; |
102 | 147 | ||
103 | preempt_disable(); | 148 | preempt_disable(); |
104 | 149 | ||
105 | if (is_fpu_owner()) { | 150 | if (is_fpu_owner()) |
106 | save_fp(p); | 151 | save_fp(p); |
107 | } | 152 | |
153 | if (cpu_has_dsp) | ||
154 | save_dsp(p); | ||
108 | 155 | ||
109 | preempt_enable(); | 156 | preempt_enable(); |
110 | 157 | ||
@@ -142,6 +189,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
142 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 189 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
143 | clear_tsk_thread_flag(p, TIF_USEDFPU); | 190 | clear_tsk_thread_flag(p, TIF_USEDFPU); |
144 | 191 | ||
192 | if (clone_flags & CLONE_SETTLS) | ||
193 | ti->tp_value = regs->regs[7]; | ||
194 | |||
145 | return 0; | 195 | return 0; |
146 | } | 196 | } |
147 | 197 | ||
@@ -175,6 +225,14 @@ void dump_regs(elf_greg_t *gp, struct pt_regs *regs) | |||
175 | #endif | 225 | #endif |
176 | } | 226 | } |
177 | 227 | ||
228 | int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs) | ||
229 | { | ||
230 | struct thread_info *ti = tsk->thread_info; | ||
231 | long ksp = (unsigned long)ti + THREAD_SIZE - 32; | ||
232 | dump_regs(&(*regs)[0], (struct pt_regs *) ksp - 1); | ||
233 | return 1; | ||
234 | } | ||
235 | |||
178 | int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr) | 236 | int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr) |
179 | { | 237 | { |
180 | memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); | 238 | memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); |
@@ -211,22 +269,48 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | |||
211 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); | 269 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); |
212 | } | 270 | } |
213 | 271 | ||
214 | struct mips_frame_info { | 272 | static struct mips_frame_info { |
273 | void *func; | ||
274 | int omit_fp; /* compiled without fno-omit-frame-pointer */ | ||
215 | int frame_offset; | 275 | int frame_offset; |
216 | int pc_offset; | 276 | int pc_offset; |
277 | } schedule_frame, mfinfo[] = { | ||
278 | { schedule, 0 }, /* must be first */ | ||
279 | /* arch/mips/kernel/semaphore.c */ | ||
280 | { __down, 1 }, | ||
281 | { __down_interruptible, 1 }, | ||
282 | /* kernel/sched.c */ | ||
283 | #ifdef CONFIG_PREEMPT | ||
284 | { preempt_schedule, 0 }, | ||
285 | #endif | ||
286 | { wait_for_completion, 0 }, | ||
287 | { interruptible_sleep_on, 0 }, | ||
288 | { interruptible_sleep_on_timeout, 0 }, | ||
289 | { sleep_on, 0 }, | ||
290 | { sleep_on_timeout, 0 }, | ||
291 | { yield, 0 }, | ||
292 | { io_schedule, 0 }, | ||
293 | { io_schedule_timeout, 0 }, | ||
294 | #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) | ||
295 | { __preempt_spin_lock, 0 }, | ||
296 | { __preempt_write_lock, 0 }, | ||
297 | #endif | ||
298 | /* kernel/timer.c */ | ||
299 | { schedule_timeout, 1 }, | ||
300 | /* { nanosleep_restart, 1 }, */ | ||
301 | /* lib/rwsem-spinlock.c */ | ||
302 | { __down_read, 1 }, | ||
303 | { __down_write, 1 }, | ||
217 | }; | 304 | }; |
218 | static struct mips_frame_info schedule_frame; | 305 | |
219 | static struct mips_frame_info schedule_timeout_frame; | ||
220 | static struct mips_frame_info sleep_on_frame; | ||
221 | static struct mips_frame_info sleep_on_timeout_frame; | ||
222 | static struct mips_frame_info wait_for_completion_frame; | ||
223 | static int mips_frame_info_initialized; | 306 | static int mips_frame_info_initialized; |
224 | static int __init get_frame_info(struct mips_frame_info *info, void *func) | 307 | static int __init get_frame_info(struct mips_frame_info *info) |
225 | { | 308 | { |
226 | int i; | 309 | int i; |
310 | void *func = info->func; | ||
227 | union mips_instruction *ip = (union mips_instruction *)func; | 311 | union mips_instruction *ip = (union mips_instruction *)func; |
228 | info->pc_offset = -1; | 312 | info->pc_offset = -1; |
229 | info->frame_offset = -1; | 313 | info->frame_offset = info->omit_fp ? 0 : -1; |
230 | for (i = 0; i < 128; i++, ip++) { | 314 | for (i = 0; i < 128; i++, ip++) { |
231 | /* if jal, jalr, jr, stop. */ | 315 | /* if jal, jalr, jr, stop. */ |
232 | if (ip->j_format.opcode == jal_op || | 316 | if (ip->j_format.opcode == jal_op || |
@@ -247,14 +331,16 @@ static int __init get_frame_info(struct mips_frame_info *info, void *func) | |||
247 | /* sw / sd $ra, offset($sp) */ | 331 | /* sw / sd $ra, offset($sp) */ |
248 | if (ip->i_format.rt == 31) { | 332 | if (ip->i_format.rt == 31) { |
249 | if (info->pc_offset != -1) | 333 | if (info->pc_offset != -1) |
250 | break; | 334 | continue; |
251 | info->pc_offset = | 335 | info->pc_offset = |
252 | ip->i_format.simmediate / sizeof(long); | 336 | ip->i_format.simmediate / sizeof(long); |
253 | } | 337 | } |
254 | /* sw / sd $s8, offset($sp) */ | 338 | /* sw / sd $s8, offset($sp) */ |
255 | if (ip->i_format.rt == 30) { | 339 | if (ip->i_format.rt == 30) { |
340 | //#if 0 /* gcc 3.4 does aggressive optimization... */ | ||
256 | if (info->frame_offset != -1) | 341 | if (info->frame_offset != -1) |
257 | break; | 342 | continue; |
343 | //#endif | ||
258 | info->frame_offset = | 344 | info->frame_offset = |
259 | ip->i_format.simmediate / sizeof(long); | 345 | ip->i_format.simmediate / sizeof(long); |
260 | } | 346 | } |
@@ -272,13 +358,25 @@ static int __init get_frame_info(struct mips_frame_info *info, void *func) | |||
272 | 358 | ||
273 | static int __init frame_info_init(void) | 359 | static int __init frame_info_init(void) |
274 | { | 360 | { |
275 | mips_frame_info_initialized = | 361 | int i, found; |
276 | !get_frame_info(&schedule_frame, schedule) && | 362 | for (i = 0; i < ARRAY_SIZE(mfinfo); i++) |
277 | !get_frame_info(&schedule_timeout_frame, schedule_timeout) && | 363 | if (get_frame_info(&mfinfo[i])) |
278 | !get_frame_info(&sleep_on_frame, sleep_on) && | 364 | return -1; |
279 | !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) && | 365 | schedule_frame = mfinfo[0]; |
280 | !get_frame_info(&wait_for_completion_frame, wait_for_completion); | 366 | /* bubble sort */ |
281 | 367 | do { | |
368 | struct mips_frame_info tmp; | ||
369 | found = 0; | ||
370 | for (i = 1; i < ARRAY_SIZE(mfinfo); i++) { | ||
371 | if (mfinfo[i-1].func > mfinfo[i].func) { | ||
372 | tmp = mfinfo[i]; | ||
373 | mfinfo[i] = mfinfo[i-1]; | ||
374 | mfinfo[i-1] = tmp; | ||
375 | found = 1; | ||
376 | } | ||
377 | } | ||
378 | } while (found); | ||
379 | mips_frame_info_initialized = 1; | ||
282 | return 0; | 380 | return 0; |
283 | } | 381 | } |
284 | 382 | ||
@@ -303,60 +401,39 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
303 | /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ | 401 | /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ |
304 | unsigned long get_wchan(struct task_struct *p) | 402 | unsigned long get_wchan(struct task_struct *p) |
305 | { | 403 | { |
404 | unsigned long stack_page; | ||
306 | unsigned long frame, pc; | 405 | unsigned long frame, pc; |
307 | 406 | ||
308 | if (!p || p == current || p->state == TASK_RUNNING) | 407 | if (!p || p == current || p->state == TASK_RUNNING) |
309 | return 0; | 408 | return 0; |
310 | 409 | ||
311 | if (!mips_frame_info_initialized) | 410 | stack_page = (unsigned long)p->thread_info; |
411 | if (!stack_page || !mips_frame_info_initialized) | ||
312 | return 0; | 412 | return 0; |
413 | |||
313 | pc = thread_saved_pc(p); | 414 | pc = thread_saved_pc(p); |
314 | if (!in_sched_functions(pc)) | 415 | if (!in_sched_functions(pc)) |
315 | goto out; | 416 | return pc; |
316 | |||
317 | if (pc >= (unsigned long) sleep_on_timeout) | ||
318 | goto schedule_timeout_caller; | ||
319 | if (pc >= (unsigned long) sleep_on) | ||
320 | goto schedule_caller; | ||
321 | if (pc >= (unsigned long) interruptible_sleep_on_timeout) | ||
322 | goto schedule_timeout_caller; | ||
323 | if (pc >= (unsigned long)interruptible_sleep_on) | ||
324 | goto schedule_caller; | ||
325 | if (pc >= (unsigned long)wait_for_completion) | ||
326 | goto schedule_caller; | ||
327 | goto schedule_timeout_caller; | ||
328 | |||
329 | schedule_caller: | ||
330 | frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; | ||
331 | if (pc >= (unsigned long) sleep_on) | ||
332 | pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset]; | ||
333 | else | ||
334 | pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset]; | ||
335 | goto out; | ||
336 | 417 | ||
337 | schedule_timeout_caller: | ||
338 | /* | ||
339 | * The schedule_timeout frame | ||
340 | */ | ||
341 | frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; | 418 | frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; |
419 | do { | ||
420 | int i; | ||
342 | 421 | ||
343 | /* | 422 | if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32) |
344 | * frame now points to sleep_on_timeout's frame | 423 | return 0; |
345 | */ | ||
346 | pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset]; | ||
347 | |||
348 | if (in_sched_functions(pc)) { | ||
349 | /* schedule_timeout called by [interruptible_]sleep_on_timeout */ | ||
350 | frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset]; | ||
351 | pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset]; | ||
352 | } | ||
353 | 424 | ||
354 | out: | 425 | for (i = ARRAY_SIZE(mfinfo) - 1; i >= 0; i--) { |
426 | if (pc >= (unsigned long) mfinfo[i].func) | ||
427 | break; | ||
428 | } | ||
429 | if (i < 0) | ||
430 | break; | ||
355 | 431 | ||
356 | #ifdef CONFIG_64BIT | 432 | if (mfinfo[i].omit_fp) |
357 | if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */ | 433 | break; |
358 | pc &= 0xffffffffUL; | 434 | pc = ((unsigned long *)frame)[mfinfo[i].pc_offset]; |
359 | #endif | 435 | frame = ((unsigned long *)frame)[mfinfo[i].frame_offset]; |
436 | } while (in_sched_functions(pc)); | ||
360 | 437 | ||
361 | return pc; | 438 | return pc; |
362 | } | 439 | } |
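
The rewritten get_wchan() replaces the hard-coded schedule/sleep_on cases with a table-driven walk: for each frame it picks the mips_frame_info entry of the function containing the saved PC (the table is kept sorted by address, hence the bubble sort in frame_info_init), then reloads the PC and frame pointer from that entry's recorded stack offsets. A simplified restatement of that loop, with hypothetical types and names and without the omit_fp early exit of the real code:

/* Hypothetical, simplified re-statement of the loop; not kernel code. */
struct frame_layout {
        unsigned long func;     /* start address of the function */
        int pc_offset;          /* stack slot (in longs) holding the saved $ra */
        int frame_offset;       /* stack slot (in longs) holding the saved frame */
};

static unsigned long
walk_wchan(unsigned long pc, unsigned long frame,
           unsigned long stack_lo, unsigned long stack_hi,
           const struct frame_layout *tbl, int entries,
           int (*in_sched)(unsigned long pc))
{
        while (in_sched(pc)) {
                int i;

                if (frame < stack_lo || frame > stack_hi)
                        return 0;

                /* tbl[] is sorted by function address, so the last entry
                 * whose start is <= pc describes the frame we are in. */
                for (i = entries - 1; i >= 0; i--)
                        if (pc >= tbl[i].func)
                                break;
                if (i < 0)
                        return 0;

                pc    = ((unsigned long *)frame)[tbl[i].pc_offset];
                frame = ((unsigned long *)frame)[tbl[i].frame_offset];
        }

        return pc;
}
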
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0b571a5b4b83..f1b0f3e1f95b 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -28,14 +28,18 @@ | |||
28 | #include <linux/security.h> | 28 | #include <linux/security.h> |
29 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
30 | 30 | ||
31 | #include <asm/byteorder.h> | ||
31 | #include <asm/cpu.h> | 32 | #include <asm/cpu.h> |
33 | #include <asm/dsp.h> | ||
32 | #include <asm/fpu.h> | 34 | #include <asm/fpu.h> |
33 | #include <asm/mipsregs.h> | 35 | #include <asm/mipsregs.h> |
36 | #include <asm/mipsmtregs.h> | ||
34 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
35 | #include <asm/page.h> | 38 | #include <asm/page.h> |
36 | #include <asm/system.h> | 39 | #include <asm/system.h> |
37 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
38 | #include <asm/bootinfo.h> | 41 | #include <asm/bootinfo.h> |
42 | #include <asm/reg.h> | ||
39 | 43 | ||
40 | /* | 44 | /* |
41 | * Called by kernel/ptrace.c when detaching.. | 45 | * Called by kernel/ptrace.c when detaching.. |
@@ -47,7 +51,130 @@ void ptrace_disable(struct task_struct *child) | |||
47 | /* Nothing to do.. */ | 51 | /* Nothing to do.. */ |
48 | } | 52 | } |
49 | 53 | ||
50 | asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | 54 | /* |
55 | * Read a general register set. We always use the 64-bit format, even | ||
56 | * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. | ||
57 | * Registers are sign extended to fill the available space. | ||
58 | */ | ||
59 | int ptrace_getregs (struct task_struct *child, __s64 __user *data) | ||
60 | { | ||
61 | struct pt_regs *regs; | ||
62 | int i; | ||
63 | |||
64 | if (!access_ok(VERIFY_WRITE, data, 38 * 8)) | ||
65 | return -EIO; | ||
66 | |||
67 | regs = (struct pt_regs *) ((unsigned long) child->thread_info + | ||
68 | THREAD_SIZE - 32 - sizeof(struct pt_regs)); | ||
69 | |||
70 | for (i = 0; i < 32; i++) | ||
71 | __put_user (regs->regs[i], data + i); | ||
72 | __put_user (regs->lo, data + EF_LO - EF_R0); | ||
73 | __put_user (regs->hi, data + EF_HI - EF_R0); | ||
74 | __put_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0); | ||
75 | __put_user (regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0); | ||
76 | __put_user (regs->cp0_status, data + EF_CP0_STATUS - EF_R0); | ||
77 | __put_user (regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0); | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Write a general register set. As for PTRACE_GETREGS, we always use | ||
84 | * the 64-bit format. On a 32-bit kernel only the lower order half | ||
85 | * (according to endianness) will be used. | ||
86 | */ | ||
87 | int ptrace_setregs (struct task_struct *child, __s64 __user *data) | ||
88 | { | ||
89 | struct pt_regs *regs; | ||
90 | int i; | ||
91 | |||
92 | if (!access_ok(VERIFY_READ, data, 38 * 8)) | ||
93 | return -EIO; | ||
94 | |||
95 | regs = (struct pt_regs *) ((unsigned long) child->thread_info + | ||
96 | THREAD_SIZE - 32 - sizeof(struct pt_regs)); | ||
97 | |||
98 | for (i = 0; i < 32; i++) | ||
99 | __get_user (regs->regs[i], data + i); | ||
100 | __get_user (regs->lo, data + EF_LO - EF_R0); | ||
101 | __get_user (regs->hi, data + EF_HI - EF_R0); | ||
102 | __get_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0); | ||
103 | |||
104 | /* badvaddr, status, and cause may not be written. */ | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | int ptrace_getfpregs (struct task_struct *child, __u32 __user *data) | ||
110 | { | ||
111 | int i; | ||
112 | |||
113 | if (!access_ok(VERIFY_WRITE, data, 33 * 8)) | ||
114 | return -EIO; | ||
115 | |||
116 | if (tsk_used_math(child)) { | ||
117 | fpureg_t *fregs = get_fpu_regs(child); | ||
118 | for (i = 0; i < 32; i++) | ||
119 | __put_user (fregs[i], i + (__u64 __user *) data); | ||
120 | } else { | ||
121 | for (i = 0; i < 32; i++) | ||
122 | __put_user ((__u64) -1, i + (__u64 __user *) data); | ||
123 | } | ||
124 | |||
125 | if (cpu_has_fpu) { | ||
126 | unsigned int flags, tmp; | ||
127 | |||
128 | __put_user (child->thread.fpu.hard.fcr31, data + 64); | ||
129 | |||
130 | preempt_disable(); | ||
131 | if (cpu_has_mipsmt) { | ||
132 | unsigned int vpflags = dvpe(); | ||
133 | flags = read_c0_status(); | ||
134 | __enable_fpu(); | ||
135 | __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); | ||
136 | write_c0_status(flags); | ||
137 | evpe(vpflags); | ||
138 | } else { | ||
139 | flags = read_c0_status(); | ||
140 | __enable_fpu(); | ||
141 | __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); | ||
142 | write_c0_status(flags); | ||
143 | } | ||
144 | preempt_enable(); | ||
145 | __put_user (tmp, data + 65); | ||
146 | } else { | ||
147 | __put_user (child->thread.fpu.soft.fcr31, data + 64); | ||
148 | __put_user ((__u32) 0, data + 65); | ||
149 | } | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | int ptrace_setfpregs (struct task_struct *child, __u32 __user *data) | ||
155 | { | ||
156 | fpureg_t *fregs; | ||
157 | int i; | ||
158 | |||
159 | if (!access_ok(VERIFY_READ, data, 33 * 8)) | ||
160 | return -EIO; | ||
161 | |||
162 | fregs = get_fpu_regs(child); | ||
163 | |||
164 | for (i = 0; i < 32; i++) | ||
165 | __get_user (fregs[i], i + (__u64 __user *) data); | ||
166 | |||
167 | if (cpu_has_fpu) | ||
168 | __get_user (child->thread.fpu.hard.fcr31, data + 64); | ||
169 | else | ||
170 | __get_user (child->thread.fpu.soft.fcr31, data + 64); | ||
171 | |||
172 | /* FIR may not be written. */ | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | asmlinkage long sys_ptrace(long request, long pid, long addr, long data) | ||
51 | { | 178 | { |
52 | struct task_struct *child; | 179 | struct task_struct *child; |
53 | int ret; | 180 | int ret; |
@@ -103,7 +230,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
103 | ret = -EIO; | 230 | ret = -EIO; |
104 | if (copied != sizeof(tmp)) | 231 | if (copied != sizeof(tmp)) |
105 | break; | 232 | break; |
106 | ret = put_user(tmp,(unsigned long *) data); | 233 | ret = put_user(tmp,(unsigned long __user *) data); |
107 | break; | 234 | break; |
108 | } | 235 | } |
109 | 236 | ||
@@ -169,18 +296,53 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
169 | if (!cpu_has_fpu) | 296 | if (!cpu_has_fpu) |
170 | break; | 297 | break; |
171 | 298 | ||
172 | flags = read_c0_status(); | 299 | preempt_disable(); |
173 | __enable_fpu(); | 300 | if (cpu_has_mipsmt) { |
174 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | 301 | unsigned int vpflags = dvpe(); |
175 | write_c0_status(flags); | 302 | flags = read_c0_status(); |
303 | __enable_fpu(); | ||
304 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | ||
305 | write_c0_status(flags); | ||
306 | evpe(vpflags); | ||
307 | } else { | ||
308 | flags = read_c0_status(); | ||
309 | __enable_fpu(); | ||
310 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | ||
311 | write_c0_status(flags); | ||
312 | } | ||
313 | preempt_enable(); | ||
314 | break; | ||
315 | } | ||
316 | case DSP_BASE ... DSP_BASE + 5: { | ||
317 | dspreg_t *dregs; | ||
318 | |||
319 | if (!cpu_has_dsp) { | ||
320 | tmp = 0; | ||
321 | ret = -EIO; | ||
322 | goto out_tsk; | ||
323 | } | ||
324 | if (child->thread.dsp.used_dsp) { | ||
325 | dregs = __get_dsp_regs(child); | ||
326 | tmp = (unsigned long) (dregs[addr - DSP_BASE]); | ||
327 | } else { | ||
328 | tmp = -1; /* DSP registers not yet used */ | ||
329 | } | ||
176 | break; | 330 | break; |
177 | } | 331 | } |
332 | case DSP_CONTROL: | ||
333 | if (!cpu_has_dsp) { | ||
334 | tmp = 0; | ||
335 | ret = -EIO; | ||
336 | goto out_tsk; | ||
337 | } | ||
338 | tmp = child->thread.dsp.dspcontrol; | ||
339 | break; | ||
178 | default: | 340 | default: |
179 | tmp = 0; | 341 | tmp = 0; |
180 | ret = -EIO; | 342 | ret = -EIO; |
181 | goto out_tsk; | 343 | goto out_tsk; |
182 | } | 344 | } |
183 | ret = put_user(tmp, (unsigned long *) data); | 345 | ret = put_user(tmp, (unsigned long __user *) data); |
184 | break; | 346 | break; |
185 | } | 347 | } |
186 | 348 | ||
@@ -247,6 +409,25 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
247 | else | 409 | else |
248 | child->thread.fpu.soft.fcr31 = data; | 410 | child->thread.fpu.soft.fcr31 = data; |
249 | break; | 411 | break; |
412 | case DSP_BASE ... DSP_BASE + 5: { | ||
413 | dspreg_t *dregs; | ||
414 | |||
415 | if (!cpu_has_dsp) { | ||
416 | ret = -EIO; | ||
417 | break; | ||
418 | } | ||
419 | |||
420 | dregs = __get_dsp_regs(child); | ||
421 | dregs[addr - DSP_BASE] = data; | ||
422 | break; | ||
423 | } | ||
424 | case DSP_CONTROL: | ||
425 | if (!cpu_has_dsp) { | ||
426 | ret = -EIO; | ||
427 | break; | ||
428 | } | ||
429 | child->thread.dsp.dspcontrol = data; | ||
430 | break; | ||
250 | default: | 431 | default: |
251 | /* The rest are not allowed. */ | 432 | /* The rest are not allowed. */ |
252 | ret = -EIO; | 433 | ret = -EIO; |
@@ -255,6 +436,22 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
255 | break; | 436 | break; |
256 | } | 437 | } |
257 | 438 | ||
439 | case PTRACE_GETREGS: | ||
440 | ret = ptrace_getregs (child, (__u64 __user *) data); | ||
441 | break; | ||
442 | |||
443 | case PTRACE_SETREGS: | ||
444 | ret = ptrace_setregs (child, (__u64 __user *) data); | ||
445 | break; | ||
446 | |||
447 | case PTRACE_GETFPREGS: | ||
448 | ret = ptrace_getfpregs (child, (__u32 __user *) data); | ||
449 | break; | ||
450 | |||
451 | case PTRACE_SETFPREGS: | ||
452 | ret = ptrace_setfpregs (child, (__u32 __user *) data); | ||
453 | break; | ||
454 | |||
258 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 455 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
259 | case PTRACE_CONT: { /* restart after signal. */ | 456 | case PTRACE_CONT: { /* restart after signal. */ |
260 | ret = -EIO; | 457 | ret = -EIO; |
@@ -289,6 +486,11 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
289 | ret = ptrace_detach(child, data); | 486 | ret = ptrace_detach(child, data); |
290 | break; | 487 | break; |
291 | 488 | ||
489 | case PTRACE_GET_THREAD_AREA: | ||
490 | ret = put_user(child->thread_info->tp_value, | ||
491 | (unsigned long __user *) data); | ||
492 | break; | ||
493 | |||
292 | default: | 494 | default: |
293 | ret = ptrace_request(child, request, addr, data); | 495 | ret = ptrace_request(child, request, addr, data); |
294 | break; | 496 | break; |
@@ -303,21 +505,14 @@ out: | |||
303 | 505 | ||
304 | static inline int audit_arch(void) | 506 | static inline int audit_arch(void) |
305 | { | 507 | { |
306 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 508 | int arch = EM_MIPS; |
307 | #ifdef CONFIG_64BIT | ||
308 | if (!(current->thread.mflags & MF_32BIT_REGS)) | ||
309 | return AUDIT_ARCH_MIPSEL64; | ||
310 | #endif /* MIPS64 */ | ||
311 | return AUDIT_ARCH_MIPSEL; | ||
312 | |||
313 | #else /* big endian... */ | ||
314 | #ifdef CONFIG_64BIT | 509 | #ifdef CONFIG_64BIT |
315 | if (!(current->thread.mflags & MF_32BIT_REGS)) | 510 | arch |= __AUDIT_ARCH_64BIT; |
316 | return AUDIT_ARCH_MIPS64; | 511 | #endif |
317 | #endif /* MIPS64 */ | 512 | #if defined(__LITTLE_ENDIAN) |
318 | return AUDIT_ARCH_MIPS; | 513 | arch |= __AUDIT_ARCH_LE; |
319 | 514 | #endif | |
320 | #endif /* endian */ | 515 | return arch; |
321 | } | 516 | } |
322 | 517 | ||
323 | /* | 518 | /* |
@@ -327,12 +522,13 @@ static inline int audit_arch(void) | |||
327 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | 522 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) |
328 | { | 523 | { |
329 | if (unlikely(current->audit_context) && entryexit) | 524 | if (unlikely(current->audit_context) && entryexit) |
330 | audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]), regs->regs[2]); | 525 | audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]), |
526 | regs->regs[2]); | ||
331 | 527 | ||
332 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
333 | goto out; | ||
334 | if (!(current->ptrace & PT_PTRACED)) | 528 | if (!(current->ptrace & PT_PTRACED)) |
335 | goto out; | 529 | goto out; |
530 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
531 | goto out; | ||
336 | 532 | ||
337 | /* The 0x80 provides a way for the tracing parent to distinguish | 533 | /* The 0x80 provides a way for the tracing parent to distinguish |
338 | between a syscall stop and SIGTRAP delivery */ | 534 | between a syscall stop and SIGTRAP delivery */ |
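
The new PTRACE_GETREGS path always transfers 38 slots of 64 bits: the 32 general registers followed by lo, hi, cp0_epc, cp0_badvaddr, cp0_status and cp0_cause, sign extended even for 32-bit tracees. Below is a userspace sketch of a tracer consuming that layout; the slot indices and the availability of the usual sys/ptrace.h constants are assumptions on my part, and the target must already be stopped under ptrace:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
        uint64_t gregs[38];     /* 32 GPRs + lo, hi, epc, badvaddr, status, cause */
        pid_t pid;

        if (argc < 2)
                return 1;
        pid = (pid_t)atoi(argv[1]);     /* a tracee already stopped under ptrace */

        if (ptrace(PTRACE_GETREGS, pid, NULL, gregs) == -1) {
                perror("PTRACE_GETREGS");
                return 1;
        }

        printf("sp=%#llx ra=%#llx epc=%#llx\n",
               (unsigned long long)gregs[29],   /* $sp */
               (unsigned long long)gregs[31],   /* $ra */
               (unsigned long long)gregs[34]);  /* cp0_epc slot */
        return 0;
}
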
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index eee207969c21..9a9b04972132 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c | |||
@@ -24,17 +24,24 @@ | |||
24 | #include <linux/smp_lock.h> | 24 | #include <linux/smp_lock.h> |
25 | #include <linux/user.h> | 25 | #include <linux/user.h> |
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/signal.h> | ||
28 | 27 | ||
29 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
29 | #include <asm/dsp.h> | ||
30 | #include <asm/fpu.h> | 30 | #include <asm/fpu.h> |
31 | #include <asm/mipsregs.h> | 31 | #include <asm/mipsregs.h> |
32 | #include <asm/mipsmtregs.h> | ||
32 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
33 | #include <asm/page.h> | 34 | #include <asm/page.h> |
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include <asm/bootinfo.h> | 37 | #include <asm/bootinfo.h> |
37 | 38 | ||
39 | int ptrace_getregs (struct task_struct *child, __s64 __user *data); | ||
40 | int ptrace_setregs (struct task_struct *child, __s64 __user *data); | ||
41 | |||
42 | int ptrace_getfpregs (struct task_struct *child, __u32 __user *data); | ||
43 | int ptrace_setfpregs (struct task_struct *child, __u32 __user *data); | ||
44 | |||
38 | /* | 45 | /* |
39 | * Tracing a 32-bit process with a 64-bit strace and vice versa will not | 46 | * Tracing a 32-bit process with a 64-bit strace and vice versa will not |
40 | * work. I don't know how to fix this. | 47 | * work. I don't know how to fix this. |
@@ -99,6 +106,35 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
99 | break; | 106 | break; |
100 | } | 107 | } |
101 | 108 | ||
109 | /* | ||
110 | * Read 4 bytes of the other process' storage | ||
111 | * data is a pointer specifying where the user wants the | ||
112 | * 4 bytes copied | ||
113 | * addr is a pointer in the user's storage that contains an 8 byte | ||
114 | * address in the other process of the 4 bytes that is to be read | ||
115 | * (this is run in a 32-bit process looking at a 64-bit process) | ||
116 | * when I and D space are separate, these will need to be fixed. | ||
117 | */ | ||
118 | case PTRACE_PEEKTEXT_3264: | ||
119 | case PTRACE_PEEKDATA_3264: { | ||
120 | u32 tmp; | ||
121 | int copied; | ||
122 | u32 __user * addrOthers; | ||
123 | |||
124 | ret = -EIO; | ||
125 | |||
126 | /* Get the addr in the other process that we want to read */ | ||
127 | if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) | ||
128 | break; | ||
129 | |||
130 | copied = access_process_vm(child, (u64)addrOthers, &tmp, | ||
131 | sizeof(tmp), 0); | ||
132 | if (copied != sizeof(tmp)) | ||
133 | break; | ||
134 | ret = put_user(tmp, (u32 __user *) (unsigned long) data); | ||
135 | break; | ||
136 | } | ||
137 | |||
102 | /* Read the word at location addr in the USER area. */ | 138 | /* Read the word at location addr in the USER area. */ |
103 | case PTRACE_PEEKUSR: { | 139 | case PTRACE_PEEKUSR: { |
104 | struct pt_regs *regs; | 140 | struct pt_regs *regs; |
@@ -156,12 +192,44 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
156 | if (!cpu_has_fpu) | 192 | if (!cpu_has_fpu) |
157 | break; | 193 | break; |
158 | 194 | ||
159 | flags = read_c0_status(); | 195 | preempt_disable(); |
160 | __enable_fpu(); | 196 | if (cpu_has_mipsmt) { |
161 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | 197 | unsigned int vpflags = dvpe(); |
162 | write_c0_status(flags); | 198 | flags = read_c0_status(); |
199 | __enable_fpu(); | ||
200 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | ||
201 | write_c0_status(flags); | ||
202 | evpe(vpflags); | ||
203 | } else { | ||
204 | flags = read_c0_status(); | ||
205 | __enable_fpu(); | ||
206 | __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); | ||
207 | write_c0_status(flags); | ||
208 | } | ||
209 | preempt_enable(); | ||
163 | break; | 210 | break; |
164 | } | 211 | } |
212 | case DSP_BASE ... DSP_BASE + 5: | ||
213 | if (!cpu_has_dsp) { | ||
214 | tmp = 0; | ||
215 | ret = -EIO; | ||
216 | goto out_tsk; | ||
217 | } | ||
218 | if (child->thread.dsp.used_dsp) { | ||
219 | dspreg_t *dregs = __get_dsp_regs(child); | ||
220 | tmp = (unsigned long) (dregs[addr - DSP_BASE]); | ||
221 | } else { | ||
222 | tmp = -1; /* DSP registers not yet used */ | ||
223 | } | ||
224 | break; | ||
225 | case DSP_CONTROL: | ||
226 | if (!cpu_has_dsp) { | ||
227 | tmp = 0; | ||
228 | ret = -EIO; | ||
229 | goto out_tsk; | ||
230 | } | ||
231 | tmp = child->thread.dsp.dspcontrol; | ||
232 | break; | ||
165 | default: | 233 | default: |
166 | tmp = 0; | 234 | tmp = 0; |
167 | ret = -EIO; | 235 | ret = -EIO; |
@@ -181,6 +249,31 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
181 | ret = -EIO; | 249 | ret = -EIO; |
182 | break; | 250 | break; |
183 | 251 | ||
252 | /* | ||
253 | * Write 4 bytes into the other process' storage | ||
254 | * data is the 4 bytes that the user wants written | ||
255 | * addr is a pointer in the user's storage that contains an | ||
256 | * 8 byte address in the other process where the 4 bytes | ||
257 | * are to be written | ||
258 | * (this is run in a 32-bit process looking at a 64-bit process) | ||
259 | * when I and D space are separate, these will need to be fixed. | ||
260 | */ | ||
261 | case PTRACE_POKETEXT_3264: | ||
262 | case PTRACE_POKEDATA_3264: { | ||
263 | u32 __user * addrOthers; | ||
264 | |||
265 | /* Get the addr in the other process that we want to write into */ | ||
266 | ret = -EIO; | ||
267 | if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) | ||
268 | break; | ||
269 | ret = 0; | ||
270 | if (access_process_vm(child, (u64)addrOthers, &data, | ||
271 | sizeof(data), 1) == sizeof(data)) | ||
272 | break; | ||
273 | ret = -EIO; | ||
274 | break; | ||
275 | } | ||
276 | |||
184 | case PTRACE_POKEUSR: { | 277 | case PTRACE_POKEUSR: { |
185 | struct pt_regs *regs; | 278 | struct pt_regs *regs; |
186 | ret = 0; | 279 | ret = 0; |
@@ -231,6 +324,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
231 | else | 324 | else |
232 | child->thread.fpu.soft.fcr31 = data; | 325 | child->thread.fpu.soft.fcr31 = data; |
233 | break; | 326 | break; |
327 | case DSP_BASE ... DSP_BASE + 5: | ||
328 | if (!cpu_has_dsp) { | ||
329 | ret = -EIO; | ||
330 | break; | ||
331 | } | ||
332 | |||
333 | dspreg_t *dregs = __get_dsp_regs(child); | ||
334 | dregs[addr - DSP_BASE] = data; | ||
335 | break; | ||
336 | case DSP_CONTROL: | ||
337 | if (!cpu_has_dsp) { | ||
338 | ret = -EIO; | ||
339 | break; | ||
340 | } | ||
341 | child->thread.dsp.dspcontrol = data; | ||
342 | break; | ||
234 | default: | 343 | default: |
235 | /* The rest are not allowed. */ | 344 | /* The rest are not allowed. */ |
236 | ret = -EIO; | 345 | ret = -EIO; |
@@ -239,6 +348,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
239 | break; | 348 | break; |
240 | } | 349 | } |
241 | 350 | ||
351 | case PTRACE_GETREGS: | ||
352 | ret = ptrace_getregs (child, (__u64 __user *) (__u64) data); | ||
353 | break; | ||
354 | |||
355 | case PTRACE_SETREGS: | ||
356 | ret = ptrace_setregs (child, (__u64 __user *) (__u64) data); | ||
357 | break; | ||
358 | |||
359 | case PTRACE_GETFPREGS: | ||
360 | ret = ptrace_getfpregs (child, (__u32 __user *) (__u64) data); | ||
361 | break; | ||
362 | |||
363 | case PTRACE_SETFPREGS: | ||
364 | ret = ptrace_setfpregs (child, (__u32 __user *) (__u64) data); | ||
365 | break; | ||
366 | |||
242 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 367 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
243 | case PTRACE_CONT: { /* restart after signal. */ | 368 | case PTRACE_CONT: { /* restart after signal. */ |
244 | ret = -EIO; | 369 | ret = -EIO; |
@@ -269,10 +394,25 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
269 | wake_up_process(child); | 394 | wake_up_process(child); |
270 | break; | 395 | break; |
271 | 396 | ||
397 | case PTRACE_GET_THREAD_AREA: | ||
398 | ret = put_user(child->thread_info->tp_value, | ||
399 | (unsigned int __user *) (unsigned long) data); | ||
400 | break; | ||
401 | |||
272 | case PTRACE_DETACH: /* detach a process that was attached. */ | 402 | case PTRACE_DETACH: /* detach a process that was attached. */ |
273 | ret = ptrace_detach(child, data); | 403 | ret = ptrace_detach(child, data); |
274 | break; | 404 | break; |
275 | 405 | ||
406 | case PTRACE_GETEVENTMSG: | ||
407 | ret = put_user(child->ptrace_message, | ||
408 | (unsigned int __user *) (unsigned long) data); | ||
409 | break; | ||
410 | |||
411 | case PTRACE_GET_THREAD_AREA_3264: | ||
412 | ret = put_user(child->thread_info->tp_value, | ||
413 | (unsigned long __user *) (unsigned long) data); | ||
414 | break; | ||
415 | |||
276 | default: | 416 | default: |
277 | ret = ptrace_request(child, request, addr, data); | 417 | ret = ptrace_request(child, request, addr, data); |
278 | break; | 418 | break; |
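
The *_3264 requests added here let a 32-bit debugger reach into a 64-bit tracee: addr points at an 8-byte target address stored in the tracer's own memory and data receives (or supplies) the 32-bit word. A sketch of the read side, to be built as a 32-bit tracer; the request value is an assumption taken from asm/ptrace.h, and the tracee must already be stopped under ptrace:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_PEEKTEXT_3264
#define PTRACE_PEEKTEXT_3264 0xc0       /* assumed value, see asm/ptrace.h */
#endif

int main(int argc, char **argv)
{
        uint64_t remote_addr = 0xffffffff80100000ULL;   /* hypothetical tracee address */
        uint32_t word;
        pid_t pid;

        if (argc < 2)
                return 1;
        pid = (pid_t)atoi(argv[1]);     /* a 64-bit tracee stopped under ptrace */

        /* addr = pointer to the 64-bit address, data = where the word goes */
        if (ptrace(PTRACE_PEEKTEXT_3264, pid,
                   (unsigned long)&remote_addr, &word) == -1) {
                perror("PTRACE_PEEKTEXT_3264");
                return 1;
        }
        printf("word at %#llx = %#x\n",
               (unsigned long long)remote_addr, word);
        return 0;
}
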
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 1a14c6b18829..283a98508fc8 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | .set noreorder | 33 | .set noreorder |
34 | .set mips3 | 34 | .set mips3 |
35 | /* Save floating point context */ | 35 | |
36 | LEAF(_save_fp_context) | 36 | LEAF(_save_fp_context) |
37 | cfc1 t1, fcr31 | 37 | cfc1 t1, fcr31 |
38 | 38 | ||
@@ -74,9 +74,6 @@ LEAF(_save_fp_context) | |||
74 | EX sdc1 $f28, SC_FPREGS+224(a0) | 74 | EX sdc1 $f28, SC_FPREGS+224(a0) |
75 | EX sdc1 $f30, SC_FPREGS+240(a0) | 75 | EX sdc1 $f30, SC_FPREGS+240(a0) |
76 | EX sw t1, SC_FPC_CSR(a0) | 76 | EX sw t1, SC_FPC_CSR(a0) |
77 | cfc1 t0, $0 # implementation/version | ||
78 | EX sw t0, SC_FPC_EIR(a0) | ||
79 | |||
80 | jr ra | 77 | jr ra |
81 | li v0, 0 # success | 78 | li v0, 0 # success |
82 | END(_save_fp_context) | 79 | END(_save_fp_context) |
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c new file mode 100644 index 000000000000..8c81f3cb4e2d --- /dev/null +++ b/arch/mips/kernel/rtlx.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/elf.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | #include <linux/syscalls.h> | ||
30 | #include <linux/moduleloader.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/poll.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/wait.h> | ||
35 | #include <asm/mipsmtregs.h> | ||
36 | #include <asm/cacheflush.h> | ||
37 | #include <asm/atomic.h> | ||
38 | #include <asm/cpu.h> | ||
39 | #include <asm/processor.h> | ||
40 | #include <asm/system.h> | ||
41 | #include <asm/rtlx.h> | ||
42 | |||
43 | #define RTLX_MAJOR 64 | ||
44 | #define RTLX_TARG_VPE 1 | ||
45 | |||
46 | struct rtlx_info *rtlx; | ||
47 | static int major; | ||
48 | static char module_name[] = "rtlx"; | ||
49 | static inline int spacefree(int read, int write, int size); | ||
50 | |||
51 | static struct chan_waitqueues { | ||
52 | wait_queue_head_t rt_queue; | ||
53 | wait_queue_head_t lx_queue; | ||
54 | } channel_wqs[RTLX_CHANNELS]; | ||
55 | |||
56 | static struct irqaction irq; | ||
57 | static int irq_num; | ||
58 | |||
59 | extern void *vpe_get_shared(int index); | ||
60 | |||
61 | static void rtlx_dispatch(struct pt_regs *regs) | ||
62 | { | ||
63 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs); | ||
64 | } | ||
65 | |||
66 | irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
67 | { | ||
68 | irqreturn_t r = IRQ_HANDLED; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
72 | struct rtlx_channel *chan = &rtlx->channel[i]; | ||
73 | |||
74 | if (chan->lx_read != chan->lx_write) | ||
75 | wake_up_interruptible(&channel_wqs[i].lx_queue); | ||
76 | } | ||
77 | |||
78 | return r; | ||
79 | } | ||
80 | |||
81 | void dump_rtlx(void) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); | ||
86 | |||
87 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
88 | struct rtlx_channel *chan = &rtlx->channel[i]; | ||
89 | |||
90 | printk(" rt_state %d lx_state %d buffer_size %d\n", | ||
91 | chan->rt_state, chan->lx_state, chan->buffer_size); | ||
92 | |||
93 | printk(" rt_read %d rt_write %d\n", | ||
94 | chan->rt_read, chan->rt_write); | ||
95 | |||
96 | printk(" lx_read %d lx_write %d\n", | ||
97 | chan->lx_read, chan->lx_write); | ||
98 | |||
99 | printk(" rt_buffer <%s>\n", chan->rt_buffer); | ||
100 | printk(" lx_buffer <%s>\n", chan->lx_buffer); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* call when we have the address of the shared structure from the SP side. */ | ||
105 | static int rtlx_init(struct rtlx_info *rtlxi) | ||
106 | { | ||
107 | int i; | ||
108 | |||
109 | if (rtlxi->id != RTLX_ID) { | ||
110 | printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi); | ||
111 | return (-ENOEXEC); | ||
112 | } | ||
113 | |||
114 | /* initialise the wait queues */ | ||
115 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
116 | init_waitqueue_head(&channel_wqs[i].rt_queue); | ||
117 | init_waitqueue_head(&channel_wqs[i].lx_queue); | ||
118 | } | ||
119 | |||
120 | /* set up for interrupt handling */ | ||
121 | memset(&irq, 0, sizeof(struct irqaction)); | ||
122 | |||
123 | if (cpu_has_vint) { | ||
124 | set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); | ||
125 | } | ||
126 | |||
127 | irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; | ||
128 | irq.handler = rtlx_interrupt; | ||
129 | irq.flags = SA_INTERRUPT; | ||
130 | irq.name = "RTLX"; | ||
131 | irq.dev_id = rtlx; | ||
132 | setup_irq(irq_num, &irq); | ||
133 | |||
134 | rtlx = rtlxi; | ||
135 | return (0); | ||
136 | } | ||
137 | |||
138 | /* only allow one open process at a time to open each channel */ | ||
139 | static int rtlx_open(struct inode *inode, struct file *filp) | ||
140 | { | ||
141 | int minor, ret; | ||
142 | struct rtlx_channel *chan; | ||
143 | |||
144 | /* assume only one device for the moment */ | ||
145 | minor = MINOR(inode->i_rdev); | ||
146 | |||
147 | if (rtlx == NULL) { | ||
148 | struct rtlx_info **p; | ||
149 | if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) { | ||
150 | printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n"); | ||
151 | return (-EFAULT); | ||
152 | } | ||
153 | |||
154 | if (*p == NULL) { | ||
155 | printk(" vpe_shared %p %p\n", p, *p); | ||
156 | return (-EFAULT); | ||
157 | } | ||
158 | |||
159 | if ((ret = rtlx_init(*p)) < 0) | ||
160 | return (ret); | ||
161 | } | ||
162 | |||
163 | chan = &rtlx->channel[minor]; | ||
164 | |||
165 | /* already open? */ | ||
166 | if (chan->lx_state == RTLX_STATE_OPENED) | ||
167 | return (-EBUSY); | ||
168 | |||
169 | chan->lx_state = RTLX_STATE_OPENED; | ||
170 | return (0); | ||
171 | } | ||
172 | |||
173 | static int rtlx_release(struct inode *inode, struct file *filp) | ||
174 | { | ||
175 | int minor; | ||
176 | |||
177 | minor = MINOR(inode->i_rdev); | ||
178 | rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED; | ||
179 | return (0); | ||
180 | } | ||
181 | |||
182 | static unsigned int rtlx_poll(struct file *file, poll_table * wait) | ||
183 | { | ||
184 | int minor; | ||
185 | unsigned int mask = 0; | ||
186 | struct rtlx_channel *chan; | ||
187 | |||
188 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
189 | chan = &rtlx->channel[minor]; | ||
190 | |||
191 | poll_wait(file, &channel_wqs[minor].rt_queue, wait); | ||
192 | poll_wait(file, &channel_wqs[minor].lx_queue, wait); | ||
193 | |||
194 | /* data available to read? */ | ||
195 | if (chan->lx_read != chan->lx_write) | ||
196 | mask |= POLLIN | POLLRDNORM; | ||
197 | |||
198 | /* space to write */ | ||
199 | if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size)) | ||
200 | mask |= POLLOUT | POLLWRNORM; | ||
201 | |||
202 | return (mask); | ||
203 | } | ||
204 | |||
205 | static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count, | ||
206 | loff_t * ppos) | ||
207 | { | ||
208 | size_t fl = 0L; | ||
209 | int minor; | ||
210 | struct rtlx_channel *lx; | ||
211 | DECLARE_WAITQUEUE(wait, current); | ||
212 | |||
213 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
214 | lx = &rtlx->channel[minor]; | ||
215 | |||
216 | /* data available? */ | ||
217 | if (lx->lx_write == lx->lx_read) { | ||
218 | if (file->f_flags & O_NONBLOCK) | ||
219 | return (0); // -EAGAIN makes cat whinge | ||
220 | |||
221 | /* go to sleep */ | ||
222 | add_wait_queue(&channel_wqs[minor].lx_queue, &wait); | ||
223 | set_current_state(TASK_INTERRUPTIBLE); | ||
224 | |||
225 | while (lx->lx_write == lx->lx_read) | ||
226 | schedule(); | ||
227 | |||
228 | set_current_state(TASK_RUNNING); | ||
229 | remove_wait_queue(&channel_wqs[minor].lx_queue, &wait); | ||
230 | |||
231 | /* back running */ | ||
232 | } | ||
233 | |||
234 | /* find out how much in total */ | ||
235 | count = min( count, | ||
236 | (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); | ||
237 | |||
238 | /* then how much from the read pointer onwards */ | ||
239 | fl = min( count, (size_t)lx->buffer_size - lx->lx_read); | ||
240 | |||
241 | copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl); | ||
242 | |||
243 | /* and if there is anything left at the beginning of the buffer */ | ||
244 | if ( count - fl ) | ||
245 | copy_to_user (buffer + fl, lx->lx_buffer, count - fl); | ||
246 | |||
247 | /* update the index */ | ||
248 | lx->lx_read += count; | ||
249 | lx->lx_read %= lx->buffer_size; | ||
250 | |||
251 | return (count); | ||
252 | } | ||
253 | |||
254 | static inline int spacefree(int read, int write, int size) | ||
255 | { | ||
256 | if (read == write) { | ||
257 | /* the buffer is never filled completely, so the indices are equal only | ||
258 | when it is empty, and differ whenever data is available */ | ||
259 | return (size - 1); | ||
260 | } | ||
261 | |||
262 | return ((read + size - write) % size) - 1; | ||
263 | } | ||
264 | |||
265 | static ssize_t rtlx_write(struct file *file, const char __user * buffer, | ||
266 | size_t count, loff_t * ppos) | ||
267 | { | ||
268 | int minor; | ||
269 | struct rtlx_channel *rt; | ||
270 | size_t fl; | ||
271 | DECLARE_WAITQUEUE(wait, current); | ||
272 | |||
273 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
274 | rt = &rtlx->channel[minor]; | ||
275 | |||
276 | /* any space left... */ | ||
277 | if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) { | ||
278 | |||
279 | if (file->f_flags & O_NONBLOCK) | ||
280 | return (-EAGAIN); | ||
281 | |||
282 | add_wait_queue(&channel_wqs[minor].rt_queue, &wait); | ||
283 | set_current_state(TASK_INTERRUPTIBLE); | ||
284 | |||
285 | while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) | ||
286 | schedule(); | ||
287 | |||
288 | set_current_state(TASK_RUNNING); | ||
289 | remove_wait_queue(&channel_wqs[minor].rt_queue, &wait); | ||
290 | } | ||
291 | |||
292 | /* total number of bytes to copy */ | ||
293 | count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) ); | ||
294 | |||
295 | /* first bit from write pointer to the end of the buffer, or count */ | ||
296 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); | ||
297 | |||
298 | copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl); | ||
299 | |||
300 | /* if there's any left copy to the beginning of the buffer */ | ||
301 | if( count - fl ) | ||
302 | copy_from_user(rt->rt_buffer, buffer + fl, count - fl); | ||
303 | |||
304 | rt->rt_write += count; | ||
305 | rt->rt_write %= rt->buffer_size; | ||
306 | |||
307 | return(count); | ||
308 | } | ||
309 | |||
310 | static struct file_operations rtlx_fops = { | ||
311 | .owner = THIS_MODULE, | ||
312 | .open = rtlx_open, | ||
313 | .release = rtlx_release, | ||
314 | .write = rtlx_write, | ||
315 | .read = rtlx_read, | ||
316 | .poll = rtlx_poll | ||
317 | }; | ||
318 | |||
319 | static int rtlx_module_init(void) | ||
320 | { | ||
321 | if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) { | ||
322 | printk("rtlx_module_init: unable to register device\n"); | ||
323 | return (-EBUSY); | ||
324 | } | ||
325 | |||
326 | if (major == 0) | ||
327 | major = RTLX_MAJOR; | ||
328 | |||
329 | return (0); | ||
330 | } | ||
331 | |||
332 | static void rtlx_module_exit(void) | ||
333 | { | ||
334 | unregister_chrdev(major, module_name); | ||
335 | } | ||
336 | |||
337 | module_init(rtlx_module_init); | ||
338 | module_exit(rtlx_module_exit); | ||
339 | MODULE_DESCRIPTION("MIPS RTLX"); | ||
340 | MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); | ||
341 | MODULE_LICENSE("GPL"); | ||
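The rtlx_read()/rtlx_write()/spacefree() trio above implements a classic one-slot-free circular buffer: read == write means empty, and one slot is always left unused so a full buffer can never be mistaken for an empty one. A standalone sketch of the same index arithmetic, handy for checking the wrap-around cases (the helper names and the buffer size are illustrative, not part of the driver):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 8                             /* stands in for chan->buffer_size */

    static int ring_free(int rd, int wr, int size)  /* same formula as spacefree() */
    {
            if (rd == wr)                           /* empty; one slot is always kept free */
                    return size - 1;
            return ((rd + size - wr) % size) - 1;
    }

    static int ring_used(int rd, int wr, int size)  /* what rtlx_read() clamps count against */
    {
            return (wr + size - rd) % size;
    }

    int main(void)
    {
            assert(ring_free(0, 0, RING_SIZE) == RING_SIZE - 1);    /* empty buffer */
            assert(ring_used(0, 0, RING_SIZE) == 0);
            assert(ring_free(5, 4, RING_SIZE) == 0);                /* full: write is one slot behind read */
            assert(ring_used(6, 2, RING_SIZE) == 4);                /* data wrapped around the end */
            printf("ring-buffer index arithmetic checks out\n");
            return 0;
    }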
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 17b5030fb6ea..4dd8e8b4fbc2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -578,7 +578,7 @@ einval: li v0, -EINVAL | |||
578 | sys sys_fremovexattr 2 /* 4235 */ | 578 | sys sys_fremovexattr 2 /* 4235 */ |
579 | sys sys_tkill 2 | 579 | sys sys_tkill 2 |
580 | sys sys_sendfile64 5 | 580 | sys sys_sendfile64 5 |
581 | sys sys_futex 2 | 581 | sys sys_futex 6 |
582 | sys sys_sched_setaffinity 3 | 582 | sys sys_sched_setaffinity 3 |
583 | sys sys_sched_getaffinity 3 /* 4240 */ | 583 | sys sys_sched_getaffinity 3 /* 4240 */ |
584 | sys sys_io_setup 2 | 584 | sys sys_io_setup 2 |
@@ -587,7 +587,7 @@ einval: li v0, -EINVAL | |||
587 | sys sys_io_submit 3 | 587 | sys sys_io_submit 3 |
588 | sys sys_io_cancel 3 /* 4245 */ | 588 | sys sys_io_cancel 3 /* 4245 */ |
589 | sys sys_exit_group 1 | 589 | sys sys_exit_group 1 |
590 | sys sys_lookup_dcookie 3 | 590 | sys sys_lookup_dcookie 4 |
591 | sys sys_epoll_create 1 | 591 | sys sys_epoll_create 1 |
592 | sys sys_epoll_ctl 4 | 592 | sys sys_epoll_ctl 4 |
593 | sys sys_epoll_wait 3 /* 4250 */ | 593 | sys sys_epoll_wait 3 /* 4250 */ |
@@ -618,12 +618,15 @@ einval: li v0, -EINVAL | |||
618 | sys sys_mq_notify 2 /* 4275 */ | 618 | sys sys_mq_notify 2 /* 4275 */ |
619 | sys sys_mq_getsetattr 3 | 619 | sys sys_mq_getsetattr 3 |
620 | sys sys_ni_syscall 0 /* sys_vserver */ | 620 | sys sys_ni_syscall 0 /* sys_vserver */ |
621 | sys sys_waitid 4 | 621 | sys sys_waitid 5 |
622 | sys sys_ni_syscall 0 /* available, was setaltroot */ | 622 | sys sys_ni_syscall 0 /* available, was setaltroot */ |
623 | sys sys_add_key 5 | 623 | sys sys_add_key 5 /* 4280 */ |
624 | sys sys_request_key 4 | 624 | sys sys_request_key 4 |
625 | sys sys_keyctl 5 | 625 | sys sys_keyctl 5 |
626 | 626 | sys sys_set_thread_area 1 | |
627 | sys sys_inotify_init 0 | ||
628 | sys sys_inotify_add_watch 3 /* 4285 */ | ||
629 | sys sys_inotify_rm_watch 2 | ||
627 | .endm | 630 | .endm |
628 | 631 | ||
629 | /* We pre-compute the number of _instruction_ bytes needed to | 632 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index ffb22a2068bf..9085838d6ce3 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -449,3 +449,7 @@ sys_call_table: | |||
449 | PTR sys_add_key | 449 | PTR sys_add_key |
450 | PTR sys_request_key /* 5240 */ | 450 | PTR sys_request_key /* 5240 */ |
451 | PTR sys_keyctl | 451 | PTR sys_keyctl |
452 | PTR sys_set_thread_area | ||
453 | PTR sys_inotify_init | ||
454 | PTR sys_inotify_add_watch | ||
455 | PTR sys_inotify_rm_watch /* 5245 */ | ||
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index e52049c87bc3..7e66eb823bf6 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -176,7 +176,7 @@ EXPORT(sysn32_call_table) | |||
176 | PTR sys_fork | 176 | PTR sys_fork |
177 | PTR sys32_execve | 177 | PTR sys32_execve |
178 | PTR sys_exit | 178 | PTR sys_exit |
179 | PTR sys32_wait4 | 179 | PTR compat_sys_wait4 |
180 | PTR sys_kill /* 6060 */ | 180 | PTR sys_kill /* 6060 */ |
181 | PTR sys32_newuname | 181 | PTR sys32_newuname |
182 | PTR sys_semget | 182 | PTR sys_semget |
@@ -216,7 +216,7 @@ EXPORT(sysn32_call_table) | |||
216 | PTR compat_sys_getrusage | 216 | PTR compat_sys_getrusage |
217 | PTR sys32_sysinfo | 217 | PTR sys32_sysinfo |
218 | PTR compat_sys_times | 218 | PTR compat_sys_times |
219 | PTR sys_ptrace | 219 | PTR sys32_ptrace |
220 | PTR sys_getuid /* 6100 */ | 220 | PTR sys_getuid /* 6100 */ |
221 | PTR sys_syslog | 221 | PTR sys_syslog |
222 | PTR sys_getgid | 222 | PTR sys_getgid |
@@ -243,14 +243,14 @@ EXPORT(sysn32_call_table) | |||
243 | PTR sys_capget | 243 | PTR sys_capget |
244 | PTR sys_capset | 244 | PTR sys_capset |
245 | PTR sys32_rt_sigpending /* 6125 */ | 245 | PTR sys32_rt_sigpending /* 6125 */ |
246 | PTR compat_sys_rt_sigtimedwait | 246 | PTR sysn32_rt_sigtimedwait |
247 | PTR sys32_rt_sigqueueinfo | 247 | PTR sys_rt_sigqueueinfo |
248 | PTR sys32_rt_sigsuspend | 248 | PTR sys32_rt_sigsuspend |
249 | PTR sys32_sigaltstack | 249 | PTR sys32_sigaltstack |
250 | PTR compat_sys_utime /* 6130 */ | 250 | PTR compat_sys_utime /* 6130 */ |
251 | PTR sys_mknod | 251 | PTR sys_mknod |
252 | PTR sys32_personality | 252 | PTR sys32_personality |
253 | PTR sys_ustat | 253 | PTR sys32_ustat |
254 | PTR compat_sys_statfs | 254 | PTR compat_sys_statfs |
255 | PTR compat_sys_fstatfs /* 6135 */ | 255 | PTR compat_sys_fstatfs /* 6135 */ |
256 | PTR sys_sysfs | 256 | PTR sys_sysfs |
@@ -329,7 +329,7 @@ EXPORT(sysn32_call_table) | |||
329 | PTR sys_epoll_wait | 329 | PTR sys_epoll_wait |
330 | PTR sys_remap_file_pages /* 6210 */ | 330 | PTR sys_remap_file_pages /* 6210 */ |
331 | PTR sysn32_rt_sigreturn | 331 | PTR sysn32_rt_sigreturn |
332 | PTR sys_fcntl | 332 | PTR compat_sys_fcntl64 |
333 | PTR sys_set_tid_address | 333 | PTR sys_set_tid_address |
334 | PTR sys_restart_syscall | 334 | PTR sys_restart_syscall |
335 | PTR sys_semtimedop /* 6215 */ | 335 | PTR sys_semtimedop /* 6215 */ |
@@ -337,15 +337,15 @@ EXPORT(sysn32_call_table) | |||
337 | PTR compat_sys_statfs64 | 337 | PTR compat_sys_statfs64 |
338 | PTR compat_sys_fstatfs64 | 338 | PTR compat_sys_fstatfs64 |
339 | PTR sys_sendfile64 | 339 | PTR sys_sendfile64 |
340 | PTR sys_timer_create /* 6220 */ | 340 | PTR sys32_timer_create /* 6220 */ |
341 | PTR sys_timer_settime | 341 | PTR compat_sys_timer_settime |
342 | PTR sys_timer_gettime | 342 | PTR compat_sys_timer_gettime |
343 | PTR sys_timer_getoverrun | 343 | PTR sys_timer_getoverrun |
344 | PTR sys_timer_delete | 344 | PTR sys_timer_delete |
345 | PTR sys_clock_settime /* 6225 */ | 345 | PTR compat_sys_clock_settime /* 6225 */ |
346 | PTR sys_clock_gettime | 346 | PTR compat_sys_clock_gettime |
347 | PTR sys_clock_getres | 347 | PTR compat_sys_clock_getres |
348 | PTR sys_clock_nanosleep | 348 | PTR compat_sys_clock_nanosleep |
349 | PTR sys_tgkill | 349 | PTR sys_tgkill |
350 | PTR compat_sys_utimes /* 6230 */ | 350 | PTR compat_sys_utimes /* 6230 */ |
351 | PTR sys_ni_syscall /* sys_mbind */ | 351 | PTR sys_ni_syscall /* sys_mbind */ |
@@ -358,8 +358,12 @@ EXPORT(sysn32_call_table) | |||
358 | PTR compat_sys_mq_notify | 358 | PTR compat_sys_mq_notify |
359 | PTR compat_sys_mq_getsetattr | 359 | PTR compat_sys_mq_getsetattr |
360 | PTR sys_ni_syscall /* 6240, sys_vserver */ | 360 | PTR sys_ni_syscall /* 6240, sys_vserver */ |
361 | PTR sys_waitid | 361 | PTR sysn32_waitid |
362 | PTR sys_ni_syscall /* available, was setaltroot */ | 362 | PTR sys_ni_syscall /* available, was setaltroot */ |
363 | PTR sys_add_key | 363 | PTR sys_add_key |
364 | PTR sys_request_key | 364 | PTR sys_request_key |
365 | PTR sys_keyctl /* 6245 */ | 365 | PTR sys_keyctl /* 6245 */ |
366 | PTR sys_set_thread_area | ||
367 | PTR sys_inotify_init | ||
368 | PTR sys_inotify_add_watch | ||
369 | PTR sys_inotify_rm_watch | ||
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 739f3998d76b..5a16401e443a 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -316,13 +316,13 @@ sys_call_table: | |||
316 | PTR sys_vhangup | 316 | PTR sys_vhangup |
317 | PTR sys_ni_syscall /* was sys_idle */ | 317 | PTR sys_ni_syscall /* was sys_idle */ |
318 | PTR sys_ni_syscall /* sys_vm86 */ | 318 | PTR sys_ni_syscall /* sys_vm86 */ |
319 | PTR sys32_wait4 | 319 | PTR compat_sys_wait4 |
320 | PTR sys_swapoff /* 4115 */ | 320 | PTR sys_swapoff /* 4115 */ |
321 | PTR sys32_sysinfo | 321 | PTR sys32_sysinfo |
322 | PTR sys32_ipc | 322 | PTR sys32_ipc |
323 | PTR sys_fsync | 323 | PTR sys_fsync |
324 | PTR sys32_sigreturn | 324 | PTR sys32_sigreturn |
325 | PTR sys_clone /* 4120 */ | 325 | PTR sys32_clone /* 4120 */ |
326 | PTR sys_setdomainname | 326 | PTR sys_setdomainname |
327 | PTR sys32_newuname | 327 | PTR sys32_newuname |
328 | PTR sys_ni_syscall /* sys_modify_ldt */ | 328 | PTR sys_ni_syscall /* sys_modify_ldt */ |
@@ -391,7 +391,7 @@ sys_call_table: | |||
391 | PTR sys_getresuid | 391 | PTR sys_getresuid |
392 | PTR sys_ni_syscall /* was query_module */ | 392 | PTR sys_ni_syscall /* was query_module */ |
393 | PTR sys_poll | 393 | PTR sys_poll |
394 | PTR sys_nfsservctl | 394 | PTR compat_sys_nfsservctl |
395 | PTR sys_setresgid /* 4190 */ | 395 | PTR sys_setresgid /* 4190 */ |
396 | PTR sys_getresgid | 396 | PTR sys_getresgid |
397 | PTR sys_prctl | 397 | PTR sys_prctl |
@@ -459,7 +459,7 @@ sys_call_table: | |||
459 | PTR sys_fadvise64_64 | 459 | PTR sys_fadvise64_64 |
460 | PTR compat_sys_statfs64 /* 4255 */ | 460 | PTR compat_sys_statfs64 /* 4255 */ |
461 | PTR compat_sys_fstatfs64 | 461 | PTR compat_sys_fstatfs64 |
462 | PTR sys_timer_create | 462 | PTR sys32_timer_create |
463 | PTR compat_sys_timer_settime | 463 | PTR compat_sys_timer_settime |
464 | PTR compat_sys_timer_gettime | 464 | PTR compat_sys_timer_gettime |
465 | PTR sys_timer_getoverrun /* 4260 */ | 465 | PTR sys_timer_getoverrun /* 4260 */ |
@@ -480,9 +480,13 @@ sys_call_table: | |||
480 | PTR compat_sys_mq_notify /* 4275 */ | 480 | PTR compat_sys_mq_notify /* 4275 */ |
481 | PTR compat_sys_mq_getsetattr | 481 | PTR compat_sys_mq_getsetattr |
482 | PTR sys_ni_syscall /* sys_vserver */ | 482 | PTR sys_ni_syscall /* sys_vserver */ |
483 | PTR sys_waitid | 483 | PTR sys32_waitid |
484 | PTR sys_ni_syscall /* available, was setaltroot */ | 484 | PTR sys_ni_syscall /* available, was setaltroot */ |
485 | PTR sys_add_key /* 4280 */ | 485 | PTR sys_add_key /* 4280 */ |
486 | PTR sys_request_key | 486 | PTR sys_request_key |
487 | PTR sys_keyctl | 487 | PTR sys_keyctl |
488 | PTR sys_set_thread_area | ||
489 | PTR sys_inotify_init | ||
490 | PTR sys_inotify_add_watch /* 4285 */ | ||
491 | PTR sys_inotify_rm_watch | ||
488 | .size sys_call_table,.-sys_call_table | 492 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c index 9c40fe5a8e8d..1265358cdca1 100644 --- a/arch/mips/kernel/semaphore.c +++ b/arch/mips/kernel/semaphore.c | |||
@@ -42,24 +42,28 @@ static inline int __sem_update_count(struct semaphore *sem, int incr) | |||
42 | 42 | ||
43 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 43 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
44 | __asm__ __volatile__( | 44 | __asm__ __volatile__( |
45 | "1: ll %0, %2 \n" | 45 | " .set mips3 \n" |
46 | "1: ll %0, %2 # __sem_update_count \n" | ||
46 | " sra %1, %0, 31 \n" | 47 | " sra %1, %0, 31 \n" |
47 | " not %1 \n" | 48 | " not %1 \n" |
48 | " and %1, %0, %1 \n" | 49 | " and %1, %0, %1 \n" |
49 | " add %1, %1, %3 \n" | 50 | " addu %1, %1, %3 \n" |
50 | " sc %1, %2 \n" | 51 | " sc %1, %2 \n" |
51 | " beqzl %1, 1b \n" | 52 | " beqzl %1, 1b \n" |
53 | " .set mips0 \n" | ||
52 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | 54 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) |
53 | : "r" (incr), "m" (sem->count)); | 55 | : "r" (incr), "m" (sem->count)); |
54 | } else if (cpu_has_llsc) { | 56 | } else if (cpu_has_llsc) { |
55 | __asm__ __volatile__( | 57 | __asm__ __volatile__( |
56 | "1: ll %0, %2 \n" | 58 | " .set mips3 \n" |
59 | "1: ll %0, %2 # __sem_update_count \n" | ||
57 | " sra %1, %0, 31 \n" | 60 | " sra %1, %0, 31 \n" |
58 | " not %1 \n" | 61 | " not %1 \n" |
59 | " and %1, %0, %1 \n" | 62 | " and %1, %0, %1 \n" |
60 | " add %1, %1, %3 \n" | 63 | " addu %1, %1, %3 \n" |
61 | " sc %1, %2 \n" | 64 | " sc %1, %2 \n" |
62 | " beqz %1, 1b \n" | 65 | " beqz %1, 1b \n" |
66 | " .set mips0 \n" | ||
63 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | 67 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) |
64 | : "r" (incr), "m" (sem->count)); | 68 | : "r" (incr), "m" (sem->count)); |
65 | } else { | 69 | } else { |
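Besides the .set mips3/.set mips0 bracketing and the add -> addu change, the heart of __sem_update_count is the sra/not/and sequence, which clamps a negative count to zero before the increment is applied. A hedged C rendering of just that arithmetic — it deliberately omits the atomicity the ll/sc retry loop provides and assumes an arithmetic right shift, which is what sra performs:

    #include <assert.h>

    static int sem_new_count(int old_count, int incr)
    {
            int mask = ~(old_count >> 31);          /* sra + not: 0 if negative, ~0 otherwise */
            return (old_count & mask) + incr;       /* and + addu */
    }

    int main(void)
    {
            assert(sem_new_count(3, 1) == 4);       /* up() on an uncontended semaphore */
            assert(sem_new_count(-2, 1) == 1);      /* up() with sleepers: clamp, then add */
            assert(sem_new_count(0, -1) == -1);     /* down() about to sleep */
            return 0;
    }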
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 12b531c295c4..d86affa21278 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -37,12 +37,13 @@ | |||
37 | 37 | ||
38 | #include <asm/addrspace.h> | 38 | #include <asm/addrspace.h> |
39 | #include <asm/bootinfo.h> | 39 | #include <asm/bootinfo.h> |
40 | #include <asm/cache.h> | ||
40 | #include <asm/cpu.h> | 41 | #include <asm/cpu.h> |
41 | #include <asm/sections.h> | 42 | #include <asm/sections.h> |
42 | #include <asm/setup.h> | 43 | #include <asm/setup.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
44 | 45 | ||
45 | struct cpuinfo_mips cpu_data[NR_CPUS]; | 46 | struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; |
46 | 47 | ||
47 | EXPORT_SYMBOL(cpu_data); | 48 | EXPORT_SYMBOL(cpu_data); |
48 | 49 | ||
@@ -62,8 +63,8 @@ EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS); | |||
62 | * | 63 | * |
63 | * These are initialized so they are in the .data section | 64 | * These are initialized so they are in the .data section |
64 | */ | 65 | */ |
65 | unsigned long mips_machtype = MACH_UNKNOWN; | 66 | unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; |
66 | unsigned long mips_machgroup = MACH_GROUP_UNKNOWN; | 67 | unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN; |
67 | 68 | ||
68 | EXPORT_SYMBOL(mips_machtype); | 69 | EXPORT_SYMBOL(mips_machtype); |
69 | EXPORT_SYMBOL(mips_machgroup); | 70 | EXPORT_SYMBOL(mips_machgroup); |
@@ -77,7 +78,7 @@ static char command_line[CL_SIZE]; | |||
77 | * mips_io_port_base is the begin of the address space to which x86 style | 78 | * mips_io_port_base is the begin of the address space to which x86 style |
78 | * I/O ports are mapped. | 79 | * I/O ports are mapped. |
79 | */ | 80 | */ |
80 | const unsigned long mips_io_port_base = -1; | 81 | const unsigned long mips_io_port_base __read_mostly = -1; |
81 | EXPORT_SYMBOL(mips_io_port_base); | 82 | EXPORT_SYMBOL(mips_io_port_base); |
82 | 83 | ||
83 | /* | 84 | /* |
@@ -510,31 +511,7 @@ static inline void resource_init(void) | |||
510 | #undef MAXMEM | 511 | #undef MAXMEM |
511 | #undef MAXMEM_PFN | 512 | #undef MAXMEM_PFN |
512 | 513 | ||
513 | static int __initdata earlyinit_debug; | 514 | extern void plat_setup(void); |
514 | |||
515 | static int __init earlyinit_debug_setup(char *str) | ||
516 | { | ||
517 | earlyinit_debug = 1; | ||
518 | return 1; | ||
519 | } | ||
520 | __setup("earlyinit_debug", earlyinit_debug_setup); | ||
521 | |||
522 | extern initcall_t __earlyinitcall_start, __earlyinitcall_end; | ||
523 | |||
524 | static void __init do_earlyinitcalls(void) | ||
525 | { | ||
526 | initcall_t *call, *start, *end; | ||
527 | |||
528 | start = &__earlyinitcall_start; | ||
529 | end = &__earlyinitcall_end; | ||
530 | |||
531 | for (call = start; call < end; call++) { | ||
532 | if (earlyinit_debug) | ||
533 | printk("calling earlyinitcall 0x%p\n", *call); | ||
534 | |||
535 | (*call)(); | ||
536 | } | ||
537 | } | ||
538 | 515 | ||
539 | void __init setup_arch(char **cmdline_p) | 516 | void __init setup_arch(char **cmdline_p) |
540 | { | 517 | { |
@@ -551,7 +528,7 @@ void __init setup_arch(char **cmdline_p) | |||
551 | #endif | 528 | #endif |
552 | 529 | ||
553 | /* call board setup routine */ | 530 | /* call board setup routine */ |
554 | do_earlyinitcalls(); | 531 | plat_setup(); |
555 | 532 | ||
556 | strlcpy(command_line, arcs_cmdline, sizeof(command_line)); | 533 | strlcpy(command_line, arcs_cmdline, sizeof(command_line)); |
557 | strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); | 534 | strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); |
@@ -573,3 +550,12 @@ int __init fpu_disable(char *s) | |||
573 | } | 550 | } |
574 | 551 | ||
575 | __setup("nofpu", fpu_disable); | 552 | __setup("nofpu", fpu_disable); |
553 | |||
554 | int __init dsp_disable(char *s) | ||
555 | { | ||
556 | cpu_data[0].ases &= ~MIPS_ASE_DSP; | ||
557 | |||
558 | return 1; | ||
559 | } | ||
560 | |||
561 | __setup("nodsp", dsp_disable); | ||
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index f9234df53253..0f66ae5838b9 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
@@ -8,13 +8,14 @@ | |||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/config.h> | ||
12 | |||
11 | static inline int | 13 | static inline int |
12 | setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc) | 14 | setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc) |
13 | { | 15 | { |
14 | int err = 0; | 16 | int err = 0; |
15 | 17 | ||
16 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | 18 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); |
17 | err |= __put_user(regs->cp0_status, &sc->sc_status); | ||
18 | 19 | ||
19 | #define save_gp_reg(i) do { \ | 20 | #define save_gp_reg(i) do { \ |
20 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \ | 21 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \ |
@@ -30,10 +31,32 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc) | |||
30 | save_gp_reg(31); | 31 | save_gp_reg(31); |
31 | #undef save_gp_reg | 32 | #undef save_gp_reg |
32 | 33 | ||
34 | #ifdef CONFIG_32BIT | ||
33 | err |= __put_user(regs->hi, &sc->sc_mdhi); | 35 | err |= __put_user(regs->hi, &sc->sc_mdhi); |
34 | err |= __put_user(regs->lo, &sc->sc_mdlo); | 36 | err |= __put_user(regs->lo, &sc->sc_mdlo); |
35 | err |= __put_user(regs->cp0_cause, &sc->sc_cause); | 37 | if (cpu_has_dsp) { |
36 | err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr); | 38 | err |= __put_user(mfhi1(), &sc->sc_hi1); |
39 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
40 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
41 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
42 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
43 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
44 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
45 | } | ||
46 | #endif | ||
47 | #ifdef CONFIG_64BIT | ||
48 | err |= __put_user(regs->hi, &sc->sc_hi[0]); | ||
49 | err |= __put_user(regs->lo, &sc->sc_lo[0]); | ||
50 | if (cpu_has_dsp) { | ||
51 | err |= __put_user(mfhi1(), &sc->sc_hi[1]); | ||
52 | err |= __put_user(mflo1(), &sc->sc_lo[1]); | ||
53 | err |= __put_user(mfhi2(), &sc->sc_hi[2]); | ||
54 | err |= __put_user(mflo2(), &sc->sc_lo[2]); | ||
55 | err |= __put_user(mfhi3(), &sc->sc_hi[3]); | ||
56 | err |= __put_user(mflo3(), &sc->sc_lo[3]); | ||
57 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
58 | } | ||
59 | #endif | ||
37 | 60 | ||
38 | err |= __put_user(!!used_math(), &sc->sc_used_math); | 61 | err |= __put_user(!!used_math(), &sc->sc_used_math); |
39 | 62 | ||
@@ -61,15 +84,40 @@ out: | |||
61 | static inline int | 84 | static inline int |
62 | restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) | 85 | restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) |
63 | { | 86 | { |
64 | int err = 0; | ||
65 | unsigned int used_math; | 87 | unsigned int used_math; |
88 | unsigned long treg; | ||
89 | int err = 0; | ||
66 | 90 | ||
67 | /* Always make any pending restarted system calls return -EINTR */ | 91 | /* Always make any pending restarted system calls return -EINTR */ |
68 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 92 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
69 | 93 | ||
70 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | 94 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); |
95 | #ifdef CONFIG_32BIT | ||
71 | err |= __get_user(regs->hi, &sc->sc_mdhi); | 96 | err |= __get_user(regs->hi, &sc->sc_mdhi); |
72 | err |= __get_user(regs->lo, &sc->sc_mdlo); | 97 | err |= __get_user(regs->lo, &sc->sc_mdlo); |
98 | if (cpu_has_dsp) { | ||
99 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
100 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
101 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
102 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
103 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
104 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
105 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
106 | } | ||
107 | #endif | ||
108 | #ifdef CONFIG_64BIT | ||
109 | err |= __get_user(regs->hi, &sc->sc_hi[0]); | ||
110 | err |= __get_user(regs->lo, &sc->sc_lo[0]); | ||
111 | if (cpu_has_dsp) { | ||
112 | err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg); | ||
113 | err |= __get_user(treg, &sc->sc_lo[1]); mtlo1(treg); | ||
114 | err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg); | ||
115 | err |= __get_user(treg, &sc->sc_lo[2]); mtlo2(treg); | ||
116 | err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg); | ||
117 | err |= __get_user(treg, &sc->sc_lo[3]); mtlo3(treg); | ||
118 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
119 | } | ||
120 | #endif | ||
73 | 121 | ||
74 | #define restore_gp_reg(i) do { \ | 122 | #define restore_gp_reg(i) do { \ |
75 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ | 123 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ |
@@ -112,7 +160,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) | |||
112 | static inline void * | 160 | static inline void * |
113 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 161 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) |
114 | { | 162 | { |
115 | unsigned long sp, almask; | 163 | unsigned long sp; |
116 | 164 | ||
117 | /* Default to using normal stack */ | 165 | /* Default to using normal stack */ |
118 | sp = regs->regs[29]; | 166 | sp = regs->regs[29]; |
@@ -128,10 +176,32 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
128 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | 176 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) |
129 | sp = current->sas_ss_sp + current->sas_ss_size; | 177 | sp = current->sas_ss_sp + current->sas_ss_size; |
130 | 178 | ||
131 | if (PLAT_TRAMPOLINE_STUFF_LINE) | 179 | return (void *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? 32 : ALMASK)); |
132 | almask = ~(PLAT_TRAMPOLINE_STUFF_LINE - 1); | 180 | } |
133 | else | 181 | |
134 | almask = ALMASK; | 182 | static inline int install_sigtramp(unsigned int __user *tramp, |
183 | unsigned int syscall) | ||
184 | { | ||
185 | int err; | ||
186 | |||
187 | /* | ||
188 | * Set up the return code ... | ||
189 | * | ||
190 | * li v0, __NR__foo_sigreturn | ||
191 | * syscall | ||
192 | */ | ||
193 | |||
194 | err = __put_user(0x24020000 + syscall, tramp + 0); | ||
195 | err |= __put_user(0x0000000c , tramp + 1); | ||
196 | if (ICACHE_REFILLS_WORKAROUND_WAR) { | ||
197 | err |= __put_user(0, tramp + 2); | ||
198 | err |= __put_user(0, tramp + 3); | ||
199 | err |= __put_user(0, tramp + 4); | ||
200 | err |= __put_user(0, tramp + 5); | ||
201 | err |= __put_user(0, tramp + 6); | ||
202 | err |= __put_user(0, tramp + 7); | ||
203 | } | ||
204 | flush_cache_sigtramp((unsigned long) tramp); | ||
135 | 205 | ||
136 | return (void *)((sp - frame_size) & almask); | 206 | return err; |
137 | } | 207 | } |
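The two magic words that install_sigtramp() stores are the MIPS I-type encoding of the return stub: 0x24020000 | nr is addiu $v0, $zero, nr (what the li v0, nr pseudo-op assembles to) and 0x0000000c is syscall. A small hedged decoder for those words; the syscall number below is an arbitrary placeholder, since the kernel passes the real __NR_*sigreturn value from <asm/unistd.h>:

    #include <stdio.h>

    static void decode_tramp_word(unsigned int insn)
    {
            unsigned int op  = insn >> 26;          /* major opcode field */
            unsigned int rs  = (insn >> 21) & 0x1f;
            unsigned int rt  = (insn >> 16) & 0x1f;
            unsigned int imm = insn & 0xffff;

            if (op == 0x09)                         /* 0b001001 == addiu */
                    printf("addiu $%u, $%u, %u\n", rt, rs, imm);
            else if (insn == 0x0000000c)
                    printf("syscall\n");
            else
                    printf(".word 0x%08x\n", insn);
    }

    int main(void)
    {
            unsigned int nr = 0x1234;               /* placeholder for __NR_sigreturn / __NR_rt_sigreturn */

            decode_tramp_word(0x24020000 + nr);     /* li v0, nr */
            decode_tramp_word(0x0000000c);          /* syscall */
            return 0;
    }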
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 0209c1dd1429..9202a17db8f7 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | #include <linux/config.h> | 10 | #include <linux/config.h> |
11 | #include <linux/cache.h> | ||
11 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
@@ -21,6 +22,7 @@ | |||
21 | #include <linux/unistd.h> | 22 | #include <linux/unistd.h> |
22 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
23 | 24 | ||
25 | #include <asm/abi.h> | ||
24 | #include <asm/asm.h> | 26 | #include <asm/asm.h> |
25 | #include <linux/bitops.h> | 27 | #include <linux/bitops.h> |
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
@@ -29,6 +31,7 @@ | |||
29 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
30 | #include <asm/ucontext.h> | 32 | #include <asm/ucontext.h> |
31 | #include <asm/cpu-features.h> | 33 | #include <asm/cpu-features.h> |
34 | #include <asm/war.h> | ||
32 | 35 | ||
33 | #include "signal-common.h" | 36 | #include "signal-common.h" |
34 | 37 | ||
@@ -36,7 +39,7 @@ | |||
36 | 39 | ||
37 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 40 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
38 | 41 | ||
39 | static int do_signal(sigset_t *oldset, struct pt_regs *regs); | 42 | int do_signal(sigset_t *oldset, struct pt_regs *regs); |
40 | 43 | ||
41 | /* | 44 | /* |
42 | * Atomically swap in the new signal mask, and wait for a signal. | 45 | * Atomically swap in the new signal mask, and wait for a signal. |
@@ -47,9 +50,10 @@ save_static_function(sys_sigsuspend); | |||
47 | __attribute_used__ noinline static int | 50 | __attribute_used__ noinline static int |
48 | _sys_sigsuspend(nabi_no_regargs struct pt_regs regs) | 51 | _sys_sigsuspend(nabi_no_regargs struct pt_regs regs) |
49 | { | 52 | { |
50 | sigset_t *uset, saveset, newset; | 53 | sigset_t saveset, newset; |
54 | sigset_t __user *uset; | ||
51 | 55 | ||
52 | uset = (sigset_t *) regs.regs[4]; | 56 | uset = (sigset_t __user *) regs.regs[4]; |
53 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) | 57 | if (copy_from_user(&newset, uset, sizeof(sigset_t))) |
54 | return -EFAULT; | 58 | return -EFAULT; |
55 | sigdelsetmask(&newset, ~_BLOCKABLE); | 59 | sigdelsetmask(&newset, ~_BLOCKABLE); |
@@ -75,7 +79,8 @@ save_static_function(sys_rt_sigsuspend); | |||
75 | __attribute_used__ noinline static int | 79 | __attribute_used__ noinline static int |
76 | _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | 80 | _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
77 | { | 81 | { |
78 | sigset_t *unewset, saveset, newset; | 82 | sigset_t saveset, newset; |
83 | sigset_t __user *unewset; | ||
79 | size_t sigsetsize; | 84 | size_t sigsetsize; |
80 | 85 | ||
81 | /* XXX Don't preclude handling different sized sigset_t's. */ | 86 | /* XXX Don't preclude handling different sized sigset_t's. */ |
@@ -83,7 +88,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
83 | if (sigsetsize != sizeof(sigset_t)) | 88 | if (sigsetsize != sizeof(sigset_t)) |
84 | return -EINVAL; | 89 | return -EINVAL; |
85 | 90 | ||
86 | unewset = (sigset_t *) regs.regs[4]; | 91 | unewset = (sigset_t __user *) regs.regs[4]; |
87 | if (copy_from_user(&newset, unewset, sizeof(newset))) | 92 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
88 | return -EFAULT; | 93 | return -EFAULT; |
89 | sigdelsetmask(&newset, ~_BLOCKABLE); | 94 | sigdelsetmask(&newset, ~_BLOCKABLE); |
@@ -147,33 +152,46 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction *act, | |||
147 | 152 | ||
148 | asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs) | 153 | asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs) |
149 | { | 154 | { |
150 | const stack_t *uss = (const stack_t *) regs.regs[4]; | 155 | const stack_t __user *uss = (const stack_t __user *) regs.regs[4]; |
151 | stack_t *uoss = (stack_t *) regs.regs[5]; | 156 | stack_t __user *uoss = (stack_t __user *) regs.regs[5]; |
152 | unsigned long usp = regs.regs[29]; | 157 | unsigned long usp = regs.regs[29]; |
153 | 158 | ||
154 | return do_sigaltstack(uss, uoss, usp); | 159 | return do_sigaltstack(uss, uoss, usp); |
155 | } | 160 | } |
156 | 161 | ||
157 | #if PLAT_TRAMPOLINE_STUFF_LINE | 162 | /* |
158 | #define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE))) | 163 | * Horribly complicated - with the bloody RM9000 workarounds enabled |
159 | #else | 164 | * the signal trampoline is moved to the end of the structure so we can
160 | #define __tramp | 165 | * increase the alignment without breaking software compatibility. |
161 | #endif | 166 | */ |
162 | |||
163 | #ifdef CONFIG_TRAD_SIGNALS | 167 | #ifdef CONFIG_TRAD_SIGNALS |
164 | struct sigframe { | 168 | struct sigframe { |
165 | u32 sf_ass[4]; /* argument save space for o32 */ | 169 | u32 sf_ass[4]; /* argument save space for o32 */ |
166 | u32 sf_code[2] __tramp; /* signal trampoline */ | 170 | #if ICACHE_REFILLS_WORKAROUND_WAR |
167 | struct sigcontext sf_sc __tramp; | 171 | u32 sf_pad[2]; |
172 | #else | ||
173 | u32 sf_code[2]; /* signal trampoline */ | ||
174 | #endif | ||
175 | struct sigcontext sf_sc; | ||
168 | sigset_t sf_mask; | 176 | sigset_t sf_mask; |
177 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
178 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
179 | #endif | ||
169 | }; | 180 | }; |
170 | #endif | 181 | #endif |
171 | 182 | ||
172 | struct rt_sigframe { | 183 | struct rt_sigframe { |
173 | u32 rs_ass[4]; /* argument save space for o32 */ | 184 | u32 rs_ass[4]; /* argument save space for o32 */ |
174 | u32 rs_code[2] __tramp; /* signal trampoline */ | 185 | #if ICACHE_REFILLS_WORKAROUND_WAR |
175 | struct siginfo rs_info __tramp; | 186 | u32 rs_pad[2]; |
187 | #else | ||
188 | u32 rs_code[2]; /* signal trampoline */ | ||
189 | #endif | ||
190 | struct siginfo rs_info; | ||
176 | struct ucontext rs_uc; | 191 | struct ucontext rs_uc; |
192 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
193 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
194 | #endif | ||
177 | }; | 195 | }; |
178 | 196 | ||
179 | #ifdef CONFIG_TRAD_SIGNALS | 197 | #ifdef CONFIG_TRAD_SIGNALS |
@@ -214,7 +232,7 @@ _sys_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
214 | badframe: | 232 | badframe: |
215 | force_sig(SIGSEGV, current); | 233 | force_sig(SIGSEGV, current); |
216 | } | 234 | } |
217 | #endif | 235 | #endif /* CONFIG_TRAD_SIGNALS */ |
218 | 236 | ||
219 | save_static_function(sys_rt_sigreturn); | 237 | save_static_function(sys_rt_sigreturn); |
220 | __attribute_used__ noinline static void | 238 | __attribute_used__ noinline static void |
@@ -260,7 +278,7 @@ badframe: | |||
260 | } | 278 | } |
261 | 279 | ||
262 | #ifdef CONFIG_TRAD_SIGNALS | 280 | #ifdef CONFIG_TRAD_SIGNALS |
263 | static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | 281 | int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, |
264 | int signr, sigset_t *set) | 282 | int signr, sigset_t *set) |
265 | { | 283 | { |
266 | struct sigframe *frame; | 284 | struct sigframe *frame; |
@@ -270,17 +288,7 @@ static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
270 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 288 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
271 | goto give_sigsegv; | 289 | goto give_sigsegv; |
272 | 290 | ||
273 | /* | 291 | install_sigtramp(frame->sf_code, __NR_sigreturn); |
274 | * Set up the return code ... | ||
275 | * | ||
276 | * li v0, __NR_sigreturn | ||
277 | * syscall | ||
278 | */ | ||
279 | if (PLAT_TRAMPOLINE_STUFF_LINE) | ||
280 | __clear_user(frame->sf_code, PLAT_TRAMPOLINE_STUFF_LINE); | ||
281 | err |= __put_user(0x24020000 + __NR_sigreturn, frame->sf_code + 0); | ||
282 | err |= __put_user(0x0000000c , frame->sf_code + 1); | ||
283 | flush_cache_sigtramp((unsigned long) frame->sf_code); | ||
284 | 292 | ||
285 | err |= setup_sigcontext(regs, &frame->sf_sc); | 293 | err |= setup_sigcontext(regs, &frame->sf_sc); |
286 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | 294 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); |
@@ -309,14 +317,15 @@ static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
309 | current->comm, current->pid, | 317 | current->comm, current->pid, |
310 | frame, regs->cp0_epc, frame->regs[31]); | 318 | frame, regs->cp0_epc, frame->regs[31]); |
311 | #endif | 319 | #endif |
312 | return; | 320 | return 1; |
313 | 321 | ||
314 | give_sigsegv: | 322 | give_sigsegv: |
315 | force_sigsegv(signr, current); | 323 | force_sigsegv(signr, current); |
324 | return 0; | ||
316 | } | 325 | } |
317 | #endif | 326 | #endif |
318 | 327 | ||
319 | static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | 328 | int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, |
320 | int signr, sigset_t *set, siginfo_t *info) | 329 | int signr, sigset_t *set, siginfo_t *info) |
321 | { | 330 | { |
322 | struct rt_sigframe *frame; | 331 | struct rt_sigframe *frame; |
@@ -326,17 +335,7 @@ static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
326 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 335 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
327 | goto give_sigsegv; | 336 | goto give_sigsegv; |
328 | 337 | ||
329 | /* | 338 | install_sigtramp(frame->rs_code, __NR_rt_sigreturn); |
330 | * Set up the return code ... | ||
331 | * | ||
332 | * li v0, __NR_rt_sigreturn | ||
333 | * syscall | ||
334 | */ | ||
335 | if (PLAT_TRAMPOLINE_STUFF_LINE) | ||
336 | __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE); | ||
337 | err |= __put_user(0x24020000 + __NR_rt_sigreturn, frame->rs_code + 0); | ||
338 | err |= __put_user(0x0000000c , frame->rs_code + 1); | ||
339 | flush_cache_sigtramp((unsigned long) frame->rs_code); | ||
340 | 339 | ||
341 | /* Create siginfo. */ | 340 | /* Create siginfo. */ |
342 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 341 | err |= copy_siginfo_to_user(&frame->rs_info, info); |
@@ -378,18 +377,21 @@ static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
378 | current->comm, current->pid, | 377 | current->comm, current->pid, |
379 | frame, regs->cp0_epc, regs->regs[31]); | 378 | frame, regs->cp0_epc, regs->regs[31]); |
380 | #endif | 379 | #endif |
381 | return; | 380 | return 1; |
382 | 381 | ||
383 | give_sigsegv: | 382 | give_sigsegv: |
384 | force_sigsegv(signr, current); | 383 | force_sigsegv(signr, current); |
384 | return 0; | ||
385 | } | 385 | } |
386 | 386 | ||
387 | extern void setup_rt_frame_n32(struct k_sigaction * ka, | 387 | extern void setup_rt_frame_n32(struct k_sigaction * ka, |
388 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info); | 388 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info); |
389 | 389 | ||
390 | static inline void handle_signal(unsigned long sig, siginfo_t *info, | 390 | static inline int handle_signal(unsigned long sig, siginfo_t *info, |
391 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) | 391 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) |
392 | { | 392 | { |
393 | int ret; | ||
394 | |||
393 | switch(regs->regs[0]) { | 395 | switch(regs->regs[0]) { |
394 | case ERESTART_RESTARTBLOCK: | 396 | case ERESTART_RESTARTBLOCK: |
395 | case ERESTARTNOHAND: | 397 | case ERESTARTNOHAND: |
@@ -408,22 +410,10 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
408 | 410 | ||
409 | regs->regs[0] = 0; /* Don't deal with this again. */ | 411 | regs->regs[0] = 0; /* Don't deal with this again. */ |
410 | 412 | ||
411 | #ifdef CONFIG_TRAD_SIGNALS | 413 | if (sig_uses_siginfo(ka)) |
412 | if (ka->sa.sa_flags & SA_SIGINFO) { | 414 | ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); |
413 | #else | ||
414 | if (1) { | ||
415 | #endif | ||
416 | #ifdef CONFIG_MIPS32_N32 | ||
417 | if ((current->thread.mflags & MF_ABI_MASK) == MF_N32) | ||
418 | setup_rt_frame_n32 (ka, regs, sig, oldset, info); | ||
419 | else | ||
420 | #endif | ||
421 | setup_rt_frame(ka, regs, sig, oldset, info); | ||
422 | } | ||
423 | #ifdef CONFIG_TRAD_SIGNALS | ||
424 | else | 415 | else |
425 | setup_frame(ka, regs, sig, oldset); | 416 | ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); |
426 | #endif | ||
427 | 417 | ||
428 | spin_lock_irq(¤t->sighand->siglock); | 418 | spin_lock_irq(¤t->sighand->siglock); |
429 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 419 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
@@ -431,23 +421,16 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
431 | sigaddset(¤t->blocked,sig); | 421 | sigaddset(¤t->blocked,sig); |
432 | recalc_sigpending(); | 422 | recalc_sigpending(); |
433 | spin_unlock_irq(¤t->sighand->siglock); | 423 | spin_unlock_irq(¤t->sighand->siglock); |
434 | } | ||
435 | 424 | ||
436 | extern int do_signal32(sigset_t *oldset, struct pt_regs *regs); | 425 | return ret; |
437 | extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs); | 426 | } |
438 | 427 | ||
439 | static int do_signal(sigset_t *oldset, struct pt_regs *regs) | 428 | int do_signal(sigset_t *oldset, struct pt_regs *regs) |
440 | { | 429 | { |
441 | struct k_sigaction ka; | 430 | struct k_sigaction ka; |
442 | siginfo_t info; | 431 | siginfo_t info; |
443 | int signr; | 432 | int signr; |
444 | 433 | ||
445 | #ifdef CONFIG_BINFMT_ELF32 | ||
446 | if ((current->thread.mflags & MF_ABI_MASK) == MF_O32) { | ||
447 | return do_signal32(oldset, regs); | ||
448 | } | ||
449 | #endif | ||
450 | |||
451 | /* | 434 | /* |
452 | * We want the common case to go fast, which is why we may in certain | 435 | * We want the common case to go fast, which is why we may in certain |
453 | * cases get here from kernel mode. Just return without doing anything | 436 | * cases get here from kernel mode. Just return without doing anything |
@@ -463,10 +446,8 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs) | |||
463 | oldset = ¤t->blocked; | 446 | oldset = ¤t->blocked; |
464 | 447 | ||
465 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 448 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
466 | if (signr > 0) { | 449 | if (signr > 0) |
467 | handle_signal(signr, &info, &ka, oldset, regs); | 450 | return handle_signal(signr, &info, &ka, oldset, regs); |
468 | return 1; | ||
469 | } | ||
470 | 451 | ||
471 | no_signal: | 452 | no_signal: |
472 | /* | 453 | /* |
@@ -499,18 +480,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
499 | { | 480 | { |
500 | /* deal with pending signal delivery */ | 481 | /* deal with pending signal delivery */ |
501 | if (thread_info_flags & _TIF_SIGPENDING) { | 482 | if (thread_info_flags & _TIF_SIGPENDING) { |
502 | #ifdef CONFIG_BINFMT_ELF32 | 483 | current->thread.abi->do_signal(oldset, regs); |
503 | if (likely((current->thread.mflags & MF_ABI_MASK) == MF_O32)) { | ||
504 | do_signal32(oldset, regs); | ||
505 | return; | ||
506 | } | ||
507 | #endif | ||
508 | #ifdef CONFIG_BINFMT_IRIX | ||
509 | if (unlikely(current->personality != PER_LINUX)) { | ||
510 | do_irix_signal(oldset, regs); | ||
511 | return; | ||
512 | } | ||
513 | #endif | ||
514 | do_signal(oldset, regs); | ||
515 | } | 484 | } |
516 | } | 485 | } |
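The old #ifdef chains in handle_signal() and do_notify_resume() give way to an indirection through current->thread.abi, whose callbacks (do_signal, setup_frame, setup_rt_frame) appear in the calls above; the table itself is declared in <asm/abi.h>. A hedged, stub-typed sketch of the dispatch pattern rather than a copy of the kernel structure, so it compiles on its own:

    #include <stdio.h>

    struct pt_regs;                                 /* opaque stand-ins for the kernel types */
    typedef struct { unsigned long sig[4]; } sigset_stub_t;

    struct abi_ops {                                /* models struct mips_abi */
            int (*do_signal)(sigset_stub_t *oldset, struct pt_regs *regs);
    };

    static int do_signal_64(sigset_stub_t *oldset, struct pt_regs *regs)
    {
            (void) oldset; (void) regs;
            printf("native 64-bit signal delivery\n");
            return 1;
    }

    static int do_signal_o32(sigset_stub_t *oldset, struct pt_regs *regs)
    {
            (void) oldset; (void) regs;
            printf("o32 compat signal delivery\n");
            return 1;
    }

    int main(void)
    {
            static const struct abi_ops abi_64  = { .do_signal = do_signal_64 };
            static const struct abi_ops abi_o32 = { .do_signal = do_signal_o32 };
            const struct abi_ops *abi = &abi_o32;   /* chosen per task, e.g. at exec time */

            abi->do_signal(NULL, NULL);             /* what do_notify_resume() now does */
            (void) abi_64;
            return 0;
    }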
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 8ddfbd8d425a..dbe821303125 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 1994 - 2000 Ralf Baechle | 7 | * Copyright (C) 1994 - 2000 Ralf Baechle |
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | #include <linux/cache.h> | ||
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
12 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
@@ -21,6 +22,7 @@ | |||
21 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
22 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
23 | 24 | ||
25 | #include <asm/abi.h> | ||
24 | #include <asm/asm.h> | 26 | #include <asm/asm.h> |
25 | #include <linux/bitops.h> | 27 | #include <linux/bitops.h> |
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
@@ -29,6 +31,7 @@ | |||
29 | #include <asm/ucontext.h> | 31 | #include <asm/ucontext.h> |
30 | #include <asm/system.h> | 32 | #include <asm/system.h> |
31 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
34 | #include <asm/war.h> | ||
32 | 35 | ||
33 | #define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) | 36 | #define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) |
34 | 37 | ||
@@ -76,8 +79,10 @@ typedef struct compat_siginfo { | |||
76 | 79 | ||
77 | /* POSIX.1b timers */ | 80 | /* POSIX.1b timers */ |
78 | struct { | 81 | struct { |
79 | unsigned int _timer1; | 82 | timer_t _tid; /* timer id */ |
80 | unsigned int _timer2; | 83 | int _overrun; /* overrun count */ |
84 | compat_sigval_t _sigval;/* same as below */ | ||
85 | int _sys_private; /* not to be passed to user */ | ||
81 | } _timer; | 86 | } _timer; |
82 | 87 | ||
83 | /* POSIX.1b signals */ | 88 | /* POSIX.1b signals */ |
@@ -259,11 +264,12 @@ asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act, | |||
259 | 264 | ||
260 | if (act) { | 265 | if (act) { |
261 | old_sigset_t mask; | 266 | old_sigset_t mask; |
267 | s32 handler; | ||
262 | 268 | ||
263 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) | 269 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) |
264 | return -EFAULT; | 270 | return -EFAULT; |
265 | err |= __get_user((u32)(u64)new_ka.sa.sa_handler, | 271 | err |= __get_user(handler, &act->sa_handler); |
266 | &act->sa_handler); | 272 | new_ka.sa.sa_handler = (void*)(s64)handler; |
267 | err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 273 | err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
268 | err |= __get_user(mask, &act->sa_mask.sig[0]); | 274 | err |= __get_user(mask, &act->sa_mask.sig[0]); |
269 | if (err) | 275 | if (err) |
@@ -331,8 +337,9 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs) | |||
331 | 337 | ||
332 | static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc) | 338 | static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc) |
333 | { | 339 | { |
340 | u32 used_math; | ||
334 | int err = 0; | 341 | int err = 0; |
335 | __u32 used_math; | 342 | s32 treg; |
336 | 343 | ||
337 | /* Always make any pending restarted system calls return -EINTR */ | 344 | /* Always make any pending restarted system calls return -EINTR */ |
338 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 345 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
@@ -340,6 +347,15 @@ static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc) | |||
340 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | 347 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); |
341 | err |= __get_user(regs->hi, &sc->sc_mdhi); | 348 | err |= __get_user(regs->hi, &sc->sc_mdhi); |
342 | err |= __get_user(regs->lo, &sc->sc_mdlo); | 349 | err |= __get_user(regs->lo, &sc->sc_mdlo); |
350 | if (cpu_has_dsp) { | ||
351 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
352 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
353 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
354 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
355 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
356 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
357 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
358 | } | ||
343 | 359 | ||
344 | #define restore_gp_reg(i) do { \ | 360 | #define restore_gp_reg(i) do { \ |
345 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ | 361 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ |
@@ -378,16 +394,30 @@ static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc) | |||
378 | 394 | ||
379 | struct sigframe { | 395 | struct sigframe { |
380 | u32 sf_ass[4]; /* argument save space for o32 */ | 396 | u32 sf_ass[4]; /* argument save space for o32 */ |
397 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
398 | u32 sf_pad[2]; | ||
399 | #else | ||
381 | u32 sf_code[2]; /* signal trampoline */ | 400 | u32 sf_code[2]; /* signal trampoline */ |
401 | #endif | ||
382 | struct sigcontext32 sf_sc; | 402 | struct sigcontext32 sf_sc; |
383 | sigset_t sf_mask; | 403 | sigset_t sf_mask; |
404 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
405 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
406 | #endif | ||
384 | }; | 407 | }; |
385 | 408 | ||
386 | struct rt_sigframe32 { | 409 | struct rt_sigframe32 { |
387 | u32 rs_ass[4]; /* argument save space for o32 */ | 410 | u32 rs_ass[4]; /* argument save space for o32 */ |
411 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
412 | u32 rs_pad[2]; | ||
413 | #else | ||
388 | u32 rs_code[2]; /* signal trampoline */ | 414 | u32 rs_code[2]; /* signal trampoline */ |
415 | #endif | ||
389 | compat_siginfo_t rs_info; | 416 | compat_siginfo_t rs_info; |
390 | struct ucontext32 rs_uc; | 417 | struct ucontext32 rs_uc; |
418 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
419 | u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */ | ||
420 | #endif | ||
391 | }; | 421 | }; |
392 | 422 | ||
393 | int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from) | 423 | int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from) |
@@ -411,6 +441,11 @@ int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from) | |||
411 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); | 441 | err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); |
412 | else { | 442 | else { |
413 | switch (from->si_code >> 16) { | 443 | switch (from->si_code >> 16) { |
444 | case __SI_TIMER >> 16: | ||
445 | err |= __put_user(from->si_tid, &to->si_tid); | ||
446 | err |= __put_user(from->si_overrun, &to->si_overrun); | ||
447 | err |= __put_user(from->si_int, &to->si_int); | ||
448 | break; | ||
414 | case __SI_CHLD >> 16: | 449 | case __SI_CHLD >> 16: |
415 | err |= __put_user(from->si_utime, &to->si_utime); | 450 | err |= __put_user(from->si_utime, &to->si_utime); |
416 | err |= __put_user(from->si_stime, &to->si_stime); | 451 | err |= __put_user(from->si_stime, &to->si_stime); |
@@ -480,6 +515,7 @@ __attribute_used__ noinline static void | |||
480 | _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | 515 | _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
481 | { | 516 | { |
482 | struct rt_sigframe32 *frame; | 517 | struct rt_sigframe32 *frame; |
518 | mm_segment_t old_fs; | ||
483 | sigset_t set; | 519 | sigset_t set; |
484 | stack_t st; | 520 | stack_t st; |
485 | s32 sp; | 521 | s32 sp; |
@@ -510,7 +546,10 @@ _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
510 | 546 | ||
511 | /* It is more difficult to avoid calling this function than to | 547 | /* It is more difficult to avoid calling this function than to |
512 | call it and ignore errors. */ | 548 | call it and ignore errors. */ |
549 | old_fs = get_fs(); | ||
550 | set_fs (KERNEL_DS); | ||
513 | do_sigaltstack(&st, NULL, regs.regs[29]); | 551 | do_sigaltstack(&st, NULL, regs.regs[29]); |
552 | set_fs (old_fs); | ||
514 | 553 | ||
515 | /* | 554 | /* |
516 | * Don't let your children do this ... | 555 | * Don't let your children do this ... |
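do_sigaltstack() expects a user-space pointer, so the 32-bit sigreturn path above temporarily widens the address limit with get_fs()/set_fs(KERNEL_DS) while handing it a stack_t already copied into kernel memory, then restores the old limit. The state being restored is the alternate signal stack that user code sets up with sigaltstack(2); a small stand-alone illustration of that user-visible side (not the kernel path):

    #define _XOPEN_SOURCE 700
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static char altstack[64 * 1024];

    static void handler(int sig)
    {
        stack_t cur;
        sigaltstack(NULL, &cur);    /* query while running on the alternate stack */
        printf("in handler: SS_ONSTACK=%d\n", (cur.ss_flags & SS_ONSTACK) != 0);
    }

    int main(void)
    {
        stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack), .ss_flags = 0 };
        if (sigaltstack(&ss, NULL) < 0) { perror("sigaltstack"); return 1; }

        struct sigaction sa;
        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;   /* deliver SIGUSR1 on the alternate stack */
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
    }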
@@ -550,8 +589,15 @@ static inline int setup_sigcontext32(struct pt_regs *regs, | |||
550 | 589 | ||
551 | err |= __put_user(regs->hi, &sc->sc_mdhi); | 590 | err |= __put_user(regs->hi, &sc->sc_mdhi); |
552 | err |= __put_user(regs->lo, &sc->sc_mdlo); | 591 | err |= __put_user(regs->lo, &sc->sc_mdlo); |
553 | err |= __put_user(regs->cp0_cause, &sc->sc_cause); | 592 | if (cpu_has_dsp) { |
554 | err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr); | 593 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); |
594 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
595 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
596 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
597 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
598 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
599 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
600 | } | ||
555 | 601 | ||
556 | err |= __put_user(!!used_math(), &sc->sc_used_math); | 602 | err |= __put_user(!!used_math(), &sc->sc_used_math); |
557 | 603 | ||
@@ -601,7 +647,7 @@ static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | |||
601 | return (void *)((sp - frame_size) & ALMASK); | 647 | return (void *)((sp - frame_size) & ALMASK); |
602 | } | 648 | } |
603 | 649 | ||
604 | static inline void setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | 650 | void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, |
605 | int signr, sigset_t *set) | 651 | int signr, sigset_t *set) |
606 | { | 652 | { |
607 | struct sigframe *frame; | 653 | struct sigframe *frame; |
@@ -654,9 +700,7 @@ give_sigsegv: | |||
654 | force_sigsegv(signr, current); | 700 | force_sigsegv(signr, current); |
655 | } | 701 | } |
656 | 702 | ||
657 | static inline void setup_rt_frame(struct k_sigaction * ka, | 703 | void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) |
658 | struct pt_regs *regs, int signr, | ||
659 | sigset_t *set, siginfo_t *info) | ||
660 | { | 704 | { |
661 | struct rt_sigframe32 *frame; | 705 | struct rt_sigframe32 *frame; |
662 | int err = 0; | 706 | int err = 0; |
@@ -725,9 +769,11 @@ give_sigsegv: | |||
725 | force_sigsegv(signr, current); | 769 | force_sigsegv(signr, current); |
726 | } | 770 | } |
727 | 771 | ||
728 | static inline void handle_signal(unsigned long sig, siginfo_t *info, | 772 | static inline int handle_signal(unsigned long sig, siginfo_t *info, |
729 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) | 773 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs) |
730 | { | 774 | { |
775 | int ret; | ||
776 | |||
731 | switch (regs->regs[0]) { | 777 | switch (regs->regs[0]) { |
732 | case ERESTART_RESTARTBLOCK: | 778 | case ERESTART_RESTARTBLOCK: |
733 | case ERESTARTNOHAND: | 779 | case ERESTARTNOHAND: |
@@ -747,9 +793,9 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
747 | regs->regs[0] = 0; /* Don't deal with this again. */ | 793 | regs->regs[0] = 0; /* Don't deal with this again. */ |
748 | 794 | ||
749 | if (ka->sa.sa_flags & SA_SIGINFO) | 795 | if (ka->sa.sa_flags & SA_SIGINFO) |
750 | setup_rt_frame(ka, regs, sig, oldset, info); | 796 | ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); |
751 | else | 797 | else |
752 | setup_frame(ka, regs, sig, oldset); | 798 | ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); |
753 | 799 | ||
754 | spin_lock_irq(&current->sighand->siglock); | 800 | spin_lock_irq(&current->sighand->siglock); |
755 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | 801 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); |
@@ -757,6 +803,8 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info, | |||
757 | sigaddset(&current->blocked,sig); | 803 | sigaddset(&current->blocked,sig); |
758 | recalc_sigpending(); | 804 | recalc_sigpending(); |
759 | spin_unlock_irq(&current->sighand->siglock); | 805 | spin_unlock_irq(&current->sighand->siglock); |
806 | |||
807 | return ret; | ||
760 | } | 808 | } |
761 | 809 | ||
762 | int do_signal32(sigset_t *oldset, struct pt_regs *regs) | 810 | int do_signal32(sigset_t *oldset, struct pt_regs *regs) |
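handle_signal() no longer calls fixed setup_frame()/setup_rt_frame() helpers; it dispatches through per-ABI function pointers (current->thread.abi->setup_frame / setup_rt_frame) so o32, n32 and n64 frame layouts can coexist, and it now propagates the helper's return value. A rough stand-alone sketch of that dispatch-table pattern (the names and fields below are illustrative, not the kernel's actual mips_abi structure):

    #include <stdio.h>

    struct frame_ctx { int signr; };    /* stands in for pt_regs/k_sigaction/... */

    /* One vtable per ABI: each entry knows how to lay out its own signal frame. */
    struct abi_ops {
        const char *name;
        int (*setup_frame)(struct frame_ctx *ctx);
        int (*setup_rt_frame)(struct frame_ctx *ctx);
    };

    static int o32_frame(struct frame_ctx *c)    { printf("o32 frame, sig %d\n", c->signr); return 1; }
    static int o32_rt_frame(struct frame_ctx *c) { printf("o32 rt frame, sig %d\n", c->signr); return 1; }
    static int n32_rt_frame(struct frame_ctx *c) { printf("n32 rt frame, sig %d\n", c->signr); return 1; }

    static const struct abi_ops o32_abi = { "o32", o32_frame, o32_rt_frame };
    static const struct abi_ops n32_abi = { "n32", NULL,      n32_rt_frame };

    static int handle_signal(const struct abi_ops *abi, struct frame_ctx *ctx, int siginfo_wanted)
    {
        /* Mirrors the new kernel flow: pick rt vs. classic frame through the
         * ABI table and return the helper's result instead of dropping it. */
        if (siginfo_wanted || !abi->setup_frame)
            return abi->setup_rt_frame(ctx);
        return abi->setup_frame(ctx);
    }

    int main(void)
    {
        struct frame_ctx ctx = { .signr = 11 };
        handle_signal(&o32_abi, &ctx, 0);
        handle_signal(&n32_abi, &ctx, 1);
        return 0;
    }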
@@ -780,10 +828,8 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs) | |||
780 | oldset = ¤t->blocked; | 828 | oldset = ¤t->blocked; |
781 | 829 | ||
782 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 830 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
783 | if (signr > 0) { | 831 | if (signr > 0) |
784 | handle_signal(signr, &info, &ka, oldset, regs); | 832 | return handle_signal(signr, &info, &ka, oldset, regs); |
785 | return 1; | ||
786 | } | ||
787 | 833 | ||
788 | no_signal: | 834 | no_signal: |
789 | /* | 835 | /* |
@@ -819,12 +865,13 @@ asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act, | |||
819 | goto out; | 865 | goto out; |
820 | 866 | ||
821 | if (act) { | 867 | if (act) { |
868 | s32 handler; | ||
822 | int err = 0; | 869 | int err = 0; |
823 | 870 | ||
824 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) | 871 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) |
825 | return -EFAULT; | 872 | return -EFAULT; |
826 | err |= __get_user((u32)(u64)new_sa.sa.sa_handler, | 873 | err |= __get_user(handler, &act->sa_handler); |
827 | &act->sa_handler); | 874 | new_sa.sa.sa_handler = (void*)(s64)handler; |
828 | err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags); | 875 | err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags); |
829 | err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask); | 876 | err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask); |
830 | if (err) | 877 | if (err) |
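The sys32_rt_sigaction() change replaces the old cast-inside-__get_user construct, which did not reliably produce a correctly extended 64-bit pointer, with an explicit read into a signed 32-bit temporary followed by (void *)(s64)handler. Sign extension is what lets values with the top bit set (including specials such as SIG_ERR, i.e. -1) survive the widening; a tiny illustration with kernel types swapped for stdint ones:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t low  = 0x7fff1234u;    /* positive as an s32: both extensions agree */
        uint32_t high = 0x80001234u;    /* "negative" as an s32: they differ */

        printf("0x%08x  zero-ext: 0x%016llx  sign-ext: 0x%016llx\n", low,
               (unsigned long long)(uint64_t)low,
               (unsigned long long)(int64_t)(int32_t)low);
        printf("0x%08x  zero-ext: 0x%016llx  sign-ext: 0x%016llx\n", high,
               (unsigned long long)(uint64_t)high,
               (unsigned long long)(int64_t)(int32_t)high);
        return 0;
    }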
@@ -902,3 +949,30 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t *uinfo) | |||
902 | set_fs (old_fs); | 949 | set_fs (old_fs); |
903 | return ret; | 950 | return ret; |
904 | } | 951 | } |
952 | |||
953 | asmlinkage long | ||
954 | sys32_waitid(int which, compat_pid_t pid, | ||
955 | compat_siginfo_t __user *uinfo, int options, | ||
956 | struct compat_rusage __user *uru) | ||
957 | { | ||
958 | siginfo_t info; | ||
959 | struct rusage ru; | ||
960 | long ret; | ||
961 | mm_segment_t old_fs = get_fs(); | ||
962 | |||
963 | info.si_signo = 0; | ||
964 | set_fs (KERNEL_DS); | ||
965 | ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options, | ||
966 | uru ? (struct rusage __user *) &ru : NULL); | ||
967 | set_fs (old_fs); | ||
968 | |||
969 | if (ret < 0 || info.si_signo == 0) | ||
970 | return ret; | ||
971 | |||
972 | if (uru && (ret = put_compat_rusage(&ru, uru))) | ||
973 | return ret; | ||
974 | |||
975 | BUG_ON(info.si_code & __SI_MASK); | ||
976 | info.si_code |= __SI_CHLD; | ||
977 | return copy_siginfo_to_user32(uinfo, &info); | ||
978 | } | ||
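sys32_waitid() is the usual compat shim: it calls the native sys_waitid() on kernel-resident copies under KERNEL_DS, converts the rusage to its 32-bit layout, and tags si_code with __SI_CHLD so copy_siginfo_to_user32() picks the right union member. From user space it behaves like an ordinary waitid(2) call; for reference, a minimal caller showing the fields the compat layer has to translate:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0)
            _exit(7);                       /* child: exit with status 7 */

        siginfo_t info;                     /* filled in by waitid() */
        if (waitid(P_PID, pid, &info, WEXITED) < 0) {
            perror("waitid");
            return 1;
        }
        printf("si_pid=%d si_code=%s si_status=%d\n",
               (int)info.si_pid,
               info.si_code == CLD_EXITED ? "CLD_EXITED" : "other",
               info.si_status);
        return 0;
    }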
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 3544208d4b4b..ec61b2670ba6 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | */ | 17 | */ |
18 | #include <linux/cache.h> | ||
19 | #include <linux/sched.h> | ||
18 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
19 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
20 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
@@ -36,6 +38,7 @@ | |||
36 | #include <asm/system.h> | 38 | #include <asm/system.h> |
37 | #include <asm/fpu.h> | 39 | #include <asm/fpu.h> |
38 | #include <asm/cpu-features.h> | 40 | #include <asm/cpu-features.h> |
41 | #include <asm/war.h> | ||
39 | 42 | ||
40 | #include "signal-common.h" | 43 | #include "signal-common.h" |
41 | 44 | ||
@@ -62,17 +65,18 @@ struct ucontextn32 { | |||
62 | sigset_t uc_sigmask; /* mask last for extensibility */ | 65 | sigset_t uc_sigmask; /* mask last for extensibility */ |
63 | }; | 66 | }; |
64 | 67 | ||
65 | #if PLAT_TRAMPOLINE_STUFF_LINE | ||
66 | #define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE))) | ||
67 | #else | ||
68 | #define __tramp | ||
69 | #endif | ||
70 | |||
71 | struct rt_sigframe_n32 { | 68 | struct rt_sigframe_n32 { |
72 | u32 rs_ass[4]; /* argument save space for o32 */ | 69 | u32 rs_ass[4]; /* argument save space for o32 */ |
73 | u32 rs_code[2] __tramp; /* signal trampoline */ | 70 | #if ICACHE_REFILLS_WORKAROUND_WAR |
74 | struct siginfo rs_info __tramp; | 71 | u32 rs_pad[2]; |
72 | #else | ||
73 | u32 rs_code[2]; /* signal trampoline */ | ||
74 | #endif | ||
75 | struct siginfo rs_info; | ||
75 | struct ucontextn32 rs_uc; | 76 | struct ucontextn32 rs_uc; |
77 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
78 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
79 | #endif | ||
76 | }; | 80 | }; |
77 | 81 | ||
78 | save_static_function(sysn32_rt_sigreturn); | 82 | save_static_function(sysn32_rt_sigreturn); |
@@ -126,7 +130,7 @@ badframe: | |||
126 | force_sig(SIGSEGV, current); | 130 | force_sig(SIGSEGV, current); |
127 | } | 131 | } |
128 | 132 | ||
129 | void setup_rt_frame_n32(struct k_sigaction * ka, | 133 | int setup_rt_frame_n32(struct k_sigaction * ka, |
130 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) | 134 | struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) |
131 | { | 135 | { |
132 | struct rt_sigframe_n32 *frame; | 136 | struct rt_sigframe_n32 *frame; |
@@ -137,17 +141,7 @@ void setup_rt_frame_n32(struct k_sigaction * ka, | |||
137 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 141 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
138 | goto give_sigsegv; | 142 | goto give_sigsegv; |
139 | 143 | ||
140 | /* | 144 | install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn); |
141 | * Set up the return code ... | ||
142 | * | ||
143 | * li v0, __NR_rt_sigreturn | ||
144 | * syscall | ||
145 | */ | ||
146 | if (PLAT_TRAMPOLINE_STUFF_LINE) | ||
147 | __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE); | ||
148 | err |= __put_user(0x24020000 + __NR_N32_rt_sigreturn, frame->rs_code + 0); | ||
149 | err |= __put_user(0x0000000c , frame->rs_code + 1); | ||
150 | flush_cache_sigtramp((unsigned long) frame->rs_code); | ||
151 | 145 | ||
152 | /* Create siginfo. */ | 146 | /* Create siginfo. */ |
153 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 147 | err |= copy_siginfo_to_user(&frame->rs_info, info); |
@@ -190,8 +184,9 @@ void setup_rt_frame_n32(struct k_sigaction * ka, | |||
190 | current->comm, current->pid, | 184 | current->comm, current->pid, |
191 | frame, regs->cp0_epc, regs->regs[31]); | 185 | frame, regs->cp0_epc, regs->regs[31]); |
192 | #endif | 186 | #endif |
193 | return; | 187 | return 1; |
194 | 188 | ||
195 | give_sigsegv: | 189 | give_sigsegv: |
196 | force_sigsegv(signr, current); | 190 | force_sigsegv(signr, current); |
191 | return 0; | ||
197 | } | 192 | } |
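The open-coded trampoline writes in setup_rt_frame_n32() are replaced by the shared install_sigtramp() helper, but the two words stored are the same ones the deleted comment spelled out: "li v0, __NR_rt_sigreturn" (encoded as 0x24020000 + nr, i.e. addiu $v0, $zero, nr) followed by "syscall" (0x0000000c). A few lines of C that build and print those encodings; the syscall number used here is only a demo value, the real __NR_N32_rt_sigreturn comes from <asm/unistd.h>:

    #include <stdio.h>
    #include <stdint.h>

    #define NR_RT_SIGRETURN_DEMO 6211       /* assumed value for the demo only */

    int main(void)
    {
        uint32_t tramp[2];

        tramp[0] = 0x24020000 + NR_RT_SIGRETURN_DEMO;   /* addiu $v0, $zero, nr  (li v0, nr) */
        tramp[1] = 0x0000000c;                          /* syscall */

        printf("li v0, %d -> 0x%08x\n", NR_RT_SIGRETURN_DEMO, tramp[0]);
        printf("syscall    -> 0x%08x\n", tramp[1]);
        return 0;
    }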
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index af5cd3b8a396..fcacf1aae98a 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -50,7 +50,6 @@ static void smp_tune_scheduling (void) | |||
50 | { | 50 | { |
51 | struct cache_desc *cd = &current_cpu_data.scache; | 51 | struct cache_desc *cd = &current_cpu_data.scache; |
52 | unsigned long cachesize; /* kB */ | 52 | unsigned long cachesize; /* kB */ |
53 | unsigned long bandwidth = 350; /* MB/s */ | ||
54 | unsigned long cpu_khz; | 53 | unsigned long cpu_khz; |
55 | 54 | ||
56 | /* | 55 | /* |
@@ -121,7 +120,19 @@ struct call_data_struct *call_data; | |||
121 | * or are or have executed. | 120 | * or are or have executed. |
122 | * | 121 | * |
123 | * You must not call this function with disabled interrupts or from a | 122 | * You must not call this function with disabled interrupts or from a |
124 | * hardware interrupt handler or from a bottom half handler. | 123 | * hardware interrupt handler or from a bottom half handler: |
124 | * | ||
125 | * CPU A CPU B | ||
126 | * Disable interrupts | ||
127 | * smp_call_function() | ||
128 | * Take call_lock | ||
129 | * Send IPIs | ||
130 | * Wait for all cpus to acknowledge IPI | ||
131 | * CPU A has not responded, spin waiting | ||
132 | * for cpu A to respond, holding call_lock | ||
133 | * smp_call_function() | ||
134 | * Spin waiting for call_lock | ||
135 | * Deadlock Deadlock | ||
125 | */ | 136 | */ |
126 | int smp_call_function (void (*func) (void *info), void *info, int retry, | 137 | int smp_call_function (void (*func) (void *info), void *info, int retry, |
127 | int wait) | 138 | int wait) |
@@ -130,6 +141,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry, | |||
130 | int i, cpus = num_online_cpus() - 1; | 141 | int i, cpus = num_online_cpus() - 1; |
131 | int cpu = smp_processor_id(); | 142 | int cpu = smp_processor_id(); |
132 | 143 | ||
144 | /* | ||
145 | * Can die spectacularly if this CPU isn't yet marked online | ||
146 | */ | ||
147 | BUG_ON(!cpu_online(cpu)); | ||
148 | |||
133 | if (!cpus) | 149 | if (!cpus) |
134 | return 0; | 150 | return 0; |
135 | 151 | ||
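The new comment spells out why smp_call_function() must not be called with interrupts disabled: each CPU ends up holding something the other needs, call_lock on one side and the ability to take the IPI on the other. A user-space model of the same ordering problem, with threads standing in for CPUs, an atomic flag standing in for "interrupts disabled", and bounded spinning so the demo reports the would-be deadlock instead of hanging (build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int irq_off[2];           /* "CPU i runs with IRQs disabled" */
    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_barrier_t barrier;

    /* Model of smp_call_function(): take call_lock, then wait for the peer
     * "CPU" to acknowledge the IPI -- which it cannot do with IRQs off. */
    static int cross_call(int peer)
    {
        pthread_mutex_lock(&call_lock);
        for (int tries = 0; tries < 200; tries++) {
            if (!atomic_load(&irq_off[peer])) {
                pthread_mutex_unlock(&call_lock);
                return 0;                   /* peer answered */
            }
            usleep(5000);
        }
        pthread_mutex_unlock(&call_lock);
        return -1;                          /* would have spun forever */
    }

    static void *cpu(void *arg)
    {
        int self = (int)(long)arg, peer = !self;

        atomic_store(&irq_off[self], 1);    /* local_irq_disable() */
        pthread_barrier_wait(&barrier);     /* both CPUs now have IRQs off */
        if (cross_call(peer))
            printf("CPU %d: smp_call_function() with IRQs off -> deadlock\n", self);
        pthread_barrier_wait(&barrier);
        atomic_store(&irq_off[self], 0);    /* local_irq_enable() */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];
        pthread_barrier_init(&barrier, NULL, 2);
        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, cpu, (void *)i);
        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }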
@@ -214,7 +230,6 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
214 | /* called from main before smp_init() */ | 230 | /* called from main before smp_init() */ |
215 | void __init smp_prepare_cpus(unsigned int max_cpus) | 231 | void __init smp_prepare_cpus(unsigned int max_cpus) |
216 | { | 232 | { |
217 | cpu_data[0].udelay_val = loops_per_jiffy; | ||
218 | init_new_context(current, &init_mm); | 233 | init_new_context(current, &init_mm); |
219 | current_thread_info()->cpu = 0; | 234 | current_thread_info()->cpu = 0; |
220 | smp_tune_scheduling(); | 235 | smp_tune_scheduling(); |
@@ -236,23 +251,28 @@ void __devinit smp_prepare_boot_cpu(void) | |||
236 | } | 251 | } |
237 | 252 | ||
238 | /* | 253 | /* |
239 | * Startup the CPU with this logical number | 254 | * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu |
255 | * and keep control until "cpu_online(cpu)" is set. Note: cpu is | ||
256 | * physical, not logical. | ||
240 | */ | 257 | */ |
241 | static int __init do_boot_cpu(int cpu) | 258 | int __devinit __cpu_up(unsigned int cpu) |
242 | { | 259 | { |
243 | struct task_struct *idle; | 260 | struct task_struct *idle; |
244 | 261 | ||
245 | /* | 262 | /* |
263 | * Processor goes to start_secondary(), sets online flag | ||
246 | * The following code is purely to make sure | 264 | * The following code is purely to make sure |
247 | * Linux can schedule processes on this slave. | 265 | * Linux can schedule processes on this slave. |
248 | */ | 266 | */ |
249 | idle = fork_idle(cpu); | 267 | idle = fork_idle(cpu); |
250 | if (IS_ERR(idle)) | 268 | if (IS_ERR(idle)) |
251 | panic("failed fork for CPU %d\n", cpu); | 269 | panic(KERN_ERR "Fork failed for CPU %d", cpu); |
252 | 270 | ||
253 | prom_boot_secondary(cpu, idle); | 271 | prom_boot_secondary(cpu, idle); |
254 | 272 | ||
255 | /* XXXKW timeout */ | 273 | /* |
274 | * Trust is futile. We should really have timeouts ... | ||
275 | */ | ||
256 | while (!cpu_isset(cpu, cpu_callin_map)) | 276 | while (!cpu_isset(cpu, cpu_callin_map)) |
257 | udelay(100); | 277 | udelay(100); |
258 | 278 | ||
@@ -261,23 +281,6 @@ static int __init do_boot_cpu(int cpu) | |||
261 | return 0; | 281 | return 0; |
262 | } | 282 | } |
263 | 283 | ||
264 | /* | ||
265 | * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu | ||
266 | * and keep control until "cpu_online(cpu)" is set. Note: cpu is | ||
267 | * physical, not logical. | ||
268 | */ | ||
269 | int __devinit __cpu_up(unsigned int cpu) | ||
270 | { | ||
271 | int ret; | ||
272 | |||
273 | /* Processor goes to start_secondary(), sets online flag */ | ||
274 | ret = do_boot_cpu(cpu); | ||
275 | if (ret < 0) | ||
276 | return ret; | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /* Not really SMP stuff ... */ | 284 | /* Not really SMP stuff ... */ |
282 | int setup_profiling_timer(unsigned int multiplier) | 285 | int setup_profiling_timer(unsigned int multiplier) |
283 | { | 286 | { |
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c new file mode 100644 index 000000000000..d429544ba4bc --- /dev/null +++ b/arch/mips/kernel/smp_mt.c | |||
@@ -0,0 +1,366 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * Elizabeth Clarke (beth@mips.com) | ||
5 | * | ||
6 | * This program is free software; you can distribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License (Version 2) as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
13 | * for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
18 | * | ||
19 | */ | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/cpumask.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/compiler.h> | ||
25 | |||
26 | #include <asm/atomic.h> | ||
27 | #include <asm/cpu.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/system.h> | ||
30 | #include <asm/hardirq.h> | ||
31 | #include <asm/mmu_context.h> | ||
32 | #include <asm/smp.h> | ||
33 | #include <asm/time.h> | ||
34 | #include <asm/mipsregs.h> | ||
35 | #include <asm/mipsmtregs.h> | ||
36 | #include <asm/cacheflush.h> | ||
37 | #include <asm/mips-boards/maltaint.h> | ||
38 | |||
39 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 | ||
40 | #define MIPS_CPU_IPI_CALL_IRQ 1 | ||
41 | |||
42 | static int cpu_ipi_resched_irq, cpu_ipi_call_irq; | ||
43 | |||
44 | #if 0 | ||
45 | static void dump_mtregisters(int vpe, int tc) | ||
46 | { | ||
47 | printk("vpe %d tc %d\n", vpe, tc); | ||
48 | |||
49 | settc(tc); | ||
50 | |||
51 | printk(" c0 status 0x%lx\n", read_vpe_c0_status()); | ||
52 | printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol()); | ||
53 | printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0()); | ||
54 | printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus()); | ||
55 | printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart()); | ||
56 | printk(" tcbind 0x%lx\n", read_tc_c0_tcbind()); | ||
57 | printk(" tchalt 0x%lx\n", read_tc_c0_tchalt()); | ||
58 | } | ||
59 | #endif | ||
60 | |||
61 | void __init sanitize_tlb_entries(void) | ||
62 | { | ||
63 | int i, tlbsiz; | ||
64 | unsigned long mvpconf0, ncpu; | ||
65 | |||
66 | if (!cpu_has_mipsmt) | ||
67 | return; | ||
68 | |||
69 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
70 | |||
71 | /* Disable TLB sharing */ | ||
72 | clear_c0_mvpcontrol(MVPCONTROL_STLB); | ||
73 | |||
74 | mvpconf0 = read_c0_mvpconf0(); | ||
75 | |||
76 | printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0, | ||
77 | (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT, | ||
78 | (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT); | ||
79 | |||
80 | tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT; | ||
81 | ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
82 | |||
83 | printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu); | ||
84 | |||
85 | if (tlbsiz > 0) { | ||
86 | /* share them out across the vpe's */ | ||
87 | tlbsiz /= ncpu; | ||
88 | |||
89 | printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz); | ||
90 | |||
91 | for (i = 0; i < ncpu; i++) { | ||
92 | settc(i); | ||
93 | |||
94 | if (i == 0) | ||
95 | write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25)); | ||
96 | else | ||
97 | write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) | | ||
98 | (tlbsiz << 25)); | ||
99 | } | ||
100 | } | ||
101 | |||
102 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
103 | } | ||
104 | |||
105 | #if 0 | ||
106 | /* | ||
107 | * Use c0_MVPConf0 to find out how many CPUs are available, setting up | ||
108 | * phys_cpu_present_map and the logical/physical mappings. | ||
109 | */ | ||
110 | void __init prom_build_cpu_map(void) | ||
111 | { | ||
112 | int i, num, ncpus; | ||
113 | |||
114 | cpus_clear(phys_cpu_present_map); | ||
115 | |||
116 | /* assume we boot on cpu 0.... */ | ||
117 | cpu_set(0, phys_cpu_present_map); | ||
118 | __cpu_number_map[0] = 0; | ||
119 | __cpu_logical_map[0] = 0; | ||
120 | |||
121 | if (cpu_has_mipsmt) { | ||
122 | ncpus = ((read_c0_mvpconf0() & (MVPCONF0_PVPE)) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
123 | for (i=1, num=0; i< NR_CPUS && i<ncpus; i++) { | ||
124 | cpu_set(i, phys_cpu_present_map); | ||
125 | __cpu_number_map[i] = ++num; | ||
126 | __cpu_logical_map[num] = i; | ||
127 | } | ||
128 | |||
129 | printk(KERN_INFO "%i available secondary CPU(s)\n", num); | ||
130 | } | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | static void ipi_resched_dispatch (struct pt_regs *regs) | ||
135 | { | ||
136 | do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); | ||
137 | } | ||
138 | |||
139 | static void ipi_call_dispatch (struct pt_regs *regs) | ||
140 | { | ||
141 | do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs); | ||
142 | } | ||
143 | |||
144 | irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
145 | { | ||
146 | return IRQ_HANDLED; | ||
147 | } | ||
148 | |||
149 | irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
150 | { | ||
151 | smp_call_function_interrupt(); | ||
152 | |||
153 | return IRQ_HANDLED; | ||
154 | } | ||
155 | |||
156 | static struct irqaction irq_resched = { | ||
157 | .handler = ipi_resched_interrupt, | ||
158 | .flags = SA_INTERRUPT, | ||
159 | .name = "IPI_resched" | ||
160 | }; | ||
161 | |||
162 | static struct irqaction irq_call = { | ||
163 | .handler = ipi_call_interrupt, | ||
164 | .flags = SA_INTERRUPT, | ||
165 | .name = "IPI_call" | ||
166 | }; | ||
167 | |||
168 | /* | ||
169 | * Common setup before any secondaries are started | ||
170 | * Make sure all CPU's are in a sensible state before we boot any of the | ||
171 | * secondarys | ||
172 | */ | ||
173 | void prom_prepare_cpus(unsigned int max_cpus) | ||
174 | { | ||
175 | unsigned long val; | ||
176 | int i, num; | ||
177 | |||
178 | if (!cpu_has_mipsmt) | ||
179 | return; | ||
180 | |||
181 | /* disable MT so we can configure */ | ||
182 | dvpe(); | ||
183 | dmt(); | ||
184 | |||
185 | /* Put MVPE's into 'configuration state' */ | ||
186 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
187 | |||
188 | val = read_c0_mvpconf0(); | ||
189 | |||
190 | /* we'll always have more TC's than VPE's, so loop setting everything | ||
191 | to a sensible state */ | ||
192 | for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) { | ||
193 | settc(i); | ||
194 | |||
195 | /* VPE's */ | ||
196 | if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) { | ||
197 | |||
198 | /* deactivate all but vpe0 */ | ||
199 | if (i != 0) { | ||
200 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
201 | |||
202 | tmp &= ~VPECONF0_VPA; | ||
203 | |||
204 | /* master VPE */ | ||
205 | tmp |= VPECONF0_MVP; | ||
206 | write_vpe_c0_vpeconf0(tmp); | ||
207 | |||
208 | /* Record this as available CPU */ | ||
209 | if (i < max_cpus) { | ||
210 | cpu_set(i, phys_cpu_present_map); | ||
211 | __cpu_number_map[i] = ++num; | ||
212 | __cpu_logical_map[num] = i; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | /* disable multi-threading with TC's */ | ||
217 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | ||
218 | |||
219 | if (i != 0) { | ||
220 | write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); | ||
221 | write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP); | ||
222 | |||
223 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
224 | write_vpe_c0_config( read_c0_config()); | ||
225 | } | ||
226 | |||
227 | } | ||
228 | |||
229 | /* TC's */ | ||
230 | |||
231 | if (i != 0) { | ||
232 | unsigned long tmp; | ||
233 | |||
234 | /* bind a TC to each VPE, May as well put all excess TC's | ||
235 | on the last VPE */ | ||
236 | if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) ) | ||
237 | write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) ); | ||
238 | else { | ||
239 | write_tc_c0_tcbind( read_tc_c0_tcbind() | i); | ||
240 | |||
241 | /* and set XTC */ | ||
242 | write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT)); | ||
243 | } | ||
244 | |||
245 | tmp = read_tc_c0_tcstatus(); | ||
246 | |||
247 | /* mark not allocated and not dynamically allocatable */ | ||
248 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
249 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
250 | write_tc_c0_tcstatus(tmp); | ||
251 | |||
252 | write_tc_c0_tchalt(TCHALT_H); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /* Release config state */ | ||
257 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
258 | |||
259 | /* We'll wait until starting the secondaries before starting MVPE */ | ||
260 | |||
261 | printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); | ||
262 | |||
263 | /* set up ipi interrupts */ | ||
264 | if (cpu_has_vint) { | ||
265 | set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); | ||
266 | set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); | ||
267 | } | ||
268 | |||
269 | cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; | ||
270 | cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; | ||
271 | |||
272 | setup_irq(cpu_ipi_resched_irq, &irq_resched); | ||
273 | setup_irq(cpu_ipi_call_irq, &irq_call); | ||
274 | |||
275 | /* need to mark IPI's as IRQ_PER_CPU */ | ||
276 | irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU; | ||
277 | irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU; | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * Setup the PC, SP, and GP of a secondary processor and start it | ||
282 | * running! | ||
283 | * smp_bootstrap is the place to resume from | ||
284 | * __KSTK_TOS(idle) is apparently the stack pointer | ||
285 | * (unsigned long)idle->thread_info the gp | ||
286 | * assumes a 1:1 mapping of TC => VPE | ||
287 | */ | ||
288 | void prom_boot_secondary(int cpu, struct task_struct *idle) | ||
289 | { | ||
290 | dvpe(); | ||
291 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
292 | |||
293 | settc(cpu); | ||
294 | |||
295 | /* restart */ | ||
296 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
297 | |||
298 | /* enable the tc this vpe/cpu will be running */ | ||
299 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A); | ||
300 | |||
301 | write_tc_c0_tchalt(0); | ||
302 | |||
303 | /* enable the VPE */ | ||
304 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
305 | |||
306 | /* stack pointer */ | ||
307 | write_tc_gpr_sp( __KSTK_TOS(idle)); | ||
308 | |||
309 | /* global pointer */ | ||
310 | write_tc_gpr_gp((unsigned long)idle->thread_info); | ||
311 | |||
312 | flush_icache_range((unsigned long)idle->thread_info, | ||
313 | (unsigned long)idle->thread_info + | ||
314 | sizeof(struct thread_info)); | ||
315 | |||
316 | /* finally out of configuration and into chaos */ | ||
317 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
318 | |||
319 | evpe(EVPE_ENABLE); | ||
320 | } | ||
321 | |||
322 | void prom_init_secondary(void) | ||
323 | { | ||
324 | write_c0_status((read_c0_status() & ~ST0_IM ) | | ||
325 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7)); | ||
326 | } | ||
327 | |||
328 | void prom_smp_finish(void) | ||
329 | { | ||
330 | write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); | ||
331 | |||
332 | local_irq_enable(); | ||
333 | } | ||
334 | |||
335 | void prom_cpus_done(void) | ||
336 | { | ||
337 | } | ||
338 | |||
339 | void core_send_ipi(int cpu, unsigned int action) | ||
340 | { | ||
341 | int i; | ||
342 | unsigned long flags; | ||
343 | int vpflags; | ||
344 | |||
345 | local_irq_save (flags); | ||
346 | |||
347 | vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ | ||
348 | |||
349 | switch (action) { | ||
350 | case SMP_CALL_FUNCTION: | ||
351 | i = C_SW1; | ||
352 | break; | ||
353 | |||
354 | case SMP_RESCHEDULE_YOURSELF: | ||
355 | default: | ||
356 | i = C_SW0; | ||
357 | break; | ||
358 | } | ||
359 | |||
360 | /* 1:1 mapping of vpe and tc... */ | ||
361 | settc(cpu); | ||
362 | write_vpe_c0_cause(read_vpe_c0_cause() | i); | ||
363 | evpe(vpflags); | ||
364 | |||
365 | local_irq_restore(flags); | ||
366 | } | ||
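Most of the new smp_mt.c is bit-twiddling on the MT ASE control registers: prom_prepare_cpus() reads MVPConf0 and derives the VPE count and spare TLB entries from its PVPE/PTLBE fields, and sanitize_tlb_entries() splits the shared TLB across VPEs before writing each VPE's Config1.MMU_size. The arithmetic is plain mask-shift-divide; a stand-alone sketch with made-up register contents, where the field positions are assumptions for the demo rather than values taken from mipsmtregs.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed MVPConf0 layout for this demo: PVPE in bits 13..10,
     * PTLBE in bits 25..16 (check mipsmtregs.h for the real definitions). */
    #define MVPCONF0_PVPE_SHIFT   10
    #define MVPCONF0_PVPE         (0xf << MVPCONF0_PVPE_SHIFT)
    #define MVPCONF0_PTLBE_SHIFT  16
    #define MVPCONF0_PTLBE        (0x3ff << MVPCONF0_PTLBE_SHIFT)

    int main(void)
    {
        /* Pretend the core reported 1 extra VPE and 64 spare TLB entries. */
        uint32_t mvpconf0 = (1u << MVPCONF0_PVPE_SHIFT) | (64u << MVPCONF0_PTLBE_SHIFT);

        unsigned ncpu   = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
        unsigned tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;

        printf("VPEs: %u, spare TLB entries: %u\n", ncpu, tlbsiz);
        if (tlbsiz > 0)
            printf("per-VPE Config1.MMU_size would become %u\n", tlbsiz / ncpu);
        return 0;
    }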
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 21e3e13a4b44..ee98eeb65e85 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | 8 | * Copyright (C) 2001 MIPS Technologies, Inc. |
9 | */ | 9 | */ |
10 | #include <linux/config.h> | ||
10 | #include <linux/a.out.h> | 11 | #include <linux/a.out.h> |
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
@@ -26,6 +27,7 @@ | |||
26 | #include <linux/msg.h> | 27 | #include <linux/msg.h> |
27 | #include <linux/shm.h> | 28 | #include <linux/shm.h> |
28 | #include <linux/compiler.h> | 29 | #include <linux/compiler.h> |
30 | #include <linux/module.h> | ||
29 | 31 | ||
30 | #include <asm/branch.h> | 32 | #include <asm/branch.h> |
31 | #include <asm/cachectl.h> | 33 | #include <asm/cachectl.h> |
@@ -56,6 +58,8 @@ out: | |||
56 | 58 | ||
57 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | 59 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ |
58 | 60 | ||
61 | EXPORT_SYMBOL(shm_align_mask); | ||
62 | |||
59 | #define COLOUR_ALIGN(addr,pgoff) \ | 63 | #define COLOUR_ALIGN(addr,pgoff) \ |
60 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | 64 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ |
61 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | 65 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) |
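shm_align_mask is now exported so modules can honour the same cache-colouring rule as the mmap code: COLOUR_ALIGN() rounds a candidate address up to the aliasing boundary and then adds the page-offset's colour bits, so the mapping lands on the same cache colour as the file offset. The same arithmetic spelled out, with an assumed 16 KB aliasing distance and 4 KB pages (the default mask in this file is just PAGE_SIZE - 1):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define SHM_ALIGN_MASK  0x3fffUL        /* assumed 16 KB aliasing distance */

    /* Same expression as the kernel macro, applied to one candidate address. */
    static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
    {
        return (((addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +
                ((pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK));
    }

    int main(void)
    {
        unsigned long addr  = 0x2000a123UL; /* arbitrary candidate address */
        unsigned long pgoff = 3;            /* file offset of 3 pages */

        printf("candidate 0x%lx, pgoff %lu -> 0x%lx\n",
               addr, pgoff, colour_align(addr, pgoff));
        return 0;
    }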
@@ -173,14 +177,28 @@ _sys_clone(nabi_no_regargs struct pt_regs regs) | |||
173 | { | 177 | { |
174 | unsigned long clone_flags; | 178 | unsigned long clone_flags; |
175 | unsigned long newsp; | 179 | unsigned long newsp; |
176 | int *parent_tidptr, *child_tidptr; | 180 | int __user *parent_tidptr, *child_tidptr; |
177 | 181 | ||
178 | clone_flags = regs.regs[4]; | 182 | clone_flags = regs.regs[4]; |
179 | newsp = regs.regs[5]; | 183 | newsp = regs.regs[5]; |
180 | if (!newsp) | 184 | if (!newsp) |
181 | newsp = regs.regs[29]; | 185 | newsp = regs.regs[29]; |
182 | parent_tidptr = (int *) regs.regs[6]; | 186 | parent_tidptr = (int __user *) regs.regs[6]; |
183 | child_tidptr = (int *) regs.regs[7]; | 187 | #ifdef CONFIG_32BIT |
188 | /* We need to fetch the fifth argument off the stack. */ | ||
189 | child_tidptr = NULL; | ||
190 | if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { | ||
191 | int __user *__user *usp = (int __user *__user *) regs.regs[29]; | ||
192 | if (regs.regs[2] == __NR_syscall) { | ||
193 | if (get_user (child_tidptr, &usp[5])) | ||
194 | return -EFAULT; | ||
195 | } | ||
196 | else if (get_user (child_tidptr, &usp[4])) | ||
197 | return -EFAULT; | ||
198 | } | ||
199 | #else | ||
200 | child_tidptr = (int __user *) regs.regs[8]; | ||
201 | #endif | ||
184 | return do_fork(clone_flags, newsp, ®s, 0, | 202 | return do_fork(clone_flags, newsp, ®s, 0, |
185 | parent_tidptr, child_tidptr); | 203 | parent_tidptr, child_tidptr); |
186 | } | 204 | } |
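On o32 only four syscall arguments travel in registers, so _sys_clone() now digs the fifth argument (child_tidptr) out of the user stack -- usp[4] normally, usp[5] when the call came in through the indirect __NR_syscall path -- and only when a CLONE_CHILD_* flag actually asks for it. What that pointer is used for is easiest to see from user space; a small Linux-specific demo using the glibc clone() wrapper (CLONE_VM is only there so the kernel's store is visible to the parent in this sketch):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pid_t child_tid;                 /* kernel stores the child's TID here */

    static int child_fn(void *arg)
    {
        (void)arg;
        return 0;
    }

    int main(void)
    {
        const size_t stack_size = 1024 * 1024;
        char *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
        if (stack == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* CLONE_CHILD_SETTID: the kernel writes the new task's TID to &child_tid,
         * the very pointer the MIPS o32 path fetches from the user stack. */
        pid_t pid = clone(child_fn, stack + stack_size,
                          CLONE_VM | CLONE_CHILD_SETTID | SIGCHLD, NULL,
                          NULL, NULL, &child_tid);
        if (pid < 0) {
            perror("clone");
            return 1;
        }

        waitpid(pid, NULL, 0);
        printf("clone() returned %d, kernel wrote child_tid = %d\n",
               (int)pid, (int)child_tid);
        return 0;
    }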
@@ -242,6 +260,16 @@ asmlinkage int sys_olduname(struct oldold_utsname * name) | |||
242 | return error; | 260 | return error; |
243 | } | 261 | } |
244 | 262 | ||
263 | void sys_set_thread_area(unsigned long addr) | ||
264 | { | ||
265 | struct thread_info *ti = current->thread_info; | ||
266 | |||
267 | ti->tp_value = addr; | ||
268 | |||
269 | /* If some future MIPS implementation has this register in hardware, | ||
270 | * we will need to update it here (and in context switches). */ | ||
271 | } | ||
272 | |||
245 | asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) | 273 | asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) |
246 | { | 274 | { |
247 | int tmp, len; | 275 | int tmp, len; |
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c index 7ae4af476974..52924f8ce23c 100644 --- a/arch/mips/kernel/sysirix.c +++ b/arch/mips/kernel/sysirix.c | |||
@@ -73,32 +73,30 @@ asmlinkage int irix_sysmp(struct pt_regs *regs) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | /* The prctl commands. */ | 75 | /* The prctl commands. */ |
76 | #define PR_MAXPROCS 1 /* Tasks/user. */ | 76 | #define PR_MAXPROCS 1 /* Tasks/user. */ |
77 | #define PR_ISBLOCKED 2 /* If blocked, return 1. */ | 77 | #define PR_ISBLOCKED 2 /* If blocked, return 1. */ |
78 | #define PR_SETSTACKSIZE 3 /* Set largest task stack size. */ | 78 | #define PR_SETSTACKSIZE 3 /* Set largest task stack size. */ |
79 | #define PR_GETSTACKSIZE 4 /* Get largest task stack size. */ | 79 | #define PR_GETSTACKSIZE 4 /* Get largest task stack size. */ |
80 | #define PR_MAXPPROCS 5 /* Num parallel tasks. */ | 80 | #define PR_MAXPPROCS 5 /* Num parallel tasks. */ |
81 | #define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */ | 81 | #define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */ |
82 | #define PR_SETEXITSIG 8 /* When task exit's, set signal. */ | 82 | #define PR_SETEXITSIG 8 /* When task exit's, set signal. */ |
83 | #define PR_RESIDENT 9 /* Make task unswappable. */ | 83 | #define PR_RESIDENT 9 /* Make task unswappable. */ |
84 | #define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */ | 84 | #define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */ |
85 | #define PR_DETACHADDR 11 /* Disconnect a vma from a task. */ | 85 | #define PR_DETACHADDR 11 /* Disconnect a vma from a task. */ |
86 | #define PR_TERMCHILD 12 /* When parent sleeps with fishes, kill child. */ | 86 | #define PR_TERMCHILD 12 /* Kill child if the parent dies. */ |
87 | #define PR_GETSHMASK 13 /* Get the sproc() share mask. */ | 87 | #define PR_GETSHMASK 13 /* Get the sproc() share mask. */ |
88 | #define PR_GETNSHARE 14 /* Number of share group members. */ | 88 | #define PR_GETNSHARE 14 /* Number of share group members. */ |
89 | #define PR_COREPID 15 /* Add task pid to name when it core. */ | 89 | #define PR_COREPID 15 /* Add task pid to name when it core. */ |
90 | #define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */ | 90 | #define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */ |
91 | #define PR_PTHREADEXIT 17 /* Kill a pthread without prejudice. */ | 91 | #define PR_PTHREADEXIT 17 /* Kill a pthread, only for IRIX 6.[234] */ |
92 | 92 | ||
93 | asmlinkage int irix_prctl(struct pt_regs *regs) | 93 | asmlinkage int irix_prctl(unsigned option, ...) |
94 | { | 94 | { |
95 | unsigned long cmd; | 95 | va_list args; |
96 | int error = 0, base = 0; | 96 | int error = 0; |
97 | 97 | ||
98 | if (regs->regs[2] == 1000) | 98 | va_start(args, option); |
99 | base = 1; | 99 | switch (option) { |
100 | cmd = regs->regs[base + 4]; | ||
101 | switch (cmd) { | ||
102 | case PR_MAXPROCS: | 100 | case PR_MAXPROCS: |
103 | printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n", | 101 | printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n", |
104 | current->comm, current->pid); | 102 | current->comm, current->pid); |
@@ -111,7 +109,7 @@ asmlinkage int irix_prctl(struct pt_regs *regs) | |||
111 | printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n", | 109 | printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n", |
112 | current->comm, current->pid); | 110 | current->comm, current->pid); |
113 | read_lock(&tasklist_lock); | 111 | read_lock(&tasklist_lock); |
114 | task = find_task_by_pid(regs->regs[base + 5]); | 112 | task = find_task_by_pid(va_arg(args, pid_t)); |
115 | error = -ESRCH; | 113 | error = -ESRCH; |
116 | if (error) | 114 | if (error) |
117 | error = (task->run_list.next != NULL); | 115 | error = (task->run_list.next != NULL); |
@@ -121,7 +119,7 @@ asmlinkage int irix_prctl(struct pt_regs *regs) | |||
121 | } | 119 | } |
122 | 120 | ||
123 | case PR_SETSTACKSIZE: { | 121 | case PR_SETSTACKSIZE: { |
124 | long value = regs->regs[base + 5]; | 122 | long value = va_arg(args, long); |
125 | 123 | ||
126 | printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n", | 124 | printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n", |
127 | current->comm, current->pid, (unsigned long) value); | 125 | current->comm, current->pid, (unsigned long) value); |
@@ -222,24 +220,20 @@ asmlinkage int irix_prctl(struct pt_regs *regs) | |||
222 | error = -EINVAL; | 220 | error = -EINVAL; |
223 | break; | 221 | break; |
224 | 222 | ||
225 | case PR_PTHREADEXIT: | ||
226 | printk("irix_prctl[%s:%d]: Wants PR_PTHREADEXIT\n", | ||
227 | current->comm, current->pid); | ||
228 | do_exit(regs->regs[base + 5]); | ||
229 | |||
230 | default: | 223 | default: |
231 | printk("irix_prctl[%s:%d]: Non-existant opcode %d\n", | 224 | printk("irix_prctl[%s:%d]: Non-existant opcode %d\n", |
232 | current->comm, current->pid, (int)cmd); | 225 | current->comm, current->pid, option); |
233 | error = -EINVAL; | 226 | error = -EINVAL; |
234 | break; | 227 | break; |
235 | } | 228 | } |
229 | va_end(args); | ||
236 | 230 | ||
237 | return error; | 231 | return error; |
238 | } | 232 | } |
239 | 233 | ||
240 | #undef DEBUG_PROCGRPS | 234 | #undef DEBUG_PROCGRPS |
241 | 235 | ||
242 | extern unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt); | 236 | extern unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt); |
243 | extern int getrusage(struct task_struct *p, int who, struct rusage __user *ru); | 237 | extern int getrusage(struct task_struct *p, int who, struct rusage __user *ru); |
244 | extern char *prom_getenv(char *name); | 238 | extern char *prom_getenv(char *name); |
245 | extern long prom_setenv(char *name, char *value); | 239 | extern long prom_setenv(char *name, char *value); |
@@ -276,23 +270,19 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
276 | cmd = regs->regs[base + 4]; | 270 | cmd = regs->regs[base + 4]; |
277 | switch(cmd) { | 271 | switch(cmd) { |
278 | case SGI_SYSID: { | 272 | case SGI_SYSID: { |
279 | char *buf = (char *) regs->regs[base + 5]; | 273 | char __user *buf = (char __user *) regs->regs[base + 5]; |
280 | 274 | ||
281 | /* XXX Use ethernet addr.... */ | 275 | /* XXX Use ethernet addr.... */ |
282 | retval = clear_user(buf, 64); | 276 | retval = clear_user(buf, 64) ? -EFAULT : 0; |
283 | break; | 277 | break; |
284 | } | 278 | } |
285 | #if 0 | 279 | #if 0 |
286 | case SGI_RDNAME: { | 280 | case SGI_RDNAME: { |
287 | int pid = (int) regs->regs[base + 5]; | 281 | int pid = (int) regs->regs[base + 5]; |
288 | char *buf = (char *) regs->regs[base + 6]; | 282 | char __user *buf = (char __user *) regs->regs[base + 6]; |
289 | struct task_struct *p; | 283 | struct task_struct *p; |
290 | char tcomm[sizeof(current->comm)]; | 284 | char tcomm[sizeof(current->comm)]; |
291 | 285 | ||
292 | if (!access_ok(VERIFY_WRITE, buf, sizeof(tcomm))) { | ||
293 | retval = -EFAULT; | ||
294 | break; | ||
295 | } | ||
296 | read_lock(&tasklist_lock); | 286 | read_lock(&tasklist_lock); |
297 | p = find_task_by_pid(pid); | 287 | p = find_task_by_pid(pid); |
298 | if (!p) { | 288 | if (!p) { |
@@ -304,34 +294,28 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
304 | read_unlock(&tasklist_lock); | 294 | read_unlock(&tasklist_lock); |
305 | 295 | ||
306 | /* XXX Need to check sizes. */ | 296 | /* XXX Need to check sizes. */ |
307 | copy_to_user(buf, tcomm, sizeof(tcomm)); | 297 | retval = copy_to_user(buf, tcomm, sizeof(tcomm)) ? -EFAULT : 0; |
308 | retval = 0; | ||
309 | break; | 298 | break; |
310 | } | 299 | } |
311 | 300 | ||
312 | case SGI_GETNVRAM: { | 301 | case SGI_GETNVRAM: { |
313 | char *name = (char *) regs->regs[base+5]; | 302 | char __user *name = (char __user *) regs->regs[base+5]; |
314 | char *buf = (char *) regs->regs[base+6]; | 303 | char __user *buf = (char __user *) regs->regs[base+6]; |
315 | char *value; | 304 | char *value; |
316 | return -EINVAL; /* til I fix it */ | 305 | return -EINVAL; /* til I fix it */ |
317 | if (!access_ok(VERIFY_WRITE, buf, 128)) { | ||
318 | retval = -EFAULT; | ||
319 | break; | ||
320 | } | ||
321 | value = prom_getenv(name); /* PROM lock? */ | 306 | value = prom_getenv(name); /* PROM lock? */ |
322 | if (!value) { | 307 | if (!value) { |
323 | retval = -EINVAL; | 308 | retval = -EINVAL; |
324 | break; | 309 | break; |
325 | } | 310 | } |
326 | /* Do I strlen() for the length? */ | 311 | /* Do I strlen() for the length? */ |
327 | copy_to_user(buf, value, 128); | 312 | retval = copy_to_user(buf, value, 128) ? -EFAULT : 0; |
328 | retval = 0; | ||
329 | break; | 313 | break; |
330 | } | 314 | } |
331 | 315 | ||
332 | case SGI_SETNVRAM: { | 316 | case SGI_SETNVRAM: { |
333 | char *name = (char *) regs->regs[base+5]; | 317 | char __user *name = (char __user *) regs->regs[base+5]; |
334 | char *value = (char *) regs->regs[base+6]; | 318 | char __user *value = (char __user *) regs->regs[base+6]; |
335 | return -EINVAL; /* til I fix it */ | 319 | return -EINVAL; /* til I fix it */ |
336 | retval = prom_setenv(name, value); | 320 | retval = prom_setenv(name, value); |
337 | /* XXX make sure retval conforms to syssgi(2) */ | 321 | /* XXX make sure retval conforms to syssgi(2) */ |
@@ -407,16 +391,16 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
407 | 391 | ||
408 | case SGI_SETGROUPS: | 392 | case SGI_SETGROUPS: |
409 | retval = sys_setgroups((int) regs->regs[base + 5], | 393 | retval = sys_setgroups((int) regs->regs[base + 5], |
410 | (gid_t *) regs->regs[base + 6]); | 394 | (gid_t __user *) regs->regs[base + 6]); |
411 | break; | 395 | break; |
412 | 396 | ||
413 | case SGI_GETGROUPS: | 397 | case SGI_GETGROUPS: |
414 | retval = sys_getgroups((int) regs->regs[base + 5], | 398 | retval = sys_getgroups((int) regs->regs[base + 5], |
415 | (gid_t *) regs->regs[base + 6]); | 399 | (gid_t __user *) regs->regs[base + 6]); |
416 | break; | 400 | break; |
417 | 401 | ||
418 | case SGI_RUSAGE: { | 402 | case SGI_RUSAGE: { |
419 | struct rusage *ru = (struct rusage *) regs->regs[base + 6]; | 403 | struct rusage __user *ru = (struct rusage __user *) regs->regs[base + 6]; |
420 | 404 | ||
421 | switch((int) regs->regs[base + 5]) { | 405 | switch((int) regs->regs[base + 5]) { |
422 | case 0: | 406 | case 0: |
@@ -453,7 +437,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
453 | 437 | ||
454 | case SGI_ELFMAP: | 438 | case SGI_ELFMAP: |
455 | retval = irix_mapelf((int) regs->regs[base + 5], | 439 | retval = irix_mapelf((int) regs->regs[base + 5], |
456 | (struct elf_phdr *) regs->regs[base + 6], | 440 | (struct elf_phdr __user *) regs->regs[base + 6], |
457 | (int) regs->regs[base + 7]); | 441 | (int) regs->regs[base + 7]); |
458 | break; | 442 | break; |
459 | 443 | ||
@@ -468,24 +452,24 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
468 | 452 | ||
469 | case SGI_PHYSP: { | 453 | case SGI_PHYSP: { |
470 | unsigned long addr = regs->regs[base + 5]; | 454 | unsigned long addr = regs->regs[base + 5]; |
471 | int *pageno = (int *) (regs->regs[base + 6]); | 455 | int __user *pageno = (int __user *) (regs->regs[base + 6]); |
472 | struct mm_struct *mm = current->mm; | 456 | struct mm_struct *mm = current->mm; |
473 | pgd_t *pgdp; | 457 | pgd_t *pgdp; |
458 | pud_t *pudp; | ||
474 | pmd_t *pmdp; | 459 | pmd_t *pmdp; |
475 | pte_t *ptep; | 460 | pte_t *ptep; |
476 | 461 | ||
477 | if (!access_ok(VERIFY_WRITE, pageno, sizeof(int))) | ||
478 | return -EFAULT; | ||
479 | |||
480 | down_read(&mm->mmap_sem); | 462 | down_read(&mm->mmap_sem); |
481 | pgdp = pgd_offset(mm, addr); | 463 | pgdp = pgd_offset(mm, addr); |
482 | pmdp = pmd_offset(pgdp, addr); | 464 | pudp = pud_offset(pgdp, addr); |
465 | pmdp = pmd_offset(pudp, addr); | ||
483 | ptep = pte_offset(pmdp, addr); | 466 | ptep = pte_offset(pmdp, addr); |
484 | retval = -EINVAL; | 467 | retval = -EINVAL; |
485 | if (ptep) { | 468 | if (ptep) { |
486 | pte_t pte = *ptep; | 469 | pte_t pte = *ptep; |
487 | 470 | ||
488 | if (pte_val(pte) & (_PAGE_VALID | _PAGE_PRESENT)) { | 471 | if (pte_val(pte) & (_PAGE_VALID | _PAGE_PRESENT)) { |
472 | /* b0rked on 64-bit */ | ||
489 | retval = put_user((pte_val(pte) & PAGE_MASK) >> | 473 | retval = put_user((pte_val(pte) & PAGE_MASK) >> |
490 | PAGE_SHIFT, pageno); | 474 | PAGE_SHIFT, pageno); |
491 | } | 475 | } |
@@ -496,7 +480,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs) | |||
496 | 480 | ||
497 | case SGI_INVENT: { | 481 | case SGI_INVENT: { |
498 | int arg1 = (int) regs->regs [base + 5]; | 482 | int arg1 = (int) regs->regs [base + 5]; |
499 | void *buffer = (void *) regs->regs [base + 6]; | 483 | void __user *buffer = (void __user *) regs->regs [base + 6]; |
500 | int count = (int) regs->regs [base + 7]; | 484 | int count = (int) regs->regs [base + 7]; |
501 | 485 | ||
502 | switch (arg1) { | 486 | switch (arg1) { |
@@ -692,8 +676,8 @@ asmlinkage int irix_pause(void) | |||
692 | } | 676 | } |
693 | 677 | ||
694 | /* XXX need more than this... */ | 678 | /* XXX need more than this... */ |
695 | asmlinkage int irix_mount(char *dev_name, char *dir_name, unsigned long flags, | 679 | asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name, |
696 | char *type, void *data, int datalen) | 680 | unsigned long flags, char __user *type, void __user *data, int datalen) |
697 | { | 681 | { |
698 | printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n", | 682 | printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n", |
699 | current->comm, current->pid, | 683 | current->comm, current->pid, |
@@ -708,8 +692,8 @@ struct irix_statfs { | |||
708 | char f_fname[6], f_fpack[6]; | 692 | char f_fname[6], f_fpack[6]; |
709 | }; | 693 | }; |
710 | 694 | ||
711 | asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf, | 695 | asmlinkage int irix_statfs(const char __user *path, |
712 | int len, int fs_type) | 696 | struct irix_statfs __user *buf, int len, int fs_type) |
713 | { | 697 | { |
714 | struct nameidata nd; | 698 | struct nameidata nd; |
715 | struct kstatfs kbuf; | 699 | struct kstatfs kbuf; |
@@ -724,6 +708,7 @@ asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf, | |||
724 | error = -EFAULT; | 708 | error = -EFAULT; |
725 | goto out; | 709 | goto out; |
726 | } | 710 | } |
711 | |||
727 | error = user_path_walk(path, &nd); | 712 | error = user_path_walk(path, &nd); |
728 | if (error) | 713 | if (error) |
729 | goto out; | 714 | goto out; |
@@ -732,18 +717,17 @@ asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf, | |||
732 | if (error) | 717 | if (error) |
733 | goto dput_and_out; | 718 | goto dput_and_out; |
734 | 719 | ||
735 | __put_user(kbuf.f_type, &buf->f_type); | 720 | error = __put_user(kbuf.f_type, &buf->f_type); |
736 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 721 | error |= __put_user(kbuf.f_bsize, &buf->f_bsize); |
737 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 722 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
738 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 723 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
739 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 724 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
740 | __put_user(kbuf.f_files, &buf->f_files); | 725 | error |= __put_user(kbuf.f_files, &buf->f_files); |
741 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 726 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
742 | for (i = 0; i < 6; i++) { | 727 | for (i = 0; i < 6; i++) { |
743 | __put_user(0, &buf->f_fname[i]); | 728 | error |= __put_user(0, &buf->f_fname[i]); |
744 | __put_user(0, &buf->f_fpack[i]); | 729 | error |= __put_user(0, &buf->f_fpack[i]); |
745 | } | 730 | } |
746 | error = 0; | ||
747 | 731 | ||
748 | dput_and_out: | 732 | dput_and_out: |
749 | path_release(&nd); | 733 | path_release(&nd); |
@@ -751,7 +735,7 @@ out: | |||
751 | return error; | 735 | return error; |
752 | } | 736 | } |
753 | 737 | ||
754 | asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf) | 738 | asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf) |
755 | { | 739 | { |
756 | struct kstatfs kbuf; | 740 | struct kstatfs kbuf; |
757 | struct file *file; | 741 | struct file *file; |
@@ -761,6 +745,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf) | |||
761 | error = -EFAULT; | 745 | error = -EFAULT; |
762 | goto out; | 746 | goto out; |
763 | } | 747 | } |
748 | |||
764 | if (!(file = fget(fd))) { | 749 | if (!(file = fget(fd))) { |
765 | error = -EBADF; | 750 | error = -EBADF; |
766 | goto out; | 751 | goto out; |
@@ -770,16 +755,17 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf) | |||
770 | if (error) | 755 | if (error) |
771 | goto out_f; | 756 | goto out_f; |
772 | 757 | ||
773 | __put_user(kbuf.f_type, &buf->f_type); | 758 | error = __put_user(kbuf.f_type, &buf->f_type); |
774 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 759 | error |= __put_user(kbuf.f_bsize, &buf->f_bsize); |
775 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 760 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
776 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 761 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
777 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 762 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
778 | __put_user(kbuf.f_files, &buf->f_files); | 763 | error |= __put_user(kbuf.f_files, &buf->f_files); |
779 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 764 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
780 | for(i = 0; i < 6; i++) { | 765 | |
781 | __put_user(0, &buf->f_fname[i]); | 766 | for (i = 0; i < 6; i++) { |
782 | __put_user(0, &buf->f_fpack[i]); | 767 | error |= __put_user(0, &buf->f_fname[i]); |
768 | error |= __put_user(0, &buf->f_fpack[i]); | ||
783 | } | 769 | } |
784 | 770 | ||
785 | out_f: | 771 | out_f: |
@@ -806,14 +792,15 @@ asmlinkage int irix_setpgrp(int flags) | |||
806 | return error; | 792 | return error; |
807 | } | 793 | } |
808 | 794 | ||
809 | asmlinkage int irix_times(struct tms * tbuf) | 795 | asmlinkage int irix_times(struct tms __user *tbuf) |
810 | { | 796 | { |
811 | int err = 0; | 797 | int err = 0; |
812 | 798 | ||
813 | if (tbuf) { | 799 | if (tbuf) { |
814 | if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf)) | 800 | if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf)) |
815 | return -EFAULT; | 801 | return -EFAULT; |
816 | err |= __put_user(current->utime, &tbuf->tms_utime); | 802 | |
803 | err = __put_user(current->utime, &tbuf->tms_utime); | ||
817 | err |= __put_user(current->stime, &tbuf->tms_stime); | 804 | err |= __put_user(current->stime, &tbuf->tms_stime); |
818 | err |= __put_user(current->signal->cutime, &tbuf->tms_cutime); | 805 | err |= __put_user(current->signal->cutime, &tbuf->tms_cutime); |
819 | err |= __put_user(current->signal->cstime, &tbuf->tms_cstime); | 806 | err |= __put_user(current->signal->cstime, &tbuf->tms_cstime); |
@@ -829,13 +816,13 @@ asmlinkage int irix_exec(struct pt_regs *regs) | |||
829 | 816 | ||
830 | if(regs->regs[2] == 1000) | 817 | if(regs->regs[2] == 1000) |
831 | base = 1; | 818 | base = 1; |
832 | filename = getname((char *) (long)regs->regs[base + 4]); | 819 | filename = getname((char __user *) (long)regs->regs[base + 4]); |
833 | error = PTR_ERR(filename); | 820 | error = PTR_ERR(filename); |
834 | if (IS_ERR(filename)) | 821 | if (IS_ERR(filename)) |
835 | return error; | 822 | return error; |
836 | 823 | ||
837 | error = do_execve(filename, (char **) (long)regs->regs[base + 5], | 824 | error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5], |
838 | (char **) 0, regs); | 825 | NULL, regs); |
839 | putname(filename); | 826 | putname(filename); |
840 | 827 | ||
841 | return error; | 828 | return error; |
@@ -848,12 +835,12 @@ asmlinkage int irix_exece(struct pt_regs *regs) | |||
848 | 835 | ||
849 | if (regs->regs[2] == 1000) | 836 | if (regs->regs[2] == 1000) |
850 | base = 1; | 837 | base = 1; |
851 | filename = getname((char *) (long)regs->regs[base + 4]); | 838 | filename = getname((char __user *) (long)regs->regs[base + 4]); |
852 | error = PTR_ERR(filename); | 839 | error = PTR_ERR(filename); |
853 | if (IS_ERR(filename)) | 840 | if (IS_ERR(filename)) |
854 | return error; | 841 | return error; |
855 | error = do_execve(filename, (char **) (long)regs->regs[base + 5], | 842 | error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5], |
856 | (char **) (long)regs->regs[base + 6], regs); | 843 | (char __user * __user *) (long)regs->regs[base + 6], regs); |
857 | putname(filename); | 844 | putname(filename); |
858 | 845 | ||
859 | return error; | 846 | return error; |
@@ -909,22 +896,17 @@ asmlinkage int irix_socket(int family, int type, int protocol) | |||
909 | return sys_socket(family, type, protocol); | 896 | return sys_socket(family, type, protocol); |
910 | } | 897 | } |
911 | 898 | ||
912 | asmlinkage int irix_getdomainname(char *name, int len) | 899 | asmlinkage int irix_getdomainname(char __user *name, int len) |
913 | { | 900 | { |
914 | int error; | 901 | int err; |
915 | |||
916 | if (!access_ok(VERIFY_WRITE, name, len)) | ||
917 | return -EFAULT; | ||
918 | 902 | ||
919 | down_read(&uts_sem); | 903 | down_read(&uts_sem); |
920 | if (len > __NEW_UTS_LEN) | 904 | if (len > __NEW_UTS_LEN) |
921 | len = __NEW_UTS_LEN; | 905 | len = __NEW_UTS_LEN; |
922 | error = 0; | 906 | err = copy_to_user(name, system_utsname.domainname, len) ? -EFAULT : 0; |
923 | if (copy_to_user(name, system_utsname.domainname, len)) | ||
924 | error = -EFAULT; | ||
925 | up_read(&uts_sem); | 907 | up_read(&uts_sem); |
926 | 908 | ||
927 | return error; | 909 | return err; |
928 | } | 910 | } |
929 | 911 | ||
930 | asmlinkage unsigned long irix_getpagesize(void) | 912 | asmlinkage unsigned long irix_getpagesize(void) |
@@ -940,12 +922,13 @@ asmlinkage int irix_msgsys(int opcode, unsigned long arg0, unsigned long arg1, | |||
940 | case 0: | 922 | case 0: |
941 | return sys_msgget((key_t) arg0, (int) arg1); | 923 | return sys_msgget((key_t) arg0, (int) arg1); |
942 | case 1: | 924 | case 1: |
943 | return sys_msgctl((int) arg0, (int) arg1, (struct msqid_ds *)arg2); | 925 | return sys_msgctl((int) arg0, (int) arg1, |
926 | (struct msqid_ds __user *)arg2); | ||
944 | case 2: | 927 | case 2: |
945 | return sys_msgrcv((int) arg0, (struct msgbuf *) arg1, | 928 | return sys_msgrcv((int) arg0, (struct msgbuf __user *) arg1, |
946 | (size_t) arg2, (long) arg3, (int) arg4); | 929 | (size_t) arg2, (long) arg3, (int) arg4); |
947 | case 3: | 930 | case 3: |
948 | return sys_msgsnd((int) arg0, (struct msgbuf *) arg1, | 931 | return sys_msgsnd((int) arg0, (struct msgbuf __user *) arg1, |
949 | (size_t) arg2, (int) arg3); | 932 | (size_t) arg2, (int) arg3); |
950 | default: | 933 | default: |
951 | return -EINVAL; | 934 | return -EINVAL; |
@@ -957,12 +940,13 @@ asmlinkage int irix_shmsys(int opcode, unsigned long arg0, unsigned long arg1, | |||
957 | { | 940 | { |
958 | switch (opcode) { | 941 | switch (opcode) { |
959 | case 0: | 942 | case 0: |
960 | return do_shmat((int) arg0, (char *)arg1, (int) arg2, | 943 | return do_shmat((int) arg0, (char __user *) arg1, (int) arg2, |
961 | (unsigned long *) arg3); | 944 | (unsigned long *) arg3); |
962 | case 1: | 945 | case 1: |
963 | return sys_shmctl((int)arg0, (int)arg1, (struct shmid_ds *)arg2); | 946 | return sys_shmctl((int)arg0, (int)arg1, |
947 | (struct shmid_ds __user *)arg2); | ||
964 | case 2: | 948 | case 2: |
965 | return sys_shmdt((char *)arg0); | 949 | return sys_shmdt((char __user *)arg0); |
966 | case 3: | 950 | case 3: |
967 | return sys_shmget((key_t) arg0, (int) arg1, (int) arg2); | 951 | return sys_shmget((key_t) arg0, (int) arg1, (int) arg2); |
968 | default: | 952 | default: |
@@ -980,7 +964,7 @@ asmlinkage int irix_semsys(int opcode, unsigned long arg0, unsigned long arg1, | |||
980 | case 1: | 964 | case 1: |
981 | return sys_semget((key_t) arg0, (int) arg1, (int) arg2); | 965 | return sys_semget((key_t) arg0, (int) arg1, (int) arg2); |
982 | case 2: | 966 | case 2: |
983 | return sys_semop((int) arg0, (struct sembuf *)arg1, | 967 | return sys_semop((int) arg0, (struct sembuf __user *)arg1, |
984 | (unsigned int) arg2); | 968 | (unsigned int) arg2); |
985 | default: | 969 | default: |
986 | return -EINVAL; | 970 | return -EINVAL; |
@@ -998,15 +982,16 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin) | |||
998 | lock_kernel(); | 982 | lock_kernel(); |
999 | retval = fn(file, offset, origin); | 983 | retval = fn(file, offset, origin); |
1000 | unlock_kernel(); | 984 | unlock_kernel(); |
985 | |||
1001 | return retval; | 986 | return retval; |
1002 | } | 987 | } |
1003 | 988 | ||
1004 | asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow, | 989 | asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow, |
1005 | int origin) | 990 | int origin) |
1006 | { | 991 | { |
1007 | int retval; | ||
1008 | struct file * file; | 992 | struct file * file; |
1009 | loff_t offset; | 993 | loff_t offset; |
994 | int retval; | ||
1010 | 995 | ||
1011 | retval = -EBADF; | 996 | retval = -EBADF; |
1012 | file = fget(fd); | 997 | file = fget(fd); |
@@ -1031,12 +1016,12 @@ asmlinkage int irix_sginap(int ticks) | |||
1031 | return 0; | 1016 | return 0; |
1032 | } | 1017 | } |
1033 | 1018 | ||
1034 | asmlinkage int irix_sgikopt(char *istring, char *ostring, int len) | 1019 | asmlinkage int irix_sgikopt(char __user *istring, char __user *ostring, int len) |
1035 | { | 1020 | { |
1036 | return -EINVAL; | 1021 | return -EINVAL; |
1037 | } | 1022 | } |
1038 | 1023 | ||
1039 | asmlinkage int irix_gettimeofday(struct timeval *tv) | 1024 | asmlinkage int irix_gettimeofday(struct timeval __user *tv) |
1040 | { | 1025 | { |
1041 | time_t sec; | 1026 | time_t sec; |
1042 | long nsec, seq; | 1027 | long nsec, seq; |
@@ -1077,7 +1062,7 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot, | |||
1077 | 1062 | ||
1078 | if (max_size > file->f_dentry->d_inode->i_size) { | 1063 | if (max_size > file->f_dentry->d_inode->i_size) { |
1079 | old_pos = sys_lseek (fd, max_size - 1, 0); | 1064 | old_pos = sys_lseek (fd, max_size - 1, 0); |
1080 | sys_write (fd, "", 1); | 1065 | sys_write (fd, (void __user *) "", 1); |
1081 | sys_lseek (fd, old_pos, 0); | 1066 | sys_lseek (fd, old_pos, 0); |
1082 | } | 1067 | } |
1083 | } | 1068 | } |
@@ -1102,7 +1087,7 @@ asmlinkage int irix_madvise(unsigned long addr, int len, int behavior) | |||
1102 | return -EINVAL; | 1087 | return -EINVAL; |
1103 | } | 1088 | } |
1104 | 1089 | ||
1105 | asmlinkage int irix_pagelock(char *addr, int len, int op) | 1090 | asmlinkage int irix_pagelock(char __user *addr, int len, int op) |
1106 | { | 1091 | { |
1107 | printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n", | 1092 | printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n", |
1108 | current->comm, current->pid, addr, len, op); | 1093 | current->comm, current->pid, addr, len, op); |
@@ -1142,7 +1127,7 @@ asmlinkage int irix_BSDsetpgrp(int pid, int pgrp) | |||
1142 | return error; | 1127 | return error; |
1143 | } | 1128 | } |
1144 | 1129 | ||
1145 | asmlinkage int irix_systeminfo(int cmd, char *buf, int cnt) | 1130 | asmlinkage int irix_systeminfo(int cmd, char __user *buf, int cnt) |
1146 | { | 1131 | { |
1147 | printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n", | 1132 | printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n", |
1148 | current->comm, current->pid, cmd, buf, cnt); | 1133 | current->comm, current->pid, cmd, buf, cnt); |
@@ -1158,14 +1143,14 @@ struct iuname { | |||
1158 | char _unused3[257], _unused4[257], _unused5[257]; | 1143 | char _unused3[257], _unused4[257], _unused5[257]; |
1159 | }; | 1144 | }; |
1160 | 1145 | ||
1161 | asmlinkage int irix_uname(struct iuname *buf) | 1146 | asmlinkage int irix_uname(struct iuname __user *buf) |
1162 | { | 1147 | { |
1163 | down_read(&uts_sem); | 1148 | down_read(&uts_sem); |
1164 | if (copy_to_user(system_utsname.sysname, buf->sysname, 65) | 1149 | if (copy_from_user(system_utsname.sysname, buf->sysname, 65) |
1165 | || copy_to_user(system_utsname.nodename, buf->nodename, 65) | 1150 | || copy_from_user(system_utsname.nodename, buf->nodename, 65) |
1166 | || copy_to_user(system_utsname.release, buf->release, 65) | 1151 | || copy_from_user(system_utsname.release, buf->release, 65) |
1167 | || copy_to_user(system_utsname.version, buf->version, 65) | 1152 | || copy_from_user(system_utsname.version, buf->version, 65) |
1168 | || copy_to_user(system_utsname.machine, buf->machine, 65)) { | 1153 | || copy_from_user(system_utsname.machine, buf->machine, 65)) { |
1169 | return -EFAULT; | 1154 | return -EFAULT; |
1170 | } | 1155 | } |
1171 | up_read(&uts_sem); | 1156 | up_read(&uts_sem); |
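Note on the irix_uname() hunk above: which primitive is correct follows from the argument order, since copy_to_user(to, from, n) wants the destination in userspace while copy_from_user(to, from, n) wants the source there; the patch switches the call to match the order in which the arguments are already passed. A hedged sketch of both directions (helper names invented, prototypes as commonly declared in <asm/uaccess.h>):

#include <asm/uaccess.h>
#include <linux/errno.h>

/* Kernel string out to a user buffer: the user pointer is the destination. */
static int copy_sysname_out(char __user *ubuf, const char *sysname, unsigned long n)
{
	return copy_to_user(ubuf, sysname, n) ? -EFAULT : 0;
}

/* User buffer into a kernel string: the user pointer is the source. */
static int copy_sysname_in(char *kbuf, const char __user *ubuf, unsigned long n)
{
	return copy_from_user(kbuf, ubuf, n) ? -EFAULT : 0;
}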
@@ -1175,7 +1160,7 @@ asmlinkage int irix_uname(struct iuname *buf) | |||
1175 | 1160 | ||
1176 | #undef DEBUG_XSTAT | 1161 | #undef DEBUG_XSTAT |
1177 | 1162 | ||
1178 | static int irix_xstat32_xlate(struct kstat *stat, void *ubuf) | 1163 | static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf) |
1179 | { | 1164 | { |
1180 | struct xstat32 { | 1165 | struct xstat32 { |
1181 | u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid; | 1166 | u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid; |
@@ -1215,7 +1200,7 @@ static int irix_xstat32_xlate(struct kstat *stat, void *ubuf) | |||
1215 | return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0; | 1200 | return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0; |
1216 | } | 1201 | } |
1217 | 1202 | ||
1218 | static int irix_xstat64_xlate(struct kstat *stat, void *ubuf) | 1203 | static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf) |
1219 | { | 1204 | { |
1220 | struct xstat64 { | 1205 | struct xstat64 { |
1221 | u32 st_dev; s32 st_pad1[3]; | 1206 | u32 st_dev; s32 st_pad1[3]; |
@@ -1265,7 +1250,7 @@ static int irix_xstat64_xlate(struct kstat *stat, void *ubuf) | |||
1265 | return copy_to_user(ubuf, &ks, sizeof(ks)) ? -EFAULT : 0; | 1250 | return copy_to_user(ubuf, &ks, sizeof(ks)) ? -EFAULT : 0; |
1266 | } | 1251 | } |
1267 | 1252 | ||
1268 | asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf) | 1253 | asmlinkage int irix_xstat(int version, char __user *filename, struct stat __user *statbuf) |
1269 | { | 1254 | { |
1270 | int retval; | 1255 | int retval; |
1271 | struct kstat stat; | 1256 | struct kstat stat; |
@@ -1291,7 +1276,7 @@ asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf) | |||
1291 | return retval; | 1276 | return retval; |
1292 | } | 1277 | } |
1293 | 1278 | ||
1294 | asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf) | 1279 | asmlinkage int irix_lxstat(int version, char __user *filename, struct stat __user *statbuf) |
1295 | { | 1280 | { |
1296 | int error; | 1281 | int error; |
1297 | struct kstat stat; | 1282 | struct kstat stat; |
@@ -1318,7 +1303,7 @@ asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf) | |||
1318 | return error; | 1303 | return error; |
1319 | } | 1304 | } |
1320 | 1305 | ||
1321 | asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf) | 1306 | asmlinkage int irix_fxstat(int version, int fd, struct stat __user *statbuf) |
1322 | { | 1307 | { |
1323 | int error; | 1308 | int error; |
1324 | struct kstat stat; | 1309 | struct kstat stat; |
@@ -1344,7 +1329,7 @@ asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf) | |||
1344 | return error; | 1329 | return error; |
1345 | } | 1330 | } |
1346 | 1331 | ||
1347 | asmlinkage int irix_xmknod(int ver, char *filename, int mode, unsigned dev) | 1332 | asmlinkage int irix_xmknod(int ver, char __user *filename, int mode, unsigned dev) |
1348 | { | 1333 | { |
1349 | int retval; | 1334 | int retval; |
1350 | printk("[%s:%d] Wheee.. irix_xmknod(%d,%s,%x,%x)\n", | 1335 | printk("[%s:%d] Wheee.. irix_xmknod(%d,%s,%x,%x)\n", |
@@ -1364,7 +1349,7 @@ asmlinkage int irix_xmknod(int ver, char *filename, int mode, unsigned dev) | |||
1364 | return retval; | 1349 | return retval; |
1365 | } | 1350 | } |
1366 | 1351 | ||
1367 | asmlinkage int irix_swapctl(int cmd, char *arg) | 1352 | asmlinkage int irix_swapctl(int cmd, char __user *arg) |
1368 | { | 1353 | { |
1369 | printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n", | 1354 | printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n", |
1370 | current->comm, current->pid, cmd, arg); | 1355 | current->comm, current->pid, cmd, arg); |
@@ -1380,7 +1365,7 @@ struct irix_statvfs { | |||
1380 | char f_fstr[32]; u32 f_filler[16]; | 1365 | char f_fstr[32]; u32 f_filler[16]; |
1381 | }; | 1366 | }; |
1382 | 1367 | ||
1383 | asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf) | 1368 | asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf) |
1384 | { | 1369 | { |
1385 | struct nameidata nd; | 1370 | struct nameidata nd; |
1386 | struct kstatfs kbuf; | 1371 | struct kstatfs kbuf; |
@@ -1388,10 +1373,9 @@ asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf) | |||
1388 | 1373 | ||
1389 | printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n", | 1374 | printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n", |
1390 | current->comm, current->pid, fname, buf); | 1375 | current->comm, current->pid, fname, buf); |
1391 | if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) { | 1376 | if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) |
1392 | error = -EFAULT; | 1377 | return -EFAULT; |
1393 | goto out; | 1378 | |
1394 | } | ||
1395 | error = user_path_walk(fname, &nd); | 1379 | error = user_path_walk(fname, &nd); |
1396 | if (error) | 1380 | if (error) |
1397 | goto out; | 1381 | goto out; |
@@ -1399,27 +1383,25 @@ asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf) | |||
1399 | if (error) | 1383 | if (error) |
1400 | goto dput_and_out; | 1384 | goto dput_and_out; |
1401 | 1385 | ||
1402 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 1386 | error |= __put_user(kbuf.f_bsize, &buf->f_bsize); |
1403 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 1387 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
1404 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 1388 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
1405 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 1389 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
1406 | __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ | 1390 | error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ |
1407 | __put_user(kbuf.f_files, &buf->f_files); | 1391 | error |= __put_user(kbuf.f_files, &buf->f_files); |
1408 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 1392 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
1409 | __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ | 1393 | error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ |
1410 | #ifdef __MIPSEB__ | 1394 | #ifdef __MIPSEB__ |
1411 | __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); | 1395 | error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); |
1412 | #else | 1396 | #else |
1413 | __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); | 1397 | error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); |
1414 | #endif | 1398 | #endif |
1415 | for (i = 0; i < 16; i++) | 1399 | for (i = 0; i < 16; i++) |
1416 | __put_user(0, &buf->f_basetype[i]); | 1400 | error |= __put_user(0, &buf->f_basetype[i]); |
1417 | __put_user(0, &buf->f_flag); | 1401 | error |= __put_user(0, &buf->f_flag); |
1418 | __put_user(kbuf.f_namelen, &buf->f_namemax); | 1402 | error |= __put_user(kbuf.f_namelen, &buf->f_namemax); |
1419 | for (i = 0; i < 32; i++) | 1403 | for (i = 0; i < 32; i++) |
1420 | __put_user(0, &buf->f_fstr[i]); | 1404 | error |= __put_user(0, &buf->f_fstr[i]); |
1421 | |||
1422 | error = 0; | ||
1423 | 1405 | ||
1424 | dput_and_out: | 1406 | dput_and_out: |
1425 | path_release(&nd); | 1407 | path_release(&nd); |
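Note on the irix_statvfs() hunk above (the fstatvfs and statvfs64 hunks below follow the same pattern): __put_user() skips the access_ok() range check but still returns -EFAULT if the store faults, so the return values are now OR-ed into 'error' instead of being thrown away, with a single access_ok() up front covering the whole structure. A small sketch of the pattern, using an invented structure:

#include <asm/uaccess.h>
#include <linux/errno.h>

struct demo_statvfs {			/* illustrative layout only */
	unsigned long f_bsize;
	unsigned long f_blocks;
	unsigned long f_bfree;
};

/* Fill a userspace structure field by field; any faulting __put_user()
 * makes the accumulated value non-zero, reported once as -EFAULT. */
static int fill_demo_statvfs(struct demo_statvfs __user *buf,
			     unsigned long bsize, unsigned long blocks,
			     unsigned long bfree)
{
	int err;

	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
		return -EFAULT;		/* one range check for the whole struct */

	err  = __put_user(bsize,  &buf->f_bsize);
	err |= __put_user(blocks, &buf->f_blocks);
	err |= __put_user(bfree,  &buf->f_bfree);

	return err ? -EFAULT : 0;
}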
@@ -1427,7 +1409,7 @@ out: | |||
1427 | return error; | 1409 | return error; |
1428 | } | 1410 | } |
1429 | 1411 | ||
1430 | asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf) | 1412 | asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf) |
1431 | { | 1413 | { |
1432 | struct kstatfs kbuf; | 1414 | struct kstatfs kbuf; |
1433 | struct file *file; | 1415 | struct file *file; |
@@ -1436,10 +1418,9 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf) | |||
1436 | printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n", | 1418 | printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n", |
1437 | current->comm, current->pid, fd, buf); | 1419 | current->comm, current->pid, fd, buf); |
1438 | 1420 | ||
1439 | if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) { | 1421 | if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) |
1440 | error = -EFAULT; | 1422 | return -EFAULT; |
1441 | goto out; | 1423 | |
1442 | } | ||
1443 | if (!(file = fget(fd))) { | 1424 | if (!(file = fget(fd))) { |
1444 | error = -EBADF; | 1425 | error = -EBADF; |
1445 | goto out; | 1426 | goto out; |
@@ -1448,24 +1429,24 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf) | |||
1448 | if (error) | 1429 | if (error) |
1449 | goto out_f; | 1430 | goto out_f; |
1450 | 1431 | ||
1451 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 1432 | error = __put_user(kbuf.f_bsize, &buf->f_bsize); |
1452 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 1433 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
1453 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 1434 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
1454 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 1435 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
1455 | __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ | 1436 | error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ |
1456 | __put_user(kbuf.f_files, &buf->f_files); | 1437 | error |= __put_user(kbuf.f_files, &buf->f_files); |
1457 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 1438 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
1458 | __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ | 1439 | error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ |
1459 | #ifdef __MIPSEB__ | 1440 | #ifdef __MIPSEB__ |
1460 | __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); | 1441 | error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); |
1461 | #else | 1442 | #else |
1462 | __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); | 1443 | error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); |
1463 | #endif | 1444 | #endif |
1464 | for(i = 0; i < 16; i++) | 1445 | for(i = 0; i < 16; i++) |
1465 | __put_user(0, &buf->f_basetype[i]); | 1446 | error |= __put_user(0, &buf->f_basetype[i]); |
1466 | __put_user(0, &buf->f_flag); | 1447 | error |= __put_user(0, &buf->f_flag); |
1467 | __put_user(kbuf.f_namelen, &buf->f_namemax); | 1448 | error |= __put_user(kbuf.f_namelen, &buf->f_namemax); |
1468 | __clear_user(&buf->f_fstr, sizeof(buf->f_fstr)); | 1449 | error |= __clear_user(&buf->f_fstr, sizeof(buf->f_fstr)) ? -EFAULT : 0; |
1469 | 1450 | ||
1470 | out_f: | 1451 | out_f: |
1471 | fput(file); | 1452 | fput(file); |
@@ -1489,7 +1470,7 @@ asmlinkage int irix_sigqueue(int pid, int sig, int code, int val) | |||
1489 | return -EINVAL; | 1470 | return -EINVAL; |
1490 | } | 1471 | } |
1491 | 1472 | ||
1492 | asmlinkage int irix_truncate64(char *name, int pad, int size1, int size2) | 1473 | asmlinkage int irix_truncate64(char __user *name, int pad, int size1, int size2) |
1493 | { | 1474 | { |
1494 | int retval; | 1475 | int retval; |
1495 | 1476 | ||
@@ -1522,6 +1503,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs) | |||
1522 | int len, prot, flags, fd, off1, off2, error, base = 0; | 1503 | int len, prot, flags, fd, off1, off2, error, base = 0; |
1523 | unsigned long addr, pgoff, *sp; | 1504 | unsigned long addr, pgoff, *sp; |
1524 | struct file *file = NULL; | 1505 | struct file *file = NULL; |
1506 | int err; | ||
1525 | 1507 | ||
1526 | if (regs->regs[2] == 1000) | 1508 | if (regs->regs[2] == 1000) |
1527 | base = 1; | 1509 | base = 1; |
@@ -1531,36 +1513,31 @@ asmlinkage int irix_mmap64(struct pt_regs *regs) | |||
1531 | prot = regs->regs[base + 6]; | 1513 | prot = regs->regs[base + 6]; |
1532 | if (!base) { | 1514 | if (!base) { |
1533 | flags = regs->regs[base + 7]; | 1515 | flags = regs->regs[base + 7]; |
1534 | if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long)))) { | 1516 | if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long)))) |
1535 | error = -EFAULT; | 1517 | return -EFAULT; |
1536 | goto out; | ||
1537 | } | ||
1538 | fd = sp[0]; | 1518 | fd = sp[0]; |
1539 | __get_user(off1, &sp[1]); | 1519 | err = __get_user(off1, &sp[1]); |
1540 | __get_user(off2, &sp[2]); | 1520 | err |= __get_user(off2, &sp[2]); |
1541 | } else { | 1521 | } else { |
1542 | if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long)))) { | 1522 | if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long)))) |
1543 | error = -EFAULT; | 1523 | return -EFAULT; |
1544 | goto out; | 1524 | err = __get_user(flags, &sp[0]); |
1545 | } | 1525 | err |= __get_user(fd, &sp[1]); |
1546 | __get_user(flags, &sp[0]); | 1526 | err |= __get_user(off1, &sp[2]); |
1547 | __get_user(fd, &sp[1]); | 1527 | err |= __get_user(off2, &sp[3]); |
1548 | __get_user(off1, &sp[2]); | ||
1549 | __get_user(off2, &sp[3]); | ||
1550 | } | 1528 | } |
1551 | 1529 | ||
1552 | if (off1 & PAGE_MASK) { | 1530 | if (err) |
1553 | error = -EOVERFLOW; | 1531 | return err; |
1554 | goto out; | 1532 | |
1555 | } | 1533 | if (off1 & PAGE_MASK) |
1534 | return -EOVERFLOW; | ||
1556 | 1535 | ||
1557 | pgoff = (off1 << (32 - PAGE_SHIFT)) | (off2 >> PAGE_SHIFT); | 1536 | pgoff = (off1 << (32 - PAGE_SHIFT)) | (off2 >> PAGE_SHIFT); |
1558 | 1537 | ||
1559 | if (!(flags & MAP_ANONYMOUS)) { | 1538 | if (!(flags & MAP_ANONYMOUS)) { |
1560 | if (!(file = fget(fd))) { | 1539 | if (!(file = fget(fd))) |
1561 | error = -EBADF; | 1540 | return -EBADF; |
1562 | goto out; | ||
1563 | } | ||
1564 | 1541 | ||
1565 | /* Ok, bad taste hack follows, try to think in something else | 1542 | /* Ok, bad taste hack follows, try to think in something else |
1566 | when reading this */ | 1543 | when reading this */ |
@@ -1570,7 +1547,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs) | |||
1570 | 1547 | ||
1571 | if (max_size > file->f_dentry->d_inode->i_size) { | 1548 | if (max_size > file->f_dentry->d_inode->i_size) { |
1572 | old_pos = sys_lseek (fd, max_size - 1, 0); | 1549 | old_pos = sys_lseek (fd, max_size - 1, 0); |
1573 | sys_write (fd, "", 1); | 1550 | sys_write (fd, (void __user *) "", 1); |
1574 | sys_lseek (fd, old_pos, 0); | 1551 | sys_lseek (fd, old_pos, 0); |
1575 | } | 1552 | } |
1576 | } | 1553 | } |
@@ -1585,7 +1562,6 @@ asmlinkage int irix_mmap64(struct pt_regs *regs) | |||
1585 | if (file) | 1562 | if (file) |
1586 | fput(file); | 1563 | fput(file); |
1587 | 1564 | ||
1588 | out: | ||
1589 | return error; | 1565 | return error; |
1590 | } | 1566 | } |
1591 | 1567 | ||
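Note on the irix_mmap64() hunk above: the 64-bit file offset arrives split across two 32-bit words (off1 high, off2 low) and is folded into a page offset with pgoff = (off1 << (32 - PAGE_SHIFT)) | (off2 >> PAGE_SHIFT); any high bits of off1 that cannot be represented in the page offset are rejected up front as -EOVERFLOW. A standalone illustration of the same arithmetic (PAGE_SHIFT of 12 is an assumption for the example):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12				/* assuming 4 KiB pages */
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Combine a split 64-bit byte offset (hi:lo) into a page offset the way
 * the hunk does; only the low PAGE_SHIFT bits of 'hi' fit, hence the
 * overflow check on hi & PAGE_MASK. */
static int split_offset_to_pgoff(uint32_t hi, uint32_t lo, unsigned long *pgoff)
{
	if (hi & PAGE_MASK)
		return -1;			/* -EOVERFLOW in the patch */
	*pgoff = ((unsigned long)hi << (32 - PAGE_SHIFT)) | (lo >> PAGE_SHIFT);
	return 0;
}

int main(void)
{
	unsigned long pgoff;

	if (split_offset_to_pgoff(0x1, 0x3000, &pgoff) == 0)
		printf("pgoff = 0x%lx\n", pgoff);	/* 0x100003 with 4 KiB pages */
	return 0;
}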
@@ -1597,7 +1573,7 @@ asmlinkage int irix_dmi(struct pt_regs *regs) | |||
1597 | return -EINVAL; | 1573 | return -EINVAL; |
1598 | } | 1574 | } |
1599 | 1575 | ||
1600 | asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64, | 1576 | asmlinkage int irix_pread(int fd, char __user *buf, int cnt, int off64, |
1601 | int off1, int off2) | 1577 | int off1, int off2) |
1602 | { | 1578 | { |
1603 | printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n", | 1579 | printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n", |
@@ -1606,7 +1582,7 @@ asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64, | |||
1606 | return -EINVAL; | 1582 | return -EINVAL; |
1607 | } | 1583 | } |
1608 | 1584 | ||
1609 | asmlinkage int irix_pwrite(int fd, char *buf, int cnt, int off64, | 1585 | asmlinkage int irix_pwrite(int fd, char __user *buf, int cnt, int off64, |
1610 | int off1, int off2) | 1586 | int off1, int off2) |
1611 | { | 1587 | { |
1612 | printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n", | 1588 | printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n", |
@@ -1638,7 +1614,7 @@ struct irix_statvfs64 { | |||
1638 | u32 f_filler[16]; | 1614 | u32 f_filler[16]; |
1639 | }; | 1615 | }; |
1640 | 1616 | ||
1641 | asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf) | 1617 | asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *buf) |
1642 | { | 1618 | { |
1643 | struct nameidata nd; | 1619 | struct nameidata nd; |
1644 | struct kstatfs kbuf; | 1620 | struct kstatfs kbuf; |
@@ -1650,6 +1626,7 @@ asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf) | |||
1650 | error = -EFAULT; | 1626 | error = -EFAULT; |
1651 | goto out; | 1627 | goto out; |
1652 | } | 1628 | } |
1629 | |||
1653 | error = user_path_walk(fname, &nd); | 1630 | error = user_path_walk(fname, &nd); |
1654 | if (error) | 1631 | if (error) |
1655 | goto out; | 1632 | goto out; |
@@ -1657,27 +1634,25 @@ asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf) | |||
1657 | if (error) | 1634 | if (error) |
1658 | goto dput_and_out; | 1635 | goto dput_and_out; |
1659 | 1636 | ||
1660 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 1637 | error = __put_user(kbuf.f_bsize, &buf->f_bsize); |
1661 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 1638 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
1662 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 1639 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
1663 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 1640 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
1664 | __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ | 1641 | error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ |
1665 | __put_user(kbuf.f_files, &buf->f_files); | 1642 | error |= __put_user(kbuf.f_files, &buf->f_files); |
1666 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 1643 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
1667 | __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ | 1644 | error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ |
1668 | #ifdef __MIPSEB__ | 1645 | #ifdef __MIPSEB__ |
1669 | __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); | 1646 | error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); |
1670 | #else | 1647 | #else |
1671 | __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); | 1648 | error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); |
1672 | #endif | 1649 | #endif |
1673 | for(i = 0; i < 16; i++) | 1650 | for(i = 0; i < 16; i++) |
1674 | __put_user(0, &buf->f_basetype[i]); | 1651 | error |= __put_user(0, &buf->f_basetype[i]); |
1675 | __put_user(0, &buf->f_flag); | 1652 | error |= __put_user(0, &buf->f_flag); |
1676 | __put_user(kbuf.f_namelen, &buf->f_namemax); | 1653 | error |= __put_user(kbuf.f_namelen, &buf->f_namemax); |
1677 | for(i = 0; i < 32; i++) | 1654 | for(i = 0; i < 32; i++) |
1678 | __put_user(0, &buf->f_fstr[i]); | 1655 | error |= __put_user(0, &buf->f_fstr[i]); |
1679 | |||
1680 | error = 0; | ||
1681 | 1656 | ||
1682 | dput_and_out: | 1657 | dput_and_out: |
1683 | path_release(&nd); | 1658 | path_release(&nd); |
@@ -1685,7 +1660,7 @@ out: | |||
1685 | return error; | 1660 | return error; |
1686 | } | 1661 | } |
1687 | 1662 | ||
1688 | asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf) | 1663 | asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf) |
1689 | { | 1664 | { |
1690 | struct kstatfs kbuf; | 1665 | struct kstatfs kbuf; |
1691 | struct file *file; | 1666 | struct file *file; |
@@ -1706,24 +1681,24 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf) | |||
1706 | if (error) | 1681 | if (error) |
1707 | goto out_f; | 1682 | goto out_f; |
1708 | 1683 | ||
1709 | __put_user(kbuf.f_bsize, &buf->f_bsize); | 1684 | error = __put_user(kbuf.f_bsize, &buf->f_bsize); |
1710 | __put_user(kbuf.f_frsize, &buf->f_frsize); | 1685 | error |= __put_user(kbuf.f_frsize, &buf->f_frsize); |
1711 | __put_user(kbuf.f_blocks, &buf->f_blocks); | 1686 | error |= __put_user(kbuf.f_blocks, &buf->f_blocks); |
1712 | __put_user(kbuf.f_bfree, &buf->f_bfree); | 1687 | error |= __put_user(kbuf.f_bfree, &buf->f_bfree); |
1713 | __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ | 1688 | error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */ |
1714 | __put_user(kbuf.f_files, &buf->f_files); | 1689 | error |= __put_user(kbuf.f_files, &buf->f_files); |
1715 | __put_user(kbuf.f_ffree, &buf->f_ffree); | 1690 | error |= __put_user(kbuf.f_ffree, &buf->f_ffree); |
1716 | __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ | 1691 | error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */ |
1717 | #ifdef __MIPSEB__ | 1692 | #ifdef __MIPSEB__ |
1718 | __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); | 1693 | error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid); |
1719 | #else | 1694 | #else |
1720 | __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); | 1695 | error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid); |
1721 | #endif | 1696 | #endif |
1722 | for(i = 0; i < 16; i++) | 1697 | for(i = 0; i < 16; i++) |
1723 | __put_user(0, &buf->f_basetype[i]); | 1698 | error |= __put_user(0, &buf->f_basetype[i]); |
1724 | __put_user(0, &buf->f_flag); | 1699 | error |= __put_user(0, &buf->f_flag); |
1725 | __put_user(kbuf.f_namelen, &buf->f_namemax); | 1700 | error |= __put_user(kbuf.f_namelen, &buf->f_namemax); |
1726 | __clear_user(buf->f_fstr, sizeof(buf->f_fstr[i])); | 1701 | error |= __clear_user(buf->f_fstr, sizeof(buf->f_fstr[i])) ? -EFAULT : 0; |
1727 | 1702 | ||
1728 | out_f: | 1703 | out_f: |
1729 | fput(file); | 1704 | fput(file); |
@@ -1731,9 +1706,9 @@ out: | |||
1731 | return error; | 1706 | return error; |
1732 | } | 1707 | } |
1733 | 1708 | ||
1734 | asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf) | 1709 | asmlinkage int irix_getmountid(char __user *fname, unsigned long __user *midbuf) |
1735 | { | 1710 | { |
1736 | int err = 0; | 1711 | int err; |
1737 | 1712 | ||
1738 | printk("[%s:%d] irix_getmountid(%s, %p)\n", | 1713 | printk("[%s:%d] irix_getmountid(%s, %p)\n", |
1739 | current->comm, current->pid, fname, midbuf); | 1714 | current->comm, current->pid, fname, midbuf); |
@@ -1746,7 +1721,7 @@ asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf) | |||
1746 | * fsid of the filesystem to try and make the right decision, but | 1721 | * fsid of the filesystem to try and make the right decision, but |
1747 | * we don't have this so for now. XXX | 1722 | * we don't have this so for now. XXX |
1748 | */ | 1723 | */ |
1749 | err |= __put_user(0, &midbuf[0]); | 1724 | err = __put_user(0, &midbuf[0]); |
1750 | err |= __put_user(0, &midbuf[1]); | 1725 | err |= __put_user(0, &midbuf[1]); |
1751 | err |= __put_user(0, &midbuf[2]); | 1726 | err |= __put_user(0, &midbuf[2]); |
1752 | err |= __put_user(0, &midbuf[3]); | 1727 | err |= __put_user(0, &midbuf[3]); |
@@ -1773,8 +1748,8 @@ struct irix_dirent32 { | |||
1773 | }; | 1748 | }; |
1774 | 1749 | ||
1775 | struct irix_dirent32_callback { | 1750 | struct irix_dirent32_callback { |
1776 | struct irix_dirent32 *current_dir; | 1751 | struct irix_dirent32 __user *current_dir; |
1777 | struct irix_dirent32 *previous; | 1752 | struct irix_dirent32 __user *previous; |
1778 | int count; | 1753 | int count; |
1779 | int error; | 1754 | int error; |
1780 | }; | 1755 | }; |
@@ -1782,13 +1757,13 @@ struct irix_dirent32_callback { | |||
1782 | #define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de))) | 1757 | #define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de))) |
1783 | #define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1)) | 1758 | #define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1)) |
1784 | 1759 | ||
1785 | static int irix_filldir32(void *__buf, const char *name, int namlen, | 1760 | static int irix_filldir32(void *__buf, const char *name, |
1786 | loff_t offset, ino_t ino, unsigned int d_type) | 1761 | int namlen, loff_t offset, ino_t ino, unsigned int d_type) |
1787 | { | 1762 | { |
1788 | struct irix_dirent32 *dirent; | 1763 | struct irix_dirent32 __user *dirent; |
1789 | struct irix_dirent32_callback *buf = | 1764 | struct irix_dirent32_callback *buf = __buf; |
1790 | (struct irix_dirent32_callback *)__buf; | ||
1791 | unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1); | 1765 | unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1); |
1766 | int err = 0; | ||
1792 | 1767 | ||
1793 | #ifdef DEBUG_GETDENTS | 1768 | #ifdef DEBUG_GETDENTS |
1794 | printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]", | 1769 | printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]", |
@@ -1799,25 +1774,26 @@ static int irix_filldir32(void *__buf, const char *name, int namlen, | |||
1799 | return -EINVAL; | 1774 | return -EINVAL; |
1800 | dirent = buf->previous; | 1775 | dirent = buf->previous; |
1801 | if (dirent) | 1776 | if (dirent) |
1802 | __put_user(offset, &dirent->d_off); | 1777 | err = __put_user(offset, &dirent->d_off); |
1803 | dirent = buf->current_dir; | 1778 | dirent = buf->current_dir; |
1804 | buf->previous = dirent; | 1779 | err |= __put_user(dirent, &buf->previous); |
1805 | __put_user(ino, &dirent->d_ino); | 1780 | err |= __put_user(ino, &dirent->d_ino); |
1806 | __put_user(reclen, &dirent->d_reclen); | 1781 | err |= __put_user(reclen, &dirent->d_reclen); |
1807 | copy_to_user(dirent->d_name, name, namlen); | 1782 | err |= copy_to_user((char __user *)dirent->d_name, name, namlen) ? -EFAULT : 0; |
1808 | __put_user(0, &dirent->d_name[namlen]); | 1783 | err |= __put_user(0, &dirent->d_name[namlen]); |
1809 | ((char *) dirent) += reclen; | 1784 | dirent = (struct irix_dirent32 __user *) ((char __user *) dirent + reclen); |
1785 | |||
1810 | buf->current_dir = dirent; | 1786 | buf->current_dir = dirent; |
1811 | buf->count -= reclen; | 1787 | buf->count -= reclen; |
1812 | 1788 | ||
1813 | return 0; | 1789 | return err; |
1814 | } | 1790 | } |
1815 | 1791 | ||
1816 | asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, | 1792 | asmlinkage int irix_ngetdents(unsigned int fd, void __user * dirent, |
1817 | unsigned int count, int *eob) | 1793 | unsigned int count, int __user *eob) |
1818 | { | 1794 | { |
1819 | struct file *file; | 1795 | struct file *file; |
1820 | struct irix_dirent32 *lastdirent; | 1796 | struct irix_dirent32 __user *lastdirent; |
1821 | struct irix_dirent32_callback buf; | 1797 | struct irix_dirent32_callback buf; |
1822 | int error; | 1798 | int error; |
1823 | 1799 | ||
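Note on the irix_filldir32() rework above: each directory entry written to the user buffer is a variable-length record, sized as the fixed header plus the name plus a terminating NUL and rounded up to a 4-byte boundary (ROUND_UP32); the callback then advances current_dir by that reclen. A standalone illustration of the record-length arithmetic (the structure is a simplified stand-in, not the IRIX ABI layout):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_dirent32 {			/* simplified stand-in */
	uint32_t d_ino;
	uint32_t d_off;
	uint16_t d_reclen;
	char     d_name[1];		/* name bytes follow, NUL-terminated */
};

#define NAME_OFFSET32	((size_t) offsetof(struct demo_dirent32, d_name))
#define ROUND_UP32(x)	(((x) + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1))

int main(void)
{
	const char *name = "vmlinux";
	size_t namlen = 7;
	/* header + name + terminating NUL, rounded to a 4-byte boundary */
	size_t reclen = ROUND_UP32(NAME_OFFSET32 + namlen + 1);

	printf("reclen for \"%s\" = %zu\n", name, reclen);	/* 20 for this layout */
	return 0;
}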
@@ -1830,7 +1806,7 @@ asmlinkage int irix_ngetdents(unsigned int fd, void * dirent, | |||
1830 | if (!file) | 1806 | if (!file) |
1831 | goto out; | 1807 | goto out; |
1832 | 1808 | ||
1833 | buf.current_dir = (struct irix_dirent32 *) dirent; | 1809 | buf.current_dir = (struct irix_dirent32 __user *) dirent; |
1834 | buf.previous = NULL; | 1810 | buf.previous = NULL; |
1835 | buf.count = count; | 1811 | buf.count = count; |
1836 | buf.error = 0; | 1812 | buf.error = 0; |
@@ -1870,8 +1846,8 @@ struct irix_dirent64 { | |||
1870 | }; | 1846 | }; |
1871 | 1847 | ||
1872 | struct irix_dirent64_callback { | 1848 | struct irix_dirent64_callback { |
1873 | struct irix_dirent64 *curr; | 1849 | struct irix_dirent64 __user *curr; |
1874 | struct irix_dirent64 *previous; | 1850 | struct irix_dirent64 __user *previous; |
1875 | int count; | 1851 | int count; |
1876 | int error; | 1852 | int error; |
1877 | }; | 1853 | }; |
@@ -1879,37 +1855,44 @@ struct irix_dirent64_callback { | |||
1879 | #define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de))) | 1855 | #define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de))) |
1880 | #define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1)) | 1856 | #define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1)) |
1881 | 1857 | ||
1882 | static int irix_filldir64(void * __buf, const char * name, int namlen, | 1858 | static int irix_filldir64(void *__buf, const char *name, |
1883 | loff_t offset, ino_t ino, unsigned int d_type) | 1859 | int namlen, loff_t offset, ino_t ino, unsigned int d_type) |
1884 | { | 1860 | { |
1885 | struct irix_dirent64 *dirent; | 1861 | struct irix_dirent64 __user *dirent; |
1886 | struct irix_dirent64_callback * buf = | 1862 | struct irix_dirent64_callback * buf = __buf; |
1887 | (struct irix_dirent64_callback *) __buf; | ||
1888 | unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1); | 1863 | unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1); |
1864 | int err = 0; | ||
1889 | 1865 | ||
1890 | buf->error = -EINVAL; /* only used if we fail.. */ | 1866 | if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))) |
1867 | return -EFAULT; | ||
1868 | |||
1869 | if (__put_user(-EINVAL, &buf->error)) /* only used if we fail.. */ | ||
1870 | return -EFAULT; | ||
1891 | if (reclen > buf->count) | 1871 | if (reclen > buf->count) |
1892 | return -EINVAL; | 1872 | return -EINVAL; |
1893 | dirent = buf->previous; | 1873 | dirent = buf->previous; |
1894 | if (dirent) | 1874 | if (dirent) |
1895 | __put_user(offset, &dirent->d_off); | 1875 | err = __put_user(offset, &dirent->d_off); |
1896 | dirent = buf->curr; | 1876 | dirent = buf->curr; |
1897 | buf->previous = dirent; | 1877 | buf->previous = dirent; |
1898 | __put_user(ino, &dirent->d_ino); | 1878 | err |= __put_user(ino, &dirent->d_ino); |
1899 | __put_user(reclen, &dirent->d_reclen); | 1879 | err |= __put_user(reclen, &dirent->d_reclen); |
1900 | __copy_to_user(dirent->d_name, name, namlen); | 1880 | err |= __copy_to_user((char __user *)dirent->d_name, name, namlen) |
1901 | __put_user(0, &dirent->d_name[namlen]); | 1881 | ? -EFAULT : 0; |
1902 | ((char *) dirent) += reclen; | 1882 | err |= __put_user(0, &dirent->d_name[namlen]); |
1883 | |||
1884 | dirent = (struct irix_dirent64 __user *) ((char __user *) dirent + reclen); | ||
1885 | |||
1903 | buf->curr = dirent; | 1886 | buf->curr = dirent; |
1904 | buf->count -= reclen; | 1887 | buf->count -= reclen; |
1905 | 1888 | ||
1906 | return 0; | 1889 | return err; |
1907 | } | 1890 | } |
1908 | 1891 | ||
1909 | asmlinkage int irix_getdents64(int fd, void *dirent, int cnt) | 1892 | asmlinkage int irix_getdents64(int fd, void __user *dirent, int cnt) |
1910 | { | 1893 | { |
1911 | struct file *file; | 1894 | struct file *file; |
1912 | struct irix_dirent64 *lastdirent; | 1895 | struct irix_dirent64 __user *lastdirent; |
1913 | struct irix_dirent64_callback buf; | 1896 | struct irix_dirent64_callback buf; |
1914 | int error; | 1897 | int error; |
1915 | 1898 | ||
@@ -1929,7 +1912,7 @@ asmlinkage int irix_getdents64(int fd, void *dirent, int cnt) | |||
1929 | if (cnt < (sizeof(struct irix_dirent64) + 255)) | 1912 | if (cnt < (sizeof(struct irix_dirent64) + 255)) |
1930 | goto out_f; | 1913 | goto out_f; |
1931 | 1914 | ||
1932 | buf.curr = (struct irix_dirent64 *) dirent; | 1915 | buf.curr = (struct irix_dirent64 __user *) dirent; |
1933 | buf.previous = NULL; | 1916 | buf.previous = NULL; |
1934 | buf.count = cnt; | 1917 | buf.count = cnt; |
1935 | buf.error = 0; | 1918 | buf.error = 0; |
@@ -1941,7 +1924,8 @@ asmlinkage int irix_getdents64(int fd, void *dirent, int cnt) | |||
1941 | error = buf.error; | 1924 | error = buf.error; |
1942 | goto out_f; | 1925 | goto out_f; |
1943 | } | 1926 | } |
1944 | lastdirent->d_off = (u64) file->f_pos; | 1927 | if (put_user(file->f_pos, &lastdirent->d_off)) |
1928 | return -EFAULT; | ||
1945 | #ifdef DEBUG_GETDENTS | 1929 | #ifdef DEBUG_GETDENTS |
1946 | printk("returning %d\n", cnt - buf.count); | 1930 | printk("returning %d\n", cnt - buf.count); |
1947 | #endif | 1931 | #endif |
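Note on the irix_getdents64() hunk above (irix_ngetdents64() below gets the same treatment): lastdirent points into the user buffer, so the final d_off can no longer be stored with a plain assignment; it goes through put_user(), which checks the address and turns a fault into -EFAULT. A minimal sketch of the pattern (names and layout invented for illustration):

#include <linux/types.h>
#include <asm/uaccess.h>

struct demo_dirent64 {		/* illustrative layout only */
	u64 d_ino;
	u64 d_off;
	unsigned short d_reclen;
	char d_name[1];
};

/* Record the final file position in the last entry of the user buffer. */
static int store_last_off(struct demo_dirent64 __user *lastdirent, loff_t pos)
{
	return put_user(pos, &lastdirent->d_off);	/* 0 or -EFAULT */
}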
@@ -1953,10 +1937,10 @@ out: | |||
1953 | return error; | 1937 | return error; |
1954 | } | 1938 | } |
1955 | 1939 | ||
1956 | asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob) | 1940 | asmlinkage int irix_ngetdents64(int fd, void __user *dirent, int cnt, int *eob) |
1957 | { | 1941 | { |
1958 | struct file *file; | 1942 | struct file *file; |
1959 | struct irix_dirent64 *lastdirent; | 1943 | struct irix_dirent64 __user *lastdirent; |
1960 | struct irix_dirent64_callback buf; | 1944 | struct irix_dirent64_callback buf; |
1961 | int error; | 1945 | int error; |
1962 | 1946 | ||
@@ -1978,7 +1962,7 @@ asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob) | |||
1978 | goto out_f; | 1962 | goto out_f; |
1979 | 1963 | ||
1980 | *eob = 0; | 1964 | *eob = 0; |
1981 | buf.curr = (struct irix_dirent64 *) dirent; | 1965 | buf.curr = (struct irix_dirent64 __user *) dirent; |
1982 | buf.previous = NULL; | 1966 | buf.previous = NULL; |
1983 | buf.count = cnt; | 1967 | buf.count = cnt; |
1984 | buf.error = 0; | 1968 | buf.error = 0; |
@@ -1990,7 +1974,8 @@ asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob) | |||
1990 | error = buf.error; | 1974 | error = buf.error; |
1991 | goto out_f; | 1975 | goto out_f; |
1992 | } | 1976 | } |
1993 | lastdirent->d_off = (u64) file->f_pos; | 1977 | if (put_user(file->f_pos, &lastdirent->d_off)) |
1978 | return -EFAULT; | ||
1994 | #ifdef DEBUG_GETDENTS | 1979 | #ifdef DEBUG_GETDENTS |
1995 | printk("eob=%d returning %d\n", *eob, cnt - buf.count); | 1980 | printk("eob=%d returning %d\n", *eob, cnt - buf.count); |
1996 | #endif | 1981 | #endif |
@@ -2053,14 +2038,14 @@ out: | |||
2053 | return retval; | 2038 | return retval; |
2054 | } | 2039 | } |
2055 | 2040 | ||
2056 | asmlinkage int irix_utssys(char *inbuf, int arg, int type, char *outbuf) | 2041 | asmlinkage int irix_utssys(char __user *inbuf, int arg, int type, char __user *outbuf) |
2057 | { | 2042 | { |
2058 | int retval; | 2043 | int retval; |
2059 | 2044 | ||
2060 | switch(type) { | 2045 | switch(type) { |
2061 | case 0: | 2046 | case 0: |
2062 | /* uname() */ | 2047 | /* uname() */ |
2063 | retval = irix_uname((struct iuname *)inbuf); | 2048 | retval = irix_uname((struct iuname __user *)inbuf); |
2064 | goto out; | 2049 | goto out; |
2065 | 2050 | ||
2066 | case 2: | 2051 | case 2: |
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 0dd0df7a3b04..787ed541d442 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * Free Software Foundation; either version 2 of the License, or (at your | 11 | * Free Software Foundation; either version 2 of the License, or (at your |
12 | * option) any later version. | 12 | * option) any later version. |
13 | */ | 13 | */ |
14 | #include <linux/config.h> | ||
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
@@ -25,6 +26,7 @@ | |||
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | 27 | ||
27 | #include <asm/bootinfo.h> | 28 | #include <asm/bootinfo.h> |
29 | #include <asm/cache.h> | ||
28 | #include <asm/compiler.h> | 30 | #include <asm/compiler.h> |
29 | #include <asm/cpu.h> | 31 | #include <asm/cpu.h> |
30 | #include <asm/cpu-features.h> | 32 | #include <asm/cpu-features.h> |
@@ -43,10 +45,6 @@ | |||
43 | 45 | ||
44 | #define TICK_SIZE (tick_nsec / 1000) | 46 | #define TICK_SIZE (tick_nsec / 1000) |
45 | 47 | ||
46 | u64 jiffies_64 = INITIAL_JIFFIES; | ||
47 | |||
48 | EXPORT_SYMBOL(jiffies_64); | ||
49 | |||
50 | /* | 48 | /* |
51 | * forward reference | 49 | * forward reference |
52 | */ | 50 | */ |
@@ -76,7 +74,7 @@ int (*rtc_set_mmss)(unsigned long); | |||
76 | static unsigned int sll32_usecs_per_cycle; | 74 | static unsigned int sll32_usecs_per_cycle; |
77 | 75 | ||
78 | /* how many counter cycles in a jiffy */ | 76 | /* how many counter cycles in a jiffy */ |
79 | static unsigned long cycles_per_jiffy; | 77 | static unsigned long cycles_per_jiffy __read_mostly; |
80 | 78 | ||
81 | /* Cycle counter value at the previous timer interrupt.. */ | 79 | /* Cycle counter value at the previous timer interrupt.. */ |
82 | static unsigned int timerhi, timerlo; | 80 | static unsigned int timerhi, timerlo; |
@@ -98,7 +96,10 @@ static unsigned int null_hpt_read(void) | |||
98 | return 0; | 96 | return 0; |
99 | } | 97 | } |
100 | 98 | ||
101 | static void null_hpt_init(unsigned int count) { /* nothing */ } | 99 | static void null_hpt_init(unsigned int count) |
100 | { | ||
101 | /* nothing */ | ||
102 | } | ||
102 | 103 | ||
103 | 104 | ||
104 | /* | 105 | /* |
@@ -108,8 +109,10 @@ static void c0_timer_ack(void) | |||
108 | { | 109 | { |
109 | unsigned int count; | 110 | unsigned int count; |
110 | 111 | ||
112 | #ifndef CONFIG_SOC_PNX8550 /* pnx8550 resets to zero */ | ||
111 | /* Ack this timer interrupt and set the next one. */ | 113 | /* Ack this timer interrupt and set the next one. */ |
112 | expirelo += cycles_per_jiffy; | 114 | expirelo += cycles_per_jiffy; |
115 | #endif | ||
113 | write_c0_compare(expirelo); | 116 | write_c0_compare(expirelo); |
114 | 117 | ||
115 | /* Check to see if we have missed any timer interrupts. */ | 118 | /* Check to see if we have missed any timer interrupts. */ |
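Note on the c0_timer_ack() hunk above: on most boards the next tick is scheduled by advancing the software copy of the compare value by cycles_per_jiffy and writing it to the CP0 Compare register, which also clears the pending timer interrupt; the new #ifndef skips the advance on the PNX8550, whose counter is said to reset to zero. A hedged sketch of the usual acknowledge-and-rearm step (variable names mirror the hunk; missed-tick handling omitted):

#include <asm/mipsregs.h>

static unsigned int expirelo;			/* next Compare value, in cycles */
static unsigned long cycles_per_jiffy;		/* counter cycles per tick */

static void demo_timer_ack(void)
{
	expirelo += cycles_per_jiffy;		/* one jiffy further on */
	write_c0_compare(expirelo);		/* rearm; the write also acks the IRQ */
}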
@@ -224,7 +227,6 @@ int do_settimeofday(struct timespec *tv) | |||
224 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | 227 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); |
225 | 228 | ||
226 | ntp_clear(); | 229 | ntp_clear(); |
227 | |||
228 | write_sequnlock_irq(&xtime_lock); | 230 | write_sequnlock_irq(&xtime_lock); |
229 | clock_was_set(); | 231 | clock_was_set(); |
230 | return 0; | 232 | return 0; |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a53b1ed7b386..6f3ff9690686 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999 Silicon Graphics, Inc. |
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | 11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. |
12 | * Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki | 12 | * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki |
13 | */ | 13 | */ |
14 | #include <linux/config.h> | 14 | #include <linux/config.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -20,12 +20,16 @@ | |||
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
23 | #include <linux/bootmem.h> | ||
23 | 24 | ||
24 | #include <asm/bootinfo.h> | 25 | #include <asm/bootinfo.h> |
25 | #include <asm/branch.h> | 26 | #include <asm/branch.h> |
26 | #include <asm/break.h> | 27 | #include <asm/break.h> |
27 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
29 | #include <asm/dsp.h> | ||
28 | #include <asm/fpu.h> | 30 | #include <asm/fpu.h> |
31 | #include <asm/mipsregs.h> | ||
32 | #include <asm/mipsmtregs.h> | ||
29 | #include <asm/module.h> | 33 | #include <asm/module.h> |
30 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
31 | #include <asm/ptrace.h> | 35 | #include <asm/ptrace.h> |
@@ -54,14 +58,19 @@ extern asmlinkage void handle_tr(void); | |||
54 | extern asmlinkage void handle_fpe(void); | 58 | extern asmlinkage void handle_fpe(void); |
55 | extern asmlinkage void handle_mdmx(void); | 59 | extern asmlinkage void handle_mdmx(void); |
56 | extern asmlinkage void handle_watch(void); | 60 | extern asmlinkage void handle_watch(void); |
61 | extern asmlinkage void handle_mt(void); | ||
62 | extern asmlinkage void handle_dsp(void); | ||
57 | extern asmlinkage void handle_mcheck(void); | 63 | extern asmlinkage void handle_mcheck(void); |
58 | extern asmlinkage void handle_reserved(void); | 64 | extern asmlinkage void handle_reserved(void); |
59 | 65 | ||
60 | extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp, | 66 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
61 | struct mips_fpu_soft_struct *ctx); | 67 | struct mips_fpu_soft_struct *ctx); |
62 | 68 | ||
63 | void (*board_be_init)(void); | 69 | void (*board_be_init)(void); |
64 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 70 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
71 | void (*board_nmi_handler_setup)(void); | ||
72 | void (*board_ejtag_handler_setup)(void); | ||
73 | void (*board_bind_eic_interrupt)(int irq, int regset); | ||
65 | 74 | ||
66 | /* | 75 | /* |
67 | * These constant is for searching for possible module text segments. | 76 | * These constant is for searching for possible module text segments. |
@@ -201,32 +210,47 @@ void show_regs(struct pt_regs *regs) | |||
201 | 210 | ||
202 | printk("Status: %08x ", (uint32_t) regs->cp0_status); | 211 | printk("Status: %08x ", (uint32_t) regs->cp0_status); |
203 | 212 | ||
204 | if (regs->cp0_status & ST0_KX) | 213 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { |
205 | printk("KX "); | 214 | if (regs->cp0_status & ST0_KUO) |
206 | if (regs->cp0_status & ST0_SX) | 215 | printk("KUo "); |
207 | printk("SX "); | 216 | if (regs->cp0_status & ST0_IEO) |
208 | if (regs->cp0_status & ST0_UX) | 217 | printk("IEo "); |
209 | printk("UX "); | 218 | if (regs->cp0_status & ST0_KUP) |
210 | switch (regs->cp0_status & ST0_KSU) { | 219 | printk("KUp "); |
211 | case KSU_USER: | 220 | if (regs->cp0_status & ST0_IEP) |
212 | printk("USER "); | 221 | printk("IEp "); |
213 | break; | 222 | if (regs->cp0_status & ST0_KUC) |
214 | case KSU_SUPERVISOR: | 223 | printk("KUc "); |
215 | printk("SUPERVISOR "); | 224 | if (regs->cp0_status & ST0_IEC) |
216 | break; | 225 | printk("IEc "); |
217 | case KSU_KERNEL: | 226 | } else { |
218 | printk("KERNEL "); | 227 | if (regs->cp0_status & ST0_KX) |
219 | break; | 228 | printk("KX "); |
220 | default: | 229 | if (regs->cp0_status & ST0_SX) |
221 | printk("BAD_MODE "); | 230 | printk("SX "); |
222 | break; | 231 | if (regs->cp0_status & ST0_UX) |
232 | printk("UX "); | ||
233 | switch (regs->cp0_status & ST0_KSU) { | ||
234 | case KSU_USER: | ||
235 | printk("USER "); | ||
236 | break; | ||
237 | case KSU_SUPERVISOR: | ||
238 | printk("SUPERVISOR "); | ||
239 | break; | ||
240 | case KSU_KERNEL: | ||
241 | printk("KERNEL "); | ||
242 | break; | ||
243 | default: | ||
244 | printk("BAD_MODE "); | ||
245 | break; | ||
246 | } | ||
247 | if (regs->cp0_status & ST0_ERL) | ||
248 | printk("ERL "); | ||
249 | if (regs->cp0_status & ST0_EXL) | ||
250 | printk("EXL "); | ||
251 | if (regs->cp0_status & ST0_IE) | ||
252 | printk("IE "); | ||
223 | } | 253 | } |
224 | if (regs->cp0_status & ST0_ERL) | ||
225 | printk("ERL "); | ||
226 | if (regs->cp0_status & ST0_EXL) | ||
227 | printk("EXL "); | ||
228 | if (regs->cp0_status & ST0_IE) | ||
229 | printk("IE "); | ||
230 | printk("\n"); | 254 | printk("\n"); |
231 | 255 | ||
232 | printk("Cause : %08x\n", cause); | 256 | printk("Cause : %08x\n", cause); |
@@ -252,29 +276,18 @@ void show_registers(struct pt_regs *regs) | |||
252 | 276 | ||
253 | static DEFINE_SPINLOCK(die_lock); | 277 | static DEFINE_SPINLOCK(die_lock); |
254 | 278 | ||
255 | NORET_TYPE void __die(const char * str, struct pt_regs * regs, | 279 | NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs) |
256 | const char * file, const char * func, unsigned long line) | ||
257 | { | 280 | { |
258 | static int die_counter; | 281 | static int die_counter; |
259 | 282 | ||
260 | console_verbose(); | 283 | console_verbose(); |
261 | spin_lock_irq(&die_lock); | 284 | spin_lock_irq(&die_lock); |
262 | printk("%s", str); | 285 | printk("%s[#%d]:\n", str, ++die_counter); |
263 | if (file && func) | ||
264 | printk(" in %s:%s, line %ld", file, func, line); | ||
265 | printk("[#%d]:\n", ++die_counter); | ||
266 | show_registers(regs); | 286 | show_registers(regs); |
267 | spin_unlock_irq(&die_lock); | 287 | spin_unlock_irq(&die_lock); |
268 | do_exit(SIGSEGV); | 288 | do_exit(SIGSEGV); |
269 | } | 289 | } |
270 | 290 | ||
271 | void __die_if_kernel(const char * str, struct pt_regs * regs, | ||
272 | const char * file, const char * func, unsigned long line) | ||
273 | { | ||
274 | if (!user_mode(regs)) | ||
275 | __die(str, regs, file, func, line); | ||
276 | } | ||
277 | |||
278 | extern const struct exception_table_entry __start___dbe_table[]; | 291 | extern const struct exception_table_entry __start___dbe_table[]; |
279 | extern const struct exception_table_entry __stop___dbe_table[]; | 292 | extern const struct exception_table_entry __stop___dbe_table[]; |
280 | 293 | ||
@@ -339,9 +352,9 @@ asmlinkage void do_be(struct pt_regs *regs) | |||
339 | 352 | ||
340 | static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) | 353 | static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) |
341 | { | 354 | { |
342 | unsigned int *epc; | 355 | unsigned int __user *epc; |
343 | 356 | ||
344 | epc = (unsigned int *) regs->cp0_epc + | 357 | epc = (unsigned int __user *) regs->cp0_epc + |
345 | ((regs->cp0_cause & CAUSEF_BD) != 0); | 358 | ((regs->cp0_cause & CAUSEF_BD) != 0); |
346 | if (!get_user(*opcode, epc)) | 359 | if (!get_user(*opcode, epc)) |
347 | return 0; | 360 | return 0; |
@@ -360,6 +373,10 @@ static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode) | |||
360 | #define OFFSET 0x0000ffff | 373 | #define OFFSET 0x0000ffff |
361 | #define LL 0xc0000000 | 374 | #define LL 0xc0000000 |
362 | #define SC 0xe0000000 | 375 | #define SC 0xe0000000 |
376 | #define SPEC3 0x7c000000 | ||
377 | #define RD 0x0000f800 | ||
378 | #define FUNC 0x0000003f | ||
379 | #define RDHWR 0x0000003b | ||
363 | 380 | ||
364 | /* | 381 | /* |
365 | * The ll_bit is cleared by r*_switch.S | 382 | * The ll_bit is cleared by r*_switch.S |
@@ -371,7 +388,7 @@ static struct task_struct *ll_task = NULL; | |||
371 | 388 | ||
372 | static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | 389 | static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) |
373 | { | 390 | { |
374 | unsigned long value, *vaddr; | 391 | unsigned long value, __user *vaddr; |
375 | long offset; | 392 | long offset; |
376 | int signal = 0; | 393 | int signal = 0; |
377 | 394 | ||
@@ -385,7 +402,8 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | |||
385 | offset <<= 16; | 402 | offset <<= 16; |
386 | offset >>= 16; | 403 | offset >>= 16; |
387 | 404 | ||
388 | vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); | 405 | vaddr = (unsigned long __user *) |
406 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
389 | 407 | ||
390 | if ((unsigned long)vaddr & 3) { | 408 | if ((unsigned long)vaddr & 3) { |
391 | signal = SIGBUS; | 409 | signal = SIGBUS; |
@@ -407,9 +425,10 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode) | |||
407 | 425 | ||
408 | preempt_enable(); | 426 | preempt_enable(); |
409 | 427 | ||
428 | compute_return_epc(regs); | ||
429 | |||
410 | regs->regs[(opcode & RT) >> 16] = value; | 430 | regs->regs[(opcode & RT) >> 16] = value; |
411 | 431 | ||
412 | compute_return_epc(regs); | ||
413 | return; | 432 | return; |
414 | 433 | ||
415 | sig: | 434 | sig: |
@@ -418,7 +437,8 @@ sig: | |||
418 | 437 | ||
419 | static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | 438 | static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) |
420 | { | 439 | { |
421 | unsigned long *vaddr, reg; | 440 | unsigned long __user *vaddr; |
441 | unsigned long reg; | ||
422 | long offset; | 442 | long offset; |
423 | int signal = 0; | 443 | int signal = 0; |
424 | 444 | ||
@@ -432,7 +452,8 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
432 | offset <<= 16; | 452 | offset <<= 16; |
433 | offset >>= 16; | 453 | offset >>= 16; |
434 | 454 | ||
435 | vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset); | 455 | vaddr = (unsigned long __user *) |
456 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
436 | reg = (opcode & RT) >> 16; | 457 | reg = (opcode & RT) >> 16; |
437 | 458 | ||
438 | if ((unsigned long)vaddr & 3) { | 459 | if ((unsigned long)vaddr & 3) { |
@@ -443,9 +464,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
443 | preempt_disable(); | 464 | preempt_disable(); |
444 | 465 | ||
445 | if (ll_bit == 0 || ll_task != current) { | 466 | if (ll_bit == 0 || ll_task != current) { |
467 | compute_return_epc(regs); | ||
446 | regs->regs[reg] = 0; | 468 | regs->regs[reg] = 0; |
447 | preempt_enable(); | 469 | preempt_enable(); |
448 | compute_return_epc(regs); | ||
449 | return; | 470 | return; |
450 | } | 471 | } |
451 | 472 | ||
@@ -456,9 +477,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode) | |||
456 | goto sig; | 477 | goto sig; |
457 | } | 478 | } |
458 | 479 | ||
480 | compute_return_epc(regs); | ||
459 | regs->regs[reg] = 1; | 481 | regs->regs[reg] = 1; |
460 | 482 | ||
461 | compute_return_epc(regs); | ||
462 | return; | 483 | return; |
463 | 484 | ||
464 | sig: | 485 | sig: |
@@ -491,6 +512,37 @@ static inline int simulate_llsc(struct pt_regs *regs) | |||
491 | return -EFAULT; /* Strange things going on ... */ | 512 | return -EFAULT; /* Strange things going on ... */ |
492 | } | 513 | } |
493 | 514 | ||
515 | /* | ||
516 | * Simulate trapping 'rdhwr' instructions to provide user accessible | ||
517 | * registers not implemented in hardware. The only current use of this | ||
518 | * is the thread area pointer. | ||
519 | */ | ||
520 | static inline int simulate_rdhwr(struct pt_regs *regs) | ||
521 | { | ||
522 | struct thread_info *ti = current->thread_info; | ||
523 | unsigned int opcode; | ||
524 | |||
525 | if (unlikely(get_insn_opcode(regs, &opcode))) | ||
526 | return -EFAULT; | ||
527 | |||
528 | if (unlikely(compute_return_epc(regs))) | ||
529 | return -EFAULT; | ||
530 | |||
531 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { | ||
532 | int rd = (opcode & RD) >> 11; | ||
533 | int rt = (opcode & RT) >> 16; | ||
534 | switch (rd) { | ||
535 | case 29: | ||
536 | regs->regs[rt] = ti->tp_value; | ||
537 | break; | ||
538 | default: | ||
539 | return -EFAULT; | ||
540 | } | ||
541 | } | ||
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
494 | asmlinkage void do_ov(struct pt_regs *regs) | 546 | asmlinkage void do_ov(struct pt_regs *regs) |
495 | { | 547 | { |
496 | siginfo_t info; | 548 | siginfo_t info; |
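Note on the new simulate_rdhwr() above: the kernel traps the otherwise-unimplemented rdhwr instruction, fetches the opcode at the faulting EPC, and, for hardware register 29 in the rd field, hands back the per-thread tp_value so userspace can read its TLS pointer. A standalone decode of the same bitfields (OPCODE and RT are assumed to carry their usual values from earlier in traps.c, 0xfc000000 and 0x001f0000; the sample word is an assumed encoding of "rdhwr $3, $29"):

#include <stdio.h>
#include <stdint.h>

#define OPCODE 0xfc000000	/* major opcode field */
#define RT     0x001f0000	/* destination GPR    */
#define SPEC3  0x7c000000	/* masks added by the hunk */
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b

int main(void)
{
	uint32_t insn = 0x7c03e83b;	/* assumed: rdhwr $3, $29 */

	if ((insn & OPCODE) == SPEC3 && (insn & FUNC) == RDHWR) {
		int rd = (insn & RD) >> 11;	/* hardware register number     */
		int rt = (insn & RT) >> 16;	/* destination general register */
		printf("rdhwr: hw reg %d -> $%d\n", rd, rt);
		/* rd == 29 is the case the kernel emulates: the TLS pointer. */
	}
	return 0;
}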
@@ -498,7 +550,7 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
498 | info.si_code = FPE_INTOVF; | 550 | info.si_code = FPE_INTOVF; |
499 | info.si_signo = SIGFPE; | 551 | info.si_signo = SIGFPE; |
500 | info.si_errno = 0; | 552 | info.si_errno = 0; |
501 | info.si_addr = (void *)regs->cp0_epc; | 553 | info.si_addr = (void __user *) regs->cp0_epc; |
502 | force_sig_info(SIGFPE, &info, current); | 554 | force_sig_info(SIGFPE, &info, current); |
503 | } | 555 | } |
504 | 556 | ||
@@ -512,6 +564,14 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
512 | 564 | ||
513 | preempt_disable(); | 565 | preempt_disable(); |
514 | 566 | ||
567 | #ifdef CONFIG_PREEMPT | ||
568 | if (!is_fpu_owner()) { | ||
569 | /* We might lose fpu before disabling preempt... */ | ||
570 | own_fpu(); | ||
571 | BUG_ON(!used_math()); | ||
572 | restore_fp(current); | ||
573 | } | ||
574 | #endif | ||
515 | /* | 575 | /* |
516 | * Unimplemented operation exception. If we've got the full | 576 | * Unimplemented operation exception. If we've got the full |
517 | * software emulator on-board, let's use it... | 577 | * software emulator on-board, let's use it... |
@@ -523,11 +583,18 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
523 | * a bit extreme for what should be an infrequent event. | 583 | * a bit extreme for what should be an infrequent event. |
524 | */ | 584 | */ |
525 | save_fp(current); | 585 | save_fp(current); |
586 | /* Ensure 'resume' not overwrite saved fp context again. */ | ||
587 | lose_fpu(); | ||
588 | |||
589 | preempt_enable(); | ||
526 | 590 | ||
527 | /* Run the emulator */ | 591 | /* Run the emulator */ |
528 | sig = fpu_emulator_cop1Handler (0, regs, | 592 | sig = fpu_emulator_cop1Handler (regs, |
529 | ¤t->thread.fpu.soft); | 593 | ¤t->thread.fpu.soft); |
530 | 594 | ||
595 | preempt_disable(); | ||
596 | |||
597 | own_fpu(); /* Using the FPU again. */ | ||
531 | /* | 598 | /* |
532 | * We can't allow the emulated instruction to leave any of | 599 | * We can't allow the emulated instruction to leave any of |
533 | * the cause bit set in $fcr31. | 600 | * the cause bit set in $fcr31. |
@@ -584,7 +651,7 @@ asmlinkage void do_bp(struct pt_regs *regs) | |||
584 | info.si_code = FPE_INTOVF; | 651 | info.si_code = FPE_INTOVF; |
585 | info.si_signo = SIGFPE; | 652 | info.si_signo = SIGFPE; |
586 | info.si_errno = 0; | 653 | info.si_errno = 0; |
587 | info.si_addr = (void *)regs->cp0_epc; | 654 | info.si_addr = (void __user *) regs->cp0_epc; |
588 | force_sig_info(SIGFPE, &info, current); | 655 | force_sig_info(SIGFPE, &info, current); |
589 | break; | 656 | break; |
590 | default: | 657 | default: |
@@ -621,7 +688,7 @@ asmlinkage void do_tr(struct pt_regs *regs) | |||
621 | info.si_code = FPE_INTOVF; | 688 | info.si_code = FPE_INTOVF; |
622 | info.si_signo = SIGFPE; | 689 | info.si_signo = SIGFPE; |
623 | info.si_errno = 0; | 690 | info.si_errno = 0; |
624 | info.si_addr = (void *)regs->cp0_epc; | 691 | info.si_addr = (void __user *) regs->cp0_epc; |
625 | force_sig_info(SIGFPE, &info, current); | 692 | force_sig_info(SIGFPE, &info, current); |
626 | break; | 693 | break; |
627 | default: | 694 | default: |
@@ -637,6 +704,9 @@ asmlinkage void do_ri(struct pt_regs *regs) | |||
637 | if (!simulate_llsc(regs)) | 704 | if (!simulate_llsc(regs)) |
638 | return; | 705 | return; |
639 | 706 | ||
707 | if (!simulate_rdhwr(regs)) | ||
708 | return; | ||
709 | |||
640 | force_sig(SIGILL, current); | 710 | force_sig(SIGILL, current); |
641 | } | 711 | } |
642 | 712 | ||
@@ -650,11 +720,13 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
650 | 720 | ||
651 | switch (cpid) { | 721 | switch (cpid) { |
652 | case 0: | 722 | case 0: |
653 | if (cpu_has_llsc) | 723 | if (!cpu_has_llsc) |
654 | break; | 724 | if (!simulate_llsc(regs)) |
725 | return; | ||
655 | 726 | ||
656 | if (!simulate_llsc(regs)) | 727 | if (!simulate_rdhwr(regs)) |
657 | return; | 728 | return; |
729 | |||
658 | break; | 730 | break; |
659 | 731 | ||
660 | case 1: | 732 | case 1: |
@@ -668,15 +740,15 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
668 | set_used_math(); | 740 | set_used_math(); |
669 | } | 741 | } |
670 | 742 | ||
743 | preempt_enable(); | ||
744 | |||
671 | if (!cpu_has_fpu) { | 745 | if (!cpu_has_fpu) { |
672 | int sig = fpu_emulator_cop1Handler(0, regs, | 746 | int sig = fpu_emulator_cop1Handler(regs, |
673 | ¤t->thread.fpu.soft); | 747 | ¤t->thread.fpu.soft); |
674 | if (sig) | 748 | if (sig) |
675 | force_sig(sig, current); | 749 | force_sig(sig, current); |
676 | } | 750 | } |
677 | 751 | ||
678 | preempt_enable(); | ||
679 | |||
680 | return; | 752 | return; |
681 | 753 | ||
682 | case 2: | 754 | case 2: |
@@ -716,6 +788,22 @@ asmlinkage void do_mcheck(struct pt_regs *regs) | |||
716 | (regs->cp0_status & ST0_TS) ? "" : "not "); | 788 | (regs->cp0_status & ST0_TS) ? "" : "not "); |
717 | } | 789 | } |
718 | 790 | ||
791 | asmlinkage void do_mt(struct pt_regs *regs) | ||
792 | { | ||
793 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
794 | |||
795 | force_sig(SIGILL, current); | ||
796 | } | ||
797 | |||
798 | |||
799 | asmlinkage void do_dsp(struct pt_regs *regs) | ||
800 | { | ||
801 | if (cpu_has_dsp) | ||
802 | panic("Unexpected DSP exception\n"); | ||
803 | |||
804 | force_sig(SIGILL, current); | ||
805 | } | ||
806 | |||
719 | asmlinkage void do_reserved(struct pt_regs *regs) | 807 | asmlinkage void do_reserved(struct pt_regs *regs) |
720 | { | 808 | { |
721 | /* | 809 | /* |
@@ -728,6 +816,12 @@ asmlinkage void do_reserved(struct pt_regs *regs) | |||
728 | (regs->cp0_cause & 0x7f) >> 2); | 816 | (regs->cp0_cause & 0x7f) >> 2); |
729 | } | 817 | } |
730 | 818 | ||
819 | asmlinkage void do_default_vi(struct pt_regs *regs) | ||
820 | { | ||
821 | show_regs(regs); | ||
822 | panic("Caught unexpected vectored interrupt."); | ||
823 | } | ||
824 | |||
731 | /* | 825 | /* |
732 | * Some MIPS CPUs can enable/disable cache parity detection, but do | 826 | * Some MIPS CPUs can enable/disable cache parity detection, but do |
733 | * it in different ways. | 827 | * it in different ways. |
@@ -736,16 +830,12 @@ static inline void parity_protection_init(void) | |||
736 | { | 830 | { |
737 | switch (current_cpu_data.cputype) { | 831 | switch (current_cpu_data.cputype) { |
738 | case CPU_24K: | 832 | case CPU_24K: |
739 | /* 24K cache parity not currently implemented in FPGA */ | ||
740 | printk(KERN_INFO "Disable cache parity protection for " | ||
741 | "MIPS 24K CPU.\n"); | ||
742 | write_c0_ecc(read_c0_ecc() & ~0x80000000); | ||
743 | break; | ||
744 | case CPU_5KC: | 833 | case CPU_5KC: |
745 | /* Set the PE bit (bit 31) in the c0_ecc register. */ | 834 | write_c0_ecc(0x80000000); |
746 | printk(KERN_INFO "Enable cache parity protection for " | 835 | back_to_back_c0_hazard(); |
747 | "MIPS 5KC/24K CPUs.\n"); | 836 | /* Set the PE bit (bit 31) in the c0_errctl register. */ |
748 | write_c0_ecc(read_c0_ecc() | 0x80000000); | 837 | printk(KERN_INFO "Cache parity protection %sabled\n", |
838 | (read_c0_ecc() & 0x80000000) ? "en" : "dis"); | ||
749 | break; | 839 | break; |
750 | case CPU_20KC: | 840 | case CPU_20KC: |
751 | case CPU_25KF: | 841 | case CPU_25KF: |
@@ -783,7 +873,7 @@ asmlinkage void cache_parity_error(void) | |||
783 | reg_val & (1<<22) ? "E0 " : ""); | 873 | reg_val & (1<<22) ? "E0 " : ""); |
784 | printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); | 874 | printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); |
785 | 875 | ||
786 | #if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64) | 876 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) |
787 | if (reg_val & (1<<22)) | 877 | if (reg_val & (1<<22)) |
788 | printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); | 878 | printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); |
789 | 879 | ||
@@ -840,7 +930,11 @@ void nmi_exception_handler(struct pt_regs *regs) | |||
840 | while(1) ; | 930 | while(1) ; |
841 | } | 931 | } |
842 | 932 | ||
933 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
934 | |||
935 | unsigned long ebase; | ||
843 | unsigned long exception_handlers[32]; | 936 | unsigned long exception_handlers[32]; |
937 | unsigned long vi_handlers[64]; | ||
844 | 938 | ||
845 | /* | 939 | /* |
846 | * As a side effect of the way this is implemented we're limited | 940 | * As a side effect of the way this is implemented we're limited |
@@ -854,13 +948,156 @@ void *set_except_vector(int n, void *addr) | |||
854 | 948 | ||
855 | exception_handlers[n] = handler; | 949 | exception_handlers[n] = handler; |
856 | if (n == 0 && cpu_has_divec) { | 950 | if (n == 0 && cpu_has_divec) { |
857 | *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 | | 951 | *(volatile u32 *)(ebase + 0x200) = 0x08000000 | |
858 | (0x03ffffff & (handler >> 2)); | 952 | (0x03ffffff & (handler >> 2)); |
859 | flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204); | 953 | flush_icache_range(ebase + 0x200, ebase + 0x204); |
860 | } | 954 | } |
861 | return (void *)old_handler; | 955 | return (void *)old_handler; |
862 | } | 956 | } |
863 | 957 | ||
958 | #ifdef CONFIG_CPU_MIPSR2 | ||
959 | /* | ||
960 | * Shadow register allocation | ||
961 | * FIXME: SMP... | ||
962 | */ | ||
963 | |||
964 | /* MIPSR2 shadow register sets */ | ||
965 | struct shadow_registers { | ||
966 | spinlock_t sr_lock; /* protects sr_allocated */ | ||
967 | int sr_supported; /* Number of shadow register sets supported */ | ||
968 | int sr_allocated; /* Bitmap of allocated shadow registers */ | ||
969 | } shadow_registers; | ||
970 | |||
971 | void mips_srs_init(void) | ||
972 | { | ||
973 | #ifdef CONFIG_CPU_MIPSR2_SRS | ||
974 | shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | ||
975 | printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported); | ||
976 | #else | ||
977 | shadow_registers.sr_supported = 1; | ||
978 | #endif | ||
979 | shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ | ||
980 | spin_lock_init(&shadow_registers.sr_lock); | ||
981 | } | ||
982 | |||
983 | int mips_srs_max(void) | ||
984 | { | ||
985 | return shadow_registers.sr_supported; | ||
986 | } | ||
987 | |||
988 | int mips_srs_alloc (void) | ||
989 | { | ||
990 | struct shadow_registers *sr = &shadow_registers; | ||
991 | unsigned long flags; | ||
992 | int set; | ||
993 | |||
994 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
995 | |||
996 | for (set = 0; set < sr->sr_supported; set++) { | ||
997 | if ((sr->sr_allocated & (1 << set)) == 0) { | ||
998 | sr->sr_allocated |= 1 << set; | ||
999 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1000 | return set; | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | /* None available */ | ||
1005 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1006 | return -1; | ||
1007 | } | ||
1008 | |||
1009 | void mips_srs_free (int set) | ||
1010 | { | ||
1011 | struct shadow_registers *sr = &shadow_registers; | ||
1012 | unsigned long flags; | ||
1013 | |||
1014 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
1015 | sr->sr_allocated &= ~(1 << set); | ||
1016 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1017 | } | ||
1018 | |||
1019 | void *set_vi_srs_handler (int n, void *addr, int srs) | ||
1020 | { | ||
1021 | unsigned long handler; | ||
1022 | unsigned long old_handler = vi_handlers[n]; | ||
1023 | u32 *w; | ||
1024 | unsigned char *b; | ||
1025 | |||
1026 | if (!cpu_has_veic && !cpu_has_vint) | ||
1027 | BUG(); | ||
1028 | |||
1029 | if (addr == NULL) { | ||
1030 | handler = (unsigned long) do_default_vi; | ||
1031 | srs = 0; | ||
1032 | } | ||
1033 | else | ||
1034 | handler = (unsigned long) addr; | ||
1035 | vi_handlers[n] = (unsigned long) addr; | ||
1036 | |||
1037 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); | ||
1038 | |||
1039 | if (srs >= mips_srs_max()) | ||
1040 | panic("Shadow register set %d not supported", srs); | ||
1041 | |||
1042 | if (cpu_has_veic) { | ||
1043 | if (board_bind_eic_interrupt) | ||
1044 | board_bind_eic_interrupt (n, srs); | ||
1045 | } | ||
1046 | else if (cpu_has_vint) { | ||
1047 | /* SRSMap is only defined if shadow sets are implemented */ | ||
1048 | if (mips_srs_max() > 1) | ||
1049 | change_c0_srsmap (0xf << n*4, srs << n*4); | ||
1050 | } | ||
1051 | |||
1052 | if (srs == 0) { | ||
1053 | /* | ||
1054 | * If no shadow set is selected then use the default handler | ||
1055 | * that does normal register saving and a standard interrupt exit | ||
1056 | */ | ||
1057 | |||
1058 | extern char except_vec_vi, except_vec_vi_lui; | ||
1059 | extern char except_vec_vi_ori, except_vec_vi_end; | ||
1060 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | ||
1061 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | ||
1062 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | ||
1063 | |||
1064 | if (handler_len > VECTORSPACING) { | ||
1065 | /* | ||
1066 | * Sigh... panicking won't help as the console | ||
1067 | * is probably not configured :( | ||
1068 | */ | ||
1069 | panic ("VECTORSPACING too small"); | ||
1070 | } | ||
1071 | |||
1072 | memcpy (b, &except_vec_vi, handler_len); | ||
1073 | w = (u32 *)(b + lui_offset); | ||
1074 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | ||
1075 | w = (u32 *)(b + ori_offset); | ||
1076 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); | ||
1077 | flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); | ||
1078 | } | ||
1079 | else { | ||
1080 | /* | ||
1081 | * In other cases jump directly to the interrupt handler | ||
1082 | * | ||
1083 | * It is the handler's responsibility to save registers if required | ||
1084 | * (e.g. hi/lo) and return from the exception using "eret". | ||
1085 | */ | ||
1086 | w = (u32 *)b; | ||
1087 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */ | ||
1088 | *w = 0; | ||
1089 | flush_icache_range((unsigned long)b, (unsigned long)(b+8)); | ||
1090 | } | ||
1091 | |||
1092 | return (void *)old_handler; | ||
1093 | } | ||
1094 | |||
1095 | void *set_vi_handler (int n, void *addr) | ||
1096 | { | ||
1097 | return set_vi_srs_handler (n, addr, 0); | ||
1098 | } | ||
1099 | #endif | ||
1100 | |||
864 | /* | 1101 | /* |
865 | * This is used by native signal handling | 1102 | * This is used by native signal handling |
866 | */ | 1103 | */ |
@@ -912,6 +1149,7 @@ static inline void signal32_init(void) | |||
912 | 1149 | ||
913 | extern void cpu_cache_init(void); | 1150 | extern void cpu_cache_init(void); |
914 | extern void tlb_init(void); | 1151 | extern void tlb_init(void); |
1152 | extern void flush_tlb_handlers(void); | ||
915 | 1153 | ||
916 | void __init per_cpu_trap_init(void) | 1154 | void __init per_cpu_trap_init(void) |
917 | { | 1155 | { |
@@ -929,15 +1167,32 @@ void __init per_cpu_trap_init(void) | |||
929 | #endif | 1167 | #endif |
930 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) | 1168 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) |
931 | status_set |= ST0_XX; | 1169 | status_set |= ST0_XX; |
932 | change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, | 1170 | change_c0_status(ST0_CU|ST0_MX|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
933 | status_set); | 1171 | status_set); |
934 | 1172 | ||
1173 | if (cpu_has_dsp) | ||
1174 | set_c0_status(ST0_MX); | ||
1175 | |||
1176 | #ifdef CONFIG_CPU_MIPSR2 | ||
1177 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | ||
1178 | #endif | ||
1179 | |||
935 | /* | 1180 | /* |
936 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1181 | * Interrupt handling. |
937 | * interrupt processing overhead. Use it where available. | ||
938 | */ | 1182 | */ |
939 | if (cpu_has_divec) | 1183 | if (cpu_has_veic || cpu_has_vint) { |
940 | set_c0_cause(CAUSEF_IV); | 1184 | write_c0_ebase (ebase); |
1185 | /* Setting vector spacing enables EI/VI mode */ | ||
1186 | change_c0_intctl (0x3e0, VECTORSPACING); | ||
1187 | } | ||
1188 | if (cpu_has_divec) { | ||
1189 | if (cpu_has_mipsmt) { | ||
1190 | unsigned int vpflags = dvpe(); | ||
1191 | set_c0_cause(CAUSEF_IV); | ||
1192 | evpe(vpflags); | ||
1193 | } else | ||
1194 | set_c0_cause(CAUSEF_IV); | ||
1195 | } | ||
941 | 1196 | ||
942 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1197 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
943 | TLBMISS_HANDLER_SETUP(); | 1198 | TLBMISS_HANDLER_SETUP(); |
@@ -951,13 +1206,41 @@ void __init per_cpu_trap_init(void) | |||
951 | tlb_init(); | 1206 | tlb_init(); |
952 | } | 1207 | } |
953 | 1208 | ||
1209 | /* Install CPU exception handler */ | ||
1210 | void __init set_handler (unsigned long offset, void *addr, unsigned long size) | ||
1211 | { | ||
1212 | memcpy((void *)(ebase + offset), addr, size); | ||
1213 | flush_icache_range(ebase + offset, ebase + offset + size); | ||
1214 | } | ||
1215 | |||
1216 | /* Install uncached CPU exception handler */ | ||
1217 | void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size) | ||
1218 | { | ||
1219 | #ifdef CONFIG_32BIT | ||
1220 | unsigned long uncached_ebase = KSEG1ADDR(ebase); | ||
1221 | #endif | ||
1222 | #ifdef CONFIG_64BIT | ||
1223 | unsigned long uncached_ebase = TO_UNCAC(ebase); | ||
1224 | #endif | ||
1225 | |||
1226 | memcpy((void *)(uncached_ebase + offset), addr, size); | ||
1227 | } | ||
1228 | |||
954 | void __init trap_init(void) | 1229 | void __init trap_init(void) |
955 | { | 1230 | { |
956 | extern char except_vec3_generic, except_vec3_r4000; | 1231 | extern char except_vec3_generic, except_vec3_r4000; |
957 | extern char except_vec_ejtag_debug; | ||
958 | extern char except_vec4; | 1232 | extern char except_vec4; |
959 | unsigned long i; | 1233 | unsigned long i; |
960 | 1234 | ||
1235 | if (cpu_has_veic || cpu_has_vint) | ||
1236 | ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64); | ||
1237 | else | ||
1238 | ebase = CAC_BASE; | ||
1239 | |||
1240 | #ifdef CONFIG_CPU_MIPSR2 | ||
1241 | mips_srs_init(); | ||
1242 | #endif | ||
1243 | |||
961 | per_cpu_trap_init(); | 1244 | per_cpu_trap_init(); |
962 | 1245 | ||
963 | /* | 1246 | /* |
@@ -965,7 +1248,7 @@ void __init trap_init(void) | |||
965 | * This will be overridden later as suitable for a particular | 1248 | * This will be overridden later as suitable for a particular |
966 | * configuration. | 1249 | * configuration. |
967 | */ | 1250 | */ |
968 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | 1251 | set_handler(0x180, &except_vec3_generic, 0x80); |
969 | 1252 | ||
970 | /* | 1253 | /* |
971 | * Setup default vectors | 1254 | * Setup default vectors |
@@ -977,8 +1260,8 @@ void __init trap_init(void) | |||
977 | * Copy the EJTAG debug exception vector handler code to its final | 1260 | * Copy the EJTAG debug exception vector handler code to its final |
978 | * destination. | 1261 | * destination. |
979 | */ | 1262 | */ |
980 | if (cpu_has_ejtag) | 1263 | if (cpu_has_ejtag && board_ejtag_handler_setup) |
981 | memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80); | 1264 | board_ejtag_handler_setup (); |
982 | 1265 | ||
983 | /* | 1266 | /* |
984 | * Only some CPUs have the watch exceptions. | 1267 | * Only some CPUs have the watch exceptions. |
@@ -987,11 +1270,15 @@ void __init trap_init(void) | |||
987 | set_except_vector(23, handle_watch); | 1270 | set_except_vector(23, handle_watch); |
988 | 1271 | ||
989 | /* | 1272 | /* |
990 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1273 | * Initialise interrupt handlers |
991 | * interrupt processing overhead. Use it where available. | ||
992 | */ | 1274 | */ |
993 | if (cpu_has_divec) | 1275 | if (cpu_has_veic || cpu_has_vint) { |
994 | memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8); | 1276 | int nvec = cpu_has_veic ? 64 : 8; |
1277 | for (i = 0; i < nvec; i++) | ||
1278 | set_vi_handler (i, NULL); | ||
1279 | } | ||
1280 | else if (cpu_has_divec) | ||
1281 | set_handler(0x200, &except_vec4, 0x8); | ||
995 | 1282 | ||
996 | /* | 1283 | /* |
997 | * Some CPUs can enable/disable for cache parity detection, but does | 1284 | * Some CPUs can enable/disable for cache parity detection, but does |
@@ -1023,21 +1310,6 @@ void __init trap_init(void) | |||
1023 | set_except_vector(11, handle_cpu); | 1310 | set_except_vector(11, handle_cpu); |
1024 | set_except_vector(12, handle_ov); | 1311 | set_except_vector(12, handle_ov); |
1025 | set_except_vector(13, handle_tr); | 1312 | set_except_vector(13, handle_tr); |
1026 | set_except_vector(22, handle_mdmx); | ||
1027 | |||
1028 | if (cpu_has_fpu && !cpu_has_nofpuex) | ||
1029 | set_except_vector(15, handle_fpe); | ||
1030 | |||
1031 | if (cpu_has_mcheck) | ||
1032 | set_except_vector(24, handle_mcheck); | ||
1033 | |||
1034 | if (cpu_has_vce) | ||
1035 | /* Special exception: R4[04]00 uses also the divec space. */ | ||
1036 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100); | ||
1037 | else if (cpu_has_4kex) | ||
1038 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | ||
1039 | else | ||
1040 | memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80); | ||
1041 | 1313 | ||
1042 | if (current_cpu_data.cputype == CPU_R6000 || | 1314 | if (current_cpu_data.cputype == CPU_R6000 || |
1043 | current_cpu_data.cputype == CPU_R6000A) { | 1315 | current_cpu_data.cputype == CPU_R6000A) { |
@@ -1053,10 +1325,37 @@ void __init trap_init(void) | |||
1053 | //set_except_vector(15, handle_ndc); | 1325 | //set_except_vector(15, handle_ndc); |
1054 | } | 1326 | } |
1055 | 1327 | ||
1328 | |||
1329 | if (board_nmi_handler_setup) | ||
1330 | board_nmi_handler_setup(); | ||
1331 | |||
1332 | if (cpu_has_fpu && !cpu_has_nofpuex) | ||
1333 | set_except_vector(15, handle_fpe); | ||
1334 | |||
1335 | set_except_vector(22, handle_mdmx); | ||
1336 | |||
1337 | if (cpu_has_mcheck) | ||
1338 | set_except_vector(24, handle_mcheck); | ||
1339 | |||
1340 | if (cpu_has_mipsmt) | ||
1341 | set_except_vector(25, handle_mt); | ||
1342 | |||
1343 | if (cpu_has_dsp) | ||
1344 | set_except_vector(26, handle_dsp); | ||
1345 | |||
1346 | if (cpu_has_vce) | ||
1347 | /* Special exception: R4[04]00 uses also the divec space. */ | ||
1348 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100); | ||
1349 | else if (cpu_has_4kex) | ||
1350 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | ||
1351 | else | ||
1352 | memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80); | ||
1353 | |||
1056 | signal_init(); | 1354 | signal_init(); |
1057 | #ifdef CONFIG_MIPS32_COMPAT | 1355 | #ifdef CONFIG_MIPS32_COMPAT |
1058 | signal32_init(); | 1356 | signal32_init(); |
1059 | #endif | 1357 | #endif |
1060 | 1358 | ||
1061 | flush_icache_range(CAC_BASE, CAC_BASE + 0x400); | 1359 | flush_icache_range(ebase, ebase + 0x400); |
1360 | flush_tlb_handlers(); | ||
1062 | } | 1361 | } |
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 36c5212e0928..5b5a3736cbbc 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -94,7 +94,7 @@ unsigned long unaligned_instructions; | |||
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | static inline int emulate_load_store_insn(struct pt_regs *regs, | 96 | static inline int emulate_load_store_insn(struct pt_regs *regs, |
97 | void *addr, unsigned long pc, | 97 | void __user *addr, unsigned int __user *pc, |
98 | unsigned long **regptr, unsigned long *newvalue) | 98 | unsigned long **regptr, unsigned long *newvalue) |
99 | { | 99 | { |
100 | union mips_instruction insn; | 100 | union mips_instruction insn; |
@@ -107,7 +107,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs, | |||
107 | /* | 107 | /* |
108 | * This load never faults. | 108 | * This load never faults. |
109 | */ | 109 | */ |
110 | __get_user(insn.word, (unsigned int *)pc); | 110 | __get_user(insn.word, pc); |
111 | 111 | ||
112 | switch (insn.i_format.opcode) { | 112 | switch (insn.i_format.opcode) { |
113 | /* | 113 | /* |
@@ -494,8 +494,8 @@ asmlinkage void do_ade(struct pt_regs *regs) | |||
494 | { | 494 | { |
495 | unsigned long *regptr, newval; | 495 | unsigned long *regptr, newval; |
496 | extern int do_dsemulret(struct pt_regs *); | 496 | extern int do_dsemulret(struct pt_regs *); |
497 | unsigned int __user *pc; | ||
497 | mm_segment_t seg; | 498 | mm_segment_t seg; |
498 | unsigned long pc; | ||
499 | 499 | ||
500 | /* | 500 | /* |
501 | * Address errors may be deliberately induced by the FPU emulator to | 501 | * Address errors may be deliberately induced by the FPU emulator to |
@@ -515,7 +515,7 @@ asmlinkage void do_ade(struct pt_regs *regs) | |||
515 | if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) | 515 | if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) |
516 | goto sigbus; | 516 | goto sigbus; |
517 | 517 | ||
518 | pc = exception_epc(regs); | 518 | pc = (unsigned int __user *) exception_epc(regs); |
519 | if ((current->thread.mflags & MF_FIXADE) == 0) | 519 | if ((current->thread.mflags & MF_FIXADE) == 0) |
520 | goto sigbus; | 520 | goto sigbus; |
521 | 521 | ||
@@ -526,7 +526,7 @@ asmlinkage void do_ade(struct pt_regs *regs) | |||
526 | seg = get_fs(); | 526 | seg = get_fs(); |
527 | if (!user_mode(regs)) | 527 | if (!user_mode(regs)) |
528 | set_fs(KERNEL_DS); | 528 | set_fs(KERNEL_DS); |
529 | if (!emulate_load_store_insn(regs, (void *)regs->cp0_badvaddr, pc, | 529 | if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc, |
530 | ®ptr, &newval)) { | 530 | ®ptr, &newval)) { |
531 | compute_return_epc(regs); | 531 | compute_return_epc(regs); |
532 | /* | 532 | /* |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 482ac310c937..25cc856d8e7e 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -54,13 +54,6 @@ SECTIONS | |||
54 | 54 | ||
55 | *(.data) | 55 | *(.data) |
56 | 56 | ||
57 | /* Align the initial ramdisk image (INITRD) on page boundaries. */ | ||
58 | . = ALIGN(4096); | ||
59 | __rd_start = .; | ||
60 | *(.initrd) | ||
61 | . = ALIGN(4096); | ||
62 | __rd_end = .; | ||
63 | |||
64 | CONSTRUCTORS | 57 | CONSTRUCTORS |
65 | } | 58 | } |
66 | _gp = . + 0x8000; | 59 | _gp = . + 0x8000; |
@@ -96,12 +89,6 @@ SECTIONS | |||
96 | .init.setup : { *(.init.setup) } | 89 | .init.setup : { *(.init.setup) } |
97 | __setup_end = .; | 90 | __setup_end = .; |
98 | 91 | ||
99 | .early_initcall.init : { | ||
100 | __earlyinitcall_start = .; | ||
101 | *(.initcall.early1.init) | ||
102 | } | ||
103 | __earlyinitcall_end = .; | ||
104 | |||
105 | __initcall_start = .; | 92 | __initcall_start = .; |
106 | .initcall.init : { | 93 | .initcall.init : { |
107 | *(.initcall1.init) | 94 | *(.initcall1.init) |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c new file mode 100644 index 000000000000..97fefcc9dbe7 --- /dev/null +++ b/arch/mips/kernel/vpe.c | |||
@@ -0,0 +1,1296 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * VPE support module | ||
21 | * | ||
22 | * Provides support for loading a MIPS SP program on VPE1. | ||
23 | * The SP environment is rather simple, no TLBs. It needs to be relocatable | ||
24 | * (or partially linked). You should initialise your stack in the startup | ||
25 | * code. This loader looks for the symbol __start and sets up | ||
26 | * execution to resume from there. The MIPS SDE kit contains suitable examples. | ||
27 | * | ||
28 | * To load and run, simply cat an SP 'program file' to /dev/vpe1. | ||
29 | * i.e. cat spapp >/dev/vpe1. | ||
30 | * | ||
31 | * You'll need to have the following device files. | ||
32 | * mknod /dev/vpe0 c 63 0 | ||
33 | * mknod /dev/vpe1 c 63 1 | ||
34 | */ | ||
35 | #include <linux/config.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/fs.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/list.h> | ||
43 | #include <linux/vmalloc.h> | ||
44 | #include <linux/elf.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/syscalls.h> | ||
47 | #include <linux/moduleloader.h> | ||
48 | #include <linux/interrupt.h> | ||
49 | #include <linux/poll.h> | ||
50 | #include <linux/bootmem.h> | ||
51 | #include <asm/mipsregs.h> | ||
52 | #include <asm/mipsmtregs.h> | ||
53 | #include <asm/cacheflush.h> | ||
54 | #include <asm/atomic.h> | ||
55 | #include <asm/cpu.h> | ||
56 | #include <asm/processor.h> | ||
57 | #include <asm/system.h> | ||
58 | |||
59 | typedef void *vpe_handle; | ||
60 | |||
61 | // defined here because the kernel module loader doesn't have | ||
62 | // anything to do with it. | ||
63 | #define SHN_MIPS_SCOMMON 0xff03 | ||
64 | |||
65 | #ifndef ARCH_SHF_SMALL | ||
66 | #define ARCH_SHF_SMALL 0 | ||
67 | #endif | ||
68 | |||
69 | /* If this is set, the section belongs in the init part of the module */ | ||
70 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | ||
71 | |||
72 | // temporary device major number | ||
73 | #define VPE_MAJOR 63 | ||
74 | |||
75 | static char module_name[] = "vpe"; | ||
76 | static int major = 0; | ||
77 | |||
78 | /* grab the likely amount of memory we will need. */ | ||
79 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
80 | #define P_SIZE (2 * 1024 * 1024) | ||
81 | #else | ||
82 | /* add an overhead to the max kmalloc size for non-stripped symbols, etc. */ | ||
83 | #define P_SIZE (256 * 1024) | ||
84 | #endif | ||
85 | |||
86 | #define MAX_VPES 16 | ||
87 | |||
88 | enum vpe_state { | ||
89 | VPE_STATE_UNUSED = 0, | ||
90 | VPE_STATE_INUSE, | ||
91 | VPE_STATE_RUNNING | ||
92 | }; | ||
93 | |||
94 | enum tc_state { | ||
95 | TC_STATE_UNUSED = 0, | ||
96 | TC_STATE_INUSE, | ||
97 | TC_STATE_RUNNING, | ||
98 | TC_STATE_DYNAMIC | ||
99 | }; | ||
100 | |||
101 | struct vpe; | ||
102 | typedef struct tc { | ||
103 | enum tc_state state; | ||
104 | int index; | ||
105 | |||
106 | /* parent VPE */ | ||
107 | struct vpe *pvpe; | ||
108 | |||
109 | /* The list of TC's with this VPE */ | ||
110 | struct list_head tc; | ||
111 | |||
112 | /* The global list of tc's */ | ||
113 | struct list_head list; | ||
114 | } tc_t; | ||
115 | |||
116 | typedef struct vpe { | ||
117 | enum vpe_state state; | ||
118 | |||
119 | /* (device) minor associated with this vpe */ | ||
120 | int minor; | ||
121 | |||
122 | /* elfloader stuff */ | ||
123 | void *load_addr; | ||
124 | u32 len; | ||
125 | char *pbuffer; | ||
126 | u32 plen; | ||
127 | |||
128 | unsigned long __start; | ||
129 | |||
130 | /* tc's associated with this vpe */ | ||
131 | struct list_head tc; | ||
132 | |||
133 | /* The list of vpe's */ | ||
134 | struct list_head list; | ||
135 | |||
136 | /* shared symbol address */ | ||
137 | void *shared_ptr; | ||
138 | } vpe_t; | ||
139 | |||
140 | struct vpecontrol_ { | ||
141 | /* Virtual processing elements */ | ||
142 | struct list_head vpe_list; | ||
143 | |||
144 | /* Thread contexts */ | ||
145 | struct list_head tc_list; | ||
146 | } vpecontrol; | ||
147 | |||
148 | static void release_progmem(void *ptr); | ||
149 | static void dump_vpe(vpe_t * v); | ||
150 | extern void save_gp_address(unsigned int secbase, unsigned int rel); | ||
151 | |||
152 | /* get the vpe associated with this minor */ | ||
153 | struct vpe *get_vpe(int minor) | ||
154 | { | ||
155 | struct vpe *v; | ||
156 | |||
157 | list_for_each_entry(v, &vpecontrol.vpe_list, list) { | ||
158 | if (v->minor == minor) | ||
159 | return v; | ||
160 | } | ||
161 | |||
162 | printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor); | ||
163 | return NULL; | ||
164 | } | ||
165 | |||
166 | /* get the tc associated with this index */ | ||
167 | struct tc *get_tc(int index) | ||
168 | { | ||
169 | struct tc *t; | ||
170 | |||
171 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
172 | if (t->index == index) | ||
173 | return t; | ||
174 | } | ||
175 | |||
176 | printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index); | ||
177 | |||
178 | return NULL; | ||
179 | } | ||
180 | |||
181 | struct tc *get_tc_unused(void) | ||
182 | { | ||
183 | struct tc *t; | ||
184 | |||
185 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
186 | if (t->state == TC_STATE_UNUSED) | ||
187 | return t; | ||
188 | } | ||
189 | |||
190 | printk(KERN_DEBUG "VPE: All TC's are in use\n"); | ||
191 | |||
192 | return NULL; | ||
193 | } | ||
194 | |||
195 | /* allocate a vpe and associate it with this minor (or index) */ | ||
196 | struct vpe *alloc_vpe(int minor) | ||
197 | { | ||
198 | struct vpe *v; | ||
199 | |||
200 | if ((v = kmalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) { | ||
201 | printk(KERN_WARNING "VPE: alloc_vpe no mem\n"); | ||
202 | return NULL; | ||
203 | } | ||
204 | |||
205 | memset(v, 0, sizeof(struct vpe)); | ||
206 | |||
207 | INIT_LIST_HEAD(&v->tc); | ||
208 | list_add_tail(&v->list, &vpecontrol.vpe_list); | ||
209 | |||
210 | v->minor = minor; | ||
211 | return v; | ||
212 | } | ||
213 | |||
214 | /* allocate a tc. At startup only tc0 is running, all others can be halted. */ | ||
215 | struct tc *alloc_tc(int index) | ||
216 | { | ||
217 | struct tc *t; | ||
218 | |||
219 | if ((t = kmalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { | ||
220 | printk(KERN_WARNING "VPE: alloc_tc no mem\n"); | ||
221 | return NULL; | ||
222 | } | ||
223 | |||
224 | memset(t, 0, sizeof(struct tc)); | ||
225 | |||
226 | INIT_LIST_HEAD(&t->tc); | ||
227 | list_add_tail(&t->list, &vpecontrol.tc_list); | ||
228 | |||
229 | t->index = index; | ||
230 | |||
231 | return t; | ||
232 | } | ||
233 | |||
234 | /* clean up and free everything */ | ||
235 | void release_vpe(struct vpe *v) | ||
236 | { | ||
237 | list_del(&v->list); | ||
238 | if (v->load_addr) | ||
239 | release_progmem(v); | ||
240 | kfree(v); | ||
241 | } | ||
242 | |||
243 | void dump_mtregs(void) | ||
244 | { | ||
245 | unsigned long val; | ||
246 | |||
247 | val = read_c0_config3(); | ||
248 | printk("config3 0x%lx MT %ld\n", val, | ||
249 | (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); | ||
250 | |||
251 | val = read_c0_mvpconf0(); | ||
252 | printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, | ||
253 | (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, | ||
254 | val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); | ||
255 | |||
256 | val = read_c0_mvpcontrol(); | ||
257 | printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, | ||
258 | (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, | ||
259 | (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, | ||
260 | (val & MVPCONTROL_EVP)); | ||
261 | |||
262 | val = read_c0_vpeconf0(); | ||
263 | printk("VPEConf0 0x%lx MVP %ld\n", val, | ||
264 | (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); | ||
265 | } | ||
266 | |||
267 | /* Find some VPE program space */ | ||
268 | static void *alloc_progmem(u32 len) | ||
269 | { | ||
270 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
271 | /* this means you must tell linux to use less memory than you physically have */ | ||
272 | return (void *)((max_pfn * PAGE_SIZE) + KSEG0); | ||
273 | #else | ||
274 | // simply grab some memory for now | ||
275 | return kmalloc(len, GFP_KERNEL); | ||
276 | #endif | ||
277 | } | ||
278 | |||
279 | static void release_progmem(void *ptr) | ||
280 | { | ||
281 | #ifndef CONFIG_MIPS_VPE_LOADER_TOM | ||
282 | kfree(ptr); | ||
283 | #endif | ||
284 | } | ||
285 | |||
286 | /* Update size with this section: return offset. */ | ||
287 | static long get_offset(unsigned long *size, Elf_Shdr * sechdr) | ||
288 | { | ||
289 | long ret; | ||
290 | |||
291 | ret = ALIGN(*size, sechdr->sh_addralign ? : 1); | ||
292 | *size = ret + sechdr->sh_size; | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld | ||
297 | might -- code, read-only data, read-write data, small data. Tally | ||
298 | sizes, and place the offsets into sh_entsize fields: high bit means it | ||
299 | belongs in init. */ | ||
300 | static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | ||
301 | Elf_Shdr * sechdrs, const char *secstrings) | ||
302 | { | ||
303 | static unsigned long const masks[][2] = { | ||
304 | /* NOTE: all executable code must be the first section | ||
305 | * in this array; otherwise modify the text_size | ||
306 | * finder in the two loops below */ | ||
307 | {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
308 | {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL}, | ||
309 | {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
310 | {ARCH_SHF_SMALL | SHF_ALLOC, 0} | ||
311 | }; | ||
312 | unsigned int m, i; | ||
313 | |||
314 | for (i = 0; i < hdr->e_shnum; i++) | ||
315 | sechdrs[i].sh_entsize = ~0UL; | ||
316 | |||
317 | for (m = 0; m < ARRAY_SIZE(masks); ++m) { | ||
318 | for (i = 0; i < hdr->e_shnum; ++i) { | ||
319 | Elf_Shdr *s = &sechdrs[i]; | ||
320 | |||
321 | // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) | ||
322 | if ((s->sh_flags & masks[m][0]) != masks[m][0] | ||
323 | || (s->sh_flags & masks[m][1]) | ||
324 | || s->sh_entsize != ~0UL) | ||
325 | continue; | ||
326 | s->sh_entsize = get_offset(&mod->core_size, s); | ||
327 | } | ||
328 | |||
329 | if (m == 0) | ||
330 | mod->core_text_size = mod->core_size; | ||
331 | |||
332 | } | ||
333 | } | ||
334 | |||
335 | |||
336 | /* from module-elf32.c, but subverted a little */ | ||
337 | |||
338 | struct mips_hi16 { | ||
339 | struct mips_hi16 *next; | ||
340 | Elf32_Addr *addr; | ||
341 | Elf32_Addr value; | ||
342 | }; | ||
343 | |||
344 | static struct mips_hi16 *mips_hi16_list; | ||
345 | static unsigned int gp_offs, gp_addr; | ||
346 | |||
347 | static int apply_r_mips_none(struct module *me, uint32_t *location, | ||
348 | Elf32_Addr v) | ||
349 | { | ||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int apply_r_mips_gprel16(struct module *me, uint32_t *location, | ||
354 | Elf32_Addr v) | ||
355 | { | ||
356 | int rel; | ||
357 | |||
358 | if( !(*location & 0xffff) ) { | ||
359 | rel = (int)v - gp_addr; | ||
360 | } | ||
361 | else { | ||
362 | /* .sbss + gp(relative) + offset */ | ||
363 | /* kludge! */ | ||
364 | rel = (int)(short)((int)v + gp_offs + | ||
365 | (int)(short)(*location & 0xffff) - gp_addr); | ||
366 | } | ||
367 | |||
368 | if( (rel > 32768) || (rel < -32768) ) { | ||
369 | printk(KERN_ERR | ||
370 | "apply_r_mips_gprel16: relative address out of range 0x%x %d\n", | ||
371 | rel, rel); | ||
372 | return -ENOEXEC; | ||
373 | } | ||
374 | |||
375 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static int apply_r_mips_pc16(struct module *me, uint32_t *location, | ||
381 | Elf32_Addr v) | ||
382 | { | ||
383 | int rel; | ||
384 | rel = (((unsigned int)v - (unsigned int)location)); | ||
385 | rel >>= 2; // because the offset is in _instructions_ not bytes. | ||
386 | rel -= 1; // and one instruction less due to the branch delay slot. | ||
387 | |||
388 | if( (rel > 32768) || (rel < -32768) ) { | ||
389 | printk(KERN_ERR | ||
390 | "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); | ||
391 | return -ENOEXEC; | ||
392 | } | ||
393 | |||
394 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
395 | |||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static int apply_r_mips_32(struct module *me, uint32_t *location, | ||
400 | Elf32_Addr v) | ||
401 | { | ||
402 | *location += v; | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static int apply_r_mips_26(struct module *me, uint32_t *location, | ||
408 | Elf32_Addr v) | ||
409 | { | ||
410 | if (v % 4) { | ||
411 | printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name); | ||
412 | return -ENOEXEC; | ||
413 | } | ||
414 | |||
415 | /* Not desperately convinced this is a good check of an overflow condition | ||
416 | anyway. But it gets in the way of handling undefined weak symbols which | ||
417 | we want to set to zero. | ||
418 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
419 | printk(KERN_ERR | ||
420 | "module %s: relocation overflow\n", | ||
421 | me->name); | ||
422 | return -ENOEXEC; | ||
423 | } | ||
424 | */ | ||
425 | |||
426 | *location = (*location & ~0x03ffffff) | | ||
427 | ((*location + (v >> 2)) & 0x03ffffff); | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static int apply_r_mips_hi16(struct module *me, uint32_t *location, | ||
432 | Elf32_Addr v) | ||
433 | { | ||
434 | struct mips_hi16 *n; | ||
435 | |||
436 | /* | ||
437 | * We cannot relocate this one now because we don't know the value of | ||
438 | * the carry we need to add. Save the information, and let LO16 do the | ||
439 | * actual relocation. | ||
440 | */ | ||
441 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
442 | if (!n) | ||
443 | return -ENOMEM; | ||
444 | |||
445 | n->addr = location; | ||
446 | n->value = v; | ||
447 | n->next = mips_hi16_list; | ||
448 | mips_hi16_list = n; | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static int apply_r_mips_lo16(struct module *me, uint32_t *location, | ||
454 | Elf32_Addr v) | ||
455 | { | ||
456 | unsigned long insnlo = *location; | ||
457 | Elf32_Addr val, vallo; | ||
458 | |||
459 | /* Sign extend the addend we extract from the lo insn. */ | ||
460 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
461 | |||
462 | if (mips_hi16_list != NULL) { | ||
463 | struct mips_hi16 *l; | ||
464 | |||
465 | l = mips_hi16_list; | ||
466 | while (l != NULL) { | ||
467 | struct mips_hi16 *next; | ||
468 | unsigned long insn; | ||
469 | |||
470 | /* | ||
471 | * The value for the HI16 had best be the same. | ||
472 | */ | ||
473 | if (v != l->value) { | ||
474 | printk("%d != %d\n", v, l->value); | ||
475 | goto out_danger; | ||
476 | } | ||
477 | |||
478 | |||
479 | /* | ||
480 | * Do the HI16 relocation. Note that we actually don't | ||
481 | * need to know anything about the LO16 itself, except | ||
482 | * where to find the low 16 bits of the addend needed | ||
483 | * by the LO16. | ||
484 | */ | ||
485 | insn = *l->addr; | ||
486 | val = ((insn & 0xffff) << 16) + vallo; | ||
487 | val += v; | ||
488 | |||
489 | /* | ||
490 | * Account for the sign extension that will happen in | ||
491 | * the low bits. | ||
492 | */ | ||
493 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
494 | |||
495 | insn = (insn & ~0xffff) | val; | ||
496 | *l->addr = insn; | ||
497 | |||
498 | next = l->next; | ||
499 | kfree(l); | ||
500 | l = next; | ||
501 | } | ||
502 | |||
503 | mips_hi16_list = NULL; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
508 | */ | ||
509 | val = v + vallo; | ||
510 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
511 | *location = insnlo; | ||
512 | |||
513 | return 0; | ||
514 | |||
515 | out_danger: | ||
516 | printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name); | ||
517 | |||
518 | return -ENOEXEC; | ||
519 | } | ||
520 | |||
521 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | ||
522 | Elf32_Addr v) = { | ||
523 | [R_MIPS_NONE] = apply_r_mips_none, | ||
524 | [R_MIPS_32] = apply_r_mips_32, | ||
525 | [R_MIPS_26] = apply_r_mips_26, | ||
526 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
527 | [R_MIPS_LO16] = apply_r_mips_lo16, | ||
528 | [R_MIPS_GPREL16] = apply_r_mips_gprel16, | ||
529 | [R_MIPS_PC16] = apply_r_mips_pc16 | ||
530 | }; | ||
531 | |||
532 | |||
533 | int apply_relocations(Elf32_Shdr *sechdrs, | ||
534 | const char *strtab, | ||
535 | unsigned int symindex, | ||
536 | unsigned int relsec, | ||
537 | struct module *me) | ||
538 | { | ||
539 | Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; | ||
540 | Elf32_Sym *sym; | ||
541 | uint32_t *location; | ||
542 | unsigned int i; | ||
543 | Elf32_Addr v; | ||
544 | int res; | ||
545 | |||
546 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
547 | Elf32_Word r_info = rel[i].r_info; | ||
548 | |||
549 | /* This is where to make the change */ | ||
550 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
551 | + rel[i].r_offset; | ||
552 | /* This is the symbol it is referring to */ | ||
553 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
554 | + ELF32_R_SYM(r_info); | ||
555 | |||
556 | if (!sym->st_value) { | ||
557 | printk(KERN_DEBUG "%s: undefined weak symbol %s\n", | ||
558 | me->name, strtab + sym->st_name); | ||
559 | /* just print the warning, don't barf */ | ||
560 | } | ||
561 | |||
562 | v = sym->st_value; | ||
563 | |||
564 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); | ||
565 | if( res ) { | ||
566 | printk(KERN_DEBUG | ||
567 | "relocation error 0x%x sym refer <%s> value 0x%x " | ||
568 | "type 0x%x r_info 0x%x\n", | ||
569 | (unsigned int)location, strtab + sym->st_name, v, | ||
570 | r_info, ELF32_R_TYPE(r_info)); | ||
571 | } | ||
572 | |||
573 | if (res) | ||
574 | return res; | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | void save_gp_address(unsigned int secbase, unsigned int rel) | ||
581 | { | ||
582 | gp_addr = secbase + rel; | ||
583 | gp_offs = gp_addr - (secbase & 0xffff0000); | ||
584 | } | ||
585 | /* end module-elf32.c */ | ||
586 | |||
587 | |||
588 | |||
589 | /* Change all symbols so that sh_value encodes the pointer directly. */ | ||
590 | static int simplify_symbols(Elf_Shdr * sechdrs, | ||
591 | unsigned int symindex, | ||
592 | const char *strtab, | ||
593 | const char *secstrings, | ||
594 | unsigned int nsecs, struct module *mod) | ||
595 | { | ||
596 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
597 | unsigned long secbase, bssbase = 0; | ||
598 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
599 | int ret = 0, size; | ||
600 | |||
601 | /* find the .bss section for COMMON symbols */ | ||
602 | for (i = 0; i < nsecs; i++) { | ||
603 | if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) | ||
604 | bssbase = sechdrs[i].sh_addr; | ||
605 | } | ||
606 | |||
607 | for (i = 1; i < n; i++) { | ||
608 | switch (sym[i].st_shndx) { | ||
609 | case SHN_COMMON: | ||
610 | /* Allocate space for the symbol in the .bss section. st_value is currently size. | ||
611 | We want it to have the address of the symbol. */ | ||
612 | |||
613 | size = sym[i].st_value; | ||
614 | sym[i].st_value = bssbase; | ||
615 | |||
616 | bssbase += size; | ||
617 | break; | ||
618 | |||
619 | case SHN_ABS: | ||
620 | /* Don't need to do anything */ | ||
621 | break; | ||
622 | |||
623 | case SHN_UNDEF: | ||
624 | /* ret = -ENOENT; */ | ||
625 | break; | ||
626 | |||
627 | case SHN_MIPS_SCOMMON: | ||
628 | |||
629 | printk(KERN_DEBUG | ||
630 | "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", | ||
631 | strtab + sym[i].st_name, sym[i].st_shndx); | ||
632 | |||
633 | // .sbss section | ||
634 | break; | ||
635 | |||
636 | default: | ||
637 | secbase = sechdrs[sym[i].st_shndx].sh_addr; | ||
638 | |||
639 | if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { | ||
640 | save_gp_address(secbase, sym[i].st_value); | ||
641 | } | ||
642 | |||
643 | sym[i].st_value += secbase; | ||
644 | break; | ||
645 | } | ||
646 | |||
647 | } | ||
648 | |||
649 | return ret; | ||
650 | } | ||
651 | |||
652 | #ifdef DEBUG_ELFLOADER | ||
653 | static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, | ||
654 | const char *strtab, struct module *mod) | ||
655 | { | ||
656 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
657 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
658 | |||
659 | printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); | ||
660 | for (i = 1; i < n; i++) { | ||
661 | printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, | ||
662 | strtab + sym[i].st_name, sym[i].st_value); | ||
663 | } | ||
664 | } | ||
665 | #endif | ||
666 | |||
667 | static void dump_tc(struct tc *t) | ||
668 | { | ||
669 | printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n", | ||
670 | t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt()); | ||
671 | printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart()); | ||
672 | } | ||
673 | |||
674 | static void dump_tclist(void) | ||
675 | { | ||
676 | struct tc *t; | ||
677 | |||
678 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
679 | dump_tc(t); | ||
680 | } | ||
681 | } | ||
682 | |||
683 | /* We are prepared so configure and start the VPE... */ | ||
684 | int vpe_run(vpe_t * v) | ||
685 | { | ||
686 | unsigned long val; | ||
687 | struct tc *t; | ||
688 | |||
689 | /* check we are the Master VPE */ | ||
690 | val = read_c0_vpeconf0(); | ||
691 | if (!(val & VPECONF0_MVP)) { | ||
692 | printk(KERN_WARNING | ||
693 | "VPE: only Master VPE's are allowed to configure MT\n"); | ||
694 | return -1; | ||
695 | } | ||
696 | |||
697 | /* disable MT (using dvpe) */ | ||
698 | dvpe(); | ||
699 | |||
700 | /* Put MVPE's into 'configuration state' */ | ||
701 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
702 | |||
703 | if (!list_empty(&v->tc)) { | ||
704 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { | ||
705 | printk(KERN_WARNING "VPE: TC %d is already in use.\n", | ||
706 | t->index); | ||
707 | return -ENOEXEC; | ||
708 | } | ||
709 | } else { | ||
710 | printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n", | ||
711 | v->minor); | ||
712 | return -ENOEXEC; | ||
713 | } | ||
714 | |||
715 | settc(t->index); | ||
716 | |||
717 | val = read_vpe_c0_vpeconf0(); | ||
718 | |||
719 | /* should check it is halted, and not activated */ | ||
720 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { | ||
721 | printk(KERN_WARNING "VPE: TC %d is already doing something!\n", | ||
722 | t->index); | ||
723 | |||
724 | dump_tclist(); | ||
725 | return -ENOEXEC; | ||
726 | } | ||
727 | |||
728 | /* Write the address we want it to start running from in the TCPC register. */ | ||
729 | write_tc_c0_tcrestart((unsigned long)v->__start); | ||
730 | |||
731 | /* write the sivc_info address to tccontext */ | ||
732 | write_tc_c0_tccontext((unsigned long)0); | ||
733 | |||
734 | /* Set up the XTC bit in vpeconf0 to point at our tc */ | ||
735 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT)); | ||
736 | |||
737 | /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */ | ||
738 | val = read_tc_c0_tcstatus(); | ||
739 | val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; | ||
740 | write_tc_c0_tcstatus(val); | ||
741 | |||
742 | write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); | ||
743 | |||
744 | /* set up VPE1 */ | ||
745 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's | ||
746 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE | ||
747 | |||
748 | /* | ||
749 | * The sde-kit passes 'memsize' to __start in $a3, so set something | ||
750 | * here... | ||
751 | * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and | ||
752 | * DFLT_HEAP_SIZE when you compile your program | ||
753 | */ | ||
754 | |||
755 | mttgpr(7, 0); | ||
756 | |||
757 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
758 | write_vpe_c0_config(read_c0_config()); | ||
759 | |||
760 | /* clear out any leftovers from a previous program */ | ||
761 | write_vpe_c0_cause(0); | ||
762 | |||
763 | /* take system out of configuration state */ | ||
764 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
765 | |||
766 | /* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */ | ||
767 | write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL)); | ||
768 | |||
769 | /* set it running */ | ||
770 | evpe(EVPE_ENABLE); | ||
771 | |||
772 | return 0; | ||
773 | } | ||
774 | |||
775 | static unsigned long find_vpe_symbols(vpe_t * v, Elf_Shdr * sechdrs, | ||
776 | unsigned int symindex, const char *strtab, | ||
777 | struct module *mod) | ||
778 | { | ||
779 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
780 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
781 | |||
782 | for (i = 1; i < n; i++) { | ||
783 | if (strcmp(strtab + sym[i].st_name, "__start") == 0) { | ||
784 | v->__start = sym[i].st_value; | ||
785 | } | ||
786 | |||
787 | if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { | ||
788 | v->shared_ptr = (void *)sym[i].st_value; | ||
789 | } | ||
790 | } | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | /* Allocates a VPE with some program code space (the load address), copies the contents | ||
796 | of the program (p)buffer performing relocations, etc., and frees it when finished. | ||
797 | */ | ||
798 | int vpe_elfload(vpe_t * v) | ||
799 | { | ||
800 | Elf_Ehdr *hdr; | ||
801 | Elf_Shdr *sechdrs; | ||
802 | long err = 0; | ||
803 | char *secstrings, *strtab = NULL; | ||
804 | unsigned int len, i, symindex = 0, strindex = 0; | ||
805 | |||
806 | struct module mod; // so we can re-use the relocations code | ||
807 | |||
808 | memset(&mod, 0, sizeof(struct module)); | ||
809 | strcpy(mod.name, "VPE dummy prog module"); | ||
810 | |||
811 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
812 | len = v->plen; | ||
813 | |||
814 | /* Sanity checks against insmoding binaries or wrong arch, | ||
815 | weird elf version */ | ||
816 | if (memcmp(hdr->e_ident, ELFMAG, 4) != 0 | ||
817 | || hdr->e_type != ET_REL || !elf_check_arch(hdr) | ||
818 | || hdr->e_shentsize != sizeof(*sechdrs)) { | ||
819 | printk(KERN_WARNING | ||
820 | "VPE program, wrong arch or weird elf version\n"); | ||
821 | |||
822 | return -ENOEXEC; | ||
823 | } | ||
824 | |||
825 | if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { | ||
826 | printk(KERN_ERR "VPE program length %u truncated\n", len); | ||
827 | return -ENOEXEC; | ||
828 | } | ||
829 | |||
830 | /* Convenience variables */ | ||
831 | sechdrs = (void *)hdr + hdr->e_shoff; | ||
832 | secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
833 | sechdrs[0].sh_addr = 0; | ||
834 | |||
835 | /* And these should exist, but gcc whinges if we don't init them */ | ||
836 | symindex = strindex = 0; | ||
837 | |||
838 | for (i = 1; i < hdr->e_shnum; i++) { | ||
839 | |||
840 | if (sechdrs[i].sh_type != SHT_NOBITS | ||
841 | && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { | ||
842 | printk(KERN_ERR "VPE program length %u truncated\n", | ||
843 | len); | ||
844 | return -ENOEXEC; | ||
845 | } | ||
846 | |||
847 | /* Mark all sections sh_addr with their address in the | ||
848 | temporary image. */ | ||
849 | sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; | ||
850 | |||
851 | /* Internal symbols and strings. */ | ||
852 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | ||
853 | symindex = i; | ||
854 | strindex = sechdrs[i].sh_link; | ||
855 | strtab = (char *)hdr + sechdrs[strindex].sh_offset; | ||
856 | } | ||
857 | } | ||
858 | |||
859 | layout_sections(&mod, hdr, sechdrs, secstrings); | ||
860 | |||
861 | v->load_addr = alloc_progmem(mod.core_size); | ||
862 | memset(v->load_addr, 0, mod.core_size); | ||
863 | |||
864 | printk("VPE elf_loader: loading to %p\n", v->load_addr); | ||
865 | |||
866 | for (i = 0; i < hdr->e_shnum; i++) { | ||
867 | void *dest; | ||
868 | |||
869 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | ||
870 | continue; | ||
871 | |||
872 | dest = v->load_addr + sechdrs[i].sh_entsize; | ||
873 | |||
874 | if (sechdrs[i].sh_type != SHT_NOBITS) | ||
875 | memcpy(dest, (void *)sechdrs[i].sh_addr, | ||
876 | sechdrs[i].sh_size); | ||
877 | /* Update sh_addr to point to copy in image. */ | ||
878 | sechdrs[i].sh_addr = (unsigned long)dest; | ||
879 | } | ||
880 | |||
881 | /* Fix up syms, so that st_value is a pointer to location. */ | ||
882 | err = | ||
883 | simplify_symbols(sechdrs, symindex, strtab, secstrings, | ||
884 | hdr->e_shnum, &mod); | ||
885 | if (err < 0) { | ||
886 | printk(KERN_WARNING "VPE: unable to simplify symbols\n"); | ||
887 | goto cleanup; | ||
888 | } | ||
889 | |||
890 | /* Now do relocations. */ | ||
891 | for (i = 1; i < hdr->e_shnum; i++) { | ||
892 | const char *strtab = (char *)sechdrs[strindex].sh_addr; | ||
893 | unsigned int info = sechdrs[i].sh_info; | ||
894 | |||
895 | /* Not a valid relocation section? */ | ||
896 | if (info >= hdr->e_shnum) | ||
897 | continue; | ||
898 | |||
899 | /* Don't bother with non-allocated sections */ | ||
900 | if (!(sechdrs[info].sh_flags & SHF_ALLOC)) | ||
901 | continue; | ||
902 | |||
903 | if (sechdrs[i].sh_type == SHT_REL) | ||
904 | err = | ||
905 | apply_relocations(sechdrs, strtab, symindex, i, &mod); | ||
906 | else if (sechdrs[i].sh_type == SHT_RELA) | ||
907 | err = apply_relocate_add(sechdrs, strtab, symindex, i, | ||
908 | &mod); | ||
909 | if (err < 0) { | ||
910 | printk(KERN_WARNING | ||
911 | "vpe_elfload: error in relocations err %ld\n", | ||
912 | err); | ||
913 | goto cleanup; | ||
914 | } | ||
915 | } | ||
916 | |||
917 | /* make sure it's physically written out */ | ||
918 | flush_icache_range((unsigned long)v->load_addr, | ||
919 | (unsigned long)v->load_addr + v->len); | ||
920 | |||
921 | if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { | ||
922 | |||
923 | printk(KERN_WARNING | ||
924 | "VPE: program doesn't contain __start or vpe_shared symbols\n"); | ||
925 | err = -ENOEXEC; | ||
926 | } | ||
927 | |||
928 | printk(" elf loaded\n"); | ||
929 | |||
930 | cleanup: | ||
931 | return err; | ||
932 | } | ||
933 | |||
934 | static void dump_vpe(vpe_t * v) | ||
935 | { | ||
936 | struct tc *t; | ||
937 | |||
938 | printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); | ||
939 | printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); | ||
940 | |||
941 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
942 | dump_tc(t); | ||
943 | } | ||
944 | } | ||
945 | |||
946 | /* check that the VPE is unused and get ready to load a program */ | ||
947 | static int vpe_open(struct inode *inode, struct file *filp) | ||
948 | { | ||
949 | int minor; | ||
950 | vpe_t *v; | ||
951 | |||
952 | /* assume only 1 device at the moment */ | ||
953 | if ((minor = MINOR(inode->i_rdev)) != 1) { | ||
954 | printk(KERN_WARNING "VPE: only vpe1 is supported\n"); | ||
955 | return -ENODEV; | ||
956 | } | ||
957 | |||
958 | if ((v = get_vpe(minor)) == NULL) { | ||
959 | printk(KERN_WARNING "VPE: unable to get vpe\n"); | ||
960 | return -ENODEV; | ||
961 | } | ||
962 | |||
963 | if (v->state != VPE_STATE_UNUSED) { | ||
964 | unsigned long tmp; | ||
965 | struct tc *t; | ||
966 | |||
967 | printk(KERN_WARNING "VPE: device %d already in use\n", minor); | ||
968 | |||
969 | dvpe(); | ||
970 | dump_vpe(v); | ||
971 | |||
972 | printk(KERN_WARNING "VPE: re-initialising %d\n", minor); | ||
973 | |||
974 | release_progmem(v->load_addr); | ||
975 | |||
976 | t = get_tc(minor); | ||
977 | settc(minor); | ||
978 | tmp = read_tc_c0_tcstatus(); | ||
979 | |||
980 | /* mark not allocated and not dynamically allocatable */ | ||
981 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
982 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
983 | write_tc_c0_tcstatus(tmp); | ||
984 | |||
985 | write_tc_c0_tchalt(TCHALT_H); | ||
986 | |||
987 | } | ||
988 | |||
989 | /* mark it allocated so that when write ops arrive we know they are expected */ | ||
990 | v->state = VPE_STATE_INUSE; | ||
991 | |||
992 | /* this of course trashes what was there before... */ | ||
993 | v->pbuffer = vmalloc(P_SIZE); | ||
994 | v->plen = P_SIZE; | ||
995 | v->load_addr = NULL; | ||
996 | v->len = 0; | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static int vpe_release(struct inode *inode, struct file *filp) | ||
1002 | { | ||
1003 | int minor, ret = 0; | ||
1004 | vpe_t *v; | ||
1005 | Elf_Ehdr *hdr; | ||
1006 | |||
1007 | minor = MINOR(inode->i_rdev); | ||
1008 | if ((v = get_vpe(minor)) == NULL) | ||
1009 | return -ENODEV; | ||
1010 | |||
1011 | /* simple case of fire and forget, so tell the VPE to run... */ | ||
1012 | |||
1013 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
1014 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { | ||
1015 | if (vpe_elfload(v) >= 0) | ||
1016 | vpe_run(v); | ||
1017 | else { | ||
1018 | printk(KERN_WARNING "VPE: ELF load failed.\n"); | ||
1019 | ret = -ENOEXEC; | ||
1020 | } | ||
1021 | } else { | ||
1022 | printk(KERN_WARNING "VPE: only elf files are supported\n"); | ||
1023 | ret = -ENOEXEC; | ||
1024 | } | ||
1025 | |||
1026 | /* clean up any temp buffers */ | ||
1027 | if (v->pbuffer) | ||
1028 | vfree(v->pbuffer); | ||
1029 | v->plen = 0; | ||
1030 | return ret; | ||
1031 | } | ||
1032 | |||
1033 | static ssize_t vpe_write(struct file *file, const char __user * buffer, | ||
1034 | size_t count, loff_t * ppos) | ||
1035 | { | ||
1036 | int minor; | ||
1037 | size_t ret = count; | ||
1038 | vpe_t *v; | ||
1039 | |||
1040 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
1041 | if ((v = get_vpe(minor)) == NULL) | ||
1042 | return -ENODEV; | ||
1043 | |||
1044 | if (v->pbuffer == NULL) { | ||
1045 | printk(KERN_ERR "vpe_write: no pbuffer\n"); | ||
1046 | return -ENOMEM; | ||
1047 | } | ||
1048 | |||
1049 | if ((count + v->len) > v->plen) { | ||
1050 | printk(KERN_WARNING | ||
1051 | "VPE Loader: elf size too big. Perhaps strip uneeded symbols\n"); | ||
1052 | return -ENOMEM; | ||
1053 | } | ||
1054 | |||
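| /* copy_from_user() returns the number of bytes that could not be copied, so count only reaches zero if nothing at all was transferred */ | ||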
1055 | count -= copy_from_user(v->pbuffer + v->len, buffer, count); | ||
1056 | if (!count) { | ||
1057 | printk("vpe_write: copy_to_user failed\n"); | ||
1058 | return -EFAULT; | ||
1059 | } | ||
1060 | |||
1061 | v->len += count; | ||
1062 | return ret; | ||
1063 | } | ||
1064 | |||
1065 | static struct file_operations vpe_fops = { | ||
1066 | .owner = THIS_MODULE, | ||
1067 | .open = vpe_open, | ||
1068 | .release = vpe_release, | ||
1069 | .write = vpe_write | ||
1070 | }; | ||
1071 | |||
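| /* | ||
| * Rough userspace usage sketch (assumes a /dev/vpe1 node for minor 1; | ||
| * the node name is illustrative and is not created by this driver): | ||
| * | ||
| * fd = open("/dev/vpe1", O_WRONLY); // vpe_open(): claim the VPE | ||
| * write(fd, elf_image, elf_size); // vpe_write(): buffer the ELF | ||
| * close(fd); // vpe_release(): load it and run it | ||
| */ | ||
| |||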
1072 | /* module wrapper entry points */ | ||
1073 | /* give me a vpe */ | ||
1074 | vpe_handle vpe_alloc(void) | ||
1075 | { | ||
1076 | int i; | ||
1077 | struct vpe *v; | ||
1078 | |||
1079 | /* find a vpe */ | ||
1080 | for (i = 1; i < MAX_VPES; i++) { | ||
1081 | if ((v = get_vpe(i)) != NULL && v->state == VPE_STATE_UNUSED) { | ||
1082 | v->state = VPE_STATE_INUSE; | ||
1083 | return v; | ||
1084 | } | ||
1085 | } | ||
1086 | return NULL; | ||
1087 | } | ||
1088 | |||
1089 | EXPORT_SYMBOL(vpe_alloc); | ||
1090 | |||
1091 | /* start running from here */ | ||
1092 | int vpe_start(vpe_handle vpe, unsigned long start) | ||
1093 | { | ||
1094 | struct vpe *v = vpe; | ||
1095 | |||
1096 | v->__start = start; | ||
1097 | return vpe_run(v); | ||
1098 | } | ||
1099 | |||
1100 | EXPORT_SYMBOL(vpe_start); | ||
1101 | |||
1102 | /* halt it for now */ | ||
1103 | int vpe_stop(vpe_handle vpe) | ||
1104 | { | ||
1105 | struct vpe *v = vpe; | ||
1106 | struct tc *t; | ||
1107 | unsigned int evpe_flags; | ||
1108 | |||
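| /* dvpe() halts multi-VPE execution and hands back the previous state so that evpe() can restore it afterwards */ | ||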
1109 | evpe_flags = dvpe(); | ||
1110 | |||
1111 | if (!list_empty(&v->tc)) { | ||
1112 | t = list_entry(v->tc.next, struct tc, tc); | ||
1113 | settc(t->index); | ||
1114 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1115 | } | ||
1116 | |||
1117 | evpe(evpe_flags); | ||
1118 | |||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
1122 | EXPORT_SYMBOL(vpe_stop); | ||
1123 | |||
1124 | /* I'm done with it, thank you */ | ||
1125 | int vpe_free(vpe_handle vpe) | ||
1126 | { | ||
1127 | struct vpe *v = vpe; | ||
1128 | struct tc *t; | ||
1129 | unsigned int evpe_flags; | ||
1130 | |||
1131 | if (list_empty(&v->tc)) | ||
1132 | return -ENOEXEC; | ||
1133 | t = list_entry(v->tc.next, struct tc, tc); | ||
1134 | |||
1135 | evpe_flags = dvpe(); | ||
1136 | |||
1137 | /* Put MVPE's into 'configuration state' */ | ||
1138 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1139 | |||
1140 | settc(t->index); | ||
1141 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1142 | |||
1143 | /* mark the TC unallocated and halt'ed */ | ||
1144 | write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); | ||
1145 | write_tc_c0_tchalt(TCHALT_H); | ||
1146 | |||
1147 | v->state = VPE_STATE_UNUSED; | ||
1148 | |||
1149 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1150 | evpe(evpe_flags); | ||
1151 | |||
1152 | return 0; | ||
1153 | } | ||
1154 | |||
1155 | EXPORT_SYMBOL(vpe_free); | ||
1156 | |||
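| /* hand back the shared-area pointer recorded at load time (see find_vpe_symbols() and the program's vpe_shared symbol) */ | ||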
1157 | void *vpe_get_shared(int index) | ||
1158 | { | ||
1159 | struct vpe *v; | ||
1160 | |||
1161 | if ((v = get_vpe(index)) == NULL) { | ||
1162 | printk(KERN_WARNING "vpe: invalid vpe index %d\n", index); | ||
1163 | return NULL; | ||
1164 | } | ||
1165 | |||
1166 | return v->shared_ptr; | ||
1167 | } | ||
1168 | |||
1169 | EXPORT_SYMBOL(vpe_get_shared); | ||
1170 | |||
1171 | static int __init vpe_module_init(void) | ||
1172 | { | ||
1173 | struct vpe *v = NULL; | ||
1174 | struct tc *t; | ||
1175 | unsigned long val; | ||
1176 | int i; | ||
1177 | |||
1178 | if (!cpu_has_mipsmt) { | ||
1179 | printk("VPE loader: not a MIPS MT capable processor\n"); | ||
1180 | return -ENODEV; | ||
1181 | } | ||
1182 | |||
1183 | if ((major = register_chrdev(VPE_MAJOR, module_name, &vpe_fops)) < 0) { | ||
1184 | printk("VPE loader: unable to register character device\n"); | ||
1185 | return -EBUSY; | ||
1186 | } | ||
1187 | |||
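| /* register_chrdev() returns 0 when the requested non-zero major was granted, or the dynamically allocated major when 0 was asked for */ | ||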
1188 | if (major == 0) | ||
1189 | major = VPE_MAJOR; | ||
1190 | |||
1191 | dmt(); | ||
1192 | dvpe(); | ||
1193 | |||
1194 | /* Put MVPE's into 'configuration state' */ | ||
1195 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1196 | |||
1197 | /* dump_mtregs(); */ | ||
1198 | |||
1199 | INIT_LIST_HEAD(&vpecontrol.vpe_list); | ||
1200 | INIT_LIST_HEAD(&vpecontrol.tc_list); | ||
1201 | |||
1202 | val = read_c0_mvpconf0(); | ||
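| /* MVPConf0.PTC is the index of the last TC and PVPE the index of the last VPE, hence the "+ 1" counts below */ | ||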
1203 | for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) { | ||
1204 | t = alloc_tc(i); | ||
1205 | |||
1206 | /* VPE's */ | ||
1207 | if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) { | ||
1208 | settc(i); | ||
1209 | |||
1210 | if ((v = alloc_vpe(i)) == NULL) { | ||
1211 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); | ||
1212 | return -ENODEV; | ||
1213 | } | ||
1214 | |||
1215 | list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */ | ||
1216 | |||
1217 | /* deactivate all but vpe0 */ | ||
1218 | if (i != 0) { | ||
1219 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
1220 | |||
1221 | tmp &= ~VPECONF0_VPA; | ||
1222 | |||
1223 | /* master VPE */ | ||
1224 | tmp |= VPECONF0_MVP; | ||
1225 | write_vpe_c0_vpeconf0(tmp); | ||
1226 | } | ||
1227 | |||
1228 | /* disable multi-threading with TC's */ | ||
1229 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | ||
1230 | |||
1231 | if (i != 0) { | ||
1232 | write_vpe_c0_status((read_c0_status() & | ||
1233 | ~(ST0_IM | ST0_IE | ST0_KSU)) | ||
1234 | | ST0_CU0); | ||
1235 | |||
1236 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
1237 | write_vpe_c0_config(read_c0_config()); | ||
1238 | } | ||
1239 | |||
1240 | } | ||
1241 | |||
1242 | /* TC's */ | ||
1243 | t->pvpe = v; /* set the parent vpe */ | ||
1244 | |||
1245 | /* tc 0 will of course be running.... */ | ||
1246 | if (i == 0) | ||
1247 | t->state = TC_STATE_RUNNING; | ||
1248 | |||
1249 | if (i != 0) { | ||
1250 | unsigned long tmp; | ||
1251 | |||
1252 | settc(i); | ||
1253 | |||
1254 | /* bind a TC to each VPE; may as well put all excess TCs | ||
1255 | on the last VPE */ | ||
1256 | if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1)) | ||
1257 | write_tc_c0_tcbind(read_tc_c0_tcbind() | | ||
1258 | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); | ||
1259 | else | ||
1260 | write_tc_c0_tcbind(read_tc_c0_tcbind() | i); | ||
1261 | |||
1262 | tmp = read_tc_c0_tcstatus(); | ||
1263 | |||
1264 | /* mark not allocated and not dynamically allocatable */ | ||
1265 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
1266 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
1267 | write_tc_c0_tcstatus(tmp); | ||
1268 | |||
1269 | write_tc_c0_tchalt(TCHALT_H); | ||
1270 | } | ||
1271 | } | ||
1272 | |||
1273 | /* release config state */ | ||
1274 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1275 | |||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1279 | static void __exit vpe_module_exit(void) | ||
1280 | { | ||
1281 | struct vpe *v, *n; | ||
1282 | |||
1283 | list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { | ||
1284 | if (v->state != VPE_STATE_UNUSED) { | ||
1285 | release_vpe(v); | ||
1286 | } | ||
1287 | } | ||
1288 | |||
1289 | unregister_chrdev(major, module_name); | ||
1290 | } | ||
1291 | |||
1292 | module_init(vpe_module_init); | ||
1293 | module_exit(vpe_module_exit); | ||
1294 | MODULE_DESCRIPTION("MIPS VPE Loader"); | ||
1295 | MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); | ||
1296 | MODULE_LICENSE("GPL"); | ||