Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile       |    3
-rw-r--r--  arch/mips/kernel/branch.c       |  178
-rw-r--r--  arch/mips/kernel/cevt-gic.c     |  104
-rw-r--r--  arch/mips/kernel/cevt-r4k.c     |    9
-rw-r--r--  arch/mips/kernel/cpu-probe.c    |    3
-rw-r--r--  arch/mips/kernel/csrc-gic.c     |   13
-rw-r--r--  arch/mips/kernel/genex.S        |   75
-rw-r--r--  arch/mips/kernel/irq-gic.c      |   47
-rw-r--r--  arch/mips/kernel/proc.c         |    4
-rw-r--r--  arch/mips/kernel/process.c      |  101
-rw-r--r--  arch/mips/kernel/scall32-o32.S  |    9
-rw-r--r--  arch/mips/kernel/signal.c       |    9
-rw-r--r--  arch/mips/kernel/smp-mt.c       |    3
-rw-r--r--  arch/mips/kernel/smtc-asm.S     |    3
-rw-r--r--  arch/mips/kernel/smtc.c         |   10
-rw-r--r--  arch/mips/kernel/traps.c        |  302
-rw-r--r--  arch/mips/kernel/unaligned.c    | 1489
17 files changed, 2029 insertions, 333 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index cdb87b2a423d..cb96ace5c8c5 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | |||
19 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | 19 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
20 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | 20 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o |
21 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 21 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
22 | obj-$(CONFIG_CEVT_GIC) += cevt-gic.o | ||
22 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 23 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
23 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | 24 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o |
24 | obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o | 25 | obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o |
25 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o | 26 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o |
27 | obj-$(CONFIG_CSRC_GIC) += csrc-gic.o | ||
26 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o | 28 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o |
27 | obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o | 29 | obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o |
28 | obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o | 30 | obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o |
29 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o | 31 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o |
30 | obj-$(CONFIG_CSRC_GIC) += csrc-gic.o | ||
31 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o | 32 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o |
32 | 33 | ||
33 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 34 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 83ffe950f710..46c2ad0703a0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -14,10 +14,186 @@ | |||
14 | #include <asm/cpu.h> | 14 | #include <asm/cpu.h> |
15 | #include <asm/cpu-features.h> | 15 | #include <asm/cpu-features.h> |
16 | #include <asm/fpu.h> | 16 | #include <asm/fpu.h> |
17 | #include <asm/fpu_emulator.h> | ||
17 | #include <asm/inst.h> | 18 | #include <asm/inst.h> |
18 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
19 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
20 | 21 | ||
22 | /* | ||
23 | * Calculate and return exception PC in case of branch delay slot | ||
24 | * for microMIPS and MIPS16e. It does not clear the ISA mode bit. | ||
25 | */ | ||
26 | int __isa_exception_epc(struct pt_regs *regs) | ||
27 | { | ||
28 | unsigned short inst; | ||
29 | long epc = regs->cp0_epc; | ||
30 | |||
31 | /* Calculate exception PC in branch delay slot. */ | ||
32 | if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) { | ||
33 | /* This should never happen because delay slot was checked. */ | ||
34 | force_sig(SIGSEGV, current); | ||
35 | return epc; | ||
36 | } | ||
37 | if (cpu_has_mips16) { | ||
38 | if (((union mips16e_instruction)inst).ri.opcode | ||
39 | == MIPS16e_jal_op) | ||
40 | epc += 4; | ||
41 | else | ||
42 | epc += 2; | ||
43 | } else if (mm_insn_16bit(inst)) | ||
44 | epc += 2; | ||
45 | else | ||
46 | epc += 4; | ||
47 | |||
48 | return epc; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Compute return address and emulate branch in microMIPS mode after an | ||
53 | * exception only. It does not handle compact branches/jumps and cannot | ||
54 | * be used in interrupt context. (Compact branches/jumps do not cause | ||
55 | * exceptions.) | ||
56 | */ | ||
57 | int __microMIPS_compute_return_epc(struct pt_regs *regs) | ||
58 | { | ||
59 | u16 __user *pc16; | ||
60 | u16 halfword; | ||
61 | unsigned int word; | ||
62 | unsigned long contpc; | ||
63 | struct mm_decoded_insn mminsn = { 0 }; | ||
64 | |||
65 | mminsn.micro_mips_mode = 1; | ||
66 | |||
67 | /* This load never faults. */ | ||
68 | pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); | ||
69 | __get_user(halfword, pc16); | ||
70 | pc16++; | ||
71 | contpc = regs->cp0_epc + 2; | ||
72 | word = ((unsigned int)halfword << 16); | ||
73 | mminsn.pc_inc = 2; | ||
74 | |||
75 | if (!mm_insn_16bit(halfword)) { | ||
76 | __get_user(halfword, pc16); | ||
77 | pc16++; | ||
78 | contpc = regs->cp0_epc + 4; | ||
79 | mminsn.pc_inc = 4; | ||
80 | word |= halfword; | ||
81 | } | ||
82 | mminsn.insn = word; | ||
83 | |||
84 | if (get_user(halfword, pc16)) | ||
85 | goto sigsegv; | ||
86 | mminsn.next_pc_inc = 2; | ||
87 | word = ((unsigned int)halfword << 16); | ||
88 | |||
89 | if (!mm_insn_16bit(halfword)) { | ||
90 | pc16++; | ||
91 | if (get_user(halfword, pc16)) | ||
92 | goto sigsegv; | ||
93 | mminsn.next_pc_inc = 4; | ||
94 | word |= halfword; | ||
95 | } | ||
96 | mminsn.next_insn = word; | ||
97 | |||
98 | mm_isBranchInstr(regs, mminsn, &contpc); | ||
99 | |||
100 | regs->cp0_epc = contpc; | ||
101 | |||
102 | return 0; | ||
103 | |||
104 | sigsegv: | ||
105 | force_sig(SIGSEGV, current); | ||
106 | return -EFAULT; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Compute return address and emulate branch in MIPS16e mode after an | ||
111 | * exception only. It does not handle compact branches/jumps and cannot | ||
112 | * be used in interrupt context. (Compact branches/jumps do not cause | ||
113 | * exceptions.) | ||
114 | */ | ||
115 | int __MIPS16e_compute_return_epc(struct pt_regs *regs) | ||
116 | { | ||
117 | u16 __user *addr; | ||
118 | union mips16e_instruction inst; | ||
119 | u16 inst2; | ||
120 | u32 fullinst; | ||
121 | long epc; | ||
122 | |||
123 | epc = regs->cp0_epc; | ||
124 | |||
125 | /* Read the instruction. */ | ||
126 | addr = (u16 __user *)msk_isa16_mode(epc); | ||
127 | if (__get_user(inst.full, addr)) { | ||
128 | force_sig(SIGSEGV, current); | ||
129 | return -EFAULT; | ||
130 | } | ||
131 | |||
132 | switch (inst.ri.opcode) { | ||
133 | case MIPS16e_extend_op: | ||
134 | regs->cp0_epc += 4; | ||
135 | return 0; | ||
136 | |||
137 | /* | ||
138 | * JAL and JALX in MIPS16e mode | ||
139 | */ | ||
140 | case MIPS16e_jal_op: | ||
141 | addr += 1; | ||
142 | if (__get_user(inst2, addr)) { | ||
143 | force_sig(SIGSEGV, current); | ||
144 | return -EFAULT; | ||
145 | } | ||
146 | fullinst = ((unsigned)inst.full << 16) | inst2; | ||
147 | regs->regs[31] = epc + 6; | ||
148 | epc += 4; | ||
149 | epc >>= 28; | ||
150 | epc <<= 28; | ||
151 | /* | ||
152 | * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16 | ||
153 | * | ||
154 | * ......TARGET[15:0].................TARGET[20:16]........... | ||
155 | * ......TARGET[25:21] | ||
156 | */ | ||
157 | epc |= | ||
158 | ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) | | ||
159 | ((fullinst & 0x1f0000) << 7); | ||
160 | if (!inst.jal.x) | ||
161 | set_isa16_mode(epc); /* Set ISA mode bit. */ | ||
162 | regs->cp0_epc = epc; | ||
163 | return 0; | ||
164 | |||
165 | /* | ||
166 | * J(AL)R(C) | ||
167 | */ | ||
168 | case MIPS16e_rr_op: | ||
169 | if (inst.rr.func == MIPS16e_jr_func) { | ||
170 | |||
171 | if (inst.rr.ra) | ||
172 | regs->cp0_epc = regs->regs[31]; | ||
173 | else | ||
174 | regs->cp0_epc = | ||
175 | regs->regs[reg16to32[inst.rr.rx]]; | ||
176 | |||
177 | if (inst.rr.l) { | ||
178 | if (inst.rr.nd) | ||
179 | regs->regs[31] = epc + 2; | ||
180 | else | ||
181 | regs->regs[31] = epc + 4; | ||
182 | } | ||
183 | return 0; | ||
184 | } | ||
185 | break; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * All other cases have no branch delay slot and are 16-bits. | ||
190 | * Branches do not cause an exception. | ||
191 | */ | ||
192 | regs->cp0_epc += 2; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
21 | /** | 197 | /** |
22 | * __compute_return_epc_for_insn - Computes the return address and do emulate | 198 | * __compute_return_epc_for_insn - Computes the return address and do emulate |
23 | * branch simulation, if required. | 199 | * branch simulation, if required. |
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
129 | epc <<= 28; | 305 | epc <<= 28; |
130 | epc |= (insn.j_format.target << 2); | 306 | epc |= (insn.j_format.target << 2); |
131 | regs->cp0_epc = epc; | 307 | regs->cp0_epc = epc; |
308 | if (insn.i_format.opcode == jalx_op) | ||
309 | set_isa16_mode(regs->cp0_epc); | ||
132 | break; | 310 | break; |
133 | 311 | ||
134 | /* | 312 | /* |
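
For reference, the JAL/JALX target arithmetic that the new __MIPS16e_compute_return_epc() performs can be restated as a standalone sketch. The helper name mips16e_jal_target() is illustrative and not part of the patch; the masks and shifts mirror the code above.

static unsigned long mips16e_jal_target(unsigned long epc, unsigned int fullinst)
{
	/* Keep the top four bits: the jump stays in the current 256 MB region. */
	unsigned long target = (epc + 4) & ~0x0fffffffUL;

	target |= ((fullinst & 0x0000ffff) << 2) |	/* TARGET[15:0]  -> bits 17:2  */
		  ((fullinst & 0x03e00000) >> 3) |	/* TARGET[25:21] -> bits 22:18 */
		  ((fullinst & 0x001f0000) << 7);	/* TARGET[20:16] -> bits 27:23 */
	return target;
}
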
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 000000000000..730eaf92c018
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
7 | */ | ||
8 | #include <linux/clockchips.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/irq.h> | ||
13 | |||
14 | #include <asm/time.h> | ||
15 | #include <asm/gic.h> | ||
16 | #include <asm/mips-boards/maltaint.h> | ||
17 | |||
18 | DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); | ||
19 | int gic_timer_irq_installed; | ||
20 | |||
21 | |||
22 | static int gic_next_event(unsigned long delta, struct clock_event_device *evt) | ||
23 | { | ||
24 | u64 cnt; | ||
25 | int res; | ||
26 | |||
27 | cnt = gic_read_count(); | ||
28 | cnt += (u64)delta; | ||
29 | gic_write_compare(cnt); | ||
30 | res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; | ||
31 | return res; | ||
32 | } | ||
33 | |||
34 | void gic_set_clock_mode(enum clock_event_mode mode, | ||
35 | struct clock_event_device *evt) | ||
36 | { | ||
37 | /* Nothing to do ... */ | ||
38 | } | ||
39 | |||
40 | irqreturn_t gic_compare_interrupt(int irq, void *dev_id) | ||
41 | { | ||
42 | struct clock_event_device *cd; | ||
43 | int cpu = smp_processor_id(); | ||
44 | |||
45 | gic_write_compare(gic_read_compare()); | ||
46 | cd = &per_cpu(gic_clockevent_device, cpu); | ||
47 | cd->event_handler(cd); | ||
48 | return IRQ_HANDLED; | ||
49 | } | ||
50 | |||
51 | struct irqaction gic_compare_irqaction = { | ||
52 | .handler = gic_compare_interrupt, | ||
53 | .flags = IRQF_PERCPU | IRQF_TIMER, | ||
54 | .name = "timer", | ||
55 | }; | ||
56 | |||
57 | |||
58 | void gic_event_handler(struct clock_event_device *dev) | ||
59 | { | ||
60 | } | ||
61 | |||
62 | int __cpuinit gic_clockevent_init(void) | ||
63 | { | ||
64 | unsigned int cpu = smp_processor_id(); | ||
65 | struct clock_event_device *cd; | ||
66 | unsigned int irq; | ||
67 | |||
68 | if (!cpu_has_counter || !gic_frequency) | ||
69 | return -ENXIO; | ||
70 | |||
71 | irq = MIPS_GIC_IRQ_BASE; | ||
72 | |||
73 | cd = &per_cpu(gic_clockevent_device, cpu); | ||
74 | |||
75 | cd->name = "MIPS GIC"; | ||
76 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
77 | |||
78 | clockevent_set_clock(cd, gic_frequency); | ||
79 | |||
80 | /* Calculate the min / max delta */ | ||
81 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
82 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
83 | |||
84 | cd->rating = 300; | ||
85 | cd->irq = irq; | ||
86 | cd->cpumask = cpumask_of(cpu); | ||
87 | cd->set_next_event = gic_next_event; | ||
88 | cd->set_mode = gic_set_clock_mode; | ||
89 | cd->event_handler = gic_event_handler; | ||
90 | |||
91 | clockevents_register_device(cd); | ||
92 | |||
93 | GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002); | ||
94 | GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK); | ||
95 | |||
96 | if (gic_timer_irq_installed) | ||
97 | return 0; | ||
98 | |||
99 | gic_timer_irq_installed = 1; | ||
100 | |||
101 | setup_irq(irq, &gic_compare_irqaction); | ||
102 | irq_set_handler(irq, handle_percpu_irq); | ||
103 | return 0; | ||
104 | } | ||
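
The -ETIME path in gic_next_event() relies on a signed-difference comparison so that "has the programmed compare point already passed?" stays correct even when the counter wraps. A minimal sketch of the same test, mirroring the cast used above (the helper name is illustrative, not from the patch):

/* Returns non-zero when the programmed compare value is already in the past. */
static int compare_already_passed(unsigned long long now, unsigned long long cnt)
{
	return (int)(now - cnt) >= 0;
}
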
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index fd75d7144524..02033eaf8825 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -23,7 +23,6 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #ifndef CONFIG_MIPS_MT_SMTC | 25 | #ifndef CONFIG_MIPS_MT_SMTC |
26 | |||
27 | static int mips_next_event(unsigned long delta, | 26 | static int mips_next_event(unsigned long delta, |
28 | struct clock_event_device *evt) | 27 | struct clock_event_device *evt) |
29 | { | 28 | { |
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | |||
49 | int cp0_timer_irq_installed; | 48 | int cp0_timer_irq_installed; |
50 | 49 | ||
51 | #ifndef CONFIG_MIPS_MT_SMTC | 50 | #ifndef CONFIG_MIPS_MT_SMTC |
52 | |||
53 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | 51 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
54 | { | 52 | { |
55 | const int r2 = cpu_has_mips_r2; | 53 | const int r2 = cpu_has_mips_r2; |
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | |||
74 | /* Clear Count/Compare Interrupt */ | 72 | /* Clear Count/Compare Interrupt */ |
75 | write_c0_compare(read_c0_compare()); | 73 | write_c0_compare(read_c0_compare()); |
76 | cd = &per_cpu(mips_clockevent_device, cpu); | 74 | cd = &per_cpu(mips_clockevent_device, cpu); |
75 | #ifdef CONFIG_CEVT_GIC | ||
76 | if (!gic_present) | ||
77 | #endif | ||
77 | cd->event_handler(cd); | 78 | cd->event_handler(cd); |
78 | } | 79 | } |
79 | 80 | ||
@@ -170,7 +171,6 @@ int c0_compare_int_usable(void) | |||
170 | } | 171 | } |
171 | 172 | ||
172 | #ifndef CONFIG_MIPS_MT_SMTC | 173 | #ifndef CONFIG_MIPS_MT_SMTC |
173 | |||
174 | int __cpuinit r4k_clockevent_init(void) | 174 | int __cpuinit r4k_clockevent_init(void) |
175 | { | 175 | { |
176 | unsigned int cpu = smp_processor_id(); | 176 | unsigned int cpu = smp_processor_id(); |
@@ -210,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void) | |||
210 | cd->set_mode = mips_set_clock_mode; | 210 | cd->set_mode = mips_set_clock_mode; |
211 | cd->event_handler = mips_event_handler; | 211 | cd->event_handler = mips_event_handler; |
212 | 212 | ||
213 | #ifdef CONFIG_CEVT_GIC | ||
214 | if (!gic_present) | ||
215 | #endif | ||
213 | clockevents_register_device(cd); | 216 | clockevents_register_device(cd); |
214 | 217 | ||
215 | if (cp0_timer_irq_installed) | 218 | if (cp0_timer_irq_installed) |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..4bbffdb9024f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) | |||
470 | c->options |= MIPS_CPU_ULRI; | 470 | c->options |= MIPS_CPU_ULRI; |
471 | if (config3 & MIPS_CONF3_ISA) | 471 | if (config3 & MIPS_CONF3_ISA) |
472 | c->options |= MIPS_CPU_MICROMIPS; | 472 | c->options |= MIPS_CPU_MICROMIPS; |
473 | #ifdef CONFIG_CPU_MICROMIPS | ||
474 | write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); | ||
475 | #endif | ||
473 | if (config3 & MIPS_CONF3_VZ) | 476 | if (config3 & MIPS_CONF3_VZ) |
474 | c->ases |= MIPS_ASE_VZ; | 477 | c->ases |= MIPS_ASE_VZ; |
475 | 478 | ||
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
index 5dca24bce51b..e02620901117 100644
--- a/arch/mips/kernel/csrc-gic.c
+++ b/arch/mips/kernel/csrc-gic.c
@@ -5,23 +5,14 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | #include <linux/clocksource.h> | ||
9 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/time.h> | ||
10 | 10 | ||
11 | #include <asm/time.h> | ||
12 | #include <asm/gic.h> | 11 | #include <asm/gic.h> |
13 | 12 | ||
14 | static cycle_t gic_hpt_read(struct clocksource *cs) | 13 | static cycle_t gic_hpt_read(struct clocksource *cs) |
15 | { | 14 | { |
16 | unsigned int hi, hi2, lo; | 15 | return gic_read_count(); |
17 | |||
18 | do { | ||
19 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); | ||
20 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); | ||
21 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); | ||
22 | } while (hi2 != hi); | ||
23 | |||
24 | return (((cycle_t) hi) << 32) + lo; | ||
25 | } | 16 | } |
26 | 17 | ||
27 | static struct clocksource gic_clocksource = { | 18 | static struct clocksource gic_clocksource = { |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ecb347ce1b3d..5c2ba9f08a80 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | 6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | * Copyright (C) 2002, 2007 Maciej W. Rozycki | 8 | * Copyright (C) 2002, 2007 Maciej W. Rozycki |
9 | * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | 12 | ||
@@ -21,8 +21,10 @@ | |||
21 | #include <asm/war.h> | 21 | #include <asm/war.h> |
22 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
23 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
24 | #define PANIC_PIC(msg) \ | 25 | #define PANIC_PIC(msg) \ |
25 | .set push; \ | 26 | .set push; \ |
27 | .set nomicromips; \ | ||
26 | .set reorder; \ | 28 | .set reorder; \ |
27 | PTR_LA a0,8f; \ | 29 | PTR_LA a0,8f; \ |
28 | .set noat; \ | 30 | .set noat; \ |
@@ -31,17 +33,10 @@ | |||
31 | 9: b 9b; \ | 33 | 9: b 9b; \ |
32 | .set pop; \ | 34 | .set pop; \ |
33 | TEXT(msg) | 35 | TEXT(msg) |
36 | #endif | ||
34 | 37 | ||
35 | __INIT | 38 | __INIT |
36 | 39 | ||
37 | NESTED(except_vec0_generic, 0, sp) | ||
38 | PANIC_PIC("Exception vector 0 called") | ||
39 | END(except_vec0_generic) | ||
40 | |||
41 | NESTED(except_vec1_generic, 0, sp) | ||
42 | PANIC_PIC("Exception vector 1 called") | ||
43 | END(except_vec1_generic) | ||
44 | |||
45 | /* | 40 | /* |
46 | * General exception vector for all other CPUs. | 41 | * General exception vector for all other CPUs. |
47 | * | 42 | * |
@@ -138,12 +133,19 @@ LEAF(r4k_wait) | |||
138 | nop | 133 | nop |
139 | nop | 134 | nop |
140 | nop | 135 | nop |
136 | #ifdef CONFIG_CPU_MICROMIPS | ||
137 | nop | ||
138 | nop | ||
139 | nop | ||
140 | nop | ||
141 | #endif | ||
141 | .set mips3 | 142 | .set mips3 |
142 | wait | 143 | wait |
143 | /* end of rollback region (the region size must be power of two) */ | 144 | /* end of rollback region (the region size must be power of two) */ |
144 | .set pop | ||
145 | 1: | 145 | 1: |
146 | jr ra | 146 | jr ra |
147 | nop | ||
148 | .set pop | ||
147 | END(r4k_wait) | 149 | END(r4k_wait) |
148 | 150 | ||
149 | .macro BUILD_ROLLBACK_PROLOGUE handler | 151 | .macro BUILD_ROLLBACK_PROLOGUE handler |
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp) | |||
201 | LONG_L s0, TI_REGS($28) | 203 | LONG_L s0, TI_REGS($28) |
202 | LONG_S sp, TI_REGS($28) | 204 | LONG_S sp, TI_REGS($28) |
203 | PTR_LA ra, ret_from_irq | 205 | PTR_LA ra, ret_from_irq |
204 | j plat_irq_dispatch | 206 | PTR_LA v0, plat_irq_dispatch |
207 | jr v0 | ||
208 | #ifdef CONFIG_CPU_MICROMIPS | ||
209 | nop | ||
210 | #endif | ||
205 | END(handle_int) | 211 | END(handle_int) |
206 | 212 | ||
207 | __INIT | 213 | __INIT |
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp) | |||
222 | /* | 228 | /* |
223 | * EJTAG debug exception handler. | 229 | * EJTAG debug exception handler. |
224 | * The EJTAG debug exception entry point is 0xbfc00480, which | 230 | * The EJTAG debug exception entry point is 0xbfc00480, which |
225 | * normally is in the boot PROM, so the boot PROM must do a | 231 | * normally is in the boot PROM, so the boot PROM must do an |
226 | * unconditional jump to this vector. | 232 | * unconditional jump to this vector. |
227 | */ | 233 | */ |
228 | NESTED(except_vec_ejtag_debug, 0, sp) | 234 | NESTED(except_vec_ejtag_debug, 0, sp) |
229 | j ejtag_debug_handler | 235 | j ejtag_debug_handler |
236 | #ifdef CONFIG_CPU_MICROMIPS | ||
237 | nop | ||
238 | #endif | ||
230 | END(except_vec_ejtag_debug) | 239 | END(except_vec_ejtag_debug) |
231 | 240 | ||
232 | __FINIT | 241 | __FINIT |
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp) | |||
251 | FEXPORT(except_vec_vi_mori) | 260 | FEXPORT(except_vec_vi_mori) |
252 | ori a0, $0, 0 | 261 | ori a0, $0, 0 |
253 | #endif /* CONFIG_MIPS_MT_SMTC */ | 262 | #endif /* CONFIG_MIPS_MT_SMTC */ |
263 | PTR_LA v1, except_vec_vi_handler | ||
254 | FEXPORT(except_vec_vi_lui) | 264 | FEXPORT(except_vec_vi_lui) |
255 | lui v0, 0 /* Patched */ | 265 | lui v0, 0 /* Patched */ |
256 | j except_vec_vi_handler | 266 | jr v1 |
257 | FEXPORT(except_vec_vi_ori) | 267 | FEXPORT(except_vec_vi_ori) |
258 | ori v0, 0 /* Patched */ | 268 | ori v0, 0 /* Patched */ |
259 | .set pop | 269 | .set pop |
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer) | |||
354 | */ | 364 | */ |
355 | NESTED(except_vec_nmi, 0, sp) | 365 | NESTED(except_vec_nmi, 0, sp) |
356 | j nmi_handler | 366 | j nmi_handler |
367 | #ifdef CONFIG_CPU_MICROMIPS | ||
368 | nop | ||
369 | #endif | ||
357 | END(except_vec_nmi) | 370 | END(except_vec_nmi) |
358 | 371 | ||
359 | __FINIT | 372 | __FINIT |
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
480 | .set noreorder | 493 | .set noreorder |
481 | /* check if TLB contains a entry for EPC */ | 494 | /* check if TLB contains a entry for EPC */ |
482 | MFC0 k1, CP0_ENTRYHI | 495 | MFC0 k1, CP0_ENTRYHI |
483 | andi k1, 0xff /* ASID_MASK */ | 496 | andi k1, 0xff /* ASID_MASK patched at run-time!! */ |
484 | MFC0 k0, CP0_EPC | 497 | MFC0 k0, CP0_EPC |
485 | PTR_SRL k0, _PAGE_SHIFT + 1 | 498 | PTR_SRL k0, _PAGE_SHIFT + 1 |
486 | PTR_SLL k0, _PAGE_SHIFT + 1 | 499 | PTR_SLL k0, _PAGE_SHIFT + 1 |
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
500 | .set push | 513 | .set push |
501 | .set noat | 514 | .set noat |
502 | .set noreorder | 515 | .set noreorder |
503 | /* 0x7c03e83b: rdhwr v1,$29 */ | 516 | /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ |
517 | /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ | ||
504 | MFC0 k1, CP0_EPC | 518 | MFC0 k1, CP0_EPC |
505 | lui k0, 0x7c03 | 519 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) |
506 | lw k1, (k1) | 520 | and k0, k1, 1 |
507 | ori k0, 0xe83b | 521 | beqz k0, 1f |
508 | .set reorder | 522 | xor k1, k0 |
523 | lhu k0, (k1) | ||
524 | lhu k1, 2(k1) | ||
525 | ins k1, k0, 16, 16 | ||
526 | lui k0, 0x007d | ||
527 | b docheck | ||
528 | ori k0, 0x6b3c | ||
529 | 1: | ||
530 | lui k0, 0x7c03 | ||
531 | lw k1, (k1) | ||
532 | ori k0, 0xe83b | ||
533 | #else | ||
534 | andi k0, k1, 1 | ||
535 | bnez k0, handle_ri | ||
536 | lui k0, 0x7c03 | ||
537 | lw k1, (k1) | ||
538 | ori k0, 0xe83b | ||
539 | #endif | ||
540 | .set reorder | ||
541 | docheck: | ||
509 | bne k0, k1, handle_ri /* if not ours */ | 542 | bne k0, k1, handle_ri /* if not ours */ |
543 | |||
544 | isrdhwr: | ||
510 | /* The insn is rdhwr. No need to check CAUSE.BD here. */ | 545 | /* The insn is rdhwr. No need to check CAUSE.BD here. */ |
511 | get_saved_sp /* k1 := current_thread_info */ | 546 | get_saved_sp /* k1 := current_thread_info */ |
512 | .set noreorder | 547 | .set noreorder |
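
Expressed in C, the new rdhwr fast-path check above does roughly the following. The encodings 0x7c03e83b and 0x007d6b3c are the ones quoted in the comments; the helper name and the plain pointer loads (the real code runs inside the exception handler on kernel-mapped text) are illustrative only.

static int is_rdhwr_v1_29(unsigned long epc)
{
	if (epc & 1) {					/* ISA mode bit set: microMIPS */
		const unsigned short *p = (const unsigned short *)(epc & ~1UL);
		unsigned int insn = ((unsigned int)p[0] << 16) | p[1];

		return insn == 0x007d6b3c;		/* microMIPS rdhwr v1,$29 */
	}
	return *(const unsigned int *)epc == 0x7c03e83b;	/* MIPS32 rdhwr v1,$29 */
}
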
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 485e6a961b31..c01b307317a9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/clocksource.h> | ||
13 | 14 | ||
14 | #include <asm/io.h> | 15 | #include <asm/io.h> |
15 | #include <asm/gic.h> | 16 | #include <asm/gic.h> |
@@ -19,6 +20,8 @@ | |||
19 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
20 | #include <asm-generic/bitops/find.h> | 21 | #include <asm-generic/bitops/find.h> |
21 | 22 | ||
23 | unsigned int gic_frequency; | ||
24 | unsigned int gic_present; | ||
22 | unsigned long _gic_base; | 25 | unsigned long _gic_base; |
23 | unsigned int gic_irq_base; | 26 | unsigned int gic_irq_base; |
24 | unsigned int gic_irq_flags[GIC_NUM_INTRS]; | 27 | unsigned int gic_irq_flags[GIC_NUM_INTRS]; |
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; | |||
30 | static struct gic_pending_regs pending_regs[NR_CPUS]; | 33 | static struct gic_pending_regs pending_regs[NR_CPUS]; |
31 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; | 34 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; |
32 | 35 | ||
36 | #if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC) | ||
37 | cycle_t gic_read_count(void) | ||
38 | { | ||
39 | unsigned int hi, hi2, lo; | ||
40 | |||
41 | do { | ||
42 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); | ||
43 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); | ||
44 | GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); | ||
45 | } while (hi2 != hi); | ||
46 | |||
47 | return (((cycle_t) hi) << 32) + lo; | ||
48 | } | ||
49 | |||
50 | void gic_write_compare(cycle_t cnt) | ||
51 | { | ||
52 | GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), | ||
53 | (int)(cnt >> 32)); | ||
54 | GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), | ||
55 | (int)(cnt & 0xffffffff)); | ||
56 | } | ||
57 | |||
58 | cycle_t gic_read_compare(void) | ||
59 | { | ||
60 | unsigned int hi, lo; | ||
61 | |||
62 | GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi); | ||
63 | GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo); | ||
64 | |||
65 | return (((cycle_t) hi) << 32) + lo; | ||
66 | } | ||
67 | #endif | ||
68 | |||
33 | unsigned int gic_get_timer_pending(void) | 69 | unsigned int gic_get_timer_pending(void) |
34 | { | 70 | { |
35 | unsigned int vpe_pending; | 71 | unsigned int vpe_pending; |
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes) | |||
116 | } | 152 | } |
117 | } | 153 | } |
118 | 154 | ||
155 | unsigned int gic_compare_int(void) | ||
156 | { | ||
157 | unsigned int pending; | ||
158 | |||
159 | GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending); | ||
160 | if (pending & GIC_VPE_PEND_CMP_MSK) | ||
161 | return 1; | ||
162 | else | ||
163 | return 0; | ||
164 | } | ||
165 | |||
119 | unsigned int gic_get_int(void) | 166 | unsigned int gic_get_int(void) |
120 | { | 167 | { |
121 | unsigned int i; | 168 | unsigned int i; |
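
gic_read_count() above uses the classic tear-free read of a 64-bit counter exposed as two 32-bit registers: re-read the high half until it is stable across the low-half read. The same idiom in generic form (helper and register names are illustrative):

static unsigned long long read_split_counter(volatile const unsigned int *hi_reg,
					     volatile const unsigned int *lo_reg)
{
	unsigned int hi, hi2, lo;

	do {
		hi  = *hi_reg;
		lo  = *lo_reg;
		hi2 = *hi_reg;	/* if this differs, the low word wrapped: retry */
	} while (hi2 != hi);

	return ((unsigned long long)hi << 32) | lo;
}
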
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 1dd137bab5c5..a3e461408b7e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
99 | if (cpu_has_vz) seq_printf(m, "%s", " vz"); | 99 | if (cpu_has_vz) seq_printf(m, "%s", " vz"); |
100 | seq_printf(m, "\n"); | 100 | seq_printf(m, "\n"); |
101 | 101 | ||
102 | if (cpu_has_mmips) { | ||
103 | seq_printf(m, "micromips kernel\t: %s\n", | ||
104 | (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); | ||
105 | } | ||
102 | seq_printf(m, "shadow register sets\t: %d\n", | 106 | seq_printf(m, "shadow register sets\t: %d\n", |
103 | cpu_data[n].srsets); | 107 | cpu_data[n].srsets); |
104 | seq_printf(m, "kscratch registers\t: %d\n", | 108 | seq_printf(m, "kscratch registers\t: %d\n", |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3be4405c2d14..ef533760d2c8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) | 7 | * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) |
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | * Copyright (C) 2004 Thiemo Seufer | 9 | * Copyright (C) 2004 Thiemo Seufer |
10 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
10 | */ | 11 | */ |
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
@@ -243,34 +244,115 @@ struct mips_frame_info { | |||
243 | 244 | ||
244 | static inline int is_ra_save_ins(union mips_instruction *ip) | 245 | static inline int is_ra_save_ins(union mips_instruction *ip) |
245 | { | 246 | { |
247 | #ifdef CONFIG_CPU_MICROMIPS | ||
248 | union mips_instruction mmi; | ||
249 | |||
250 | /* | ||
251 | * swsp ra,offset | ||
252 | * swm16 reglist,offset(sp) | ||
253 | * swm32 reglist,offset(sp) | ||
254 | * sw32 ra,offset(sp) | ||
255 | * jradiussp - NOT SUPPORTED | ||
256 | * | ||
257 | * microMIPS is way more fun... | ||
258 | */ | ||
259 | if (mm_insn_16bit(ip->halfword[0])) { | ||
260 | mmi.word = (ip->halfword[0] << 16); | ||
261 | return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && | ||
262 | mmi.mm16_r5_format.rt == 31) || | ||
263 | (mmi.mm16_m_format.opcode == mm_pool16c_op && | ||
264 | mmi.mm16_m_format.func == mm_swm16_op)); | ||
265 | } | ||
266 | else { | ||
267 | mmi.halfword[0] = ip->halfword[1]; | ||
268 | mmi.halfword[1] = ip->halfword[0]; | ||
269 | return ((mmi.mm_m_format.opcode == mm_pool32b_op && | ||
270 | mmi.mm_m_format.rd > 9 && | ||
271 | mmi.mm_m_format.base == 29 && | ||
272 | mmi.mm_m_format.func == mm_swm32_func) || | ||
273 | (mmi.i_format.opcode == mm_sw32_op && | ||
274 | mmi.i_format.rs == 29 && | ||
275 | mmi.i_format.rt == 31)); | ||
276 | } | ||
277 | #else | ||
246 | /* sw / sd $ra, offset($sp) */ | 278 | /* sw / sd $ra, offset($sp) */ |
247 | return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && | 279 | return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && |
248 | ip->i_format.rs == 29 && | 280 | ip->i_format.rs == 29 && |
249 | ip->i_format.rt == 31; | 281 | ip->i_format.rt == 31; |
282 | #endif | ||
250 | } | 283 | } |
251 | 284 | ||
252 | static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) | 285 | static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) |
253 | { | 286 | { |
287 | #ifdef CONFIG_CPU_MICROMIPS | ||
288 | /* | ||
289 | * jr16,jrc,jalr16,jalr16 | ||
290 | * jal | ||
291 | * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb | ||
292 | * jraddiusp - NOT SUPPORTED | ||
293 | * | ||
294 | * microMIPS is kind of more fun... | ||
295 | */ | ||
296 | union mips_instruction mmi; | ||
297 | |||
298 | mmi.word = (ip->halfword[0] << 16); | ||
299 | |||
300 | if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && | ||
301 | (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || | ||
302 | ip->j_format.opcode == mm_jal32_op) | ||
303 | return 1; | ||
304 | if (ip->r_format.opcode != mm_pool32a_op || | ||
305 | ip->r_format.func != mm_pool32axf_op) | ||
306 | return 0; | ||
307 | return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); | ||
308 | #else | ||
254 | if (ip->j_format.opcode == jal_op) | 309 | if (ip->j_format.opcode == jal_op) |
255 | return 1; | 310 | return 1; |
256 | if (ip->r_format.opcode != spec_op) | 311 | if (ip->r_format.opcode != spec_op) |
257 | return 0; | 312 | return 0; |
258 | return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; | 313 | return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; |
314 | #endif | ||
259 | } | 315 | } |
260 | 316 | ||
261 | static inline int is_sp_move_ins(union mips_instruction *ip) | 317 | static inline int is_sp_move_ins(union mips_instruction *ip) |
262 | { | 318 | { |
319 | #ifdef CONFIG_CPU_MICROMIPS | ||
320 | /* | ||
321 | * addiusp -imm | ||
322 | * addius5 sp,-imm | ||
323 | * addiu32 sp,sp,-imm | ||
324 | * jradiussp - NOT SUPPORTED | ||
325 | * | ||
326 | * microMIPS is not more fun... | ||
327 | */ | ||
328 | if (mm_insn_16bit(ip->halfword[0])) { | ||
329 | union mips_instruction mmi; | ||
330 | |||
331 | mmi.word = (ip->halfword[0] << 16); | ||
332 | return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && | ||
333 | mmi.mm16_r3_format.simmediate && mm_addiusp_func) || | ||
334 | (mmi.mm16_r5_format.opcode == mm_pool16d_op && | ||
335 | mmi.mm16_r5_format.rt == 29)); | ||
336 | } | ||
337 | return (ip->mm_i_format.opcode == mm_addiu32_op && | ||
338 | ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); | ||
339 | #else | ||
263 | /* addiu/daddiu sp,sp,-imm */ | 340 | /* addiu/daddiu sp,sp,-imm */ |
264 | if (ip->i_format.rs != 29 || ip->i_format.rt != 29) | 341 | if (ip->i_format.rs != 29 || ip->i_format.rt != 29) |
265 | return 0; | 342 | return 0; |
266 | if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) | 343 | if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) |
267 | return 1; | 344 | return 1; |
345 | #endif | ||
268 | return 0; | 346 | return 0; |
269 | } | 347 | } |
270 | 348 | ||
271 | static int get_frame_info(struct mips_frame_info *info) | 349 | static int get_frame_info(struct mips_frame_info *info) |
272 | { | 350 | { |
351 | #ifdef CONFIG_CPU_MICROMIPS | ||
352 | union mips_instruction *ip = (void *) (((char *) info->func) - 1); | ||
353 | #else | ||
273 | union mips_instruction *ip = info->func; | 354 | union mips_instruction *ip = info->func; |
355 | #endif | ||
274 | unsigned max_insns = info->func_size / sizeof(union mips_instruction); | 356 | unsigned max_insns = info->func_size / sizeof(union mips_instruction); |
275 | unsigned i; | 357 | unsigned i; |
276 | 358 | ||
@@ -290,7 +372,26 @@ static int get_frame_info(struct mips_frame_info *info) | |||
290 | break; | 372 | break; |
291 | if (!info->frame_size) { | 373 | if (!info->frame_size) { |
292 | if (is_sp_move_ins(ip)) | 374 | if (is_sp_move_ins(ip)) |
375 | { | ||
376 | #ifdef CONFIG_CPU_MICROMIPS | ||
377 | if (mm_insn_16bit(ip->halfword[0])) | ||
378 | { | ||
379 | unsigned short tmp; | ||
380 | |||
381 | if (ip->halfword[0] & mm_addiusp_func) | ||
382 | { | ||
383 | tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); | ||
384 | info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0)); | ||
385 | } else { | ||
386 | tmp = (ip->halfword[0] >> 1); | ||
387 | info->frame_size = -(signed short)(tmp & 0xf); | ||
388 | } | ||
389 | ip = (void *) &ip->halfword[1]; | ||
390 | ip--; | ||
391 | } else | ||
392 | #endif | ||
293 | info->frame_size = - ip->i_format.simmediate; | 393 | info->frame_size = - ip->i_format.simmediate; |
394 | } | ||
294 | continue; | 395 | continue; |
295 | } | 396 | } |
296 | if (info->pc_offset == -1 && is_ra_save_ins(ip)) { | 397 | if (info->pc_offset == -1 && is_ra_save_ins(ip)) { |
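
On a microMIPS kernel, function symbols carry the ISA mode bit (bit 0) set, which is why get_frame_info() above starts scanning at ((char *)info->func) - 1. A hedged sketch of the same adjustment, assuming <asm/inst.h> for union mips_instruction (the helper name is illustrative; for an odd address, subtracting one and clearing bit 0 are equivalent):

static union mips_instruction *insn_stream(void *func_symbol)
{
	/* Drop the ISA mode bit to get the address of the first instruction. */
	return (union mips_instruction *)((unsigned long)func_symbol & ~1UL);
}
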
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea29649fc28..9b36424b03c5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@ stackargs: | |||
138 | 5: jr t1 | 138 | 5: jr t1 |
139 | sw t5, 16(sp) # argument #5 to ksp | 139 | sw t5, 16(sp) # argument #5 to ksp |
140 | 140 | ||
141 | #ifdef CONFIG_CPU_MICROMIPS | ||
141 | sw t8, 28(sp) # argument #8 to ksp | 142 | sw t8, 28(sp) # argument #8 to ksp |
143 | nop | ||
142 | sw t7, 24(sp) # argument #7 to ksp | 144 | sw t7, 24(sp) # argument #7 to ksp |
145 | nop | ||
143 | sw t6, 20(sp) # argument #6 to ksp | 146 | sw t6, 20(sp) # argument #6 to ksp |
147 | nop | ||
148 | #else | ||
149 | sw t8, 28(sp) # argument #8 to ksp | ||
150 | sw t7, 24(sp) # argument #7 to ksp | ||
151 | sw t6, 20(sp) # argument #6 to ksp | ||
152 | #endif | ||
144 | 6: j stack_done # go back | 153 | 6: j stack_done # go back |
145 | nop | 154 | nop |
146 | .set pop | 155 | .set pop |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b5e88fd83277..fd3ef2c2afbc 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/war.h> | 35 | #include <asm/war.h> |
36 | #include <asm/vdso.h> | 36 | #include <asm/vdso.h> |
37 | #include <asm/dsp.h> | 37 | #include <asm/dsp.h> |
38 | #include <asm/inst.h> | ||
38 | 39 | ||
39 | #include "signal-common.h" | 40 | #include "signal-common.h" |
40 | 41 | ||
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info, | |||
480 | sigset_t *oldset = sigmask_to_save(); | 481 | sigset_t *oldset = sigmask_to_save(); |
481 | int ret; | 482 | int ret; |
482 | struct mips_abi *abi = current->thread.abi; | 483 | struct mips_abi *abi = current->thread.abi; |
484 | #ifdef CONFIG_CPU_MICROMIPS | ||
485 | void *vdso; | ||
486 | unsigned int tmp = (unsigned int)current->mm->context.vdso; | ||
487 | |||
488 | set_isa16_mode(tmp); | ||
489 | vdso = (void *)tmp; | ||
490 | #else | ||
483 | void *vdso = current->mm->context.vdso; | 491 | void *vdso = current->mm->context.vdso; |
492 | #endif | ||
484 | 493 | ||
485 | if (regs->regs[0]) { | 494 | if (regs->regs[0]) { |
486 | switch(regs->regs[2]) { | 495 | switch(regs->regs[2]) { |
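
handle_signal() above rewrites the vdso address with set_isa16_mode() so the signal-return trampoline is entered in microMIPS mode. The ISA-mode helpers themselves are not part of this diff; assuming the bit-0 convention visible throughout the patch (for example "handler |= 1" in the traps.c hunk below), they plausibly reduce to simple bit operations such as:

#define get_isa16_mode(addr)	((addr) & 0x1)			/* ISA16 (microMIPS/MIPS16e) address? */
#define msk_isa16_mode(addr)	((addr) & ~0x1)			/* real instruction address           */
#define set_isa16_mode(addr)	do { (addr) |= 0x1; } while (0)	/* mark address as ISA16              */
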
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index bfede063d96a..3e5164c11cac 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/mipsregs.h> | 34 | #include <asm/mipsregs.h> |
35 | #include <asm/mipsmtregs.h> | 35 | #include <asm/mipsmtregs.h> |
36 | #include <asm/mips_mt.h> | 36 | #include <asm/mips_mt.h> |
37 | #include <asm/gic.h> | ||
37 | 38 | ||
38 | static void __init smvp_copy_vpe_config(void) | 39 | static void __init smvp_copy_vpe_config(void) |
39 | { | 40 | { |
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) | |||
151 | static void __cpuinit vsmp_init_secondary(void) | 152 | static void __cpuinit vsmp_init_secondary(void) |
152 | { | 153 | { |
153 | #ifdef CONFIG_IRQ_GIC | 154 | #ifdef CONFIG_IRQ_GIC |
154 | extern int gic_present; | ||
155 | |||
156 | /* This is Malta specific: IPI,performance and timer interrupts */ | 155 | /* This is Malta specific: IPI,performance and timer interrupts */ |
157 | if (gic_present) | 156 | if (gic_present) |
158 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | | 157 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | |
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac0a9c8..2866863a39df 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? | |||
49 | .text | 49 | .text |
50 | .align 5 | 50 | .align 5 |
51 | FEXPORT(__smtc_ipi_vector) | 51 | FEXPORT(__smtc_ipi_vector) |
52 | #ifdef CONFIG_CPU_MICROMIPS | ||
53 | nop | ||
54 | #endif | ||
52 | .set noat | 55 | .set noat |
53 | /* Disable thread scheduling to make Status update atomic */ | 56 | /* Disable thread scheduling to make Status update atomic */ |
54 | DMT 27 # dmt k1 | 57 | DMT 27 # dmt k1 |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 7186222dc5bb..31d22f3121c9 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit; | |||
111 | static int ipibuffers; | 111 | static int ipibuffers; |
112 | static int nostlb; | 112 | static int nostlb; |
113 | static int asidmask; | 113 | static int asidmask; |
114 | unsigned long smtc_asid_mask = 0xff; | 114 | unsigned int smtc_asid_mask = 0xff; |
115 | 115 | ||
116 | static int __init vpe0tcs(char *str) | 116 | static int __init vpe0tcs(char *str) |
117 | { | 117 | { |
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1395 | asid = asid_cache(cpu); | 1395 | asid = asid_cache(cpu); |
1396 | 1396 | ||
1397 | do { | 1397 | do { |
1398 | if (!((asid += ASID_INC) & ASID_MASK) ) { | 1398 | if (!ASID_MASK(ASID_INC(asid))) { |
1399 | if (cpu_has_vtag_icache) | 1399 | if (cpu_has_vtag_icache) |
1400 | flush_icache_all(); | 1400 | flush_icache_all(); |
1401 | /* Traverse all online CPUs (hack requires contiguous range) */ | 1401 | /* Traverse all online CPUs (hack requires contiguous range) */ |
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1414 | mips_ihb(); | 1414 | mips_ihb(); |
1415 | } | 1415 | } |
1416 | tcstat = read_tc_c0_tcstatus(); | 1416 | tcstat = read_tc_c0_tcstatus(); |
1417 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); | 1417 | smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); |
1418 | if (!prevhalt) | 1418 | if (!prevhalt) |
1419 | write_tc_c0_tchalt(0); | 1419 | write_tc_c0_tchalt(0); |
1420 | } | 1420 | } |
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1423 | asid = ASID_FIRST_VERSION; | 1423 | asid = ASID_FIRST_VERSION; |
1424 | local_flush_tlb_all(); /* start new asid cycle */ | 1424 | local_flush_tlb_all(); /* start new asid cycle */ |
1425 | } | 1425 | } |
1426 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); | 1426 | } while (smtc_live_asid[tlb][ASID_MASK(asid)]); |
1427 | 1427 | ||
1428 | /* | 1428 | /* |
1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | 1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. |
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid) | |||
1461 | tlb_read(); | 1461 | tlb_read(); |
1462 | ehb(); | 1462 | ehb(); |
1463 | ehi = read_c0_entryhi(); | 1463 | ehi = read_c0_entryhi(); |
1464 | if ((ehi & ASID_MASK) == asid) { | 1464 | if (ASID_MASK(ehi) == asid) { |
1465 | /* | 1465 | /* |
1466 | * Invalidate only entries with specified ASID, | 1466 | * Invalidate only entries with specified ASID, |
1467 | * makiing sure all entries differ. | 1467 | * makiing sure all entries differ. |
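
These hunks switch smtc.c from a fixed ASID_MASK constant to function-style ASID_MASK()/ASID_INC() macros, matching the "ASID_MASK patched at run-time" note in the genex.S hunk above. Their definitions are not part of this diff; one plausible shape, assuming a mask chosen at CPU probe time (both names below are illustrative assumptions), is:

extern unsigned long cpu_asid_mask;			/* e.g. 0xff or 0x3ff, set at probe time */

#define ASID_MASK(asid)		((asid) & cpu_asid_mask)
#define ASID_INC(asid)		((asid) + 1)		/* illustrative increment step */
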
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7a99e60dadbd..3c906e723fd4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@ | |||
8 | * Copyright (C) 1998 Ulf Carlsson | 8 | * Copyright (C) 1998 Ulf Carlsson |
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999 Silicon Graphics, Inc. |
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | 10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com |
11 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | ||
12 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki | 11 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki |
12 | * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. | ||
13 | */ | 13 | */ |
14 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void); | |||
83 | extern asmlinkage void handle_mcheck(void); | 83 | extern asmlinkage void handle_mcheck(void); |
84 | extern asmlinkage void handle_reserved(void); | 84 | extern asmlinkage void handle_reserved(void); |
85 | 85 | ||
86 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, | ||
87 | struct mips_fpu_struct *ctx, int has_fpu, | ||
88 | void *__user *fault_addr); | ||
89 | |||
90 | void (*board_be_init)(void); | 86 | void (*board_be_init)(void); |
91 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 87 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
92 | void (*board_nmi_handler_setup)(void); | 88 | void (*board_nmi_handler_setup)(void); |
@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs) | |||
495 | #define SYNC 0x0000000f | 491 | #define SYNC 0x0000000f |
496 | #define RDHWR 0x0000003b | 492 | #define RDHWR 0x0000003b |
497 | 493 | ||
494 | /* microMIPS definitions */ | ||
495 | #define MM_POOL32A_FUNC 0xfc00ffff | ||
496 | #define MM_RDHWR 0x00006b3c | ||
497 | #define MM_RS 0x001f0000 | ||
498 | #define MM_RT 0x03e00000 | ||
499 | |||
498 | /* | 500 | /* |
499 | * The ll_bit is cleared by r*_switch.S | 501 | * The ll_bit is cleared by r*_switch.S |
500 | */ | 502 | */ |
@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) | |||
609 | * Simulate trapping 'rdhwr' instructions to provide user accessible | 611 | * Simulate trapping 'rdhwr' instructions to provide user accessible |
610 | * registers not implemented in hardware. | 612 | * registers not implemented in hardware. |
611 | */ | 613 | */ |
612 | static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) | 614 | static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) |
613 | { | 615 | { |
614 | struct thread_info *ti = task_thread_info(current); | 616 | struct thread_info *ti = task_thread_info(current); |
615 | 617 | ||
618 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | ||
619 | 1, regs, 0); | ||
620 | switch (rd) { | ||
621 | case 0: /* CPU number */ | ||
622 | regs->regs[rt] = smp_processor_id(); | ||
623 | return 0; | ||
624 | case 1: /* SYNCI length */ | ||
625 | regs->regs[rt] = min(current_cpu_data.dcache.linesz, | ||
626 | current_cpu_data.icache.linesz); | ||
627 | return 0; | ||
628 | case 2: /* Read count register */ | ||
629 | regs->regs[rt] = read_c0_count(); | ||
630 | return 0; | ||
631 | case 3: /* Count register resolution */ | ||
632 | switch (current_cpu_data.cputype) { | ||
633 | case CPU_20KC: | ||
634 | case CPU_25KF: | ||
635 | regs->regs[rt] = 1; | ||
636 | break; | ||
637 | default: | ||
638 | regs->regs[rt] = 2; | ||
639 | } | ||
640 | return 0; | ||
641 | case 29: | ||
642 | regs->regs[rt] = ti->tp_value; | ||
643 | return 0; | ||
644 | default: | ||
645 | return -1; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) | ||
650 | { | ||
616 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { | 651 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { |
617 | int rd = (opcode & RD) >> 11; | 652 | int rd = (opcode & RD) >> 11; |
618 | int rt = (opcode & RT) >> 16; | 653 | int rt = (opcode & RT) >> 16; |
619 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | 654 | |
620 | 1, regs, 0); | 655 | simulate_rdhwr(regs, rd, rt); |
621 | switch (rd) { | 656 | return 0; |
622 | case 0: /* CPU number */ | 657 | } |
623 | regs->regs[rt] = smp_processor_id(); | 658 | |
624 | return 0; | 659 | /* Not ours. */ |
625 | case 1: /* SYNCI length */ | 660 | return -1; |
626 | regs->regs[rt] = min(current_cpu_data.dcache.linesz, | 661 | } |
627 | current_cpu_data.icache.linesz); | 662 | |
628 | return 0; | 663 | static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) |
629 | case 2: /* Read count register */ | 664 | { |
630 | regs->regs[rt] = read_c0_count(); | 665 | if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { |
631 | return 0; | 666 | int rd = (opcode & MM_RS) >> 16; |
632 | case 3: /* Count register resolution */ | 667 | int rt = (opcode & MM_RT) >> 21; |
633 | switch (current_cpu_data.cputype) { | 668 | simulate_rdhwr(regs, rd, rt); |
634 | case CPU_20KC: | 669 | return 0; |
635 | case CPU_25KF: | ||
636 | regs->regs[rt] = 1; | ||
637 | break; | ||
638 | default: | ||
639 | regs->regs[rt] = 2; | ||
640 | } | ||
641 | return 0; | ||
642 | case 29: | ||
643 | regs->regs[rt] = ti->tp_value; | ||
644 | return 0; | ||
645 | default: | ||
646 | return -1; | ||
647 | } | ||
648 | } | 670 | } |
649 | 671 | ||
650 | /* Not ours. */ | 672 | /* Not ours. */ |
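
Worked example of the microMIPS decode in simulate_rdhwr_mm() above, applied to the encoding 0x007d6b3c quoted in genex.S as "rdhwr v1,$29":

	rd = (0x007d6b3c & MM_RS) >> 16 = 0x1d = 29	(hardware register 29, the user TLS pointer)
	rt = (0x007d6b3c & MM_RT) >> 21 = 0x03 = 3	(destination GPR $3, i.e. $v1)

so simulate_rdhwr() takes the "case 29" branch and writes ti->tp_value into $v1, matching the MIPS32 fast path.
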
@@ -675,7 +697,7 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
675 | force_sig_info(SIGFPE, &info, current); | 697 | force_sig_info(SIGFPE, &info, current); |
676 | } | 698 | } |
677 | 699 | ||
678 | static int process_fpemu_return(int sig, void __user *fault_addr) | 700 | int process_fpemu_return(int sig, void __user *fault_addr) |
679 | { | 701 | { |
680 | if (sig == SIGSEGV || sig == SIGBUS) { | 702 | if (sig == SIGSEGV || sig == SIGBUS) { |
681 | struct siginfo si = {0}; | 703 | struct siginfo si = {0}; |
@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, | |||
826 | asmlinkage void do_bp(struct pt_regs *regs) | 848 | asmlinkage void do_bp(struct pt_regs *regs) |
827 | { | 849 | { |
828 | unsigned int opcode, bcode; | 850 | unsigned int opcode, bcode; |
829 | 851 | unsigned long epc; | |
830 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 852 | u16 instr[2]; |
831 | goto out_sigsegv; | 853 | |
854 | if (get_isa16_mode(regs->cp0_epc)) { | ||
855 | /* Calculate EPC. */ | ||
856 | epc = exception_epc(regs); | ||
857 | if (cpu_has_mmips) { | ||
858 | if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || | ||
859 | (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) | ||
860 | goto out_sigsegv; | ||
861 | opcode = (instr[0] << 16) | instr[1]; | ||
862 | } else { | ||
863 | /* MIPS16e mode */ | ||
864 | if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) | ||
865 | goto out_sigsegv; | ||
866 | bcode = (instr[0] >> 6) & 0x3f; | ||
867 | do_trap_or_bp(regs, bcode, "Break"); | ||
868 | return; | ||
869 | } | ||
870 | } else { | ||
871 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | ||
872 | goto out_sigsegv; | ||
873 | } | ||
832 | 874 | ||
833 | /* | 875 | /* |
834 | * There is the ancient bug in the MIPS assemblers that the break | 876 | * There is the ancient bug in the MIPS assemblers that the break |
@@ -869,13 +911,22 @@ out_sigsegv: | |||
869 | asmlinkage void do_tr(struct pt_regs *regs) | 911 | asmlinkage void do_tr(struct pt_regs *regs) |
870 | { | 912 | { |
871 | unsigned int opcode, tcode = 0; | 913 | unsigned int opcode, tcode = 0; |
914 | u16 instr[2]; | ||
915 | unsigned long epc = exception_epc(regs); | ||
872 | 916 | ||
873 | if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) | 917 | if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || |
874 | goto out_sigsegv; | 918 | (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) |
919 | goto out_sigsegv; | ||
920 | opcode = (instr[0] << 16) | instr[1]; | ||
875 | 921 | ||
876 | /* Immediate versions don't provide a code. */ | 922 | /* Immediate versions don't provide a code. */ |
877 | if (!(opcode & OPCODE)) | 923 | if (!(opcode & OPCODE)) { |
878 | tcode = ((opcode >> 6) & ((1 << 10) - 1)); | 924 | if (get_isa16_mode(regs->cp0_epc)) |
925 | /* microMIPS */ | ||
926 | tcode = (opcode >> 12) & 0x1f; | ||
927 | else | ||
928 | tcode = ((opcode >> 6) & ((1 << 10) - 1)); | ||
929 | } | ||
879 | 930 | ||
880 | do_trap_or_bp(regs, tcode, "Trap"); | 931 | do_trap_or_bp(regs, tcode, "Trap"); |
881 | return; | 932 | return; |
@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs) | |||
888 | { | 939 | { |
889 | unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); | 940 | unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); |
890 | unsigned long old_epc = regs->cp0_epc; | 941 | unsigned long old_epc = regs->cp0_epc; |
942 | unsigned long old31 = regs->regs[31]; | ||
891 | unsigned int opcode = 0; | 943 | unsigned int opcode = 0; |
892 | int status = -1; | 944 | int status = -1; |
893 | 945 | ||
@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs) | |||
900 | if (unlikely(compute_return_epc(regs) < 0)) | 952 | if (unlikely(compute_return_epc(regs) < 0)) |
901 | return; | 953 | return; |
902 | 954 | ||
903 | if (unlikely(get_user(opcode, epc) < 0)) | 955 | if (get_isa16_mode(regs->cp0_epc)) { |
904 | status = SIGSEGV; | 956 | unsigned short mmop[2] = { 0 }; |
905 | 957 | ||
906 | if (!cpu_has_llsc && status < 0) | 958 | if (unlikely(get_user(mmop[0], epc) < 0)) |
907 | status = simulate_llsc(regs, opcode); | 959 | status = SIGSEGV; |
960 | if (unlikely(get_user(mmop[1], epc) < 0)) | ||
961 | status = SIGSEGV; | ||
962 | opcode = (mmop[0] << 16) | mmop[1]; | ||
908 | 963 | ||
909 | if (status < 0) | 964 | if (status < 0) |
910 | status = simulate_rdhwr(regs, opcode); | 965 | status = simulate_rdhwr_mm(regs, opcode); |
966 | } else { | ||
967 | if (unlikely(get_user(opcode, epc) < 0)) | ||
968 | status = SIGSEGV; | ||
911 | 969 | ||
912 | if (status < 0) | 970 | if (!cpu_has_llsc && status < 0) |
913 | status = simulate_sync(regs, opcode); | 971 | status = simulate_llsc(regs, opcode); |
972 | |||
973 | if (status < 0) | ||
974 | status = simulate_rdhwr_normal(regs, opcode); | ||
975 | |||
976 | if (status < 0) | ||
977 | status = simulate_sync(regs, opcode); | ||
978 | } | ||
914 | 979 | ||
915 | if (status < 0) | 980 | if (status < 0) |
916 | status = SIGILL; | 981 | status = SIGILL; |
917 | 982 | ||
918 | if (unlikely(status > 0)) { | 983 | if (unlikely(status > 0)) { |
919 | regs->cp0_epc = old_epc; /* Undo skip-over. */ | 984 | regs->cp0_epc = old_epc; /* Undo skip-over. */ |
985 | regs->regs[31] = old31; | ||
920 | force_sig(status, current); | 986 | force_sig(status, current); |
921 | } | 987 | } |
922 | } | 988 | } |
@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
986 | asmlinkage void do_cpu(struct pt_regs *regs) | 1052 | asmlinkage void do_cpu(struct pt_regs *regs) |
987 | { | 1053 | { |
988 | unsigned int __user *epc; | 1054 | unsigned int __user *epc; |
989 | unsigned long old_epc; | 1055 | unsigned long old_epc, old31; |
990 | unsigned int opcode; | 1056 | unsigned int opcode; |
991 | unsigned int cpid; | 1057 | unsigned int cpid; |
992 | int status; | 1058 | int status; |
@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
1000 | case 0: | 1066 | case 0: |
1001 | epc = (unsigned int __user *)exception_epc(regs); | 1067 | epc = (unsigned int __user *)exception_epc(regs); |
1002 | old_epc = regs->cp0_epc; | 1068 | old_epc = regs->cp0_epc; |
1069 | old31 = regs->regs[31]; | ||
1003 | opcode = 0; | 1070 | opcode = 0; |
1004 | status = -1; | 1071 | status = -1; |
1005 | 1072 | ||
1006 | if (unlikely(compute_return_epc(regs) < 0)) | 1073 | if (unlikely(compute_return_epc(regs) < 0)) |
1007 | return; | 1074 | return; |
1008 | 1075 | ||
1009 | if (unlikely(get_user(opcode, epc) < 0)) | 1076 | if (get_isa16_mode(regs->cp0_epc)) { |
1010 | status = SIGSEGV; | 1077 | unsigned short mmop[2] = { 0 }; |
1011 | 1078 | ||
1012 | if (!cpu_has_llsc && status < 0) | 1079 | if (unlikely(get_user(mmop[0], epc) < 0)) |
1013 | status = simulate_llsc(regs, opcode); | 1080 | status = SIGSEGV; |
1081 | if (unlikely(get_user(mmop[1], epc) < 0)) | ||
1082 | status = SIGSEGV; | ||
1083 | opcode = (mmop[0] << 16) | mmop[1]; | ||
1014 | 1084 | ||
1015 | if (status < 0) | 1085 | if (status < 0) |
1016 | status = simulate_rdhwr(regs, opcode); | 1086 | status = simulate_rdhwr_mm(regs, opcode); |
1087 | } else { | ||
1088 | if (unlikely(get_user(opcode, epc) < 0)) | ||
1089 | status = SIGSEGV; | ||
1090 | |||
1091 | if (!cpu_has_llsc && status < 0) | ||
1092 | status = simulate_llsc(regs, opcode); | ||
1093 | |||
1094 | if (status < 0) | ||
1095 | status = simulate_rdhwr_normal(regs, opcode); | ||
1096 | } | ||
1017 | 1097 | ||
1018 | if (status < 0) | 1098 | if (status < 0) |
1019 | status = SIGILL; | 1099 | status = SIGILL; |
1020 | 1100 | ||
1021 | if (unlikely(status > 0)) { | 1101 | if (unlikely(status > 0)) { |
1022 | regs->cp0_epc = old_epc; /* Undo skip-over. */ | 1102 | regs->cp0_epc = old_epc; /* Undo skip-over. */ |
1103 | regs->regs[31] = old31; | ||
1023 | force_sig(status, current); | 1104 | force_sig(status, current); |
1024 | } | 1105 | } |
1025 | 1106 | ||
@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void) | |||
1333 | void ejtag_exception_handler(struct pt_regs *regs) | 1414 | void ejtag_exception_handler(struct pt_regs *regs) |
1334 | { | 1415 | { |
1335 | const int field = 2 * sizeof(unsigned long); | 1416 | const int field = 2 * sizeof(unsigned long); |
1336 | unsigned long depc, old_epc; | 1417 | unsigned long depc, old_epc, old_ra; |
1337 | unsigned int debug; | 1418 | unsigned int debug; |
1338 | 1419 | ||
1339 | printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); | 1420 | printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); |
@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs) | |||
1348 | * calculation. | 1429 | * calculation. |
1349 | */ | 1430 | */ |
1350 | old_epc = regs->cp0_epc; | 1431 | old_epc = regs->cp0_epc; |
1432 | old_ra = regs->regs[31]; | ||
1351 | regs->cp0_epc = depc; | 1433 | regs->cp0_epc = depc; |
1352 | __compute_return_epc(regs); | 1434 | compute_return_epc(regs); |
1353 | depc = regs->cp0_epc; | 1435 | depc = regs->cp0_epc; |
1354 | regs->cp0_epc = old_epc; | 1436 | regs->cp0_epc = old_epc; |
1437 | regs->regs[31] = old_ra; | ||
1355 | } else | 1438 | } else |
1356 | depc += 4; | 1439 | depc += 4; |
1357 | write_c0_depc(depc); | 1440 | write_c0_depc(depc); |
@@ -1390,10 +1473,27 @@ unsigned long vi_handlers[64]; | |||
1390 | void __init *set_except_vector(int n, void *addr) | 1473 | void __init *set_except_vector(int n, void *addr) |
1391 | { | 1474 | { |
1392 | unsigned long handler = (unsigned long) addr; | 1475 | unsigned long handler = (unsigned long) addr; |
1393 | unsigned long old_handler = xchg(&exception_handlers[n], handler); | 1476 | unsigned long old_handler; |
1477 | |||
1478 | #ifdef CONFIG_CPU_MICROMIPS | ||
1479 | /* | ||
1480 | * Only the TLB handlers are cache aligned with an even | ||
1481 | * address. All other handlers are on an odd address and | ||
1482 | * require no modification. Otherwise, MIPS32 mode will | ||
1483 | * be entered when handling any TLB exceptions. That | ||
1484 | * would be bad...since we must stay in microMIPS mode. | ||
1485 | */ | ||
1486 | if (!(handler & 0x1)) | ||
1487 | handler |= 1; | ||
1488 | #endif | ||
1489 | old_handler = xchg(&exception_handlers[n], handler); | ||
1394 | 1490 | ||
1395 | if (n == 0 && cpu_has_divec) { | 1491 | if (n == 0 && cpu_has_divec) { |
1492 | #ifdef CONFIG_CPU_MICROMIPS | ||
1493 | unsigned long jump_mask = ~((1 << 27) - 1); | ||
1494 | #else | ||
1396 | unsigned long jump_mask = ~((1 << 28) - 1); | 1495 | unsigned long jump_mask = ~((1 << 28) - 1); |
1496 | #endif | ||
1397 | u32 *buf = (u32 *)(ebase + 0x200); | 1497 | u32 *buf = (u32 *)(ebase + 0x200); |
1398 | unsigned int k0 = 26; | 1498 | unsigned int k0 = 26; |
1399 | if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { | 1499 | if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { |
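A standalone sketch (not a kernel helper) of the ISA-mode tagging done in the CONFIG_CPU_MICROMIPS block of set_except_vector() above: even, cache-aligned TLB handler addresses get bit 0 forced on so the CPU stays in microMIPS mode when it vectors there, while odd addresses are already tagged and need no change.

    /* Sketch: force the microMIPS ISA-mode bit on an even handler address. */
    static inline unsigned long mm_tag_handler(unsigned long handler)
    {
            if (!(handler & 0x1))           /* even => not yet ISA-tagged */
                    handler |= 1;
            return handler;
    }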
@@ -1420,17 +1520,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1420 | unsigned long handler; | 1520 | unsigned long handler; |
1421 | unsigned long old_handler = vi_handlers[n]; | 1521 | unsigned long old_handler = vi_handlers[n]; |
1422 | int srssets = current_cpu_data.srsets; | 1522 | int srssets = current_cpu_data.srsets; |
1423 | u32 *w; | 1523 | u16 *h; |
1424 | unsigned char *b; | 1524 | unsigned char *b; |
1425 | 1525 | ||
1426 | BUG_ON(!cpu_has_veic && !cpu_has_vint); | 1526 | BUG_ON(!cpu_has_veic && !cpu_has_vint); |
1527 | BUG_ON((n < 0) && (n > 9)); | ||
1427 | 1528 | ||
1428 | if (addr == NULL) { | 1529 | if (addr == NULL) { |
1429 | handler = (unsigned long) do_default_vi; | 1530 | handler = (unsigned long) do_default_vi; |
1430 | srs = 0; | 1531 | srs = 0; |
1431 | } else | 1532 | } else |
1432 | handler = (unsigned long) addr; | 1533 | handler = (unsigned long) addr; |
1433 | vi_handlers[n] = (unsigned long) addr; | 1534 | vi_handlers[n] = handler; |
1434 | 1535 | ||
1435 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); | 1536 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); |
1436 | 1537 | ||
@@ -1449,9 +1550,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1449 | if (srs == 0) { | 1550 | if (srs == 0) { |
1450 | /* | 1551 | /* |
1451 | * If no shadow set is selected then use the default handler | 1552 | * If no shadow set is selected then use the default handler |
1452 | * that does normal register saving and a standard interrupt exit | 1553 | * that does normal register saving and standard interrupt exit |
1453 | */ | 1554 | */ |
1454 | |||
1455 | extern char except_vec_vi, except_vec_vi_lui; | 1555 | extern char except_vec_vi, except_vec_vi_lui; |
1456 | extern char except_vec_vi_ori, except_vec_vi_end; | 1556 | extern char except_vec_vi_ori, except_vec_vi_end; |
1457 | extern char rollback_except_vec_vi; | 1557 | extern char rollback_except_vec_vi; |
@@ -1464,11 +1564,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1464 | * Status.IM bit to be masked before going there. | 1564 | * Status.IM bit to be masked before going there. |
1465 | */ | 1565 | */ |
1466 | extern char except_vec_vi_mori; | 1566 | extern char except_vec_vi_mori; |
1567 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | ||
1568 | const int mori_offset = &except_vec_vi_mori - vec_start + 2; | ||
1569 | #else | ||
1467 | const int mori_offset = &except_vec_vi_mori - vec_start; | 1570 | const int mori_offset = &except_vec_vi_mori - vec_start; |
1571 | #endif | ||
1468 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1572 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1469 | const int handler_len = &except_vec_vi_end - vec_start; | 1573 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) |
1574 | const int lui_offset = &except_vec_vi_lui - vec_start + 2; | ||
1575 | const int ori_offset = &except_vec_vi_ori - vec_start + 2; | ||
1576 | #else | ||
1470 | const int lui_offset = &except_vec_vi_lui - vec_start; | 1577 | const int lui_offset = &except_vec_vi_lui - vec_start; |
1471 | const int ori_offset = &except_vec_vi_ori - vec_start; | 1578 | const int ori_offset = &except_vec_vi_ori - vec_start; |
1579 | #endif | ||
1580 | const int handler_len = &except_vec_vi_end - vec_start; | ||
1472 | 1581 | ||
1473 | if (handler_len > VECTORSPACING) { | 1582 | if (handler_len > VECTORSPACING) { |
1474 | /* | 1583 | /* |
@@ -1478,30 +1587,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
1478 | panic("VECTORSPACING too small"); | 1587 | panic("VECTORSPACING too small"); |
1479 | } | 1588 | } |
1480 | 1589 | ||
1481 | memcpy(b, vec_start, handler_len); | 1590 | set_handler(((unsigned long)b - ebase), vec_start, |
1591 | #ifdef CONFIG_CPU_MICROMIPS | ||
1592 | (handler_len - 1)); | ||
1593 | #else | ||
1594 | handler_len); | ||
1595 | #endif | ||
1482 | #ifdef CONFIG_MIPS_MT_SMTC | 1596 | #ifdef CONFIG_MIPS_MT_SMTC |
1483 | BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ | 1597 | BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ |
1484 | 1598 | ||
1485 | w = (u32 *)(b + mori_offset); | 1599 | h = (u16 *)(b + mori_offset); |
1486 | *w = (*w & 0xffff0000) | (0x100 << n); | 1600 | *h = (0x100 << n); |
1487 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1601 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1488 | w = (u32 *)(b + lui_offset); | 1602 | h = (u16 *)(b + lui_offset); |
1489 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | 1603 | *h = (handler >> 16) & 0xffff; |
1490 | w = (u32 *)(b + ori_offset); | 1604 | h = (u16 *)(b + ori_offset); |
1491 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); | 1605 | *h = (handler & 0xffff); |
1492 | local_flush_icache_range((unsigned long)b, | 1606 | local_flush_icache_range((unsigned long)b, |
1493 | (unsigned long)(b+handler_len)); | 1607 | (unsigned long)(b+handler_len)); |
1494 | } | 1608 | } |
1495 | else { | 1609 | else { |
1496 | /* | 1610 | /* |
1497 | * In other cases jump directly to the interrupt handler | 1611 | * In other cases jump directly to the interrupt handler. It |
1498 | * | 1612 | * is the handler's responsibility to save registers if required |
1499 | * It is the handlers responsibility to save registers if required | 1613 | * (eg hi/lo) and return from the exception using "eret". |
1500 | * (eg hi/lo) and return from the exception using "eret" | ||
1501 | */ | 1614 | */ |
1502 | w = (u32 *)b; | 1615 | u32 insn; |
1503 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ | 1616 | |
1504 | *w = 0; | 1617 | h = (u16 *)b; |
1618 | /* j handler */ | ||
1619 | #ifdef CONFIG_CPU_MICROMIPS | ||
1620 | insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); | ||
1621 | #else | ||
1622 | insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); | ||
1623 | #endif | ||
1624 | h[0] = (insn >> 16) & 0xffff; | ||
1625 | h[1] = insn & 0xffff; | ||
1626 | h[2] = 0; | ||
1627 | h[3] = 0; | ||
1505 | local_flush_icache_range((unsigned long)b, | 1628 | local_flush_icache_range((unsigned long)b, |
1506 | (unsigned long)(b+8)); | 1629 | (unsigned long)(b+8)); |
1507 | } | 1630 | } |
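A worked, user-space illustration (not from the patch) of the two direct-jump encodings written into the vector above: a classic MIPS32 "j" packs a word-aligned 28-bit region offset, shifted right by 2, under opcode 0x08, while a microMIPS "j32" packs a halfword-aligned 27-bit offset, shifted right by 1, under opcode 0xd4; the resulting 32-bit word is then stored as two halfwords to match the 16-bit microMIPS instruction stream. The handler address below is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t handler = 0x80104001;  /* example address, ISA bit set */

            uint32_t j_mips32 = 0x08000000 | ((handler & 0x0fffffff) >> 2);
            uint32_t j_micro  = 0xd4000000 | ((handler & 0x07ffffff) >> 1);
            uint16_t h[2] = { (uint16_t)(j_micro >> 16),
                              (uint16_t)(j_micro & 0xffff) };

            printf("MIPS32    j   = 0x%08x\n", j_mips32);
            printf("microMIPS j32 = 0x%04x 0x%04x\n", h[0], h[1]);
            return 0;
    }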
@@ -1546,6 +1669,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1546 | unsigned int cpu = smp_processor_id(); | 1669 | unsigned int cpu = smp_processor_id(); |
1547 | unsigned int status_set = ST0_CU0; | 1670 | unsigned int status_set = ST0_CU0; |
1548 | unsigned int hwrena = cpu_hwrena_impl_bits; | 1671 | unsigned int hwrena = cpu_hwrena_impl_bits; |
1672 | unsigned long asid = 0; | ||
1549 | #ifdef CONFIG_MIPS_MT_SMTC | 1673 | #ifdef CONFIG_MIPS_MT_SMTC |
1550 | int secondaryTC = 0; | 1674 | int secondaryTC = 0; |
1551 | int bootTC = (cpu == 0); | 1675 | int bootTC = (cpu == 0); |
@@ -1629,8 +1753,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1629 | } | 1753 | } |
1630 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1754 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1631 | 1755 | ||
1632 | if (!cpu_data[cpu].asid_cache) | 1756 | asid = ASID_FIRST_VERSION; |
1633 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | 1757 | cpu_data[cpu].asid_cache = asid; |
1758 | TLBMISS_HANDLER_SETUP(); | ||
1634 | 1759 | ||
1635 | atomic_inc(&init_mm.mm_count); | 1760 | atomic_inc(&init_mm.mm_count); |
1636 | current->active_mm = &init_mm; | 1761 | current->active_mm = &init_mm; |
@@ -1660,7 +1785,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1660 | /* Install CPU exception handler */ | 1785 | /* Install CPU exception handler */ |
1661 | void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) | 1786 | void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) |
1662 | { | 1787 | { |
1788 | #ifdef CONFIG_CPU_MICROMIPS | ||
1789 | memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); | ||
1790 | #else | ||
1663 | memcpy((void *)(ebase + offset), addr, size); | 1791 | memcpy((void *)(ebase + offset), addr, size); |
1792 | #endif | ||
1664 | local_flush_icache_range(ebase + offset, ebase + offset + size); | 1793 | local_flush_icache_range(ebase + offset, ebase + offset + size); |
1665 | } | 1794 | } |
1666 | 1795 | ||
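Illustration only: in a microMIPS build, a handler symbol carries the ISA-mode bit, so the set_handler() hunk above copies from (addr - 1) to reach the real first instruction byte. Equivalent address arithmetic, assuming the symbol value is odd:

    /* Strip the ISA-mode bit to get the byte address of the handler code. */
    static inline void *mm_code_start(void *sym)
    {
            return (void *)((unsigned long)sym & ~1UL);
    }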
@@ -1694,8 +1823,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt); | |||
1694 | 1823 | ||
1695 | void __init trap_init(void) | 1824 | void __init trap_init(void) |
1696 | { | 1825 | { |
1697 | extern char except_vec3_generic, except_vec3_r4000; | 1826 | extern char except_vec3_generic; |
1698 | extern char except_vec4; | 1827 | extern char except_vec4; |
1828 | extern char except_vec3_r4000; | ||
1699 | unsigned long i; | 1829 | unsigned long i; |
1700 | int rollback; | 1830 | int rollback; |
1701 | 1831 | ||
@@ -1833,11 +1963,11 @@ void __init trap_init(void) | |||
1833 | 1963 | ||
1834 | if (cpu_has_vce) | 1964 | if (cpu_has_vce) |
1835 | /* Special exception: R4[04]00 uses also the divec space. */ | 1965 | /* Special exception: R4[04]00 uses also the divec space. */ |
1836 | memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); | 1966 | set_handler(0x180, &except_vec3_r4000, 0x100); |
1837 | else if (cpu_has_4kex) | 1967 | else if (cpu_has_4kex) |
1838 | memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); | 1968 | set_handler(0x180, &except_vec3_generic, 0x80); |
1839 | else | 1969 | else |
1840 | memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); | 1970 | set_handler(0x080, &except_vec3_generic, 0x80); |
1841 | 1971 | ||
1842 | local_flush_icache_range(ebase, ebase + 0x400); | 1972 | local_flush_icache_range(ebase, ebase + 0x400); |
1843 | flush_tlb_handlers(); | 1973 | flush_tlb_handlers(); |
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 6087a54c86a0..203d8857070d 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -83,8 +83,12 @@ | |||
83 | #include <asm/branch.h> | 83 | #include <asm/branch.h> |
84 | #include <asm/byteorder.h> | 84 | #include <asm/byteorder.h> |
85 | #include <asm/cop2.h> | 85 | #include <asm/cop2.h> |
86 | #include <asm/fpu.h> | ||
87 | #include <asm/fpu_emulator.h> | ||
86 | #include <asm/inst.h> | 88 | #include <asm/inst.h> |
87 | #include <asm/uaccess.h> | 89 | #include <asm/uaccess.h> |
90 | #include <asm/fpu.h> | ||
91 | #include <asm/fpu_emulator.h> | ||
88 | 92 | ||
89 | #define STR(x) __STR(x) | 93 | #define STR(x) __STR(x) |
90 | #define __STR(x) #x | 94 | #define __STR(x) #x |
@@ -102,12 +106,332 @@ static u32 unaligned_action; | |||
102 | #endif | 106 | #endif |
103 | extern void show_registers(struct pt_regs *regs); | 107 | extern void show_registers(struct pt_regs *regs); |
104 | 108 | ||
109 | #ifdef __BIG_ENDIAN | ||
110 | #define LoadHW(addr, value, res) \ | ||
111 | __asm__ __volatile__ (".set\tnoat\n" \ | ||
112 | "1:\tlb\t%0, 0(%2)\n" \ | ||
113 | "2:\tlbu\t$1, 1(%2)\n\t" \ | ||
114 | "sll\t%0, 0x8\n\t" \ | ||
115 | "or\t%0, $1\n\t" \ | ||
116 | "li\t%1, 0\n" \ | ||
117 | "3:\t.set\tat\n\t" \ | ||
118 | ".insn\n\t" \ | ||
119 | ".section\t.fixup,\"ax\"\n\t" \ | ||
120 | "4:\tli\t%1, %3\n\t" \ | ||
121 | "j\t3b\n\t" \ | ||
122 | ".previous\n\t" \ | ||
123 | ".section\t__ex_table,\"a\"\n\t" \ | ||
124 | STR(PTR)"\t1b, 4b\n\t" \ | ||
125 | STR(PTR)"\t2b, 4b\n\t" \ | ||
126 | ".previous" \ | ||
127 | : "=&r" (value), "=r" (res) \ | ||
128 | : "r" (addr), "i" (-EFAULT)); | ||
129 | |||
130 | #define LoadW(addr, value, res) \ | ||
131 | __asm__ __volatile__ ( \ | ||
132 | "1:\tlwl\t%0, (%2)\n" \ | ||
133 | "2:\tlwr\t%0, 3(%2)\n\t" \ | ||
134 | "li\t%1, 0\n" \ | ||
135 | "3:\n\t" \ | ||
136 | ".insn\n\t" \ | ||
137 | ".section\t.fixup,\"ax\"\n\t" \ | ||
138 | "4:\tli\t%1, %3\n\t" \ | ||
139 | "j\t3b\n\t" \ | ||
140 | ".previous\n\t" \ | ||
141 | ".section\t__ex_table,\"a\"\n\t" \ | ||
142 | STR(PTR)"\t1b, 4b\n\t" \ | ||
143 | STR(PTR)"\t2b, 4b\n\t" \ | ||
144 | ".previous" \ | ||
145 | : "=&r" (value), "=r" (res) \ | ||
146 | : "r" (addr), "i" (-EFAULT)); | ||
147 | |||
148 | #define LoadHWU(addr, value, res) \ | ||
149 | __asm__ __volatile__ ( \ | ||
150 | ".set\tnoat\n" \ | ||
151 | "1:\tlbu\t%0, 0(%2)\n" \ | ||
152 | "2:\tlbu\t$1, 1(%2)\n\t" \ | ||
153 | "sll\t%0, 0x8\n\t" \ | ||
154 | "or\t%0, $1\n\t" \ | ||
155 | "li\t%1, 0\n" \ | ||
156 | "3:\n\t" \ | ||
157 | ".insn\n\t" \ | ||
158 | ".set\tat\n\t" \ | ||
159 | ".section\t.fixup,\"ax\"\n\t" \ | ||
160 | "4:\tli\t%1, %3\n\t" \ | ||
161 | "j\t3b\n\t" \ | ||
162 | ".previous\n\t" \ | ||
163 | ".section\t__ex_table,\"a\"\n\t" \ | ||
164 | STR(PTR)"\t1b, 4b\n\t" \ | ||
165 | STR(PTR)"\t2b, 4b\n\t" \ | ||
166 | ".previous" \ | ||
167 | : "=&r" (value), "=r" (res) \ | ||
168 | : "r" (addr), "i" (-EFAULT)); | ||
169 | |||
170 | #define LoadWU(addr, value, res) \ | ||
171 | __asm__ __volatile__ ( \ | ||
172 | "1:\tlwl\t%0, (%2)\n" \ | ||
173 | "2:\tlwr\t%0, 3(%2)\n\t" \ | ||
174 | "dsll\t%0, %0, 32\n\t" \ | ||
175 | "dsrl\t%0, %0, 32\n\t" \ | ||
176 | "li\t%1, 0\n" \ | ||
177 | "3:\n\t" \ | ||
178 | ".insn\n\t" \ | ||
179 | "\t.section\t.fixup,\"ax\"\n\t" \ | ||
180 | "4:\tli\t%1, %3\n\t" \ | ||
181 | "j\t3b\n\t" \ | ||
182 | ".previous\n\t" \ | ||
183 | ".section\t__ex_table,\"a\"\n\t" \ | ||
184 | STR(PTR)"\t1b, 4b\n\t" \ | ||
185 | STR(PTR)"\t2b, 4b\n\t" \ | ||
186 | ".previous" \ | ||
187 | : "=&r" (value), "=r" (res) \ | ||
188 | : "r" (addr), "i" (-EFAULT)); | ||
189 | |||
190 | #define LoadDW(addr, value, res) \ | ||
191 | __asm__ __volatile__ ( \ | ||
192 | "1:\tldl\t%0, (%2)\n" \ | ||
193 | "2:\tldr\t%0, 7(%2)\n\t" \ | ||
194 | "li\t%1, 0\n" \ | ||
195 | "3:\n\t" \ | ||
196 | ".insn\n\t" \ | ||
197 | "\t.section\t.fixup,\"ax\"\n\t" \ | ||
198 | "4:\tli\t%1, %3\n\t" \ | ||
199 | "j\t3b\n\t" \ | ||
200 | ".previous\n\t" \ | ||
201 | ".section\t__ex_table,\"a\"\n\t" \ | ||
202 | STR(PTR)"\t1b, 4b\n\t" \ | ||
203 | STR(PTR)"\t2b, 4b\n\t" \ | ||
204 | ".previous" \ | ||
205 | : "=&r" (value), "=r" (res) \ | ||
206 | : "r" (addr), "i" (-EFAULT)); | ||
207 | |||
208 | #define StoreHW(addr, value, res) \ | ||
209 | __asm__ __volatile__ ( \ | ||
210 | ".set\tnoat\n" \ | ||
211 | "1:\tsb\t%1, 1(%2)\n\t" \ | ||
212 | "srl\t$1, %1, 0x8\n" \ | ||
213 | "2:\tsb\t$1, 0(%2)\n\t" \ | ||
214 | ".set\tat\n\t" \ | ||
215 | "li\t%0, 0\n" \ | ||
216 | "3:\n\t" \ | ||
217 | ".insn\n\t" \ | ||
218 | ".section\t.fixup,\"ax\"\n\t" \ | ||
219 | "4:\tli\t%0, %3\n\t" \ | ||
220 | "j\t3b\n\t" \ | ||
221 | ".previous\n\t" \ | ||
222 | ".section\t__ex_table,\"a\"\n\t" \ | ||
223 | STR(PTR)"\t1b, 4b\n\t" \ | ||
224 | STR(PTR)"\t2b, 4b\n\t" \ | ||
225 | ".previous" \ | ||
226 | : "=r" (res) \ | ||
227 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
228 | |||
229 | #define StoreW(addr, value, res) \ | ||
230 | __asm__ __volatile__ ( \ | ||
231 | "1:\tswl\t%1,(%2)\n" \ | ||
232 | "2:\tswr\t%1, 3(%2)\n\t" \ | ||
233 | "li\t%0, 0\n" \ | ||
234 | "3:\n\t" \ | ||
235 | ".insn\n\t" \ | ||
236 | ".section\t.fixup,\"ax\"\n\t" \ | ||
237 | "4:\tli\t%0, %3\n\t" \ | ||
238 | "j\t3b\n\t" \ | ||
239 | ".previous\n\t" \ | ||
240 | ".section\t__ex_table,\"a\"\n\t" \ | ||
241 | STR(PTR)"\t1b, 4b\n\t" \ | ||
242 | STR(PTR)"\t2b, 4b\n\t" \ | ||
243 | ".previous" \ | ||
244 | : "=r" (res) \ | ||
245 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
246 | |||
247 | #define StoreDW(addr, value, res) \ | ||
248 | __asm__ __volatile__ ( \ | ||
249 | "1:\tsdl\t%1,(%2)\n" \ | ||
250 | "2:\tsdr\t%1, 7(%2)\n\t" \ | ||
251 | "li\t%0, 0\n" \ | ||
252 | "3:\n\t" \ | ||
253 | ".insn\n\t" \ | ||
254 | ".section\t.fixup,\"ax\"\n\t" \ | ||
255 | "4:\tli\t%0, %3\n\t" \ | ||
256 | "j\t3b\n\t" \ | ||
257 | ".previous\n\t" \ | ||
258 | ".section\t__ex_table,\"a\"\n\t" \ | ||
259 | STR(PTR)"\t1b, 4b\n\t" \ | ||
260 | STR(PTR)"\t2b, 4b\n\t" \ | ||
261 | ".previous" \ | ||
262 | : "=r" (res) \ | ||
263 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
264 | #endif | ||
265 | |||
266 | #ifdef __LITTLE_ENDIAN | ||
267 | #define LoadHW(addr, value, res) \ | ||
268 | __asm__ __volatile__ (".set\tnoat\n" \ | ||
269 | "1:\tlb\t%0, 1(%2)\n" \ | ||
270 | "2:\tlbu\t$1, 0(%2)\n\t" \ | ||
271 | "sll\t%0, 0x8\n\t" \ | ||
272 | "or\t%0, $1\n\t" \ | ||
273 | "li\t%1, 0\n" \ | ||
274 | "3:\t.set\tat\n\t" \ | ||
275 | ".insn\n\t" \ | ||
276 | ".section\t.fixup,\"ax\"\n\t" \ | ||
277 | "4:\tli\t%1, %3\n\t" \ | ||
278 | "j\t3b\n\t" \ | ||
279 | ".previous\n\t" \ | ||
280 | ".section\t__ex_table,\"a\"\n\t" \ | ||
281 | STR(PTR)"\t1b, 4b\n\t" \ | ||
282 | STR(PTR)"\t2b, 4b\n\t" \ | ||
283 | ".previous" \ | ||
284 | : "=&r" (value), "=r" (res) \ | ||
285 | : "r" (addr), "i" (-EFAULT)); | ||
286 | |||
287 | #define LoadW(addr, value, res) \ | ||
288 | __asm__ __volatile__ ( \ | ||
289 | "1:\tlwl\t%0, 3(%2)\n" \ | ||
290 | "2:\tlwr\t%0, (%2)\n\t" \ | ||
291 | "li\t%1, 0\n" \ | ||
292 | "3:\n\t" \ | ||
293 | ".insn\n\t" \ | ||
294 | ".section\t.fixup,\"ax\"\n\t" \ | ||
295 | "4:\tli\t%1, %3\n\t" \ | ||
296 | "j\t3b\n\t" \ | ||
297 | ".previous\n\t" \ | ||
298 | ".section\t__ex_table,\"a\"\n\t" \ | ||
299 | STR(PTR)"\t1b, 4b\n\t" \ | ||
300 | STR(PTR)"\t2b, 4b\n\t" \ | ||
301 | ".previous" \ | ||
302 | : "=&r" (value), "=r" (res) \ | ||
303 | : "r" (addr), "i" (-EFAULT)); | ||
304 | |||
305 | #define LoadHWU(addr, value, res) \ | ||
306 | __asm__ __volatile__ ( \ | ||
307 | ".set\tnoat\n" \ | ||
308 | "1:\tlbu\t%0, 1(%2)\n" \ | ||
309 | "2:\tlbu\t$1, 0(%2)\n\t" \ | ||
310 | "sll\t%0, 0x8\n\t" \ | ||
311 | "or\t%0, $1\n\t" \ | ||
312 | "li\t%1, 0\n" \ | ||
313 | "3:\n\t" \ | ||
314 | ".insn\n\t" \ | ||
315 | ".set\tat\n\t" \ | ||
316 | ".section\t.fixup,\"ax\"\n\t" \ | ||
317 | "4:\tli\t%1, %3\n\t" \ | ||
318 | "j\t3b\n\t" \ | ||
319 | ".previous\n\t" \ | ||
320 | ".section\t__ex_table,\"a\"\n\t" \ | ||
321 | STR(PTR)"\t1b, 4b\n\t" \ | ||
322 | STR(PTR)"\t2b, 4b\n\t" \ | ||
323 | ".previous" \ | ||
324 | : "=&r" (value), "=r" (res) \ | ||
325 | : "r" (addr), "i" (-EFAULT)); | ||
326 | |||
327 | #define LoadWU(addr, value, res) \ | ||
328 | __asm__ __volatile__ ( \ | ||
329 | "1:\tlwl\t%0, 3(%2)\n" \ | ||
330 | "2:\tlwr\t%0, (%2)\n\t" \ | ||
331 | "dsll\t%0, %0, 32\n\t" \ | ||
332 | "dsrl\t%0, %0, 32\n\t" \ | ||
333 | "li\t%1, 0\n" \ | ||
334 | "3:\n\t" \ | ||
335 | ".insn\n\t" \ | ||
336 | "\t.section\t.fixup,\"ax\"\n\t" \ | ||
337 | "4:\tli\t%1, %3\n\t" \ | ||
338 | "j\t3b\n\t" \ | ||
339 | ".previous\n\t" \ | ||
340 | ".section\t__ex_table,\"a\"\n\t" \ | ||
341 | STR(PTR)"\t1b, 4b\n\t" \ | ||
342 | STR(PTR)"\t2b, 4b\n\t" \ | ||
343 | ".previous" \ | ||
344 | : "=&r" (value), "=r" (res) \ | ||
345 | : "r" (addr), "i" (-EFAULT)); | ||
346 | |||
347 | #define LoadDW(addr, value, res) \ | ||
348 | __asm__ __volatile__ ( \ | ||
349 | "1:\tldl\t%0, 7(%2)\n" \ | ||
350 | "2:\tldr\t%0, (%2)\n\t" \ | ||
351 | "li\t%1, 0\n" \ | ||
352 | "3:\n\t" \ | ||
353 | ".insn\n\t" \ | ||
354 | "\t.section\t.fixup,\"ax\"\n\t" \ | ||
355 | "4:\tli\t%1, %3\n\t" \ | ||
356 | "j\t3b\n\t" \ | ||
357 | ".previous\n\t" \ | ||
358 | ".section\t__ex_table,\"a\"\n\t" \ | ||
359 | STR(PTR)"\t1b, 4b\n\t" \ | ||
360 | STR(PTR)"\t2b, 4b\n\t" \ | ||
361 | ".previous" \ | ||
362 | : "=&r" (value), "=r" (res) \ | ||
363 | : "r" (addr), "i" (-EFAULT)); | ||
364 | |||
365 | #define StoreHW(addr, value, res) \ | ||
366 | __asm__ __volatile__ ( \ | ||
367 | ".set\tnoat\n" \ | ||
368 | "1:\tsb\t%1, 0(%2)\n\t" \ | ||
369 | "srl\t$1,%1, 0x8\n" \ | ||
370 | "2:\tsb\t$1, 1(%2)\n\t" \ | ||
371 | ".set\tat\n\t" \ | ||
372 | "li\t%0, 0\n" \ | ||
373 | "3:\n\t" \ | ||
374 | ".insn\n\t" \ | ||
375 | ".section\t.fixup,\"ax\"\n\t" \ | ||
376 | "4:\tli\t%0, %3\n\t" \ | ||
377 | "j\t3b\n\t" \ | ||
378 | ".previous\n\t" \ | ||
379 | ".section\t__ex_table,\"a\"\n\t" \ | ||
380 | STR(PTR)"\t1b, 4b\n\t" \ | ||
381 | STR(PTR)"\t2b, 4b\n\t" \ | ||
382 | ".previous" \ | ||
383 | : "=r" (res) \ | ||
384 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
385 | |||
386 | #define StoreW(addr, value, res) \ | ||
387 | __asm__ __volatile__ ( \ | ||
388 | "1:\tswl\t%1, 3(%2)\n" \ | ||
389 | "2:\tswr\t%1, (%2)\n\t" \ | ||
390 | "li\t%0, 0\n" \ | ||
391 | "3:\n\t" \ | ||
392 | ".insn\n\t" \ | ||
393 | ".section\t.fixup,\"ax\"\n\t" \ | ||
394 | "4:\tli\t%0, %3\n\t" \ | ||
395 | "j\t3b\n\t" \ | ||
396 | ".previous\n\t" \ | ||
397 | ".section\t__ex_table,\"a\"\n\t" \ | ||
398 | STR(PTR)"\t1b, 4b\n\t" \ | ||
399 | STR(PTR)"\t2b, 4b\n\t" \ | ||
400 | ".previous" \ | ||
401 | : "=r" (res) \ | ||
402 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
403 | |||
404 | #define StoreDW(addr, value, res) \ | ||
405 | __asm__ __volatile__ ( \ | ||
406 | "1:\tsdl\t%1, 7(%2)\n" \ | ||
407 | "2:\tsdr\t%1, (%2)\n\t" \ | ||
408 | "li\t%0, 0\n" \ | ||
409 | "3:\n\t" \ | ||
410 | ".insn\n\t" \ | ||
411 | ".section\t.fixup,\"ax\"\n\t" \ | ||
412 | "4:\tli\t%0, %3\n\t" \ | ||
413 | "j\t3b\n\t" \ | ||
414 | ".previous\n\t" \ | ||
415 | ".section\t__ex_table,\"a\"\n\t" \ | ||
416 | STR(PTR)"\t1b, 4b\n\t" \ | ||
417 | STR(PTR)"\t2b, 4b\n\t" \ | ||
418 | ".previous" \ | ||
419 | : "=r" (res) \ | ||
420 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
421 | #endif | ||
422 | |||
105 | static void emulate_load_store_insn(struct pt_regs *regs, | 423 | static void emulate_load_store_insn(struct pt_regs *regs, |
106 | void __user *addr, unsigned int __user *pc) | 424 | void __user *addr, unsigned int __user *pc) |
107 | { | 425 | { |
108 | union mips_instruction insn; | 426 | union mips_instruction insn; |
109 | unsigned long value; | 427 | unsigned long value; |
110 | unsigned int res; | 428 | unsigned int res; |
429 | unsigned long origpc; | ||
430 | unsigned long orig31; | ||
431 | void __user *fault_addr = NULL; | ||
432 | |||
433 | origpc = (unsigned long)pc; | ||
434 | orig31 = regs->regs[31]; | ||
111 | 435 | ||
112 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | 436 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
113 | 437 | ||
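As a plain-C point of reference (not part of the patch), the LoadW()/StoreW() macro family added above does with lwl/lwr (or byte loads) plus __ex_table fixups what a byte-wise copy does in ordinary C: assemble or scatter a value at an address with no alignment guarantee, reporting -EFAULT through "res" instead of taking an unrecoverable fault. A small host-side sketch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Byte-wise unaligned 32-bit load; the kernel macros add fault handling. */
    static uint32_t load_w_unaligned(const uint8_t *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));
            return v;
    }

    int main(void)
    {
            uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

            /* buf + 1 is not 4-byte aligned. */
            printf("value = 0x%08x\n", load_w_unaligned(buf + 1));
            return 0;
    }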
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
117 | __get_user(insn.word, pc); | 441 | __get_user(insn.word, pc); |
118 | 442 | ||
119 | switch (insn.i_format.opcode) { | 443 | switch (insn.i_format.opcode) { |
120 | /* | 444 | /* |
121 | * These are instructions that a compiler doesn't generate. We | 445 | * These are instructions that a compiler doesn't generate. We |
122 | * can assume therefore that the code is MIPS-aware and | 446 | * can assume therefore that the code is MIPS-aware and |
123 | * really buggy. Emulating these instructions would break the | 447 | * really buggy. Emulating these instructions would break the |
124 | * semantics anyway. | 448 | * semantics anyway. |
125 | */ | 449 | */ |
126 | case ll_op: | 450 | case ll_op: |
127 | case lld_op: | 451 | case lld_op: |
128 | case sc_op: | 452 | case sc_op: |
129 | case scd_op: | 453 | case scd_op: |
130 | 454 | ||
131 | /* | 455 | /* |
132 | * For these instructions the only way to create an address | 456 | * For these instructions the only way to create an address |
133 | * error is an attempted access to kernel/supervisor address | 457 | * error is an attempted access to kernel/supervisor address |
134 | * space. | 458 | * space. |
135 | */ | 459 | */ |
136 | case ldl_op: | 460 | case ldl_op: |
137 | case ldr_op: | 461 | case ldr_op: |
138 | case lwl_op: | 462 | case lwl_op: |
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
146 | case sb_op: | 470 | case sb_op: |
147 | goto sigbus; | 471 | goto sigbus; |
148 | 472 | ||
149 | /* | 473 | /* |
150 | * The remaining opcodes are the ones that are really of interest. | 474 | * The remaining opcodes are the ones that are really of |
151 | */ | 475 | * interest. |
476 | */ | ||
152 | case lh_op: | 477 | case lh_op: |
153 | if (!access_ok(VERIFY_READ, addr, 2)) | 478 | if (!access_ok(VERIFY_READ, addr, 2)) |
154 | goto sigbus; | 479 | goto sigbus; |
155 | 480 | ||
156 | __asm__ __volatile__ (".set\tnoat\n" | 481 | LoadHW(addr, value, res); |
157 | #ifdef __BIG_ENDIAN | ||
158 | "1:\tlb\t%0, 0(%2)\n" | ||
159 | "2:\tlbu\t$1, 1(%2)\n\t" | ||
160 | #endif | ||
161 | #ifdef __LITTLE_ENDIAN | ||
162 | "1:\tlb\t%0, 1(%2)\n" | ||
163 | "2:\tlbu\t$1, 0(%2)\n\t" | ||
164 | #endif | ||
165 | "sll\t%0, 0x8\n\t" | ||
166 | "or\t%0, $1\n\t" | ||
167 | "li\t%1, 0\n" | ||
168 | "3:\t.set\tat\n\t" | ||
169 | ".section\t.fixup,\"ax\"\n\t" | ||
170 | "4:\tli\t%1, %3\n\t" | ||
171 | "j\t3b\n\t" | ||
172 | ".previous\n\t" | ||
173 | ".section\t__ex_table,\"a\"\n\t" | ||
174 | STR(PTR)"\t1b, 4b\n\t" | ||
175 | STR(PTR)"\t2b, 4b\n\t" | ||
176 | ".previous" | ||
177 | : "=&r" (value), "=r" (res) | ||
178 | : "r" (addr), "i" (-EFAULT)); | ||
179 | if (res) | 482 | if (res) |
180 | goto fault; | 483 | goto fault; |
181 | compute_return_epc(regs); | 484 | compute_return_epc(regs); |
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
186 | if (!access_ok(VERIFY_READ, addr, 4)) | 489 | if (!access_ok(VERIFY_READ, addr, 4)) |
187 | goto sigbus; | 490 | goto sigbus; |
188 | 491 | ||
189 | __asm__ __volatile__ ( | 492 | LoadW(addr, value, res); |
190 | #ifdef __BIG_ENDIAN | ||
191 | "1:\tlwl\t%0, (%2)\n" | ||
192 | "2:\tlwr\t%0, 3(%2)\n\t" | ||
193 | #endif | ||
194 | #ifdef __LITTLE_ENDIAN | ||
195 | "1:\tlwl\t%0, 3(%2)\n" | ||
196 | "2:\tlwr\t%0, (%2)\n\t" | ||
197 | #endif | ||
198 | "li\t%1, 0\n" | ||
199 | "3:\t.section\t.fixup,\"ax\"\n\t" | ||
200 | "4:\tli\t%1, %3\n\t" | ||
201 | "j\t3b\n\t" | ||
202 | ".previous\n\t" | ||
203 | ".section\t__ex_table,\"a\"\n\t" | ||
204 | STR(PTR)"\t1b, 4b\n\t" | ||
205 | STR(PTR)"\t2b, 4b\n\t" | ||
206 | ".previous" | ||
207 | : "=&r" (value), "=r" (res) | ||
208 | : "r" (addr), "i" (-EFAULT)); | ||
209 | if (res) | 493 | if (res) |
210 | goto fault; | 494 | goto fault; |
211 | compute_return_epc(regs); | 495 | compute_return_epc(regs); |
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
216 | if (!access_ok(VERIFY_READ, addr, 2)) | 500 | if (!access_ok(VERIFY_READ, addr, 2)) |
217 | goto sigbus; | 501 | goto sigbus; |
218 | 502 | ||
219 | __asm__ __volatile__ ( | 503 | LoadHWU(addr, value, res); |
220 | ".set\tnoat\n" | ||
221 | #ifdef __BIG_ENDIAN | ||
222 | "1:\tlbu\t%0, 0(%2)\n" | ||
223 | "2:\tlbu\t$1, 1(%2)\n\t" | ||
224 | #endif | ||
225 | #ifdef __LITTLE_ENDIAN | ||
226 | "1:\tlbu\t%0, 1(%2)\n" | ||
227 | "2:\tlbu\t$1, 0(%2)\n\t" | ||
228 | #endif | ||
229 | "sll\t%0, 0x8\n\t" | ||
230 | "or\t%0, $1\n\t" | ||
231 | "li\t%1, 0\n" | ||
232 | "3:\t.set\tat\n\t" | ||
233 | ".section\t.fixup,\"ax\"\n\t" | ||
234 | "4:\tli\t%1, %3\n\t" | ||
235 | "j\t3b\n\t" | ||
236 | ".previous\n\t" | ||
237 | ".section\t__ex_table,\"a\"\n\t" | ||
238 | STR(PTR)"\t1b, 4b\n\t" | ||
239 | STR(PTR)"\t2b, 4b\n\t" | ||
240 | ".previous" | ||
241 | : "=&r" (value), "=r" (res) | ||
242 | : "r" (addr), "i" (-EFAULT)); | ||
243 | if (res) | 504 | if (res) |
244 | goto fault; | 505 | goto fault; |
245 | compute_return_epc(regs); | 506 | compute_return_epc(regs); |
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
258 | if (!access_ok(VERIFY_READ, addr, 4)) | 519 | if (!access_ok(VERIFY_READ, addr, 4)) |
259 | goto sigbus; | 520 | goto sigbus; |
260 | 521 | ||
261 | __asm__ __volatile__ ( | 522 | LoadWU(addr, value, res); |
262 | #ifdef __BIG_ENDIAN | ||
263 | "1:\tlwl\t%0, (%2)\n" | ||
264 | "2:\tlwr\t%0, 3(%2)\n\t" | ||
265 | #endif | ||
266 | #ifdef __LITTLE_ENDIAN | ||
267 | "1:\tlwl\t%0, 3(%2)\n" | ||
268 | "2:\tlwr\t%0, (%2)\n\t" | ||
269 | #endif | ||
270 | "dsll\t%0, %0, 32\n\t" | ||
271 | "dsrl\t%0, %0, 32\n\t" | ||
272 | "li\t%1, 0\n" | ||
273 | "3:\t.section\t.fixup,\"ax\"\n\t" | ||
274 | "4:\tli\t%1, %3\n\t" | ||
275 | "j\t3b\n\t" | ||
276 | ".previous\n\t" | ||
277 | ".section\t__ex_table,\"a\"\n\t" | ||
278 | STR(PTR)"\t1b, 4b\n\t" | ||
279 | STR(PTR)"\t2b, 4b\n\t" | ||
280 | ".previous" | ||
281 | : "=&r" (value), "=r" (res) | ||
282 | : "r" (addr), "i" (-EFAULT)); | ||
283 | if (res) | 523 | if (res) |
284 | goto fault; | 524 | goto fault; |
285 | compute_return_epc(regs); | 525 | compute_return_epc(regs); |
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
302 | if (!access_ok(VERIFY_READ, addr, 8)) | 542 | if (!access_ok(VERIFY_READ, addr, 8)) |
303 | goto sigbus; | 543 | goto sigbus; |
304 | 544 | ||
305 | __asm__ __volatile__ ( | 545 | LoadDW(addr, value, res); |
306 | #ifdef __BIG_ENDIAN | ||
307 | "1:\tldl\t%0, (%2)\n" | ||
308 | "2:\tldr\t%0, 7(%2)\n\t" | ||
309 | #endif | ||
310 | #ifdef __LITTLE_ENDIAN | ||
311 | "1:\tldl\t%0, 7(%2)\n" | ||
312 | "2:\tldr\t%0, (%2)\n\t" | ||
313 | #endif | ||
314 | "li\t%1, 0\n" | ||
315 | "3:\t.section\t.fixup,\"ax\"\n\t" | ||
316 | "4:\tli\t%1, %3\n\t" | ||
317 | "j\t3b\n\t" | ||
318 | ".previous\n\t" | ||
319 | ".section\t__ex_table,\"a\"\n\t" | ||
320 | STR(PTR)"\t1b, 4b\n\t" | ||
321 | STR(PTR)"\t2b, 4b\n\t" | ||
322 | ".previous" | ||
323 | : "=&r" (value), "=r" (res) | ||
324 | : "r" (addr), "i" (-EFAULT)); | ||
325 | if (res) | 546 | if (res) |
326 | goto fault; | 547 | goto fault; |
327 | compute_return_epc(regs); | 548 | compute_return_epc(regs); |
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
336 | if (!access_ok(VERIFY_WRITE, addr, 2)) | 557 | if (!access_ok(VERIFY_WRITE, addr, 2)) |
337 | goto sigbus; | 558 | goto sigbus; |
338 | 559 | ||
560 | compute_return_epc(regs); | ||
339 | value = regs->regs[insn.i_format.rt]; | 561 | value = regs->regs[insn.i_format.rt]; |
340 | __asm__ __volatile__ ( | 562 | StoreHW(addr, value, res); |
341 | #ifdef __BIG_ENDIAN | ||
342 | ".set\tnoat\n" | ||
343 | "1:\tsb\t%1, 1(%2)\n\t" | ||
344 | "srl\t$1, %1, 0x8\n" | ||
345 | "2:\tsb\t$1, 0(%2)\n\t" | ||
346 | ".set\tat\n\t" | ||
347 | #endif | ||
348 | #ifdef __LITTLE_ENDIAN | ||
349 | ".set\tnoat\n" | ||
350 | "1:\tsb\t%1, 0(%2)\n\t" | ||
351 | "srl\t$1,%1, 0x8\n" | ||
352 | "2:\tsb\t$1, 1(%2)\n\t" | ||
353 | ".set\tat\n\t" | ||
354 | #endif | ||
355 | "li\t%0, 0\n" | ||
356 | "3:\n\t" | ||
357 | ".section\t.fixup,\"ax\"\n\t" | ||
358 | "4:\tli\t%0, %3\n\t" | ||
359 | "j\t3b\n\t" | ||
360 | ".previous\n\t" | ||
361 | ".section\t__ex_table,\"a\"\n\t" | ||
362 | STR(PTR)"\t1b, 4b\n\t" | ||
363 | STR(PTR)"\t2b, 4b\n\t" | ||
364 | ".previous" | ||
365 | : "=r" (res) | ||
366 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
367 | if (res) | 563 | if (res) |
368 | goto fault; | 564 | goto fault; |
369 | compute_return_epc(regs); | ||
370 | break; | 565 | break; |
371 | 566 | ||
372 | case sw_op: | 567 | case sw_op: |
373 | if (!access_ok(VERIFY_WRITE, addr, 4)) | 568 | if (!access_ok(VERIFY_WRITE, addr, 4)) |
374 | goto sigbus; | 569 | goto sigbus; |
375 | 570 | ||
571 | compute_return_epc(regs); | ||
376 | value = regs->regs[insn.i_format.rt]; | 572 | value = regs->regs[insn.i_format.rt]; |
377 | __asm__ __volatile__ ( | 573 | StoreW(addr, value, res); |
378 | #ifdef __BIG_ENDIAN | ||
379 | "1:\tswl\t%1,(%2)\n" | ||
380 | "2:\tswr\t%1, 3(%2)\n\t" | ||
381 | #endif | ||
382 | #ifdef __LITTLE_ENDIAN | ||
383 | "1:\tswl\t%1, 3(%2)\n" | ||
384 | "2:\tswr\t%1, (%2)\n\t" | ||
385 | #endif | ||
386 | "li\t%0, 0\n" | ||
387 | "3:\n\t" | ||
388 | ".section\t.fixup,\"ax\"\n\t" | ||
389 | "4:\tli\t%0, %3\n\t" | ||
390 | "j\t3b\n\t" | ||
391 | ".previous\n\t" | ||
392 | ".section\t__ex_table,\"a\"\n\t" | ||
393 | STR(PTR)"\t1b, 4b\n\t" | ||
394 | STR(PTR)"\t2b, 4b\n\t" | ||
395 | ".previous" | ||
396 | : "=r" (res) | ||
397 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
398 | if (res) | 574 | if (res) |
399 | goto fault; | 575 | goto fault; |
400 | compute_return_epc(regs); | ||
401 | break; | 576 | break; |
402 | 577 | ||
403 | case sd_op: | 578 | case sd_op: |
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
412 | if (!access_ok(VERIFY_WRITE, addr, 8)) | 587 | if (!access_ok(VERIFY_WRITE, addr, 8)) |
413 | goto sigbus; | 588 | goto sigbus; |
414 | 589 | ||
590 | compute_return_epc(regs); | ||
415 | value = regs->regs[insn.i_format.rt]; | 591 | value = regs->regs[insn.i_format.rt]; |
416 | __asm__ __volatile__ ( | 592 | StoreDW(addr, value, res); |
417 | #ifdef __BIG_ENDIAN | ||
418 | "1:\tsdl\t%1,(%2)\n" | ||
419 | "2:\tsdr\t%1, 7(%2)\n\t" | ||
420 | #endif | ||
421 | #ifdef __LITTLE_ENDIAN | ||
422 | "1:\tsdl\t%1, 7(%2)\n" | ||
423 | "2:\tsdr\t%1, (%2)\n\t" | ||
424 | #endif | ||
425 | "li\t%0, 0\n" | ||
426 | "3:\n\t" | ||
427 | ".section\t.fixup,\"ax\"\n\t" | ||
428 | "4:\tli\t%0, %3\n\t" | ||
429 | "j\t3b\n\t" | ||
430 | ".previous\n\t" | ||
431 | ".section\t__ex_table,\"a\"\n\t" | ||
432 | STR(PTR)"\t1b, 4b\n\t" | ||
433 | STR(PTR)"\t2b, 4b\n\t" | ||
434 | ".previous" | ||
435 | : "=r" (res) | ||
436 | : "r" (value), "r" (addr), "i" (-EFAULT)); | ||
437 | if (res) | 593 | if (res) |
438 | goto fault; | 594 | goto fault; |
439 | compute_return_epc(regs); | ||
440 | break; | 595 | break; |
441 | #endif /* CONFIG_64BIT */ | 596 | #endif /* CONFIG_64BIT */ |
442 | 597 | ||
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
447 | case ldc1_op: | 602 | case ldc1_op: |
448 | case swc1_op: | 603 | case swc1_op: |
449 | case sdc1_op: | 604 | case sdc1_op: |
450 | /* | 605 | die_if_kernel("Unaligned FP access in kernel code", regs); |
451 | * I herewith declare: this does not happen. So send SIGBUS. | 606 | BUG_ON(!used_math()); |
452 | */ | 607 | BUG_ON(!is_fpu_owner()); |
453 | goto sigbus; | 608 | |
609 | lose_fpu(1); /* Save FPU state for the emulator. */ | ||
610 | res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | ||
611 | &fault_addr); | ||
612 | own_fpu(1); /* Restore FPU state. */ | ||
613 | |||
614 | /* Signal if something went wrong. */ | ||
615 | process_fpemu_return(res, fault_addr); | ||
616 | |||
617 | if (res == 0) | ||
618 | break; | ||
619 | return; | ||
454 | 620 | ||
455 | /* | 621 | /* |
456 | * COP2 is available to implementor for application specific use. | 622 | * COP2 is available to implementor for application specific use. |
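A condensed sketch of the FPU hand-off pattern introduced above (emulate_one_fp_insn is a hypothetical helper, not a kernel API): the live FPU context is saved for the emulator, one coprocessor-1 instruction is interpreted in software, the context is reloaded, and any failure is turned into a signal.

    #include <asm/fpu.h>
    #include <asm/fpu_emulator.h>
    #include <asm/ptrace.h>
    #include <linux/sched.h>

    static int emulate_one_fp_insn(struct pt_regs *regs)
    {
            void __user *fault_addr = NULL;
            int res;

            lose_fpu(1);    /* save live FPU state for the emulator */
            res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                           &fault_addr);
            own_fpu(1);     /* restore FPU state */

            process_fpemu_return(res, fault_addr);  /* signal on error */
            return res;
    }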
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
488 | return; | 654 | return; |
489 | 655 | ||
490 | fault: | 656 | fault: |
657 | /* roll back jump/branch */ | ||
658 | regs->cp0_epc = origpc; | ||
659 | regs->regs[31] = orig31; | ||
491 | /* Did we have an exception handler installed? */ | 660 | /* Did we have an exception handler installed? */ |
492 | if (fixup_exception(regs)) | 661 | if (fixup_exception(regs)) |
493 | return; | 662 | return; |
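Sketch of the rollback idea behind the two lines added above (the helper name is illustrative): the unaligned handler runs compute_return_epc() before or after touching memory, so a branch-and-link may already have updated both the saved PC and $31; a later fault must undo both before the normal fixup or signal path runs.

    #include <asm/ptrace.h>

    /* Undo a software-executed branch: restore the saved PC and $ra. */
    static void rollback_branch(struct pt_regs *regs,
                                unsigned long origpc, unsigned long orig31)
    {
            regs->cp0_epc = origpc;
            regs->regs[31] = orig31;
    }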
@@ -504,10 +673,881 @@ sigbus: | |||
504 | return; | 673 | return; |
505 | 674 | ||
506 | sigill: | 675 | sigill: |
507 | die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); | 676 | die_if_kernel |
677 | ("Unhandled kernel unaligned access or invalid instruction", regs); | ||
508 | force_sig(SIGILL, current); | 678 | force_sig(SIGILL, current); |
509 | } | 679 | } |
510 | 680 | ||
681 | /* Recode table from 16-bit register notation to 32-bit GPR. */ | ||
682 | const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; | ||
683 | |||
684 | /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ | ||
685 | const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; | ||
686 | |||
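A standalone check (not from the patch) of the recode tables above: MIPS16e's 3-bit register fields 0..7 name GPRs $16, $17, $2..$7, and the store variant maps field 0 to $0 instead of $16.

    #include <stdio.h>

    static const int reg16to32[]   = { 16, 17, 2, 3, 4, 5, 6, 7 };
    static const int reg16to32st[] = {  0, 17, 2, 3, 4, 5, 6, 7 };

    int main(void)
    {
            for (int r = 0; r < 8; r++)
                    printf("r%d -> load $%d, store $%d\n",
                           r, reg16to32[r], reg16to32st[r]);
            return 0;
    }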
687 | void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) | ||
688 | { | ||
689 | unsigned long value; | ||
690 | unsigned int res; | ||
691 | int i; | ||
692 | unsigned int reg = 0, rvar; | ||
693 | unsigned long orig31; | ||
694 | u16 __user *pc16; | ||
695 | u16 halfword; | ||
696 | unsigned int word; | ||
697 | unsigned long origpc, contpc; | ||
698 | union mips_instruction insn; | ||
699 | struct mm_decoded_insn mminsn; | ||
700 | void __user *fault_addr = NULL; | ||
701 | |||
702 | origpc = regs->cp0_epc; | ||
703 | orig31 = regs->regs[31]; | ||
704 | |||
705 | mminsn.micro_mips_mode = 1; | ||
706 | |||
707 | /* | ||
708 | * This load never faults. | ||
709 | */ | ||
710 | pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); | ||
711 | __get_user(halfword, pc16); | ||
712 | pc16++; | ||
713 | contpc = regs->cp0_epc + 2; | ||
714 | word = ((unsigned int)halfword << 16); | ||
715 | mminsn.pc_inc = 2; | ||
716 | |||
717 | if (!mm_insn_16bit(halfword)) { | ||
718 | __get_user(halfword, pc16); | ||
719 | pc16++; | ||
720 | contpc = regs->cp0_epc + 4; | ||
721 | mminsn.pc_inc = 4; | ||
722 | word |= halfword; | ||
723 | } | ||
724 | mminsn.insn = word; | ||
725 | |||
726 | if (get_user(halfword, pc16)) | ||
727 | goto fault; | ||
728 | mminsn.next_pc_inc = 2; | ||
729 | word = ((unsigned int)halfword << 16); | ||
730 | |||
731 | if (!mm_insn_16bit(halfword)) { | ||
732 | pc16++; | ||
733 | if (get_user(halfword, pc16)) | ||
734 | goto fault; | ||
735 | mminsn.next_pc_inc = 4; | ||
736 | word |= halfword; | ||
737 | } | ||
738 | mminsn.next_insn = word; | ||
739 | |||
740 | insn = (union mips_instruction)(mminsn.insn); | ||
741 | if (mm_isBranchInstr(regs, mminsn, &contpc)) | ||
742 | insn = (union mips_instruction)(mminsn.next_insn); | ||
743 | |||
744 | /* Parse instruction to find what to do */ | ||
745 | |||
746 | switch (insn.mm_i_format.opcode) { | ||
747 | |||
748 | case mm_pool32a_op: | ||
749 | switch (insn.mm_x_format.func) { | ||
750 | case mm_lwxs_op: | ||
751 | reg = insn.mm_x_format.rd; | ||
752 | goto loadW; | ||
753 | } | ||
754 | |||
755 | goto sigbus; | ||
756 | |||
757 | case mm_pool32b_op: | ||
758 | switch (insn.mm_m_format.func) { | ||
759 | case mm_lwp_func: | ||
760 | reg = insn.mm_m_format.rd; | ||
761 | if (reg == 31) | ||
762 | goto sigbus; | ||
763 | |||
764 | if (!access_ok(VERIFY_READ, addr, 8)) | ||
765 | goto sigbus; | ||
766 | |||
767 | LoadW(addr, value, res); | ||
768 | if (res) | ||
769 | goto fault; | ||
770 | regs->regs[reg] = value; | ||
771 | addr += 4; | ||
772 | LoadW(addr, value, res); | ||
773 | if (res) | ||
774 | goto fault; | ||
775 | regs->regs[reg + 1] = value; | ||
776 | goto success; | ||
777 | |||
778 | case mm_swp_func: | ||
779 | reg = insn.mm_m_format.rd; | ||
780 | if (reg == 31) | ||
781 | goto sigbus; | ||
782 | |||
783 | if (!access_ok(VERIFY_WRITE, addr, 8)) | ||
784 | goto sigbus; | ||
785 | |||
786 | value = regs->regs[reg]; | ||
787 | StoreW(addr, value, res); | ||
788 | if (res) | ||
789 | goto fault; | ||
790 | addr += 4; | ||
791 | value = regs->regs[reg + 1]; | ||
792 | StoreW(addr, value, res); | ||
793 | if (res) | ||
794 | goto fault; | ||
795 | goto success; | ||
796 | |||
797 | case mm_ldp_func: | ||
798 | #ifdef CONFIG_64BIT | ||
799 | reg = insn.mm_m_format.rd; | ||
800 | if (reg == 31) | ||
801 | goto sigbus; | ||
802 | |||
803 | if (!access_ok(VERIFY_READ, addr, 16)) | ||
804 | goto sigbus; | ||
805 | |||
806 | LoadDW(addr, value, res); | ||
807 | if (res) | ||
808 | goto fault; | ||
809 | regs->regs[reg] = value; | ||
810 | addr += 8; | ||
811 | LoadDW(addr, value, res); | ||
812 | if (res) | ||
813 | goto fault; | ||
814 | regs->regs[reg + 1] = value; | ||
815 | goto success; | ||
816 | #endif /* CONFIG_64BIT */ | ||
817 | |||
818 | goto sigill; | ||
819 | |||
820 | case mm_sdp_func: | ||
821 | #ifdef CONFIG_64BIT | ||
822 | reg = insn.mm_m_format.rd; | ||
823 | if (reg == 31) | ||
824 | goto sigbus; | ||
825 | |||
826 | if (!access_ok(VERIFY_WRITE, addr, 16)) | ||
827 | goto sigbus; | ||
828 | |||
829 | value = regs->regs[reg]; | ||
830 | StoreDW(addr, value, res); | ||
831 | if (res) | ||
832 | goto fault; | ||
833 | addr += 8; | ||
834 | value = regs->regs[reg + 1]; | ||
835 | StoreDW(addr, value, res); | ||
836 | if (res) | ||
837 | goto fault; | ||
838 | goto success; | ||
839 | #endif /* CONFIG_64BIT */ | ||
840 | |||
841 | goto sigill; | ||
842 | |||
843 | case mm_lwm32_func: | ||
844 | reg = insn.mm_m_format.rd; | ||
845 | rvar = reg & 0xf; | ||
846 | if ((rvar > 9) || !reg) | ||
847 | goto sigill; | ||
848 | if (reg & 0x10) { | ||
849 | if (!access_ok | ||
850 | (VERIFY_READ, addr, 4 * (rvar + 1))) | ||
851 | goto sigbus; | ||
852 | } else { | ||
853 | if (!access_ok(VERIFY_READ, addr, 4 * rvar)) | ||
854 | goto sigbus; | ||
855 | } | ||
856 | if (rvar == 9) | ||
857 | rvar = 8; | ||
858 | for (i = 16; rvar; rvar--, i++) { | ||
859 | LoadW(addr, value, res); | ||
860 | if (res) | ||
861 | goto fault; | ||
862 | addr += 4; | ||
863 | regs->regs[i] = value; | ||
864 | } | ||
865 | if ((reg & 0xf) == 9) { | ||
866 | LoadW(addr, value, res); | ||
867 | if (res) | ||
868 | goto fault; | ||
869 | addr += 4; | ||
870 | regs->regs[30] = value; | ||
871 | } | ||
872 | if (reg & 0x10) { | ||
873 | LoadW(addr, value, res); | ||
874 | if (res) | ||
875 | goto fault; | ||
876 | regs->regs[31] = value; | ||
877 | } | ||
878 | goto success; | ||
879 | |||
880 | case mm_swm32_func: | ||
881 | reg = insn.mm_m_format.rd; | ||
882 | rvar = reg & 0xf; | ||
883 | if ((rvar > 9) || !reg) | ||
884 | goto sigill; | ||
885 | if (reg & 0x10) { | ||
886 | if (!access_ok | ||
887 | (VERIFY_WRITE, addr, 4 * (rvar + 1))) | ||
888 | goto sigbus; | ||
889 | } else { | ||
890 | if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) | ||
891 | goto sigbus; | ||
892 | } | ||
893 | if (rvar == 9) | ||
894 | rvar = 8; | ||
895 | for (i = 16; rvar; rvar--, i++) { | ||
896 | value = regs->regs[i]; | ||
897 | StoreW(addr, value, res); | ||
898 | if (res) | ||
899 | goto fault; | ||
900 | addr += 4; | ||
901 | } | ||
902 | if ((reg & 0xf) == 9) { | ||
903 | value = regs->regs[30]; | ||
904 | StoreW(addr, value, res); | ||
905 | if (res) | ||
906 | goto fault; | ||
907 | addr += 4; | ||
908 | } | ||
909 | if (reg & 0x10) { | ||
910 | value = regs->regs[31]; | ||
911 | StoreW(addr, value, res); | ||
912 | if (res) | ||
913 | goto fault; | ||
914 | } | ||
915 | goto success; | ||
916 | |||
917 | case mm_ldm_func: | ||
918 | #ifdef CONFIG_64BIT | ||
919 | reg = insn.mm_m_format.rd; | ||
920 | rvar = reg & 0xf; | ||
921 | if ((rvar > 9) || !reg) | ||
922 | goto sigill; | ||
923 | if (reg & 0x10) { | ||
924 | if (!access_ok | ||
925 | (VERIFY_READ, addr, 8 * (rvar + 1))) | ||
926 | goto sigbus; | ||
927 | } else { | ||
928 | if (!access_ok(VERIFY_READ, addr, 8 * rvar)) | ||
929 | goto sigbus; | ||
930 | } | ||
931 | if (rvar == 9) | ||
932 | rvar = 8; | ||
933 | |||
934 | for (i = 16; rvar; rvar--, i++) { | ||
935 | LoadDW(addr, value, res); | ||
936 | if (res) | ||
937 | goto fault; | ||
938 | addr += 4; | ||
939 | regs->regs[i] = value; | ||
940 | } | ||
941 | if ((reg & 0xf) == 9) { | ||
942 | LoadDW(addr, value, res); | ||
943 | if (res) | ||
944 | goto fault; | ||
945 | addr += 8; | ||
946 | regs->regs[30] = value; | ||
947 | } | ||
948 | if (reg & 0x10) { | ||
949 | LoadDW(addr, value, res); | ||
950 | if (res) | ||
951 | goto fault; | ||
952 | regs->regs[31] = value; | ||
953 | } | ||
954 | goto success; | ||
955 | #endif /* CONFIG_64BIT */ | ||
956 | |||
957 | goto sigill; | ||
958 | |||
959 | case mm_sdm_func: | ||
960 | #ifdef CONFIG_64BIT | ||
961 | reg = insn.mm_m_format.rd; | ||
962 | rvar = reg & 0xf; | ||
963 | if ((rvar > 9) || !reg) | ||
964 | goto sigill; | ||
965 | if (reg & 0x10) { | ||
966 | if (!access_ok | ||
967 | (VERIFY_WRITE, addr, 8 * (rvar + 1))) | ||
968 | goto sigbus; | ||
969 | } else { | ||
970 | if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) | ||
971 | goto sigbus; | ||
972 | } | ||
973 | if (rvar == 9) | ||
974 | rvar = 8; | ||
975 | |||
976 | for (i = 16; rvar; rvar--, i++) { | ||
977 | value = regs->regs[i]; | ||
978 | StoreDW(addr, value, res); | ||
979 | if (res) | ||
980 | goto fault; | ||
981 | addr += 8; | ||
982 | } | ||
983 | if ((reg & 0xf) == 9) { | ||
984 | value = regs->regs[30]; | ||
985 | StoreDW(addr, value, res); | ||
986 | if (res) | ||
987 | goto fault; | ||
988 | addr += 8; | ||
989 | } | ||
990 | if (reg & 0x10) { | ||
991 | value = regs->regs[31]; | ||
992 | StoreDW(addr, value, res); | ||
993 | if (res) | ||
994 | goto fault; | ||
995 | } | ||
996 | goto success; | ||
997 | #endif /* CONFIG_64BIT */ | ||
998 | |||
999 | goto sigill; | ||
1000 | |||
1001 | /* LWC2, SWC2, LDC2, SDC2 are not serviced */ | ||
1002 | } | ||
1003 | |||
1004 | goto sigbus; | ||
1005 | |||
1006 | case mm_pool32c_op: | ||
1007 | switch (insn.mm_m_format.func) { | ||
1008 | case mm_lwu_func: | ||
1009 | reg = insn.mm_m_format.rd; | ||
1010 | goto loadWU; | ||
1011 | } | ||
1012 | |||
1013 | /* LL,SC,LLD,SCD are not serviced */ | ||
1014 | goto sigbus; | ||
1015 | |||
1016 | case mm_pool32f_op: | ||
1017 | switch (insn.mm_x_format.func) { | ||
1018 | case mm_lwxc1_func: | ||
1019 | case mm_swxc1_func: | ||
1020 | case mm_ldxc1_func: | ||
1021 | case mm_sdxc1_func: | ||
1022 | goto fpu_emul; | ||
1023 | } | ||
1024 | |||
1025 | goto sigbus; | ||
1026 | |||
1027 | case mm_ldc132_op: | ||
1028 | case mm_sdc132_op: | ||
1029 | case mm_lwc132_op: | ||
1030 | case mm_swc132_op: | ||
1031 | fpu_emul: | ||
1032 | /* roll back jump/branch */ | ||
1033 | regs->cp0_epc = origpc; | ||
1034 | regs->regs[31] = orig31; | ||
1035 | |||
1036 | die_if_kernel("Unaligned FP access in kernel code", regs); | ||
1037 | BUG_ON(!used_math()); | ||
1038 | BUG_ON(!is_fpu_owner()); | ||
1039 | |||
1040 | lose_fpu(1); /* save the FPU state for the emulator */ | ||
1041 | res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | ||
1042 | &fault_addr); | ||
1043 | own_fpu(1); /* restore FPU state */ | ||
1044 | |||
1045 | /* If something went wrong, signal */ | ||
1046 | process_fpemu_return(res, fault_addr); | ||
1047 | |||
1048 | if (res == 0) | ||
1049 | goto success; | ||
1050 | return; | ||
1051 | |||
1052 | case mm_lh32_op: | ||
1053 | reg = insn.mm_i_format.rt; | ||
1054 | goto loadHW; | ||
1055 | |||
1056 | case mm_lhu32_op: | ||
1057 | reg = insn.mm_i_format.rt; | ||
1058 | goto loadHWU; | ||
1059 | |||
1060 | case mm_lw32_op: | ||
1061 | reg = insn.mm_i_format.rt; | ||
1062 | goto loadW; | ||
1063 | |||
1064 | case mm_sh32_op: | ||
1065 | reg = insn.mm_i_format.rt; | ||
1066 | goto storeHW; | ||
1067 | |||
1068 | case mm_sw32_op: | ||
1069 | reg = insn.mm_i_format.rt; | ||
1070 | goto storeW; | ||
1071 | |||
1072 | case mm_ld32_op: | ||
1073 | reg = insn.mm_i_format.rt; | ||
1074 | goto loadDW; | ||
1075 | |||
1076 | case mm_sd32_op: | ||
1077 | reg = insn.mm_i_format.rt; | ||
1078 | goto storeDW; | ||
1079 | |||
1080 | case mm_pool16c_op: | ||
1081 | switch (insn.mm16_m_format.func) { | ||
1082 | case mm_lwm16_op: | ||
1083 | reg = insn.mm16_m_format.rlist; | ||
1084 | rvar = reg + 1; | ||
1085 | if (!access_ok(VERIFY_READ, addr, 4 * rvar)) | ||
1086 | goto sigbus; | ||
1087 | |||
1088 | for (i = 16; rvar; rvar--, i++) { | ||
1089 | LoadW(addr, value, res); | ||
1090 | if (res) | ||
1091 | goto fault; | ||
1092 | addr += 4; | ||
1093 | regs->regs[i] = value; | ||
1094 | } | ||
1095 | LoadW(addr, value, res); | ||
1096 | if (res) | ||
1097 | goto fault; | ||
1098 | regs->regs[31] = value; | ||
1099 | |||
1100 | goto success; | ||
1101 | |||
1102 | case mm_swm16_op: | ||
1103 | reg = insn.mm16_m_format.rlist; | ||
1104 | rvar = reg + 1; | ||
1105 | if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) | ||
1106 | goto sigbus; | ||
1107 | |||
1108 | for (i = 16; rvar; rvar--, i++) { | ||
1109 | value = regs->regs[i]; | ||
1110 | StoreW(addr, value, res); | ||
1111 | if (res) | ||
1112 | goto fault; | ||
1113 | addr += 4; | ||
1114 | } | ||
1115 | value = regs->regs[31]; | ||
1116 | StoreW(addr, value, res); | ||
1117 | if (res) | ||
1118 | goto fault; | ||
1119 | |||
1120 | goto success; | ||
1121 | |||
1122 | } | ||
1123 | |||
1124 | goto sigbus; | ||
1125 | |||
1126 | case mm_lhu16_op: | ||
1127 | reg = reg16to32[insn.mm16_rb_format.rt]; | ||
1128 | goto loadHWU; | ||
1129 | |||
1130 | case mm_lw16_op: | ||
1131 | reg = reg16to32[insn.mm16_rb_format.rt]; | ||
1132 | goto loadW; | ||
1133 | |||
1134 | case mm_sh16_op: | ||
1135 | reg = reg16to32st[insn.mm16_rb_format.rt]; | ||
1136 | goto storeHW; | ||
1137 | |||
1138 | case mm_sw16_op: | ||
1139 | reg = reg16to32st[insn.mm16_rb_format.rt]; | ||
1140 | goto storeW; | ||
1141 | |||
1142 | case mm_lwsp16_op: | ||
1143 | reg = insn.mm16_r5_format.rt; | ||
1144 | goto loadW; | ||
1145 | |||
1146 | case mm_swsp16_op: | ||
1147 | reg = insn.mm16_r5_format.rt; | ||
1148 | goto storeW; | ||
1149 | |||
1150 | case mm_lwgp16_op: | ||
1151 | reg = reg16to32[insn.mm16_r3_format.rt]; | ||
1152 | goto loadW; | ||
1153 | |||
1154 | default: | ||
1155 | goto sigill; | ||
1156 | } | ||
1157 | |||
1158 | loadHW: | ||
1159 | if (!access_ok(VERIFY_READ, addr, 2)) | ||
1160 | goto sigbus; | ||
1161 | |||
1162 | LoadHW(addr, value, res); | ||
1163 | if (res) | ||
1164 | goto fault; | ||
1165 | regs->regs[reg] = value; | ||
1166 | goto success; | ||
1167 | |||
1168 | loadHWU: | ||
1169 | if (!access_ok(VERIFY_READ, addr, 2)) | ||
1170 | goto sigbus; | ||
1171 | |||
1172 | LoadHWU(addr, value, res); | ||
1173 | if (res) | ||
1174 | goto fault; | ||
1175 | regs->regs[reg] = value; | ||
1176 | goto success; | ||
1177 | |||
1178 | loadW: | ||
1179 | if (!access_ok(VERIFY_READ, addr, 4)) | ||
1180 | goto sigbus; | ||
1181 | |||
1182 | LoadW(addr, value, res); | ||
1183 | if (res) | ||
1184 | goto fault; | ||
1185 | regs->regs[reg] = value; | ||
1186 | goto success; | ||
1187 | |||
1188 | loadWU: | ||
1189 | #ifdef CONFIG_64BIT | ||
1190 | /* | ||
1191 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1192 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1193 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1194 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1195 | * instructions on 32-bit kernels. | ||
1196 | */ | ||
1197 | if (!access_ok(VERIFY_READ, addr, 4)) | ||
1198 | goto sigbus; | ||
1199 | |||
1200 | LoadWU(addr, value, res); | ||
1201 | if (res) | ||
1202 | goto fault; | ||
1203 | regs->regs[reg] = value; | ||
1204 | goto success; | ||
1205 | #endif /* CONFIG_64BIT */ | ||
1206 | |||
1207 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1208 | goto sigill; | ||
1209 | |||
1210 | loadDW: | ||
1211 | #ifdef CONFIG_64BIT | ||
1212 | /* | ||
1213 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1214 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1215 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1216 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1217 | * instructions on 32-bit kernels. | ||
1218 | */ | ||
1219 | if (!access_ok(VERIFY_READ, addr, 8)) | ||
1220 | goto sigbus; | ||
1221 | |||
1222 | LoadDW(addr, value, res); | ||
1223 | if (res) | ||
1224 | goto fault; | ||
1225 | regs->regs[reg] = value; | ||
1226 | goto success; | ||
1227 | #endif /* CONFIG_64BIT */ | ||
1228 | |||
1229 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1230 | goto sigill; | ||
1231 | |||
1232 | storeHW: | ||
1233 | if (!access_ok(VERIFY_WRITE, addr, 2)) | ||
1234 | goto sigbus; | ||
1235 | |||
1236 | value = regs->regs[reg]; | ||
1237 | StoreHW(addr, value, res); | ||
1238 | if (res) | ||
1239 | goto fault; | ||
1240 | goto success; | ||
1241 | |||
1242 | storeW: | ||
1243 | if (!access_ok(VERIFY_WRITE, addr, 4)) | ||
1244 | goto sigbus; | ||
1245 | |||
1246 | value = regs->regs[reg]; | ||
1247 | StoreW(addr, value, res); | ||
1248 | if (res) | ||
1249 | goto fault; | ||
1250 | goto success; | ||
1251 | |||
1252 | storeDW: | ||
1253 | #ifdef CONFIG_64BIT | ||
1254 | /* | ||
1255 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1256 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1257 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1258 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1259 | * instructions on 32-bit kernels. | ||
1260 | */ | ||
1261 | if (!access_ok(VERIFY_WRITE, addr, 8)) | ||
1262 | goto sigbus; | ||
1263 | |||
1264 | value = regs->regs[reg]; | ||
1265 | StoreDW(addr, value, res); | ||
1266 | if (res) | ||
1267 | goto fault; | ||
1268 | goto success; | ||
1269 | #endif /* CONFIG_64BIT */ | ||
1270 | |||
1271 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1272 | goto sigill; | ||
1273 | |||
1274 | success: | ||
1275 | regs->cp0_epc = contpc; /* advance or branch */ | ||
1276 | |||
1277 | #ifdef CONFIG_DEBUG_FS | ||
1278 | unaligned_instructions++; | ||
1279 | #endif | ||
1280 | return; | ||
1281 | |||
1282 | fault: | ||
1283 | /* roll back jump/branch */ | ||
1284 | regs->cp0_epc = origpc; | ||
1285 | regs->regs[31] = orig31; | ||
1286 | /* Did we have an exception handler installed? */ | ||
1287 | if (fixup_exception(regs)) | ||
1288 | return; | ||
1289 | |||
1290 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1291 | force_sig(SIGSEGV, current); | ||
1292 | |||
1293 | return; | ||
1294 | |||
1295 | sigbus: | ||
1296 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1297 | force_sig(SIGBUS, current); | ||
1298 | |||
1299 | return; | ||
1300 | |||
1301 | sigill: | ||
1302 | die_if_kernel | ||
1303 | ("Unhandled kernel unaligned access or invalid instruction", regs); | ||
1304 | force_sig(SIGILL, current); | ||
1305 | } | ||
1306 | |||
1307 | static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) | ||
1308 | { | ||
1309 | unsigned long value; | ||
1310 | unsigned int res; | ||
1311 | int reg; | ||
1312 | unsigned long orig31; | ||
1313 | u16 __user *pc16; | ||
1314 | unsigned long origpc; | ||
1315 | union mips16e_instruction mips16inst, oldinst; | ||
1316 | |||
1317 | origpc = regs->cp0_epc; | ||
1318 | orig31 = regs->regs[31]; | ||
1319 | pc16 = (unsigned short __user *)msk_isa16_mode(origpc); | ||
1320 | /* | ||
1321 | * This load never faults. | ||
1322 | */ | ||
1323 | __get_user(mips16inst.full, pc16); | ||
1324 | oldinst = mips16inst; | ||
1325 | |||
1326 | /* skip EXTEND instruction */ | ||
1327 | if (mips16inst.ri.opcode == MIPS16e_extend_op) { | ||
1328 | pc16++; | ||
1329 | __get_user(mips16inst.full, pc16); | ||
1330 | } else if (delay_slot(regs)) { | ||
1331 | /* skip jump instructions */ | ||
1332 | /* JAL/JALX are 32 bits but have OPCODE in first short int */ | ||
1333 | if (mips16inst.ri.opcode == MIPS16e_jal_op) | ||
1334 | pc16++; | ||
1335 | pc16++; | ||
1336 | if (get_user(mips16inst.full, pc16)) | ||
1337 | goto sigbus; | ||
1338 | } | ||
1339 | |||
1340 | switch (mips16inst.ri.opcode) { | ||
1341 | case MIPS16e_i64_op: /* I64 or RI64 instruction */ | ||
1342 | switch (mips16inst.i64.func) { /* I64/RI64 func field check */ | ||
1343 | case MIPS16e_ldpc_func: | ||
1344 | case MIPS16e_ldsp_func: | ||
1345 | reg = reg16to32[mips16inst.ri64.ry]; | ||
1346 | goto loadDW; | ||
1347 | |||
1348 | case MIPS16e_sdsp_func: | ||
1349 | reg = reg16to32[mips16inst.ri64.ry]; | ||
1350 | goto writeDW; | ||
1351 | |||
1352 | case MIPS16e_sdrasp_func: | ||
1353 | reg = 29; /* GPRSP */ | ||
1354 | goto writeDW; | ||
1355 | } | ||
1356 | |||
1357 | goto sigbus; | ||
1358 | |||
1359 | case MIPS16e_swsp_op: | ||
1360 | case MIPS16e_lwpc_op: | ||
1361 | case MIPS16e_lwsp_op: | ||
1362 | reg = reg16to32[mips16inst.ri.rx]; | ||
1363 | break; | ||
1364 | |||
1365 | case MIPS16e_i8_op: | ||
1366 | if (mips16inst.i8.func != MIPS16e_swrasp_func) | ||
1367 | goto sigbus; | ||
1368 | reg = 29; /* GPRSP */ | ||
1369 | break; | ||
1370 | |||
1371 | default: | ||
1372 | reg = reg16to32[mips16inst.rri.ry]; | ||
1373 | break; | ||
1374 | } | ||
1375 | |||
1376 | switch (mips16inst.ri.opcode) { | ||
1377 | |||
1378 | case MIPS16e_lb_op: | ||
1379 | case MIPS16e_lbu_op: | ||
1380 | case MIPS16e_sb_op: | ||
1381 | goto sigbus; | ||
1382 | |||
1383 | case MIPS16e_lh_op: | ||
1384 | if (!access_ok(VERIFY_READ, addr, 2)) | ||
1385 | goto sigbus; | ||
1386 | |||
1387 | LoadHW(addr, value, res); | ||
1388 | if (res) | ||
1389 | goto fault; | ||
1390 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1391 | regs->regs[reg] = value; | ||
1392 | break; | ||
1393 | |||
1394 | case MIPS16e_lhu_op: | ||
1395 | if (!access_ok(VERIFY_READ, addr, 2)) | ||
1396 | goto sigbus; | ||
1397 | |||
1398 | LoadHWU(addr, value, res); | ||
1399 | if (res) | ||
1400 | goto fault; | ||
1401 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1402 | regs->regs[reg] = value; | ||
1403 | break; | ||
1404 | |||
1405 | case MIPS16e_lw_op: | ||
1406 | case MIPS16e_lwpc_op: | ||
1407 | case MIPS16e_lwsp_op: | ||
1408 | if (!access_ok(VERIFY_READ, addr, 4)) | ||
1409 | goto sigbus; | ||
1410 | |||
1411 | LoadW(addr, value, res); | ||
1412 | if (res) | ||
1413 | goto fault; | ||
1414 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1415 | regs->regs[reg] = value; | ||
1416 | break; | ||
1417 | |||
1418 | case MIPS16e_lwu_op: | ||
1419 | #ifdef CONFIG_64BIT | ||
1420 | /* | ||
1421 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1422 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1423 | * or race makes us see a 64-bit instruction here, the lwu | ||
1424 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1425 | * instructions on 32-bit kernels. | ||
1426 | */ | ||
1427 | if (!access_ok(VERIFY_READ, addr, 4)) | ||
1428 | goto sigbus; | ||
1429 | |||
1430 | LoadWU(addr, value, res); | ||
1431 | if (res) | ||
1432 | goto fault; | ||
1433 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1434 | regs->regs[reg] = value; | ||
1435 | break; | ||
1436 | #endif /* CONFIG_64BIT */ | ||
1437 | |||
1438 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1439 | goto sigill; | ||
1440 | |||
1441 | case MIPS16e_ld_op: | ||
1442 | loadDW: | ||
1443 | #ifdef CONFIG_64BIT | ||
1444 | /* | ||
1445 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1446 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1447 | * or race makes us see a 64-bit instruction here, the ldl/ldr | ||
1448 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1449 | * instructions on 32-bit kernels. | ||
1450 | */ | ||
1451 | if (!access_ok(VERIFY_READ, addr, 8)) | ||
1452 | goto sigbus; | ||
1453 | |||
1454 | LoadDW(addr, value, res); | ||
1455 | if (res) | ||
1456 | goto fault; | ||
1457 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1458 | regs->regs[reg] = value; | ||
1459 | break; | ||
1460 | #endif /* CONFIG_64BIT */ | ||
1461 | |||
1462 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1463 | goto sigill; | ||
1464 | |||
1465 | case MIPS16e_sh_op: | ||
1466 | if (!access_ok(VERIFY_WRITE, addr, 2)) | ||
1467 | goto sigbus; | ||
1468 | |||
1469 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1470 | value = regs->regs[reg]; | ||
1471 | StoreHW(addr, value, res); | ||
1472 | if (res) | ||
1473 | goto fault; | ||
1474 | break; | ||
1475 | |||
1476 | case MIPS16e_sw_op: | ||
1477 | case MIPS16e_swsp_op: | ||
1478 | case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ | ||
1479 | if (!access_ok(VERIFY_WRITE, addr, 4)) | ||
1480 | goto sigbus; | ||
1481 | |||
1482 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1483 | value = regs->regs[reg]; | ||
1484 | StoreW(addr, value, res); | ||
1485 | if (res) | ||
1486 | goto fault; | ||
1487 | break; | ||
1488 | |||
1489 | case MIPS16e_sd_op: | ||
1490 | writeDW: | ||
1491 | #ifdef CONFIG_64BIT | ||
1492 | /* | ||
1493 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1494 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1495 | * or race makes us see a 64-bit instruction here, the sdl/sdr | ||
1496 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1497 | * instructions on 32-bit kernels. | ||
1498 | */ | ||
1499 | if (!access_ok(VERIFY_WRITE, addr, 8)) | ||
1500 | goto sigbus; | ||
1501 | |||
1502 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1503 | value = regs->regs[reg]; | ||
1504 | StoreDW(addr, value, res); | ||
1505 | if (res) | ||
1506 | goto fault; | ||
1507 | break; | ||
1508 | #endif /* CONFIG_64BIT */ | ||
1509 | |||
1510 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1511 | goto sigill; | ||
1512 | |||
1513 | default: | ||
1514 | /* | ||
1515 | * Pheeee... We encountered a yet-unknown instruction or | ||
1516 | * cache coherence problem. Die sucker, die ... | ||
1517 | */ | ||
1518 | goto sigill; | ||
1519 | } | ||
1520 | |||
1521 | #ifdef CONFIG_DEBUG_FS | ||
1522 | unaligned_instructions++; | ||
1523 | #endif | ||
1524 | |||
1525 | return; | ||
1526 | |||
1527 | fault: | ||
1528 | /* roll back jump/branch */ | ||
1529 | regs->cp0_epc = origpc; | ||
1530 | regs->regs[31] = orig31; | ||
1531 | /* Did we have an exception handler installed? */ | ||
1532 | if (fixup_exception(regs)) | ||
1533 | return; | ||
1534 | |||
1535 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1536 | force_sig(SIGSEGV, current); | ||
1537 | |||
1538 | return; | ||
1539 | |||
1540 | sigbus: | ||
1541 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1542 | force_sig(SIGBUS, current); | ||
1543 | |||
1544 | return; | ||
1545 | |||
1546 | sigill: | ||
1547 | die_if_kernel("Unhandled kernel unaligned access or invalid instruction", | ||
1548 | regs); | ||
1549 | force_sig(SIGILL, current); | ||
1550 | } | ||
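The MIPS16e emulator above decodes in two stages: the first switch only picks which 32-bit GPR the instruction's register field names (through reg16to32[], or hard-coding $29 for the sp-relative forms), and the second switch performs the access and advances the EPC via MIPS16e_compute_return_epc(). A rough stand-alone sketch of that register selection follows; the kernel decodes through the bitfields of union mips16e_instruction, so the shift/mask positions below are assumptions for the common RI-type layout, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

/* MIPS16e register numbers 0-7 name the 32-bit GPRs $16, $17, $2..$7. */
static const int reg16to32_demo[8] = { 16, 17, 2, 3, 4, 5, 6, 7 };

static unsigned int m16e_opcode(uint16_t inst)
{
	return (inst >> 11) & 0x1f;	/* bits 15..11: major opcode (assumed) */
}

static unsigned int m16e_rx(uint16_t inst)
{
	return (inst >> 8) & 0x7;	/* bits 10..8: rx register field (assumed) */
}

int main(void)
{
	uint16_t inst = 0x9b04;		/* arbitrary 16-bit example encoding */

	printf("major opcode %u, rx -> GPR $%d\n",
	       m16e_opcode(inst), reg16to32_demo[m16e_rx(inst)]);
	return 0;
}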
511 | asmlinkage void do_ade(struct pt_regs *regs) | 1551 | asmlinkage void do_ade(struct pt_regs *regs) |
512 | { | 1552 | { |
513 | unsigned int __user *pc; | 1553 | unsigned int __user *pc; |
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs) | |||
517 | 1, regs, regs->cp0_badvaddr); | 1557 | 1, regs, regs->cp0_badvaddr); |
518 | /* | 1558 | /* |
519 | * Did we catch a fault trying to load an instruction? | 1559 | * Did we catch a fault trying to load an instruction? |
520 | * Or are we running in MIPS16 mode? | ||
521 | */ | 1560 | */ |
522 | if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) | 1561 | if (regs->cp0_badvaddr == regs->cp0_epc) |
523 | goto sigbus; | 1562 | goto sigbus; |
524 | 1563 | ||
525 | pc = (unsigned int __user *) exception_epc(regs); | ||
526 | if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) | 1564 | if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) |
527 | goto sigbus; | 1565 | goto sigbus; |
528 | if (unaligned_action == UNALIGNED_ACTION_SIGNAL) | 1566 | if (unaligned_action == UNALIGNED_ACTION_SIGNAL) |
529 | goto sigbus; | 1567 | goto sigbus; |
530 | else if (unaligned_action == UNALIGNED_ACTION_SHOW) | ||
531 | show_registers(regs); | ||
532 | 1568 | ||
533 | /* | 1569 | /* |
534 | * Do branch emulation only if we didn't forward the exception. | 1570 | * Do branch emulation only if we didn't forward the exception. |
535 | * This is all so ugly ... | 1571 |
536 | */ | 1572 | */ |
1573 | |||
1574 | /* | ||
1575 | * Are we running in microMIPS mode? | ||
1576 | */ | ||
1577 | if (get_isa16_mode(regs->cp0_epc)) { | ||
1578 | /* | ||
1579 | * Did we catch a fault trying to load an instruction in | ||
1580 | * 16-bit mode? | ||
1581 | */ | ||
1582 | if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) | ||
1583 | goto sigbus; | ||
1584 | if (unaligned_action == UNALIGNED_ACTION_SHOW) | ||
1585 | show_registers(regs); | ||
1586 | |||
1587 | if (cpu_has_mmips) { | ||
1588 | seg = get_fs(); | ||
1589 | if (!user_mode(regs)) | ||
1590 | set_fs(KERNEL_DS); | ||
1591 | emulate_load_store_microMIPS(regs, | ||
1592 | (void __user *)regs->cp0_badvaddr); | ||
1593 | set_fs(seg); | ||
1594 | |||
1595 | return; | ||
1596 | } | ||
1597 | |||
1598 | if (cpu_has_mips16) { | ||
1599 | seg = get_fs(); | ||
1600 | if (!user_mode(regs)) | ||
1601 | set_fs(KERNEL_DS); | ||
1602 | emulate_load_store_MIPS16e(regs, | ||
1603 | (void __user *)regs->cp0_badvaddr); | ||
1604 | set_fs(seg); | ||
1605 | |||
1606 | return; | ||
1607 | } | ||
1608 | |||
1609 | goto sigbus; | ||
1610 | } | ||
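The dispatch above keys off the ISA-mode convention: on MIPS, compressed-ISA code (microMIPS or MIPS16e) executes with bit 0 of the instruction address set, so the handler compares cp0_badvaddr against msk_isa16_mode(cp0_epc) rather than the raw EPC before deciding whether the fault was an instruction fetch. A minimal sketch of that convention, using local stand-in names rather than the kernel's helpers:

#include <stdio.h>

static unsigned long demo_get_isa16_mode(unsigned long epc)
{
	return epc & 0x1UL;		/* bit 0 set => 16-bit (compressed) ISA */
}

static unsigned long demo_msk_isa16_mode(unsigned long epc)
{
	return epc & ~0x1UL;		/* real instruction fetch address */
}

int main(void)
{
	unsigned long epc = 0x00400101UL;	/* hypothetical MIPS16e/microMIPS EPC */

	if (demo_get_isa16_mode(epc))
		printf("compressed ISA, instruction at %#lx\n",
		       demo_msk_isa16_mode(epc));
	return 0;
}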
1611 | |||
1612 | if (unaligned_action == UNALIGNED_ACTION_SHOW) | ||
1613 | show_registers(regs); | ||
1614 | pc = (unsigned int __user *)exception_epc(regs); | ||
1615 | |||
537 | seg = get_fs(); | 1616 | seg = get_fs(); |
538 | if (!user_mode(regs)) | 1617 | if (!user_mode(regs)) |
539 | set_fs(KERNEL_DS); | 1618 | set_fs(KERNEL_DS); |