path: root/arch/mips/kernel
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/8250-platform.c | 47
-rw-r--r--  arch/mips/kernel/Makefile | 17
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 30
-rw-r--r--  arch/mips/kernel/head.S | 10
-rw-r--r--  arch/mips/kernel/irq-mv6434x.c | 111
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c | 176
-rw-r--r--  arch/mips/kernel/mips-mt.c | 205
-rw-r--r--  arch/mips/kernel/proc.c | 2
-rw-r--r--  arch/mips/kernel/process.c | 4
-rw-r--r--  arch/mips/kernel/setup.c | 16
-rw-r--r--  arch/mips/kernel/smp.c | 2
-rw-r--r--  arch/mips/kernel/smtc.c | 2
-rw-r--r--  arch/mips/kernel/syscall.c | 5
-rw-r--r--  arch/mips/kernel/traps.c | 77
-rw-r--r--  arch/mips/kernel/unaligned.c | 41
15 files changed, 393 insertions, 352 deletions
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c
new file mode 100644
index 00000000000..cbf3fe20ad1
--- /dev/null
+++ b/arch/mips/kernel/8250-platform.c
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(base, int)						\
+{								\
+	.iobase		= base,					\
+	.irq		= int,					\
+	.uartclk	= 1843200,				\
+	.iotype		= UPIO_PORT,				\
+	.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,	\
+	.regshift	= 0,					\
+}
+
+static struct plat_serial8250_port uart8250_data[] = {
+	PORT(0x3F8, 4),
+	PORT(0x2F8, 3),
+	PORT(0x3E8, 4),
+	PORT(0x2E8, 3),
+	{ },
+};
+
+static struct platform_device uart8250_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= uart8250_data,
+	},
+};
+
+static int __init uart8250_init(void)
+{
+	return platform_device_register(&uart8250_device);
+}
+
+module_init(uart8250_init);
+
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250 UART probe driver");
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 49246264cc7..961594cb521 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -14,14 +14,15 @@ binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
+obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R3000)		+= r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX39XX)	+= r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX49XX)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4000)		+= r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_VR41XX)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4300)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4X00)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R5000)		+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R6000)		+= r6000_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R5432)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R8000)		+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_RM7000)	+= r4k_fpu.o r4k_switch.o
@@ -29,13 +30,14 @@ obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_NEVADA)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R10000)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_SB1)		+= r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R6000)		+= r6000_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_TX39XX)	+= r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_TX49XX)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_VR41XX)	+= r4k_fpu.o r4k_switch.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 
 obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
+obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 
@@ -47,7 +49,6 @@ obj-$(CONFIG_I8259) += i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
 obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
-obj-$(CONFIG_IRQ_MV64340)	+= irq-mv6434x.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)	+= irq-msc01.o
 
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
@@ -68,3 +69,5 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b12eeee0e97..c6b8b074a81 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -186,9 +186,29 @@ static inline void check_wait(void)
 	}
 }
 
+static inline void check_errata(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+
+	switch (c->cputype) {
+	case CPU_34K:
+		/*
+		 * Erratum "RPS May Cause Incorrect Instruction Execution"
+		 * This code only handles VPE0, any SMP/SMTC/RTOS code
+		 * making use of VPE1 will be responsable for that VPE.
+		 */
+		if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
+			write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
+		break;
+	default:
+		break;
+	}
+}
+
 void __init check_bugs32(void)
 {
 	check_wait();
+	check_errata();
 }
 
 /*
@@ -485,6 +505,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
 			     MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
+	case PRID_IMP_LOONGSON2:
+		c->cputype = CPU_LOONGSON2;
+		c->isa_level = MIPS_CPU_ISA_III;
+		c->options = R4K_OPTS |
+			     MIPS_CPU_FPU | MIPS_CPU_LLSC |
+			     MIPS_CPU_32FPR;
+		c->tlbsize = 64;
+		break;
 	}
 }
 
@@ -588,6 +616,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_VEIC;
 	if (config3 & MIPS_CONF3_MT)
 		c->ases |= MIPS_ASE_MIPSMT;
+	if (config3 & MIPS_CONF3_ULRI)
+		c->options |= MIPS_CPU_ULRI;
 
 	return config3 & MIPS_CONF_M;
 }
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 6f57ca44291..f78538eceef 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 
+#include <asm/addrspace.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/irqflags.h>
@@ -129,24 +130,25 @@
 #endif
 	.endm
 
+#ifndef CONFIG_NO_EXCEPT_FILL
 	/*
 	 * Reserved space for exception handlers.
 	 * Necessary for machines which link their kernels at KSEG0.
 	 */
 	.fill	0x400
+#endif
 
 EXPORT(stext)				# used for profiling
 EXPORT(_stext)
 
-#ifdef CONFIG_MIPS_SIM
+#ifdef CONFIG_BOOT_RAW
 	/*
 	 * Give us a fighting chance of running if execution beings at the
 	 * kernel load address.  This is needed because this platform does
 	 * not have a ELF loader yet.
 	 */
-	j	kernel_entry
-#endif
 	__INIT
+#endif
 
 NESTED(kernel_entry, 16, sp)		# kernel entry point
 
@@ -197,9 +199,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 	j		start_kernel
 	END(kernel_entry)
 
-#ifdef CONFIG_QEMU
 	__INIT
-#endif
 
 #ifdef CONFIG_SMP
 /*
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
deleted file mode 100644
index 3dd561832e4..00000000000
--- a/arch/mips/kernel/irq-mv6434x.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2002 Momentum Computer
- * Author: mdharm@momenco.com
- * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/mv643xx.h>
-#include <linux/sched.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/marvell.h>
-
-static unsigned int irq_base;
-
-static inline int ls1bit32(unsigned int x)
-{
-	int b = 31, s;
-
-	s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
-	s =  8; if (x <<  8 == 0) s = 0; b -= s; x <<= s;
-	s =  4; if (x <<  4 == 0) s = 0; b -= s; x <<= s;
-	s =  2; if (x <<  2 == 0) s = 0; b -= s; x <<= s;
-	s =  1; if (x <<  1 == 0) s = 0; b -= s;
-
-	return b;
-}
-
-/* mask off an interrupt -- 1 is enable, 0 is disable */
-static inline void mask_mv64340_irq(unsigned int irq)
-{
-	uint32_t value;
-
-	if (irq < (irq_base + 32)) {
-		value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
-		value &= ~(1 << (irq - irq_base));
-		MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
-	} else {
-		value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
-		value &= ~(1 << (irq - irq_base - 32));
-		MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
-	}
-}
-
-/* unmask an interrupt -- 1 is enable, 0 is disable */
-static inline void unmask_mv64340_irq(unsigned int irq)
-{
-	uint32_t value;
-
-	if (irq < (irq_base + 32)) {
-		value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
-		value |= 1 << (irq - irq_base);
-		MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
-	} else {
-		value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
-		value |= 1 << (irq - irq_base - 32);
-		MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
-	}
-}
-
-/*
- * Interrupt handler for interrupts coming from the Marvell chip.
- * It could be built in ethernet ports etc...
- */
-void ll_mv64340_irq(void)
-{
-	unsigned int irq_src_low, irq_src_high;
-	unsigned int irq_mask_low, irq_mask_high;
-
-	/* read the interrupt status registers */
-	irq_mask_low = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
-	irq_mask_high = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
-	irq_src_low = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_LOW);
-	irq_src_high = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_HIGH);
-
-	/* mask for just the interrupts we want */
-	irq_src_low &= irq_mask_low;
-	irq_src_high &= irq_mask_high;
-
-	if (irq_src_low)
-		do_IRQ(ls1bit32(irq_src_low) + irq_base);
-	else
-		do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
-}
-
-struct irq_chip mv64340_irq_type = {
-	.name = "MV-64340",
-	.ack = mask_mv64340_irq,
-	.mask = mask_mv64340_irq,
-	.mask_ack = mask_mv64340_irq,
-	.unmask = unmask_mv64340_irq,
-};
-
-void __init mv64340_irq_init(unsigned int base)
-{
-	int i;
-
-	for (i = base; i < base + 64; i++)
-		set_irq_chip_and_handler(i, &mv64340_irq_type,
-					 handle_level_irq);
-
-	irq_base = base;
-}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 00000000000..ede5d73d652
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,176 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical with the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	cpumask_t effective_mask;
+	int retval;
+	struct task_struct *p;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		unlock_cpu_hotplug();
+		return -ESRCH;
+	}
+
+	/*
+	 * It is not safe to call set_cpus_allowed with the
+	 * tasklist_lock held.  We will bump the task_struct's
+	 * usage count and drop tasklist_lock before invoking
+	 * set_cpus_allowed.
+	 */
+	get_task_struct(p);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE)) {
+		read_unlock(&tasklist_lock);
+		goto out_unlock;
+	}
+
+	retval = security_task_setscheduler(p, 0, NULL);
+	if (retval)
+		goto out_unlock;
+
+	/* Record new user-specified CPU set for future reference */
+	p->thread.user_cpus_allowed = new_mask;
+
+	/* Unlock the task list */
+	read_unlock(&tasklist_lock);
+
+	/* Compute new global allowed CPU set if necessary */
+	if ((p->thread.mflags & MF_FPUBOUND)
+	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed(p, effective_mask);
+	} else {
+		p->thread.mflags &= ~MF_FPUBOUND;
+		retval = set_cpus_allowed(p, new_mask);
+	}
+
+
+out_unlock:
+	put_task_struct(p);
+	unlock_cpu_hotplug();
+	return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	unsigned int real_len;
+	cpumask_t mask;
+	int retval;
+	struct task_struct *p;
+
+	real_len = sizeof(mask);
+	if (len < real_len)
+		return -EINVAL;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	unlock_cpu_hotplug();
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+	get_option(&str, &fpaff_threshold);
+	return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 333
+
+static __init int mt_fp_affinity_init(void)
+{
+	if (fpaff_threshold >= 0) {
+		mt_fpemul_threshold = fpaff_threshold;
+	} else {
+		mt_fpemul_threshold =
+			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+	}
+	printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+	       mt_fpemul_threshold);
+
+	return 0;
+}
+arch_initcall(mt_fp_affinity_init);
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index ba01800b601..1a7d8923129 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -6,7 +6,6 @@
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/security.h>
@@ -23,149 +22,6 @@
 #include <asm/cacheflush.h>
 
 /*
- * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
- */
-
-cpumask_t mt_fpu_cpumask;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-unsigned long mt_fpemul_threshold = 0;
-
-/*
- * Replacement functions for the sys_sched_setaffinity() and
- * sys_sched_getaffinity() system calls, so that we can integrate
- * FPU affinity with the user's requested processor affinity.
- * This code is 98% identical with the sys_sched_setaffinity()
- * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
- */
-
-/*
- * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
- * cloned here.
- */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
-{
-	return pid ? find_task_by_pid(pid) : current;
-}
-
-
-/*
- * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	cpumask_t new_mask;
-	cpumask_t effective_mask;
-	int retval;
-	struct task_struct *p;
-
-	if (len < sizeof(new_mask))
-		return -EINVAL;
-
-	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
-		return -EFAULT;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	p = find_process_by_pid(pid);
-	if (!p) {
-		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
-		return -ESRCH;
-	}
-
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held.  We will bump the task_struct's
-	 * usage count and drop tasklist_lock before invoking
-	 * set_cpus_allowed.
-	 */
-	get_task_struct(p);
-
-	retval = -EPERM;
-	if ((current->euid != p->euid) && (current->euid != p->uid) &&
-			!capable(CAP_SYS_NICE)) {
-		read_unlock(&tasklist_lock);
-		goto out_unlock;
-	}
-
-	retval = security_task_setscheduler(p, 0, NULL);
-	if (retval)
-		goto out_unlock;
-
-	/* Record new user-specified CPU set for future reference */
-	p->thread.user_cpus_allowed = new_mask;
-
-	/* Unlock the task list */
-	read_unlock(&tasklist_lock);
-
-	/* Compute new global allowed CPU set if necessary */
-	if( (p->thread.mflags & MF_FPUBOUND)
-	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
-		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed(p, effective_mask);
-	} else {
-		p->thread.mflags &= ~MF_FPUBOUND;
-		retval = set_cpus_allowed(p, new_mask);
-	}
-
-
-out_unlock:
-	put_task_struct(p);
-	unlock_cpu_hotplug();
-	return retval;
-}
-
-/*
- * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	unsigned int real_len;
-	cpumask_t mask;
-	int retval;
-	struct task_struct *p;
-
-	real_len = sizeof(mask);
-	if (len < real_len)
-		return -EINVAL;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	retval = -ESRCH;
-	p = find_process_by_pid(pid);
-	if (!p)
-		goto out_unlock;
-	retval = security_task_getscheduler(p);
-	if (retval)
-		goto out_unlock;
-
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
-
-out_unlock:
-	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
-	if (retval)
-		return retval;
-	if (copy_to_user(user_mask_ptr, &mask, real_len))
-		return -EFAULT;
-	return real_len;
-}
-
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-/*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
  */
@@ -195,27 +51,31 @@ void mips_mt_regdump(unsigned long mvpctl)
 	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	printk("-- per-VPE State --\n");
-	for(i = 0; i < nvpe; i++) {
-		for(tc = 0; tc < ntc; tc++) {
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
 			settc(tc);
-			if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
 				printk("  VPE %d\n", i);
-				printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-				printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
-				printk("   VPE%d.Status : %08lx\n",
-					i, read_vpe_c0_status());
-				printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-				printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
-				printk("   VPE%d.Config7 : %08lx\n",
-					i, read_vpe_c0_config7());
-				break; /* Next VPE */
+				printk("   VPEControl : %08lx\n",
+				       read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n",
+				       read_vpe_c0_vpeconf0());
+				printk("   VPE%d.Status : %08lx\n",
+				       i, read_vpe_c0_status());
+				printk("   VPE%d.EPC : %08lx\n",
+				       i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n",
+				       i, read_vpe_c0_cause());
+				printk("   VPE%d.Config7 : %08lx\n",
+				       i, read_vpe_c0_config7());
+				break; /* Next VPE */
+			}
 		}
-		}
 	}
 	printk("-- per-TC State --\n");
-	for(tc = 0; tc < ntc; tc++) {
+	for (tc = 0; tc < ntc; tc++) {
 		settc(tc);
-		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
 			/* Are we dumping ourself?  */
 			haltval = 0; /* Then we're not halted, and mustn't be */
 			tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -310,17 +170,6 @@ static int __init ndflush(char *s)
 	return 1;
 }
 __setup("ndflush=", ndflush);
-#ifdef CONFIG_MIPS_MT_FPAFF
-static int fpaff_threshold = -1;
-
-static int __init fpaff_thresh(char *str)
-{
-	get_option(&str, &fpaff_threshold);
-	return 1;
-}
-
-__setup("fpaff=", fpaff_thresh);
-#endif /* CONFIG_MIPS_MT_FPAFF */
 
 static unsigned int itc_base = 0;
 
@@ -376,20 +225,6 @@ void mips_mt_set_cpuoptions(void)
 	if (mt_n_dflushes != 1)
 		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
 
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* FPU Use Factor empirically derived from experiments on 34K */
-#define FPUSEFACTOR 333
-
-	if (fpaff_threshold >= 0) {
-		mt_fpemul_threshold = fpaff_threshold;
-	} else {
-		mt_fpemul_threshold =
-			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
-	}
-	printk("FPU Affinity set after %ld emulations\n",
-		mt_fpemul_threshold);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 	if (itc_base != 0) {
 		/*
 		 * Configure ITC mapping.  This code is very
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 5ddc2e9deec..ec04f5a1a5e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -14,7 +14,6 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
-#include <asm/watch.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -84,6 +83,7 @@ static const char *cpu_name[] = {
 	[CPU_VR4181A]	= "NEC VR4181A",
 	[CPU_SR71000]	= "Sandcraft SR71000",
 	[CPU_PR4450]	= "Philips PR4450",
+	[CPU_LOONGSON2]	= "ICT Loongson-2",
 };
 
 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6bdfb5a9fa1..8f4cf27c715 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -46,7 +46,7 @@
  * power and have a low exit latency (ie sit in a loop waiting for somebody to
  * say that they'd like to reschedule)
  */
-ATTRIB_NORET void cpu_idle(void)
+void __noreturn cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -213,7 +213,7 @@ int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
 /*
  * Create a kernel thread
  */
-static ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
 {
 	do_exit(fn(arg));
 }
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4975da0bfb6..316685fca05 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/console.h>
 #include <linux/pfn.h>
+#include <linux/debugfs.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -574,3 +575,18 @@ __setup("nodsp", dsp_disable);
 
 unsigned long kernelsp[NR_CPUS];
 unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *mips_debugfs_dir;
+static int __init debugfs_mips(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_dir("mips", NULL);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+	mips_debugfs_dir = d;
+	return 0;
+}
+arch_initcall(debugfs_mips);
+#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index a1b017f2dbb..be7362bc2c9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
 extern void __init calibrate_delay(void);
-extern ATTRIB_NORET void cpu_idle(void);
+extern void cpu_idle(void);
 
 /*
  * First C code run on the secondary CPUs after being started up by
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 046b03b1705..342d873b2ec 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1104,7 +1104,7 @@ void smtc_idle_loop_hook(void)
 	mtflags = dmt();
 	pdb_msg = &id_ho_db_msg[0];
 	im = read_c0_status();
-	vpe = cpu_data[smp_processor_id()].vpe_id;
+	vpe = current_cpu_data.vpe_id;
 	for (bit = 0; bit < 8; bit++) {
 		/*
 		 * In current prototype, I/O interrupts
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 9dd5a2df8ea..b947c61c0cc 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -272,9 +272,8 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
 	struct thread_info *ti = task_thread_info(current);
 
 	ti->tp_value = addr;
-
-	/* If some future MIPS implementation has this register in hardware,
-	 * we will need to update it here (and in context switches). */
+	if (cpu_has_userlocal)
+		write_c0_userlocal(addr);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3ea7863c451..80ea4fa95bd 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -39,7 +39,6 @@
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/watch.h>
 #include <asm/types.h>
 #include <asm/stacktrace.h>
 
@@ -70,6 +69,7 @@ extern asmlinkage void handle_reserved(void);
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 	struct mips_fpu_struct *ctx, int has_fpu);
 
+void (*board_watchpoint_handler)(struct pt_regs *regs);
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -311,7 +311,7 @@ void show_registers(struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(die_lock);
 
-NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -753,6 +753,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
 		force_sig(SIGILL, current);
 }
 
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			cpus_and(tmask, current->thread.user_cpus_allowed,
+				 mt_fpu_cpumask);
+			set_cpus_allowed(current, tmask);
+			current->thread.mflags |= MF_FPUBOUND;
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
@@ -786,36 +813,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 					       &current->thread.fpu, 0);
 		if (sig)
 			force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-		else {
-			/*
-			 * MIPS MT processors may have fewer FPU contexts
-			 * than CPU threads. If we've emulated more than
-			 * some threshold number of instructions, force
-			 * migration to a "CPU" that has FP support.
-			 */
-			if(mt_fpemul_threshold > 0
-			    && ((current->thread.emulated_fp++
-			    > mt_fpemul_threshold)) {
-				/*
-				 * If there's no FPU present, or if the
-				 * application has already restricted
-				 * the allowed set to exclude any CPUs
-				 * with FPUs, we'll skip the procedure.
-				 */
-				if (cpus_intersects(current->cpus_allowed,
-				    mt_fpu_cpumask)) {
-					cpumask_t tmask;
-
-					cpus_and(tmask,
-						current->thread.user_cpus_allowed,
-						mt_fpu_cpumask);
-					set_cpus_allowed(current, tmask);
-					current->thread.mflags |= MF_FPUBOUND;
-				}
-			}
-		}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+		else
+			mt_ase_fp_affinity();
 	}
 
 	return;
@@ -835,6 +834,11 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
 
 asmlinkage void do_watch(struct pt_regs *regs)
 {
+	if (board_watchpoint_handler) {
+		(*board_watchpoint_handler)(regs);
+		return;
+	}
+
 	/*
 	 * We use the watch exception where available to detect stack
 	 * overflows.
@@ -1343,7 +1347,14 @@ void __init per_cpu_trap_init(void)
 		set_c0_status(ST0_MX);
 
 #ifdef CONFIG_CPU_MIPSR2
-	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
+	if (cpu_has_mips_r2) {
+		unsigned int enable = 0x0000000f;
+
+		if (cpu_has_userlocal)
+			enable |= (1 << 29);
+
+		write_c0_hwrena(enable);
+	}
 #endif
 
 #ifdef CONFIG_MIPS_MT_SMTC
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 18c4a3c45a3..8b9c34ffae1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -77,6 +77,7 @@
 #include <linux/signal.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/debugfs.h>
 #include <asm/asm.h>
 #include <asm/branch.h>
 #include <asm/byteorder.h>
@@ -87,9 +88,18 @@
 #define STR(x)	__STR(x)
 #define __STR(x)  #x
 
-#ifdef CONFIG_PROC_FS
-unsigned long unaligned_instructions;
+enum {
+	UNALIGNED_ACTION_QUIET,
+	UNALIGNED_ACTION_SIGNAL,
+	UNALIGNED_ACTION_SHOW,
+};
+#ifdef CONFIG_DEBUG_FS
+static u32 unaligned_instructions;
+static u32 unaligned_action;
+#else
+#define unaligned_action UNALIGNED_ACTION_QUIET
 #endif
+extern void show_registers(struct pt_regs *regs);
 
 static inline int emulate_load_store_insn(struct pt_regs *regs,
 	void __user *addr, unsigned int __user *pc,
@@ -459,7 +469,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 		goto sigill;
 	}
 
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_DEBUG_FS
 	unaligned_instructions++;
 #endif
 
@@ -516,6 +526,10 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	pc = (unsigned int __user *) exception_epc(regs);
 	if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
 		goto sigbus;
+	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
+		goto sigbus;
+	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
+		show_registers(regs);
 
 	/*
 	 * Do branch emulation only if we didn't forward the exception.
@@ -546,3 +560,24 @@ sigbus:
  * XXX On return from the signal handler we should advance the epc
  */
 }
+
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *mips_debugfs_dir;
+static int __init debugfs_unaligned(void)
+{
+	struct dentry *d;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
+			       mips_debugfs_dir, &unaligned_instructions);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
+			       mips_debugfs_dir, &unaligned_action);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+	return 0;
+}
+__initcall(debugfs_unaligned);
+#endif