Diffstat (limited to 'arch/mips/kernel')
39 files changed, 1434 insertions, 375 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index eecd2a9f155c..9326af5186fe 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -2,14 +2,17 @@ | |||
2 | # Makefile for the Linux/MIPS kernel. | 2 | # Makefile for the Linux/MIPS kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) | ||
6 | |||
7 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
8 | 6 | ||
9 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ |
10 | ptrace.o reset.o setup.o signal.o syscall.o \ | 8 | ptrace.o reset.o setup.o signal.o syscall.o \ |
11 | time.o topology.o traps.o unaligned.o watch.o | 9 | time.o topology.o traps.o unaligned.o watch.o |
12 | 10 | ||
11 | ifdef CONFIG_FUNCTION_TRACER | ||
12 | CFLAGS_REMOVE_ftrace.o = -pg | ||
13 | CFLAGS_REMOVE_early_printk.o = -pg | ||
14 | endif | ||
15 | |||
13 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 16 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
14 | obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o | 17 | obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o |
15 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | 18 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o |
@@ -19,6 +22,7 @@ obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | |||
19 | obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o | 22 | obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o |
20 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o | 23 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o |
21 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o | 24 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o |
25 | obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o | ||
22 | obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o | 26 | obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o |
23 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o | 27 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o |
24 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o | 28 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o |
@@ -26,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o | |||
26 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 30 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
27 | obj-$(CONFIG_MODULES) += mips_ksyms.o module.o | 31 | obj-$(CONFIG_MODULES) += mips_ksyms.o module.o |
28 | 32 | ||
33 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | ||
34 | |||
29 | obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o | 35 | obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o |
30 | obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o | 36 | obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o |
31 | obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o | 37 | obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o |
@@ -92,4 +98,8 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n | |||
92 | 98 | ||
93 | obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o | 99 | obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o |
94 | 100 | ||
101 | obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ | ||
102 | |||
95 | EXTRA_CFLAGS += -Werror | 103 | EXTRA_CFLAGS += -Werror |
104 | |||
105 | CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) | ||
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index e02f79b1eb51..bfea327c636c 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c | |||
@@ -144,7 +144,7 @@ void __cpuinit sb1480_clockevent_init(void) | |||
144 | bcm1480_unmask_irq(cpu, irq); | 144 | bcm1480_unmask_irq(cpu, irq); |
145 | 145 | ||
146 | action->handler = sibyte_counter_handler; | 146 | action->handler = sibyte_counter_handler; |
147 | action->flags = IRQF_DISABLED | IRQF_PERCPU; | 147 | action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER; |
148 | action->name = name; | 148 | action->name = name; |
149 | action->dev_id = cd; | 149 | action->dev_id = cd; |
150 | 150 | ||
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 6996da4d74a2..00a4da277cbb 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c | |||
@@ -107,7 +107,7 @@ static irqreturn_t ds1287_interrupt(int irq, void *dev_id) | |||
107 | 107 | ||
108 | static struct irqaction ds1287_irqaction = { | 108 | static struct irqaction ds1287_irqaction = { |
109 | .handler = ds1287_interrupt, | 109 | .handler = ds1287_interrupt, |
110 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 110 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, |
111 | .name = "ds1287", | 111 | .name = "ds1287", |
112 | }; | 112 | }; |
113 | 113 | ||
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 92351e00ae0e..f5d265eb6eae 100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c | |||
@@ -113,7 +113,7 @@ static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id) | |||
113 | 113 | ||
114 | static struct irqaction gt641xx_timer0_irqaction = { | 114 | static struct irqaction gt641xx_timer0_irqaction = { |
115 | .handler = gt641xx_timer0_interrupt, | 115 | .handler = gt641xx_timer0_interrupt, |
116 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 116 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, |
117 | .name = "gt641xx_timer0", | 117 | .name = "gt641xx_timer0", |
118 | }; | 118 | }; |
119 | 119 | ||
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 2652362ce047..0b2450ceb13f 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -83,7 +83,7 @@ out: | |||
83 | 83 | ||
84 | struct irqaction c0_compare_irqaction = { | 84 | struct irqaction c0_compare_irqaction = { |
85 | .handler = c0_compare_interrupt, | 85 | .handler = c0_compare_interrupt, |
86 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 86 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, |
87 | .name = "timer", | 87 | .name = "timer", |
88 | }; | 88 | }; |
89 | 89 | ||
@@ -97,7 +97,7 @@ void mips_event_handler(struct clock_event_device *dev) | |||
97 | */ | 97 | */ |
98 | static int c0_compare_int_pending(void) | 98 | static int c0_compare_int_pending(void) |
99 | { | 99 | { |
100 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; | 100 | return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* | 103 | /* |
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index ac5903d1b20e..da78eeaea6e8 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c | |||
@@ -143,7 +143,7 @@ void __cpuinit sb1250_clockevent_init(void) | |||
143 | sb1250_unmask_irq(cpu, irq); | 143 | sb1250_unmask_irq(cpu, irq); |
144 | 144 | ||
145 | action->handler = sibyte_counter_handler; | 145 | action->handler = sibyte_counter_handler; |
146 | action->flags = IRQF_DISABLED | IRQF_PERCPU; | 146 | action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER; |
147 | action->name = name; | 147 | action->name = name; |
148 | action->dev_id = cd; | 148 | action->dev_id = cd; |
149 | 149 | ||
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index 98bd7de75778..b102e4f1630e 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c | |||
@@ -173,11 +173,12 @@ void smtc_distribute_timer(int vpe) | |||
173 | unsigned int mtflags; | 173 | unsigned int mtflags; |
174 | int cpu; | 174 | int cpu; |
175 | struct clock_event_device *cd; | 175 | struct clock_event_device *cd; |
176 | unsigned long nextstamp = 0L; | 176 | unsigned long nextstamp; |
177 | unsigned long reference; | 177 | unsigned long reference; |
178 | 178 | ||
179 | 179 | ||
180 | repeat: | 180 | repeat: |
181 | nextstamp = 0L; | ||
181 | for_each_online_cpu(cpu) { | 182 | for_each_online_cpu(cpu) { |
182 | /* | 183 | /* |
183 | * Find virtual CPUs within the current VPE who have | 184 | * Find virtual CPUs within the current VPE who have |
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 0037f21baf0d..218ee6bda935 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c | |||
@@ -146,7 +146,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) | |||
146 | 146 | ||
147 | static struct irqaction txx9tmr_irq = { | 147 | static struct irqaction txx9tmr_irq = { |
148 | .handler = txx9tmr_interrupt, | 148 | .handler = txx9tmr_interrupt, |
149 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 149 | .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER, |
150 | .name = "txx9tmr", | 150 | .name = "txx9tmr", |
151 | .dev_id = &txx9_clock_event_device, | 151 | .dev_id = &txx9_clock_event_device, |
152 | }; | 152 | }; |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f709657e4dcd..758ad426c57f 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/module.h> | ||
19 | 20 | ||
20 | #include <asm/bugs.h> | 21 | #include <asm/bugs.h> |
21 | #include <asm/cpu.h> | 22 | #include <asm/cpu.h> |
@@ -23,7 +24,7 @@ | |||
23 | #include <asm/mipsregs.h> | 24 | #include <asm/mipsregs.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
25 | #include <asm/watch.h> | 26 | #include <asm/watch.h> |
26 | 27 | #include <asm/spram.h> | |
27 | /* | 28 | /* |
28 | * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, | 29 | * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, |
29 | * the implementation of the "wait" feature differs between CPU families. This | 30 | * the implementation of the "wait" feature differs between CPU families. This |
@@ -32,6 +33,7 @@ | |||
32 | * the CPU very much. | 33 | * the CPU very much. |
33 | */ | 34 | */ |
34 | void (*cpu_wait)(void); | 35 | void (*cpu_wait)(void); |
36 | EXPORT_SYMBOL(cpu_wait); | ||
35 | 37 | ||
36 | static void r3081_wait(void) | 38 | static void r3081_wait(void) |
37 | { | 39 | { |
@@ -282,6 +284,15 @@ static inline int __cpu_has_fpu(void) | |||
282 | return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); | 284 | return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); |
283 | } | 285 | } |
284 | 286 | ||
287 | static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) | ||
288 | { | ||
289 | #ifdef __NEED_VMBITS_PROBE | ||
290 | write_c0_entryhi(0x3fffffffffffe000ULL); | ||
291 | back_to_back_c0_hazard(); | ||
292 | c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); | ||
293 | #endif | ||
294 | } | ||
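/*
 * [Editor's note, not part of the patch] cpu_probe_vmbits() probes how many
 * virtual address bits are implemented by writing an all-ones VPN2 pattern to
 * EntryHi and reading it back: unimplemented bits read back as zero. As an
 * illustration, on a core with 40 implemented VA bits the highest surviving
 * bit is bit 39, so fls64() reports vmbits = 40.
 */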
295 | |||
285 | #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ | 296 | #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ |
286 | | MIPS_CPU_COUNTER) | 297 | | MIPS_CPU_COUNTER) |
287 | 298 | ||
@@ -711,12 +722,6 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c) | |||
711 | mips_probe_watch_registers(c); | 722 | mips_probe_watch_registers(c); |
712 | } | 723 | } |
713 | 724 | ||
714 | #ifdef CONFIG_CPU_MIPSR2 | ||
715 | extern void spram_config(void); | ||
716 | #else | ||
717 | static inline void spram_config(void) {} | ||
718 | #endif | ||
719 | |||
720 | static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | 725 | static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) |
721 | { | 726 | { |
722 | decode_configs(c); | 727 | decode_configs(c); |
@@ -973,6 +978,8 @@ __cpuinit void cpu_probe(void) | |||
973 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | 978 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; |
974 | else | 979 | else |
975 | c->srsets = 1; | 980 | c->srsets = 1; |
981 | |||
982 | cpu_probe_vmbits(c); | ||
976 | } | 983 | } |
977 | 984 | ||
978 | __cpuinit void cpu_report(void) | 985 | __cpuinit void cpu_report(void) |
diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig new file mode 100644 index 000000000000..58c601eee6fd --- /dev/null +++ b/arch/mips/kernel/cpufreq/Kconfig | |||
@@ -0,0 +1,41 @@ | |||
1 | # | ||
2 | # CPU Frequency scaling | ||
3 | # | ||
4 | |||
5 | config MIPS_EXTERNAL_TIMER | ||
6 | bool | ||
7 | |||
8 | config MIPS_CPUFREQ | ||
9 | bool | ||
10 | default y | ||
11 | depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER | ||
12 | |||
13 | if MIPS_CPUFREQ | ||
14 | |||
15 | menu "CPU Frequency scaling" | ||
16 | |||
17 | source "drivers/cpufreq/Kconfig" | ||
18 | |||
19 | if CPU_FREQ | ||
20 | |||
21 | comment "CPUFreq processor drivers" | ||
22 | |||
23 | config LOONGSON2_CPUFREQ | ||
24 | tristate "Loongson2 CPUFreq Driver" | ||
25 | select CPU_FREQ_TABLE | ||
26 | depends on MIPS_CPUFREQ | ||
27 | help | ||
28 | This option adds a CPUFreq driver for Loongson processors which | ||
29 | support software-configurable CPU frequency. | ||
30 | |||
31 | Loongson2F and its successors support this feature. | ||
32 | |||
33 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
34 | |||
35 | If in doubt, say N. | ||
36 | |||
37 | endif # CPU_FREQ | ||
38 | |||
39 | endmenu | ||
40 | |||
41 | endif # MIPS_CPUFREQ | ||
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile new file mode 100644 index 000000000000..c3479a432efe --- /dev/null +++ b/arch/mips/kernel/cpufreq/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for the Linux/MIPS cpufreq. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o | ||
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c new file mode 100644 index 000000000000..d7ca256e33ef --- /dev/null +++ b/arch/mips/kernel/cpufreq/loongson2_clock.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology | ||
3 | * Author: Yanhua, yanh@lemote.com | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | |||
10 | #include <linux/cpufreq.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | |||
13 | #include <asm/clock.h> | ||
14 | |||
15 | #include <loongson.h> | ||
16 | |||
17 | static LIST_HEAD(clock_list); | ||
18 | static DEFINE_SPINLOCK(clock_lock); | ||
19 | static DEFINE_MUTEX(clock_list_sem); | ||
20 | |||
21 | /* Minimum CLK support */ | ||
22 | enum { | ||
23 | DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT, | ||
24 | DC_87PT, DC_DISABLE, DC_RESV | ||
25 | }; | ||
26 | |||
27 | struct cpufreq_frequency_table loongson2_clockmod_table[] = { | ||
28 | {DC_RESV, CPUFREQ_ENTRY_INVALID}, | ||
29 | {DC_ZERO, CPUFREQ_ENTRY_INVALID}, | ||
30 | {DC_25PT, 0}, | ||
31 | {DC_37PT, 0}, | ||
32 | {DC_50PT, 0}, | ||
33 | {DC_62PT, 0}, | ||
34 | {DC_75PT, 0}, | ||
35 | {DC_87PT, 0}, | ||
36 | {DC_DISABLE, 0}, | ||
37 | {DC_RESV, CPUFREQ_TABLE_END}, | ||
38 | }; | ||
39 | EXPORT_SYMBOL_GPL(loongson2_clockmod_table); | ||
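/*
 * [Editor's note, not part of the patch] The zero frequencies above are
 * placeholders; loongson2_cpufreq_cpu_init() fills entry i with
 * (CPU clock in kHz) * i / 8. For example, with an 800 MHz CPU clock
 * (cpuclk->rate = 800000 kHz) the DC_25PT..DC_87PT steps become 200000,
 * 300000, 400000, 500000, 600000 and 700000 kHz.
 */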
40 | |||
41 | static struct clk cpu_clk = { | ||
42 | .name = "cpu_clk", | ||
43 | .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, | ||
44 | .rate = 800000000, | ||
45 | }; | ||
46 | |||
47 | struct clk *clk_get(struct device *dev, const char *id) | ||
48 | { | ||
49 | return &cpu_clk; | ||
50 | } | ||
51 | EXPORT_SYMBOL(clk_get); | ||
52 | |||
53 | static void propagate_rate(struct clk *clk) | ||
54 | { | ||
55 | struct clk *clkp; | ||
56 | |||
57 | list_for_each_entry(clkp, &clock_list, node) { | ||
58 | if (likely(clkp->parent != clk)) | ||
59 | continue; | ||
60 | if (likely(clkp->ops && clkp->ops->recalc)) | ||
61 | clkp->ops->recalc(clkp); | ||
62 | if (unlikely(clkp->flags & CLK_RATE_PROPAGATES)) | ||
63 | propagate_rate(clkp); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | int clk_enable(struct clk *clk) | ||
68 | { | ||
69 | return 0; | ||
70 | } | ||
71 | EXPORT_SYMBOL(clk_enable); | ||
72 | |||
73 | void clk_disable(struct clk *clk) | ||
74 | { | ||
75 | } | ||
76 | EXPORT_SYMBOL(clk_disable); | ||
77 | |||
78 | unsigned long clk_get_rate(struct clk *clk) | ||
79 | { | ||
80 | return (unsigned long)clk->rate; | ||
81 | } | ||
82 | EXPORT_SYMBOL(clk_get_rate); | ||
83 | |||
84 | void clk_put(struct clk *clk) | ||
85 | { | ||
86 | } | ||
87 | EXPORT_SYMBOL(clk_put); | ||
88 | |||
89 | int clk_set_rate(struct clk *clk, unsigned long rate) | ||
90 | { | ||
91 | return clk_set_rate_ex(clk, rate, 0); | ||
92 | } | ||
93 | EXPORT_SYMBOL_GPL(clk_set_rate); | ||
94 | |||
95 | int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) | ||
96 | { | ||
97 | int ret = 0; | ||
98 | int regval; | ||
99 | int i; | ||
100 | |||
101 | if (likely(clk->ops && clk->ops->set_rate)) { | ||
102 | unsigned long flags; | ||
103 | |||
104 | spin_lock_irqsave(&clock_lock, flags); | ||
105 | ret = clk->ops->set_rate(clk, rate, algo_id); | ||
106 | spin_unlock_irqrestore(&clock_lock, flags); | ||
107 | } | ||
108 | |||
109 | if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) | ||
110 | propagate_rate(clk); | ||
111 | |||
112 | for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END; | ||
113 | i++) { | ||
114 | if (loongson2_clockmod_table[i].frequency == | ||
115 | CPUFREQ_ENTRY_INVALID) | ||
116 | continue; | ||
117 | if (rate == loongson2_clockmod_table[i].frequency) | ||
118 | break; | ||
119 | } | ||
120 | if (rate != loongson2_clockmod_table[i].frequency) | ||
121 | return -ENOTSUPP; | ||
122 | |||
123 | clk->rate = rate; | ||
124 | |||
125 | regval = LOONGSON_CHIPCFG0; | ||
126 | regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1); | ||
127 | LOONGSON_CHIPCFG0 = regval; | ||
128 | |||
129 | return ret; | ||
130 | } | ||
131 | EXPORT_SYMBOL_GPL(clk_set_rate_ex); | ||
132 | |||
133 | long clk_round_rate(struct clk *clk, unsigned long rate) | ||
134 | { | ||
135 | if (likely(clk->ops && clk->ops->round_rate)) { | ||
136 | unsigned long flags, rounded; | ||
137 | |||
138 | spin_lock_irqsave(&clock_lock, flags); | ||
139 | rounded = clk->ops->round_rate(clk, rate); | ||
140 | spin_unlock_irqrestore(&clock_lock, flags); | ||
141 | |||
142 | return rounded; | ||
143 | } | ||
144 | |||
145 | return rate; | ||
146 | } | ||
147 | EXPORT_SYMBOL_GPL(clk_round_rate); | ||
148 | |||
149 | /* | ||
150 | * This is the simple version of Loongson-2 wait. Maybe we need to do this | ||
151 | * in an interrupt-disabled context. | ||
152 | */ | ||
153 | |||
154 | DEFINE_SPINLOCK(loongson2_wait_lock); | ||
155 | void loongson2_cpu_wait(void) | ||
156 | { | ||
157 | u32 cpu_freq; | ||
158 | unsigned long flags; | ||
159 | |||
160 | spin_lock_irqsave(&loongson2_wait_lock, flags); | ||
161 | cpu_freq = LOONGSON_CHIPCFG0; | ||
162 | LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ | ||
163 | LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ | ||
164 | spin_unlock_irqrestore(&loongson2_wait_lock, flags); | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(loongson2_cpu_wait); | ||
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c new file mode 100644 index 000000000000..2f6a0b147ab8 --- /dev/null +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* | ||
2 | * Cpufreq driver for the loongson-2 processors | ||
3 | * | ||
4 | * The 2E revision of the Loongson processor does not support this feature. | ||
5 | * | ||
6 | * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology | ||
7 | * Author: Yanhua, yanh@lemote.com | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #include <linux/cpufreq.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/sched.h> /* set_cpus_allowed() */ | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include <asm/clock.h> | ||
21 | |||
22 | #include <loongson.h> | ||
23 | |||
24 | static uint nowait; | ||
25 | |||
26 | static struct clk *cpuclk; | ||
27 | |||
28 | static void (*saved_cpu_wait) (void); | ||
29 | |||
30 | static int loongson2_cpu_freq_notifier(struct notifier_block *nb, | ||
31 | unsigned long val, void *data); | ||
32 | |||
33 | static struct notifier_block loongson2_cpufreq_notifier_block = { | ||
34 | .notifier_call = loongson2_cpu_freq_notifier | ||
35 | }; | ||
36 | |||
37 | static int loongson2_cpu_freq_notifier(struct notifier_block *nb, | ||
38 | unsigned long val, void *data) | ||
39 | { | ||
40 | if (val == CPUFREQ_POSTCHANGE) | ||
41 | current_cpu_data.udelay_val = loops_per_jiffy; | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static unsigned int loongson2_cpufreq_get(unsigned int cpu) | ||
47 | { | ||
48 | return clk_get_rate(cpuclk); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Here we notify other drivers of the proposed change and the final change. | ||
53 | */ | ||
54 | static int loongson2_cpufreq_target(struct cpufreq_policy *policy, | ||
55 | unsigned int target_freq, | ||
56 | unsigned int relation) | ||
57 | { | ||
58 | unsigned int cpu = policy->cpu; | ||
59 | unsigned int newstate = 0; | ||
60 | cpumask_t cpus_allowed; | ||
61 | struct cpufreq_freqs freqs; | ||
62 | unsigned int freq; | ||
63 | |||
64 | if (!cpu_online(cpu)) | ||
65 | return -ENODEV; | ||
66 | |||
67 | cpus_allowed = current->cpus_allowed; | ||
68 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
69 | |||
70 | if (cpufreq_frequency_table_target | ||
71 | (policy, &loongson2_clockmod_table[0], target_freq, relation, | ||
72 | &newstate)) | ||
73 | return -EINVAL; | ||
74 | |||
75 | freq = | ||
76 | ((cpu_clock_freq / 1000) * | ||
77 | loongson2_clockmod_table[newstate].index) / 8; | ||
78 | if (freq < policy->min || freq > policy->max) | ||
79 | return -EINVAL; | ||
80 | |||
81 | pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); | ||
82 | |||
83 | freqs.cpu = cpu; | ||
84 | freqs.old = loongson2_cpufreq_get(cpu); | ||
85 | freqs.new = freq; | ||
86 | freqs.flags = 0; | ||
87 | |||
88 | if (freqs.new == freqs.old) | ||
89 | return 0; | ||
90 | |||
91 | /* notifiers */ | ||
92 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
93 | |||
94 | set_cpus_allowed(current, cpus_allowed); | ||
95 | |||
96 | /* setting the cpu frequency */ | ||
97 | clk_set_rate(cpuclk, freq); | ||
98 | |||
99 | /* notifiers */ | ||
100 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
101 | |||
102 | pr_debug("cpufreq: set frequency %u kHz\n", freq); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
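/*
 * [Editor's worked example, not part of the patch] With cpu_clock_freq at
 * 800 MHz, a request for roughly half speed resolves to the DC_50PT table
 * entry (index 4), so freq = (800000000 / 1000) * 4 / 8 = 400000 kHz;
 * clk_set_rate() then writes the matching duty-cycle value into
 * LOONGSON_CHIPCFG0 (see loongson2_clock.c above).
 */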
107 | static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
108 | { | ||
109 | int i; | ||
110 | |||
111 | if (!cpu_online(policy->cpu)) | ||
112 | return -ENODEV; | ||
113 | |||
114 | cpuclk = clk_get(NULL, "cpu_clk"); | ||
115 | if (IS_ERR(cpuclk)) { | ||
116 | printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); | ||
117 | return PTR_ERR(cpuclk); | ||
118 | } | ||
119 | |||
120 | cpuclk->rate = cpu_clock_freq / 1000; | ||
121 | if (!cpuclk->rate) | ||
122 | return -EINVAL; | ||
123 | |||
124 | /* clock table init */ | ||
125 | for (i = 2; | ||
126 | (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); | ||
127 | i++) | ||
128 | loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8; | ||
129 | |||
130 | policy->cur = loongson2_cpufreq_get(policy->cpu); | ||
131 | |||
132 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], | ||
133 | policy->cpu); | ||
134 | |||
135 | return cpufreq_frequency_table_cpuinfo(policy, | ||
136 | &loongson2_clockmod_table[0]); | ||
137 | } | ||
138 | |||
139 | static int loongson2_cpufreq_verify(struct cpufreq_policy *policy) | ||
140 | { | ||
141 | return cpufreq_frequency_table_verify(policy, | ||
142 | &loongson2_clockmod_table[0]); | ||
143 | } | ||
144 | |||
145 | static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) | ||
146 | { | ||
147 | clk_put(cpuclk); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct freq_attr *loongson2_table_attr[] = { | ||
152 | &cpufreq_freq_attr_scaling_available_freqs, | ||
153 | NULL, | ||
154 | }; | ||
155 | |||
156 | static struct cpufreq_driver loongson2_cpufreq_driver = { | ||
157 | .owner = THIS_MODULE, | ||
158 | .name = "loongson2", | ||
159 | .init = loongson2_cpufreq_cpu_init, | ||
160 | .verify = loongson2_cpufreq_verify, | ||
161 | .target = loongson2_cpufreq_target, | ||
162 | .get = loongson2_cpufreq_get, | ||
163 | .exit = loongson2_cpufreq_exit, | ||
164 | .attr = loongson2_table_attr, | ||
165 | }; | ||
166 | |||
167 | static struct platform_device_id platform_device_ids[] = { | ||
168 | { | ||
169 | .name = "loongson2_cpufreq", | ||
170 | }, | ||
171 | {} | ||
172 | }; | ||
173 | |||
174 | MODULE_DEVICE_TABLE(platform, platform_device_ids); | ||
175 | |||
176 | static struct platform_driver platform_driver = { | ||
177 | .driver = { | ||
178 | .name = "loongson2_cpufreq", | ||
179 | .owner = THIS_MODULE, | ||
180 | }, | ||
181 | .id_table = platform_device_ids, | ||
182 | }; | ||
183 | |||
184 | static int __init cpufreq_init(void) | ||
185 | { | ||
186 | int ret; | ||
187 | |||
188 | /* Register platform stuff */ | ||
189 | ret = platform_driver_register(&platform_driver); | ||
190 | if (ret) | ||
191 | return ret; | ||
192 | |||
193 | pr_info("cpufreq: Loongson-2F CPU frequency driver.\n"); | ||
194 | |||
195 | cpufreq_register_notifier(&loongson2_cpufreq_notifier_block, | ||
196 | CPUFREQ_TRANSITION_NOTIFIER); | ||
197 | |||
198 | ret = cpufreq_register_driver(&loongson2_cpufreq_driver); | ||
199 | |||
200 | if (!ret && !nowait) { | ||
201 | saved_cpu_wait = cpu_wait; | ||
202 | cpu_wait = loongson2_cpu_wait; | ||
203 | } | ||
204 | |||
205 | return ret; | ||
206 | } | ||
207 | |||
208 | static void __exit cpufreq_exit(void) | ||
209 | { | ||
210 | if (!nowait && saved_cpu_wait) | ||
211 | cpu_wait = saved_cpu_wait; | ||
212 | cpufreq_unregister_driver(&loongson2_cpufreq_driver); | ||
213 | cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block, | ||
214 | CPUFREQ_TRANSITION_NOTIFIER); | ||
215 | |||
216 | platform_driver_unregister(&platform_driver); | ||
217 | } | ||
218 | |||
219 | module_init(cpufreq_init); | ||
220 | module_exit(cpufreq_exit); | ||
221 | |||
222 | module_param(nowait, uint, 0644); | ||
223 | MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait"); | ||
224 | |||
225 | MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); | ||
226 | MODULE_DESCRIPTION("cpufreq driver for Loongson2F"); | ||
227 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c new file mode 100644 index 000000000000..a27c16c8690e --- /dev/null +++ b/arch/mips/kernel/csrc-powertv.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Scientific-Atlanta, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version 2 | ||
7 | * of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | */ | ||
18 | /* | ||
19 | * This file is derived from kernel/csrc-r4k.c | ||
20 | */ | ||
21 | #include <linux/clocksource.h> | ||
22 | #include <linux/init.h> | ||
23 | |||
24 | #include <asm/time.h> /* Not included in linux/time.h */ | ||
25 | |||
26 | #include <asm/mach-powertv/asic_regs.h> | ||
27 | #include "powertv-clock.h" | ||
28 | |||
29 | /* MIPS PLL Register Definitions */ | ||
30 | #define PLL_GET_M(x) (((x) >> 8) & 0x000000FF) | ||
31 | #define PLL_GET_N(x) (((x) >> 16) & 0x000000FF) | ||
32 | #define PLL_GET_P(x) (((x) >> 24) & 0x00000007) | ||
33 | |||
34 | /* | ||
35 | * returns: Clock frequency in kHz | ||
36 | */ | ||
37 | unsigned int __init mips_get_pll_freq(void) | ||
38 | { | ||
39 | unsigned int pll_reg, m, n, p; | ||
40 | unsigned int fin = 54000; /* Base frequency in kHz */ | ||
41 | unsigned int fout; | ||
42 | |||
43 | /* Read PLL register setting */ | ||
44 | pll_reg = asic_read(mips_pll_setup); | ||
45 | m = PLL_GET_M(pll_reg); | ||
46 | n = PLL_GET_N(pll_reg); | ||
47 | p = PLL_GET_P(pll_reg); | ||
48 | pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p); | ||
49 | |||
50 | /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */ | ||
51 | fout = ((2 * n * fin) / (m * (0x01 << p))); | ||
52 | |||
53 | pr_info("MIPS Clock Freq=%d kHz\n", fout); | ||
54 | |||
55 | return fout; | ||
56 | } | ||
57 | |||
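/*
 * [Editor's worked example, not part of the source] With an illustrative
 * register value decoding to M = 3, N = 25, P = 2, the formula above gives
 * fout = (2 * 25 * 54000) / (3 * (1 << 2)) = 2700000 / 12 = 225000 kHz,
 * i.e. a 225 MHz MIPS clock.
 */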
58 | static cycle_t c0_hpt_read(struct clocksource *cs) | ||
59 | { | ||
60 | return read_c0_count(); | ||
61 | } | ||
62 | |||
63 | static struct clocksource clocksource_mips = { | ||
64 | .name = "powertv-counter", | ||
65 | .read = c0_hpt_read, | ||
66 | .mask = CLOCKSOURCE_MASK(32), | ||
67 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
68 | }; | ||
69 | |||
70 | static void __init powertv_c0_hpt_clocksource_init(void) | ||
71 | { | ||
72 | unsigned int pll_freq = mips_get_pll_freq(); | ||
73 | |||
74 | pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000, | ||
75 | (pll_freq % 1000) * 100 / 1000); | ||
76 | |||
77 | mips_hpt_frequency = pll_freq / 2 * 1000; | ||
78 | |||
79 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; | ||
80 | |||
81 | clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); | ||
82 | |||
83 | clocksource_register(&clocksource_mips); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * struct tim_c - free running counter | ||
88 | * @hi: High 16 bits of the counter | ||
89 | * @lo: Low 32 bits of the counter | ||
90 | * | ||
91 | * Lays out the structure of the free running counter in memory. This counter | ||
92 | * increments at a rate of 27 MHz/8 on all platforms. | ||
93 | */ | ||
94 | struct tim_c { | ||
95 | unsigned int hi; | ||
96 | unsigned int lo; | ||
97 | }; | ||
98 | |||
99 | static struct tim_c *tim_c; | ||
100 | |||
101 | static cycle_t tim_c_read(struct clocksource *cs) | ||
102 | { | ||
103 | unsigned int hi; | ||
104 | unsigned int next_hi; | ||
105 | unsigned int lo; | ||
106 | |||
107 | hi = readl(&tim_c->hi); | ||
108 | |||
109 | for (;;) { | ||
110 | lo = readl(&tim_c->lo); | ||
111 | next_hi = readl(&tim_c->hi); | ||
112 | if (next_hi == hi) | ||
113 | break; | ||
114 | hi = next_hi; | ||
115 | } | ||
116 | |||
117 | pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo); | ||
118 | return ((u64) hi << 32) | lo; | ||
119 | } | ||
120 | |||
121 | #define TIM_C_SIZE 48 /* # bits in the timer */ | ||
122 | |||
123 | static struct clocksource clocksource_tim_c = { | ||
124 | .name = "powertv-tim_c", | ||
125 | .read = tim_c_read, | ||
126 | .mask = CLOCKSOURCE_MASK(TIM_C_SIZE), | ||
127 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
128 | }; | ||
129 | |||
130 | /** | ||
131 | * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock | ||
132 | * | ||
133 | * The hard part here is coming up with a constant k and shift s such that | ||
134 | * the 48-bit TIM_C value multiplied by k doesn't overflow and that value, | ||
135 | * when shifted right by s, yields the corresponding number of nanoseconds. | ||
136 | * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to | ||
137 | * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the | ||
138 | * number of nanoseconds. Since the TIM_C value has 48 bits and the math is | ||
139 | * done in 64 bits, avoiding an overflow means that k must be less than | ||
140 | * 64 - 48 = 16 bits. | ||
141 | */ | ||
142 | static void __init powertv_tim_c_clocksource_init(void) | ||
143 | { | ||
144 | int prescale; | ||
145 | unsigned long dividend; | ||
146 | unsigned long k; | ||
147 | int s; | ||
148 | const int max_k_bits = (64 - 48) - 1; | ||
149 | const unsigned long billion = 1000000000; | ||
150 | const unsigned long counts_per_second = 27000000 / 8; | ||
151 | |||
152 | prescale = BITS_PER_LONG - ilog2(billion) - 1; | ||
153 | dividend = billion << prescale; | ||
154 | k = dividend / counts_per_second; | ||
155 | s = ilog2(k) - max_k_bits; | ||
156 | |||
157 | if (s < 0) | ||
158 | s = prescale; | ||
159 | |||
160 | else { | ||
161 | k >>= s; | ||
162 | s += prescale; | ||
163 | } | ||
164 | |||
165 | clocksource_tim_c.mult = k; | ||
166 | clocksource_tim_c.shift = s; | ||
167 | clocksource_tim_c.rating = 200; | ||
168 | |||
169 | clocksource_register(&clocksource_tim_c); | ||
170 | tim_c = (struct tim_c *) asic_reg_addr(tim_ch); | ||
171 | } | ||
172 | |||
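/*
 * [Editor's worked example, not part of the source] Assuming a 32-bit kernel
 * (BITS_PER_LONG == 32): ilog2(10^9) = 29, so prescale = 2 and dividend =
 * 4 * 10^9; then k = dividend / 3375000 = 1185, and ilog2(1185) = 10 is below
 * max_k_bits, so s falls back to prescale = 2. The resulting conversion is
 * ns = cycles * 1185 >> 2, about 296.25 ns per tick versus the exact
 * 10^9 / 3375000 = 296.3 ns.
 */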
173 | /** | ||
174 | * powertv_clocksource_init - initialize all clocksources | ||
175 | */ | ||
176 | void __init powertv_clocksource_init(void) | ||
177 | { | ||
178 | powertv_c0_hpt_clocksource_init(); | ||
179 | powertv_tim_c_clocksource_init(); | ||
180 | } | ||
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c new file mode 100644 index 000000000000..68b067040d8b --- /dev/null +++ b/arch/mips/kernel/ftrace.c | |||
@@ -0,0 +1,275 @@ | |||
1 | /* | ||
2 | * Code for replacing ftrace calls with jumps. | ||
3 | * | ||
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | ||
5 | * Copyright (C) 2009 DSLab, Lanzhou University, China | ||
6 | * Author: Wu Zhangjin <wuzj@lemote.com> | ||
7 | * | ||
8 | * Thanks go to Steven Rostedt for writing the original x86 version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/ftrace.h> | ||
14 | |||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/asm.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | |||
19 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
20 | |||
21 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | ||
22 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | ||
23 | #define jump_insn_encode(op_code, addr) \ | ||
24 | ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK))) | ||
25 | |||
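/*
 * [Editor's example, not part of the source] jump_insn_encode() builds a MIPS
 * J-type instruction. For instance, jump_insn_encode(JAL, 0x80123458) yields
 * 0x0c048d16, i.e. "jal 0x80123458"; this is valid because caller and target
 * lie in the same 256 MB segment, so the upper address bits come from the PC.
 */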
26 | static unsigned int ftrace_nop = 0x00000000; | ||
27 | |||
28 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | ||
29 | { | ||
30 | int faulted; | ||
31 | |||
32 | /* *(unsigned int *)ip = new_code; */ | ||
33 | safe_store_code(new_code, ip, faulted); | ||
34 | |||
35 | if (unlikely(faulted)) | ||
36 | return -EFAULT; | ||
37 | |||
38 | flush_icache_range(ip, ip + 8); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int lui_v1; | ||
44 | static int jal_mcount; | ||
45 | |||
46 | int ftrace_make_nop(struct module *mod, | ||
47 | struct dyn_ftrace *rec, unsigned long addr) | ||
48 | { | ||
49 | unsigned int new; | ||
50 | int faulted; | ||
51 | unsigned long ip = rec->ip; | ||
52 | |||
53 | /* Modules are compiled with -mlong-calls, but the kernel is not, | ||
54 | * so the two cases have to be handled differently. */ | ||
55 | if (ip & 0x40000000) { | ||
56 | /* record it for ftrace_make_call */ | ||
57 | if (lui_v1 == 0) { | ||
58 | /* lui_v1 = *(unsigned int *)ip; */ | ||
59 | safe_load_code(lui_v1, ip, faulted); | ||
60 | |||
61 | if (unlikely(faulted)) | ||
62 | return -EFAULT; | ||
63 | } | ||
64 | |||
65 | /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
66 | * addiu v1, v1, low_16bit_of_mcount | ||
67 | * move at, ra | ||
68 | * jalr v1 | ||
69 | * nop | ||
70 | * 1f: (ip + 12) | ||
71 | */ | ||
72 | new = 0x10000004; | ||
73 | } else { | ||
74 | /* record/calculate it for ftrace_make_call */ | ||
75 | if (jal_mcount == 0) { | ||
76 | /* We could record it directly like this: | ||
77 | * jal_mcount = *(unsigned int *)ip; | ||
78 | * but here we instead encode a jal that jumps over the first two nop instructions */ | ||
79 | jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8)); | ||
80 | } | ||
81 | |||
82 | /* move at, ra | ||
83 | * jalr v1 --> nop | ||
84 | */ | ||
85 | new = ftrace_nop; | ||
86 | } | ||
87 | return ftrace_modify_code(ip, new); | ||
88 | } | ||
89 | |||
90 | static int modified; /* initialized as 0 by default */ | ||
91 | |||
92 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
93 | { | ||
94 | unsigned int new; | ||
95 | unsigned long ip = rec->ip; | ||
96 | |||
97 | /* We only need to remove the "b ftrace_stub" the first time. */ | ||
98 | if (modified == 0) { | ||
99 | modified = 1; | ||
100 | ftrace_modify_code(addr, ftrace_nop); | ||
101 | } | ||
102 | /* ip, module: 0xc0000000, kernel: 0x80000000 */ | ||
103 | new = (ip & 0x40000000) ? lui_v1 : jal_mcount; | ||
104 | |||
105 | return ftrace_modify_code(ip, new); | ||
106 | } | ||
107 | |||
108 | #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call)) | ||
109 | |||
110 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
111 | { | ||
112 | unsigned int new; | ||
113 | |||
114 | new = jump_insn_encode(JAL, (unsigned long)func); | ||
115 | |||
116 | return ftrace_modify_code(FTRACE_CALL_IP, new); | ||
117 | } | ||
118 | |||
119 | int __init ftrace_dyn_arch_init(void *data) | ||
120 | { | ||
121 | /* The return code is returned via data */ | ||
122 | *(unsigned long *)data = 0; | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
127 | |||
128 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
129 | |||
130 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
131 | |||
132 | extern void ftrace_graph_call(void); | ||
133 | #define JMP 0x08000000 /* jump to target directly */ | ||
134 | #define CALL_FTRACE_GRAPH_CALLER \ | ||
135 | jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller)) | ||
136 | #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call)) | ||
137 | |||
138 | int ftrace_enable_ftrace_graph_caller(void) | ||
139 | { | ||
140 | return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, | ||
141 | CALL_FTRACE_GRAPH_CALLER); | ||
142 | } | ||
143 | |||
144 | int ftrace_disable_ftrace_graph_caller(void) | ||
145 | { | ||
146 | return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop); | ||
147 | } | ||
148 | |||
149 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | ||
150 | |||
151 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | ||
152 | #define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ | ||
153 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ | ||
154 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ | ||
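/*
 * [Editor's example, not part of the source] "sw ra, 56(sp)" assembles to
 * 0xafbf0038: (code & S_RA_SP) == S_RA_SP identifies the store of ra, and
 * (code & OFFSET_MASK) recovers the stack offset 0x38 = 56 that
 * ftrace_get_parent_addr() below adds to the frame pointer.
 */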
155 | |||
156 | unsigned long ftrace_get_parent_addr(unsigned long self_addr, | ||
157 | unsigned long parent, | ||
158 | unsigned long parent_addr, | ||
159 | unsigned long fp) | ||
160 | { | ||
161 | unsigned long sp, ip, ra; | ||
162 | unsigned int code; | ||
163 | int faulted; | ||
164 | |||
165 | /* in module or kernel? */ | ||
166 | if (self_addr & 0x40000000) { | ||
167 | /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */ | ||
168 | ip = self_addr - 20; | ||
169 | } else { | ||
170 | /* kernel: move to the instruction "move ra, at" */ | ||
171 | ip = self_addr - 12; | ||
172 | } | ||
173 | |||
174 | /* search the text backwards until we find a non-store instruction or the | ||
175 | * "s{d,w} ra, offset(sp)" instruction */ | ||
176 | do { | ||
177 | ip -= 4; | ||
178 | |||
179 | /* get the code at "ip": code = *(unsigned int *)ip; */ | ||
180 | safe_load_code(code, ip, faulted); | ||
181 | |||
182 | if (unlikely(faulted)) | ||
183 | return 0; | ||
184 | |||
185 | /* If we hit the non-store instruction before finding where the | ||
186 | * ra is stored, then this is a leaf function and it does not | ||
187 | * store the ra on the stack. */ | ||
188 | if ((code & S_R_SP) != S_R_SP) | ||
189 | return parent_addr; | ||
190 | |||
191 | } while (((code & S_RA_SP) != S_RA_SP)); | ||
192 | |||
193 | sp = fp + (code & OFFSET_MASK); | ||
194 | |||
195 | /* ra = *(unsigned long *)sp; */ | ||
196 | safe_load_stack(ra, sp, faulted); | ||
197 | if (unlikely(faulted)) | ||
198 | return 0; | ||
199 | |||
200 | if (ra == parent) | ||
201 | return sp; | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | #endif | ||
206 | |||
207 | /* | ||
208 | * Hook the return address and push it onto the stack of return addresses | ||
209 | * in the current thread info. | ||
210 | */ | ||
211 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | ||
212 | unsigned long fp) | ||
213 | { | ||
214 | unsigned long old; | ||
215 | struct ftrace_graph_ent trace; | ||
216 | unsigned long return_hooker = (unsigned long) | ||
217 | &return_to_handler; | ||
218 | int faulted; | ||
219 | |||
220 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
221 | return; | ||
222 | |||
223 | /* "parent" is the stack address saved the return address of the caller | ||
224 | * of _mcount. | ||
225 | * | ||
226 | * if the gcc < 4.5, a leaf function does not save the return address | ||
227 | * in the stack address, so, we "emulate" one in _mcount's stack space, | ||
228 | * and hijack it directly, but for a non-leaf function, it save the | ||
229 | * return address to the its own stack space, we can not hijack it | ||
230 | * directly, but need to find the real stack address, | ||
231 | * ftrace_get_parent_addr() does it! | ||
232 | * | ||
233 | * if gcc>= 4.5, with the new -mmcount-ra-address option, for a | ||
234 | * non-leaf function, the location of the return address will be saved | ||
235 | * to $12 for us, and for a leaf function, only put a zero into $12. we | ||
236 | * do it in ftrace_graph_caller of mcount.S. | ||
237 | */ | ||
238 | |||
239 | /* old = *parent; */ | ||
240 | safe_load_stack(old, parent, faulted); | ||
241 | if (unlikely(faulted)) | ||
242 | goto out; | ||
243 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | ||
244 | parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, | ||
245 | (unsigned long)parent, | ||
246 | fp); | ||
247 | /* If getting the stack address of the non-leaf function's ra fails, | ||
248 | * stop the function graph tracer and return */ | ||
249 | if (parent == 0) | ||
250 | goto out; | ||
251 | #endif | ||
252 | /* *parent = return_hooker; */ | ||
253 | safe_store_stack(return_hooker, parent, faulted); | ||
254 | if (unlikely(faulted)) | ||
255 | goto out; | ||
256 | |||
257 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == | ||
258 | -EBUSY) { | ||
259 | *parent = old; | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | trace.func = self_addr; | ||
264 | |||
265 | /* Only trace if the calling function expects to */ | ||
266 | if (!ftrace_graph_entry(&trace)) { | ||
267 | current->curr_ret_stack--; | ||
268 | *parent = old; | ||
269 | } | ||
270 | return; | ||
271 | out: | ||
272 | ftrace_graph_stop(); | ||
273 | WARN_ON(1); | ||
274 | } | ||
275 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 531ce7b16124..ea695d9605e9 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -191,6 +191,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
191 | /* Set the SP after an empty pt_regs. */ | 191 | /* Set the SP after an empty pt_regs. */ |
192 | PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE | 192 | PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE |
193 | PTR_ADDU sp, $28 | 193 | PTR_ADDU sp, $28 |
194 | back_to_back_c0_hazard | ||
194 | set_saved_sp sp, t0, t1 | 195 | set_saved_sp sp, t0, t1 |
195 | PTR_SUBU sp, 4 * SZREG # init stack pointer | 196 | PTR_SUBU sp, 4 * SZREG # init stack pointer |
196 | 197 | ||
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index f7d8d5d0ddbf..ed5c441615e4 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -98,7 +98,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
98 | 98 | ||
99 | static struct irqaction irq0 = { | 99 | static struct irqaction irq0 = { |
100 | .handler = timer_interrupt, | 100 | .handler = timer_interrupt, |
101 | .flags = IRQF_DISABLED | IRQF_NOBALANCING, | 101 | .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, |
102 | .name = "timer" | 102 | .name = "timer" |
103 | }; | 103 | }; |
104 | 104 | ||
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index d2072cd38592..b181f2f0ea8e 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -14,38 +14,23 @@ | |||
14 | 14 | ||
15 | 15 | ||
16 | static unsigned long _gic_base; | 16 | static unsigned long _gic_base; |
17 | static unsigned int _irqbase, _mapsize, numvpes, numintrs; | 17 | static unsigned int _irqbase; |
18 | static struct gic_intr_map *_intrmap; | 18 | static unsigned int gic_irq_flags[GIC_NUM_INTRS]; |
19 | #define GIC_IRQ_FLAG_EDGE 0x0001 | ||
19 | 20 | ||
20 | static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; | 21 | struct gic_pcpu_mask pcpu_masks[NR_CPUS]; |
21 | static struct gic_pending_regs pending_regs[NR_CPUS]; | 22 | static struct gic_pending_regs pending_regs[NR_CPUS]; |
22 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; | 23 | static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; |
23 | 24 | ||
24 | #define gic_wedgeb2bok 0 /* | ||
25 | * Can GIC handle b2b writes to wedge register? | ||
26 | */ | ||
27 | #if gic_wedgeb2bok == 0 | ||
28 | static DEFINE_SPINLOCK(gic_wedgeb2b_lock); | ||
29 | #endif | ||
30 | |||
31 | void gic_send_ipi(unsigned int intr) | 25 | void gic_send_ipi(unsigned int intr) |
32 | { | 26 | { |
33 | #if gic_wedgeb2bok == 0 | ||
34 | unsigned long flags; | ||
35 | #endif | ||
36 | pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__, | 27 | pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__, |
37 | read_c0_status()); | 28 | read_c0_status()); |
38 | if (!gic_wedgeb2bok) | ||
39 | spin_lock_irqsave(&gic_wedgeb2b_lock, flags); | ||
40 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); | 29 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr); |
41 | if (!gic_wedgeb2bok) { | ||
42 | (void) GIC_REG(SHARED, GIC_SH_CONFIG); | ||
43 | spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags); | ||
44 | } | ||
45 | } | 30 | } |
46 | 31 | ||
47 | /* This is Malta specific and needs to be exported */ | 32 | /* This is Malta specific and needs to be exported */ |
48 | static void vpe_local_setup(unsigned int numvpes) | 33 | static void __init vpe_local_setup(unsigned int numvpes) |
49 | { | 34 | { |
50 | int i; | 35 | int i; |
51 | unsigned long timer_interrupt = 5, perf_interrupt = 5; | 36 | unsigned long timer_interrupt = 5, perf_interrupt = 5; |
@@ -105,44 +90,34 @@ unsigned int gic_get_int(void) | |||
105 | 90 | ||
106 | static unsigned int gic_irq_startup(unsigned int irq) | 91 | static unsigned int gic_irq_startup(unsigned int irq) |
107 | { | 92 | { |
108 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
109 | irq -= _irqbase; | 93 | irq -= _irqbase; |
110 | GIC_SET_INTR_MASK(irq, 1); | 94 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
95 | GIC_SET_INTR_MASK(irq); | ||
111 | return 0; | 96 | return 0; |
112 | } | 97 | } |
113 | 98 | ||
114 | static void gic_irq_ack(unsigned int irq) | 99 | static void gic_irq_ack(unsigned int irq) |
115 | { | 100 | { |
116 | #if gic_wedgeb2bok == 0 | ||
117 | unsigned long flags; | ||
118 | #endif | ||
119 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
120 | irq -= _irqbase; | 101 | irq -= _irqbase; |
121 | GIC_CLR_INTR_MASK(irq, 1); | 102 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
103 | GIC_CLR_INTR_MASK(irq); | ||
122 | 104 | ||
123 | if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) { | 105 | if (gic_irq_flags[irq] & GIC_IRQ_FLAG_EDGE) |
124 | if (!gic_wedgeb2bok) | ||
125 | spin_lock_irqsave(&gic_wedgeb2b_lock, flags); | ||
126 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); | 106 | GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); |
127 | if (!gic_wedgeb2bok) { | ||
128 | (void) GIC_REG(SHARED, GIC_SH_CONFIG); | ||
129 | spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags); | ||
130 | } | ||
131 | } | ||
132 | } | 107 | } |
133 | 108 | ||
134 | static void gic_mask_irq(unsigned int irq) | 109 | static void gic_mask_irq(unsigned int irq) |
135 | { | 110 | { |
136 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
137 | irq -= _irqbase; | 111 | irq -= _irqbase; |
138 | GIC_CLR_INTR_MASK(irq, 1); | 112 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
113 | GIC_CLR_INTR_MASK(irq); | ||
139 | } | 114 | } |
140 | 115 | ||
141 | static void gic_unmask_irq(unsigned int irq) | 116 | static void gic_unmask_irq(unsigned int irq) |
142 | { | 117 | { |
143 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); | ||
144 | irq -= _irqbase; | 118 | irq -= _irqbase; |
145 | GIC_SET_INTR_MASK(irq, 1); | 119 | pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); |
120 | GIC_SET_INTR_MASK(irq); | ||
146 | } | 121 | } |
147 | 122 | ||
148 | #ifdef CONFIG_SMP | 123 | #ifdef CONFIG_SMP |
@@ -155,9 +130,8 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
155 | unsigned long flags; | 130 | unsigned long flags; |
156 | int i; | 131 | int i; |
157 | 132 | ||
158 | pr_debug(KERN_DEBUG "%s called\n", __func__); | ||
159 | irq -= _irqbase; | 133 | irq -= _irqbase; |
160 | 134 | pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); | |
161 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 135 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
162 | if (cpus_empty(tmp)) | 136 | if (cpus_empty(tmp)) |
163 | return -1; | 137 | return -1; |
@@ -168,13 +142,6 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
168 | /* Re-route this IRQ */ | 142 | /* Re-route this IRQ */ |
169 | GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); | 143 | GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); |
170 | 144 | ||
171 | /* | ||
172 | * FIXME: assumption that _intrmap is ordered and has no holes | ||
173 | */ | ||
174 | |||
175 | /* Update the intr_map */ | ||
176 | _intrmap[irq].cpunum = first_cpu(tmp); | ||
177 | |||
178 | /* Update the pcpu_masks */ | 145 | /* Update the pcpu_masks */ |
179 | for (i = 0; i < NR_CPUS; i++) | 146 | for (i = 0; i < NR_CPUS; i++) |
180 | clear_bit(irq, pcpu_masks[i].pcpu_mask); | 147 | clear_bit(irq, pcpu_masks[i].pcpu_mask); |
@@ -201,8 +168,9 @@ static struct irq_chip gic_irq_controller = { | |||
201 | #endif | 168 | #endif |
202 | }; | 169 | }; |
203 | 170 | ||
204 | static void __init setup_intr(unsigned int intr, unsigned int cpu, | 171 | static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, |
205 | unsigned int pin, unsigned int polarity, unsigned int trigtype) | 172 | unsigned int pin, unsigned int polarity, unsigned int trigtype, |
173 | unsigned int flags) | ||
206 | { | 174 | { |
207 | /* Setup Intr to Pin mapping */ | 175 | /* Setup Intr to Pin mapping */ |
208 | if (pin & GIC_MAP_TO_NMI_MSK) { | 176 | if (pin & GIC_MAP_TO_NMI_MSK) { |
@@ -227,38 +195,43 @@ static void __init setup_intr(unsigned int intr, unsigned int cpu, | |||
227 | GIC_SET_TRIGGER(intr, trigtype); | 195 | GIC_SET_TRIGGER(intr, trigtype); |
228 | 196 | ||
229 | /* Init Intr Masks */ | 197 | /* Init Intr Masks */ |
230 | GIC_SET_INTR_MASK(intr, 0); | 198 | GIC_CLR_INTR_MASK(intr); |
199 | /* Initialise per-cpu Interrupt software masks */ | ||
200 | if (flags & GIC_FLAG_IPI) | ||
201 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); | ||
202 | if (flags & GIC_FLAG_TRANSPARENT) | ||
203 | GIC_SET_INTR_MASK(intr); | ||
204 | if (trigtype == GIC_TRIG_EDGE) | ||
205 | gic_irq_flags[intr] |= GIC_IRQ_FLAG_EDGE; | ||
231 | } | 206 | } |
232 | 207 | ||
233 | static void __init gic_basic_init(void) | 208 | static void __init gic_basic_init(int numintrs, int numvpes, |
209 | struct gic_intr_map *intrmap, int mapsize) | ||
234 | { | 210 | { |
235 | unsigned int i, cpu; | 211 | unsigned int i, cpu; |
236 | 212 | ||
237 | /* Setup defaults */ | 213 | /* Setup defaults */ |
238 | for (i = 0; i < GIC_NUM_INTRS; i++) { | 214 | for (i = 0; i < numintrs; i++) { |
239 | GIC_SET_POLARITY(i, GIC_POL_POS); | 215 | GIC_SET_POLARITY(i, GIC_POL_POS); |
240 | GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); | 216 | GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL); |
241 | GIC_SET_INTR_MASK(i, 0); | 217 | GIC_CLR_INTR_MASK(i); |
218 | if (i < GIC_NUM_INTRS) | ||
219 | gic_irq_flags[i] = 0; | ||
242 | } | 220 | } |
243 | 221 | ||
244 | /* Setup specifics */ | 222 | /* Setup specifics */ |
245 | for (i = 0; i < _mapsize; i++) { | 223 | for (i = 0; i < mapsize; i++) { |
246 | cpu = _intrmap[i].cpunum; | 224 | cpu = intrmap[i].cpunum; |
247 | if (cpu == X) | 225 | if (cpu == X) |
248 | continue; | 226 | continue; |
249 | 227 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) | |
250 | if (cpu == 0 && i != 0 && _intrmap[i].intrnum == 0 && | ||
251 | _intrmap[i].ipiflag == 0) | ||
252 | continue; | 228 | continue; |
253 | 229 | gic_setup_intr(i, | |
254 | setup_intr(_intrmap[i].intrnum, | 230 | intrmap[i].cpunum, |
255 | _intrmap[i].cpunum, | 231 | intrmap[i].pin, |
256 | _intrmap[i].pin, | 232 | intrmap[i].polarity, |
257 | _intrmap[i].polarity, | 233 | intrmap[i].trigtype, |
258 | _intrmap[i].trigtype); | 234 | intrmap[i].flags); |
259 | /* Initialise per-cpu Interrupt software masks */ | ||
260 | if (_intrmap[i].ipiflag) | ||
261 | set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask); | ||
262 | } | 235 | } |
263 | 236 | ||
264 | vpe_local_setup(numvpes); | 237 | vpe_local_setup(numvpes); |
@@ -273,12 +246,11 @@ void __init gic_init(unsigned long gic_base_addr, | |||
273 | unsigned int irqbase) | 246 | unsigned int irqbase) |
274 | { | 247 | { |
275 | unsigned int gicconfig; | 248 | unsigned int gicconfig; |
249 | int numvpes, numintrs; | ||
276 | 250 | ||
277 | _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, | 251 | _gic_base = (unsigned long) ioremap_nocache(gic_base_addr, |
278 | gic_addrspace_size); | 252 | gic_addrspace_size); |
279 | _irqbase = irqbase; | 253 | _irqbase = irqbase; |
280 | _intrmap = intr_map; | ||
281 | _mapsize = intr_map_size; | ||
282 | 254 | ||
283 | GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); | 255 | GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); |
284 | numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> | 256 | numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> |
@@ -290,5 +262,5 @@ void __init gic_init(unsigned long gic_base_addr, | |||
290 | 262 | ||
291 | pr_debug("%s called\n", __func__); | 263 | pr_debug("%s called\n", __func__); |
292 | 264 | ||
293 | gic_basic_init(); | 265 | gic_basic_init(numintrs, numvpes, intr_map, intr_map_size); |
294 | } | 266 | } |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba9dff4..981f86c26168 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/kallsyms.h> | 23 | #include <linux/kallsyms.h> |
24 | #include <linux/kgdb.h> | 24 | #include <linux/kgdb.h> |
25 | #include <linux/ftrace.h> | ||
25 | 26 | ||
26 | #include <asm/atomic.h> | 27 | #include <asm/atomic.h> |
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
@@ -99,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
99 | } | 100 | } |
100 | 101 | ||
101 | if (i < NR_IRQS) { | 102 | if (i < NR_IRQS) { |
102 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 103 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
103 | action = irq_desc[i].action; | 104 | action = irq_desc[i].action; |
104 | if (!action) | 105 | if (!action) |
105 | goto skip; | 106 | goto skip; |
@@ -118,7 +119,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
118 | 119 | ||
119 | seq_putc(p, '\n'); | 120 | seq_putc(p, '\n'); |
120 | skip: | 121 | skip: |
121 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 122 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
122 | } else if (i == NR_IRQS) { | 123 | } else if (i == NR_IRQS) { |
123 | seq_putc(p, '\n'); | 124 | seq_putc(p, '\n'); |
124 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 125 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
@@ -150,3 +151,32 @@ void __init init_IRQ(void) | |||
150 | kgdb_early_setup = 1; | 151 | kgdb_early_setup = 1; |
151 | #endif | 152 | #endif |
152 | } | 153 | } |
154 | |||
155 | /* | ||
156 | * do_IRQ handles all normal device IRQ's (the special | ||
157 | * SMP cross-CPU interrupts have their own specific | ||
158 | * handlers). | ||
159 | */ | ||
160 | void __irq_entry do_IRQ(unsigned int irq) | ||
161 | { | ||
162 | irq_enter(); | ||
163 | __DO_IRQ_SMTC_HOOK(irq); | ||
164 | generic_handle_irq(irq); | ||
165 | irq_exit(); | ||
166 | } | ||
167 | |||
168 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||
169 | /* | ||
170 | * To avoid inefficient and in some cases pathological re-checking of | ||
171 | * IRQ affinity, we have this variant that skips the affinity check. | ||
172 | */ | ||
173 | |||
174 | void __irq_entry do_IRQ_no_affinity(unsigned int irq) | ||
175 | { | ||
176 | irq_enter(); | ||
177 | __NO_AFFINITY_IRQ_SMTC_HOOK(irq); | ||
178 | generic_handle_irq(irq); | ||
179 | irq_exit(); | ||
180 | } | ||
181 | |||
182 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||
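The do_IRQ() added above is the funnel that board-level dispatchers push hardware interrupts through; tagging it __irq_entry is what lets the function graph tracer recognise the IRQ boundary. For illustration only (this dispatcher is hypothetical and not part of the patch; the headers and IRQ numbering are platform-specific), a minimal CPU-level dispatcher on MIPS looks roughly like this:

	#include <linux/irq.h>
	#include <asm/irq.h>
	#include <asm/mipsregs.h>

	asmlinkage void plat_irq_dispatch(void)
	{
		/* Pending lines = Cause.IP bits that are unmasked in Status.IM */
		unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

		if (pending & CAUSEF_IP7)		/* CPU timer */
			do_IRQ(MIPS_CPU_IRQ_BASE + 7);
		else if (pending & CAUSEF_IP2)		/* cascaded interrupt controller */
			do_IRQ(MIPS_CPU_IRQ_BASE + 2);
		else
			spurious_interrupt();
	}

Each call lands in the do_IRQ() above, which brackets generic_handle_irq() with irq_enter()/irq_exit() so softirq and preemption accounting stay correct.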
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index ad4e017ed2f3..80e2ba694bab 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
@@ -82,6 +82,7 @@ static int sp_stopping; | |||
82 | #define MTSP_O_SHLOCK 0x0010 | 82 | #define MTSP_O_SHLOCK 0x0010 |
83 | #define MTSP_O_EXLOCK 0x0020 | 83 | #define MTSP_O_EXLOCK 0x0020 |
84 | #define MTSP_O_ASYNC 0x0040 | 84 | #define MTSP_O_ASYNC 0x0040 |
85 | /* XXX: check which of these is actually O_SYNC vs O_DSYNC */ | ||
85 | #define MTSP_O_FSYNC O_SYNC | 86 | #define MTSP_O_FSYNC O_SYNC |
86 | #define MTSP_O_NOFOLLOW 0x0100 | 87 | #define MTSP_O_NOFOLLOW 0x0100 |
87 | #define MTSP_O_SYNC 0x0080 | 88 | #define MTSP_O_SYNC 0x0080 |
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 6242bc68add7..f042563c924f 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -67,28 +67,13 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len, | |||
67 | unsigned long, prot, unsigned long, flags, unsigned long, fd, | 67 | unsigned long, prot, unsigned long, flags, unsigned long, fd, |
68 | unsigned long, pgoff) | 68 | unsigned long, pgoff) |
69 | { | 69 | { |
70 | struct file * file = NULL; | ||
71 | unsigned long error; | 70 | unsigned long error; |
72 | 71 | ||
73 | error = -EINVAL; | 72 | error = -EINVAL; |
74 | if (pgoff & (~PAGE_MASK >> 12)) | 73 | if (pgoff & (~PAGE_MASK >> 12)) |
75 | goto out; | 74 | goto out; |
76 | pgoff >>= PAGE_SHIFT-12; | 75 | error = sys_mmap_pgoff(addr, len, prot, flags, fd, |
77 | 76 | pgoff >> (PAGE_SHIFT-12)); | |
78 | if (!(flags & MAP_ANONYMOUS)) { | ||
79 | error = -EBADF; | ||
80 | file = fget(fd); | ||
81 | if (!file) | ||
82 | goto out; | ||
83 | } | ||
84 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
85 | |||
86 | down_write(¤t->mm->mmap_sem); | ||
87 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
88 | up_write(¤t->mm->mmap_sem); | ||
89 | if (file) | ||
90 | fput(file); | ||
91 | |||
92 | out: | 77 | out: |
93 | return error; | 78 | return error; |
94 | } | 79 | } |
@@ -265,67 +250,6 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz, | |||
265 | } | 250 | } |
266 | #endif | 251 | #endif |
267 | 252 | ||
268 | struct sysctl_args32 | ||
269 | { | ||
270 | compat_caddr_t name; | ||
271 | int nlen; | ||
272 | compat_caddr_t oldval; | ||
273 | compat_caddr_t oldlenp; | ||
274 | compat_caddr_t newval; | ||
275 | compat_size_t newlen; | ||
276 | unsigned int __unused[4]; | ||
277 | }; | ||
278 | |||
279 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
280 | |||
281 | SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args) | ||
282 | { | ||
283 | struct sysctl_args32 tmp; | ||
284 | int error; | ||
285 | size_t oldlen; | ||
286 | size_t __user *oldlenp = NULL; | ||
287 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; | ||
288 | |||
289 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
290 | return -EFAULT; | ||
291 | |||
292 | if (tmp.oldval && tmp.oldlenp) { | ||
293 | /* Duh, this is ugly and might not work if sysctl_args | ||
294 | is in read-only memory, but do_sysctl does indirectly | ||
295 | a lot of uaccess in both directions and we'd have to | ||
296 | basically copy the whole sysctl.c here, and | ||
297 | glibc's __sysctl uses rw memory for the structure | ||
298 | anyway. */ | ||
299 | if (get_user(oldlen, (u32 __user *)A(tmp.oldlenp)) || | ||
300 | put_user(oldlen, (size_t __user *)addr)) | ||
301 | return -EFAULT; | ||
302 | oldlenp = (size_t __user *)addr; | ||
303 | } | ||
304 | |||
305 | lock_kernel(); | ||
306 | error = do_sysctl((int __user *)A(tmp.name), tmp.nlen, (void __user *)A(tmp.oldval), | ||
307 | oldlenp, (void __user *)A(tmp.newval), tmp.newlen); | ||
308 | unlock_kernel(); | ||
309 | if (oldlenp) { | ||
310 | if (!error) { | ||
311 | if (get_user(oldlen, (size_t __user *)addr) || | ||
312 | put_user(oldlen, (u32 __user *)A(tmp.oldlenp))) | ||
313 | error = -EFAULT; | ||
314 | } | ||
315 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | ||
316 | } | ||
317 | return error; | ||
318 | } | ||
319 | |||
320 | #else | ||
321 | |||
322 | SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args) | ||
323 | { | ||
324 | return -ENOSYS; | ||
325 | } | ||
326 | |||
327 | #endif /* CONFIG_SYSCTL_SYSCALL */ | ||
328 | |||
329 | SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name) | 253 | SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name) |
330 | { | 254 | { |
331 | int ret = 0; | 255 | int ret = 0; |
@@ -428,3 +352,9 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs) | |||
428 | return do_fork(clone_flags, newsp, ®s, 0, | 352 | return do_fork(clone_flags, newsp, ®s, 0, |
429 | parent_tidptr, child_tidptr); | 353 | parent_tidptr, child_tidptr); |
430 | } | 354 | } |
355 | |||
356 | asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, | ||
357 | size_t len) | ||
358 | { | ||
359 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); | ||
360 | } | ||
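sys32_lookup_dcookie() exists because an o32 caller passes the 64-bit cookie as two 32-bit register arguments; merge_64() glues the pair back together before the native sys_lookup_dcookie() runs. The macro is defined earlier in linux32.c, roughly along these lines (register order depends on endianness):

	#ifdef __MIPSEB__
	# define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
	#else	/* __MIPSEL__ */
	# define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
	#endif

So a cookie of 0x0000000100000002 arrives as a0 = 0x00000001, a1 = 0x00000002 on a big-endian kernel and in the opposite register order on little-endian.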
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S new file mode 100644 index 000000000000..0a9cfdb271dd --- /dev/null +++ b/arch/mips/kernel/mcount.S | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * MIPS specific _mcount support | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive for | ||
6 | * more details. | ||
7 | * | ||
8 | * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China | ||
9 | * Author: Wu Zhangjin <wuzj@lemote.com> | ||
10 | */ | ||
11 | |||
12 | #include <asm/regdef.h> | ||
13 | #include <asm/stackframe.h> | ||
14 | #include <asm/ftrace.h> | ||
15 | |||
16 | .text | ||
17 | .set noreorder | ||
18 | .set noat | ||
19 | |||
20 | .macro MCOUNT_SAVE_REGS | ||
21 | PTR_SUBU sp, PT_SIZE | ||
22 | PTR_S ra, PT_R31(sp) | ||
23 | PTR_S AT, PT_R1(sp) | ||
24 | PTR_S a0, PT_R4(sp) | ||
25 | PTR_S a1, PT_R5(sp) | ||
26 | PTR_S a2, PT_R6(sp) | ||
27 | PTR_S a3, PT_R7(sp) | ||
28 | #ifdef CONFIG_64BIT | ||
29 | PTR_S a4, PT_R8(sp) | ||
30 | PTR_S a5, PT_R9(sp) | ||
31 | PTR_S a6, PT_R10(sp) | ||
32 | PTR_S a7, PT_R11(sp) | ||
33 | #endif | ||
34 | .endm | ||
35 | |||
36 | .macro MCOUNT_RESTORE_REGS | ||
37 | PTR_L ra, PT_R31(sp) | ||
38 | PTR_L AT, PT_R1(sp) | ||
39 | PTR_L a0, PT_R4(sp) | ||
40 | PTR_L a1, PT_R5(sp) | ||
41 | PTR_L a2, PT_R6(sp) | ||
42 | PTR_L a3, PT_R7(sp) | ||
43 | #ifdef CONFIG_64BIT | ||
44 | PTR_L a4, PT_R8(sp) | ||
45 | PTR_L a5, PT_R9(sp) | ||
46 | PTR_L a6, PT_R10(sp) | ||
47 | PTR_L a7, PT_R11(sp) | ||
48 | #endif | ||
49 | #ifdef CONFIG_64BIT | ||
50 | PTR_ADDIU sp, PT_SIZE | ||
51 | #else | ||
52 | PTR_ADDIU sp, (PT_SIZE + 8) | ||
53 | #endif | ||
54 | .endm | ||
55 | |||
56 | .macro RETURN_BACK | ||
57 | jr ra | ||
58 | move ra, AT | ||
59 | .endm | ||
60 | |||
61 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
62 | |||
63 | NESTED(ftrace_caller, PT_SIZE, ra) | ||
64 | .globl _mcount | ||
65 | _mcount: | ||
66 | b ftrace_stub | ||
67 | nop | ||
68 | lw t1, function_trace_stop | ||
69 | bnez t1, ftrace_stub | ||
70 | nop | ||
71 | |||
72 | MCOUNT_SAVE_REGS | ||
73 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | ||
74 | PTR_S t0, PT_R12(sp) /* t0 saved the location of the return address(at) by -mmcount-ra-address */ | ||
75 | #endif | ||
76 | |||
77 | move a0, ra /* arg1: next ip, selfaddr */ | ||
78 | .globl ftrace_call | ||
79 | ftrace_call: | ||
80 | nop /* a placeholder for the call to a real tracing function */ | ||
81 | move a1, AT /* arg2: the caller's next ip, parent */ | ||
82 | |||
83 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
84 | .globl ftrace_graph_call | ||
85 | ftrace_graph_call: | ||
86 | nop | ||
87 | nop | ||
88 | #endif | ||
89 | |||
90 | MCOUNT_RESTORE_REGS | ||
91 | .globl ftrace_stub | ||
92 | ftrace_stub: | ||
93 | RETURN_BACK | ||
94 | END(ftrace_caller) | ||
95 | |||
96 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | ||
97 | |||
98 | NESTED(_mcount, PT_SIZE, ra) | ||
99 | lw t1, function_trace_stop | ||
100 | bnez t1, ftrace_stub | ||
101 | nop | ||
102 | PTR_LA t1, ftrace_stub | ||
103 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ | ||
104 | bne t1, t2, static_trace | ||
105 | nop | ||
106 | |||
107 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
108 | PTR_L t3, ftrace_graph_return | ||
109 | bne t1, t3, ftrace_graph_caller | ||
110 | nop | ||
111 | PTR_LA t1, ftrace_graph_entry_stub | ||
112 | PTR_L t3, ftrace_graph_entry | ||
113 | bne t1, t3, ftrace_graph_caller | ||
114 | nop | ||
115 | #endif | ||
116 | b ftrace_stub | ||
117 | nop | ||
118 | |||
119 | static_trace: | ||
120 | MCOUNT_SAVE_REGS | ||
121 | |||
122 | move a0, ra /* arg1: next ip, selfaddr */ | ||
123 | jalr t2 /* (1) call *ftrace_trace_function */ | ||
124 | move a1, AT /* arg2: the caller's next ip, parent */ | ||
125 | |||
126 | MCOUNT_RESTORE_REGS | ||
127 | .globl ftrace_stub | ||
128 | ftrace_stub: | ||
129 | RETURN_BACK | ||
130 | END(_mcount) | ||
131 | |||
132 | #endif /* ! CONFIG_DYNAMIC_FTRACE */ | ||
133 | |||
134 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
135 | |||
136 | NESTED(ftrace_graph_caller, PT_SIZE, ra) | ||
137 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
138 | PTR_L a1, PT_R31(sp) /* load the original ra from the stack */ | ||
139 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | ||
140 | PTR_L t0, PT_R12(sp) /* load the original t0 from the stack */ | ||
141 | #endif | ||
142 | #else | ||
143 | MCOUNT_SAVE_REGS | ||
144 | move a1, ra /* arg2: next ip, selfaddr */ | ||
145 | #endif | ||
146 | |||
147 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | ||
148 | bnez t0, 1f /* non-leaf func: t0 saved the location of the return address */ | ||
149 | nop | ||
150 | PTR_LA t0, PT_R1(sp) /* leaf func: get the location of at(old ra) from our own stack */ | ||
151 | 1: move a0, t0 /* arg1: the location of the return address */ | ||
152 | #else | ||
153 | PTR_LA a0, PT_R1(sp) /* arg1: &AT -> a0 */ | ||
154 | #endif | ||
155 | jal prepare_ftrace_return | ||
156 | #ifdef CONFIG_FRAME_POINTER | ||
157 | move a2, fp /* arg3: frame pointer */ | ||
158 | #else | ||
159 | #ifdef CONFIG_64BIT | ||
160 | PTR_LA a2, PT_SIZE(sp) | ||
161 | #else | ||
162 | PTR_LA a2, (PT_SIZE+8)(sp) | ||
163 | #endif | ||
164 | #endif | ||
165 | |||
166 | MCOUNT_RESTORE_REGS | ||
167 | RETURN_BACK | ||
168 | END(ftrace_graph_caller) | ||
169 | |||
170 | .align 2 | ||
171 | .globl return_to_handler | ||
172 | return_to_handler: | ||
173 | PTR_SUBU sp, PT_SIZE | ||
174 | PTR_S v0, PT_R2(sp) | ||
175 | |||
176 | jal ftrace_return_to_handler | ||
177 | PTR_S v1, PT_R3(sp) | ||
178 | |||
179 | /* restore the real parent address: v0 -> ra */ | ||
180 | move ra, v0 | ||
181 | |||
182 | PTR_L v0, PT_R2(sp) | ||
183 | PTR_L v1, PT_R3(sp) | ||
184 | jr ra | ||
185 | PTR_ADDIU sp, PT_SIZE | ||
186 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
187 | |||
188 | .set at | ||
189 | .set reorder | ||
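The convention set up above is: on entry to _mcount, ra holds an address inside the instrumented function and AT holds that function's caller, so the trampoline hands them over as arg1/arg2 to whatever ftrace_trace_function points at. As a hypothetical consumer (not part of this patch, and the ftrace_ops registration details have shifted between kernel versions), a minimal tracer would look something like:

	#include <linux/ftrace.h>
	#include <linux/kernel.h>

	/* Called on every traced function entry:
	 *   ip        - address within the function that called _mcount
	 *   parent_ip - return address into that function's caller
	 */
	static void notrace my_trace(unsigned long ip, unsigned long parent_ip)
	{
		trace_printk("hit %lx from %lx\n", ip, parent_ip);
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace,
	};

	/* register_ftrace_function(&my_ops) arms the hook. */

The leading "b ftrace_stub" keeps _mcount inert until the ftrace core has initialised; after that the individual call sites and the ftrace_call slot above are live-patched from arch/mips/kernel/ftrace.c.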
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 225755d0c1f6..1d04807874db 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/checksum.h> | 13 | #include <asm/checksum.h> |
14 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/ftrace.h> | ||
16 | 17 | ||
17 | extern void *__bzero(void *__s, size_t __count); | 18 | extern void *__bzero(void *__s, size_t __count); |
18 | extern long __strncpy_from_user_nocheck_asm(char *__to, | 19 | extern long __strncpy_from_user_nocheck_asm(char *__to, |
@@ -51,3 +52,7 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); | |||
51 | EXPORT_SYMBOL(__csum_partial_copy_user); | 52 | EXPORT_SYMBOL(__csum_partial_copy_user); |
52 | 53 | ||
53 | EXPORT_SYMBOL(invalid_pte_table); | 54 | EXPORT_SYMBOL(invalid_pte_table); |
55 | #ifdef CONFIG_FUNCTION_TRACER | ||
56 | /* _mcount is defined in arch/mips/kernel/mcount.S */ | ||
57 | EXPORT_SYMBOL(_mcount); | ||
58 | #endif | ||
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fd2a9bb620d6..17202bbe843f 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -583,6 +583,7 @@ einval: li v0, -ENOSYS | |||
583 | sys sys_rt_tgsigqueueinfo 4 | 583 | sys sys_rt_tgsigqueueinfo 4 |
584 | sys sys_perf_event_open 5 | 584 | sys sys_perf_event_open 5 |
585 | sys sys_accept4 4 | 585 | sys sys_accept4 4 |
586 | sys sys_recvmmsg 5 | ||
586 | .endm | 587 | .endm |
587 | 588 | ||
588 | /* We pre-compute the number of _instruction_ bytes needed to | 589 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 18bf7f32c5e4..a8a6c596eb04 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -420,4 +420,5 @@ sys_call_table: | |||
420 | PTR sys_rt_tgsigqueueinfo | 420 | PTR sys_rt_tgsigqueueinfo |
421 | PTR sys_perf_event_open | 421 | PTR sys_perf_event_open |
422 | PTR sys_accept4 | 422 | PTR sys_accept4 |
423 | PTR sys_recvmmsg | ||
423 | .size sys_call_table,.-sys_call_table | 424 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 6ebc07976694..66b5a48676dd 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -272,7 +272,7 @@ EXPORT(sysn32_call_table) | |||
272 | PTR sys_munlockall | 272 | PTR sys_munlockall |
273 | PTR sys_vhangup /* 6150 */ | 273 | PTR sys_vhangup /* 6150 */ |
274 | PTR sys_pivot_root | 274 | PTR sys_pivot_root |
275 | PTR sys_32_sysctl | 275 | PTR compat_sys_sysctl |
276 | PTR sys_prctl | 276 | PTR sys_prctl |
277 | PTR compat_sys_adjtimex | 277 | PTR compat_sys_adjtimex |
278 | PTR compat_sys_setrlimit /* 6155 */ | 278 | PTR compat_sys_setrlimit /* 6155 */ |
@@ -418,4 +418,5 @@ EXPORT(sysn32_call_table) | |||
418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ | 418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ |
419 | PTR sys_perf_event_open | 419 | PTR sys_perf_event_open |
420 | PTR sys_accept4 | 420 | PTR sys_accept4 |
421 | PTR compat_sys_recvmmsg | ||
421 | .size sysn32_call_table,.-sysn32_call_table | 422 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 9bbf9775e0bd..515f9eab2b28 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -356,7 +356,7 @@ sys_call_table: | |||
356 | PTR sys_ni_syscall /* 4150 */ | 356 | PTR sys_ni_syscall /* 4150 */ |
357 | PTR sys_getsid | 357 | PTR sys_getsid |
358 | PTR sys_fdatasync | 358 | PTR sys_fdatasync |
359 | PTR sys_32_sysctl | 359 | PTR compat_sys_sysctl |
360 | PTR sys_mlock | 360 | PTR sys_mlock |
361 | PTR sys_munlock /* 4155 */ | 361 | PTR sys_munlock /* 4155 */ |
362 | PTR sys_mlockall | 362 | PTR sys_mlockall |
@@ -450,7 +450,7 @@ sys_call_table: | |||
450 | PTR sys_io_submit | 450 | PTR sys_io_submit |
451 | PTR sys_io_cancel /* 4245 */ | 451 | PTR sys_io_cancel /* 4245 */ |
452 | PTR sys_exit_group | 452 | PTR sys_exit_group |
453 | PTR sys_lookup_dcookie | 453 | PTR sys32_lookup_dcookie |
454 | PTR sys_epoll_create | 454 | PTR sys_epoll_create |
455 | PTR sys_epoll_ctl | 455 | PTR sys_epoll_ctl |
456 | PTR sys_epoll_wait /* 4250 */ | 456 | PTR sys_epoll_wait /* 4250 */ |
@@ -505,7 +505,7 @@ sys_call_table: | |||
505 | PTR sys_fchmodat | 505 | PTR sys_fchmodat |
506 | PTR sys_faccessat /* 4300 */ | 506 | PTR sys_faccessat /* 4300 */ |
507 | PTR compat_sys_pselect6 | 507 | PTR compat_sys_pselect6 |
508 | PTR sys_ppoll | 508 | PTR compat_sys_ppoll |
509 | PTR sys_unshare | 509 | PTR sys_unshare |
510 | PTR sys_splice | 510 | PTR sys_splice |
511 | PTR sys32_sync_file_range /* 4305 */ | 511 | PTR sys32_sync_file_range /* 4305 */ |
@@ -538,4 +538,5 @@ sys_call_table: | |||
538 | PTR compat_sys_rt_tgsigqueueinfo | 538 | PTR compat_sys_rt_tgsigqueueinfo |
539 | PTR sys_perf_event_open | 539 | PTR sys_perf_event_open |
540 | PTR sys_accept4 | 540 | PTR sys_accept4 |
541 | PTR compat_sys_recvmmsg | ||
541 | .size sys_call_table,.-sys_call_table | 542 | .size sys_call_table,.-sys_call_table |
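The new table slots follow the usual rule for 32-bit ABIs on a 64-bit kernel: calls whose arguments involve pointers, longs or time types get a compat_sys_* wrapper (sysctl, ppoll, recvmmsg), while lookup_dcookie only needs the small sys32 shim above to reassemble its 64-bit cookie from two registers. For reference, the native prototype being wired up is roughly (as declared in <linux/syscalls.h>):

	asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
				     unsigned int vlen, unsigned int flags,
				     struct timespec __user *timeout);

Five arguments is also why the o32 entry in scall32-o32.S registers it as "sys sys_recvmmsg 5": under o32 the fifth argument (the timeout) is passed on the user stack rather than in a register, and the entry code uses the recorded count to know how many stack words to fetch.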
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 2b290d70083e..f9513f9e61d3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -58,8 +58,12 @@ EXPORT_SYMBOL(mips_machtype); | |||
58 | 58 | ||
59 | struct boot_mem_map boot_mem_map; | 59 | struct boot_mem_map boot_mem_map; |
60 | 60 | ||
61 | static char command_line[CL_SIZE]; | 61 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
62 | char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE; | 62 | char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; |
63 | |||
64 | #ifdef CONFIG_CMDLINE_BOOL | ||
65 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
66 | #endif | ||
63 | 67 | ||
64 | /* | 68 | /* |
65 | * mips_io_port_base is the begin of the address space to which x86 style | 69 | * mips_io_port_base is the begin of the address space to which x86 style |
@@ -166,26 +170,8 @@ static unsigned long __init init_initrd(void) | |||
166 | * already set up initrd_start and initrd_end. In these cases | 170 | * already set up initrd_start and initrd_end. In these cases |
167 | * perfom sanity checks and use them if all looks good. | 171 | * perfom sanity checks and use them if all looks good. |
168 | */ | 172 | */ |
169 | if (!initrd_start || initrd_end <= initrd_start) { | 173 | if (!initrd_start || initrd_end <= initrd_start) |
170 | #ifdef CONFIG_PROBE_INITRD_HEADER | ||
171 | u32 *initrd_header; | ||
172 | |||
173 | /* | ||
174 | * See if initrd has been added to the kernel image by | ||
175 | * arch/mips/boot/addinitrd.c. In that case a header is | ||
176 | * prepended to initrd and is made up by 8 bytes. The first | ||
177 | * word is a magic number and the second one is the size of | ||
178 | * initrd. Initrd start must be page aligned in any cases. | ||
179 | */ | ||
180 | initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8; | ||
181 | if (initrd_header[0] != 0x494E5244) | ||
182 | goto disable; | ||
183 | initrd_start = (unsigned long)(initrd_header + 2); | ||
184 | initrd_end = initrd_start + initrd_header[1]; | ||
185 | #else | ||
186 | goto disable; | 174 | goto disable; |
187 | #endif | ||
188 | } | ||
189 | 175 | ||
190 | if (initrd_start & ~PAGE_MASK) { | 176 | if (initrd_start & ~PAGE_MASK) { |
191 | pr_err("initrd start must be page aligned\n"); | 177 | pr_err("initrd start must be page aligned\n"); |
@@ -476,8 +462,20 @@ static void __init arch_mem_init(char **cmdline_p) | |||
476 | pr_info("Determined physical RAM map:\n"); | 462 | pr_info("Determined physical RAM map:\n"); |
477 | print_memory_map(); | 463 | print_memory_map(); |
478 | 464 | ||
479 | strlcpy(command_line, arcs_cmdline, sizeof(command_line)); | 465 | #ifdef CONFIG_CMDLINE_BOOL |
480 | strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); | 466 | #ifdef CONFIG_CMDLINE_OVERRIDE |
467 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
468 | #else | ||
469 | if (builtin_cmdline[0]) { | ||
470 | strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); | ||
471 | strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE); | ||
472 | } | ||
473 | strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); | ||
474 | #endif | ||
475 | #else | ||
476 | strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); | ||
477 | #endif | ||
478 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | ||
481 | 479 | ||
482 | *cmdline_p = command_line; | 480 | *cmdline_p = command_line; |
483 | 481 | ||
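A concrete example of the new command-line plumbing (values invented for illustration): say the firmware hands over arcs_cmdline = "console=ttyS0,115200" and the kernel was configured with CONFIG_CMDLINE="earlyprintk debug". With CONFIG_CMDLINE_BOOL set, boot_command_line becomes "console=ttyS0,115200 earlyprintk debug", i.e. the built-in string is appended to whatever the bootloader supplied. With CONFIG_CMDLINE_OVERRIDE also set, the firmware string is ignored entirely and the kernel boots with just "earlyprintk debug". With CONFIG_CMDLINE_BOOL unset, the behaviour is unchanged from before: arcs_cmdline is used as-is.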
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 6254041b942f..d0c68b5d717b 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -35,6 +35,15 @@ | |||
35 | 35 | ||
36 | #include "signal-common.h" | 36 | #include "signal-common.h" |
37 | 37 | ||
38 | static int (*save_fp_context)(struct sigcontext __user *sc); | ||
39 | static int (*restore_fp_context)(struct sigcontext __user *sc); | ||
40 | |||
41 | extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); | ||
42 | extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); | ||
43 | |||
44 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); | ||
45 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); | ||
46 | |||
38 | /* | 47 | /* |
39 | * Horribly complicated - with the bloody RM9000 workarounds enabled | 48 | * Horribly complicated - with the bloody RM9000 workarounds enabled |
40 | * the signal trampolines is moving to the end of the structure so we can | 49 | * the signal trampolines is moving to the end of the structure so we can |
@@ -709,3 +718,40 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | |||
709 | key_replace_session_keyring(); | 718 | key_replace_session_keyring(); |
710 | } | 719 | } |
711 | } | 720 | } |
721 | |||
722 | #ifdef CONFIG_SMP | ||
723 | static int smp_save_fp_context(struct sigcontext __user *sc) | ||
724 | { | ||
725 | return raw_cpu_has_fpu | ||
726 | ? _save_fp_context(sc) | ||
727 | : fpu_emulator_save_context(sc); | ||
728 | } | ||
729 | |||
730 | static int smp_restore_fp_context(struct sigcontext __user *sc) | ||
731 | { | ||
732 | return raw_cpu_has_fpu | ||
733 | ? _restore_fp_context(sc) | ||
734 | : fpu_emulator_restore_context(sc); | ||
735 | } | ||
736 | #endif | ||
737 | |||
738 | static int signal_setup(void) | ||
739 | { | ||
740 | #ifdef CONFIG_SMP | ||
741 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
742 | save_fp_context = smp_save_fp_context; | ||
743 | restore_fp_context = smp_restore_fp_context; | ||
744 | #else | ||
745 | if (cpu_has_fpu) { | ||
746 | save_fp_context = _save_fp_context; | ||
747 | restore_fp_context = _restore_fp_context; | ||
748 | } else { | ||
749 | save_fp_context = fpu_emulator_save_context; | ||
750 | restore_fp_context = fpu_emulator_restore_context; | ||
751 | } | ||
752 | #endif | ||
753 | |||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | arch_initcall(signal_setup); | ||
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 2e74075ac0ca..03abaf048f09 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -35,6 +35,15 @@ | |||
35 | 35 | ||
36 | #include "signal-common.h" | 36 | #include "signal-common.h" |
37 | 37 | ||
38 | static int (*save_fp_context32)(struct sigcontext32 __user *sc); | ||
39 | static int (*restore_fp_context32)(struct sigcontext32 __user *sc); | ||
40 | |||
41 | extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); | ||
42 | extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); | ||
43 | |||
44 | extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); | ||
45 | extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); | ||
46 | |||
38 | /* | 47 | /* |
39 | * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... | 48 | * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... |
40 | */ | 49 | */ |
@@ -828,3 +837,18 @@ SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid, | |||
828 | info.si_code |= __SI_CHLD; | 837 | info.si_code |= __SI_CHLD; |
829 | return copy_siginfo_to_user32(uinfo, &info); | 838 | return copy_siginfo_to_user32(uinfo, &info); |
830 | } | 839 | } |
840 | |||
841 | static int signal32_init(void) | ||
842 | { | ||
843 | if (cpu_has_fpu) { | ||
844 | save_fp_context32 = _save_fp_context32; | ||
845 | restore_fp_context32 = _restore_fp_context32; | ||
846 | } else { | ||
847 | save_fp_context32 = fpu_emulator_save_context32; | ||
848 | restore_fp_context32 = fpu_emulator_restore_context32; | ||
849 | } | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | arch_initcall(signal32_init); | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index e72e6844d134..6cdca1956b77 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/cpumask.h> | 32 | #include <linux/cpumask.h> |
33 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | #include <linux/err.h> | 34 | #include <linux/err.h> |
35 | #include <linux/ftrace.h> | ||
35 | 36 | ||
36 | #include <asm/atomic.h> | 37 | #include <asm/atomic.h> |
37 | #include <asm/cpu.h> | 38 | #include <asm/cpu.h> |
@@ -130,7 +131,7 @@ asmlinkage __cpuinit void start_secondary(void) | |||
130 | /* | 131 | /* |
131 | * Call into both interrupt handlers, as we share the IPI for them | 132 | * Call into both interrupt handlers, as we share the IPI for them |
132 | */ | 133 | */ |
133 | void smp_call_function_interrupt(void) | 134 | void __irq_entry smp_call_function_interrupt(void) |
134 | { | 135 | { |
135 | irq_enter(); | 136 | irq_enter(); |
136 | generic_smp_call_function_single_interrupt(); | 137 | generic_smp_call_function_single_interrupt(); |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 4d181df44a40..23499b5bd9c3 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/kernel_stat.h> | 26 | #include <linux/kernel_stat.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/ftrace.h> | ||
28 | 29 | ||
29 | #include <asm/cpu.h> | 30 | #include <asm/cpu.h> |
30 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
@@ -75,7 +76,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
75 | 76 | ||
76 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 77 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
77 | 78 | ||
78 | |||
79 | /* | 79 | /* |
80 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 80 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
81 | */ | 81 | */ |
@@ -388,6 +388,7 @@ void smtc_prepare_cpus(int cpus) | |||
388 | IPIQ[i].head = IPIQ[i].tail = NULL; | 388 | IPIQ[i].head = IPIQ[i].tail = NULL; |
389 | spin_lock_init(&IPIQ[i].lock); | 389 | spin_lock_init(&IPIQ[i].lock); |
390 | IPIQ[i].depth = 0; | 390 | IPIQ[i].depth = 0; |
391 | IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ | ||
391 | } | 392 | } |
392 | 393 | ||
393 | /* cpu_data index starts at zero */ | 394 | /* cpu_data index starts at zero */ |
@@ -741,11 +742,24 @@ void smtc_forward_irq(unsigned int irq) | |||
741 | static void smtc_ipi_qdump(void) | 742 | static void smtc_ipi_qdump(void) |
742 | { | 743 | { |
743 | int i; | 744 | int i; |
745 | struct smtc_ipi *temp; | ||
744 | 746 | ||
745 | for (i = 0; i < NR_CPUS ;i++) { | 747 | for (i = 0; i < NR_CPUS ;i++) { |
746 | printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", | 748 | pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", |
747 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, | 749 | i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, |
748 | IPIQ[i].depth); | 750 | IPIQ[i].depth); |
751 | temp = IPIQ[i].head; | ||
752 | |||
753 | while (temp != IPIQ[i].tail) { | ||
754 | pr_debug("%d %d %d: ", temp->type, temp->dest, | ||
755 | (int)temp->arg); | ||
756 | #ifdef SMTC_IPI_DEBUG | ||
757 | pr_debug("%u %lu\n", temp->sender, temp->stamp); | ||
758 | #else | ||
759 | pr_debug("\n"); | ||
760 | #endif | ||
761 | temp = temp->flink; | ||
762 | } | ||
749 | } | 763 | } |
750 | } | 764 | } |
751 | 765 | ||
@@ -784,11 +798,16 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
784 | int mtflags; | 798 | int mtflags; |
785 | unsigned long tcrestart; | 799 | unsigned long tcrestart; |
786 | extern void r4k_wait_irqoff(void), __pastwait(void); | 800 | extern void r4k_wait_irqoff(void), __pastwait(void); |
801 | int set_resched_flag = (type == LINUX_SMP_IPI && | ||
802 | action == SMP_RESCHEDULE_YOURSELF); | ||
787 | 803 | ||
788 | if (cpu == smp_processor_id()) { | 804 | if (cpu == smp_processor_id()) { |
789 | printk("Cannot Send IPI to self!\n"); | 805 | printk("Cannot Send IPI to self!\n"); |
790 | return; | 806 | return; |
791 | } | 807 | } |
808 | if (set_resched_flag && IPIQ[cpu].resched_flag != 0) | ||
809 | return; /* There is a reschedule queued already */ | ||
810 | |||
792 | /* Set up a descriptor, to be delivered either promptly or queued */ | 811 | /* Set up a descriptor, to be delivered either promptly or queued */ |
793 | pipi = smtc_ipi_dq(&freeIPIq); | 812 | pipi = smtc_ipi_dq(&freeIPIq); |
794 | if (pipi == NULL) { | 813 | if (pipi == NULL) { |
@@ -801,6 +820,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
801 | pipi->dest = cpu; | 820 | pipi->dest = cpu; |
802 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 821 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
803 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 822 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
823 | IPIQ[cpu].resched_flag |= set_resched_flag; | ||
804 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 824 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
805 | LOCK_CORE_PRA(); | 825 | LOCK_CORE_PRA(); |
806 | settc(cpu_data[cpu].tc_id); | 826 | settc(cpu_data[cpu].tc_id); |
@@ -847,6 +867,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
847 | */ | 867 | */ |
848 | write_tc_c0_tchalt(0); | 868 | write_tc_c0_tchalt(0); |
849 | UNLOCK_CORE_PRA(); | 869 | UNLOCK_CORE_PRA(); |
870 | IPIQ[cpu].resched_flag |= set_resched_flag; | ||
850 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 871 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
851 | } else { | 872 | } else { |
852 | postdirect: | 873 | postdirect: |
@@ -919,23 +940,29 @@ static void ipi_call_interrupt(void) | |||
919 | 940 | ||
920 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 941 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
921 | 942 | ||
922 | void ipi_decode(struct smtc_ipi *pipi) | 943 | static void __irq_entry smtc_clock_tick_interrupt(void) |
923 | { | 944 | { |
924 | unsigned int cpu = smp_processor_id(); | 945 | unsigned int cpu = smp_processor_id(); |
925 | struct clock_event_device *cd; | 946 | struct clock_event_device *cd; |
947 | int irq = MIPS_CPU_IRQ_BASE + 1; | ||
948 | |||
949 | irq_enter(); | ||
950 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | ||
951 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
952 | cd->event_handler(cd); | ||
953 | irq_exit(); | ||
954 | } | ||
955 | |||
956 | void ipi_decode(struct smtc_ipi *pipi) | ||
957 | { | ||
926 | void *arg_copy = pipi->arg; | 958 | void *arg_copy = pipi->arg; |
927 | int type_copy = pipi->type; | 959 | int type_copy = pipi->type; |
928 | int irq = MIPS_CPU_IRQ_BASE + 1; | ||
929 | 960 | ||
930 | smtc_ipi_nq(&freeIPIq, pipi); | 961 | smtc_ipi_nq(&freeIPIq, pipi); |
931 | 962 | ||
932 | switch (type_copy) { | 963 | switch (type_copy) { |
933 | case SMTC_CLOCK_TICK: | 964 | case SMTC_CLOCK_TICK: |
934 | irq_enter(); | 965 | smtc_clock_tick_interrupt(); |
935 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | ||
936 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
937 | cd->event_handler(cd); | ||
938 | irq_exit(); | ||
939 | break; | 966 | break; |
940 | 967 | ||
941 | case LINUX_SMP_IPI: | 968 | case LINUX_SMP_IPI: |
@@ -996,12 +1023,15 @@ void deferred_smtc_ipi(void) | |||
996 | * already enabled. | 1023 | * already enabled. |
997 | */ | 1024 | */ |
998 | local_irq_save(flags); | 1025 | local_irq_save(flags); |
999 | |||
1000 | spin_lock(&q->lock); | 1026 | spin_lock(&q->lock); |
1001 | pipi = __smtc_ipi_dq(q); | 1027 | pipi = __smtc_ipi_dq(q); |
1002 | spin_unlock(&q->lock); | 1028 | spin_unlock(&q->lock); |
1003 | if (pipi != NULL) | 1029 | if (pipi != NULL) { |
1030 | if (pipi->type == LINUX_SMP_IPI && | ||
1031 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) | ||
1032 | IPIQ[cpu].resched_flag = 0; | ||
1004 | ipi_decode(pipi); | 1033 | ipi_decode(pipi); |
1034 | } | ||
1005 | /* | 1035 | /* |
1006 | * The use of the __raw_local restore isn't | 1036 | * The use of the __raw_local restore isn't |
1007 | * as obviously necessary here as in smtc_ipi_replay(), | 1037 | * as obviously necessary here as in smtc_ipi_replay(), |
@@ -1082,6 +1112,9 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | |||
1082 | * with interrupts off | 1112 | * with interrupts off |
1083 | */ | 1113 | */ |
1084 | local_irq_save(flags); | 1114 | local_irq_save(flags); |
1115 | if (pipi->type == LINUX_SMP_IPI && | ||
1116 | (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) | ||
1117 | IPIQ[cpu].resched_flag = 0; | ||
1085 | ipi_decode(pipi); | 1118 | ipi_decode(pipi); |
1086 | local_irq_restore(flags); | 1119 | local_irq_restore(flags); |
1087 | } | 1120 | } |
@@ -1305,7 +1338,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1305 | if (!((asid += ASID_INC) & ASID_MASK) ) { | 1338 | if (!((asid += ASID_INC) & ASID_MASK) ) { |
1306 | if (cpu_has_vtag_icache) | 1339 | if (cpu_has_vtag_icache) |
1307 | flush_icache_all(); | 1340 | flush_icache_all(); |
1308 | /* Traverse all online CPUs (hack requires contigous range) */ | 1341 | /* Traverse all online CPUs (hack requires contiguous range) */ |
1309 | for_each_online_cpu(i) { | 1342 | for_each_online_cpu(i) { |
1310 | /* | 1343 | /* |
1311 | * We don't need to worry about our own CPU, nor those of | 1344 | * We don't need to worry about our own CPU, nor those of |
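The effect of the new resched_flag is easiest to see with a scenario (invented for illustration): CPU 0 sends SMP_RESCHEDULE_YOURSELF to a TC on another VPE and the message has to sit in IPIQ[cpu] because the target cannot take it immediately. If anyone tries to send another reschedule to the same CPU before the queued one has been processed, smtc_send_ipi() now returns early instead of queueing a duplicate; a reschedule carries no payload, so one pending message is as good as ten. The flag is cleared in deferred_smtc_ipi()/ipi_interrupt() at the moment the queued reschedule is actually decoded, so subsequent reschedules queue normally again.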
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 6ddb507a87ef..1821d12a6410 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/ptrace.h> | 13 | #include <linux/ptrace.h> |
14 | #include <linux/stddef.h> | 14 | #include <linux/stddef.h> |
15 | 15 | ||
16 | #include <asm/cpu.h> | ||
17 | #include <asm/fpu.h> | 16 | #include <asm/fpu.h> |
18 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
19 | #include <asm/system.h> | 18 | #include <asm/system.h> |
@@ -198,8 +197,7 @@ static __cpuinit void probe_spram(char *type, | |||
198 | offset += 2 * SPRAM_TAG_STRIDE; | 197 | offset += 2 * SPRAM_TAG_STRIDE; |
199 | } | 198 | } |
200 | } | 199 | } |
201 | 200 | void __cpuinit spram_config(void) | |
202 | __cpuinit void spram_config(void) | ||
203 | { | 201 | { |
204 | struct cpuinfo_mips *c = ¤t_cpu_data; | 202 | struct cpuinfo_mips *c = ¤t_cpu_data; |
205 | unsigned int config0; | 203 | unsigned int config0; |
@@ -208,6 +206,7 @@ __cpuinit void spram_config(void) | |||
208 | case CPU_24K: | 206 | case CPU_24K: |
209 | case CPU_34K: | 207 | case CPU_34K: |
210 | case CPU_74K: | 208 | case CPU_74K: |
209 | case CPU_1004K: | ||
211 | config0 = read_c0_config(); | 210 | config0 = read_c0_config(); |
212 | /* FIXME: addresses are Malta specific */ | 211 | /* FIXME: addresses are Malta specific */ |
213 | if (config0 & (1<<24)) { | 212 | if (config0 & (1<<24)) { |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 3fe1fcfa2e73..3f7f466190b4 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -93,7 +93,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
93 | * We do not accept a shared mapping if it would violate | 93 | * We do not accept a shared mapping if it would violate |
94 | * cache aliasing constraints. | 94 | * cache aliasing constraints. |
95 | */ | 95 | */ |
96 | if ((flags & MAP_SHARED) && (addr & shm_align_mask)) | 96 | if ((flags & MAP_SHARED) && |
97 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
97 | return -EINVAL; | 98 | return -EINVAL; |
98 | return addr; | 99 | return addr; |
99 | } | 100 | } |
@@ -129,31 +130,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
129 | } | 130 | } |
130 | } | 131 | } |
131 | 132 | ||
132 | /* common code for old and new mmaps */ | ||
133 | static inline unsigned long | ||
134 | do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
135 | unsigned long flags, unsigned long fd, unsigned long pgoff) | ||
136 | { | ||
137 | unsigned long error = -EBADF; | ||
138 | struct file * file = NULL; | ||
139 | |||
140 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
141 | if (!(flags & MAP_ANONYMOUS)) { | ||
142 | file = fget(fd); | ||
143 | if (!file) | ||
144 | goto out; | ||
145 | } | ||
146 | |||
147 | down_write(¤t->mm->mmap_sem); | ||
148 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
149 | up_write(¤t->mm->mmap_sem); | ||
150 | |||
151 | if (file) | ||
152 | fput(file); | ||
153 | out: | ||
154 | return error; | ||
155 | } | ||
156 | |||
157 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | 133 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, |
158 | unsigned long, prot, unsigned long, flags, unsigned long, | 134 | unsigned long, prot, unsigned long, flags, unsigned long, |
159 | fd, off_t, offset) | 135 | fd, off_t, offset) |
@@ -164,7 +140,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | |||
164 | if (offset & ~PAGE_MASK) | 140 | if (offset & ~PAGE_MASK) |
165 | goto out; | 141 | goto out; |
166 | 142 | ||
167 | result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | 143 | result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); |
168 | 144 | ||
169 | out: | 145 | out: |
170 | return result; | 146 | return result; |
@@ -177,7 +153,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, | |||
177 | if (pgoff & (~PAGE_MASK >> 12)) | 153 | if (pgoff & (~PAGE_MASK >> 12)) |
178 | return -EINVAL; | 154 | return -EINVAL; |
179 | 155 | ||
180 | return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); | 156 | return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); |
181 | } | 157 | } |
182 | 158 | ||
183 | save_static_function(sys_fork); | 159 | save_static_function(sys_fork); |
@@ -306,6 +282,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
306 | 282 | ||
307 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 283 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
308 | __asm__ __volatile__ ( | 284 | __asm__ __volatile__ ( |
285 | " .set mips3 \n" | ||
309 | " li %[err], 0 \n" | 286 | " li %[err], 0 \n" |
310 | "1: ll %[old], (%[addr]) \n" | 287 | "1: ll %[old], (%[addr]) \n" |
311 | " move %[tmp], %[new] \n" | 288 | " move %[tmp], %[new] \n" |
@@ -320,6 +297,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
320 | " "STR(PTR)" 1b, 4b \n" | 297 | " "STR(PTR)" 1b, 4b \n" |
321 | " "STR(PTR)" 2b, 4b \n" | 298 | " "STR(PTR)" 2b, 4b \n" |
322 | " .previous \n" | 299 | " .previous \n" |
300 | " .set mips0 \n" | ||
323 | : [old] "=&r" (old), | 301 | : [old] "=&r" (old), |
324 | [err] "=&r" (err), | 302 | [err] "=&r" (err), |
325 | [tmp] "=&r" (tmp) | 303 | [tmp] "=&r" (tmp) |
@@ -329,6 +307,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
329 | : "memory"); | 307 | : "memory"); |
330 | } else if (cpu_has_llsc) { | 308 | } else if (cpu_has_llsc) { |
331 | __asm__ __volatile__ ( | 309 | __asm__ __volatile__ ( |
310 | " .set mips3 \n" | ||
332 | " li %[err], 0 \n" | 311 | " li %[err], 0 \n" |
333 | "1: ll %[old], (%[addr]) \n" | 312 | "1: ll %[old], (%[addr]) \n" |
334 | " move %[tmp], %[new] \n" | 313 | " move %[tmp], %[new] \n" |
@@ -347,6 +326,7 @@ static inline int mips_atomic_set(struct pt_regs *regs, | |||
347 | " "STR(PTR)" 1b, 5b \n" | 326 | " "STR(PTR)" 1b, 5b \n" |
348 | " "STR(PTR)" 2b, 5b \n" | 327 | " "STR(PTR)" 2b, 5b \n" |
349 | " .previous \n" | 328 | " .previous \n" |
329 | " .set mips0 \n" | ||
350 | : [old] "=&r" (old), | 330 | : [old] "=&r" (old), |
351 | [err] "=&r" (err), | 331 | [err] "=&r" (err), |
352 | [tmp] "=&r" (tmp) | 332 | [tmp] "=&r" (tmp) |
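Two of the changes above are worth a worked illustration. First, the relaxed MAP_SHARED check (numbers invented): with shm_align_mask = 0x3fff (16 KB cache aliasing) and PAGE_SHIFT = 12, a request for pgoff = 1 (file offset 0x1000) at addr = 0x5000 used to be rejected because addr & 0x3fff = 0x1000 is non-zero; the new test computes (0x5000 - 0x1000) & 0x3fff = 0 and accepts it, since the hint shares a cache colour with the file offset, which is all that aliasing avoidance actually requires. The same request at addr = 0x6000 is still rejected, because (0x6000 - 0x1000) & 0x3fff = 0x1000. Second, the ".set mips3" / ".set mips0" bracketing around the ll/sc sequences temporarily raises the assembler's ISA level so those instructions are accepted even when the overall build targets an older ISA, then restores the default for the rest of the file.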
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 1f467d534642..fb7497405510 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -71,39 +71,6 @@ EXPORT_SYMBOL(perf_irq); | |||
71 | 71 | ||
72 | unsigned int mips_hpt_frequency; | 72 | unsigned int mips_hpt_frequency; |
73 | 73 | ||
74 | void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock) | ||
75 | { | ||
76 | u64 temp; | ||
77 | u32 shift; | ||
78 | |||
79 | /* Find a shift value */ | ||
80 | for (shift = 32; shift > 0; shift--) { | ||
81 | temp = (u64) NSEC_PER_SEC << shift; | ||
82 | do_div(temp, clock); | ||
83 | if ((temp >> 32) == 0) | ||
84 | break; | ||
85 | } | ||
86 | cs->shift = shift; | ||
87 | cs->mult = (u32) temp; | ||
88 | } | ||
89 | |||
90 | void __cpuinit clockevent_set_clock(struct clock_event_device *cd, | ||
91 | unsigned int clock) | ||
92 | { | ||
93 | u64 temp; | ||
94 | u32 shift; | ||
95 | |||
96 | /* Find a shift value */ | ||
97 | for (shift = 32; shift > 0; shift--) { | ||
98 | temp = (u64) clock << shift; | ||
99 | do_div(temp, NSEC_PER_SEC); | ||
100 | if ((temp >> 32) == 0) | ||
101 | break; | ||
102 | } | ||
103 | cd->shift = shift; | ||
104 | cd->mult = (u32) temp; | ||
105 | } | ||
106 | |||
107 | /* | 74 | /* |
108 | * This function exists in order to cause an error due to a duplicate | 75 | * This function exists in order to cause an error due to a duplicate |
109 | * definition if platform code should have its own implementation. The hook | 76 | * definition if platform code should have its own implementation. The hook |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0a18b4c62afb..31b204b26ba0 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -25,10 +25,12 @@ | |||
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/kgdb.h> | 26 | #include <linux/kgdb.h> |
27 | #include <linux/kdebug.h> | 27 | #include <linux/kdebug.h> |
28 | #include <linux/notifier.h> | ||
28 | 29 | ||
29 | #include <asm/bootinfo.h> | 30 | #include <asm/bootinfo.h> |
30 | #include <asm/branch.h> | 31 | #include <asm/branch.h> |
31 | #include <asm/break.h> | 32 | #include <asm/break.h> |
33 | #include <asm/cop2.h> | ||
32 | #include <asm/cpu.h> | 34 | #include <asm/cpu.h> |
33 | #include <asm/dsp.h> | 35 | #include <asm/dsp.h> |
34 | #include <asm/fpu.h> | 36 | #include <asm/fpu.h> |
@@ -79,10 +81,6 @@ extern asmlinkage void handle_reserved(void); | |||
79 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, | 81 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
80 | struct mips_fpu_struct *ctx, int has_fpu); | 82 | struct mips_fpu_struct *ctx, int has_fpu); |
81 | 83 | ||
82 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
83 | extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task); | ||
84 | #endif | ||
85 | |||
86 | void (*board_be_init)(void); | 84 | void (*board_be_init)(void); |
87 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 85 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
88 | void (*board_nmi_handler_setup)(void); | 86 | void (*board_nmi_handler_setup)(void); |
@@ -857,6 +855,44 @@ static void mt_ase_fp_affinity(void) | |||
857 | #endif /* CONFIG_MIPS_MT_FPAFF */ | 855 | #endif /* CONFIG_MIPS_MT_FPAFF */ |
858 | } | 856 | } |
859 | 857 | ||
858 | /* | ||
859 | * No lock; only written during early bootup by CPU 0. | ||
860 | */ | ||
861 | static RAW_NOTIFIER_HEAD(cu2_chain); | ||
862 | |||
863 | int __ref register_cu2_notifier(struct notifier_block *nb) | ||
864 | { | ||
865 | return raw_notifier_chain_register(&cu2_chain, nb); | ||
866 | } | ||
867 | |||
868 | int cu2_notifier_call_chain(unsigned long val, void *v) | ||
869 | { | ||
870 | return raw_notifier_call_chain(&cu2_chain, val, v); | ||
871 | } | ||
872 | |||
873 | static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | ||
874 | void *data) | ||
875 | { | ||
876 | struct pt_regs *regs = data; | ||
877 | |||
878 | switch (action) { | ||
879 | default: | ||
880 | die_if_kernel("Unhandled kernel unaligned access or invalid " | ||
881 | "instruction", regs); | ||
882 | /* Fall through */ | ||
883 | |||
884 | case CU2_EXCEPTION: | ||
885 | force_sig(SIGILL, current); | ||
886 | } | ||
887 | |||
888 | return NOTIFY_OK; | ||
889 | } | ||
890 | |||
891 | static struct notifier_block default_cu2_notifier = { | ||
892 | .notifier_call = default_cu2_call, | ||
893 | .priority = 0x80000000, /* Run last */ | ||
894 | }; | ||
895 | |||
860 | asmlinkage void do_cpu(struct pt_regs *regs) | 896 | asmlinkage void do_cpu(struct pt_regs *regs) |
861 | { | 897 | { |
862 | unsigned int __user *epc; | 898 | unsigned int __user *epc; |
@@ -920,17 +956,9 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
920 | return; | 956 | return; |
921 | 957 | ||
922 | case 2: | 958 | case 2: |
923 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 959 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); |
924 | prefetch(¤t->thread.cp2); | 960 | break; |
925 | local_irq_save(flags); | 961 | |
926 | KSTK_STATUS(current) |= ST0_CU2; | ||
927 | status = read_c0_status(); | ||
928 | write_c0_status(status | ST0_CU2); | ||
929 | octeon_cop2_restore(&(current->thread.cp2)); | ||
930 | write_c0_status(status & ~ST0_CU2); | ||
931 | local_irq_restore(flags); | ||
932 | return; | ||
933 | #endif | ||
934 | case 3: | 962 | case 3: |
935 | break; | 963 | break; |
936 | } | 964 | } |
@@ -1367,77 +1395,6 @@ void *set_vi_handler(int n, vi_handler_t addr) | |||
1367 | return set_vi_srs_handler(n, addr, 0); | 1395 | return set_vi_srs_handler(n, addr, 0); |
1368 | } | 1396 | } |
1369 | 1397 | ||
1370 | /* | ||
1371 | * This is used by native signal handling | ||
1372 | */ | ||
1373 | asmlinkage int (*save_fp_context)(struct sigcontext __user *sc); | ||
1374 | asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc); | ||
1375 | |||
1376 | extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); | ||
1377 | extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); | ||
1378 | |||
1379 | extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); | ||
1380 | extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); | ||
1381 | |||
1382 | #ifdef CONFIG_SMP | ||
1383 | static int smp_save_fp_context(struct sigcontext __user *sc) | ||
1384 | { | ||
1385 | return raw_cpu_has_fpu | ||
1386 | ? _save_fp_context(sc) | ||
1387 | : fpu_emulator_save_context(sc); | ||
1388 | } | ||
1389 | |||
1390 | static int smp_restore_fp_context(struct sigcontext __user *sc) | ||
1391 | { | ||
1392 | return raw_cpu_has_fpu | ||
1393 | ? _restore_fp_context(sc) | ||
1394 | : fpu_emulator_restore_context(sc); | ||
1395 | } | ||
1396 | #endif | ||
1397 | |||
1398 | static inline void signal_init(void) | ||
1399 | { | ||
1400 | #ifdef CONFIG_SMP | ||
1401 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
1402 | save_fp_context = smp_save_fp_context; | ||
1403 | restore_fp_context = smp_restore_fp_context; | ||
1404 | #else | ||
1405 | if (cpu_has_fpu) { | ||
1406 | save_fp_context = _save_fp_context; | ||
1407 | restore_fp_context = _restore_fp_context; | ||
1408 | } else { | ||
1409 | save_fp_context = fpu_emulator_save_context; | ||
1410 | restore_fp_context = fpu_emulator_restore_context; | ||
1411 | } | ||
1412 | #endif | ||
1413 | } | ||
1414 | |||
1415 | #ifdef CONFIG_MIPS32_COMPAT | ||
1416 | |||
1417 | /* | ||
1418 | * This is used by 32-bit signal stuff on the 64-bit kernel | ||
1419 | */ | ||
1420 | asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc); | ||
1421 | asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc); | ||
1422 | |||
1423 | extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); | ||
1424 | extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); | ||
1425 | |||
1426 | extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); | ||
1427 | extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); | ||
1428 | |||
1429 | static inline void signal32_init(void) | ||
1430 | { | ||
1431 | if (cpu_has_fpu) { | ||
1432 | save_fp_context32 = _save_fp_context32; | ||
1433 | restore_fp_context32 = _restore_fp_context32; | ||
1434 | } else { | ||
1435 | save_fp_context32 = fpu_emulator_save_context32; | ||
1436 | restore_fp_context32 = fpu_emulator_restore_context32; | ||
1437 | } | ||
1438 | } | ||
1439 | #endif | ||
1440 | |||
1441 | extern void cpu_cache_init(void); | 1398 | extern void cpu_cache_init(void); |
1442 | extern void tlb_init(void); | 1399 | extern void tlb_init(void); |
1443 | extern void flush_tlb_handlers(void); | 1400 | extern void flush_tlb_handlers(void); |
@@ -1446,6 +1403,7 @@ extern void flush_tlb_handlers(void); | |||
1446 | * Timer interrupt | 1403 | * Timer interrupt |
1447 | */ | 1404 | */ |
1448 | int cp0_compare_irq; | 1405 | int cp0_compare_irq; |
1406 | int cp0_compare_irq_shift; | ||
1449 | 1407 | ||
1450 | /* | 1408 | /* |
1451 | * Performance counter IRQ or -1 if shared with timer | 1409 | * Performance counter IRQ or -1 if shared with timer |
@@ -1536,12 +1494,14 @@ void __cpuinit per_cpu_trap_init(void) | |||
1536 | * o read IntCtl.IPPCI to determine the performance counter interrupt | 1494 | * o read IntCtl.IPPCI to determine the performance counter interrupt |
1537 | */ | 1495 | */ |
1538 | if (cpu_has_mips_r2) { | 1496 | if (cpu_has_mips_r2) { |
1539 | cp0_compare_irq = (read_c0_intctl() >> 29) & 7; | 1497 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; |
1540 | cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7; | 1498 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; |
1499 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; | ||
1541 | if (cp0_perfcount_irq == cp0_compare_irq) | 1500 | if (cp0_perfcount_irq == cp0_compare_irq) |
1542 | cp0_perfcount_irq = -1; | 1501 | cp0_perfcount_irq = -1; |
1543 | } else { | 1502 | } else { |
1544 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; | 1503 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; |
1504 | cp0_compare_irq_shift = cp0_compare_irq; | ||
1545 | cp0_perfcount_irq = -1; | 1505 | cp0_perfcount_irq = -1; |
1546 | } | 1506 | } |
1547 | 1507 | ||
@@ -1751,13 +1711,10 @@ void __init trap_init(void) | |||
1751 | else | 1711 | else |
1752 | memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); | 1712 | memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); |
1753 | 1713 | ||
1754 | signal_init(); | ||
1755 | #ifdef CONFIG_MIPS32_COMPAT | ||
1756 | signal32_init(); | ||
1757 | #endif | ||
1758 | |||
1759 | local_flush_icache_range(ebase, ebase + 0x400); | 1714 | local_flush_icache_range(ebase, ebase + 0x400); |
1760 | flush_tlb_handlers(); | 1715 | flush_tlb_handlers(); |
1761 | 1716 | ||
1762 | sort_extable(__start___dbe_table, __stop___dbe_table); | 1717 | sort_extable(__start___dbe_table, __stop___dbe_table); |
1718 | |||
1719 | register_cu2_notifier(&default_cu2_notifier); | ||
1763 | } | 1720 | } |
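The cu2_chain replaces the hard-wired Octeon hook with a notifier that any platform owning coprocessor 2 can claim. A board-specific handler registers at normal priority, so it runs before the default notifier above (priority 0x80000000, i.e. last) and can stop the chain before the SIGILL path. The sketch below is hypothetical: my_cop2_restore() is an invented helper standing in for whatever the platform does to reload its COP2 state, and register_cu2_notifier() is presumed to be declared alongside the CU2_* constants in <asm/cop2.h>, which lies outside this diffstat:

	#include <linux/notifier.h>
	#include <linux/sched.h>
	#include <asm/cop2.h>
	#include <asm/mipsregs.h>
	#include <asm/processor.h>

	/* Invented helper: reload this task's saved COP2 state into the hardware. */
	extern void my_cop2_restore(struct task_struct *t);

	static int my_cu2_call(struct notifier_block *nb, unsigned long action,
			       void *data)	/* data is the faulting task's pt_regs */
	{
		if (action != CU2_EXCEPTION)
			return NOTIFY_OK;	/* leave LWC2/SWC2/... to others */

		/* A real handler would also deal with IRQs/preemption here. */
		KSTK_STATUS(current) |= ST0_CU2;	/* grant the task COP2 access */
		my_cop2_restore(current);

		return NOTIFY_STOP;	/* handled: skip the default SIGILL notifier */
	}

	static struct notifier_block my_cu2_notifier = {
		.notifier_call	= my_cu2_call,
	};

Platform setup code would then call register_cu2_notifier(&my_cu2_notifier); any return value with NOTIFY_STOP_MASK set ends the raw_notifier_call_chain() walk, which is what keeps default_cu2_call() from firing.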
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 67bd626942ab..69b039ca8d83 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -81,6 +81,7 @@ | |||
81 | #include <asm/asm.h> | 81 | #include <asm/asm.h> |
82 | #include <asm/branch.h> | 82 | #include <asm/branch.h> |
83 | #include <asm/byteorder.h> | 83 | #include <asm/byteorder.h> |
84 | #include <asm/cop2.h> | ||
84 | #include <asm/inst.h> | 85 | #include <asm/inst.h> |
85 | #include <asm/uaccess.h> | 86 | #include <asm/uaccess.h> |
86 | #include <asm/system.h> | 87 | #include <asm/system.h> |
@@ -451,17 +452,27 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
451 | */ | 452 | */ |
452 | goto sigbus; | 453 | goto sigbus; |
453 | 454 | ||
455 | /* | ||
456 | * COP2 is available to implementor for application specific use. | ||
457 | * It's up to applications to register a notifier chain and do | ||
458 | * whatever they have to do, including possible sending of signals. | ||
459 | */ | ||
454 | case lwc2_op: | 460 | case lwc2_op: |
461 | cu2_notifier_call_chain(CU2_LWC2_OP, regs); | ||
462 | break; | ||
463 | |||
455 | case ldc2_op: | 464 | case ldc2_op: |
465 | cu2_notifier_call_chain(CU2_LDC2_OP, regs); | ||
466 | break; | ||
467 | |||
456 | case swc2_op: | 468 | case swc2_op: |
469 | cu2_notifier_call_chain(CU2_SWC2_OP, regs); | ||
470 | break; | ||
471 | |||
457 | case sdc2_op: | 472 | case sdc2_op: |
458 | /* | 473 | cu2_notifier_call_chain(CU2_SDC2_OP, regs); |
459 | * These are the coprocessor 2 load/stores. The current | 474 | break; |
460 | * implementations don't use cp2 and cp2 should always be | 475 | |
461 | * disabled in c0_status. So send SIGILL. | ||
462 | * (No longer true: The Sony Praystation uses cp2 for | ||
463 | * 3D matrix operations. Dunno if that thingy has a MMU ...) | ||
464 | */ | ||
465 | default: | 476 | default: |
466 | /* | 477 | /* |
467 | * Pheeee... We encountered an yet unknown instruction or | 478 | * Pheeee... We encountered an yet unknown instruction or |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 162b29954baa..f25df73db923 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -46,6 +46,7 @@ SECTIONS | |||
46 | SCHED_TEXT | 46 | SCHED_TEXT |
47 | LOCK_TEXT | 47 | LOCK_TEXT |
48 | KPROBES_TEXT | 48 | KPROBES_TEXT |
49 | IRQENTRY_TEXT | ||
49 | *(.text.*) | 50 | *(.text.*) |
50 | *(.fixup) | 51 | *(.fixup) |
51 | *(.gnu.warning) | 52 | *(.gnu.warning) |
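IRQENTRY_TEXT collects every function tagged __irq_entry (do_IRQ(), smp_call_function_interrupt(), the SMTC IPI handlers touched above) into a dedicated .irqentry.text section; the function graph tracer compares return addresses against that section's bounds to tell when a trace has crossed into hard-IRQ context, so the annotations and this linker-script addition have to land together. The annotation itself is generic and expands to roughly the following (its exact header home varies by kernel version):

	#define __irq_entry __attribute__((__section__(".irqentry.text")))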
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 03092ab2a296..60477529362e 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -1116,8 +1116,6 @@ static int vpe_open(struct inode *inode, struct file *filp) | |||
1116 | v->shared_ptr = NULL; | 1116 | v->shared_ptr = NULL; |
1117 | v->__start = 0; | 1117 | v->__start = 0; |
1118 | 1118 | ||
1119 | unlock_kernel(); | ||
1120 | |||
1121 | return 0; | 1119 | return 0; |
1122 | } | 1120 | } |
1123 | 1121 | ||