Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile                      |  14
-rw-r--r--  arch/mips/kernel/cpu-probe.c                   |   2
-rw-r--r--  arch/mips/kernel/cpufreq/Kconfig               |  41
-rw-r--r--  arch/mips/kernel/cpufreq/Makefile              |   5
-rw-r--r--  arch/mips/kernel/cpufreq/loongson2_clock.c     | 166
-rw-r--r--  arch/mips/kernel/cpufreq/loongson2_cpufreq.c   | 227
-rw-r--r--  arch/mips/kernel/csrc-powertv.c                | 180
-rw-r--r--  arch/mips/kernel/ftrace.c                      | 275
-rw-r--r--  arch/mips/kernel/irq.c                         |  34
-rw-r--r--  arch/mips/kernel/kspd.c                        |   1
-rw-r--r--  arch/mips/kernel/linux32.c                     |  80
-rw-r--r--  arch/mips/kernel/mcount.S                      | 189
-rw-r--r--  arch/mips/kernel/mips_ksyms.c                  |   5
-rw-r--r--  arch/mips/kernel/scall32-o32.S                 |   1
-rw-r--r--  arch/mips/kernel/scall64-64.S                  |   1
-rw-r--r--  arch/mips/kernel/scall64-n32.S                 |   3
-rw-r--r--  arch/mips/kernel/scall64-o32.S                 |   3
-rw-r--r--  arch/mips/kernel/setup.c                       |  44
-rw-r--r--  arch/mips/kernel/signal.c                      |  46
-rw-r--r--  arch/mips/kernel/signal32.c                    |  24
-rw-r--r--  arch/mips/kernel/smp.c                         |   3
-rw-r--r--  arch/mips/kernel/smtc.c                        |  23
-rw-r--r--  arch/mips/kernel/syscall.c                     |  32
-rw-r--r--  arch/mips/kernel/time.c                        |  33
-rw-r--r--  arch/mips/kernel/traps.c                       | 136
-rw-r--r--  arch/mips/kernel/unaligned.c                   |  25
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S                 |   1
27 files changed, 1319 insertions, 275 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index eecd2a9f155..9326af5186f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,14 +2,17 @@
 # Makefile for the Linux/MIPS kernel.
 #
 
-CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
-
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 		   ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o watch.o
 
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+endif
+
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
@@ -19,6 +22,7 @@ obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
+obj-$(CONFIG_CSRC_POWERTV)	+= csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
@@ -26,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
+
 obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
@@ -92,4 +98,8 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
 
+obj-$(CONFIG_MIPS_CPUFREQ)	+= cpufreq/
+
 EXTRA_CFLAGS += -Werror
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 7a51866068a..80e202eca05 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 #include <linux/stddef.h>
+#include <linux/module.h>
 
 #include <asm/bugs.h>
 #include <asm/cpu.h>
@@ -32,6 +33,7 @@
  * the CPU very much.
  */
 void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
 
 static void r3081_wait(void)
 {
diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig
new file mode 100644
index 00000000000..58c601eee6f
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/Kconfig
@@ -0,0 +1,41 @@
1#
2# CPU Frequency scaling
3#
4
5config MIPS_EXTERNAL_TIMER
6 bool
7
8config MIPS_CPUFREQ
9 bool
10 default y
11 depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
12
13if MIPS_CPUFREQ
14
15menu "CPU Frequency scaling"
16
17source "drivers/cpufreq/Kconfig"
18
19if CPU_FREQ
20
21comment "CPUFreq processor drivers"
22
23config LOONGSON2_CPUFREQ
24 tristate "Loongson2 CPUFreq Driver"
25 select CPU_FREQ_TABLE
26 depends on MIPS_CPUFREQ
27 help
28 This option adds a CPUFreq driver for loongson processors which
29 support software configurable cpu frequency.
30
31 Loongson2F and it's successors support this feature.
32
33 For details, take a look at <file:Documentation/cpu-freq/>.
34
35 If in doubt, say N.
36
37endif # CPU_FREQ
38
39endmenu
40
41endif # MIPS_CPUFREQ
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile
new file mode 100644
index 00000000000..c3479a432ef
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Linux/MIPS cpufreq.
3#
4
5obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
new file mode 100644
index 00000000000..d7ca256e33e
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -0,0 +1,166 @@
1/*
2 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
3 * Author: Yanhua, yanh@lemote.com
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9
10#include <linux/cpufreq.h>
11#include <linux/platform_device.h>
12
13#include <asm/clock.h>
14
15#include <loongson.h>
16
17static LIST_HEAD(clock_list);
18static DEFINE_SPINLOCK(clock_lock);
19static DEFINE_MUTEX(clock_list_sem);
20
21/* Minimum CLK support */
22enum {
23 DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT,
24 DC_87PT, DC_DISABLE, DC_RESV
25};
26
27struct cpufreq_frequency_table loongson2_clockmod_table[] = {
28 {DC_RESV, CPUFREQ_ENTRY_INVALID},
29 {DC_ZERO, CPUFREQ_ENTRY_INVALID},
30 {DC_25PT, 0},
31 {DC_37PT, 0},
32 {DC_50PT, 0},
33 {DC_62PT, 0},
34 {DC_75PT, 0},
35 {DC_87PT, 0},
36 {DC_DISABLE, 0},
37 {DC_RESV, CPUFREQ_TABLE_END},
38};
39EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
40
41static struct clk cpu_clk = {
42 .name = "cpu_clk",
43 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
44 .rate = 800000000,
45};
46
47struct clk *clk_get(struct device *dev, const char *id)
48{
49 return &cpu_clk;
50}
51EXPORT_SYMBOL(clk_get);
52
53static void propagate_rate(struct clk *clk)
54{
55 struct clk *clkp;
56
57 list_for_each_entry(clkp, &clock_list, node) {
58 if (likely(clkp->parent != clk))
59 continue;
60 if (likely(clkp->ops && clkp->ops->recalc))
61 clkp->ops->recalc(clkp);
62 if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
63 propagate_rate(clkp);
64 }
65}
66
67int clk_enable(struct clk *clk)
68{
69 return 0;
70}
71EXPORT_SYMBOL(clk_enable);
72
73void clk_disable(struct clk *clk)
74{
75}
76EXPORT_SYMBOL(clk_disable);
77
78unsigned long clk_get_rate(struct clk *clk)
79{
80 return (unsigned long)clk->rate;
81}
82EXPORT_SYMBOL(clk_get_rate);
83
84void clk_put(struct clk *clk)
85{
86}
87EXPORT_SYMBOL(clk_put);
88
89int clk_set_rate(struct clk *clk, unsigned long rate)
90{
91 return clk_set_rate_ex(clk, rate, 0);
92}
93EXPORT_SYMBOL_GPL(clk_set_rate);
94
95int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
96{
97 int ret = 0;
98 int regval;
99 int i;
100
101 if (likely(clk->ops && clk->ops->set_rate)) {
102 unsigned long flags;
103
104 spin_lock_irqsave(&clock_lock, flags);
105 ret = clk->ops->set_rate(clk, rate, algo_id);
106 spin_unlock_irqrestore(&clock_lock, flags);
107 }
108
109 if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
110 propagate_rate(clk);
111
112 for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END;
113 i++) {
114 if (loongson2_clockmod_table[i].frequency ==
115 CPUFREQ_ENTRY_INVALID)
116 continue;
117 if (rate == loongson2_clockmod_table[i].frequency)
118 break;
119 }
120 if (rate != loongson2_clockmod_table[i].frequency)
121 return -ENOTSUPP;
122
123 clk->rate = rate;
124
125 regval = LOONGSON_CHIPCFG0;
126 regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1);
127 LOONGSON_CHIPCFG0 = regval;
128
129 return ret;
130}
131EXPORT_SYMBOL_GPL(clk_set_rate_ex);
132
133long clk_round_rate(struct clk *clk, unsigned long rate)
134{
135 if (likely(clk->ops && clk->ops->round_rate)) {
136 unsigned long flags, rounded;
137
138 spin_lock_irqsave(&clock_lock, flags);
139 rounded = clk->ops->round_rate(clk, rate);
140 spin_unlock_irqrestore(&clock_lock, flags);
141
142 return rounded;
143 }
144
145 return rate;
146}
147EXPORT_SYMBOL_GPL(clk_round_rate);
148
149/*
150 * This is the simple version of Loongson-2 wait, Maybe we need do this in
151 * interrupt disabled content
152 */
153
154DEFINE_SPINLOCK(loongson2_wait_lock);
155void loongson2_cpu_wait(void)
156{
157 u32 cpu_freq;
158 unsigned long flags;
159
160 spin_lock_irqsave(&loongson2_wait_lock, flags);
161 cpu_freq = LOONGSON_CHIPCFG0;
162 LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
163 LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
164 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
165}
166EXPORT_SYMBOL_GPL(loongson2_cpu_wait);
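
As a side note, the only hardware state this file touches is LOONGSON_CHIPCFG0, whose low three bits select the frequency step. A minimal stand-alone sketch of that read-modify-write (the register is modelled as a plain variable, and the sample values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t chipcfg0 = 0x00000457;	/* assumed current register value */
	unsigned int index = 5;		/* e.g. DC_62PT from the table above */

	/* clk_set_rate_ex(): keep everything but bits 2..0, then program
	 * index - 1 as the new frequency step */
	chipcfg0 = (chipcfg0 & ~0x7) | (index - 1);
	printf("new CHIPCFG0 = 0x%08x\n", chipcfg0);

	/* loongson2_cpu_wait(): clearing the field momentarily puts the core
	 * into wait mode, then the saved value is written back */
	printf("wait value   = 0x%08x\n", chipcfg0 & ~0x7);
	return 0;
}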
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 00000000000..2f6a0b147ab
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,227 @@
1/*
2 * Cpufreq driver for the loongson-2 processors
3 *
4 * The 2E revision of the Loongson processor does not support this feature.
5 *
6 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/cpufreq.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/sched.h> /* set_cpus_allowed() */
17#include <linux/delay.h>
18#include <linux/platform_device.h>
19
20#include <asm/clock.h>
21
22#include <loongson.h>
23
24static uint nowait;
25
26static struct clk *cpuclk;
27
28static void (*saved_cpu_wait) (void);
29
30static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
31 unsigned long val, void *data);
32
33static struct notifier_block loongson2_cpufreq_notifier_block = {
34 .notifier_call = loongson2_cpu_freq_notifier
35};
36
37static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
38 unsigned long val, void *data)
39{
40 if (val == CPUFREQ_POSTCHANGE)
41 current_cpu_data.udelay_val = loops_per_jiffy;
42
43 return 0;
44}
45
46static unsigned int loongson2_cpufreq_get(unsigned int cpu)
47{
48 return clk_get_rate(cpuclk);
49}
50
51/*
52 * Here we notify other drivers of the proposed change and the final change.
53 */
54static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 unsigned int cpu = policy->cpu;
59 unsigned int newstate = 0;
60 cpumask_t cpus_allowed;
61 struct cpufreq_freqs freqs;
62 unsigned int freq;
63
64 if (!cpu_online(cpu))
65 return -ENODEV;
66
67 cpus_allowed = current->cpus_allowed;
68 set_cpus_allowed(current, cpumask_of_cpu(cpu));
69
70 if (cpufreq_frequency_table_target
71 (policy, &loongson2_clockmod_table[0], target_freq, relation,
72 &newstate))
73 return -EINVAL;
74
75 freq =
76 ((cpu_clock_freq / 1000) *
77 loongson2_clockmod_table[newstate].index) / 8;
78 if (freq < policy->min || freq > policy->max)
79 return -EINVAL;
80
81 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
82
83 freqs.cpu = cpu;
84 freqs.old = loongson2_cpufreq_get(cpu);
85 freqs.new = freq;
86 freqs.flags = 0;
87
88 if (freqs.new == freqs.old)
89 return 0;
90
91 /* notifiers */
92 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
93
94 set_cpus_allowed(current, cpus_allowed);
95
96 /* setting the cpu frequency */
97 clk_set_rate(cpuclk, freq);
98
99 /* notifiers */
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
101
102 pr_debug("cpufreq: set frequency %u kHz\n", freq);
103
104 return 0;
105}
106
107static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
108{
109 int i;
110
111 if (!cpu_online(policy->cpu))
112 return -ENODEV;
113
114 cpuclk = clk_get(NULL, "cpu_clk");
115 if (IS_ERR(cpuclk)) {
116 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
117 return PTR_ERR(cpuclk);
118 }
119
120 cpuclk->rate = cpu_clock_freq / 1000;
121 if (!cpuclk->rate)
122 return -EINVAL;
123
124 /* clock table init */
125 for (i = 2;
126 (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
127 i++)
128 loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8;
129
130 policy->cur = loongson2_cpufreq_get(policy->cpu);
131
132 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
133 policy->cpu);
134
135 return cpufreq_frequency_table_cpuinfo(policy,
136 &loongson2_clockmod_table[0]);
137}
138
139static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
140{
141 return cpufreq_frequency_table_verify(policy,
142 &loongson2_clockmod_table[0]);
143}
144
145static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
146{
147 clk_put(cpuclk);
148 return 0;
149}
150
151static struct freq_attr *loongson2_table_attr[] = {
152 &cpufreq_freq_attr_scaling_available_freqs,
153 NULL,
154};
155
156static struct cpufreq_driver loongson2_cpufreq_driver = {
157 .owner = THIS_MODULE,
158 .name = "loongson2",
159 .init = loongson2_cpufreq_cpu_init,
160 .verify = loongson2_cpufreq_verify,
161 .target = loongson2_cpufreq_target,
162 .get = loongson2_cpufreq_get,
163 .exit = loongson2_cpufreq_exit,
164 .attr = loongson2_table_attr,
165};
166
167static struct platform_device_id platform_device_ids[] = {
168 {
169 .name = "loongson2_cpufreq",
170 },
171 {}
172};
173
174MODULE_DEVICE_TABLE(platform, platform_device_ids);
175
176static struct platform_driver platform_driver = {
177 .driver = {
178 .name = "loongson2_cpufreq",
179 .owner = THIS_MODULE,
180 },
181 .id_table = platform_device_ids,
182};
183
184static int __init cpufreq_init(void)
185{
186 int ret;
187
188 /* Register platform stuff */
189 ret = platform_driver_register(&platform_driver);
190 if (ret)
191 return ret;
192
193 pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
194
195 cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
196 CPUFREQ_TRANSITION_NOTIFIER);
197
198 ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
199
200 if (!ret && !nowait) {
201 saved_cpu_wait = cpu_wait;
202 cpu_wait = loongson2_cpu_wait;
203 }
204
205 return ret;
206}
207
208static void __exit cpufreq_exit(void)
209{
210 if (!nowait && saved_cpu_wait)
211 cpu_wait = saved_cpu_wait;
212 cpufreq_unregister_driver(&loongson2_cpufreq_driver);
213 cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
214 CPUFREQ_TRANSITION_NOTIFIER);
215
216 platform_driver_unregister(&platform_driver);
217}
218
219module_init(cpufreq_init);
220module_exit(cpufreq_exit);
221
222module_param(nowait, uint, 0644);
223MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
224
225MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
226MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
227MODULE_LICENSE("GPL");
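
For reference, the table setup in loongson2_cpufreq_cpu_init() reduces to "entry i runs at i/8 of the base clock". A stand-alone sketch of that arithmetic, assuming an 800000 kHz base clock (the real value comes from cpu_clock_freq):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 800000;	/* assumed base clock in kHz */
	int i;

	/* entries 0 and 1 of loongson2_clockmod_table are invalid; the
	 * selectable steps run from DC_25PT (2) to DC_DISABLE (8) */
	for (i = 2; i <= 8; i++)
		printf("index %d -> %lu kHz (%d/8 of base)\n",
		       i, rate * i / 8, i);
	return 0;
}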
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
new file mode 100644
index 00000000000..a27c16c8690
--- /dev/null
+++ b/arch/mips/kernel/csrc-powertv.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright (C) 2008 Scientific-Atlanta, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18/*
19 * The file comes from kernel/csrc-r4k.c
20 */
21#include <linux/clocksource.h>
22#include <linux/init.h>
23
24#include <asm/time.h> /* Not included in linux/time.h */
25
26#include <asm/mach-powertv/asic_regs.h>
27#include "powertv-clock.h"
28
29/* MIPS PLL Register Definitions */
30#define PLL_GET_M(x) (((x) >> 8) & 0x000000FF)
31#define PLL_GET_N(x) (((x) >> 16) & 0x000000FF)
32#define PLL_GET_P(x) (((x) >> 24) & 0x00000007)
33
34/*
35 * returns: Clock frequency in kHz
36 */
37unsigned int __init mips_get_pll_freq(void)
38{
39 unsigned int pll_reg, m, n, p;
40 unsigned int fin = 54000; /* Base frequency in kHz */
41 unsigned int fout;
42
43 /* Read PLL register setting */
44 pll_reg = asic_read(mips_pll_setup);
45 m = PLL_GET_M(pll_reg);
46 n = PLL_GET_N(pll_reg);
47 p = PLL_GET_P(pll_reg);
48 pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
49
50 /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
51 fout = ((2 * n * fin) / (m * (0x01 << p)));
52
53 pr_info("MIPS Clock Freq=%d kHz\n", fout);
54
55 return fout;
56}
57
58static cycle_t c0_hpt_read(struct clocksource *cs)
59{
60 return read_c0_count();
61}
62
63static struct clocksource clocksource_mips = {
64 .name = "powertv-counter",
65 .read = c0_hpt_read,
66 .mask = CLOCKSOURCE_MASK(32),
67 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
68};
69
70static void __init powertv_c0_hpt_clocksource_init(void)
71{
72 unsigned int pll_freq = mips_get_pll_freq();
73
74 pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
75 (pll_freq % 1000) * 100 / 1000);
76
77 mips_hpt_frequency = pll_freq / 2 * 1000;
78
79 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
80
81 clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
82
83 clocksource_register(&clocksource_mips);
84}
85
86/**
87 * struct tim_c - free running counter
88 * @hi: High 16 bits of the counter
89 * @lo: Low 32 bits of the counter
90 *
91 * Lays out the structure of the free running counter in memory. This counter
92 * increments at a rate of 27 MHz/8 on all platforms.
93 */
94struct tim_c {
95 unsigned int hi;
96 unsigned int lo;
97};
98
99static struct tim_c *tim_c;
100
101static cycle_t tim_c_read(struct clocksource *cs)
102{
103 unsigned int hi;
104 unsigned int next_hi;
105 unsigned int lo;
106
107 hi = readl(&tim_c->hi);
108
109 for (;;) {
110 lo = readl(&tim_c->lo);
111 next_hi = readl(&tim_c->hi);
112 if (next_hi == hi)
113 break;
114 hi = next_hi;
115 }
116
117pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo);
118 return ((u64) hi << 32) | lo;
119}
120
121#define TIM_C_SIZE 48 /* # bits in the timer */
122
123static struct clocksource clocksource_tim_c = {
124 .name = "powertv-tim_c",
125 .read = tim_c_read,
126 .mask = CLOCKSOURCE_MASK(TIM_C_SIZE),
127 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
128};
129
130/**
131 * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
132 *
133 * The hard part here is coming up with a constant k and shift s such that
134 * the 48-bit TIM_C value multiplied by k doesn't overflow and that value,
135 * when shifted right by s, yields the corresponding number of nanoseconds.
136 * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
137 * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the
138 * number of nanoseconds. Since the TIM_C value has 48 bits and the math is
139 * done in 64 bits, avoiding an overflow means that k must be less than
140 * 64 - 48 = 16 bits.
141 */
142static void __init powertv_tim_c_clocksource_init(void)
143{
144 int prescale;
145 unsigned long dividend;
146 unsigned long k;
147 int s;
148 const int max_k_bits = (64 - 48) - 1;
149 const unsigned long billion = 1000000000;
150 const unsigned long counts_per_second = 27000000 / 8;
151
152 prescale = BITS_PER_LONG - ilog2(billion) - 1;
153 dividend = billion << prescale;
154 k = dividend / counts_per_second;
155 s = ilog2(k) - max_k_bits;
156
157 if (s < 0)
158 s = prescale;
159
160 else {
161 k >>= s;
162 s += prescale;
163 }
164
165 clocksource_tim_c.mult = k;
166 clocksource_tim_c.shift = s;
167 clocksource_tim_c.rating = 200;
168
169 clocksource_register(&clocksource_tim_c);
170 tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
171}
172
173/**
174 powertv_clocksource_init - initialize all clocksources
175 */
176void __init powertv_clocksource_init(void)
177{
178 powertv_c0_hpt_clocksource_init();
179 powertv_tim_c_clocksource_init();
180}
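
To make the comment above powertv_tim_c_clocksource_init() concrete, here is a stand-alone sketch of the same k/s derivation, assuming the target's 32-bit unsigned long (BITS_PER_LONG == 32); the printed pair is the mult/shift the clocksource ends up with:

#include <stdio.h>
#include <stdint.h>

static int ilog2_u32(uint32_t v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	const int bits_per_long = 32;		/* assumption: 32-bit MIPS target */
	const int max_k_bits = (64 - 48) - 1;
	const uint32_t billion = 1000000000u;
	const uint32_t counts_per_second = 27000000 / 8;
	int prescale = bits_per_long - ilog2_u32(billion) - 1;
	uint32_t dividend = billion << prescale;
	uint32_t k = dividend / counts_per_second;
	int s = ilog2_u32(k) - max_k_bits;

	if (s < 0) {
		s = prescale;
	} else {
		k >>= s;
		s += prescale;
	}

	/* expect mult = 1185, shift = 2: 1185 / 2^2 ~= 296.25 ns per tick,
	 * which matches 1 / (27 MHz / 8) ~= 296.3 ns */
	printf("mult = %u, shift = %d\n", k, s);
	return 0;
}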
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 00000000000..68b067040d8
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,275 @@
1/*
2 * Code for replacing ftrace calls with jumps.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2009 DSLab, Lanzhou University, China
6 * Author: Wu Zhangjin <wuzj@lemote.com>
7 *
8 * Thanks goes to Steven Rostedt for writing the original x86 version.
9 */
10
11#include <linux/uaccess.h>
12#include <linux/init.h>
13#include <linux/ftrace.h>
14
15#include <asm/cacheflush.h>
16#include <asm/asm.h>
17#include <asm/asm-offsets.h>
18
19#ifdef CONFIG_DYNAMIC_FTRACE
20
21#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
22#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
23#define jump_insn_encode(op_code, addr) \
24 ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
25
26static unsigned int ftrace_nop = 0x00000000;
27
28static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
29{
30 int faulted;
31
32 /* *(unsigned int *)ip = new_code; */
33 safe_store_code(new_code, ip, faulted);
34
35 if (unlikely(faulted))
36 return -EFAULT;
37
38 flush_icache_range(ip, ip + 8);
39
40 return 0;
41}
42
43static int lui_v1;
44static int jal_mcount;
45
46int ftrace_make_nop(struct module *mod,
47 struct dyn_ftrace *rec, unsigned long addr)
48{
49 unsigned int new;
50 int faulted;
51 unsigned long ip = rec->ip;
52
53 /* We have compiled module with -mlong-calls, but compiled the kernel
54 * without it, we need to cope with them respectively. */
55 if (ip & 0x40000000) {
56 /* record it for ftrace_make_call */
57 if (lui_v1 == 0) {
58 /* lui_v1 = *(unsigned int *)ip; */
59 safe_load_code(lui_v1, ip, faulted);
60
61 if (unlikely(faulted))
62 return -EFAULT;
63 }
64
65 /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
66 * addiu v1, v1, low_16bit_of_mcount
67 * move at, ra
68 * jalr v1
69 * nop
70 * 1f: (ip + 12)
71 */
72 new = 0x10000004;
73 } else {
74 /* record/calculate it for ftrace_make_call */
75 if (jal_mcount == 0) {
76 /* We can record it directly like this:
77 * jal_mcount = *(unsigned int *)ip;
78 * Herein, jump over the first two nop instructions */
79 jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
80 }
81
82 /* move at, ra
83 * jalr v1 --> nop
84 */
85 new = ftrace_nop;
86 }
87 return ftrace_modify_code(ip, new);
88}
89
90static int modified; /* initialized as 0 by default */
91
92int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
93{
94 unsigned int new;
95 unsigned long ip = rec->ip;
96
97 /* We just need to remove the "b ftrace_stub" the first time! */
98 if (modified == 0) {
99 modified = 1;
100 ftrace_modify_code(addr, ftrace_nop);
101 }
102 /* ip, module: 0xc0000000, kernel: 0x80000000 */
103 new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
104
105 return ftrace_modify_code(ip, new);
106}
107
108#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
109
110int ftrace_update_ftrace_func(ftrace_func_t func)
111{
112 unsigned int new;
113
114 new = jump_insn_encode(JAL, (unsigned long)func);
115
116 return ftrace_modify_code(FTRACE_CALL_IP, new);
117}
118
119int __init ftrace_dyn_arch_init(void *data)
120{
121 /* The return code is returned via data */
122 *(unsigned long *)data = 0;
123
124 return 0;
125}
126#endif /* CONFIG_DYNAMIC_FTRACE */
127
128#ifdef CONFIG_FUNCTION_GRAPH_TRACER
129
130#ifdef CONFIG_DYNAMIC_FTRACE
131
132extern void ftrace_graph_call(void);
133#define JMP 0x08000000 /* jump to target directly */
134#define CALL_FTRACE_GRAPH_CALLER \
135 jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
136#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
137
138int ftrace_enable_ftrace_graph_caller(void)
139{
140 return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
141 CALL_FTRACE_GRAPH_CALLER);
142}
143
144int ftrace_disable_ftrace_graph_caller(void)
145{
146 return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
147}
148
149#endif /* !CONFIG_DYNAMIC_FTRACE */
150
151#ifndef KBUILD_MCOUNT_RA_ADDRESS
152#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
153#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
154#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
155
156unsigned long ftrace_get_parent_addr(unsigned long self_addr,
157 unsigned long parent,
158 unsigned long parent_addr,
159 unsigned long fp)
160{
161 unsigned long sp, ip, ra;
162 unsigned int code;
163 int faulted;
164
165 /* in module or kernel? */
166 if (self_addr & 0x40000000) {
167 /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
168 ip = self_addr - 20;
169 } else {
170 /* kernel: move to the instruction "move ra, at" */
171 ip = self_addr - 12;
172 }
173
174 /* search the text until finding the non-store instruction or "s{d,w}
175 * ra, offset(sp)" instruction */
176 do {
177 ip -= 4;
178
179 /* get the code at "ip": code = *(unsigned int *)ip; */
180 safe_load_code(code, ip, faulted);
181
182 if (unlikely(faulted))
183 return 0;
184
185 /* If we hit the non-store instruction before finding where the
186 * ra is stored, then this is a leaf function and it does not
187 * store the ra on the stack. */
188 if ((code & S_R_SP) != S_R_SP)
189 return parent_addr;
190
191 } while (((code & S_RA_SP) != S_RA_SP));
192
193 sp = fp + (code & OFFSET_MASK);
194
195 /* ra = *(unsigned long *)sp; */
196 safe_load_stack(ra, sp, faulted);
197 if (unlikely(faulted))
198 return 0;
199
200 if (ra == parent)
201 return sp;
202 return 0;
203}
204
205#endif
206
207/*
208 * Hook the return address and push it in the stack of return addrs
209 * in current thread info.
210 */
211void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
212 unsigned long fp)
213{
214 unsigned long old;
215 struct ftrace_graph_ent trace;
216 unsigned long return_hooker = (unsigned long)
217 &return_to_handler;
218 int faulted;
219
220 if (unlikely(atomic_read(&current->tracing_graph_pause)))
221 return;
222
223 /* "parent" is the stack address saved the return address of the caller
224 * of _mcount.
225 *
226 * if the gcc < 4.5, a leaf function does not save the return address
227 * in the stack address, so, we "emulate" one in _mcount's stack space,
228 * and hijack it directly, but for a non-leaf function, it save the
229 * return address to the its own stack space, we can not hijack it
230 * directly, but need to find the real stack address,
231 * ftrace_get_parent_addr() does it!
232 *
233 * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
234 * non-leaf function, the location of the return address will be saved
235 * to $12 for us, and for a leaf function, only put a zero into $12. we
236 * do it in ftrace_graph_caller of mcount.S.
237 */
238
239 /* old = *parent; */
240 safe_load_stack(old, parent, faulted);
241 if (unlikely(faulted))
242 goto out;
243#ifndef KBUILD_MCOUNT_RA_ADDRESS
244 parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
245 (unsigned long)parent,
246 fp);
247 /* If fails when getting the stack address of the non-leaf function's
248 * ra, stop function graph tracer and return */
249 if (parent == 0)
250 goto out;
251#endif
252 /* *parent = return_hooker; */
253 safe_store_stack(return_hooker, parent, faulted);
254 if (unlikely(faulted))
255 goto out;
256
257 if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
258 -EBUSY) {
259 *parent = old;
260 return;
261 }
262
263 trace.func = self_addr;
264
265 /* Only trace if the calling function expects to */
266 if (!ftrace_graph_entry(&trace)) {
267 current->curr_ret_stack--;
268 *parent = old;
269 }
270 return;
271out:
272 ftrace_graph_stop();
273 WARN_ON(1);
274}
275#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
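
A quick stand-alone illustration (not part of the patch) of what jump_insn_encode() builds: the 26-bit target field of a MIPS j/jal holds the word address, so the byte address is shifted right by two and masked before being OR-ed with the opcode. The sample address below is made up.

#include <stdio.h>
#include <stdint.h>

#define JAL		0x0c000000u	/* opcode bits 31..26 = 000011 */
#define ADDR_MASK	0x03ffffffu	/* low 26 bits: word-aligned target */

static uint32_t jump_insn_encode(uint32_t op_code, uint32_t addr)
{
	return op_code | ((addr >> 2) & ADDR_MASK);
}

int main(void)
{
	uint32_t mcount = 0x80123450u;	/* hypothetical kernel-segment address */

	/* 0x80123450 >> 2 = 0x20048d14, masked to 26 bits = 0x00048d14,
	 * so the emitted instruction word is 0x0c048d14 */
	printf("jal _mcount -> 0x%08x\n", jump_insn_encode(JAL, mcount));
	return 0;
}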
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff..981f86c2616 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
 #include <linux/kgdb.h>
+#include <linux/ftrace.h>
 
 #include <asm/atomic.h>
 #include <asm/system.h>
@@ -99,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -118,7 +119,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -150,3 +151,32 @@ void __init init_IRQ(void)
 	kgdb_early_setup = 1;
 #endif
 }
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+	irq_enter();
+	__DO_IRQ_SMTC_HOOK(irq);
+	generic_handle_irq(irq);
+	irq_exit();
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+void __irq_entry do_IRQ_no_affinity(unsigned int irq)
+{
+	irq_enter();
+	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+	generic_handle_irq(irq);
+	irq_exit();
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index ad4e017ed2f..80e2ba694ba 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -82,6 +82,7 @@ static int sp_stopping;
 #define MTSP_O_SHLOCK		0x0010
 #define MTSP_O_EXLOCK		0x0020
 #define MTSP_O_ASYNC		0x0040
+/* XXX: check which of these is actually O_SYNC vs O_DSYNC */
 #define MTSP_O_FSYNC		O_SYNC
 #define MTSP_O_NOFOLLOW		0x0100
 #define MTSP_O_SYNC		0x0080
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index b77fefaff9d..f042563c924 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -67,28 +67,13 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
 	unsigned long, prot, unsigned long, flags, unsigned long, fd,
 	unsigned long, pgoff)
 {
-	struct file * file = NULL;
 	unsigned long error;
 
 	error = -EINVAL;
 	if (pgoff & (~PAGE_MASK >> 12))
 		goto out;
-	pgoff >>= PAGE_SHIFT-12;
-
-	if (!(flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-
+	error = sys_mmap_pgoff(addr, len, prot, flags, fd,
+			       pgoff >> (PAGE_SHIFT-12));
 out:
 	return error;
 }
@@ -265,67 +250,6 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
 }
 #endif
 
-struct sysctl_args32
-{
-	compat_caddr_t name;
-	int nlen;
-	compat_caddr_t oldval;
-	compat_caddr_t oldlenp;
-	compat_caddr_t newval;
-	compat_size_t newlen;
-	unsigned int __unused[4];
-};
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-
-SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
-{
-	struct sysctl_args32 tmp;
-	int error;
-	size_t oldlen;
-	size_t __user *oldlenp = NULL;
-	unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
-
-	if (copy_from_user(&tmp, args, sizeof(tmp)))
-		return -EFAULT;
-
-	if (tmp.oldval && tmp.oldlenp) {
-		/* Duh, this is ugly and might not work if sysctl_args
-		   is in read-only memory, but do_sysctl does indirectly
-		   a lot of uaccess in both directions and we'd have to
-		   basically copy the whole sysctl.c here, and
-		   glibc's __sysctl uses rw memory for the structure
-		   anyway. */
-		if (get_user(oldlen, (u32 __user *)A(tmp.oldlenp)) ||
-		    put_user(oldlen, (size_t __user *)addr))
-			return -EFAULT;
-		oldlenp = (size_t __user *)addr;
-	}
-
-	lock_kernel();
-	error = do_sysctl((int __user *)A(tmp.name), tmp.nlen, (void __user *)A(tmp.oldval),
-			  oldlenp, (void __user *)A(tmp.newval), tmp.newlen);
-	unlock_kernel();
-	if (oldlenp) {
-		if (!error) {
-			if (get_user(oldlen, (size_t __user *)addr) ||
-			    put_user(oldlen, (u32 __user *)A(tmp.oldlenp)))
-				error = -EFAULT;
-		}
-		copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
-	}
-	return error;
-}
-
-#else
-
-SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
-{
-	return -ENOSYS;
-}
-
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
 SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name)
 {
 	int ret = 0;
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 00000000000..0a9cfdb271d
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,189 @@
1/*
2 * MIPS specific _mcount support
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive for
6 * more details.
7 *
8 * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
9 * Author: Wu Zhangjin <wuzj@lemote.com>
10 */
11
12#include <asm/regdef.h>
13#include <asm/stackframe.h>
14#include <asm/ftrace.h>
15
16 .text
17 .set noreorder
18 .set noat
19
20 .macro MCOUNT_SAVE_REGS
21 PTR_SUBU sp, PT_SIZE
22 PTR_S ra, PT_R31(sp)
23 PTR_S AT, PT_R1(sp)
24 PTR_S a0, PT_R4(sp)
25 PTR_S a1, PT_R5(sp)
26 PTR_S a2, PT_R6(sp)
27 PTR_S a3, PT_R7(sp)
28#ifdef CONFIG_64BIT
29 PTR_S a4, PT_R8(sp)
30 PTR_S a5, PT_R9(sp)
31 PTR_S a6, PT_R10(sp)
32 PTR_S a7, PT_R11(sp)
33#endif
34 .endm
35
36 .macro MCOUNT_RESTORE_REGS
37 PTR_L ra, PT_R31(sp)
38 PTR_L AT, PT_R1(sp)
39 PTR_L a0, PT_R4(sp)
40 PTR_L a1, PT_R5(sp)
41 PTR_L a2, PT_R6(sp)
42 PTR_L a3, PT_R7(sp)
43#ifdef CONFIG_64BIT
44 PTR_L a4, PT_R8(sp)
45 PTR_L a5, PT_R9(sp)
46 PTR_L a6, PT_R10(sp)
47 PTR_L a7, PT_R11(sp)
48#endif
49#ifdef CONFIG_64BIT
50 PTR_ADDIU sp, PT_SIZE
51#else
52 PTR_ADDIU sp, (PT_SIZE + 8)
53#endif
54.endm
55
56 .macro RETURN_BACK
57 jr ra
58 move ra, AT
59 .endm
60
61#ifdef CONFIG_DYNAMIC_FTRACE
62
63NESTED(ftrace_caller, PT_SIZE, ra)
64 .globl _mcount
65_mcount:
66 b ftrace_stub
67 nop
68 lw t1, function_trace_stop
69 bnez t1, ftrace_stub
70 nop
71
72 MCOUNT_SAVE_REGS
73#ifdef KBUILD_MCOUNT_RA_ADDRESS
74 PTR_S t0, PT_R12(sp) /* t0 saved the location of the return address(at) by -mmcount-ra-address */
75#endif
76
77 move a0, ra /* arg1: next ip, selfaddr */
78 .globl ftrace_call
79ftrace_call:
80 nop /* a placeholder for the call to a real tracing function */
81 move a1, AT /* arg2: the caller's next ip, parent */
82
83#ifdef CONFIG_FUNCTION_GRAPH_TRACER
84 .globl ftrace_graph_call
85ftrace_graph_call:
86 nop
87 nop
88#endif
89
90 MCOUNT_RESTORE_REGS
91 .globl ftrace_stub
92ftrace_stub:
93 RETURN_BACK
94 END(ftrace_caller)
95
96#else /* ! CONFIG_DYNAMIC_FTRACE */
97
98NESTED(_mcount, PT_SIZE, ra)
99 lw t1, function_trace_stop
100 bnez t1, ftrace_stub
101 nop
102 PTR_LA t1, ftrace_stub
103 PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
104 bne t1, t2, static_trace
105 nop
106
107#ifdef CONFIG_FUNCTION_GRAPH_TRACER
108 PTR_L t3, ftrace_graph_return
109 bne t1, t3, ftrace_graph_caller
110 nop
111 PTR_LA t1, ftrace_graph_entry_stub
112 PTR_L t3, ftrace_graph_entry
113 bne t1, t3, ftrace_graph_caller
114 nop
115#endif
116 b ftrace_stub
117 nop
118
119static_trace:
120 MCOUNT_SAVE_REGS
121
122 move a0, ra /* arg1: next ip, selfaddr */
123 jalr t2 /* (1) call *ftrace_trace_function */
124 move a1, AT /* arg2: the caller's next ip, parent */
125
126 MCOUNT_RESTORE_REGS
127 .globl ftrace_stub
128ftrace_stub:
129 RETURN_BACK
130 END(_mcount)
131
132#endif /* ! CONFIG_DYNAMIC_FTRACE */
133
134#ifdef CONFIG_FUNCTION_GRAPH_TRACER
135
136NESTED(ftrace_graph_caller, PT_SIZE, ra)
137#ifdef CONFIG_DYNAMIC_FTRACE
138 PTR_L a1, PT_R31(sp) /* load the original ra from the stack */
139#ifdef KBUILD_MCOUNT_RA_ADDRESS
140 PTR_L t0, PT_R12(sp) /* load the original t0 from the stack */
141#endif
142#else
143 MCOUNT_SAVE_REGS
144 move a1, ra /* arg2: next ip, selfaddr */
145#endif
146
147#ifdef KBUILD_MCOUNT_RA_ADDRESS
148 bnez t0, 1f /* non-leaf func: t0 saved the location of the return address */
149 nop
150 PTR_LA t0, PT_R1(sp) /* leaf func: get the location of at(old ra) from our own stack */
1511: move a0, t0 /* arg1: the location of the return address */
152#else
153 PTR_LA a0, PT_R1(sp) /* arg1: &AT -> a0 */
154#endif
155 jal prepare_ftrace_return
156#ifdef CONFIG_FRAME_POINTER
157 move a2, fp /* arg3: frame pointer */
158#else
159#ifdef CONFIG_64BIT
160 PTR_LA a2, PT_SIZE(sp)
161#else
162 PTR_LA a2, (PT_SIZE+8)(sp)
163#endif
164#endif
165
166 MCOUNT_RESTORE_REGS
167 RETURN_BACK
168 END(ftrace_graph_caller)
169
170 .align 2
171 .globl return_to_handler
172return_to_handler:
173 PTR_SUBU sp, PT_SIZE
174 PTR_S v0, PT_R2(sp)
175
176 jal ftrace_return_to_handler
177 PTR_S v1, PT_R3(sp)
178
179 /* restore the real parent address: v0 -> ra */
180 move ra, v0
181
182 PTR_L v0, PT_R2(sp)
183 PTR_L v1, PT_R3(sp)
184 jr ra
185 PTR_ADDIU sp, PT_SIZE
186#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
187
188 .set at
189 .set reorder
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 225755d0c1f..1d04807874d 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -13,6 +13,7 @@
 #include <asm/checksum.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
+#include <asm/ftrace.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_user_nocheck_asm(char *__to,
@@ -51,3 +52,7 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_FUNCTION_TRACER
+/* _mcount is defined in arch/mips/kernel/mcount.S */
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fd2a9bb620d..17202bbe843 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -583,6 +583,7 @@ einval: li v0, -ENOSYS
 	sys	sys_rt_tgsigqueueinfo	4
 	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
+	sys	sys_recvmmsg		5
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 18bf7f32c5e..a8a6c596eb0 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -420,4 +420,5 @@ sys_call_table:
 	PTR	sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
+	PTR	sys_recvmmsg
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 6ebc0797669..66b5a48676d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -272,7 +272,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_munlockall
 	PTR	sys_vhangup			/* 6150 */
 	PTR	sys_pivot_root
-	PTR	sys_32_sysctl
+	PTR	compat_sys_sysctl
 	PTR	sys_prctl
 	PTR	compat_sys_adjtimex
 	PTR	compat_sys_setrlimit		/* 6155 */
@@ -418,4 +418,5 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_rt_tgsigqueueinfo	/* 5295 */
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
+	PTR	compat_sys_recvmmsg
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 14dde4ca932..515f9eab2b2 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -356,7 +356,7 @@ sys_call_table:
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
-	PTR	sys_32_sysctl
+	PTR	compat_sys_sysctl
 	PTR	sys_mlock
 	PTR	sys_munlock			/* 4155 */
 	PTR	sys_mlockall
@@ -538,4 +538,5 @@ sys_call_table:
 	PTR	compat_sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
+	PTR	compat_sys_recvmmsg
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2b290d70083..f9513f9e61d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -58,8 +58,12 @@ EXPORT_SYMBOL(mips_machtype);
 
 struct boot_mem_map boot_mem_map;
 
-static char command_line[CL_SIZE];
-       char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;
+static char __initdata command_line[COMMAND_LINE_SIZE];
+char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif
 
 /*
  * mips_io_port_base is the begin of the address space to which x86 style
@@ -166,26 +170,8 @@ static unsigned long __init init_initrd(void)
 	 * already set up initrd_start and initrd_end. In these cases
 	 * perfom sanity checks and use them if all looks good.
 	 */
-	if (!initrd_start || initrd_end <= initrd_start) {
-#ifdef CONFIG_PROBE_INITRD_HEADER
-		u32 *initrd_header;
-
-		/*
-		 * See if initrd has been added to the kernel image by
-		 * arch/mips/boot/addinitrd.c. In that case a header is
-		 * prepended to initrd and is made up by 8 bytes. The first
-		 * word is a magic number and the second one is the size of
-		 * initrd. Initrd start must be page aligned in any cases.
-		 */
-		initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
-		if (initrd_header[0] != 0x494E5244)
-			goto disable;
-		initrd_start = (unsigned long)(initrd_header + 2);
-		initrd_end = initrd_start + initrd_header[1];
-#else
+	if (!initrd_start || initrd_end <= initrd_start)
 		goto disable;
-#endif
-	}
 
 	if (initrd_start & ~PAGE_MASK) {
 		pr_err("initrd start must be page aligned\n");
@@ -476,8 +462,20 @@ static void __init arch_mem_init(char **cmdline_p)
 	pr_info("Determined physical RAM map:\n");
 	print_memory_map();
 
-	strlcpy(command_line, arcs_cmdline, sizeof(command_line));
-	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+	if (builtin_cmdline[0]) {
+		strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+		strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
+	}
+	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+#else
+	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 
 	*cmdline_p = command_line;
 
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6254041b942..d0c68b5d717 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,15 @@
 
 #include "signal-common.h"
 
+static int (*save_fp_context)(struct sigcontext __user *sc);
+static int (*restore_fp_context)(struct sigcontext __user *sc);
+
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
+
 /*
  * Horribly complicated - with the bloody RM9000 workarounds enabled
  * the signal trampolines is moving to the end of the structure so we can
@@ -709,3 +718,40 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 		key_replace_session_keyring();
 	}
 }
+
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext __user *sc)
+{
+	return raw_cpu_has_fpu
+	       ? _save_fp_context(sc)
+	       : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext __user *sc)
+{
+	return raw_cpu_has_fpu
+	       ? _restore_fp_context(sc)
+	       : fpu_emulator_restore_context(sc);
+}
+#endif
+
+static int signal_setup(void)
+{
+#ifdef CONFIG_SMP
+	/* For now just do the cpu_has_fpu check when the functions are invoked */
+	save_fp_context = smp_save_fp_context;
+	restore_fp_context = smp_restore_fp_context;
+#else
+	if (cpu_has_fpu) {
+		save_fp_context = _save_fp_context;
+		restore_fp_context = _restore_fp_context;
+	} else {
+		save_fp_context = fpu_emulator_save_context;
+		restore_fp_context = fpu_emulator_restore_context;
+	}
+#endif
+
+	return 0;
+}
+
+arch_initcall(signal_setup);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 2e74075ac0c..03abaf048f0 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -35,6 +35,15 @@
 
 #include "signal-common.h"
 
+static int (*save_fp_context32)(struct sigcontext32 __user *sc);
+static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
+
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
+
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
+
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -828,3 +837,18 @@ SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
 	info.si_code |= __SI_CHLD;
 	return copy_siginfo_to_user32(uinfo, &info);
 }
+
+static int signal32_init(void)
+{
+	if (cpu_has_fpu) {
+		save_fp_context32 = _save_fp_context32;
+		restore_fp_context32 = _restore_fp_context32;
+	} else {
+		save_fp_context32 = fpu_emulator_save_context32;
+		restore_fp_context32 = fpu_emulator_restore_context32;
+	}
+
+	return 0;
+}
+
+arch_initcall(signal32_init);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index e72e6844d13..6cdca1956b7 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -32,6 +32,7 @@
 #include <linux/cpumask.h>
 #include <linux/cpu.h>
 #include <linux/err.h>
+#include <linux/ftrace.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -130,7 +131,7 @@ asmlinkage __cpuinit void start_secondary(void)
 /*
  * Call into both interrupt handlers, as we share the IPI for them
  */
-void smp_call_function_interrupt(void)
+void __irq_entry smp_call_function_interrupt(void)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 24630fd8ef6..23499b5bd9c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
+#include <linux/ftrace.h>
 
 #include <asm/cpu.h>
 #include <asm/processor.h>
@@ -939,23 +940,29 @@ static void ipi_call_interrupt(void)
 
 DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 
-void ipi_decode(struct smtc_ipi *pipi)
+static void __irq_entry smtc_clock_tick_interrupt(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *cd;
+	int irq = MIPS_CPU_IRQ_BASE + 1;
+
+	irq_enter();
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+	cd = &per_cpu(mips_clockevent_device, cpu);
+	cd->event_handler(cd);
+	irq_exit();
+}
+
+void ipi_decode(struct smtc_ipi *pipi)
+{
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
-	int irq = MIPS_CPU_IRQ_BASE + 1;
 
 	smtc_ipi_nq(&freeIPIq, pipi);
 
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
-		irq_enter();
-		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-		cd = &per_cpu(mips_clockevent_device, cpu);
-		cd->event_handler(cd);
-		irq_exit();
+		smtc_clock_tick_interrupt();
 		break;
 
 	case LINUX_SMP_IPI:
@@ -1331,7 +1338,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	if (!((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
-		/* Traverse all online CPUs (hack requires contigous range) */
+		/* Traverse all online CPUs (hack requires contiguous range) */
 		for_each_online_cpu(i) {
 			/*
 			 * We don't need to worry about our own CPU, nor those of
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index fe0d7980560..3f7f466190b 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -93,7 +93,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		 * We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
@@ -129,31 +130,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	}
 }
 
-/* common code for old and new mmaps */
-static inline unsigned long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-	unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
-	unsigned long error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
 	unsigned long, prot, unsigned long, flags, unsigned long,
 	fd, off_t, offset)
@@ -164,7 +140,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
 	if (offset & ~PAGE_MASK)
 		goto out;
 
-	result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
 
 out:
 	return result;
@@ -177,7 +153,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
 	if (pgoff & (~PAGE_MASK >> 12))
 		return -EINVAL;
 
-	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
 }
 
 save_static_function(sys_fork);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 1f467d53464..fb749740551 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -71,39 +71,6 @@ EXPORT_SYMBOL(perf_irq);
71 71
72unsigned int mips_hpt_frequency; 72unsigned int mips_hpt_frequency;
73 73
74void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
75{
76 u64 temp;
77 u32 shift;
78
79 /* Find a shift value */
80 for (shift = 32; shift > 0; shift--) {
81 temp = (u64) NSEC_PER_SEC << shift;
82 do_div(temp, clock);
83 if ((temp >> 32) == 0)
84 break;
85 }
86 cs->shift = shift;
87 cs->mult = (u32) temp;
88}
89
90void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
91 unsigned int clock)
92{
93 u64 temp;
94 u32 shift;
95
96 /* Find a shift value */
97 for (shift = 32; shift > 0; shift--) {
98 temp = (u64) clock << shift;
99 do_div(temp, NSEC_PER_SEC);
100 if ((temp >> 32) == 0)
101 break;
102 }
103 cd->shift = shift;
104 cd->mult = (u32) temp;
105}
106
107/* 74/*
108 * This function exists in order to cause an error due to a duplicate 75 * This function exists in order to cause an error due to a duplicate
109 * definition if platform code should have its own implementation. The hook 76 * definition if platform code should have its own implementation. The hook
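The two helpers removed above both search for the largest shift value for which the scaled ratio still fits in 32 bits, so that nanoseconds can later be derived from counter cycles with one multiply and one shift (their callers presumably moved to generic kernel helpers). A standalone sketch of the clocksource variant, with do_div replaced by ordinary 64-bit division and an assumed 100 MHz counter as the example input:

        #include <stdint.h>
        #include <stdio.h>

        #define NSEC_PER_SEC 1000000000ULL

        /* Largest shift for which (NSEC_PER_SEC << shift) / clock_hz still fits
           in 32 bits; afterwards ns = (cycles * mult) >> shift. */
        static void calc_mult_shift(uint32_t *mult, uint32_t *shift, uint64_t clock_hz)
        {
                uint64_t temp = 0;
                uint32_t s;

                for (s = 32; s > 0; s--) {
                        temp = (NSEC_PER_SEC << s) / clock_hz;
                        if ((temp >> 32) == 0)
                                break;
                }
                *shift = s;
                *mult = (uint32_t)temp;
        }

        int main(void)
        {
                uint32_t mult, shift;

                calc_mult_shift(&mult, &shift, 100000000);      /* assumed 100 MHz counter */
                printf("mult=%u shift=%u\n", mult, shift);      /* mult >> shift == 10 ns/cycle */
                return 0;
        }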
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0a18b4c62af..308e4346086 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -25,10 +25,12 @@
25#include <linux/ptrace.h> 25#include <linux/ptrace.h>
26#include <linux/kgdb.h> 26#include <linux/kgdb.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/notifier.h>
28 29
29#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
30#include <asm/branch.h> 31#include <asm/branch.h>
31#include <asm/break.h> 32#include <asm/break.h>
33#include <asm/cop2.h>
32#include <asm/cpu.h> 34#include <asm/cpu.h>
33#include <asm/dsp.h> 35#include <asm/dsp.h>
34#include <asm/fpu.h> 36#include <asm/fpu.h>
@@ -79,10 +81,6 @@ extern asmlinkage void handle_reserved(void);
79extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 81extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
80 struct mips_fpu_struct *ctx, int has_fpu); 82 struct mips_fpu_struct *ctx, int has_fpu);
81 83
82#ifdef CONFIG_CPU_CAVIUM_OCTEON
83extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
84#endif
85
86void (*board_be_init)(void); 84void (*board_be_init)(void);
87int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 85int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
88void (*board_nmi_handler_setup)(void); 86void (*board_nmi_handler_setup)(void);
@@ -857,6 +855,44 @@ static void mt_ase_fp_affinity(void)
857#endif /* CONFIG_MIPS_MT_FPAFF */ 855#endif /* CONFIG_MIPS_MT_FPAFF */
858} 856}
859 857
858/*
859 * No lock; only written during early bootup by CPU 0.
860 */
861static RAW_NOTIFIER_HEAD(cu2_chain);
862
863int __ref register_cu2_notifier(struct notifier_block *nb)
864{
865 return raw_notifier_chain_register(&cu2_chain, nb);
866}
867
868int cu2_notifier_call_chain(unsigned long val, void *v)
869{
870 return raw_notifier_call_chain(&cu2_chain, val, v);
871}
872
873static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
874 void *data)
875{
876 struct pt_regs *regs = data;
877
878 switch (action) {
879 default:
880 die_if_kernel("Unhandled kernel unaligned access or invalid "
881 "instruction", regs);
882 /* Fall through */
883
884 case CU2_EXCEPTION:
885 force_sig(SIGILL, current);
886 }
887
888 return NOTIFY_OK;
889}
890
891static struct notifier_block default_cu2_notifier = {
892 .notifier_call = default_cu2_call,
893 .priority = 0x80000000, /* Run last */
894};
895
860asmlinkage void do_cpu(struct pt_regs *regs) 896asmlinkage void do_cpu(struct pt_regs *regs)
861{ 897{
862 unsigned int __user *epc; 898 unsigned int __user *epc;
@@ -920,17 +956,9 @@ asmlinkage void do_cpu(struct pt_regs *regs)
920 return; 956 return;
921 957
922 case 2: 958 case 2:
923#ifdef CONFIG_CPU_CAVIUM_OCTEON 959 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
924 prefetch(&current->thread.cp2); 960 break;
925 local_irq_save(flags); 961
926 KSTK_STATUS(current) |= ST0_CU2;
927 status = read_c0_status();
928 write_c0_status(status | ST0_CU2);
929 octeon_cop2_restore(&(current->thread.cp2));
930 write_c0_status(status & ~ST0_CU2);
931 local_irq_restore(flags);
932 return;
933#endif
934 case 3: 962 case 3:
935 break; 963 break;
936 } 964 }
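With the Octeon-specific block gone, a platform that really uses CP2 now hooks the chain instead of patching do_cpu(). The following is a hedged sketch of such a notifier, modelled loosely on the Octeon code removed above; the example_* names are hypothetical, the CP2 state restore is elided, and whether the surrounding do_cpu() path still signals afterwards depends on code not fully shown in this hunk. Returning NOTIFY_STOP only guarantees that later notifiers, including the default SIGILL handler registered in trap_init(), do not run.

        #include <linux/init.h>
        #include <linux/notifier.h>
        #include <linux/sched.h>
        #include <asm/cop2.h>
        #include <asm/mipsregs.h>
        #include <asm/processor.h>

        /* Hypothetical handler: claim CU2 for the faulting task. */
        static int example_cu2_call(struct notifier_block *nfb, unsigned long action,
                                    void *data)
        {
                unsigned long flags;
                unsigned int status;

                if (action != CU2_EXCEPTION)
                        return NOTIFY_OK;               /* not for us, keep walking */

                local_irq_save(flags);
                KSTK_STATUS(current) |= ST0_CU2;        /* task keeps CU2 on return to user */
                status = read_c0_status();
                write_c0_status(status | ST0_CU2);      /* enable CU2 while touching CP2 */
                /* ... restore this task's saved CP2 register state here ... */
                write_c0_status(status & ~ST0_CU2);     /* drop CU2 again in kernel mode */
                local_irq_restore(flags);

                return NOTIFY_STOP;     /* stop the chain before the default SIGILL notifier */
        }

        static struct notifier_block example_cu2_notifier = {
                .notifier_call = example_cu2_call,
        };

        static int __init example_cu2_init(void)
        {
                return register_cu2_notifier(&example_cu2_notifier);
        }
        early_initcall(example_cu2_init);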
@@ -1367,77 +1395,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
1367 return set_vi_srs_handler(n, addr, 0); 1395 return set_vi_srs_handler(n, addr, 0);
1368} 1396}
1369 1397
1370/*
1371 * This is used by native signal handling
1372 */
1373asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
1374asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
1375
1376extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
1377extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
1378
1379extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
1380extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
1381
1382#ifdef CONFIG_SMP
1383static int smp_save_fp_context(struct sigcontext __user *sc)
1384{
1385 return raw_cpu_has_fpu
1386 ? _save_fp_context(sc)
1387 : fpu_emulator_save_context(sc);
1388}
1389
1390static int smp_restore_fp_context(struct sigcontext __user *sc)
1391{
1392 return raw_cpu_has_fpu
1393 ? _restore_fp_context(sc)
1394 : fpu_emulator_restore_context(sc);
1395}
1396#endif
1397
1398static inline void signal_init(void)
1399{
1400#ifdef CONFIG_SMP
1401 /* For now just do the cpu_has_fpu check when the functions are invoked */
1402 save_fp_context = smp_save_fp_context;
1403 restore_fp_context = smp_restore_fp_context;
1404#else
1405 if (cpu_has_fpu) {
1406 save_fp_context = _save_fp_context;
1407 restore_fp_context = _restore_fp_context;
1408 } else {
1409 save_fp_context = fpu_emulator_save_context;
1410 restore_fp_context = fpu_emulator_restore_context;
1411 }
1412#endif
1413}
1414
1415#ifdef CONFIG_MIPS32_COMPAT
1416
1417/*
1418 * This is used by 32-bit signal stuff on the 64-bit kernel
1419 */
1420asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
1421asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
1422
1423extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
1424extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
1425
1426extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
1427extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
1428
1429static inline void signal32_init(void)
1430{
1431 if (cpu_has_fpu) {
1432 save_fp_context32 = _save_fp_context32;
1433 restore_fp_context32 = _restore_fp_context32;
1434 } else {
1435 save_fp_context32 = fpu_emulator_save_context32;
1436 restore_fp_context32 = fpu_emulator_restore_context32;
1437 }
1438}
1439#endif
1440
1441extern void cpu_cache_init(void); 1398extern void cpu_cache_init(void);
1442extern void tlb_init(void); 1399extern void tlb_init(void);
1443extern void flush_tlb_handlers(void); 1400extern void flush_tlb_handlers(void);
@@ -1751,13 +1708,10 @@ void __init trap_init(void)
1751 else 1708 else
1752 memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); 1709 memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
1753 1710
1754 signal_init();
1755#ifdef CONFIG_MIPS32_COMPAT
1756 signal32_init();
1757#endif
1758
1759 local_flush_icache_range(ebase, ebase + 0x400); 1711 local_flush_icache_range(ebase, ebase + 0x400);
1760 flush_tlb_handlers(); 1712 flush_tlb_handlers();
1761 1713
1762 sort_extable(__start___dbe_table, __stop___dbe_table); 1714 sort_extable(__start___dbe_table, __stop___dbe_table);
1715
1716 register_cu2_notifier(&default_cu2_notifier);
1763} 1717}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 67bd626942a..69b039ca8d8 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -81,6 +81,7 @@
81#include <asm/asm.h> 81#include <asm/asm.h>
82#include <asm/branch.h> 82#include <asm/branch.h>
83#include <asm/byteorder.h> 83#include <asm/byteorder.h>
84#include <asm/cop2.h>
84#include <asm/inst.h> 85#include <asm/inst.h>
85#include <asm/uaccess.h> 86#include <asm/uaccess.h>
86#include <asm/system.h> 87#include <asm/system.h>
@@ -451,17 +452,27 @@ static void emulate_load_store_insn(struct pt_regs *regs,
451 */ 452 */
452 goto sigbus; 453 goto sigbus;
453 454
455 /*
456 * COP2 is available to the implementor for application-specific use.
457 * It is up to applications to register a notifier chain and do
458 * whatever they have to do, including possibly sending signals.
459 */
454 case lwc2_op: 460 case lwc2_op:
461 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
462 break;
463
455 case ldc2_op: 464 case ldc2_op:
465 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
466 break;
467
456 case swc2_op: 468 case swc2_op:
469 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
470 break;
471
457 case sdc2_op: 472 case sdc2_op:
458 /* 473 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
459 * These are the coprocessor 2 load/stores. The current 474 break;
460 * implementations don't use cp2 and cp2 should always be 475
461 * disabled in c0_status. So send SIGILL.
462 * (No longer true: The Sony Praystation uses cp2 for
463 * 3D matrix operations. Dunno if that thingy has a MMU ...)
464 */
465 default: 476 default:
466 /* 477 /*
467 * Pheeee... We encountered an yet unknown instruction or 478 * Pheeee... We encountered an yet unknown instruction or
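The new comment in this hunk leaves the unaligned-CP2 policy entirely to whoever registers on the chain. As a hedged illustration only (hypothetical handler name, SIGBUS chosen arbitrarily as the policy), a platform that does not support unaligned CP2 accesses might respond to the four action codes like this, registering the block via register_cu2_notifier() exactly as in the sketch after the traps.c hunk above:

        #include <linux/notifier.h>
        #include <linux/sched.h>
        #include <asm/cop2.h>

        /* Hypothetical policy: treat unaligned CP2 accesses as bus errors. */
        static int example_cu2_unaligned(struct notifier_block *nfb,
                                         unsigned long action, void *data)
        {
                switch (action) {
                case CU2_LWC2_OP:
                case CU2_LDC2_OP:
                case CU2_SWC2_OP:
                case CU2_SDC2_OP:
                        force_sig(SIGBUS, current);     /* data is the faulting pt_regs */
                        return NOTIFY_STOP;             /* handled, stop the chain */
                }

                return NOTIFY_OK;       /* e.g. CU2_EXCEPTION: not ours, keep walking */
        }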
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 162b29954ba..f25df73db92 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS
46 SCHED_TEXT 46 SCHED_TEXT
47 LOCK_TEXT 47 LOCK_TEXT
48 KPROBES_TEXT 48 KPROBES_TEXT
49 IRQENTRY_TEXT
49 *(.text.*) 50 *(.text.*)
50 *(.fixup) 51 *(.fixup)
51 *(.gnu.warning) 52 *(.gnu.warning)