Diffstat (limited to 'arch/mips/kernel')

-rw-r--r--	arch/mips/kernel/asm-offsets.c	  2
-rw-r--r--	arch/mips/kernel/entry.S	  3
-rw-r--r--	arch/mips/kernel/head.S		  3
-rw-r--r--	arch/mips/kernel/irq.c		 42
-rw-r--r--	arch/mips/kernel/r4k_switch.S	  5
-rw-r--r--	arch/mips/kernel/scall32-o32.S	  2
-rw-r--r--	arch/mips/kernel/scall64-64.S	  2
-rw-r--r--	arch/mips/kernel/scall64-n32.S	  2
-rw-r--r--	arch/mips/kernel/scall64-o32.S	  2
-rw-r--r--	arch/mips/kernel/setup.c	  6
-rw-r--r--	arch/mips/kernel/smp-mt.c	154
-rw-r--r--	arch/mips/kernel/smtc-asm.S	  7
-rw-r--r--	arch/mips/kernel/smtc.c		  1
-rw-r--r--	arch/mips/kernel/time.c		319
-rw-r--r--	arch/mips/kernel/traps.c	  2
-rw-r--r--	arch/mips/kernel/vmlinux.lds.S	 18

16 files changed, 223 insertions(+), 347 deletions(-)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index e9ce5b3721af..ff88b06f89df 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -22,7 +22,7 @@
 #define offset(string, ptr, member) \
 	__asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
 #define constant(string, member) \
-	__asm__("\n@@@" string "%x0" : : "ri" (member))
+	__asm__("\n@@@" string "%X0" : : "ri" (member))
 #define size(string, size) \
 	__asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
 #define linefeed text("")
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 417c08ac76eb..f10b6a19f8bf 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -83,7 +83,10 @@ FEXPORT(syscall_exit)
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
 /* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
 	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
 	ori	v1, v0, TCSTATUS_IXMT
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 8c6db0fc72f0..ddc1b71c9378 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -189,7 +189,8 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 
 	MTC0		zero, CP0_CONTEXT	# clear context register
 	PTR_LA		$28, init_thread_union
-	PTR_ADDIU	sp, $28, _THREAD_SIZE - 32
+	PTR_LI		sp, _THREAD_SIZE - 32
+	PTR_ADDU	sp, $28
 	set_saved_sp	sp, t0, t1
 	PTR_SUBU	sp, 4 * SZREG		# init stack pointer
 
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index dd24434392b6..9b0e49d63d7b 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -26,6 +26,48 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
+static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+
+int __devinit allocate_irqno(void)
+{
+	int irq;
+
+again:
+	irq = find_first_zero_bit(irq_map, NR_IRQS);
+
+	if (irq >= NR_IRQS)
+		return -ENOSPC;
+
+	if (test_and_set_bit(irq, irq_map))
+		goto again;
+
+	return irq;
+}
+
+EXPORT_SYMBOL_GPL(allocate_irqno);
+
+/*
+ * Allocate the 16 legacy interrupts for i8259 devices.  This happens early
+ * in the kernel initialization so treating allocation failure as BUG() is
+ * ok.
+ */
+void __init alloc_legacy_irqno(void)
+{
+	int i;
+
+	for (i = 0; i <= 16; i++)
+		BUG_ON(test_and_set_bit(i, irq_map));
+}
+
+void __devinit free_irqno(unsigned int irq)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(irq, irq_map);
+	smp_mb__after_clear_bit();
+}
+
+EXPORT_SYMBOL_GPL(free_irqno);
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
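The new allocator replaces scattered hard-coded IRQ numbers with a single bitmap, so platform and board code can claim numbers without colliding. A hedged usage sketch (mydev_attach/mydev_detach are invented names; a real caller would also route the hardware source and call request_irq()):

    /* Sketch of a board driver claiming and releasing a dynamic IRQ
     * number through the bitmap allocator added above.
     */
    static int mydev_attach(void)
    {
    	int irq = allocate_irqno();

    	if (irq < 0)
    		return irq;	/* -ENOSPC once all NR_IRQS bits are set */

    	/* ... wire the device to 'irq', then request_irq(irq, ...) ... */
    	return irq;
    }

    static void mydev_detach(int irq)
    {
    	free_irqno(irq);	/* clear_bit() fenced by smp_mb barriers */
    }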
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d5c8b82fed72..cc566cf12246 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -85,7 +85,12 @@
 	move	$28, a2
 	cpu_restore_nonscratch a1
 
+#if (_THREAD_SIZE - 32) < 0x10000
 	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
+#else
+	PTR_LI		t0, _THREAD_SIZE - 32
+	PTR_ADDU	t0, $28
+#endif
 	set_saved_sp	t0, t1, t2
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* Read-modify-writes of Status must be atomic on a VPE */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 720fac3435d5..a95f37de080e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -654,6 +654,8 @@ einval:	li	v0, -EINVAL
 	sys	sys_set_robust_list	2
 	sys	sys_get_robust_list	3	/* 4310 */
 	sys	sys_ni_syscall		0
+	sys	sys_getcpu		3
+	sys	sys_epoll_pwait		6
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3a34f62c8b1b..8fb0f60f657b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -469,3 +469,5 @@ sys_call_table:
 	PTR	sys_set_robust_list
 	PTR	sys_get_robust_list
 	PTR	sys_ni_syscall			/* 5270 */
+	PTR	sys_getcpu
+	PTR	sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 67b92a1d6c72..0da5ca2040ff 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -395,3 +395,5 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_set_robust_list
 	PTR	compat_sys_get_robust_list
 	PTR	sys_ni_syscall
+	PTR	sys_getcpu
+	PTR	sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 2875c4a3fa58..b9d00cae8b5f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -517,4 +517,6 @@ sys_call_table:
 	PTR	compat_sys_set_robust_list
 	PTR	compat_sys_get_robust_list	/* 4310 */
 	PTR	sys_ni_syscall
+	PTR	sys_getcpu
+	PTR	sys_epoll_pwait
 	.size	sys_call_table,.-sys_call_table
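All four syscall tables above gain the same two entries, sys_getcpu and sys_epoll_pwait, appended at the next free ABI-specific numbers (with the compat robust-list handlers kept for the 32-bit-on-64 ABIs). Both are ordinary syscalls from userland once wired up; a hedged userspace sketch using the generic syscall(2) wrapper (assumes a libc recent enough to define SYS_getcpu):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
    	unsigned cpu = 0, node = 0;

    	/* getcpu(&cpu, &node, tcache); the cache argument is unused here */
    	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
    		printf("running on CPU %u, node %u\n", cpu, node);
    	return 0;
    }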
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index fdbb508661c5..8f6e89697ccf 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -223,7 +223,11 @@ disable:
 
 #else  /* !CONFIG_BLK_DEV_INITRD */
 
-#define init_initrd() 0
+static unsigned long __init init_initrd(void)
+{
+	return 0;
+}
+
 #define finalize_initrd()	do {} while (0)
 
 #endif
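Turning the empty macro into a real inline-able function is a common cleanup: the function gives callers a typed return value and avoids macro pitfalls such as "statement with no effect" warnings, while the compiler still reduces it to nothing. A minimal before/after sketch (names suffixed _old/_new purely for illustration):

    /* Old style: a bare constant hiding behind a function-like macro. */
    #define init_initrd_old() 0

    /* New style: same zero cost after inlining, but type-checked. */
    static inline unsigned long init_initrd_new(void)
    {
    	return 0;
    }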
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3b5f3b632622..2ac19a6cbf68 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -140,15 +140,90 @@ static struct irqaction irq_call = {
 	.name		= "IPI_call"
 };
 
+static void __init smp_copy_vpe_config(void)
+{
+	write_vpe_c0_status(
+		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+	/* set config to be the same as vpe0, particularly kseg0 coherency alg */
+	write_vpe_c0_config( read_c0_config());
+
+	/* make sure there are no software interrupts pending */
+	write_vpe_c0_cause(0);
+
+	/* Propagate Config7 */
+	write_vpe_c0_config7(read_c0_config7());
+
+	write_vpe_c0_count(read_c0_count());
+}
+
+static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+	unsigned int ncpu)
+{
+	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+		return ncpu;
+
+	/* Deactivate all but VPE 0 */
+	if (tc != 0) {
+		unsigned long tmp = read_vpe_c0_vpeconf0();
+
+		tmp &= ~VPECONF0_VPA;
+
+		/* master VPE */
+		tmp |= VPECONF0_MVP;
+		write_vpe_c0_vpeconf0(tmp);
+
+		/* Record this as available CPU */
+		cpu_set(tc, phys_cpu_present_map);
+		__cpu_number_map[tc]	= ++ncpu;
+		__cpu_logical_map[ncpu]	= tc;
+	}
+
+	/* Disable multi-threading with TC's */
+	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+	if (tc != 0)
+		smp_copy_vpe_config();
+
+	return ncpu;
+}
+
+static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+{
+	unsigned long tmp;
+
+	if (!tc)
+		return;
+
+	/* bind a TC to each VPE, May as well put all excess TC's
+	   on the last VPE */
+	if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+	else {
+		write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
+
+		/* and set XTC */
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+	}
+
+	tmp = read_tc_c0_tcstatus();
+
+	/* mark not allocated and not dynamically allocatable */
+	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+	tmp |= TCSTATUS_IXMT;		/* interrupt exempt */
+	write_tc_c0_tcstatus(tmp);
+
+	write_tc_c0_tchalt(TCHALT_H);
+}
+
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
  * secondarys
  */
-void plat_smp_setup(void)
+void __init plat_smp_setup(void)
 {
-	unsigned long val;
-	int i, num;
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -167,75 +242,16 @@ void plat_smp_setup(void)
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	val = read_c0_mvpconf0();
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
 
 	/* we'll always have more TC's than VPE's, so loop setting everything
 	   to a sensible state */
-	for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
-		settc(i);
-
-		/* VPE's */
-		if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
-
-			/* deactivate all but vpe0 */
-			if (i != 0) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-
-				/* Record this as available CPU */
-				cpu_set(i, phys_cpu_present_map);
-				__cpu_number_map[i]	= ++num;
-				__cpu_logical_map[num]	= i;
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (i != 0) {
-				write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
 
-				/* set config to be the same as vpe0, particularly kseg0 coherency alg */
-				write_vpe_c0_config( read_c0_config());
-
-				/* make sure there are no software interrupts pending */
-				write_vpe_c0_cause(0);
-
-				/* Propagate Config7 */
-				write_vpe_c0_config7(read_c0_config7());
-			}
-
-		}
-
-		/* TC's */
-
-		if (i != 0) {
-			unsigned long tmp;
-
-			/* bind a TC to each VPE, May as well put all excess TC's
-			   on the last VPE */
-			if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
-				write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
-			else {
-				write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
-
-				/* and set XTC */
-				write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
-			}
-
-			tmp = read_tc_c0_tcstatus();
-
-			/* mark not allocated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;		/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
-
-			write_tc_c0_tchalt(TCHALT_H);
-		}
+		smp_tc_init(tc, mvpconf0);
+		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
 	}
 
 	/* Release config state */
@@ -243,7 +259,7 @@ void plat_smp_setup(void)
 
 	/* We'll wait until starting the secondaries before starting MVPE */
 
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
 }
 
 void __init plat_prepare_cpus(unsigned int max_cpus)
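The helper split above is easier to follow once MVPConf0's fields are spelled out: PTC holds the index of the last thread context (TC) and PVPE the index of the last virtual processing element (VPE), so the setup loop bounds come straight out of the register. A sketch of the decode under those assumptions (field positions quoted from memory of the MIPS MT ASE; the kernel's asm/mipsmtregs.h definitions are authoritative):

    /* Assumed layout: PTC = MVPConf0[7:0], PVPE = MVPConf0[13:10]. */
    #define MVPCONF0_PTC_SHIFT	0
    #define MVPCONF0_PTC	(0xff << MVPCONF0_PTC_SHIFT)
    #define MVPCONF0_PVPE_SHIFT	10
    #define MVPCONF0_PVPE	(0xf << MVPCONF0_PVPE_SHIFT)

    /* Highest TC number: plat_smp_setup() loops 0..last_tc inclusive. */
    static unsigned int last_tc(unsigned int mvpconf0)
    {
    	return (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
    }

    /* Highest VPE number: TCs above this all get bound to the last VPE. */
    static unsigned int last_vpe(unsigned int mvpconf0)
    {
    	return (mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT;
    }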
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 1cb9441f1474..921207c4a83c 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -101,7 +101,9 @@ FEXPORT(__smtc_ipi_vector)
 	lw	t0,PT_PADSLOT5(sp)
 	/* Argument from sender passed in stack pad slot 4 */
 	lw	a0,PT_PADSLOT4(sp)
-	PTR_LA	ra, _ret_from_irq
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	PTR_LA	ra, ret_from_irq
 	jr	t0
 
 	/*
@@ -119,7 +121,10 @@ LEAF(self_ipi)
 	subu	t1,sp,PT_SIZE
 	sw	ra,PT_EPC(t1)
 	sw	a0,PT_PADSLOT4(t1)
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
 	la	t2,ipi_decode
+	LONG_S	s0, TI_REGS($28)
 	sw	t2,PT_PADSLOT5(t1)
 	/* Save pre-disable value of TCStatus */
 	sw	t0,PT_TCSTATUS(t1)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index cc1f7474f7d7..3b78caf112f5 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -476,6 +476,7 @@ void mipsmt_prepare_cpus(void)
 			write_vpe_c0_compare(0);
 			/* Propagate Config7 */
 			write_vpe_c0_config7(read_c0_config7());
+			write_vpe_c0_count(read_c0_count());
 		}
 		/* enable multi-threading within VPE */
 		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index debe86c2f691..e535f86efa2f 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
+#include <linux/clocksource.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -67,15 +68,9 @@ int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
 int (*rtc_mips_set_mmss)(unsigned long);
 
 
-/* usecs per counter cycle, shifted to left by 32 bits */
-static unsigned int sll32_usecs_per_cycle;
-
 /* how many counter cycles in a jiffy */
 static unsigned long cycles_per_jiffy __read_mostly;
 
-/* Cycle counter value at the previous timer interrupt.. */
-static unsigned int timerhi, timerlo;
-
 /* expirelo is the count value for next CPU timer interrupt */
 static unsigned int expirelo;
 
@@ -93,7 +88,7 @@
 	return 0;
 }
 
-static void null_hpt_init(unsigned int count)
+static void __init null_hpt_init(void)
 {
 	/* nothing */
 }
@@ -128,186 +123,18 @@
 	return read_c0_count();
 }
 
-/* For use solely as a high precision timer.  */
-static void c0_hpt_init(unsigned int count)
-{
-	write_c0_count(read_c0_count() - count);
-}
-
 /* For use both as a high precision timer and an interrupt source.  */
-static void c0_hpt_timer_init(unsigned int count)
+static void __init c0_hpt_timer_init(void)
 {
-	count = read_c0_count() - count;
-	expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
-	write_c0_count(expirelo - cycles_per_jiffy);
+	expirelo = read_c0_count() + cycles_per_jiffy;
 	write_c0_compare(expirelo);
-	write_c0_count(count);
 }
 
 int (*mips_timer_state)(void);
 void (*mips_timer_ack)(void);
 unsigned int (*mips_hpt_read)(void);
-void (*mips_hpt_init)(unsigned int);
-
-/*
- * Gettimeoffset routines.  These routines returns the time duration
- * since last timer interrupt in usecs.
- *
- * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
- * Otherwise use calibrate_gettimeoffset()
- *
- * If the CPU does not have the counter register, you can either supply
- * your own gettimeoffset() routine, or use null_gettimeoffset(), which
- * gives the same resolution as HZ.
- */
-
-static unsigned long null_gettimeoffset(void)
-{
-	return 0;
-}
-
-
-/* The function pointer to one of the gettimeoffset funcs.  */
-unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;
-
-
-static unsigned long fixed_rate_gettimeoffset(void)
-{
-	u32 count;
-	unsigned long res;
-
-	/* Get last timer tick in absolute kernel time */
-	count = mips_hpt_read();
-
-	/* .. relative to previous jiffy (32 bits is enough) */
-	count -= timerlo;
-
-	__asm__("multu	%1,%2"
-		: "=h" (res)
-		: "r" (count), "r" (sll32_usecs_per_cycle)
-		: "lo", GCC_REG_ACCUM);
-
-	/*
-	 * Due to possible jiffies inconsistencies, we need to check
-	 * the result so that we'll get a timer that is monotonic.
-	 */
-	if (res >= USECS_PER_JIFFY)
-		res = USECS_PER_JIFFY - 1;
-
-	return res;
-}
-
-
-/*
- * Cached "1/(clocks per usec) * 2^32" value.
- * It has to be recalculated once each jiffy.
- */
-static unsigned long cached_quotient;
-
-/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
-static unsigned long last_jiffies;
-
-/*
- * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
- */
-static unsigned long calibrate_div32_gettimeoffset(void)
-{
-	u32 count;
-	unsigned long res, tmp;
-	unsigned long quotient;
-
-	tmp = jiffies;
-
-	quotient = cached_quotient;
-
-	if (last_jiffies != tmp) {
-		last_jiffies = tmp;
-		if (last_jiffies != 0) {
-			unsigned long r0;
-			do_div64_32(r0, timerhi, timerlo, tmp);
-			do_div64_32(quotient, USECS_PER_JIFFY,
-				    USECS_PER_JIFFY_FRAC, r0);
-			cached_quotient = quotient;
-		}
-	}
-
-	/* Get last timer tick in absolute kernel time */
-	count = mips_hpt_read();
-
-	/* .. relative to previous jiffy (32 bits is enough) */
-	count -= timerlo;
-
-	__asm__("multu	%1,%2"
-		: "=h" (res)
-		: "r" (count), "r" (quotient)
-		: "lo", GCC_REG_ACCUM);
-
-	/*
-	 * Due to possible jiffies inconsistencies, we need to check
-	 * the result so that we'll get a timer that is monotonic.
-	 */
-	if (res >= USECS_PER_JIFFY)
-		res = USECS_PER_JIFFY - 1;
-
-	return res;
-}
-
-static unsigned long calibrate_div64_gettimeoffset(void)
-{
-	u32 count;
-	unsigned long res, tmp;
-	unsigned long quotient;
-
-	tmp = jiffies;
-
-	quotient = cached_quotient;
-
-	if (last_jiffies != tmp) {
-		last_jiffies = tmp;
-		if (last_jiffies) {
-			unsigned long r0;
-			__asm__(".set	push\n\t"
-				".set	mips3\n\t"
-				"lwu	%0,%3\n\t"
-				"dsll32	%1,%2,0\n\t"
-				"or	%1,%1,%0\n\t"
-				"ddivu	$0,%1,%4\n\t"
-				"mflo	%1\n\t"
-				"dsll32	%0,%5,0\n\t"
-				"or	%0,%0,%6\n\t"
-				"ddivu	$0,%0,%1\n\t"
-				"mflo	%0\n\t"
-				".set	pop"
-				: "=&r" (quotient), "=&r" (r0)
-				: "r" (timerhi), "m" (timerlo),
-				  "r" (tmp), "r" (USECS_PER_JIFFY),
-				  "r" (USECS_PER_JIFFY_FRAC)
-				: "hi", "lo", GCC_REG_ACCUM);
-			cached_quotient = quotient;
-		}
-	}
-
-	/* Get last timer tick in absolute kernel time */
-	count = mips_hpt_read();
-
-	/* .. relative to previous jiffy (32 bits is enough) */
-	count -= timerlo;
-
-	__asm__("multu	%1,%2"
-		: "=h" (res)
-		: "r" (count), "r" (quotient)
-		: "lo", GCC_REG_ACCUM);
-
-	/*
-	 * Due to possible jiffies inconsistencies, we need to check
-	 * the result so that we'll get a timer that is monotonic.
-	 */
-	if (res >= USECS_PER_JIFFY)
-		res = USECS_PER_JIFFY - 1;
-
-	return res;
-}
-
+void (*mips_hpt_init)(void) __initdata = null_hpt_init;
+unsigned int mips_hpt_mask = 0xffffffff;
 
 /* last time when xtime and rtc are sync'ed up */
 static long last_rtc_update;
@@ -334,18 +161,10 @@ void local_timer_interrupt(int irq, void *dev_id)
  */
 irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	unsigned long j;
-	unsigned int count;
-
 	write_seqlock(&xtime_lock);
 
-	count = mips_hpt_read();
 	mips_timer_ack();
 
-	/* Update timerhi/timerlo for intra-jiffy calibration.  */
-	timerhi += count < timerlo;		/* Wrap around */
-	timerlo = count;
-
 	/*
 	 * call the generic timer interrupt handling
 	 */
@@ -368,47 +187,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 		}
 	}
 
-	/*
-	 * If jiffies has overflown in this timer_interrupt, we must
-	 * update the timer[hi]/[lo] to make fast gettimeoffset funcs
-	 * quotient calc still valid. -arca
-	 *
-	 * The first timer interrupt comes late as interrupts are
-	 * enabled long after timers are initialized.  Therefore the
-	 * high precision timer is fast, leading to wrong gettimeoffset()
-	 * calculations.  We deal with it by setting it based on the
-	 * number of its ticks between the second and the third interrupt.
-	 * That is still somewhat imprecise, but it's a good estimate.
-	 * --macro
-	 */
-	j = jiffies;
-	if (j < 4) {
-		static unsigned int prev_count;
-		static int hpt_initialized;
-
-		switch (j) {
-		case 0:
-			timerhi = timerlo = 0;
-			mips_hpt_init(count);
-			break;
-		case 2:
-			prev_count = count;
-			break;
-		case 3:
-			if (!hpt_initialized) {
-				unsigned int c3 = 3 * (count - prev_count);
-
-				timerhi = 0;
-				timerlo = c3;
-				mips_hpt_init(count - c3);
-				hpt_initialized = 1;
-			}
-			break;
-		default:
-			break;
-		}
-	}
-
 	write_sequnlock(&xtime_lock);
 
 	/*
@@ -476,12 +254,11 @@ asmlinkage void ll_local_timer_interrupt(int irq)
  * 1) board_time_init() -
  *	a) (optional) set up RTC routines,
  *	b) (optional) calibrate and set the mips_hpt_frequency
- *	   (only needed if you intended to use fixed_rate_gettimeoffset
- *	    or use cpu counter as timer interrupt source)
+ *	   (only needed if you intended to use cpu counter as timer interrupt
+ *	    source)
 * 2) setup xtime based on rtc_mips_get_time().
- * 3) choose a appropriate gettimeoffset routine.
- * 4) calculate a couple of cached variables for later usage
- * 5) plat_timer_setup() -
+ * 3) calculate a couple of cached variables for later usage
+ * 4) plat_timer_setup() -
 *	a) (optional) over-write any choices made above by time_init().
 *	b) machine specific code should setup the timer irqaction.
 *	c) enable the timer interrupt
@@ -533,13 +310,48 @@ static unsigned int __init calibrate_hpt(void)
 	} while (--i);
 	hpt_end = mips_hpt_read();
 
-	hpt_count = hpt_end - hpt_start;
+	hpt_count = (hpt_end - hpt_start) & mips_hpt_mask;
 	hz = HZ;
 	frequency = (u64)hpt_count * (u64)hz;
 
 	return frequency >> log_2_loops;
 }
 
+static cycle_t read_mips_hpt(void)
+{
+	return (cycle_t)mips_hpt_read();
+}
+
+static struct clocksource clocksource_mips = {
+	.name		= "MIPS",
+	.read		= read_mips_hpt,
+	.is_continuous	= 1,
+};
+
+static void __init init_mips_clocksource(void)
+{
+	u64 temp;
+	u32 shift;
+
+	if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read)
+		return;
+
+	/* Calclate a somewhat reasonable rating value */
+	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+	/* Find a shift value */
+	for (shift = 32; shift > 0; shift--) {
+		temp = (u64) NSEC_PER_SEC << shift;
+		do_div(temp, mips_hpt_frequency);
+		if ((temp >> 32) == 0)
+			break;
+	}
+	clocksource_mips.shift = shift;
+	clocksource_mips.mult = (u32)temp;
+	clocksource_mips.mask = mips_hpt_mask;
+
+	clocksource_register(&clocksource_mips);
+}
+
 void __init time_init(void)
 {
 	if (board_time_init)
@@ -555,41 +367,21 @@ void __init time_init(void)
 			        -xtime.tv_sec, -xtime.tv_nsec);
 
 	/* Choose appropriate high precision timer routines.  */
-	if (!cpu_has_counter && !mips_hpt_read) {
+	if (!cpu_has_counter && !mips_hpt_read)
 		/* No high precision timer -- sorry.  */
 		mips_hpt_read = null_hpt_read;
-		mips_hpt_init = null_hpt_init;
-	} else if (!mips_hpt_frequency && !mips_timer_state) {
+	else if (!mips_hpt_frequency && !mips_timer_state) {
 		/* A high precision timer of unknown frequency.  */
-		if (!mips_hpt_read) {
+		if (!mips_hpt_read)
 			/* No external high precision timer -- use R4k.  */
 			mips_hpt_read = c0_hpt_read;
-			mips_hpt_init = c0_hpt_init;
-		}
-
-		if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
-		    (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
-		    (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
-			/*
-			 * We need to calibrate the counter but we don't have
-			 * 64-bit division.
-			 */
-			do_gettimeoffset = calibrate_div32_gettimeoffset;
-		else
-			/*
-			 * We need to calibrate the counter but we *do* have
-			 * 64-bit division.
-			 */
-			do_gettimeoffset = calibrate_div64_gettimeoffset;
 	} else {
 		/* We know counter frequency.  Or we can get it.  */
 		if (!mips_hpt_read) {
 			/* No external high precision timer -- use R4k.  */
 			mips_hpt_read = c0_hpt_read;
 
-			if (mips_timer_state)
-				mips_hpt_init = c0_hpt_init;
-			else {
+			if (!mips_timer_state) {
 				/* No external timer interrupt -- use R4k.  */
 				mips_hpt_init = c0_hpt_timer_init;
 				mips_timer_ack = c0_timer_ack;
@@ -598,16 +390,9 @@ void __init time_init(void)
 		if (!mips_hpt_frequency)
 			mips_hpt_frequency = calibrate_hpt();
 
-		do_gettimeoffset = fixed_rate_gettimeoffset;
-
 		/* Calculate cache parameters.  */
 		cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
 
-		/* sll32_usecs_per_cycle = 10^6 * 2^32 / mips_counter_freq */
-		do_div64_32(sll32_usecs_per_cycle,
-			    1000000, mips_hpt_frequency / 2,
-			    mips_hpt_frequency);
-
 		/* Report the high precision timer rate for a reference.  */
 		printk("Using %u.%03u MHz high precision timer.\n",
 		       ((mips_hpt_frequency + 500) / 1000) / 1000,
@@ -619,7 +404,7 @@ void __init time_init(void)
 		mips_timer_ack = null_timer_ack;
 
 	/* This sets up the high precision timer for the first interrupt.  */
-	mips_hpt_init(mips_hpt_read());
+	mips_hpt_init();
 
 	/*
 	 * Call board specific timer interrupt setup.
@@ -633,6 +418,8 @@ void __init time_init(void)
 	 * is not invoked accidentally.
 	 */
 	plat_timer_setup(&timer_irqaction);
+
+	init_mips_clocksource();
 }
 
 #define FEBRUARY		2
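The shift/mult search in init_mips_clocksource() above is the generic clocksource scaling scheme: choose the largest shift for which mult = (NSEC_PER_SEC << shift) / mips_hpt_frequency still fits in 32 bits; the timekeeping core then converts counter deltas as ns = (cycles * mult) >> shift. A standalone worked example with an assumed 100 MHz counter (plain userspace C, no kernel headers):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint64_t NSEC_PER_SEC = 1000000000ULL;
    	const uint64_t freq = 100000000;	/* 100 MHz CP0 count */
    	uint64_t mult = 0;
    	uint32_t shift;

    	/* Same search as init_mips_clocksource(), with plain division */
    	for (shift = 32; shift > 0; shift--) {
    		mult = (NSEC_PER_SEC << shift) / freq;
    		if ((mult >> 32) == 0)
    			break;
    	}
    	printf("shift=%u mult=%llu\n", shift, (unsigned long long)mult);
    	/* 100 MHz gives shift=28, mult=10<<28: exactly 10 ns per cycle */

    	uint64_t cycles = 12345;
    	printf("%llu cycles -> %llu ns\n",
    	       (unsigned long long)cycles,
    	       (unsigned long long)((cycles * mult) >> shift)); /* 123450 */
    	return 0;
    }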
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cce8313ec27d..9fda1b8be3a7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1111,7 +1111,7 @@ static struct shadow_registers {
 static void mips_srs_init(void)
 {
 	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
-	printk(KERN_INFO "%d MIPSR2 register sets available\n",
+	printk(KERN_INFO "%ld MIPSR2 register sets available\n",
 	       shadow_registers.sr_supported);
 	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
 }
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 0bb9cd889456..79f0317d84ac 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -50,6 +50,16 @@ SECTIONS
   /* writeable */
   .data : {			/* Data */
     . = . + DATAOFFSET;		/* for CONFIG_MAPPED_KERNEL */
+    /*
+     * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which
+     * limits the maximum alignment to at most 32kB and results in the following
+     * warning:
+     *
+     *   CC      arch/mips/kernel/init_task.o
+     * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
+     * is greater than maximum object file alignment.  Using 32768
+     */
+    . = ALIGN(_PAGE_SIZE);
     *(.data.init_task)
 
     *(.data)
@@ -91,13 +101,7 @@ SECTIONS
 
   __initcall_start = .;
   .initcall.init : {
-	*(.initcall1.init)
-	*(.initcall2.init)
-	*(.initcall3.init)
-	*(.initcall4.init)
-	*(.initcall5.init)
-	*(.initcall6.init)
-	*(.initcall7.init)
+	INITCALLS
   }
   __initcall_end = .;
 
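The INITCALLS macro replaces the hand-maintained section list, so every architecture's linker script picks up the same set of initcall levels from the generic header. For background, initcalls land in those per-level sections through the __define_initcall machinery; a simplified sketch of the 2.6-era pattern (reconstructed from memory, so treat the details as approximate):

    /* Each initcall level drops a function pointer into its own
     * .initcallN.init section; the linker script gathers the sections
     * in level order and do_initcalls() walks the resulting array.
     */
    typedef int (*initcall_t)(void);

    #define __define_initcall(level, fn) \
    	static initcall_t __initcall_##fn \
    	__attribute__((__section__(".initcall" level ".init"), used)) = fn

    #define core_initcall(fn)	__define_initcall("1", fn)
    #define device_initcall(fn)	__define_initcall("6", fn)

    static int mydrv_init(void)		/* hypothetical driver init */
    {
    	return 0;
    }
    device_initcall(mydrv_init);	/* runs at level 6, like module_init */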